From c7ed8f77ccf6bbbcc47951cd7677357fff28a07d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Dec 2023 16:24:37 +0000 Subject: [PATCH 001/157] chore(deps): bump golang.org/x/net from 0.18.0 to 0.19.0 Bumps [golang.org/x/net](https://github.com/golang/net) from 0.18.0 to 0.19.0. - [Commits](https://github.com/golang/net/compare/v0.18.0...v0.19.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- vendor/modules.txt | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 27968de14..fd182911c 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/satori/go.uuid v1.2.0 github.com/stretchr/testify v1.8.4 - golang.org/x/net v0.18.0 + golang.org/x/net v0.19.0 google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.31.0 k8s.io/api v0.28.4 diff --git a/go.sum b/go.sum index 2f2dc83d4..421aaf57d 100644 --- a/go.sum +++ b/go.sum @@ -347,8 +347,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= -golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/vendor/modules.txt b/vendor/modules.txt index 2c01ed811..5709e22a5 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -549,7 +549,7 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf ## explicit; go 1.18 golang.org/x/exp/constraints golang.org/x/exp/slices -# golang.org/x/net v0.18.0 +# golang.org/x/net v0.19.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html From 6eb6315bb732f50d9fe2fe7a7bc27512a3485647 Mon Sep 17 00:00:00 2001 From: weizhichen Date: Tue, 5 Dec 2023 02:44:10 +0000 Subject: [PATCH 002/157] chore: support deploying aznfs-mount in deploy dir --- deploy/csi-blob-node.yaml | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/deploy/csi-blob-node.yaml b/deploy/csi-blob-node.yaml index af6190f30..1d7718064 100644 --- a/deploy/csi-blob-node.yaml +++ b/deploy/csi-blob-node.yaml @@ -130,6 +130,7 @@ spec: - "--nodeid=$(KUBE_NODE_NAME)" - "--user-agent-suffix=OSS-kubectl" - "--metrics-address=0.0.0.0:29635" + - "--enable-aznfs-mount=true" ports: - containerPort: 29633 name: healthz @@ -170,12 +171,36 @@ spec: name: azure-cred - mountPath: /mnt name: blob-cache + - mountPath: /opt/microsoft/aznfs/data + name: aznfs-data + - mountPath: /lib/modules + name: lib-modules + readOnly: true resources: limits: memory: 2100Mi requests: cpu: 10m memory: 20Mi + - name: aznfswatchdog + image: mcr.microsoft.com/k8s/csi/blob-csi:latest + command: + - 
"aznfswatchdog" + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + volumeMounts: + - mountPath: /opt/microsoft/aznfs/data + name: aznfs-data + - mountPath: /var/lib/kubelet/ + mountPropagation: Bidirectional + name: mountpoint-dir volumes: - name: host-usr hostPath: @@ -206,4 +231,12 @@ spec: path: /mnt type: DirectoryOrCreate name: blob-cache + - hostPath: + path: /opt/microsoft/aznfs/data + type: DirectoryOrCreate + name: aznfs-data + - name: lib-modules + hostPath: + path: /lib/modules + type: DirectoryOrCreate --- From 8b18867a0ff41911ac6764ceea69a96bbadf4cb8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Dec 2023 16:07:10 +0000 Subject: [PATCH 003/157] chore(deps): bump actions/setup-go from 4 to 5 Bumps [actions/setup-go](https://github.com/actions/setup-go) from 4 to 5. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/v4...v5) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/darwin.yaml | 2 +- .github/workflows/linux.yaml | 2 +- .github/workflows/static.yaml | 2 +- .github/workflows/trivy.yaml | 2 +- .github/workflows/windows.yaml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 35a37aa13..954f8dd4e 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -38,7 +38,7 @@ jobs: steps: - name: Set up Go 1.x - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: go-version: ^1.18 id: go diff --git a/.github/workflows/darwin.yaml b/.github/workflows/darwin.yaml index 83681c3f1..da224897e 100644 --- a/.github/workflows/darwin.yaml +++ b/.github/workflows/darwin.yaml @@ -8,7 +8,7 @@ jobs: runs-on: macos-latest steps: - name: Set up Go 1.x - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: go-version: ^1.16 id: go diff --git a/.github/workflows/linux.yaml b/.github/workflows/linux.yaml index 80cbaf9b4..011ac4c03 100644 --- a/.github/workflows/linux.yaml +++ b/.github/workflows/linux.yaml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Go 1.x - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: go-version: ^1.16 id: go diff --git a/.github/workflows/static.yaml b/.github/workflows/static.yaml index 60e9ac030..0445903bc 100644 --- a/.github/workflows/static.yaml +++ b/.github/workflows/static.yaml @@ -8,7 +8,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Go 1.x - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: go-version: ^1.19 - uses: actions/checkout@master diff --git a/.github/workflows/trivy.yaml b/.github/workflows/trivy.yaml index 55c85e579..9690bbb29 100644 --- a/.github/workflows/trivy.yaml +++ b/.github/workflows/trivy.yaml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Go 1.x - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: go-version: ^1.16 id: go diff --git a/.github/workflows/windows.yaml b/.github/workflows/windows.yaml index fbba0c715..6b5e66aa4 100644 --- a/.github/workflows/windows.yaml +++ b/.github/workflows/windows.yaml @@ -11,7 +11,7 @@ jobs: runs-on: ${{ matrix.platform }} steps: - name: Set up Go 1.x - 
uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: go-version: ^1.16 id: go From ef64c348d94a44f6a25edbfc193a4fbecf7e622d Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Thu, 7 Dec 2023 02:38:19 +0000 Subject: [PATCH 004/157] feat: install blobfuse 2.1.2 as default version on v1.22.4, v1.23.2 --- charts/v1.22.4/blob-csi-driver-v1.22.4.tgz | Bin 5839 -> 5841 bytes charts/v1.22.4/blob-csi-driver/values.yaml | 2 +- charts/v1.23.2/blob-csi-driver-v1.23.2.tgz | Bin 5881 -> 5881 bytes charts/v1.23.2/blob-csi-driver/values.yaml | 2 +- deploy/v1.22.4/csi-blob-node.yaml | 2 +- deploy/v1.23.2/csi-blob-node.yaml | 2 +- 6 files changed, 4 insertions(+), 4 deletions(-) diff --git a/charts/v1.22.4/blob-csi-driver-v1.22.4.tgz b/charts/v1.22.4/blob-csi-driver-v1.22.4.tgz index d09dbfdda03f9a1e4caf710d855ceb2956168b7e..6000ed2e3cbbddb000023ece689533356fc1b493 100644 GIT binary patch delta 4887 zcmV+y6X@*EEzvEIJAZxqbK5o&aDUccfl+#Mz57DF{OW1OKRA|>)5MWImXqFnosI`4 zR}#)7zyhFR)%E}W4FFPnfqGf7bN30IPAqb#J$>H%2t$+4m`{eKk)Y=^j)g=-V z^B-Ezrd4d*e@Ve1`2t)JM!RU%uHi7u{A_eOjl-G;!sR5C`r+9h5eM|nXLo2M8OIYq z=c55K(o>^2fG#S?RBcwo_}Y!$mqjVg{~7ip5W7_X%jbW)-EOsu^Z&5Z+0Xx%C?voW z=++Rr!vd}$WPe>0xV#Y%move}vf;8oz4h6hf3seVqc;WC)!gEv`bL;4to@sg$AUHZx2&2y1%G6-|l7y=c0Oj>#nA z;DpR`Tz^I-CKPy6;d3-HC*)*?B_v%sZ`SnltH}39hMO+>^|#s^n{&hKE30)+41n3R=MKwkJ{T9Wr2peF5z7pS8Z8lXrG-e#l zS=2xykg7Yz*cX7vSUp5vG1U(v0qQqL>?>_3dVh>c&U_zuSB_i!)~!t6#9kDphe?caq22#L!ri$GKtkMEfd)D z^gq6jq@`Ns0*P?CxGsqrkVs{Oxg)KK%mXKW-ZL}Gt?s@=0ZZ?xPBxgaL46D5*Pc5 z`Uu=eL2G$~2Z<(%Q`Mg=3>lXZRWBt&Jb$8SG3gK%h%(9$B#M}_-Ld=^U7+pSg`J=MxUPqkWRN-TuUGBuQ3D)n@(`zxK* zU4)eabM#BI)_B#>BE&9qks1)k^j{4rSz}-OL8y9nQD-6kQzqmPY5YZOxQ@Z9ioVm{Do(^0GSiNsGi{C^&S$U?bcq@*AyRqG_REbXl()_x|j)LN>h(Rn6~ z;zItdx(<)WL`~;d#VPGS0;O6Rj~_|q1CO<@#^RuRn0HZKuN0j5(8z13Kub_PrpQ87 zTVbpBABd15c>&`wxU!4tLpE}!;6*;v*TNr9j!+5XH>ct}gG@~?t!1^O9)G`@Leg&s z)F%|aFh2_5Bh1fS7linQ$mzfXDz!KylRUr=pDCVU;w$acMeX&p%Rxy$096Ry`Rr~S z3Ap0yAw%UZ_gG^jR7mXmNrgoNQs9Lvquy>DHje7HY8^|xPFk_kXg4~hq5$~`KU{`D z-xD8(QIM$_6kgQ-VYY#$W`C@?KZhQc(0jM2m(V-M?i8-DoOV$&&}KRgl`g*_0gNOL z!U5GFJ(AJ8fwHx(fCoefWBIgBPEN{t_C+;cvw{s%VdeY{wxih3QrK?{9@jKfMj{)sZBQN-Wg zqF(>q=MQhuVEF#>E&Ao`dN>$OuOu<`Wt{sMaX8>4WT| z<_zlCv)I;ErHgs~{-@&S%(LC`hf62~auc#bn-~UzxVIvHu$yjJ3gh zcGi>f21qkCYI~z<0EQZgNAD?>{;~mq*yY@97n{f<7t1L8^}^E^z>9mj-%J_y~4_s>iK^= z1wRBX8ZvxlC}_p}?;IQ-7Uuu)(aApk^CIQ(vH5p2BSBYN31i}eoQH54sBUqm&_#c5 zK0VcH8m#{iV(Mw*fyS<1ZExt50S))~=pYZqgaWE-BT&KBk(0i{kuOohWEtqG+4y22 zjtYPBat53e573{=Ql&0BKG1)Xz*vWZLr}mEI2BRP5$_ZlDVB5n+AWfR?@p<>Y9p0Wm?U$ahO-}>u2~>F9~)1 z$g65i1!pb`LPqrtBI92#!-bejhmALGd2ISRck_JPlPE{R`#2f-vqjE?9pNJVL18OZ z`4jyfF`4Vtr>C0Q05!z4sm0LNgjhZsS$5)?lnqRn*GRH|?||bDTl=Xn<88ay6H+B(+MoChZb=;)x8z1$v5n^b?*06s@H!@ zkYvlWkmc)tr`0|w*#Dh&tF_PndXaKZsMke3ZP*7uWi6W!hQm-aXYE=N^GrQFYOW>9 zBoz-DFAoMP_B~gb@HG^#7RHCAqo#kW&>~b`HvGkM@h75$dO#&QRNx{69dAx!Sa$Tu zdqW~wg}Sj{SGv=S44Z2+YGbv$!lf)9n~KT&i^*p?UV9vKoNU0$x*hj(sYURcP|_e+|PSpf2=GiiSVX`M{Q zLD#Z$7OMS;vQ!HVG6JX`YNwlJkP=^ShJ51v&C*^@rqpZpWl|N}B3o=>sdk=t#n9!# z*mjA`fTZ>EIWxnea>>8WU`sLN9OR3E&zoRObdmPzRtZ{G9i&0-j36zKWyTKPT;Ehn zb&*&$c)kY7T;Q>S@-m^zc36MGK3pYemNb#fV3poxjoW0n^C=n&)&;oAu)pF=?LpJ$ z2&w&O7hOkGk^p*U2f4I;nea$TI2SY7pv9@JoATsj;_#KKaX01O zArnsPqGp`?*$~XVFA9IcRVL?i)flJwq50vR=`f-WOO}S&M7B*hJjok0F3-I@@RrkkR}|No z?D=V3C6ESEkH%u!9f%Tu$v$yH#^tHqI>~dJW~*3wW~uz=@oax1{!0X0mFn5Ch4|;V zT`B!KNp}sI*V~Joih3DiS>80vmV&(<{`J-@XI3S0aZZDM5drFjj8J*{<4@xgKe*`IjKcSv1;w`N{n40AKH=~q z%&%cw21te_&RXT*IckSzfz#=>s`o7KlDw%*%4M-3StV&VY9mS>y3J;_DG@Ji6pGp= 
z%@{-VdQpGy5(fTxb8|Jiy}G{q*T3!b%UUdhw}8~TzO3MI9R@e&ZxiU_M-o#f3Ysra@Jo(H^A?yj!-%e)phpCNMiS1E5XXHFbwvGUk2Bg!%zL;jTZZ> zvzvcb?1w zGa>w@3tSe}I|uAtQh#pI56fp#hFR?(Y?!=GR3C%KIDT1HGsvCrzBq_Q0^vPry4aCC z60)g%3a%`S$mSih#U4V*?8>b}_8jSYezN#<%g%jdR=ieURRX!aZ|s@0*4tmK>;1C& z&R&1QBlcHkimB&=V-%P}Xe7V^&KW<|8RUO`U4K>5_uuu^zdWIya64vOPW2{Ryxj>VJtrL z*2qB^7R{=8LB`e{ysM*xJwkmy^m5KO-e)(89$&TkzGg1+5KxjfB3K1;+D&V!5Qj29O>)&!-4p z4U;SoDlS>+skuc!89=>=6FP}65FvCjm{7KO+-nBA5;%4xnUxY64W_|xbaQrbar^G#^4qh;X+fQfzy1l&W4|TxwcKG?zRwPSzDOJ)`^qxg3SdR`)Z!kK0chSH7eAPR<>64!k zDHe%@$PHuL`=s3e?xb8juhz{sQ+Q^llim?CRP@K6de?(5{p;K7{)gMq<@v|ktFxPb zmNDdIY)RH2Jfz$Bq*$#ugL%cx-n(r`f+b)coZ3+p)cclsiAU?$lUoJ!GYht*xprI` z;WJU(xasKWsgq?AAOQ=Lh7uzXr$7Gm@$?eMbKza5@^lerQs0jHDZ5;FlklNu8u z0cVpv6G{O~lZX>D0auf}6F?HzUk^4Cr5y8>ZmabdnO#hfSCbADA0#YCZ+u*t#6*mwqs)tqNRyWHPM6LKcZP2rmM-?L`yO`AWM}DKb|9~swS!-eak{|{Sx|L@C`Qs60X=%p(`Ap_H@NS5x< zR@ji6UmUf865RA|qV?`2%3c1mteQdZ5HHsuOSxFZu>^|O;n={fB%MX~xa#}4d)4~C zQ#RNCo9_fx>i?sYg8uIu9q%W<|NkQ8)%1UAQ0_JVUi0tEzU<4s?8^(6{}%uN|NmvV JU<3fD005cOwzU8N delta 4846 zcmV@L7tpWQmHAdbh$40!Wwin(me zap1q&S!uOet>eQ({co$)D*oGQA0NGHA0Bny9vmOHkB(op+JA@bw@0r~Yj-SEmq1-KxLcG0X|!(o{D+30i{hcyp`%SkBp!_(g*4(OfFZqZ0GjwgW5 zMgwG|r$%uAT~v^%+N_B2r5n93i&C8bGweqocB=rE&;NG2-D(%-|6%7~KmVVjkN{7h zTSMp;3%G)ib$?Od@h&IB9FhRXu=)@Qd03Ly?56GSpT@1iCXqInnH0;eEBC^>2- z0;Pc^o}{y0JqRP;zhpjf=Up@y51G8=Ab`pmLarc}oXv<*6~NacrUW-kD`jXNG>#gr zRU~~f0}4W1a&`;rQB10Ys!E1lr_pY$CP<;+b$C)e$HGsm zIdBEJ0zrP?q=&hwkQ>pJq-w6uA}?MrE~8tYxM%7~t@ipgx`Huqpe`WE5Tk$?M*-u2 z1h^ceP>*i`MFKqZbqZ9IA#{SYxC(`W!?=s4Qih`2%rM;`tm!dVG$j^yqUka^CXl+b13kiYa`jL>p9hVbHTq2wC7LKsRe!QDWL!p6y_5{`h<~ERq(fLB$|z?{qT4wVAf=kpSWSA3?#y6Q z6mI7T?r{+MfV%2;0i=r_6z)^;StwC&w_0uVSStfP)@qq4u@E-P)KGG%)YF;nuXI*- z5mpM!(J#$f<5fe85WCPtYCs&*e>J3JjeYG0q3Yd5orU;MnUF(}Um3q!=m|L`Vt>&; zE{l9mO(>5DeCva!lt9Wj@))RomB@n-sD~IupGJeEdJQRw<-`@2jLR=ZQ3m z3;DO|Iy@c|HJxJ>r?meFlxk%>ek7R>Jl4J%i-Yc9-bHo2QgG%&Bd?(XEkX5|A`4Y* zg{|JdCqjzk1&qhw$}Xx8*~p!O7x_?M3x7O0LM4pfoQm@VGBv%lmerDa{C{E!NxvOX zpHTS1{3w8rFh6x&5aMSdrvneD)Z&m#@&Mm|rg(;lue4Jawb#=w2PORgR3Uihv)geb z;F7cZ43*p5V~vqeA+hf#6&4Lhffuffdb@GhII7#Kbu9HdX~j;Xl~fcUKjHg}5a@g2 z!!QamHG{&7`ajGz(A11I_kZWm!xDP$7WEQ(XV{&>C6?1JY6jX&$Dz{Y*Cc?E#6dWq z8l*=udN)wE)+O+O2w^Or*4wvl%X;=%HDBch=Y^H+Q;Fc;=~#Tl(w%y2@@-qRmBO{^ z9W$X$73-h$j>O1aZ46jOT+6~(eBdk!lO5a@VDD#6BwSDz)O3G8x&fY(G6o}m(+MbM z>WarG`8|rpV{+d`^^4KSnSvjvx4QlZHtKymU#NG!yDU`n%>68r`ebD7EQ>?4srj z^QKR3oAHyHp3{=EF7n`(U|Oq_)CLd`UZY=_wzskWD;kWo!F+n!lk^5ke`yESzC5WE z{l5}|(8m%)b9iyxA2sB?d@_wy@t=d^PQm|sbo};s@Be*{a;OttU zIcN`x_Ef~GrctY1GvrRey<36IxHcq~k$8hbAFu$mf$Y{|ZR<)OISx%M*OP|_)VKl- z*I3xI=yjirCR9mv5p)^uitxxl`GZr|7Hq)2wXH|_{31qiuvC; zI6f@Q|Kp>#`}ohZl!u4r-_eW&U2P?di4Srf!bzaI#hpSI{k{44SgUEU{(Xq4r;P_1 zyMDF3p;HDl+{1%|JQx!SsIHAb1y@H-`U*$BL=BT=pr>Zzi-|ZYf5?j&a85ixe<(|p zy6E^o|49O49iqCEA_MB_zcp{QGwoD1ycq5jt8wI6Sw=dRa63;qHqPBf&Ca+6Nr{D= zP&r2RzX<0qqMmPn`8(b+qCd3HOlhh5|I|_aroK&MSP(E}w;9DIs+ddwj1$^m!(mu( zR2ZB5;Q=LcJ2y5Be`KwscgfT>6^qKWnq}iKuj1Fw@Tp!B>iUsa)tU;K#PJ zzg~t5F_#V-Z`|_O^mT6M`L-ufj)eDdGV-U3oC!O^Mf!unR;uy``YmEI*Q<|@HMIe1 zh-p)cp{)tAd^WP|#4{-ym@u!AWdFv2zXW1II5)Cp+J}+ve=h=aW5!dDzCiYI4%66K zX0THZivc!8xD<5Ll{lsoSXvJ);&Q5cD_WCp*h}l&@2OO;|CS)hmT4i&*Z)qd{kCBL zciOGiKL6`k${nFz7xlDZ9{`oLY(f|gL(!bIYe~#A_3)^iDVV(#(rJtPR}!J zuFa^8)$$6LvV3eRCi5>QpXqq*am;bD0Wa%z+)t${sn#xfxEhIjD_h5S{1VVc;y-F? 
z2Yp@gXb~zg_+#x{L2?n9Pc(R8AxqycX+mZJ$fM4re+{H{G8qS5%hFk>_6N#REi}jo zpn9mCZk9nxe7za+iT5{4dpVg>ui2MLRcwoFv4y4DdEymAmkVRtB{Bn&*30M242#Mo z|2l&$#gKE5F9tquf-%uW+N)b7XjyfT2DvkWv^J9u+_Q!Uj+V%gyN8YFXp#|p~J zgf81*e+Bz+m7rPDL^6X_dYd(Fli|*%Xe?M4;3~uZk~6gjO`jvA_M=^N6;Vk7=$Rel z()MM-BPror%w&TWr?zg&laq7lk? zvFC9R;^c=PPkX(q{%G_!3!o3!1MWba8nzl@4OgTEiIP+n$&+@cB`mMuUUKZ1G&m<& z;!pBx;{sTR?r}(*-$HS6)LI5{8*w2|+N~TA&{?q?jr7LdlzWFvIIWACaqeeBF!!z~ ze+ZYEoX=HboaTq-hj*sKh&n7;8fFvOHsSCjlQ=Dgd&L(>$0I!RI*oiIi?NV1`C`BV zyBRa0-N`bXbXpQjcTE288Z$l@e&6kcUxK_88=UpeWh&`)?P)&Boa)3N=xlR5{Mm9iop@uRc=94f5;oD z;7I4lnG|Tpcrcw}voalH&7j*1b80HAiGGO{p%d{O2tql}(W^jj&FU*)R{F0bp$ik& zBY#@FwybC##%w>KTU7uZU!$w|qt(0SsH7Q%O<4hT2;zt~=ryRtT`JTyA zOv_Zt@B_$hk&;o(6&;}4W_zc0fBgjeJ@D~-1TLeV=%RyG4kljU7O&g+*CBAiJbUG8 zGw^>uU-fU!uKKP&KZPojuF`0bA;YXNX!MF^N3`?A~%E5Eg z4$lIo(`{AnS>7diQ<;>@Vneb@(r(m7l-zfl&1zF3UfL)WwN08ahU)dAf8Zqy{PX(y za&&Wfb@8u%+v%6JSO#wasdar>!Qna#ZqDB(?B#{(!^O?0_wnZZ;=|2(|4aXTU8?1* zzlv^v-%=f+bRMef?30ni?wwYGm0e*N><_;Tt}ce3`on82_LryE|E$;#fpjzHY?_)y zq3CGBjsFNnS9j2A>v0i2e|xq)Q?YWPp;kocT;A=d2;na=b`@tr_)QnMEUI@7*t?+q z+@c?r&!h~q+CkVbd7Y>}29I(4ysT!BJK!@)~uQ$Fm=tM&4KVyk@nD`8gU{~wm}e_HMKKL7t&isLxBlpkSo_8WIU zncT&8&149jjRxji-|}Rjypyny;Mm8k2x;z4Q4jjz6awJb|C8viHReBM9z5wnsO9tjK>c6H|9^XQ*xt|o z=abA2KO-b((89$&TkyrQ1+5KxiG;qG1;+D&V!5Qj29O>)&!-4p4wEhrDlTd1skuc! z89=>=6FP}65FvCjm{7KO+`k7m(zkTxwue~;uYm`m7^88FuIk^1lZFsX0b7&P5J5)i z@aFy1#qhd6?AaZ$b;>BA5;%4xnUxY64W_|xbbWe$e)I19;@$htBWt)h-}hQZb9VqW z3itR}{LY40LgeO4|7tY27?WBN8ZGOaPpAL7xwz~Pb-?sy`1#XTBujTGRnk=SoKEzxjOGJH76cp%Ez-j)ce!W83?r-2d*RTs^PW%{NncW+#*25i>R9haY=a zgD?H7o2&kZo6*JD$D7O3>wlIp=;J){P^+nyZ+7aqSwC}o_>?N5-b5HljssN0UeVZ6Cwd8lRpzm z0dtdz6Egu9lfDx`5|>{OHWH;A^ObI^^%t34Opq6o5ELIfBu8(4H@D)z(h}Qadlt9i zpu+Tca!1{y-^1L7tx6yEa>kx?ifoAiwRU&pg8GZ^GUdgi($d-=V|$sCY7`qs7Q4<) zE&*S0{_ptU_@H?I_uEcufBxq=N!7nV&1>L+*f(hQ^VcGERQi zOMT|t;L=k=x@K&6IdCtN%oG|NmwNqIjVZIm_@y2$&iKW~att&Beyaf{AnR{XlL8et z5Dlt_RpeGT%N0be_&aUTlaommBY)eN)b>Yyquc+0E8|&fnlj3r-l?TF)h1VjuX7|( zX`49oP*!m{tio7cmMyCOF&la|Jw%#rTc#lkM{R}JWE-z|9{qnKp%tno|b?Y zQvBI|_+C_g_Tq$RQT7&L*sJ;7mumeV3jj6(@Dxw*D*fL&J}T({!`9ya`#hx-c*+}k z=}J(@z_coor8~40Hst0PM`vxI1UJ2#XuW%ha+m)st7gzU#LIQaQZ80;EP>*6I5u!A zNoUbLtonZLUbX&t%I5li^PRv-{eSefp#M8Z$NTU9KTCNr{hu0?d(FSs{QI&m`?4?l U^33J`0ssL2|G!@vGytdo0CmQcwEzGB diff --git a/charts/v1.22.4/blob-csi-driver/values.yaml b/charts/v1.22.4/blob-csi-driver/values.yaml index 78912172c..0f3fd964e 100644 --- a/charts/v1.22.4/blob-csi-driver/values.yaml +++ b/charts/v1.22.4/blob-csi-driver/values.yaml @@ -119,7 +119,7 @@ node: installBlobfuse: true blobfuseVersion: "1.4.5" installBlobfuse2: true - blobfuse2Version: "2.0.5" + blobfuse2Version: "2.1.2" setMaxOpenFileNum: true maxOpenFileNum: "9000000" disableUpdateDB: true diff --git a/charts/v1.23.2/blob-csi-driver-v1.23.2.tgz b/charts/v1.23.2/blob-csi-driver-v1.23.2.tgz index c4b0fb5e697da892595a0999afad25de7b2431ac..91594e0570725bda329592ee1c89b0a022222cf0 100644 GIT binary patch delta 5685 zcmV-57Ru@QE%_~wcz?WzMJ{$1z+$o3U4RppO)Ogw%i&}Jym>LlTsD^2bHCeLX|-Cd z)8k|Pf2-BX{@-e!o_yCnJ~=w>bl$u&djTjr)@n z?32&H1!1&{7VR4L{lw2k=cv)CIUsCKe5oIv{~mBaZ(TM)V}Hpwo&mZT50H_b8pR%T zQAVa}i!8>MZuFt7N_PG)up5BbuL4*<|J&_$tCgMq$L-U@{C|!@JUoML4WS7Za1B1| zBG2XxkJy|EHkA#VdFrjpCJG864n7k^GQRAhCKIB07fgUtkRX&CH6wx2z!J~mS+5@W zf$NT#OYCJA4S%LXCPy3uP+3FB7G%WPf+$r1yc#hjxPDwILG!3_(r9fW>5>Id5MspH z1gcSts`#o(f?lW5Zfzz=nFBpN(XL@egyeXOhN{ZpG_`nCJ;lP0n>nxrxduUg-=>GT zsgN4cjihR}%K|4{Fh;?|CH94SQmegsg|1-=9H^)6AHYk z@L3v}<#V#Y5~40$Hf#F%C~)1e;iijz`L*^6S;#DJ-ZC7#Vg$+&uG35;GL#Vle|G!9t^`3?I#qR+VQ 
z{!nNW=(HH_fT+eN*v@u}Lcg*Zv}jx?PHkmuD1X$LQYNwbrDXy;j{eVek(>kanImdG zbIxO^E_!&(qjB*B+;tc(HGrv%EDK!(LNYH?k5I{w2NFA2;+m~N5)~18h3A_S?ArbHeSpec7Sio&SV$Qg%D*ZO2Huy50 z0Dro7nragGA!*C!0G>AMnl(Sk&rpwuSPBWAWxIipz%82-No?#Y>SM421+C=`?j@Qj zPE~(0-)CF~RK1i8@qnV$q(fLB$|z?{qRA2okWx))tR}ricVVz83X>&*d+hlxpsxCz z0qLR#h5KB55=zwDtyUX7*787)wOV3Itbc^9U}`A2RO;zM_g6fty9g@AxCMvc|sjJzw?is?JLM=S;{U$S;iFt@MPP6S3+an+2|;CX_=2p19yB zB@i=?90saiC33(A>L5nZ$MGPlUPDS^IkClvaoI(kf1Ms37cYHtz^gDgtGlWNr9KE)`@Brw6~mC`-#M2YpI+@=ZQ433;EaTIy{{cHJw8h zr?meVlxk%>d?c9*Jk-7#i-qoB*?&cKy;5-ILL;rAJS{=>kRtO{ZH2Agzb8V9=mkut zV9PG5582qBgA=$=uY^CG9KI69uTI5!0-2g#TJvg2J$^BTq~8vxODKG1Zs5TOSf1N9 z2=OzK^MM0YYH^4rxrgsRQM|y!RobbG+SRnnQBFSqRS4d?Y%&c5j5xbru78-Cq@HSo zgbImWH-cY*#=t92M!nrQZk*JMs&xwLb>fPhM!V556$QwT`2NZV`i{6T47@~5FY}`Q zFS83YHABVy$#<}X-rHQGF0eg^5tj2VYI@pA$G%eKH^hUn#GXH(8l*=OYS&Yy)(E&q zgfNCr>&=@tc{Tg2im!Bk^MAs+_Nheh?{q4@U}?`CHv76Q+DYFQsvR?*&K2t)^@haA zU2O_j23*U+RJ`Xb@S_dfHDKpwP9$7W8`NxnH@;c=u)_5oq)`cDiPg5q$YRZyxZRX3 z->?N~XiXJ*5RAF3?6vb5C}!%4#~}JW4yIFb-$nJS@z|P!>#4W8{(pNm?tQpiX^MIs zy+SK>gN(mQJ^q0*rBK8_UZYNWcL{CYSTzJHDS*Vk9q%HmRHbEsZ4 z6Y4?yyXF!a+^g0ot?7d7qUHkgrb{Nx@JUV2X~|g^IWQrZ)@sUra~XD4`cZ3S0#puq zA(>a`7pBc^?EZ=dQ-5tQpP%>CWLGwp*{kTy61T7rWJbY39;SGvY`zdLBF5MwRM!`3 z3nqc8{U8?lQE0Ghf>Lcbh1x)BkUMIG>6wgl}# z(VmJ>*EDLi8;0yT*moO{8P|ryG7ztk?*bN}Hjv#~sBUfPBFmzY<$Ct;fEw4J;TkJ@ z7Ck;HmJBk>65;ZAR_6vXR|`Ep){$lD1^l3~Ql)(U-_F7Hfs2OppBM_-F#kJ8r^lK3 ze|mEI<}m-CqklX+H2;Yf#OrD^VM<((OCQcW)h+fMy6B(H$H!VtgZ1xyOdV}K(9rd( z4Gx_$py3`KEabqHP(XEU1uD2Yvf@`ba3yM(ECW3?8=pMz~6UP}p)+{y@J4Os0DE@v)|MKn*c( zYB4NoLMWe&Bs<|uN(Ls(Yb4o!u;;FUSP@Q*teN(H;JT~8+?4Uyqc2eLIQwzzEHT(I zhuHv|BAg4l=}H{a8LX{`R&hDiy(O(lH|(W#?tiyb%GdvbAjyttA?w%wPOFp6|LU|` zt>eS`{~YCxP^XJ}+OYS4%33lZ4Ew%lF50yy<{7(q)Le^{Nh}^TTpkQm=zA_R;cF;d zEsPUOM@=`OMJT^)xU1#j4@3!dfJ$_%z=aPw-WV}S8Y0obcY*uEp zh=0}m3Kz3{YAQzaFC?Gnchn|C(;JuI*E*fZo$%-srCm-Qq45T2%vhX zy>11A6ghhn^6CiQDbk58$t6b8r z6WC%5DG&K-;L|1;6J4a;x@CfvR0na8J0VExW0|3cH`O=gQk^B144zjYnF>6XP+li= z$qvidhnobgAWbAOSh=@J<2D-Zbc)7`bpf_A>_?obJ!t$KA+;avqU(T4;z7^sAb;1k zFA*P!38#W4TeLW~bz`2aM7%bceHae&9E%YLy~AtP)6=SvPI_p>*>_2$70%NxswX(c@$8-8$UeP0$k0(Bos>bbu?MB0u}1nO+AF!h9x@VxYRcZY|C__1r^kzhcMX zAjH}CKb-e^*ZuMM9|eFeUkm-JXN1bL z?|--$4{rOz-soyDyeV1{!{pG_WN9_@ziu(vL@H@9W#dIsL!M;ZJdL)bMGLrgNLnTd zVFpwd)RVsq90c2}M8}ig3@ibClMD?WKuj@l0}eNH4r0z+$0Rt$WiF-v9%MI5$*AU% z4$y5YdPgXc1iL+O@p23{qmJmJqgDzgT;Nu(+v(RnaKfD8%9Cmh9e-`B-dat`>?8I+ z!15ZVd4OnG!h}~2j-@t-1#mi3SM^@OyC~->k*=9-NK#1_8?^x?_uXc*+>{8HHVRp7 z6K9*DdOa(65d%Nn+>FMzqwA}G|EHLKUW+C0W{?)HFB>?l!r=D&ZNpw#sNP@Qj(Z<& zFR$L;UiLrtFRN0`XMYTqbOZbr>j8g-e|EO zo!|UavL6CzCywYeHH$*g(S#fS6^w4~pv~6fD)RPhd!}sVOhXG1#dCSTqauX6#@JPy z3E?(v;4-V;Ibi3Cy2}Fnuzn^bnAN_;hRF#<^(i=vH|B#)q0bFqa7WA^S|E5uvq=87P9cdI>-R<->_RmWMO z+wD1=X@9rom^v<4MuAy`1_CVLobfZAeg5~C^;boG|E+JRtJFRy6 zkpKTI#j>na%8xKP`;EIFP3~g5W-^2>#shP(Z+$XQ+LMA05`TR9(Pz$pPT3Vro@~pV zXG<^^hQAsq2*aXT`Yy>7?4|9(N9MrlVBJXdi;(8-6!oC*&%pzZ-M@$usxbd4bKprA zLam?wN3G6LHvj+4$??%){y#?v^Zzh-jE-T4^gk^f`%L`HAlyl91p27;SaK|37M>SZ zHZh?ya|$~$gMYGz*Ry=2-$xvUDJe;WyJQaLTKXb6g9Th1vISo(Td=TUFOkp}vcPy+ zPzo+-C<2I2ou^X-M}uCEc|^ zh_pg>o_lJp5l{wDFW`jE!V5$QoegG`tseL9!4C8-oqwr4UXsBZ;GW0^S}dch`u9GE z@GgmhFiLnGiZu5DleOL6c&Wb0J~Ov%s!Tq#B|X8o=lxBnCoen z&GX$n_;&Lfb$j-j*{FKvVoi2qcu|TK-BM!tYO)`4ILdvjIgerH?!~Cs%=0jHa_-n> zx~rRNgnt@hp7e>9%2z183*y2|ZIm)D`%>3vum5&%KD>Q*eKox44|~Nf(mG|7PzfB{ zk}PsbiU!kQIKDZ*yu5vTdG+?)r*UB*INkSJrc*cjG%~mI6nKOUv53g+=l=D0a5dbC zSmvIfQexSbb@tJ;&c?QN_R_e{wyj$i=FQP8GJglk2#xzUw;#{{eS0j*Mraf>)Y%8``hu=#fRI``OQyx3~3o#lX(dDu@@mKh4!Ywyy0T;{Wc`R;xPx# zicw|M`;NJdCsi!post3)D}F(9i*Y4{&woUM!=|Ih$ENQFGeSkPG=x&b#8*iwh%2W? 
zWIc)`=&m}jU@9I?6FkFd0<+HT>l{IP>zb^Y-dC(eVs;Z5TsfYr?K$d})@#H$`~HUy zpWgOwhgZG+?eP3#zlid#=N2lQ63{$5l>x zaddQ>>3(*j-xT_wt$7(E6#j8!{(EVZ;7L`pS6}NWI@-__V(91p_4}Br2<-U zWs^>(jGg^TS)`>SxGLVPeUAh66HRLvZ!~e1M4rp9&9B_DdtPvR@E1oeN2!STzPT>UwW*l zAXd5dh)NQ4W4v+`rcB&&P0VuBv6%#pb)Zjj0jKwQ8;(P*u}XGlaVHK+jFKmJ)NTBs z&Ry6l^=_|c>`7Wlls!++*8KiABK z)Bzz4jRQGnocyj=)Wo^LwWo%3&CtHF;9df!*iBxhsxQ9X{rn&gTz_MAS_u0@ANdnq z^h>?|tHzYD@qelNt22I~wHg8~fKN2Q3}pQ^s=Gw!|FyZV{xJQd{&JL!0=nJ~1oa)9 zm8qJuO+~cQXE%!|hY+@lg{auhifw`gP`N|Tg{^sw>V6ry&CRj}Q48x{8}uZEHe=i& zG&v=TXy~(Xd+D^AWq(<<{A_(os6*)NYfReA<;T_}B(?jI-(d1zuw^)FO;g6H(>t}; zblK*L@G3_Vm9`0e|9O@76#W&6#hQLE8@#WoYQqb4{tRlif{NI)mVFY1run>Gme7u^ zDAub_Q>f3bos@h(w{F^xfd2)QQl=v+E2O>ZBQ+wWqqXXsD1Y7Q`IJ)lS*sEu4`uq- zk`CE}1aH?b{{w66PJ&WJrXL;-ZiRwUb^rgAOF%a4|Jz4Lx%+>Pk53N!|K}(h_W#ei z5a@FV-_jEBLW)1x58sN)PhOnxEXuw>7!GQF|D|02hXR0&06fJLyh;DJa^HVEZXNu; z&r@>oowT7BuXY5P6v#qFvUZ2I!G_%a;;1c@;I?-YRlAocb@|V_Y6iVUoK%ObBS;~v)|Ja~BX#Rud bKa@i`ltVd`XD;@qAs{q!o|8~3GYGv2|al3U`|Ig8ghiA~OAvD1PuEA$r zbYW+YG=SmIf{>eT~3 zaNQAeiM{Ni!GCnfcq{USz6da~qG?&sB-DZO624PKy*`g`2xD!pA(J7e)94wzXmd&WdgaU6W ze3nLL`J61UglI~a&6<8b3S4(=xap!_eyzPi7BbtLw+uht@&hal$Pjx_s|nx>V#B#@ zvw%u9zkfCdETM}e4*p>Y|6oJQ^2Iz+H#oQUjRI$gY8%=a0W0aY(0Lp-2pwdfEQh%(9v)4G~ z(89+ybdmZXmYKgAQnJRr^gUnA?yAp9{O3%_A;>R`->vk7oD;E{ADacPqZX7y1fICy zC?yaxjvNMRUL|tC2kIb3(Z}&1>Rv-iVmYzJh;i9PoqwGk9T#tXbHJ{g$6#v5=6~S3 z#KxkF^j`tJq93u6hptgb(ZdVF&&b6T;#V%4DVcY>=tSc(k}M3lqWxa+;+{8l5N7$Zq6cYwGZHO4M== zRh-iPV^FG<@$ivkF7QzMYAhDIgMVcg)%8xnnG21yhVrxo)kBKRSG^UsdjFmXDWVrJ zoq{d9s6J$4dk#+ELcJ3HaB=ub7{59d>j`9PdTY(ACH45l6q0^Bpe~{CnYn=nA7FWI z+aScxM9v2eP^rZsTI3$S|3vWu6IW@cE^1fPE=M{208}A(>$1r-5HRBGet)@QYLa@Y z5fUmScHIbm1sVgdJQ?+NJ-y6} z`oGL6(DV!y_b1=M5_)fQgSx=>97b5qyQt}DD;@hvmERB##u9t}fNGE)NvK^)NLh!N1d~_=2T9ci8Ofj%X)+Tc~!-dOBCEf7AyO zBX_kaU>R^N3sdo)v%rrIaMysHpE;3mMQu>4{oVLx>B9=wdyqyYj3rjbA|s16W8!vG zwtT}Dq@guc=s_^%va;9CXP}s=D;|UB?{P4llKU>IUyaAs99&Pm)qnlpvvKdk*y6)sT*YcRqF8%lqrQG{_z_1`foqIe~kvicUP~`&*#^}!SMZS)W5#Ix>gpKGMhv7 zqM1++;=gMyp~1ZxjnbMf$S!IwFmJkK(hQ%}^qQ8Ob&&%Tf@!U$>^GNTXQdytMkYYz zpcj&Pg??e$+{W&&Xn!!(_VW38Pc3$3W0|vxJ}hwy3qfWS9OPk&XUgUa@gic3JwkPV zp^jh@s5%c~p&x|?yCx{rfm5ghqy`!E&=(@-8f-hNA&Egx$zO;|59_7K8u>0@0qOwRt%d5=mM*d^8dd1;;;lP!sVX_SL(rkP-5l01iwE)hE1LzNBs!|)B z9_fD)&zOg(ZpFxeI{M$5x7vkvDjQx5cZ$_8@~liF9e+!hEMtz1bGK0|W?X~B#C%Su zoTB>Qg!MO3PY=NSJ3KR@KeW$GX{q}EtE2jDeV4&7&tuAPGm1@=F&P0EC$zzaeLv%< zFh=>q1B%vm>f1C>VJCe|rnc#rRi@=E8;5xrzixt0^^#EA4V<#klyGJ<&u3I0Ark)e zJe-fYw13!i>yd}HuQgew#~wvFBHpLbmp@_6CZ*FdZYr@pLN_I}{HtH9ip@z|p;Q1N&6<<#_+j3zy>mw(o|-%=~z{|ka7JEn!K-~T(UPB#Cm z(|Ysf^sxUwN4q1`>7t%C>^-2emP`o4zAu`Kb}fo|#x5SU)*@vRiw6z22Ll!Qp36-5 z8VYv{P{5*;gW;e(Dh$1$v8^wE1mBHD$zp<`Ft^UDOA zm46v6Vl}_R#Vntij?wxH$tNaWyBt%TY{5&_iTg3vCDz(m7grx=6cq%LFZ{4&oqpLXg(SGD8nbf<=!TZ+vszrQ#4kr3$T@8KjKWCLF4BLsq<(TT?bSW4}W^* z1i5y6iTFrNI2AP6V#Kkn8}no(;3skp%x5Ag25Q>t*5a&F&pp)k zD|Q?XLY#g7!+EcF-5-ztQ2^)yc7K3d5T=H0hN$3*EI^_pl?C#w-D$N%!F#xu99t$2 z&WiT=WxZU!0w^)J!Lnaj0Ma?j6v>}*%mzAMWB3*Qoc*LotX~|gV5Tp)i=6@u_M8!=g zJxW53o8eyZ1=4Z|PrQz!=BU8VNLh;^V20g<1=8*$8IC3`3dCC`b-04ENQLhAt1H%^ z&M(_E@Q#{dUlgn8iL^#26G#K8zs73C9*G=)NnEjf#^qVDVUy-IPIM`lnYHqt#74ZSB9*k5vhgCRPo8AlJdKW|MH{$w zNLnTdVFpwd^pm>`90WV8#Ke=^3@ibElL!qSe@!uQ0}eNH4r0z+$0Rt$Z7!w%9%MI5 z$*AU%3D9FJdPgXc1iL+O@p23{qmJmJqgDzg+~8KP+v(RnaKfD8&gEv{|NnH|zrDEb z_xi(|!TIIwyTN6@$S8crnU@`_K3Yx5>?8I+!15ZVd4TA%gbA-497`Pz3*dC7uA04q ze|J&NRU%z8JCLN3EH-KbO76SOX1OU5Zfz8@+9u96L-l%A@FE6&y15yRZ%5Zx|Nc)g z{k#@S;LRW{++Q|uScSpu`P+uQv{1djx*hjE++JS2zrE~#?q61=n$H+4nFjbR))7kQ zp}J078B1*6X(d=X6^6n7@blpMYWT4~f4tFRKRUnpspLEa(oU4DI1|Ec+Q4O2y>r0M6?K;d`eFS_N-(Q^ 
ziw%<#it1Bv7{|}cY67|C-(^2ymVkdpnl`p14}@%LZ-gx~Uu64<+2IVKWOn1xe<4|d zTFu247RKz|y;g{~(9IP=?(SB5BCTrYi>i*ZLbuy9&148& zj0dK$Z+$XQ+S8Xw@!27F2TO_V`8E6&5DrdU{pd4iK&R}ACM(-=^=t{of5PxrBL!ht zG+W;#nS#BvUHHfxSQXZd)Vv63?oLq;`u-d|;Mo0(D4`1LpE3uYbRpFG^?%gr9A)$W z-<%ws9M=DHv@rh(Jb*>rKPXGB^dJI_5e*9a&B zs26ZTXW<1Rgw6&t%2tp2_h1M5md?}}FUjBya8G0dEtb(${d=E7e|VQfK^P^x4n>;# z0P-!qf?&wxfEL1p#rlQs6om!hGQh4Cm)XZsl2{IU-jk`b1#3MGvw6Ln2j6afqaM#* zGaJ>+T&&4%3@=KtqFYKVUoG}S4oA6;kw zFE4N3US7R@_i0=h2u{zvmg&^ZK8?)nJOv(MLo6b4`?-HT9$XD~B9^%)sFYZCWSxBs zt+R1#oxKdMv+d~Cg?V!{i_C#CLgW6;?Z@+f-(HRSLme}{e;t1MxYO8jS22~+RP>%i z%Bx1FHyEG4z3kt98uiX^`n|V1(aPKtl_9n{l%Q-UD-i1f2@0f{1f*!g==|oVJchK4t;sxu``C*RwL*K-VBT=C_V3!D#*-?R z?@mbpi5zf8who6~vuWUt~RsBuwW@3 zstKNG>IA6=6@)8~q{NX%&>gDb~#wKGTE(t3?JXW#$u;nUmx?eMDCza5@` z>=#kqwQiw8m4N2?Yt#pnBVJP(@aUHrUu!u375W;i9wHWe6nLVd|rR{Cz0C5QTsLw!dMRmpC)8Ebk| zI~0&q;%}D%lJsU@U0kOGT&F}%Gry9T2hg33e^D%au(f*9KMGXD;i1IkYm~U`8X9?y z(wKEez{2AdXFi@`^OK8jiKDZhwU#SnLHE)2_SdV>Nh9)70j;>RNhedr&VD8GP~P+6 zYbXv`pfSJTZvMN`BJx4^-X3U3*-fNp4^8oEh{RBRjw^jNrG;SS8l?T ziCeCTS#BzuN#Ixq`Xm={`kc3+9BPeKvO9}AaZqBEJb9vS;~(nWg{@NW_Ik#iRHL@W zw5mKEseFOeFGi)sqtpgF6v@3zk=*t+f71pjth7H=7suTGaVYcqA8Y0HKYW61YcH38 zZ>aw}Jvu$g)c>{Lv<~-wKTFH3Z}rg|=3+{Haer9+XMX1Anz@iF5Yo^%kaNb#?|Mf~ z)D5n!8qz&O`^JKM37ld#d6}xd_;&aDK_0lq>a-B{i7xpQUGz)6{;S56u zwu^s|-+B!o6&+#xi%B#LP0+PJ+;TFtVo zT7I>@CDb8w_B9r5=JI1}5|Y}zf8;lq{1eCeJvuh_M-_Na^ z_9Nhb0i~3gh{_6Sulh)RkG_mW__J0eLLSQWuO$<*2MOM8e_s9v_Sl^S zrHV{HJRIB#1+D7&|0$P%Y&ie7kB)Np{~RB^Ih_BWqis0kz)BE#QR|e{vqa z6_ua7IN@28eSt6>)cpQix&99Y02={#iYIuJ{%_@e|M9qW@c%wf%f)xnhF-iAWKtjt z70KEY+6EhP`-`KtP=eduVog--UZT|HKkK>~^cHba6S9_zO&m+0xC+MxZX@X=x`$1_ zpL Date: Thu, 7 Dec 2023 05:51:07 +0000 Subject: [PATCH 005/157] support deploy aznfs mount helper in v1.22 and v1.23 --- deploy/v1.22.4/csi-blob-node.yaml | 33 +++++++++++++++++++++++++++++++ deploy/v1.23.2/csi-blob-node.yaml | 33 +++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+) diff --git a/deploy/v1.22.4/csi-blob-node.yaml b/deploy/v1.22.4/csi-blob-node.yaml index bff2375a6..4a30c8969 100644 --- a/deploy/v1.22.4/csi-blob-node.yaml +++ b/deploy/v1.22.4/csi-blob-node.yaml @@ -127,6 +127,7 @@ spec: - "--blobfuse-proxy-endpoint=$(BLOBFUSE_PROXY_ENDPOINT)" - "--nodeid=$(KUBE_NODE_NAME)" - "--user-agent-suffix=OSS-kubectl" + - "--enable-aznfs-mount=true" ports: - containerPort: 29633 name: healthz @@ -167,12 +168,36 @@ spec: name: azure-cred - mountPath: /mnt name: blob-cache + - mountPath: /opt/microsoft/aznfs/data + name: aznfs-data + - mountPath: /lib/modules + name: lib-modules + readOnly: true resources: limits: memory: 2100Mi requests: cpu: 10m memory: 20Mi + - name: aznfswatchdog + image: mcr.microsoft.com/k8s/csi/blob-csi:latest + command: + - "aznfswatchdog" + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + volumeMounts: + - mountPath: /opt/microsoft/aznfs/data + name: aznfs-data + - mountPath: /var/lib/kubelet/ + mountPropagation: Bidirectional + name: mountpoint-dir volumes: - name: host-usr hostPath: @@ -200,4 +225,12 @@ spec: path: /mnt type: DirectoryOrCreate name: blob-cache + - hostPath: + path: /opt/microsoft/aznfs/data + type: DirectoryOrCreate + name: aznfs-data + - name: lib-modules + hostPath: + path: /lib/modules + type: DirectoryOrCreate --- diff --git a/deploy/v1.23.2/csi-blob-node.yaml b/deploy/v1.23.2/csi-blob-node.yaml index 96d646ff4..a2e4216e5 100644 --- a/deploy/v1.23.2/csi-blob-node.yaml +++ b/deploy/v1.23.2/csi-blob-node.yaml @@ -129,6 
+129,7 @@ spec: - "--blobfuse-proxy-endpoint=$(BLOBFUSE_PROXY_ENDPOINT)" - "--nodeid=$(KUBE_NODE_NAME)" - "--user-agent-suffix=OSS-kubectl" + - "--enable-aznfs-mount=true" ports: - containerPort: 29633 name: healthz @@ -169,12 +170,36 @@ spec: name: azure-cred - mountPath: /mnt name: blob-cache + - mountPath: /opt/microsoft/aznfs/data + name: aznfs-data + - mountPath: /lib/modules + name: lib-modules + readOnly: true resources: limits: memory: 2100Mi requests: cpu: 10m memory: 20Mi + - name: aznfswatchdog + image: mcr.microsoft.com/k8s/csi/blob-csi:latest + command: + - "aznfswatchdog" + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + volumeMounts: + - mountPath: /opt/microsoft/aznfs/data + name: aznfs-data + - mountPath: /var/lib/kubelet/ + mountPropagation: Bidirectional + name: mountpoint-dir volumes: - name: host-usr hostPath: @@ -205,4 +230,12 @@ spec: path: /mnt type: DirectoryOrCreate name: blob-cache + - hostPath: + path: /opt/microsoft/aznfs/data + type: DirectoryOrCreate + name: aznfs-data + - name: lib-modules + hostPath: + path: /lib/modules + type: DirectoryOrCreate --- From c752b3abf4caf9e08ace0e6963de2a150b3985cf Mon Sep 17 00:00:00 2001 From: umagnus Date: Fri, 8 Dec 2023 06:28:05 +0000 Subject: [PATCH 006/157] fix storage identity typo issue --- pkg/blob/blob.go | 4 ++-- pkg/blob/controllerserver.go | 2 +- pkg/blob/controllerserver_test.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/blob/blob.go b/pkg/blob/blob.go index adc8ee696..41c6515b2 100644 --- a/pkg/blob/blob.go +++ b/pkg/blob/blob.go @@ -80,7 +80,7 @@ const ( storageSPNClientIDField = "azurestoragespnclientid" storageSPNTenantIDField = "azurestoragespntenantid" storageAuthTypeField = "azurestorageauthtype" - storageIentityClientIDField = "azurestorageidentityclientid" + storageIdentityClientIDField = "azurestorageidentityclientid" storageIdentityObjectIDField = "azurestorageidentityobjectid" storageIdentityResourceIDField = "azurestorageidentityresourceid" msiEndpointField = "msiendpoint" @@ -415,7 +415,7 @@ func (d *Driver) GetAuthEnv(ctx context.Context, volumeID, protocol string, attr case storageAuthTypeField: azureStorageAuthType = v authEnv = append(authEnv, "AZURE_STORAGE_AUTH_TYPE="+v) - case storageIentityClientIDField: + case storageIdentityClientIDField: authEnv = append(authEnv, "AZURE_STORAGE_IDENTITY_CLIENT_ID="+v) case storageIdentityObjectIDField: authEnv = append(authEnv, "AZURE_STORAGE_IDENTITY_OBJECT_ID="+v) diff --git a/pkg/blob/controllerserver.go b/pkg/blob/controllerserver.go index 223800d11..fc1d8a2fe 100644 --- a/pkg/blob/controllerserver.go +++ b/pkg/blob/controllerserver.go @@ -174,7 +174,7 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) containerNameReplaceMap[pvNameMetadata] = v case serverNameField: case storageAuthTypeField: - case storageIentityClientIDField: + case storageIdentityClientIDField: case storageIdentityObjectIDField: case storageIdentityResourceIDField: case msiEndpointField: diff --git a/pkg/blob/controllerserver_test.go b/pkg/blob/controllerserver_test.go index 72d406f20..0f7e03747 100644 --- a/pkg/blob/controllerserver_test.go +++ b/pkg/blob/controllerserver_test.go @@ -520,7 +520,7 @@ func TestCreateVolume(t *testing.T) { mp[containerNameField] = "unit-test" mp[mountPermissionsField] = "0750" mp[storageAuthTypeField] = "msi" - mp[storageIentityClientIDField] = "msi" + mp[storageIdentityClientIDField] = 
"msi" mp[storageIdentityObjectIDField] = "msi" mp[storageIdentityResourceIDField] = "msi" mp[msiEndpointField] = "msi" From d0f03e1473fdcbbb3639402dd1625e882021a925 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Fri, 8 Dec 2023 11:29:22 +0000 Subject: [PATCH 007/157] feat: upgrade to aznfs 2.0.0 version for nfs mount --- pkg/blobplugin/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/blobplugin/Dockerfile b/pkg/blobplugin/Dockerfile index 8153961fa..0983c1802 100644 --- a/pkg/blobplugin/Dockerfile +++ b/pkg/blobplugin/Dockerfile @@ -39,9 +39,9 @@ RUN apt update && apt upgrade -y && apt-mark unhold libcap2 && clean-install ca- # install aznfs RUN if [ "$ARCH" = "amd64" ] ; then \ - wget -O aznfs.tar.gz https://github.com/Azure/AZNFS-mount/releases/download/1.0.8/aznfs-1.0.8-1.x86_64.tar.gz; \ + wget -O aznfs.tar.gz https://github.com/Azure/AZNFS-mount/releases/download/2.0.0/aznfs-2.0.0-1.x86_64.tar.gz; \ else \ - wget -O aznfs.tar.gz https://github.com/Azure/AZNFS-mount/releases/download/1.0.8/aznfs-1.0.8-1.arm64.tar.gz;fi + wget -O aznfs.tar.gz https://github.com/Azure/AZNFS-mount/releases/download/2.0.0/aznfs-2.0.0-1.arm64.tar.gz;fi RUN tar xvzf aznfs.tar.gz -C / --keep-directory-symlink && rm aznfs.tar.gz # install azcopy From c28639b8626e727f9d30547a8175056d1351e6f0 Mon Sep 17 00:00:00 2001 From: Fan Shang Xiang Date: Fri, 8 Dec 2023 17:15:00 +0800 Subject: [PATCH 008/157] bump azclient and fix signature change --- go.mod | 52 +-- go.sum | 60 ++- pkg/blob/controllerserver.go | 5 +- .../pre_provisioned_sastoken_tester.go | 2 +- test/utils/azure/azure_helper.go | 17 +- .../network/armnetwork/v4/CHANGELOG.md | 11 + .../network/armnetwork/v4/README.md | 29 +- .../v4/adminrulecollections_client.go | 40 +- .../armnetwork/v4/adminrules_client.go | 40 +- ...atewayprivateendpointconnections_client.go | 45 +- ...ationgatewayprivatelinkresources_client.go | 23 +- .../v4/applicationgateways_client.go | 151 +++++-- ...cationgatewaywafdynamicmanifests_client.go | 23 +- ...atewaywafdynamicmanifestsdefault_client.go | 6 +- .../v4/applicationsecuritygroups_client.go | 70 +-- .../network/armnetwork/v4/autorest.md | 2 +- .../v4/availabledelegations_client.go | 23 +- .../v4/availableendpointservices_client.go | 23 +- .../availableprivateendpointtypes_client.go | 44 +- ...vailableresourcegroupdelegations_client.go | 23 +- .../v4/availableservicealiases_client.go | 44 +- .../v4/azurefirewallfqdntags_client.go | 23 +- .../armnetwork/v4/azurefirewalls_client.go | 93 ++-- .../armnetwork/v4/bastionhosts_client.go | 75 ++-- .../v4/bgpservicecommunities_client.go | 23 +- .../network/armnetwork/v4/build.go | 2 +- .../network/armnetwork/v4/client_factory.go | 135 +++++- .../v4/configurationpolicygroups_client.go | 45 +- .../v4/connectionmonitors_client.go | 57 ++- .../v4/connectivityconfigurations_client.go | 40 +- .../network/armnetwork/v4/constants.go | 4 +- .../armnetwork/v4/customipprefixes_client.go | 70 +-- .../v4/ddoscustompolicies_client.go | 28 +- .../v4/ddosprotectionplans_client.go | 70 +-- .../v4/defaultsecurityrules_client.go | 27 +- .../armnetwork/v4/dscpconfiguration_client.go | 66 +-- ...xpressroutecircuitauthorizations_client.go | 45 +- .../expressroutecircuitconnections_client.go | 45 +- .../v4/expressroutecircuitpeerings_client.go | 45 +- .../v4/expressroutecircuits_client.go | 105 +++-- .../v4/expressrouteconnections_client.go | 28 +- ...ressroutecrossconnectionpeerings_client.go | 45 +- .../v4/expressroutecrossconnections_client.go | 88 ++-- 
.../v4/expressroutegateways_client.go | 41 +- .../armnetwork/v4/expressroutelinks_client.go | 27 +- .../expressrouteportauthorizations_client.go | 45 +- .../armnetwork/v4/expressrouteports_client.go | 74 ++-- .../v4/expressrouteportslocations_client.go | 27 +- ...xpressrouteproviderportslocation_client.go | 6 +- .../v4/expressrouteserviceproviders_client.go | 23 +- .../armnetwork/v4/firewallpolicies_client.go | 70 +-- .../v4/firewallpolicyidpssignatures_client.go | 6 +- ...policyidpssignaturesfiltervalues_client.go | 6 +- ...allpolicyidpssignaturesoverrides_client.go | 18 +- ...rewallpolicyrulecollectiongroups_client.go | 45 +- .../network/armnetwork/v4/flowlogs_client.go | 49 ++- .../network/armnetwork/v4/groups_client.go | 40 +- .../armnetwork/v4/hubroutetables_client.go | 45 +- .../v4/hubvirtualnetworkconnections_client.go | 45 +- .../armnetwork/v4/inboundnatrules_client.go | 45 +- .../v4/inboundsecurityrule_client.go | 11 +- .../v4/interfaceipconfigurations_client.go | 27 +- .../v4/interfaceloadbalancers_client.go | 23 +- .../armnetwork/v4/interfaces_client.go | 205 +++++---- .../v4/interfacetapconfigurations_client.go | 45 +- .../armnetwork/v4/ipallocations_client.go | 70 +-- .../network/armnetwork/v4/ipgroups_client.go | 70 +-- .../loadbalancerbackendaddresspools_client.go | 45 +- ...balancerfrontendipconfigurations_client.go | 27 +- .../loadbalancerloadbalancingrules_client.go | 27 +- .../loadbalancernetworkinterfaces_client.go | 23 +- .../v4/loadbalanceroutboundrules_client.go | 27 +- .../v4/loadbalancerprobes_client.go | 27 +- .../armnetwork/v4/loadbalancers_client.go | 92 ++-- .../v4/localnetworkgateways_client.go | 49 ++- .../armnetwork/v4/management_client.go | 128 ++++-- ...ntgroupnetworkmanagerconnections_client.go | 35 +- .../armnetwork/v4/managercommits_client.go | 11 +- .../v4/managerdeploymentstatus_client.go | 6 +- .../network/armnetwork/v4/managers_client.go | 65 +-- .../network/armnetwork/v4/models_serde.go | 72 +-- .../armnetwork/v4/natgateways_client.go | 70 +-- .../network/armnetwork/v4/natrules_client.go | 45 +- .../armnetwork/v4/operations_client.go | 23 +- .../armnetwork/v4/p2svpngateways_client.go | 120 +++-- .../armnetwork/v4/packetcaptures_client.go | 44 +- ...erexpressroutecircuitconnections_client.go | 27 +- .../v4/privatednszonegroups_client.go | 45 +- .../armnetwork/v4/privateendpoints_client.go | 66 +-- .../v4/privatelinkservices_client.go | 164 ++++--- .../network/armnetwork/v4/profiles_client.go | 65 +-- .../armnetwork/v4/publicipaddresses_client.go | 171 ++++---- .../armnetwork/v4/publicipprefixes_client.go | 70 +-- .../v4/resourcenavigationlinks_client.go | 6 +- .../armnetwork/v4/routefilterrules_client.go | 45 +- .../armnetwork/v4/routefilters_client.go | 70 +-- .../network/armnetwork/v4/routemaps_client.go | 45 +- .../network/armnetwork/v4/routes_client.go | 45 +- .../armnetwork/v4/routetables_client.go | 70 +-- .../armnetwork/v4/routingintent_client.go | 45 +- .../armnetwork/v4/scopeconnections_client.go | 35 +- .../v4/securityadminconfigurations_client.go | 40 +- .../armnetwork/v4/securitygroups_client.go | 70 +-- .../v4/securitypartnerproviders_client.go | 70 +-- .../armnetwork/v4/securityrules_client.go | 45 +- .../v4/serviceassociationlinks_client.go | 6 +- .../v4/serviceendpointpolicies_client.go | 70 +-- ...serviceendpointpolicydefinitions_client.go | 45 +- .../v4/servicetaginformation_client.go | 23 +- .../armnetwork/v4/servicetags_client.go | 6 +- .../armnetwork/v4/staticmembers_client.go | 35 +- .../network/armnetwork/v4/subnets_client.go 
| 63 ++- ...riptionnetworkmanagerconnections_client.go | 35 +- .../network/armnetwork/v4/time_rfc3339.go | 40 +- .../network/armnetwork/v4/usages_client.go | 23 +- .../network/armnetwork/v4/vipswap_client.go | 22 +- .../v4/virtualapplianceconnections_client.go | 45 +- .../armnetwork/v4/virtualappliances_client.go | 70 +-- .../v4/virtualappliancesites_client.go | 45 +- .../v4/virtualapplianceskus_client.go | 27 +- .../v4/virtualhubbgpconnection_client.go | 24 +- .../v4/virtualhubbgpconnections_client.go | 41 +- .../v4/virtualhubipconfiguration_client.go | 45 +- .../v4/virtualhubroutetablev2s_client.go | 45 +- .../armnetwork/v4/virtualhubs_client.go | 97 ++-- ...virtualnetworkgatewayconnections_client.go | 112 ++++- .../virtualnetworkgatewaynatrules_client.go | 45 +- .../v4/virtualnetworkgateways_client.go | 209 +++++++-- .../v4/virtualnetworkpeerings_client.go | 45 +- .../armnetwork/v4/virtualnetworks_client.go | 114 ++--- .../v4/virtualnetworktaps_client.go | 70 +-- .../v4/virtualrouterpeerings_client.go | 45 +- .../armnetwork/v4/virtualrouters_client.go | 66 +-- .../armnetwork/v4/virtualwans_client.go | 70 +-- .../armnetwork/v4/vpnconnections_client.go | 63 ++- .../armnetwork/v4/vpngateways_client.go | 102 +++-- .../v4/vpnlinkconnections_client.go | 41 +- .../v4/vpnserverconfigurations_client.go | 70 +-- ...urationsassociatedwithvirtualwan_client.go | 11 +- .../v4/vpnsitelinkconnections_client.go | 6 +- .../armnetwork/v4/vpnsitelinks_client.go | 27 +- .../network/armnetwork/v4/vpnsites_client.go | 70 +-- .../v4/vpnsitesconfiguration_client.go | 11 +- .../network/armnetwork/v4/watchers_client.go | 130 +++++- .../webapplicationfirewallpolicies_client.go | 61 +-- .../armnetwork/v4/webcategories_client.go | 27 +- .../sdk/storage/azblob/CHANGELOG.md | 79 ++++ .../sdk/storage/azblob/README.md | 148 ++++--- .../sdk/storage/azblob/appendblob/client.go | 81 +++- .../sdk/storage/azblob/appendblob/models.go | 34 +- .../storage/azblob/appendblob/responses.go | 2 +- .../sdk/storage/azblob/assets.json | 6 + .../sdk/storage/azblob/blob/client.go | 62 +-- .../sdk/storage/azblob/blob/constants.go | 12 +- .../sdk/storage/azblob/blob/models.go | 108 +++-- .../sdk/storage/azblob/blob/responses.go | 18 +- .../sdk/storage/azblob/blob/retry_reader.go | 4 +- .../sdk/storage/azblob/blob/utils.go | 18 +- .../storage/azblob/bloberror/error_codes.go | 156 +++++++ .../sdk/storage/azblob/blockblob/client.go | 73 +-- .../sdk/storage/azblob/blockblob/constants.go | 2 +- .../sdk/storage/azblob/blockblob/mmf_unix.go | 4 +- .../storage/azblob/blockblob/mmf_windows.go | 4 +- .../sdk/storage/azblob/blockblob/models.go | 138 ++++-- .../sdk/storage/azblob/blockblob/responses.go | 5 +- .../sdk/storage/azblob/ci.yml | 5 + .../sdk/storage/azblob/client.go | 5 +- .../sdk/storage/azblob/constants.go | 4 +- .../storage/azblob/container/batch_builder.go | 94 ++++ .../sdk/storage/azblob/container/client.go | 128 ++++-- .../sdk/storage/azblob/container/constants.go | 4 +- .../sdk/storage/azblob/container/models.go | 152 ++++++- .../sdk/storage/azblob/container/responses.go | 28 ++ .../sdk/storage/azblob/doc.go | 43 +- .../storage/azblob/internal/base/clients.go | 39 +- .../azblob/internal/exported/blob_batch.go | 279 ++++++++++++ .../azblob/internal/exported/log_events.go | 20 + .../azblob/internal/exported/version.go | 2 +- .../azblob/internal/generated/autorest.md | 120 ++++- .../azblob/internal/generated/blob_client.go | 18 +- .../generated/zz_appendblob_client.go | 112 ++--- .../internal/generated/zz_blob_client.go | 342 
++++++++------- .../internal/generated/zz_blockblob_client.go | 174 ++++---- .../internal/generated/zz_container_client.go | 198 +++++---- .../azblob/internal/generated/zz_models.go | 63 ++- .../internal/generated/zz_models_serde.go | 104 ++--- .../internal/generated/zz_pageblob_client.go | 218 ++++----- .../internal/generated/zz_response_types.go | 22 +- .../internal/generated/zz_service_client.go | 63 +-- .../internal/shared/challenge_policy.go | 113 +++++ .../storage/azblob/internal/shared/shared.go | 57 +-- .../sdk/storage/azblob/log.go | 16 + .../sdk/storage/azblob/models.go | 8 +- .../sdk/storage/azblob/pageblob/client.go | 24 +- .../sdk/storage/azblob/pageblob/constants.go | 4 +- .../sdk/storage/azblob/pageblob/models.go | 73 ++- .../sdk/storage/azblob/sas/account.go | 92 ++-- .../sdk/storage/azblob/sas/query_params.go | 58 +-- .../sdk/storage/azblob/sas/service.go | 72 ++- .../storage/azblob/service/batch_builder.go | 94 ++++ .../sdk/storage/azblob/service/client.go | 107 ++++- .../sdk/storage/azblob/service/models.go | 169 ++++++- .../sdk/storage/azblob/service/responses.go | 22 + .../apps/confidential/confidential.go | 36 +- .../apps/internal/base/base.go | 10 + .../internal/base/internal/storage/items.go | 12 +- .../internal/storage/partitioned_storage.go | 18 +- .../internal/base/internal/storage/storage.go | 17 +- .../apps/internal/oauth/oauth.go | 1 + .../oauth/ops/accesstokens/accesstokens.go | 6 + .../internal/oauth/ops/accesstokens/tokens.go | 1 + .../internal/oauth/ops/authority/authority.go | 37 ++ .../apps/internal/version/version.go | 2 +- .../apps/public/public.go | 35 ++ .../protosanitizer/protosanitizer.go | 9 +- vendor/golang.org/x/exp/slices/cmp.go | 44 ++ vendor/golang.org/x/exp/slices/slices.go | 415 +++++++++++++++--- vendor/golang.org/x/exp/slices/sort.go | 170 ++++--- .../slices/{zsortfunc.go => zsortanyfunc.go} | 154 +++---- .../golang.org/x/exp/slices/zsortordered.go | 34 +- vendor/golang.org/x/net/context/go17.go | 1 + vendor/golang.org/x/net/context/go19.go | 1 + vendor/golang.org/x/net/context/pre_go17.go | 1 + vendor/golang.org/x/net/context/pre_go19.go | 1 + vendor/golang.org/x/net/http2/databuffer.go | 59 ++- vendor/golang.org/x/net/http2/go111.go | 30 ++ vendor/golang.org/x/net/http2/go115.go | 27 ++ vendor/golang.org/x/net/http2/go118.go | 17 + vendor/golang.org/x/net/http2/not_go111.go | 21 + vendor/golang.org/x/net/http2/not_go115.go | 31 ++ vendor/golang.org/x/net/http2/not_go118.go | 17 + vendor/golang.org/x/net/http2/server.go | 24 +- vendor/golang.org/x/net/http2/transport.go | 33 +- vendor/golang.org/x/net/idna/go118.go | 1 + vendor/golang.org/x/net/idna/idna10.0.0.go | 1 + vendor/golang.org/x/net/idna/idna9.0.0.go | 1 + vendor/golang.org/x/net/idna/pre_go118.go | 1 + vendor/golang.org/x/net/idna/tables10.0.0.go | 1 + vendor/golang.org/x/net/idna/tables11.0.0.go | 1 + vendor/golang.org/x/net/idna/tables12.0.0.go | 1 + vendor/golang.org/x/net/idna/tables13.0.0.go | 1 + vendor/golang.org/x/net/idna/tables15.0.0.go | 1 + vendor/golang.org/x/net/idna/tables9.0.0.go | 1 + vendor/golang.org/x/net/idna/trie12.0.0.go | 1 + vendor/golang.org/x/net/idna/trie13.0.0.go | 1 + .../api/annotations/field_info.pb.go | 295 ------------- vendor/k8s.io/utils/integer/integer.go | 8 +- vendor/k8s.io/utils/pointer/pointer.go | 283 +++--------- vendor/k8s.io/utils/ptr/OWNERS | 10 + vendor/k8s.io/utils/ptr/README.md | 3 + vendor/k8s.io/utils/ptr/ptr.go | 73 +++ vendor/modules.txt | 34 +- .../pkg/azclient/arm_conf.go | 38 +- 
.../cloud-provider-azure/pkg/azclient/auth.go | 89 +--- .../pkg/azclient/auth_conf.go | 69 +++ .../pkg/azclient/cloud.go | 19 +- .../pkg/azclient/factory_conf.go | 15 +- .../pkg/azclient/factory_gen.go | 165 ++++++- .../pkg/azclient/utils/const.go | 2 +- .../pkg/azclient/utils/options.go | 36 +- .../azureclients/armclient/azure_armclient.go | 10 +- .../azureclients/vmclient/azure_vmclient.go | 4 +- .../vmssvmclient/azure_vmssvmclient.go | 2 +- .../pkg/provider/azure.go | 20 +- .../pkg/provider/azure_config.go | 29 -- .../pkg/provider/azure_controller_common.go | 16 +- .../pkg/provider/azure_controller_standard.go | 11 +- .../pkg/provider/azure_controller_vmss.go | 10 +- .../pkg/provider/azure_controller_vmssflex.go | 11 +- .../pkg/provider/azure_fakes.go | 13 +- .../pkg/provider/azure_instance_metadata.go | 2 +- .../pkg/provider/azure_instances_v1.go | 14 +- .../pkg/provider/azure_loadbalancer.go | 47 +- .../azure_loadbalancer_backendpool.go | 10 +- .../azure_loadbalancer_healthprobe.go | 2 +- .../pkg/provider/azure_loadbalancer_repo.go | 2 +- .../provider/azure_privatelinkservice_repo.go | 2 +- .../pkg/provider/azure_routes.go | 10 +- .../pkg/provider/azure_standard.go | 4 +- .../pkg/provider/azure_storageaccount.go | 7 +- .../pkg/provider/azure_utils.go | 34 +- .../pkg/provider/azure_vmss.go | 44 +- .../pkg/provider/azure_vmssflex.go | 8 +- .../pkg/provider/azure_zones.go | 4 +- .../pkg/provider/config/azure_auth.go | 32 +- .../pkg/provider/config/azure_ratelimit.go | 2 +- .../pkg/retry/azure_error.go | 4 +- .../pkg/retry/azure_retry.go | 18 +- .../pkg/util/deepcopy/deepcopy.go | 14 +- 289 files changed, 9008 insertions(+), 5180 deletions(-) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/batch_builder.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/log_events.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/log.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/batch_builder.go create mode 100644 vendor/golang.org/x/exp/slices/cmp.go rename vendor/golang.org/x/exp/slices/{zsortfunc.go => zsortanyfunc.go} (64%) create mode 100644 vendor/golang.org/x/net/http2/go111.go create mode 100644 vendor/golang.org/x/net/http2/go115.go create mode 100644 vendor/golang.org/x/net/http2/go118.go create mode 100644 vendor/golang.org/x/net/http2/not_go111.go create mode 100644 vendor/golang.org/x/net/http2/not_go115.go create mode 100644 vendor/golang.org/x/net/http2/not_go118.go delete mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go create mode 100644 vendor/k8s.io/utils/ptr/OWNERS create mode 100644 vendor/k8s.io/utils/ptr/README.md create mode 100644 vendor/k8s.io/utils/ptr/ptr.go create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth_conf.go delete mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_config.go diff --git a/go.mod b/go.mod index fd182911c..50c8225f3 100644 --- a/go.mod +++ b/go.mod @@ -2,62 +2,59 @@ module 
sigs.k8s.io/blob-csi-driver go 1.21 +toolchain go1.21.4 + require ( github.com/Azure/azure-sdk-for-go v68.0.0+incompatible - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0 - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.6.1 github.com/Azure/go-autorest/autorest v0.11.29 github.com/Azure/go-autorest/autorest/adal v0.9.23 + github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect github.com/container-storage-interface/spec v1.8.0 - github.com/go-ini/ini v1.67.0 + github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.3 - github.com/kubernetes-csi/csi-lib-utils v0.16.0 - github.com/onsi/ginkgo/v2 v2.13.2 + github.com/imdario/mergo v0.3.9 // indirect + github.com/kubernetes-csi/csi-lib-utils v0.13.0 github.com/onsi/gomega v1.30.0 github.com/pborman/uuid v1.2.1 github.com/pelletier/go-toml v1.9.5 - github.com/pkg/errors v0.9.1 - github.com/satori/go.uuid v1.2.0 github.com/stretchr/testify v1.8.4 - golang.org/x/net v0.19.0 + golang.org/x/net v0.17.0 google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.31.0 k8s.io/api v0.28.4 k8s.io/apimachinery v0.28.4 - k8s.io/apiserver v0.28.4 k8s.io/client-go v0.28.4 k8s.io/component-base v0.28.4 k8s.io/klog/v2 v2.110.1 k8s.io/kubernetes v1.28.4 k8s.io/mount-utils v0.28.4 - k8s.io/pod-security-admission v0.28.4 - k8s.io/utils v0.0.0-20230505201702-9f6742963106 - sigs.k8s.io/cloud-provider-azure v1.27.1-0.20231128114432-f80321a29d3a - sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20231128114432-f80321a29d3a - sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20231128114432-f80321a29d3a + k8s.io/utils v0.0.0-20231127182322-b307cd553661 + sigs.k8s.io/cloud-provider-azure v1.27.1-0.20231208022044-b9ede3fc98e9 + sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20231208022044-b9ede3fc98e9 sigs.k8s.io/yaml v1.4.0 ) require ( + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.3.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.6.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.2.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.2.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0 github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/mocks v0.4.2 // indirect - github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // 
indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect @@ -73,13 +70,13 @@ require ( github.com/evanphx/json-patch v5.7.0+incompatible // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-ini/ini v1.67.0 github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.4 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect - github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang-jwt/jwt/v5 v5.0.0 // indirect @@ -92,7 +89,6 @@ require ( github.com/google/uuid v1.4.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect - github.com/imdario/mergo v0.3.9 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -104,14 +100,17 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/onsi/ginkgo/v2 v2.13.2 github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/selinux v1.10.0 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/pkg/errors v0.9.1 github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.16.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/procfs v0.10.1 // indirect + github.com/satori/go.uuid v1.2.0 github.com/spf13/cobra v1.8.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect @@ -131,7 +130,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.19.0 // indirect golang.org/x/crypto v0.16.0 // indirect - golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/oauth2 v0.11.0 // indirect golang.org/x/sync v0.5.0 // indirect golang.org/x/sys v0.15.0 // indirect @@ -140,14 +139,15 @@ require ( golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.14.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect + google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect 
gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.0.0 // indirect + k8s.io/apiserver v0.28.4 k8s.io/cloud-provider v0.28.4 // indirect k8s.io/component-helpers v0.28.4 // indirect k8s.io/controller-manager v0.28.4 // indirect @@ -155,7 +155,9 @@ require ( k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect k8s.io/kubectl v0.0.0 // indirect k8s.io/kubelet v0.28.4 // indirect + k8s.io/pod-security-admission v0.28.4 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect + sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20231205023417-1ba5a224ab0e sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect ) diff --git a/go.sum b/go.sum index 421aaf57d..c9d87e875 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,6 @@ -cloud.google.com/go v0.110.9 h1:e7ITSqGFFk4rbz/JFIqZh3G4VEHguhAL4BQcFlWtU68= -cloud.google.com/go/compute v1.23.2 h1:nWEMDhgbBkBJjfpVySqU4jgWdc22PLR0o4vEexZHers= -cloud.google.com/go/compute v1.23.2/go.mod h1:JJ0atRC0J/oWYiiVBmsSsrRnh92DhZPG4hFDcR04Rns= +cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= @@ -17,24 +17,22 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armconta github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0/go.mod h1:E7ltexgRDmeJ0fJWv0D/HLwY2xbDdN+uv+X2uZtOx3w= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.6.0 h1:AAIdAyPkFff6XTct2lQCxOWN/+LnA41S7kIkzKaMbyE= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.6.0/go.mod h1:noQIdW75SiQFB3mSFJBr4iRRH83S9skaFiBv4C0uEs0= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 h1:mLY+pNLjCUeKhgnAJWAKhEUQM+RJQo2H1fuGSw1Ky1E= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2/go.mod h1:FbdwsQ2EzwvXxOPcMFYO8ogEc9uMMIj3YkmCdXdAFmk= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0 h1:HlZMUZW8S4P9oob1nCHxCCKrytxyLc+24nUJGssoEto= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0/go.mod h1:StGsLbuJh06Bd8IBfnAlIFV3fLb+gkczONWf15hpX2E= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0 h1:pPvTJ1dY0sA35JOeFq6TsY2xj6Z85Yo23Pj4wCCvu4o= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0/go.mod h1:mLfWfj8v3jfWKsL9G4eoBoXVcsqcIUTapmdKy7uGOp0= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.2.0 h1:iGj7n4SmssnseLryJRs/0lb4Db129ioYOCPSPC+vEsw= 
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.2.0/go.mod h1:qeBrdANBgW4QsU1bF5/9qjrPRwFIt+AnOMxyH5Bwkhk= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.2.0 h1:9Eih8XcEeQnFD0ntMlUDleKMzfeCeUfa+VbnDCI4AZs= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.2.0/go.mod h1:wGPyTi+aURdqPAGMZDQqnNs9IrShADF8w2WZb6bKeq0= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 h1:Dd+RhdJn0OTtVGaeDLZpcumkIVCtA/3/Fo42+eoYvVM= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0/go.mod h1:5kakwfW5CjC9KK+Q4wjXAg+ShuIm2mBMua0ZFj2C8PE= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.6.1 h1:YvQv9Mz6T8oR5ypQOL6erY0Z5t71ak1uHV4QFokCOZk= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.6.1/go.mod h1:c6WvOhtmjNUWbLfOG1qxM/q0SPvQNSVJvolm+C52dIU= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0 h1:nVocQV40OQne5613EeLayJiRAJuKlBGy+m22qWG+WRg= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0/go.mod h1:7QJP7dr2wznCMeqIrhMgWGf7XpAQnVrJqDm9nvV3Cu4= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= @@ -55,8 +53,8 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw72xHJc34BNNykqSOeEJDAWkhf0u12/Jk= -github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 h1:hVeq+yCyUi+MsoO/CU95yqCIcdzra5ovzk8Q2BBpV2M= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= @@ -191,8 +189,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kubernetes-csi/csi-lib-utils v0.16.0 
h1:LXCvkhXHtFOkl7LoDqFdho/MuebccZqWxLwhKiRGiBg= -github.com/kubernetes-csi/csi-lib-utils v0.16.0/go.mod h1:fp1Oik+45tP2o4X9SD/SBWXLTQYT9wtLxGasBE3+vBI= +github.com/kubernetes-csi/csi-lib-utils v0.13.0 h1:QrTdZVZbHlaSUBN9ReayBPnnF1N0edFIpUKBwVIBW3w= +github.com/kubernetes-csi/csi-lib-utils v0.13.0/go.mod h1:JS9eDIZmSjx4F9o0bLTVK/qfhIIOifdjEfVXzxWapfE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= @@ -327,8 +325,8 @@ golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -347,8 +345,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -405,12 +403,12 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 h1:I6WNifs6pF9tNdSob2W24JtyxIYjzFB9qDlpUC76q+U= -google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405/go.mod h1:3WDQMjmJk36UQhjQ89emUzb1mdaHcPeeAh4SCBKznB4= -google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b h1:CIC2YMXmIhYw6evmhPxBKJ4fmLbOFtXQN/GV3XOZR8k= -google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870= -google.golang.org/genproto/googleapis/rpc 
v0.0.0-20231106174013-bbf56f31fb17 h1:Jyp0Hsi0bmHXG6k9eATXoYtjd6e2UzZ1SCn/wIupY14= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= +google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= +google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= +google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= +google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= @@ -471,16 +469,16 @@ k8s.io/mount-utils v0.28.4 h1:5GOZLm2dXi2fr+MKY8hS6kdV5reXrZBiK7848O5MVD0= k8s.io/mount-utils v0.28.4/go.mod h1:ceMAZ+Nzlk8zOwN205YXXGJRGmf1o0/XIwsKnG44p0I= k8s.io/pod-security-admission v0.28.4 h1:b9d6zfKNjkawrO2gF7rBr5XoSZqPfE6UjKLNjgXYrr0= k8s.io/pod-security-admission v0.28.4/go.mod h1:MVYrZx0Q6ewsZ05Ml2+Ox03HQMAVjO60oombQNmJ44E= -k8s.io/utils v0.0.0-20230505201702-9f6742963106 h1:EObNQ3TW2D+WptiYXlApGNLVy0zm/JIBVY9i+M4wpAU= -k8s.io/utils v0.0.0-20230505201702-9f6742963106/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI= +k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0= -sigs.k8s.io/cloud-provider-azure v1.27.1-0.20231128114432-f80321a29d3a h1:e332VvGZflcYOgaFSi6uCCq+17518Mwe6HXG3mN8A4M= -sigs.k8s.io/cloud-provider-azure v1.27.1-0.20231128114432-f80321a29d3a/go.mod h1:M36MFsCkzXOA4MYSGwemzBRDfjb29LT/rmOSs2qnUxc= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20231128114432-f80321a29d3a h1:k3ruyFIAxcCi0XvrdIXt6Xvbw/iiSoBh58tF1NZQ3Sk= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20231128114432-f80321a29d3a/go.mod h1:8F3Frjd9iJjzk9h2dmqG5DrgJ8+Ge48UvO075A8KNFc= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20231128114432-f80321a29d3a h1:o2EKmtfsv4U5r1g4hqlUva4+ZptO4YzopHCppVj7mho= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20231128114432-f80321a29d3a/go.mod h1:JY+50qlRnEpQGhcw8fem/o9E/Q8OvmWSJPov+SpxTXM= +sigs.k8s.io/cloud-provider-azure v1.27.1-0.20231208022044-b9ede3fc98e9 h1:UybRilKUwfcg3CZh51++O/e6ppBRdT/UY0TjGJfWkPw= +sigs.k8s.io/cloud-provider-azure v1.27.1-0.20231208022044-b9ede3fc98e9/go.mod h1:BsbV0DptIzi3NdbPXIxruq9TRI4RSp49eV4CFXBssy4= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20231205023417-1ba5a224ab0e h1:U001A7jnOOi8eiYceYeCLK2S3rTX4K2atR8uNDw+SL8= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20231205023417-1ba5a224ab0e/go.mod h1:dckGAqm0wUQNqqvCEeWhfXKL7DB/r9zchDq9xdcF/Qk= 
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20231208022044-b9ede3fc98e9 h1:0XsdZlKjVI0UZYhvg3VbXCPFRRQS5VL1idrTKgzJjnc= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20231208022044-b9ede3fc98e9/go.mod h1:dDc0Ixf5VI01TkTj83ENW1hH5jImGJsdKhQgFRyQsyA= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= diff --git a/pkg/blob/controllerserver.go b/pkg/blob/controllerserver.go index 223800d11..9e6347617 100644 --- a/pkg/blob/controllerserver.go +++ b/pkg/blob/controllerserver.go @@ -28,6 +28,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" @@ -817,7 +818,9 @@ func generateSASToken(accountName, accountKey, storageEndpointSuffix string, exp sasURL, err := serviceClient.GetSASURL( sas.AccountResourceTypes{Object: true, Service: false, Container: true}, sas.AccountPermissions{Read: true, List: true, Write: true}, - sas.AccountServices{Blob: true}, time.Now(), time.Now().Add(time.Duration(expiryTime)*time.Minute)) + time.Now().Add(time.Duration(expiryTime)*time.Minute), + &service.GetSASURLOptions{StartTime: to.Ptr(time.Now())}, + ) if err != nil { return "", err } diff --git a/test/e2e/testsuites/pre_provisioned_sastoken_tester.go b/test/e2e/testsuites/pre_provisioned_sastoken_tester.go index fb50cb03b..28daca903 100644 --- a/test/e2e/testsuites/pre_provisioned_sastoken_tester.go +++ b/test/e2e/testsuites/pre_provisioned_sastoken_tester.go @@ -99,7 +99,7 @@ func GenerateSASToken(accountName, accountKey string) string { sasURL, err := serviceClient.GetSASURL( sas.AccountResourceTypes{Object: true, Service: true, Container: true}, sas.AccountPermissions{Read: true, List: true, Write: true, Delete: true, Add: true, Create: true, Update: true}, - sas.AccountServices{Blob: true}, time.Now(), time.Now().Add(10*time.Hour)) + time.Now().Add(10*time.Hour), nil) framework.ExpectNoError(err) u, err := url.Parse(sasURL) framework.ExpectNoError(err) diff --git a/test/utils/azure/azure_helper.go b/test/utils/azure/azure_helper.go index 81ac7a990..a368e56a2 100644 --- a/test/utils/azure/azure_helper.go +++ b/test/utils/azure/azure_helper.go @@ -22,8 +22,6 @@ import ( "os" "time" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" resources "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" "sigs.k8s.io/cloud-provider-azure/pkg/azclient" @@ -39,21 +37,12 @@ type Client struct { func GetClient(cloud, subscriptionID, clientID, tenantID, clientSecret string) (*Client, error) { armConfig := &azclient.ARMClientConfig{ - Cloud: cloud, + Cloud: cloud, + TenantID: tenantID, } - cloudConfig, err := azclient.GetAzureCloudConfig(armConfig) - if err != nil { - return nil, err - } - credProvider, err := azclient.NewAuthProvider(azclient.AzureAuthConfig{ - TenantID: tenantID, + credProvider, err := azclient.NewAuthProvider(armConfig, &azclient.AzureAuthConfig{ AADClientID: clientID, AADClientSecret: clientSecret, - }, &arm.ClientOptions{ - AuxiliaryTenants: 
[]string{tenantID}, - ClientOptions: policy.ClientOptions{ - Cloud: *cloudConfig, - }, }) if err != nil { return nil, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/CHANGELOG.md index c8a7b2b16..0d27c0773 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/CHANGELOG.md @@ -1,5 +1,16 @@ # Release History +## 4.3.0 (2023-11-24) +### Features Added + +- Support for test fakes and OpenTelemetry trace spans. + + +## 4.3.0-beta.1 (2023-10-09) +### Features Added + +- Support for test fakes and OpenTelemetry trace spans. + ## 4.2.0 (2023-09-22) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/README.md index 2e7907792..093565038 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/README.md @@ -61,32 +61,15 @@ client := clientFactory.NewVirtualHubBgpConnectionsClient() ``` ## Fakes -The `fake` package provides implementations for fake servers that can be used for testing. -To create a fake server, declare an instance of the required fake server type(s). -```go -myFakeInterfacesServer := fake.InterfacesServer{} -``` -Next, provide func implementations for the methods you wish to fake. -The named return variables can be used to simplify return value construction. -```go -myFakeInterfacesServer.Get = func(ctx context.Context, resourceGroupName string, networkInterfaceName string, options *armnetwork.InterfacesClientGetOptions) (resp azfake.Responder[armnetwork.InterfacesClientGetResponse], errResp azfake.ErrorResponder) { - // TODO: resp.SetResponse(/* your fake InterfacesClientGetResponse response */) - return -} -``` -You connect the fake server to a client instance during construction through the optional transport. -Use `NewTokenCredential()` from `azcore/fake` to obtain a fake credential. -```go -import azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" -client, err := armnetwork.NewInterfacesClient("subscriptionID", azfake.NewTokenCredential(), &arm.ClientOptions{ - ClientOptions: azcore.ClientOptions{ - Transport: fake.NewInterfacesServerTransport(&myFakeInterfacesServer), - }, -}) -``` + +The fake package contains types used for constructing in-memory fake servers used in unit tests. +This allows writing tests to cover various success/error conditions without the need for connecting to a live service. + +Please see https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/samples/fakes for details and examples on how to use fakes. 
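For orientation, here is a condensed, self-contained sketch of the fake-server wiring that the longer example removed from this README demonstrated. It is not part of the patch: identifiers such as `myFakeInterfacesServer` and the empty canned response are illustrative placeholders, assuming the `fake` subpackage layout shown in the removed snippet.

```go
package example

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
)

// newFakeInterfacesClient returns an InterfacesClient whose transport is routed
// through an in-memory fake server, so tests never hit a live endpoint.
func newFakeInterfacesClient() (*armnetwork.InterfacesClient, error) {
	// Declare the fake server and implement only the operations the test exercises.
	myFakeInterfacesServer := fake.InterfacesServer{}
	myFakeInterfacesServer.Get = func(ctx context.Context, resourceGroupName, networkInterfaceName string, options *armnetwork.InterfacesClientGetOptions) (resp azfake.Responder[armnetwork.InterfacesClientGetResponse], errResp azfake.ErrorResponder) {
		// Placeholder response; a real test would populate the Interface fields it asserts on.
		resp.SetResponse(http.StatusOK, armnetwork.InterfacesClientGetResponse{}, nil)
		return
	}

	// Wire the fake server into the client via the optional transport, using a fake credential.
	return armnetwork.NewInterfacesClient("subscriptionID", azfake.NewTokenCredential(), &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{
			Transport: fake.NewInterfacesServerTransport(&myFakeInterfacesServer),
		},
	})
}
```

A test would then call methods on the returned client and receive the canned responses from the fake server instead of making network requests.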
## More sample code +- [Creating a Fake](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/resourcemanager/network/armnetwork/fake_example_test.go) - [IP Address](https://aka.ms/azsdk/go/mgmt/samples?path=sdk/resourcemanager/network/ipaddress) - [Load Balancer](https://aka.ms/azsdk/go/mgmt/samples?path=sdk/resourcemanager/network/loadbalancer) - [Network Interface](https://aka.ms/azsdk/go/mgmt/samples?path=sdk/resourcemanager/network/networkInterface) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/adminrulecollections_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/adminrulecollections_client.go index aafaa393f..c3b96670a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/adminrulecollections_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/adminrulecollections_client.go @@ -34,7 +34,7 @@ type AdminRuleCollectionsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewAdminRuleCollectionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*AdminRuleCollectionsClient, error) { - cl, err := arm.NewClient(moduleName+".AdminRuleCollectionsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -58,6 +58,10 @@ func NewAdminRuleCollectionsClient(subscriptionID string, credential azcore.Toke // method. func (client *AdminRuleCollectionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkManagerName string, configurationName string, ruleCollectionName string, ruleCollection AdminRuleCollection, options *AdminRuleCollectionsClientCreateOrUpdateOptions) (AdminRuleCollectionsClientCreateOrUpdateResponse, error) { var err error + const operationName = "AdminRuleCollectionsClient.CreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, networkManagerName, configurationName, ruleCollectionName, ruleCollection, options) if err != nil { return AdminRuleCollectionsClientCreateOrUpdateResponse{}, err @@ -138,10 +142,13 @@ func (client *AdminRuleCollectionsClient) BeginDelete(ctx context.Context, resou } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[AdminRuleCollectionsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[AdminRuleCollectionsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[AdminRuleCollectionsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -151,6 +158,10 @@ func (client *AdminRuleCollectionsClient) BeginDelete(ctx context.Context, resou // Generated from API version 2023-05-01 func (client *AdminRuleCollectionsClient) deleteOperation(ctx context.Context, resourceGroupName string, networkManagerName string, configurationName string, ruleCollectionName string, options 
*AdminRuleCollectionsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "AdminRuleCollectionsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, networkManagerName, configurationName, ruleCollectionName, options) if err != nil { return nil, err @@ -215,6 +226,10 @@ func (client *AdminRuleCollectionsClient) deleteCreateRequest(ctx context.Contex // method. func (client *AdminRuleCollectionsClient) Get(ctx context.Context, resourceGroupName string, networkManagerName string, configurationName string, ruleCollectionName string, options *AdminRuleCollectionsClientGetOptions) (AdminRuleCollectionsClientGetResponse, error) { var err error + const operationName = "AdminRuleCollectionsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, networkManagerName, configurationName, ruleCollectionName, options) if err != nil { return AdminRuleCollectionsClientGetResponse{}, err @@ -288,25 +303,20 @@ func (client *AdminRuleCollectionsClient) NewListPager(resourceGroupName string, return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *AdminRuleCollectionsClientListResponse) (AdminRuleCollectionsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, networkManagerName, configurationName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "AdminRuleCollectionsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, networkManagerName, configurationName, options) + }, nil) if err != nil { return AdminRuleCollectionsClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return AdminRuleCollectionsClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return AdminRuleCollectionsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/adminrules_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/adminrules_client.go index 499ef39c6..f5928b3df 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/adminrules_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/adminrules_client.go @@ -34,7 +34,7 @@ type AdminRulesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewAdminRulesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*AdminRulesClient, error) { - cl, err := arm.NewClient(moduleName+".AdminRulesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -59,6 +59,10 @@ func NewAdminRulesClient(subscriptionID string, credential azcore.TokenCredentia // method. func (client *AdminRulesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkManagerName string, configurationName string, ruleCollectionName string, ruleName string, adminRule BaseAdminRuleClassification, options *AdminRulesClientCreateOrUpdateOptions) (AdminRulesClientCreateOrUpdateResponse, error) { var err error + const operationName = "AdminRulesClient.CreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, networkManagerName, configurationName, ruleCollectionName, ruleName, adminRule, options) if err != nil { return AdminRulesClientCreateOrUpdateResponse{}, err @@ -143,10 +147,13 @@ func (client *AdminRulesClient) BeginDelete(ctx context.Context, resourceGroupNa } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[AdminRulesClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[AdminRulesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[AdminRulesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -156,6 +163,10 @@ func (client *AdminRulesClient) BeginDelete(ctx context.Context, resourceGroupNa // Generated from API version 2023-05-01 func (client *AdminRulesClient) deleteOperation(ctx context.Context, resourceGroupName string, networkManagerName string, configurationName string, ruleCollectionName string, ruleName string, options *AdminRulesClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "AdminRulesClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, networkManagerName, configurationName, ruleCollectionName, ruleName, options) if err != nil { return nil, err @@ -224,6 +235,10 @@ func (client *AdminRulesClient) deleteCreateRequest(ctx context.Context, resourc // - options - AdminRulesClientGetOptions contains the optional parameters for the AdminRulesClient.Get method. 
func (client *AdminRulesClient) Get(ctx context.Context, resourceGroupName string, networkManagerName string, configurationName string, ruleCollectionName string, ruleName string, options *AdminRulesClientGetOptions) (AdminRulesClientGetResponse, error) { var err error + const operationName = "AdminRulesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, networkManagerName, configurationName, ruleCollectionName, ruleName, options) if err != nil { return AdminRulesClientGetResponse{}, err @@ -301,25 +316,20 @@ func (client *AdminRulesClient) NewListPager(resourceGroupName string, networkMa return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *AdminRulesClientListResponse) (AdminRulesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, networkManagerName, configurationName, ruleCollectionName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "AdminRulesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, networkManagerName, configurationName, ruleCollectionName, options) + }, nil) if err != nil { return AdminRulesClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return AdminRulesClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return AdminRulesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgatewayprivateendpointconnections_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgatewayprivateendpointconnections_client.go index 251db143c..6632c4bd2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgatewayprivateendpointconnections_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgatewayprivateendpointconnections_client.go @@ -33,7 +33,7 @@ type ApplicationGatewayPrivateEndpointConnectionsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewApplicationGatewayPrivateEndpointConnectionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ApplicationGatewayPrivateEndpointConnectionsClient, error) { - cl, err := arm.NewClient(moduleName+".ApplicationGatewayPrivateEndpointConnectionsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *ApplicationGatewayPrivateEndpointConnectionsClient) BeginDelete(ct } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ApplicationGatewayPrivateEndpointConnectionsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ApplicationGatewayPrivateEndpointConnectionsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ApplicationGatewayPrivateEndpointConnectionsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *ApplicationGatewayPrivateEndpointConnectionsClient) BeginDelete(ct // Generated from API version 2023-05-01 func (client *ApplicationGatewayPrivateEndpointConnectionsClient) deleteOperation(ctx context.Context, resourceGroupName string, applicationGatewayName string, connectionName string, options *ApplicationGatewayPrivateEndpointConnectionsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "ApplicationGatewayPrivateEndpointConnectionsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, applicationGatewayName, connectionName, options) if err != nil { return nil, err @@ -130,6 +137,10 @@ func (client *ApplicationGatewayPrivateEndpointConnectionsClient) deleteCreateRe // method. 
func (client *ApplicationGatewayPrivateEndpointConnectionsClient) Get(ctx context.Context, resourceGroupName string, applicationGatewayName string, connectionName string, options *ApplicationGatewayPrivateEndpointConnectionsClientGetOptions) (ApplicationGatewayPrivateEndpointConnectionsClientGetResponse, error) { var err error + const operationName = "ApplicationGatewayPrivateEndpointConnectionsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, applicationGatewayName, connectionName, options) if err != nil { return ApplicationGatewayPrivateEndpointConnectionsClientGetResponse{}, err @@ -198,25 +209,20 @@ func (client *ApplicationGatewayPrivateEndpointConnectionsClient) NewListPager(r return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ApplicationGatewayPrivateEndpointConnectionsClientListResponse) (ApplicationGatewayPrivateEndpointConnectionsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, applicationGatewayName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return ApplicationGatewayPrivateEndpointConnectionsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ApplicationGatewayPrivateEndpointConnectionsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, applicationGatewayName, options) + }, nil) if err != nil { return ApplicationGatewayPrivateEndpointConnectionsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ApplicationGatewayPrivateEndpointConnectionsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -273,10 +279,13 @@ func (client *ApplicationGatewayPrivateEndpointConnectionsClient) BeginUpdate(ct } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ApplicationGatewayPrivateEndpointConnectionsClientUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ApplicationGatewayPrivateEndpointConnectionsClientUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ApplicationGatewayPrivateEndpointConnectionsClientUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -286,6 +295,10 @@ func (client *ApplicationGatewayPrivateEndpointConnectionsClient) BeginUpdate(ct // Generated from API version 2023-05-01 func (client *ApplicationGatewayPrivateEndpointConnectionsClient) update(ctx context.Context, resourceGroupName string, applicationGatewayName string, connectionName string, parameters ApplicationGatewayPrivateEndpointConnection, options *ApplicationGatewayPrivateEndpointConnectionsClientBeginUpdateOptions) (*http.Response, 
error) { var err error + const operationName = "ApplicationGatewayPrivateEndpointConnectionsClient.BeginUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateCreateRequest(ctx, resourceGroupName, applicationGatewayName, connectionName, parameters, options) if err != nil { return nil, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgatewayprivatelinkresources_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgatewayprivatelinkresources_client.go index 6ed07da77..a57e95d52 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgatewayprivatelinkresources_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgatewayprivatelinkresources_client.go @@ -33,7 +33,7 @@ type ApplicationGatewayPrivateLinkResourcesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewApplicationGatewayPrivateLinkResourcesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ApplicationGatewayPrivateLinkResourcesClient, error) { - cl, err := arm.NewClient(moduleName+".ApplicationGatewayPrivateLinkResourcesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -57,25 +57,20 @@ func (client *ApplicationGatewayPrivateLinkResourcesClient) NewListPager(resourc return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ApplicationGatewayPrivateLinkResourcesClientListResponse) (ApplicationGatewayPrivateLinkResourcesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, applicationGatewayName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ApplicationGatewayPrivateLinkResourcesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, applicationGatewayName, options) + }, nil) if err != nil { return ApplicationGatewayPrivateLinkResourcesClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ApplicationGatewayPrivateLinkResourcesClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ApplicationGatewayPrivateLinkResourcesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgateways_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgateways_client.go index ee0dad7ab..713c18f0e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgateways_client.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgateways_client.go @@ -33,7 +33,7 @@ type ApplicationGatewaysClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewApplicationGatewaysClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ApplicationGatewaysClient, error) { - cl, err := arm.NewClient(moduleName+".ApplicationGatewaysClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -60,10 +60,13 @@ func (client *ApplicationGatewaysClient) BeginBackendHealth(ctx context.Context, } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ApplicationGatewaysClientBackendHealthResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ApplicationGatewaysClientBackendHealthResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ApplicationGatewaysClientBackendHealthResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -73,6 +76,10 @@ func (client *ApplicationGatewaysClient) BeginBackendHealth(ctx context.Context, // Generated from API version 2023-05-01 func (client *ApplicationGatewaysClient) backendHealth(ctx context.Context, resourceGroupName string, applicationGatewayName string, options *ApplicationGatewaysClientBeginBackendHealthOptions) (*http.Response, error) { var err error + const operationName = "ApplicationGatewaysClient.BeginBackendHealth" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.backendHealthCreateRequest(ctx, resourceGroupName, applicationGatewayName, options) if err != nil { return nil, err @@ -135,10 +142,13 @@ func (client *ApplicationGatewaysClient) BeginBackendHealthOnDemand(ctx context. } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ApplicationGatewaysClientBackendHealthOnDemandResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ApplicationGatewaysClientBackendHealthOnDemandResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ApplicationGatewaysClientBackendHealthOnDemandResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -149,6 +159,10 @@ func (client *ApplicationGatewaysClient) BeginBackendHealthOnDemand(ctx context. 
// Generated from API version 2023-05-01 func (client *ApplicationGatewaysClient) backendHealthOnDemand(ctx context.Context, resourceGroupName string, applicationGatewayName string, probeRequest ApplicationGatewayOnDemandProbe, options *ApplicationGatewaysClientBeginBackendHealthOnDemandOptions) (*http.Response, error) { var err error + const operationName = "ApplicationGatewaysClient.BeginBackendHealthOnDemand" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.backendHealthOnDemandCreateRequest(ctx, resourceGroupName, applicationGatewayName, probeRequest, options) if err != nil { return nil, err @@ -213,10 +227,13 @@ func (client *ApplicationGatewaysClient) BeginCreateOrUpdate(ctx context.Context } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ApplicationGatewaysClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ApplicationGatewaysClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ApplicationGatewaysClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -226,6 +243,10 @@ func (client *ApplicationGatewaysClient) BeginCreateOrUpdate(ctx context.Context // Generated from API version 2023-05-01 func (client *ApplicationGatewaysClient) createOrUpdate(ctx context.Context, resourceGroupName string, applicationGatewayName string, parameters ApplicationGateway, options *ApplicationGatewaysClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "ApplicationGatewaysClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, applicationGatewayName, parameters, options) if err != nil { return nil, err @@ -286,10 +307,13 @@ func (client *ApplicationGatewaysClient) BeginDelete(ctx context.Context, resour } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ApplicationGatewaysClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ApplicationGatewaysClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ApplicationGatewaysClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -299,6 +323,10 @@ func (client *ApplicationGatewaysClient) BeginDelete(ctx context.Context, resour // Generated from API version 2023-05-01 func (client *ApplicationGatewaysClient) deleteOperation(ctx context.Context, resourceGroupName string, applicationGatewayName string, options *ApplicationGatewaysClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "ApplicationGatewaysClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan 
:= runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, applicationGatewayName, options) if err != nil { return nil, err @@ -349,6 +377,10 @@ func (client *ApplicationGatewaysClient) deleteCreateRequest(ctx context.Context // - options - ApplicationGatewaysClientGetOptions contains the optional parameters for the ApplicationGatewaysClient.Get method. func (client *ApplicationGatewaysClient) Get(ctx context.Context, resourceGroupName string, applicationGatewayName string, options *ApplicationGatewaysClientGetOptions) (ApplicationGatewaysClientGetResponse, error) { var err error + const operationName = "ApplicationGatewaysClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, applicationGatewayName, options) if err != nil { return ApplicationGatewaysClientGetResponse{}, err @@ -409,6 +441,10 @@ func (client *ApplicationGatewaysClient) getHandleResponse(resp *http.Response) // method. func (client *ApplicationGatewaysClient) GetSSLPredefinedPolicy(ctx context.Context, predefinedPolicyName string, options *ApplicationGatewaysClientGetSSLPredefinedPolicyOptions) (ApplicationGatewaysClientGetSSLPredefinedPolicyResponse, error) { var err error + const operationName = "ApplicationGatewaysClient.GetSSLPredefinedPolicy" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getSSLPredefinedPolicyCreateRequest(ctx, predefinedPolicyName, options) if err != nil { return ApplicationGatewaysClientGetSSLPredefinedPolicyResponse{}, err @@ -468,25 +504,20 @@ func (client *ApplicationGatewaysClient) NewListPager(resourceGroupName string, return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ApplicationGatewaysClientListResponse) (ApplicationGatewaysClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ApplicationGatewaysClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return ApplicationGatewaysClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ApplicationGatewaysClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ApplicationGatewaysClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -532,25 +563,20 @@ func (client *ApplicationGatewaysClient) NewListAllPager(options *ApplicationGat return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ApplicationGatewaysClientListAllResponse) (ApplicationGatewaysClientListAllResponse, error) { - var req *policy.Request - var err error 
- if page == nil { - req, err = client.listAllCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return ApplicationGatewaysClientListAllResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ApplicationGatewaysClient.NewListAllPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAllCreateRequest(ctx, options) + }, nil) if err != nil { return ApplicationGatewaysClientListAllResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ApplicationGatewaysClientListAllResponse{}, runtime.NewResponseError(resp) - } return client.listAllHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -589,6 +615,10 @@ func (client *ApplicationGatewaysClient) listAllHandleResponse(resp *http.Respon // method. func (client *ApplicationGatewaysClient) ListAvailableRequestHeaders(ctx context.Context, options *ApplicationGatewaysClientListAvailableRequestHeadersOptions) (ApplicationGatewaysClientListAvailableRequestHeadersResponse, error) { var err error + const operationName = "ApplicationGatewaysClient.ListAvailableRequestHeaders" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listAvailableRequestHeadersCreateRequest(ctx, options) if err != nil { return ApplicationGatewaysClientListAvailableRequestHeadersResponse{}, err @@ -640,6 +670,10 @@ func (client *ApplicationGatewaysClient) listAvailableRequestHeadersHandleRespon // method. func (client *ApplicationGatewaysClient) ListAvailableResponseHeaders(ctx context.Context, options *ApplicationGatewaysClientListAvailableResponseHeadersOptions) (ApplicationGatewaysClientListAvailableResponseHeadersResponse, error) { var err error + const operationName = "ApplicationGatewaysClient.ListAvailableResponseHeaders" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listAvailableResponseHeadersCreateRequest(ctx, options) if err != nil { return ApplicationGatewaysClientListAvailableResponseHeadersResponse{}, err @@ -691,6 +725,10 @@ func (client *ApplicationGatewaysClient) listAvailableResponseHeadersHandleRespo // method. 
func (client *ApplicationGatewaysClient) ListAvailableSSLOptions(ctx context.Context, options *ApplicationGatewaysClientListAvailableSSLOptionsOptions) (ApplicationGatewaysClientListAvailableSSLOptionsResponse, error) { var err error + const operationName = "ApplicationGatewaysClient.ListAvailableSSLOptions" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listAvailableSSLOptionsCreateRequest(ctx, options) if err != nil { return ApplicationGatewaysClientListAvailableSSLOptionsResponse{}, err @@ -745,25 +783,20 @@ func (client *ApplicationGatewaysClient) NewListAvailableSSLPredefinedPoliciesPa return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ApplicationGatewaysClientListAvailableSSLPredefinedPoliciesResponse) (ApplicationGatewaysClientListAvailableSSLPredefinedPoliciesResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listAvailableSSLPredefinedPoliciesCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ApplicationGatewaysClient.NewListAvailableSSLPredefinedPoliciesPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAvailableSSLPredefinedPoliciesCreateRequest(ctx, options) + }, nil) if err != nil { return ApplicationGatewaysClientListAvailableSSLPredefinedPoliciesResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ApplicationGatewaysClientListAvailableSSLPredefinedPoliciesResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ApplicationGatewaysClientListAvailableSSLPredefinedPoliciesResponse{}, runtime.NewResponseError(resp) - } return client.listAvailableSSLPredefinedPoliciesHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -802,6 +835,10 @@ func (client *ApplicationGatewaysClient) listAvailableSSLPredefinedPoliciesHandl // method. func (client *ApplicationGatewaysClient) ListAvailableServerVariables(ctx context.Context, options *ApplicationGatewaysClientListAvailableServerVariablesOptions) (ApplicationGatewaysClientListAvailableServerVariablesResponse, error) { var err error + const operationName = "ApplicationGatewaysClient.ListAvailableServerVariables" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listAvailableServerVariablesCreateRequest(ctx, options) if err != nil { return ApplicationGatewaysClientListAvailableServerVariablesResponse{}, err @@ -853,6 +890,10 @@ func (client *ApplicationGatewaysClient) listAvailableServerVariablesHandleRespo // method. 
func (client *ApplicationGatewaysClient) ListAvailableWafRuleSets(ctx context.Context, options *ApplicationGatewaysClientListAvailableWafRuleSetsOptions) (ApplicationGatewaysClientListAvailableWafRuleSetsResponse, error) { var err error + const operationName = "ApplicationGatewaysClient.ListAvailableWafRuleSets" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listAvailableWafRuleSetsCreateRequest(ctx, options) if err != nil { return ApplicationGatewaysClientListAvailableWafRuleSetsResponse{}, err @@ -912,10 +953,13 @@ func (client *ApplicationGatewaysClient) BeginStart(ctx context.Context, resourc } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ApplicationGatewaysClientStartResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ApplicationGatewaysClientStartResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ApplicationGatewaysClientStartResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -925,6 +969,10 @@ func (client *ApplicationGatewaysClient) BeginStart(ctx context.Context, resourc // Generated from API version 2023-05-01 func (client *ApplicationGatewaysClient) start(ctx context.Context, resourceGroupName string, applicationGatewayName string, options *ApplicationGatewaysClientBeginStartOptions) (*http.Response, error) { var err error + const operationName = "ApplicationGatewaysClient.BeginStart" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.startCreateRequest(ctx, resourceGroupName, applicationGatewayName, options) if err != nil { return nil, err @@ -982,10 +1030,13 @@ func (client *ApplicationGatewaysClient) BeginStop(ctx context.Context, resource } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ApplicationGatewaysClientStopResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ApplicationGatewaysClientStopResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ApplicationGatewaysClientStopResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -995,6 +1046,10 @@ func (client *ApplicationGatewaysClient) BeginStop(ctx context.Context, resource // Generated from API version 2023-05-01 func (client *ApplicationGatewaysClient) stop(ctx context.Context, resourceGroupName string, applicationGatewayName string, options *ApplicationGatewaysClientBeginStopOptions) (*http.Response, error) { var err error + const operationName = "ApplicationGatewaysClient.BeginStop" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.stopCreateRequest(ctx, resourceGroupName, applicationGatewayName, options) if err != nil { return nil, 
err @@ -1047,6 +1102,10 @@ func (client *ApplicationGatewaysClient) stopCreateRequest(ctx context.Context, // method. func (client *ApplicationGatewaysClient) UpdateTags(ctx context.Context, resourceGroupName string, applicationGatewayName string, parameters TagsObject, options *ApplicationGatewaysClientUpdateTagsOptions) (ApplicationGatewaysClientUpdateTagsResponse, error) { var err error + const operationName = "ApplicationGatewaysClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, applicationGatewayName, parameters, options) if err != nil { return ApplicationGatewaysClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgatewaywafdynamicmanifests_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgatewaywafdynamicmanifests_client.go index 1860d3a73..6821c9769 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgatewaywafdynamicmanifests_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgatewaywafdynamicmanifests_client.go @@ -33,7 +33,7 @@ type ApplicationGatewayWafDynamicManifestsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewApplicationGatewayWafDynamicManifestsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ApplicationGatewayWafDynamicManifestsClient, error) { - cl, err := arm.NewClient(moduleName+".ApplicationGatewayWafDynamicManifestsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -56,25 +56,20 @@ func (client *ApplicationGatewayWafDynamicManifestsClient) NewGetPager(location return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ApplicationGatewayWafDynamicManifestsClientGetResponse) (ApplicationGatewayWafDynamicManifestsClientGetResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.getCreateRequest(ctx, location, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ApplicationGatewayWafDynamicManifestsClient.NewGetPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.getCreateRequest(ctx, location, options) + }, nil) if err != nil { return ApplicationGatewayWafDynamicManifestsClientGetResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ApplicationGatewayWafDynamicManifestsClientGetResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ApplicationGatewayWafDynamicManifestsClientGetResponse{}, runtime.NewResponseError(resp) - } return client.getHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgatewaywafdynamicmanifestsdefault_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgatewaywafdynamicmanifestsdefault_client.go index bcc9787bd..d189a5614 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgatewaywafdynamicmanifestsdefault_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgatewaywafdynamicmanifestsdefault_client.go @@ -33,7 +33,7 @@ type ApplicationGatewayWafDynamicManifestsDefaultClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewApplicationGatewayWafDynamicManifestsDefaultClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ApplicationGatewayWafDynamicManifestsDefaultClient, error) { - cl, err := arm.NewClient(moduleName+".ApplicationGatewayWafDynamicManifestsDefaultClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -53,6 +53,10 @@ func NewApplicationGatewayWafDynamicManifestsDefaultClient(subscriptionID string // method. func (client *ApplicationGatewayWafDynamicManifestsDefaultClient) Get(ctx context.Context, location string, options *ApplicationGatewayWafDynamicManifestsDefaultClientGetOptions) (ApplicationGatewayWafDynamicManifestsDefaultClientGetResponse, error) { var err error + const operationName = "ApplicationGatewayWafDynamicManifestsDefaultClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, location, options) if err != nil { return ApplicationGatewayWafDynamicManifestsDefaultClientGetResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationsecuritygroups_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationsecuritygroups_client.go index 31216f033..74f42055f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationsecuritygroups_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationsecuritygroups_client.go @@ -33,7 +33,7 @@ type ApplicationSecurityGroupsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewApplicationSecurityGroupsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ApplicationSecurityGroupsClient, error) { - cl, err := arm.NewClient(moduleName+".ApplicationSecurityGroupsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *ApplicationSecurityGroupsClient) BeginCreateOrUpdate(ctx context.C } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ApplicationSecurityGroupsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ApplicationSecurityGroupsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ApplicationSecurityGroupsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *ApplicationSecurityGroupsClient) BeginCreateOrUpdate(ctx context.C // Generated from API version 2023-05-01 func (client *ApplicationSecurityGroupsClient) createOrUpdate(ctx context.Context, resourceGroupName string, applicationSecurityGroupName string, parameters ApplicationSecurityGroup, options *ApplicationSecurityGroupsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "ApplicationSecurityGroupsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, applicationSecurityGroupName, parameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *ApplicationSecurityGroupsClient) BeginDelete(ctx context.Context, } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ApplicationSecurityGroupsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ApplicationSecurityGroupsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ApplicationSecurityGroupsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *ApplicationSecurityGroupsClient) BeginDelete(ctx context.Context, // Generated from API version 2023-05-01 func (client *ApplicationSecurityGroupsClient) deleteOperation(ctx context.Context, resourceGroupName string, applicationSecurityGroupName string, options *ApplicationSecurityGroupsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "ApplicationSecurityGroupsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, applicationSecurityGroupName, options) if err != nil { return nil, err @@ -198,6 +212,10 @@ func (client 
*ApplicationSecurityGroupsClient) deleteCreateRequest(ctx context.C // method. func (client *ApplicationSecurityGroupsClient) Get(ctx context.Context, resourceGroupName string, applicationSecurityGroupName string, options *ApplicationSecurityGroupsClientGetOptions) (ApplicationSecurityGroupsClientGetResponse, error) { var err error + const operationName = "ApplicationSecurityGroupsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, applicationSecurityGroupName, options) if err != nil { return ApplicationSecurityGroupsClientGetResponse{}, err @@ -261,25 +279,20 @@ func (client *ApplicationSecurityGroupsClient) NewListPager(resourceGroupName st return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ApplicationSecurityGroupsClientListResponse) (ApplicationSecurityGroupsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return ApplicationSecurityGroupsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ApplicationSecurityGroupsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return ApplicationSecurityGroupsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ApplicationSecurityGroupsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -325,25 +338,20 @@ func (client *ApplicationSecurityGroupsClient) NewListAllPager(options *Applicat return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ApplicationSecurityGroupsClientListAllResponse) (ApplicationSecurityGroupsClientListAllResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listAllCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ApplicationSecurityGroupsClient.NewListAllPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAllCreateRequest(ctx, options) + }, nil) if err != nil { return ApplicationSecurityGroupsClientListAllResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ApplicationSecurityGroupsClientListAllResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ApplicationSecurityGroupsClientListAllResponse{}, runtime.NewResponseError(resp) - } return client.listAllHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -385,6 +393,10 @@ func (client *ApplicationSecurityGroupsClient) listAllHandleResponse(resp *http. // method. 
func (client *ApplicationSecurityGroupsClient) UpdateTags(ctx context.Context, resourceGroupName string, applicationSecurityGroupName string, parameters TagsObject, options *ApplicationSecurityGroupsClientUpdateTagsOptions) (ApplicationSecurityGroupsClientUpdateTagsResponse, error) { var err error + const operationName = "ApplicationSecurityGroupsClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, applicationSecurityGroupName, parameters, options) if err != nil { return ApplicationSecurityGroupsClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/autorest.md index b95836c20..1982a9354 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/autorest.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/autorest.md @@ -8,6 +8,6 @@ require: - https://github.com/Azure/azure-rest-api-specs/blob/80c21c17b4a7aa57f637ee594f7cfd653255a7e0/specification/network/resource-manager/readme.md - https://github.com/Azure/azure-rest-api-specs/blob/80c21c17b4a7aa57f637ee594f7cfd653255a7e0/specification/network/resource-manager/readme.go.md license-header: MICROSOFT_MIT_NO_VERSION -module-version: 4.2.0 +module-version: 4.3.0 tag: package-2023-05 ``` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availabledelegations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availabledelegations_client.go index d61e4f4ed..cd58ab741 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availabledelegations_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availabledelegations_client.go @@ -33,7 +33,7 @@ type AvailableDelegationsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewAvailableDelegationsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*AvailableDelegationsClient, error) { - cl, err := arm.NewClient(moduleName+".AvailableDelegationsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -56,25 +56,20 @@ func (client *AvailableDelegationsClient) NewListPager(location string, options return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *AvailableDelegationsClientListResponse) (AvailableDelegationsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, location, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "AvailableDelegationsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, location, options) + }, nil) if err != nil { return AvailableDelegationsClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return AvailableDelegationsClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return AvailableDelegationsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availableendpointservices_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availableendpointservices_client.go index 2e36c225f..e64ff0ed7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availableendpointservices_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availableendpointservices_client.go @@ -33,7 +33,7 @@ type AvailableEndpointServicesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewAvailableEndpointServicesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*AvailableEndpointServicesClient, error) { - cl, err := arm.NewClient(moduleName+".AvailableEndpointServicesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -56,25 +56,20 @@ func (client *AvailableEndpointServicesClient) NewListPager(location string, opt return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *AvailableEndpointServicesClientListResponse) (AvailableEndpointServicesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, location, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "AvailableEndpointServicesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, location, options) + }, nil) if err != nil { return AvailableEndpointServicesClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return AvailableEndpointServicesClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return AvailableEndpointServicesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availableprivateendpointtypes_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availableprivateendpointtypes_client.go index 5bcece5f1..dec555fa1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availableprivateendpointtypes_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availableprivateendpointtypes_client.go @@ -33,7 +33,7 @@ type AvailablePrivateEndpointTypesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewAvailablePrivateEndpointTypesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*AvailablePrivateEndpointTypesClient, error) { - cl, err := arm.NewClient(moduleName+".AvailablePrivateEndpointTypesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -57,25 +57,20 @@ func (client *AvailablePrivateEndpointTypesClient) NewListPager(location string, return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *AvailablePrivateEndpointTypesClientListResponse) (AvailablePrivateEndpointTypesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, location, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "AvailablePrivateEndpointTypesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, location, options) + }, nil) if err != nil { return AvailablePrivateEndpointTypesClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return AvailablePrivateEndpointTypesClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return AvailablePrivateEndpointTypesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -124,25 +119,20 @@ func (client *AvailablePrivateEndpointTypesClient) NewListByResourceGroupPager(l return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *AvailablePrivateEndpointTypesClientListByResourceGroupResponse) (AvailablePrivateEndpointTypesClientListByResourceGroupResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByResourceGroupCreateRequest(ctx, location, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "AvailablePrivateEndpointTypesClient.NewListByResourceGroupPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByResourceGroupCreateRequest(ctx, location, resourceGroupName, options) + }, nil) if err != nil { return AvailablePrivateEndpointTypesClientListByResourceGroupResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return AvailablePrivateEndpointTypesClientListByResourceGroupResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return AvailablePrivateEndpointTypesClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) - } return client.listByResourceGroupHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availableresourcegroupdelegations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availableresourcegroupdelegations_client.go index efb41be84..c4916fbfc 100644 --- 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availableresourcegroupdelegations_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availableresourcegroupdelegations_client.go @@ -33,7 +33,7 @@ type AvailableResourceGroupDelegationsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewAvailableResourceGroupDelegationsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*AvailableResourceGroupDelegationsClient, error) { - cl, err := arm.NewClient(moduleName+".AvailableResourceGroupDelegationsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -57,25 +57,20 @@ func (client *AvailableResourceGroupDelegationsClient) NewListPager(location str return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *AvailableResourceGroupDelegationsClientListResponse) (AvailableResourceGroupDelegationsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, location, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "AvailableResourceGroupDelegationsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, location, resourceGroupName, options) + }, nil) if err != nil { return AvailableResourceGroupDelegationsClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return AvailableResourceGroupDelegationsClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return AvailableResourceGroupDelegationsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availableservicealiases_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availableservicealiases_client.go index 091be2480..ccb45a0c6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availableservicealiases_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availableservicealiases_client.go @@ -33,7 +33,7 @@ type AvailableServiceAliasesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewAvailableServiceAliasesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*AvailableServiceAliasesClient, error) { - cl, err := arm.NewClient(moduleName+".AvailableServiceAliasesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -56,25 +56,20 @@ func (client *AvailableServiceAliasesClient) NewListPager(location string, optio return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *AvailableServiceAliasesClientListResponse) (AvailableServiceAliasesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, location, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "AvailableServiceAliasesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, location, options) + }, nil) if err != nil { return AvailableServiceAliasesClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return AvailableServiceAliasesClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return AvailableServiceAliasesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -122,25 +117,20 @@ func (client *AvailableServiceAliasesClient) NewListByResourceGroupPager(resourc return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *AvailableServiceAliasesClientListByResourceGroupResponse) (AvailableServiceAliasesClientListByResourceGroupResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, location, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "AvailableServiceAliasesClient.NewListByResourceGroupPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByResourceGroupCreateRequest(ctx, resourceGroupName, location, options) + }, nil) if err != nil { return AvailableServiceAliasesClientListByResourceGroupResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return AvailableServiceAliasesClientListByResourceGroupResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return AvailableServiceAliasesClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) - } return client.listByResourceGroupHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/azurefirewallfqdntags_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/azurefirewallfqdntags_client.go index 84056a911..dfd02e2ef 100644 --- 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/azurefirewallfqdntags_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/azurefirewallfqdntags_client.go @@ -33,7 +33,7 @@ type AzureFirewallFqdnTagsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewAzureFirewallFqdnTagsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*AzureFirewallFqdnTagsClient, error) { - cl, err := arm.NewClient(moduleName+".AzureFirewallFqdnTagsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -55,25 +55,20 @@ func (client *AzureFirewallFqdnTagsClient) NewListAllPager(options *AzureFirewal return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *AzureFirewallFqdnTagsClientListAllResponse) (AzureFirewallFqdnTagsClientListAllResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listAllCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "AzureFirewallFqdnTagsClient.NewListAllPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAllCreateRequest(ctx, options) + }, nil) if err != nil { return AzureFirewallFqdnTagsClientListAllResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return AzureFirewallFqdnTagsClientListAllResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return AzureFirewallFqdnTagsClientListAllResponse{}, runtime.NewResponseError(resp) - } return client.listAllHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/azurefirewalls_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/azurefirewalls_client.go index 0563c3397..f46875f7c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/azurefirewalls_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/azurefirewalls_client.go @@ -33,7 +33,7 @@ type AzureFirewallsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewAzureFirewallsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*AzureFirewallsClient, error) { - cl, err := arm.NewClient(moduleName+".AzureFirewallsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *AzureFirewallsClient) BeginCreateOrUpdate(ctx context.Context, res } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[AzureFirewallsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[AzureFirewallsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[AzureFirewallsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *AzureFirewallsClient) BeginCreateOrUpdate(ctx context.Context, res // Generated from API version 2023-05-01 func (client *AzureFirewallsClient) createOrUpdate(ctx context.Context, resourceGroupName string, azureFirewallName string, parameters AzureFirewall, options *AzureFirewallsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "AzureFirewallsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, azureFirewallName, parameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *AzureFirewallsClient) BeginDelete(ctx context.Context, resourceGro } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[AzureFirewallsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[AzureFirewallsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[AzureFirewallsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *AzureFirewallsClient) BeginDelete(ctx context.Context, resourceGro // Generated from API version 2023-05-01 func (client *AzureFirewallsClient) deleteOperation(ctx context.Context, resourceGroupName string, azureFirewallName string, options *AzureFirewallsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "AzureFirewallsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, azureFirewallName, options) if err != nil { return nil, err @@ -197,6 +211,10 @@ func (client *AzureFirewallsClient) deleteCreateRequest(ctx context.Context, res // - options - AzureFirewallsClientGetOptions contains the optional parameters for the AzureFirewallsClient.Get method. 
func (client *AzureFirewallsClient) Get(ctx context.Context, resourceGroupName string, azureFirewallName string, options *AzureFirewallsClientGetOptions) (AzureFirewallsClientGetResponse, error) { var err error + const operationName = "AzureFirewallsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, azureFirewallName, options) if err != nil { return AzureFirewallsClientGetResponse{}, err @@ -259,25 +277,20 @@ func (client *AzureFirewallsClient) NewListPager(resourceGroupName string, optio return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *AzureFirewallsClientListResponse) (AzureFirewallsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "AzureFirewallsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return AzureFirewallsClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return AzureFirewallsClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return AzureFirewallsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -323,25 +336,20 @@ func (client *AzureFirewallsClient) NewListAllPager(options *AzureFirewallsClien return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *AzureFirewallsClientListAllResponse) (AzureFirewallsClientListAllResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listAllCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "AzureFirewallsClient.NewListAllPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAllCreateRequest(ctx, options) + }, nil) if err != nil { return AzureFirewallsClientListAllResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return AzureFirewallsClientListAllResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return AzureFirewallsClientListAllResponse{}, runtime.NewResponseError(resp) - } return client.listAllHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -388,10 +396,13 @@ func (client *AzureFirewallsClient) BeginListLearnedPrefixes(ctx context.Context } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[AzureFirewallsClientListLearnedPrefixesResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return 
runtime.NewPollerFromResumeToken[AzureFirewallsClientListLearnedPrefixesResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[AzureFirewallsClientListLearnedPrefixesResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -401,6 +412,10 @@ func (client *AzureFirewallsClient) BeginListLearnedPrefixes(ctx context.Context // Generated from API version 2023-05-01 func (client *AzureFirewallsClient) listLearnedPrefixes(ctx context.Context, resourceGroupName string, azureFirewallName string, options *AzureFirewallsClientBeginListLearnedPrefixesOptions) (*http.Response, error) { var err error + const operationName = "AzureFirewallsClient.BeginListLearnedPrefixes" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listLearnedPrefixesCreateRequest(ctx, resourceGroupName, azureFirewallName, options) if err != nil { return nil, err @@ -459,10 +474,13 @@ func (client *AzureFirewallsClient) BeginPacketCapture(ctx context.Context, reso } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[AzureFirewallsClientPacketCaptureResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[AzureFirewallsClientPacketCaptureResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[AzureFirewallsClientPacketCaptureResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -472,6 +490,10 @@ func (client *AzureFirewallsClient) BeginPacketCapture(ctx context.Context, reso // Generated from API version 2023-05-01 func (client *AzureFirewallsClient) packetCapture(ctx context.Context, resourceGroupName string, azureFirewallName string, parameters FirewallPacketCaptureParameters, options *AzureFirewallsClientBeginPacketCaptureOptions) (*http.Response, error) { var err error + const operationName = "AzureFirewallsClient.BeginPacketCapture" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.packetCaptureCreateRequest(ctx, resourceGroupName, azureFirewallName, parameters, options) if err != nil { return nil, err @@ -533,10 +555,13 @@ func (client *AzureFirewallsClient) BeginUpdateTags(ctx context.Context, resourc } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[AzureFirewallsClientUpdateTagsResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[AzureFirewallsClientUpdateTagsResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[AzureFirewallsClientUpdateTagsResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -546,6 +571,10 @@ func (client *AzureFirewallsClient) BeginUpdateTags(ctx context.Context, resourc // Generated from API version 2023-05-01 func (client *AzureFirewallsClient) 
updateTags(ctx context.Context, resourceGroupName string, azureFirewallName string, parameters TagsObject, options *AzureFirewallsClientBeginUpdateTagsOptions) (*http.Response, error) { var err error + const operationName = "AzureFirewallsClient.BeginUpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, azureFirewallName, parameters, options) if err != nil { return nil, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/bastionhosts_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/bastionhosts_client.go index b506493d6..315c2d66b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/bastionhosts_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/bastionhosts_client.go @@ -33,7 +33,7 @@ type BastionHostsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewBastionHostsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*BastionHostsClient, error) { - cl, err := arm.NewClient(moduleName+".BastionHostsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *BastionHostsClient) BeginCreateOrUpdate(ctx context.Context, resou } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[BastionHostsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[BastionHostsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[BastionHostsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *BastionHostsClient) BeginCreateOrUpdate(ctx context.Context, resou // Generated from API version 2023-05-01 func (client *BastionHostsClient) createOrUpdate(ctx context.Context, resourceGroupName string, bastionHostName string, parameters BastionHost, options *BastionHostsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "BastionHostsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, bastionHostName, parameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *BastionHostsClient) BeginDelete(ctx context.Context, resourceGroup } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[BastionHostsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[BastionHostsClientDeleteResponse](options.ResumeToken, 
client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[BastionHostsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *BastionHostsClient) BeginDelete(ctx context.Context, resourceGroup // Generated from API version 2023-05-01 func (client *BastionHostsClient) deleteOperation(ctx context.Context, resourceGroupName string, bastionHostName string, options *BastionHostsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "BastionHostsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, bastionHostName, options) if err != nil { return nil, err @@ -197,6 +211,10 @@ func (client *BastionHostsClient) deleteCreateRequest(ctx context.Context, resou // - options - BastionHostsClientGetOptions contains the optional parameters for the BastionHostsClient.Get method. func (client *BastionHostsClient) Get(ctx context.Context, resourceGroupName string, bastionHostName string, options *BastionHostsClientGetOptions) (BastionHostsClientGetResponse, error) { var err error + const operationName = "BastionHostsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, bastionHostName, options) if err != nil { return BastionHostsClientGetResponse{}, err @@ -258,25 +276,20 @@ func (client *BastionHostsClient) NewListPager(options *BastionHostsClientListOp return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *BastionHostsClientListResponse) (BastionHostsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return BastionHostsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "BastionHostsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) if err != nil { return BastionHostsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BastionHostsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -319,25 +332,20 @@ func (client *BastionHostsClient) NewListByResourceGroupPager(resourceGroupName return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *BastionHostsClientListByResourceGroupResponse) (BastionHostsClientListByResourceGroupResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, 
runtime.CtxAPINameKey{}, "BastionHostsClient.NewListByResourceGroupPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return BastionHostsClientListByResourceGroupResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BastionHostsClientListByResourceGroupResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BastionHostsClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) - } return client.listByResourceGroupHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -389,10 +397,13 @@ func (client *BastionHostsClient) BeginUpdateTags(ctx context.Context, resourceG } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[BastionHostsClientUpdateTagsResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[BastionHostsClientUpdateTagsResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[BastionHostsClientUpdateTagsResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -402,6 +413,10 @@ func (client *BastionHostsClient) BeginUpdateTags(ctx context.Context, resourceG // Generated from API version 2023-05-01 func (client *BastionHostsClient) updateTags(ctx context.Context, resourceGroupName string, bastionHostName string, parameters TagsObject, options *BastionHostsClientBeginUpdateTagsOptions) (*http.Response, error) { var err error + const operationName = "BastionHostsClient.BeginUpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, bastionHostName, parameters, options) if err != nil { return nil, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/bgpservicecommunities_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/bgpservicecommunities_client.go index 6ed81473f..5364936f3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/bgpservicecommunities_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/bgpservicecommunities_client.go @@ -33,7 +33,7 @@ type BgpServiceCommunitiesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewBgpServiceCommunitiesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*BgpServiceCommunitiesClient, error) { - cl, err := arm.NewClient(moduleName+".BgpServiceCommunitiesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -55,25 +55,20 @@ func (client *BgpServiceCommunitiesClient) NewListPager(options *BgpServiceCommu return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *BgpServiceCommunitiesClientListResponse) (BgpServiceCommunitiesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "BgpServiceCommunitiesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) if err != nil { return BgpServiceCommunitiesClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BgpServiceCommunitiesClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BgpServiceCommunitiesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/build.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/build.go index 6cb650356..a22250100 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/build.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/build.go @@ -2,6 +2,6 @@ // Licensed under the MIT License. See License.txt in the project root for license information. // This file enables 'go generate' to regenerate this specific SDK -//go:generate pwsh ../../../../eng/scripts/build.ps1 -goExtension "@autorest/go@4.0.0-preview.54" -skipBuild -cleanGenerated -format -tidy -generate resourcemanager/network/armnetwork +//go:generate pwsh ../../../../eng/scripts/build.ps1 -skipBuild -cleanGenerated -format -tidy -generate resourcemanager/network/armnetwork package armnetwork diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/client_factory.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/client_factory.go index 67883a217..b354e3dbb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/client_factory.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/client_factory.go @@ -28,7 +28,7 @@ type ClientFactory struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewClientFactory(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ClientFactory, error) { - _, err := arm.NewClient(moduleName+".ClientFactory", moduleVersion, credential, options) + _, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -38,666 +38,799 @@ func NewClientFactory(subscriptionID string, credential azcore.TokenCredential, }, nil } +// NewAdminRuleCollectionsClient creates a new instance of AdminRuleCollectionsClient. func (c *ClientFactory) NewAdminRuleCollectionsClient() *AdminRuleCollectionsClient { subClient, _ := NewAdminRuleCollectionsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewAdminRulesClient creates a new instance of AdminRulesClient. func (c *ClientFactory) NewAdminRulesClient() *AdminRulesClient { subClient, _ := NewAdminRulesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewApplicationGatewayPrivateEndpointConnectionsClient creates a new instance of ApplicationGatewayPrivateEndpointConnectionsClient. func (c *ClientFactory) NewApplicationGatewayPrivateEndpointConnectionsClient() *ApplicationGatewayPrivateEndpointConnectionsClient { subClient, _ := NewApplicationGatewayPrivateEndpointConnectionsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewApplicationGatewayPrivateLinkResourcesClient creates a new instance of ApplicationGatewayPrivateLinkResourcesClient. func (c *ClientFactory) NewApplicationGatewayPrivateLinkResourcesClient() *ApplicationGatewayPrivateLinkResourcesClient { subClient, _ := NewApplicationGatewayPrivateLinkResourcesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewApplicationGatewayWafDynamicManifestsClient creates a new instance of ApplicationGatewayWafDynamicManifestsClient. func (c *ClientFactory) NewApplicationGatewayWafDynamicManifestsClient() *ApplicationGatewayWafDynamicManifestsClient { subClient, _ := NewApplicationGatewayWafDynamicManifestsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewApplicationGatewayWafDynamicManifestsDefaultClient creates a new instance of ApplicationGatewayWafDynamicManifestsDefaultClient. func (c *ClientFactory) NewApplicationGatewayWafDynamicManifestsDefaultClient() *ApplicationGatewayWafDynamicManifestsDefaultClient { subClient, _ := NewApplicationGatewayWafDynamicManifestsDefaultClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewApplicationGatewaysClient creates a new instance of ApplicationGatewaysClient. func (c *ClientFactory) NewApplicationGatewaysClient() *ApplicationGatewaysClient { subClient, _ := NewApplicationGatewaysClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewApplicationSecurityGroupsClient creates a new instance of ApplicationSecurityGroupsClient. func (c *ClientFactory) NewApplicationSecurityGroupsClient() *ApplicationSecurityGroupsClient { subClient, _ := NewApplicationSecurityGroupsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewAvailableDelegationsClient creates a new instance of AvailableDelegationsClient. func (c *ClientFactory) NewAvailableDelegationsClient() *AvailableDelegationsClient { subClient, _ := NewAvailableDelegationsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewAvailableEndpointServicesClient creates a new instance of AvailableEndpointServicesClient. 
func (c *ClientFactory) NewAvailableEndpointServicesClient() *AvailableEndpointServicesClient { subClient, _ := NewAvailableEndpointServicesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewAvailablePrivateEndpointTypesClient creates a new instance of AvailablePrivateEndpointTypesClient. func (c *ClientFactory) NewAvailablePrivateEndpointTypesClient() *AvailablePrivateEndpointTypesClient { subClient, _ := NewAvailablePrivateEndpointTypesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewAvailableResourceGroupDelegationsClient creates a new instance of AvailableResourceGroupDelegationsClient. func (c *ClientFactory) NewAvailableResourceGroupDelegationsClient() *AvailableResourceGroupDelegationsClient { subClient, _ := NewAvailableResourceGroupDelegationsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewAvailableServiceAliasesClient creates a new instance of AvailableServiceAliasesClient. func (c *ClientFactory) NewAvailableServiceAliasesClient() *AvailableServiceAliasesClient { subClient, _ := NewAvailableServiceAliasesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewAzureFirewallFqdnTagsClient creates a new instance of AzureFirewallFqdnTagsClient. func (c *ClientFactory) NewAzureFirewallFqdnTagsClient() *AzureFirewallFqdnTagsClient { subClient, _ := NewAzureFirewallFqdnTagsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewAzureFirewallsClient creates a new instance of AzureFirewallsClient. func (c *ClientFactory) NewAzureFirewallsClient() *AzureFirewallsClient { subClient, _ := NewAzureFirewallsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewBastionHostsClient creates a new instance of BastionHostsClient. func (c *ClientFactory) NewBastionHostsClient() *BastionHostsClient { subClient, _ := NewBastionHostsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewBgpServiceCommunitiesClient creates a new instance of BgpServiceCommunitiesClient. func (c *ClientFactory) NewBgpServiceCommunitiesClient() *BgpServiceCommunitiesClient { subClient, _ := NewBgpServiceCommunitiesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewConfigurationPolicyGroupsClient creates a new instance of ConfigurationPolicyGroupsClient. func (c *ClientFactory) NewConfigurationPolicyGroupsClient() *ConfigurationPolicyGroupsClient { subClient, _ := NewConfigurationPolicyGroupsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewConnectionMonitorsClient creates a new instance of ConnectionMonitorsClient. func (c *ClientFactory) NewConnectionMonitorsClient() *ConnectionMonitorsClient { subClient, _ := NewConnectionMonitorsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewConnectivityConfigurationsClient creates a new instance of ConnectivityConfigurationsClient. func (c *ClientFactory) NewConnectivityConfigurationsClient() *ConnectivityConfigurationsClient { subClient, _ := NewConnectivityConfigurationsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewCustomIPPrefixesClient creates a new instance of CustomIPPrefixesClient. func (c *ClientFactory) NewCustomIPPrefixesClient() *CustomIPPrefixesClient { subClient, _ := NewCustomIPPrefixesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewDdosCustomPoliciesClient creates a new instance of DdosCustomPoliciesClient. 
func (c *ClientFactory) NewDdosCustomPoliciesClient() *DdosCustomPoliciesClient { subClient, _ := NewDdosCustomPoliciesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewDdosProtectionPlansClient creates a new instance of DdosProtectionPlansClient. func (c *ClientFactory) NewDdosProtectionPlansClient() *DdosProtectionPlansClient { subClient, _ := NewDdosProtectionPlansClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewDefaultSecurityRulesClient creates a new instance of DefaultSecurityRulesClient. func (c *ClientFactory) NewDefaultSecurityRulesClient() *DefaultSecurityRulesClient { subClient, _ := NewDefaultSecurityRulesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewDscpConfigurationClient creates a new instance of DscpConfigurationClient. func (c *ClientFactory) NewDscpConfigurationClient() *DscpConfigurationClient { subClient, _ := NewDscpConfigurationClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewExpressRouteCircuitAuthorizationsClient creates a new instance of ExpressRouteCircuitAuthorizationsClient. func (c *ClientFactory) NewExpressRouteCircuitAuthorizationsClient() *ExpressRouteCircuitAuthorizationsClient { subClient, _ := NewExpressRouteCircuitAuthorizationsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewExpressRouteCircuitConnectionsClient creates a new instance of ExpressRouteCircuitConnectionsClient. func (c *ClientFactory) NewExpressRouteCircuitConnectionsClient() *ExpressRouteCircuitConnectionsClient { subClient, _ := NewExpressRouteCircuitConnectionsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewExpressRouteCircuitPeeringsClient creates a new instance of ExpressRouteCircuitPeeringsClient. func (c *ClientFactory) NewExpressRouteCircuitPeeringsClient() *ExpressRouteCircuitPeeringsClient { subClient, _ := NewExpressRouteCircuitPeeringsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewExpressRouteCircuitsClient creates a new instance of ExpressRouteCircuitsClient. func (c *ClientFactory) NewExpressRouteCircuitsClient() *ExpressRouteCircuitsClient { subClient, _ := NewExpressRouteCircuitsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewExpressRouteConnectionsClient creates a new instance of ExpressRouteConnectionsClient. func (c *ClientFactory) NewExpressRouteConnectionsClient() *ExpressRouteConnectionsClient { subClient, _ := NewExpressRouteConnectionsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewExpressRouteCrossConnectionPeeringsClient creates a new instance of ExpressRouteCrossConnectionPeeringsClient. func (c *ClientFactory) NewExpressRouteCrossConnectionPeeringsClient() *ExpressRouteCrossConnectionPeeringsClient { subClient, _ := NewExpressRouteCrossConnectionPeeringsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewExpressRouteCrossConnectionsClient creates a new instance of ExpressRouteCrossConnectionsClient. func (c *ClientFactory) NewExpressRouteCrossConnectionsClient() *ExpressRouteCrossConnectionsClient { subClient, _ := NewExpressRouteCrossConnectionsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewExpressRouteGatewaysClient creates a new instance of ExpressRouteGatewaysClient. 
func (c *ClientFactory) NewExpressRouteGatewaysClient() *ExpressRouteGatewaysClient { subClient, _ := NewExpressRouteGatewaysClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewExpressRouteLinksClient creates a new instance of ExpressRouteLinksClient. func (c *ClientFactory) NewExpressRouteLinksClient() *ExpressRouteLinksClient { subClient, _ := NewExpressRouteLinksClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewExpressRoutePortAuthorizationsClient creates a new instance of ExpressRoutePortAuthorizationsClient. func (c *ClientFactory) NewExpressRoutePortAuthorizationsClient() *ExpressRoutePortAuthorizationsClient { subClient, _ := NewExpressRoutePortAuthorizationsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewExpressRoutePortsClient creates a new instance of ExpressRoutePortsClient. func (c *ClientFactory) NewExpressRoutePortsClient() *ExpressRoutePortsClient { subClient, _ := NewExpressRoutePortsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewExpressRoutePortsLocationsClient creates a new instance of ExpressRoutePortsLocationsClient. func (c *ClientFactory) NewExpressRoutePortsLocationsClient() *ExpressRoutePortsLocationsClient { subClient, _ := NewExpressRoutePortsLocationsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewExpressRouteProviderPortsLocationClient creates a new instance of ExpressRouteProviderPortsLocationClient. func (c *ClientFactory) NewExpressRouteProviderPortsLocationClient() *ExpressRouteProviderPortsLocationClient { subClient, _ := NewExpressRouteProviderPortsLocationClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewExpressRouteServiceProvidersClient creates a new instance of ExpressRouteServiceProvidersClient. func (c *ClientFactory) NewExpressRouteServiceProvidersClient() *ExpressRouteServiceProvidersClient { subClient, _ := NewExpressRouteServiceProvidersClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewFirewallPoliciesClient creates a new instance of FirewallPoliciesClient. func (c *ClientFactory) NewFirewallPoliciesClient() *FirewallPoliciesClient { subClient, _ := NewFirewallPoliciesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewFirewallPolicyIdpsSignaturesClient creates a new instance of FirewallPolicyIdpsSignaturesClient. func (c *ClientFactory) NewFirewallPolicyIdpsSignaturesClient() *FirewallPolicyIdpsSignaturesClient { subClient, _ := NewFirewallPolicyIdpsSignaturesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewFirewallPolicyIdpsSignaturesFilterValuesClient creates a new instance of FirewallPolicyIdpsSignaturesFilterValuesClient. func (c *ClientFactory) NewFirewallPolicyIdpsSignaturesFilterValuesClient() *FirewallPolicyIdpsSignaturesFilterValuesClient { subClient, _ := NewFirewallPolicyIdpsSignaturesFilterValuesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewFirewallPolicyIdpsSignaturesOverridesClient creates a new instance of FirewallPolicyIdpsSignaturesOverridesClient. func (c *ClientFactory) NewFirewallPolicyIdpsSignaturesOverridesClient() *FirewallPolicyIdpsSignaturesOverridesClient { subClient, _ := NewFirewallPolicyIdpsSignaturesOverridesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewFirewallPolicyRuleCollectionGroupsClient creates a new instance of FirewallPolicyRuleCollectionGroupsClient. 
func (c *ClientFactory) NewFirewallPolicyRuleCollectionGroupsClient() *FirewallPolicyRuleCollectionGroupsClient { subClient, _ := NewFirewallPolicyRuleCollectionGroupsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewFlowLogsClient creates a new instance of FlowLogsClient. func (c *ClientFactory) NewFlowLogsClient() *FlowLogsClient { subClient, _ := NewFlowLogsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewGroupsClient creates a new instance of GroupsClient. func (c *ClientFactory) NewGroupsClient() *GroupsClient { subClient, _ := NewGroupsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewHubRouteTablesClient creates a new instance of HubRouteTablesClient. func (c *ClientFactory) NewHubRouteTablesClient() *HubRouteTablesClient { subClient, _ := NewHubRouteTablesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewHubVirtualNetworkConnectionsClient creates a new instance of HubVirtualNetworkConnectionsClient. func (c *ClientFactory) NewHubVirtualNetworkConnectionsClient() *HubVirtualNetworkConnectionsClient { subClient, _ := NewHubVirtualNetworkConnectionsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewIPAllocationsClient creates a new instance of IPAllocationsClient. func (c *ClientFactory) NewIPAllocationsClient() *IPAllocationsClient { subClient, _ := NewIPAllocationsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewIPGroupsClient creates a new instance of IPGroupsClient. func (c *ClientFactory) NewIPGroupsClient() *IPGroupsClient { subClient, _ := NewIPGroupsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewInboundNatRulesClient creates a new instance of InboundNatRulesClient. func (c *ClientFactory) NewInboundNatRulesClient() *InboundNatRulesClient { subClient, _ := NewInboundNatRulesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewInboundSecurityRuleClient creates a new instance of InboundSecurityRuleClient. func (c *ClientFactory) NewInboundSecurityRuleClient() *InboundSecurityRuleClient { subClient, _ := NewInboundSecurityRuleClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewInterfaceIPConfigurationsClient creates a new instance of InterfaceIPConfigurationsClient. func (c *ClientFactory) NewInterfaceIPConfigurationsClient() *InterfaceIPConfigurationsClient { subClient, _ := NewInterfaceIPConfigurationsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewInterfaceLoadBalancersClient creates a new instance of InterfaceLoadBalancersClient. func (c *ClientFactory) NewInterfaceLoadBalancersClient() *InterfaceLoadBalancersClient { subClient, _ := NewInterfaceLoadBalancersClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewInterfaceTapConfigurationsClient creates a new instance of InterfaceTapConfigurationsClient. func (c *ClientFactory) NewInterfaceTapConfigurationsClient() *InterfaceTapConfigurationsClient { subClient, _ := NewInterfaceTapConfigurationsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewInterfacesClient creates a new instance of InterfacesClient. func (c *ClientFactory) NewInterfacesClient() *InterfacesClient { subClient, _ := NewInterfacesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewLoadBalancerBackendAddressPoolsClient creates a new instance of LoadBalancerBackendAddressPoolsClient. 
func (c *ClientFactory) NewLoadBalancerBackendAddressPoolsClient() *LoadBalancerBackendAddressPoolsClient { subClient, _ := NewLoadBalancerBackendAddressPoolsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewLoadBalancerFrontendIPConfigurationsClient creates a new instance of LoadBalancerFrontendIPConfigurationsClient. func (c *ClientFactory) NewLoadBalancerFrontendIPConfigurationsClient() *LoadBalancerFrontendIPConfigurationsClient { subClient, _ := NewLoadBalancerFrontendIPConfigurationsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewLoadBalancerLoadBalancingRulesClient creates a new instance of LoadBalancerLoadBalancingRulesClient. func (c *ClientFactory) NewLoadBalancerLoadBalancingRulesClient() *LoadBalancerLoadBalancingRulesClient { subClient, _ := NewLoadBalancerLoadBalancingRulesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewLoadBalancerNetworkInterfacesClient creates a new instance of LoadBalancerNetworkInterfacesClient. func (c *ClientFactory) NewLoadBalancerNetworkInterfacesClient() *LoadBalancerNetworkInterfacesClient { subClient, _ := NewLoadBalancerNetworkInterfacesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewLoadBalancerOutboundRulesClient creates a new instance of LoadBalancerOutboundRulesClient. func (c *ClientFactory) NewLoadBalancerOutboundRulesClient() *LoadBalancerOutboundRulesClient { subClient, _ := NewLoadBalancerOutboundRulesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewLoadBalancerProbesClient creates a new instance of LoadBalancerProbesClient. func (c *ClientFactory) NewLoadBalancerProbesClient() *LoadBalancerProbesClient { subClient, _ := NewLoadBalancerProbesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewLoadBalancersClient creates a new instance of LoadBalancersClient. func (c *ClientFactory) NewLoadBalancersClient() *LoadBalancersClient { subClient, _ := NewLoadBalancersClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewLocalNetworkGatewaysClient creates a new instance of LocalNetworkGatewaysClient. func (c *ClientFactory) NewLocalNetworkGatewaysClient() *LocalNetworkGatewaysClient { subClient, _ := NewLocalNetworkGatewaysClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewManagementClient creates a new instance of ManagementClient. func (c *ClientFactory) NewManagementClient() *ManagementClient { subClient, _ := NewManagementClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewManagementGroupNetworkManagerConnectionsClient creates a new instance of ManagementGroupNetworkManagerConnectionsClient. func (c *ClientFactory) NewManagementGroupNetworkManagerConnectionsClient() *ManagementGroupNetworkManagerConnectionsClient { subClient, _ := NewManagementGroupNetworkManagerConnectionsClient(c.credential, c.options) return subClient } +// NewManagerCommitsClient creates a new instance of ManagerCommitsClient. func (c *ClientFactory) NewManagerCommitsClient() *ManagerCommitsClient { subClient, _ := NewManagerCommitsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewManagerDeploymentStatusClient creates a new instance of ManagerDeploymentStatusClient. 
func (c *ClientFactory) NewManagerDeploymentStatusClient() *ManagerDeploymentStatusClient { subClient, _ := NewManagerDeploymentStatusClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewManagersClient creates a new instance of ManagersClient. func (c *ClientFactory) NewManagersClient() *ManagersClient { subClient, _ := NewManagersClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewNatGatewaysClient creates a new instance of NatGatewaysClient. func (c *ClientFactory) NewNatGatewaysClient() *NatGatewaysClient { subClient, _ := NewNatGatewaysClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewNatRulesClient creates a new instance of NatRulesClient. func (c *ClientFactory) NewNatRulesClient() *NatRulesClient { subClient, _ := NewNatRulesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewOperationsClient creates a new instance of OperationsClient. func (c *ClientFactory) NewOperationsClient() *OperationsClient { subClient, _ := NewOperationsClient(c.credential, c.options) return subClient } +// NewP2SVPNGatewaysClient creates a new instance of P2SVPNGatewaysClient. func (c *ClientFactory) NewP2SVPNGatewaysClient() *P2SVPNGatewaysClient { subClient, _ := NewP2SVPNGatewaysClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewPacketCapturesClient creates a new instance of PacketCapturesClient. func (c *ClientFactory) NewPacketCapturesClient() *PacketCapturesClient { subClient, _ := NewPacketCapturesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewPeerExpressRouteCircuitConnectionsClient creates a new instance of PeerExpressRouteCircuitConnectionsClient. func (c *ClientFactory) NewPeerExpressRouteCircuitConnectionsClient() *PeerExpressRouteCircuitConnectionsClient { subClient, _ := NewPeerExpressRouteCircuitConnectionsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewPrivateDNSZoneGroupsClient creates a new instance of PrivateDNSZoneGroupsClient. func (c *ClientFactory) NewPrivateDNSZoneGroupsClient() *PrivateDNSZoneGroupsClient { subClient, _ := NewPrivateDNSZoneGroupsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewPrivateEndpointsClient creates a new instance of PrivateEndpointsClient. func (c *ClientFactory) NewPrivateEndpointsClient() *PrivateEndpointsClient { subClient, _ := NewPrivateEndpointsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewPrivateLinkServicesClient creates a new instance of PrivateLinkServicesClient. func (c *ClientFactory) NewPrivateLinkServicesClient() *PrivateLinkServicesClient { subClient, _ := NewPrivateLinkServicesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewProfilesClient creates a new instance of ProfilesClient. func (c *ClientFactory) NewProfilesClient() *ProfilesClient { subClient, _ := NewProfilesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewPublicIPAddressesClient creates a new instance of PublicIPAddressesClient. func (c *ClientFactory) NewPublicIPAddressesClient() *PublicIPAddressesClient { subClient, _ := NewPublicIPAddressesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewPublicIPPrefixesClient creates a new instance of PublicIPPrefixesClient. 
func (c *ClientFactory) NewPublicIPPrefixesClient() *PublicIPPrefixesClient { subClient, _ := NewPublicIPPrefixesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewResourceNavigationLinksClient creates a new instance of ResourceNavigationLinksClient. func (c *ClientFactory) NewResourceNavigationLinksClient() *ResourceNavigationLinksClient { subClient, _ := NewResourceNavigationLinksClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewRouteFilterRulesClient creates a new instance of RouteFilterRulesClient. func (c *ClientFactory) NewRouteFilterRulesClient() *RouteFilterRulesClient { subClient, _ := NewRouteFilterRulesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewRouteFiltersClient creates a new instance of RouteFiltersClient. func (c *ClientFactory) NewRouteFiltersClient() *RouteFiltersClient { subClient, _ := NewRouteFiltersClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewRouteMapsClient creates a new instance of RouteMapsClient. func (c *ClientFactory) NewRouteMapsClient() *RouteMapsClient { subClient, _ := NewRouteMapsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewRouteTablesClient creates a new instance of RouteTablesClient. func (c *ClientFactory) NewRouteTablesClient() *RouteTablesClient { subClient, _ := NewRouteTablesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewRoutesClient creates a new instance of RoutesClient. func (c *ClientFactory) NewRoutesClient() *RoutesClient { subClient, _ := NewRoutesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewRoutingIntentClient creates a new instance of RoutingIntentClient. func (c *ClientFactory) NewRoutingIntentClient() *RoutingIntentClient { subClient, _ := NewRoutingIntentClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewScopeConnectionsClient creates a new instance of ScopeConnectionsClient. func (c *ClientFactory) NewScopeConnectionsClient() *ScopeConnectionsClient { subClient, _ := NewScopeConnectionsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewSecurityAdminConfigurationsClient creates a new instance of SecurityAdminConfigurationsClient. func (c *ClientFactory) NewSecurityAdminConfigurationsClient() *SecurityAdminConfigurationsClient { subClient, _ := NewSecurityAdminConfigurationsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewSecurityGroupsClient creates a new instance of SecurityGroupsClient. func (c *ClientFactory) NewSecurityGroupsClient() *SecurityGroupsClient { subClient, _ := NewSecurityGroupsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewSecurityPartnerProvidersClient creates a new instance of SecurityPartnerProvidersClient. func (c *ClientFactory) NewSecurityPartnerProvidersClient() *SecurityPartnerProvidersClient { subClient, _ := NewSecurityPartnerProvidersClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewSecurityRulesClient creates a new instance of SecurityRulesClient. func (c *ClientFactory) NewSecurityRulesClient() *SecurityRulesClient { subClient, _ := NewSecurityRulesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewServiceAssociationLinksClient creates a new instance of ServiceAssociationLinksClient. 
func (c *ClientFactory) NewServiceAssociationLinksClient() *ServiceAssociationLinksClient { subClient, _ := NewServiceAssociationLinksClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewServiceEndpointPoliciesClient creates a new instance of ServiceEndpointPoliciesClient. func (c *ClientFactory) NewServiceEndpointPoliciesClient() *ServiceEndpointPoliciesClient { subClient, _ := NewServiceEndpointPoliciesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewServiceEndpointPolicyDefinitionsClient creates a new instance of ServiceEndpointPolicyDefinitionsClient. func (c *ClientFactory) NewServiceEndpointPolicyDefinitionsClient() *ServiceEndpointPolicyDefinitionsClient { subClient, _ := NewServiceEndpointPolicyDefinitionsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewServiceTagInformationClient creates a new instance of ServiceTagInformationClient. func (c *ClientFactory) NewServiceTagInformationClient() *ServiceTagInformationClient { subClient, _ := NewServiceTagInformationClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewServiceTagsClient creates a new instance of ServiceTagsClient. func (c *ClientFactory) NewServiceTagsClient() *ServiceTagsClient { subClient, _ := NewServiceTagsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewStaticMembersClient creates a new instance of StaticMembersClient. func (c *ClientFactory) NewStaticMembersClient() *StaticMembersClient { subClient, _ := NewStaticMembersClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewSubnetsClient creates a new instance of SubnetsClient. func (c *ClientFactory) NewSubnetsClient() *SubnetsClient { subClient, _ := NewSubnetsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewSubscriptionNetworkManagerConnectionsClient creates a new instance of SubscriptionNetworkManagerConnectionsClient. func (c *ClientFactory) NewSubscriptionNetworkManagerConnectionsClient() *SubscriptionNetworkManagerConnectionsClient { subClient, _ := NewSubscriptionNetworkManagerConnectionsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewUsagesClient creates a new instance of UsagesClient. func (c *ClientFactory) NewUsagesClient() *UsagesClient { subClient, _ := NewUsagesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVPNConnectionsClient creates a new instance of VPNConnectionsClient. func (c *ClientFactory) NewVPNConnectionsClient() *VPNConnectionsClient { subClient, _ := NewVPNConnectionsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVPNGatewaysClient creates a new instance of VPNGatewaysClient. func (c *ClientFactory) NewVPNGatewaysClient() *VPNGatewaysClient { subClient, _ := NewVPNGatewaysClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVPNLinkConnectionsClient creates a new instance of VPNLinkConnectionsClient. func (c *ClientFactory) NewVPNLinkConnectionsClient() *VPNLinkConnectionsClient { subClient, _ := NewVPNLinkConnectionsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVPNServerConfigurationsAssociatedWithVirtualWanClient creates a new instance of VPNServerConfigurationsAssociatedWithVirtualWanClient. 
func (c *ClientFactory) NewVPNServerConfigurationsAssociatedWithVirtualWanClient() *VPNServerConfigurationsAssociatedWithVirtualWanClient { subClient, _ := NewVPNServerConfigurationsAssociatedWithVirtualWanClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVPNServerConfigurationsClient creates a new instance of VPNServerConfigurationsClient. func (c *ClientFactory) NewVPNServerConfigurationsClient() *VPNServerConfigurationsClient { subClient, _ := NewVPNServerConfigurationsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVPNSiteLinkConnectionsClient creates a new instance of VPNSiteLinkConnectionsClient. func (c *ClientFactory) NewVPNSiteLinkConnectionsClient() *VPNSiteLinkConnectionsClient { subClient, _ := NewVPNSiteLinkConnectionsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVPNSiteLinksClient creates a new instance of VPNSiteLinksClient. func (c *ClientFactory) NewVPNSiteLinksClient() *VPNSiteLinksClient { subClient, _ := NewVPNSiteLinksClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVPNSitesClient creates a new instance of VPNSitesClient. func (c *ClientFactory) NewVPNSitesClient() *VPNSitesClient { subClient, _ := NewVPNSitesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVPNSitesConfigurationClient creates a new instance of VPNSitesConfigurationClient. func (c *ClientFactory) NewVPNSitesConfigurationClient() *VPNSitesConfigurationClient { subClient, _ := NewVPNSitesConfigurationClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVipSwapClient creates a new instance of VipSwapClient. func (c *ClientFactory) NewVipSwapClient() *VipSwapClient { subClient, _ := NewVipSwapClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVirtualApplianceConnectionsClient creates a new instance of VirtualApplianceConnectionsClient. func (c *ClientFactory) NewVirtualApplianceConnectionsClient() *VirtualApplianceConnectionsClient { subClient, _ := NewVirtualApplianceConnectionsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVirtualApplianceSKUsClient creates a new instance of VirtualApplianceSKUsClient. func (c *ClientFactory) NewVirtualApplianceSKUsClient() *VirtualApplianceSKUsClient { subClient, _ := NewVirtualApplianceSKUsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVirtualApplianceSitesClient creates a new instance of VirtualApplianceSitesClient. func (c *ClientFactory) NewVirtualApplianceSitesClient() *VirtualApplianceSitesClient { subClient, _ := NewVirtualApplianceSitesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVirtualAppliancesClient creates a new instance of VirtualAppliancesClient. func (c *ClientFactory) NewVirtualAppliancesClient() *VirtualAppliancesClient { subClient, _ := NewVirtualAppliancesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVirtualHubBgpConnectionClient creates a new instance of VirtualHubBgpConnectionClient. func (c *ClientFactory) NewVirtualHubBgpConnectionClient() *VirtualHubBgpConnectionClient { subClient, _ := NewVirtualHubBgpConnectionClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVirtualHubBgpConnectionsClient creates a new instance of VirtualHubBgpConnectionsClient. 
func (c *ClientFactory) NewVirtualHubBgpConnectionsClient() *VirtualHubBgpConnectionsClient { subClient, _ := NewVirtualHubBgpConnectionsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVirtualHubIPConfigurationClient creates a new instance of VirtualHubIPConfigurationClient. func (c *ClientFactory) NewVirtualHubIPConfigurationClient() *VirtualHubIPConfigurationClient { subClient, _ := NewVirtualHubIPConfigurationClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVirtualHubRouteTableV2SClient creates a new instance of VirtualHubRouteTableV2SClient. func (c *ClientFactory) NewVirtualHubRouteTableV2SClient() *VirtualHubRouteTableV2SClient { subClient, _ := NewVirtualHubRouteTableV2SClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVirtualHubsClient creates a new instance of VirtualHubsClient. func (c *ClientFactory) NewVirtualHubsClient() *VirtualHubsClient { subClient, _ := NewVirtualHubsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVirtualNetworkGatewayConnectionsClient creates a new instance of VirtualNetworkGatewayConnectionsClient. func (c *ClientFactory) NewVirtualNetworkGatewayConnectionsClient() *VirtualNetworkGatewayConnectionsClient { subClient, _ := NewVirtualNetworkGatewayConnectionsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVirtualNetworkGatewayNatRulesClient creates a new instance of VirtualNetworkGatewayNatRulesClient. func (c *ClientFactory) NewVirtualNetworkGatewayNatRulesClient() *VirtualNetworkGatewayNatRulesClient { subClient, _ := NewVirtualNetworkGatewayNatRulesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVirtualNetworkGatewaysClient creates a new instance of VirtualNetworkGatewaysClient. func (c *ClientFactory) NewVirtualNetworkGatewaysClient() *VirtualNetworkGatewaysClient { subClient, _ := NewVirtualNetworkGatewaysClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVirtualNetworkPeeringsClient creates a new instance of VirtualNetworkPeeringsClient. func (c *ClientFactory) NewVirtualNetworkPeeringsClient() *VirtualNetworkPeeringsClient { subClient, _ := NewVirtualNetworkPeeringsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVirtualNetworkTapsClient creates a new instance of VirtualNetworkTapsClient. func (c *ClientFactory) NewVirtualNetworkTapsClient() *VirtualNetworkTapsClient { subClient, _ := NewVirtualNetworkTapsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVirtualNetworksClient creates a new instance of VirtualNetworksClient. func (c *ClientFactory) NewVirtualNetworksClient() *VirtualNetworksClient { subClient, _ := NewVirtualNetworksClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVirtualRouterPeeringsClient creates a new instance of VirtualRouterPeeringsClient. func (c *ClientFactory) NewVirtualRouterPeeringsClient() *VirtualRouterPeeringsClient { subClient, _ := NewVirtualRouterPeeringsClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVirtualRoutersClient creates a new instance of VirtualRoutersClient. func (c *ClientFactory) NewVirtualRoutersClient() *VirtualRoutersClient { subClient, _ := NewVirtualRoutersClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewVirtualWansClient creates a new instance of VirtualWansClient. 
func (c *ClientFactory) NewVirtualWansClient() *VirtualWansClient { subClient, _ := NewVirtualWansClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewWatchersClient creates a new instance of WatchersClient. func (c *ClientFactory) NewWatchersClient() *WatchersClient { subClient, _ := NewWatchersClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewWebApplicationFirewallPoliciesClient creates a new instance of WebApplicationFirewallPoliciesClient. func (c *ClientFactory) NewWebApplicationFirewallPoliciesClient() *WebApplicationFirewallPoliciesClient { subClient, _ := NewWebApplicationFirewallPoliciesClient(c.subscriptionID, c.credential, c.options) return subClient } +// NewWebCategoriesClient creates a new instance of WebCategoriesClient. func (c *ClientFactory) NewWebCategoriesClient() *WebCategoriesClient { subClient, _ := NewWebCategoriesClient(c.subscriptionID, c.credential, c.options) return subClient diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/configurationpolicygroups_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/configurationpolicygroups_client.go index 864b8e202..feeea87c1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/configurationpolicygroups_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/configurationpolicygroups_client.go @@ -33,7 +33,7 @@ type ConfigurationPolicyGroupsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewConfigurationPolicyGroupsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ConfigurationPolicyGroupsClient, error) { - cl, err := arm.NewClient(moduleName+".ConfigurationPolicyGroupsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *ConfigurationPolicyGroupsClient) BeginCreateOrUpdate(ctx context.C } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ConfigurationPolicyGroupsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ConfigurationPolicyGroupsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ConfigurationPolicyGroupsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *ConfigurationPolicyGroupsClient) BeginCreateOrUpdate(ctx context.C // Generated from API version 2023-05-01 func (client *ConfigurationPolicyGroupsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vpnServerConfigurationName string, configurationPolicyGroupName string, vpnServerConfigurationPolicyGroupParameters VPNServerConfigurationPolicyGroup, options *ConfigurationPolicyGroupsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "ConfigurationPolicyGroupsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, 
operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, vpnServerConfigurationName, configurationPolicyGroupName, vpnServerConfigurationPolicyGroupParameters, options) if err != nil { return nil, err @@ -140,10 +147,13 @@ func (client *ConfigurationPolicyGroupsClient) BeginDelete(ctx context.Context, } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ConfigurationPolicyGroupsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ConfigurationPolicyGroupsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ConfigurationPolicyGroupsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -153,6 +163,10 @@ func (client *ConfigurationPolicyGroupsClient) BeginDelete(ctx context.Context, // Generated from API version 2023-05-01 func (client *ConfigurationPolicyGroupsClient) deleteOperation(ctx context.Context, resourceGroupName string, vpnServerConfigurationName string, configurationPolicyGroupName string, options *ConfigurationPolicyGroupsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "ConfigurationPolicyGroupsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, vpnServerConfigurationName, configurationPolicyGroupName, options) if err != nil { return nil, err @@ -209,6 +223,10 @@ func (client *ConfigurationPolicyGroupsClient) deleteCreateRequest(ctx context.C // method. 
func (client *ConfigurationPolicyGroupsClient) Get(ctx context.Context, resourceGroupName string, vpnServerConfigurationName string, configurationPolicyGroupName string, options *ConfigurationPolicyGroupsClientGetOptions) (ConfigurationPolicyGroupsClientGetResponse, error) { var err error + const operationName = "ConfigurationPolicyGroupsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, vpnServerConfigurationName, configurationPolicyGroupName, options) if err != nil { return ConfigurationPolicyGroupsClientGetResponse{}, err @@ -277,25 +295,20 @@ func (client *ConfigurationPolicyGroupsClient) NewListByVPNServerConfigurationPa return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ConfigurationPolicyGroupsClientListByVPNServerConfigurationResponse) (ConfigurationPolicyGroupsClientListByVPNServerConfigurationResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByVPNServerConfigurationCreateRequest(ctx, resourceGroupName, vpnServerConfigurationName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return ConfigurationPolicyGroupsClientListByVPNServerConfigurationResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ConfigurationPolicyGroupsClient.NewListByVPNServerConfigurationPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByVPNServerConfigurationCreateRequest(ctx, resourceGroupName, vpnServerConfigurationName, options) + }, nil) if err != nil { return ConfigurationPolicyGroupsClientListByVPNServerConfigurationResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ConfigurationPolicyGroupsClientListByVPNServerConfigurationResponse{}, runtime.NewResponseError(resp) - } return client.listByVPNServerConfigurationHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/connectionmonitors_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/connectionmonitors_client.go index d3a726752..905523443 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/connectionmonitors_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/connectionmonitors_client.go @@ -33,7 +33,7 @@ type ConnectionMonitorsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewConnectionMonitorsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ConnectionMonitorsClient, error) { - cl, err := arm.NewClient(moduleName+".ConnectionMonitorsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *ConnectionMonitorsClient) BeginCreateOrUpdate(ctx context.Context, } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ConnectionMonitorsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ConnectionMonitorsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ConnectionMonitorsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *ConnectionMonitorsClient) BeginCreateOrUpdate(ctx context.Context, // Generated from API version 2023-05-01 func (client *ConnectionMonitorsClient) createOrUpdate(ctx context.Context, resourceGroupName string, networkWatcherName string, connectionMonitorName string, parameters ConnectionMonitor, options *ConnectionMonitorsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "ConnectionMonitorsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, networkWatcherName, connectionMonitorName, parameters, options) if err != nil { return nil, err @@ -143,10 +150,13 @@ func (client *ConnectionMonitorsClient) BeginDelete(ctx context.Context, resourc } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ConnectionMonitorsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ConnectionMonitorsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ConnectionMonitorsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -156,6 +166,10 @@ func (client *ConnectionMonitorsClient) BeginDelete(ctx context.Context, resourc // Generated from API version 2023-05-01 func (client *ConnectionMonitorsClient) deleteOperation(ctx context.Context, resourceGroupName string, networkWatcherName string, connectionMonitorName string, options *ConnectionMonitorsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "ConnectionMonitorsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, networkWatcherName, connectionMonitorName, options) if err != nil { return nil, err @@ -211,6 +225,10 @@ func (client *ConnectionMonitorsClient) deleteCreateRequest(ctx 
context.Context, // - options - ConnectionMonitorsClientGetOptions contains the optional parameters for the ConnectionMonitorsClient.Get method. func (client *ConnectionMonitorsClient) Get(ctx context.Context, resourceGroupName string, networkWatcherName string, connectionMonitorName string, options *ConnectionMonitorsClientGetOptions) (ConnectionMonitorsClientGetResponse, error) { var err error + const operationName = "ConnectionMonitorsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, networkWatcherName, connectionMonitorName, options) if err != nil { return ConnectionMonitorsClientGetResponse{}, err @@ -279,6 +297,7 @@ func (client *ConnectionMonitorsClient) NewListPager(resourceGroupName string, n return false }, Fetcher: func(ctx context.Context, page *ConnectionMonitorsClientListResponse) (ConnectionMonitorsClientListResponse, error) { + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ConnectionMonitorsClient.NewListPager") req, err := client.listCreateRequest(ctx, resourceGroupName, networkWatcherName, options) if err != nil { return ConnectionMonitorsClientListResponse{}, err @@ -292,6 +311,7 @@ func (client *ConnectionMonitorsClient) NewListPager(resourceGroupName string, n } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -347,10 +367,13 @@ func (client *ConnectionMonitorsClient) BeginQuery(ctx context.Context, resource } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ConnectionMonitorsClientQueryResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ConnectionMonitorsClientQueryResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ConnectionMonitorsClientQueryResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -360,6 +383,10 @@ func (client *ConnectionMonitorsClient) BeginQuery(ctx context.Context, resource // Generated from API version 2023-05-01 func (client *ConnectionMonitorsClient) query(ctx context.Context, resourceGroupName string, networkWatcherName string, connectionMonitorName string, options *ConnectionMonitorsClientBeginQueryOptions) (*http.Response, error) { var err error + const operationName = "ConnectionMonitorsClient.BeginQuery" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.queryCreateRequest(ctx, resourceGroupName, networkWatcherName, connectionMonitorName, options) if err != nil { return nil, err @@ -422,10 +449,13 @@ func (client *ConnectionMonitorsClient) BeginStart(ctx context.Context, resource } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ConnectionMonitorsClientStartResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ConnectionMonitorsClientStartResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, 
client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ConnectionMonitorsClientStartResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -435,6 +465,10 @@ func (client *ConnectionMonitorsClient) BeginStart(ctx context.Context, resource // Generated from API version 2023-05-01 func (client *ConnectionMonitorsClient) start(ctx context.Context, resourceGroupName string, networkWatcherName string, connectionMonitorName string, options *ConnectionMonitorsClientBeginStartOptions) (*http.Response, error) { var err error + const operationName = "ConnectionMonitorsClient.BeginStart" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.startCreateRequest(ctx, resourceGroupName, networkWatcherName, connectionMonitorName, options) if err != nil { return nil, err @@ -497,10 +531,13 @@ func (client *ConnectionMonitorsClient) BeginStop(ctx context.Context, resourceG } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ConnectionMonitorsClientStopResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ConnectionMonitorsClientStopResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ConnectionMonitorsClientStopResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -510,6 +547,10 @@ func (client *ConnectionMonitorsClient) BeginStop(ctx context.Context, resourceG // Generated from API version 2023-05-01 func (client *ConnectionMonitorsClient) stop(ctx context.Context, resourceGroupName string, networkWatcherName string, connectionMonitorName string, options *ConnectionMonitorsClientBeginStopOptions) (*http.Response, error) { var err error + const operationName = "ConnectionMonitorsClient.BeginStop" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.stopCreateRequest(ctx, resourceGroupName, networkWatcherName, connectionMonitorName, options) if err != nil { return nil, err @@ -567,6 +608,10 @@ func (client *ConnectionMonitorsClient) stopCreateRequest(ctx context.Context, r // method. 
func (client *ConnectionMonitorsClient) UpdateTags(ctx context.Context, resourceGroupName string, networkWatcherName string, connectionMonitorName string, parameters TagsObject, options *ConnectionMonitorsClientUpdateTagsOptions) (ConnectionMonitorsClientUpdateTagsResponse, error) { var err error + const operationName = "ConnectionMonitorsClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, networkWatcherName, connectionMonitorName, parameters, options) if err != nil { return ConnectionMonitorsClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/connectivityconfigurations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/connectivityconfigurations_client.go index 4a9d51534..0e0317c17 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/connectivityconfigurations_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/connectivityconfigurations_client.go @@ -34,7 +34,7 @@ type ConnectivityConfigurationsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewConnectivityConfigurationsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ConnectivityConfigurationsClient, error) { - cl, err := arm.NewClient(moduleName+".ConnectivityConfigurationsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -57,6 +57,10 @@ func NewConnectivityConfigurationsClient(subscriptionID string, credential azcor // method. 
func (client *ConnectivityConfigurationsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkManagerName string, configurationName string, connectivityConfiguration ConnectivityConfiguration, options *ConnectivityConfigurationsClientCreateOrUpdateOptions) (ConnectivityConfigurationsClientCreateOrUpdateResponse, error) { var err error + const operationName = "ConnectivityConfigurationsClient.CreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, networkManagerName, configurationName, connectivityConfiguration, options) if err != nil { return ConnectivityConfigurationsClientCreateOrUpdateResponse{}, err @@ -133,10 +137,13 @@ func (client *ConnectivityConfigurationsClient) BeginDelete(ctx context.Context, } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ConnectivityConfigurationsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ConnectivityConfigurationsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ConnectivityConfigurationsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +154,10 @@ func (client *ConnectivityConfigurationsClient) BeginDelete(ctx context.Context, // Generated from API version 2023-05-01 func (client *ConnectivityConfigurationsClient) deleteOperation(ctx context.Context, resourceGroupName string, networkManagerName string, configurationName string, options *ConnectivityConfigurationsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "ConnectivityConfigurationsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, networkManagerName, configurationName, options) if err != nil { return nil, err @@ -207,6 +218,10 @@ func (client *ConnectivityConfigurationsClient) deleteCreateRequest(ctx context. // method. 
func (client *ConnectivityConfigurationsClient) Get(ctx context.Context, resourceGroupName string, networkManagerName string, configurationName string, options *ConnectivityConfigurationsClientGetOptions) (ConnectivityConfigurationsClientGetResponse, error) { var err error + const operationName = "ConnectivityConfigurationsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, networkManagerName, configurationName, options) if err != nil { return ConnectivityConfigurationsClientGetResponse{}, err @@ -275,25 +290,20 @@ func (client *ConnectivityConfigurationsClient) NewListPager(resourceGroupName s return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ConnectivityConfigurationsClientListResponse) (ConnectivityConfigurationsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, networkManagerName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ConnectivityConfigurationsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, networkManagerName, options) + }, nil) if err != nil { return ConnectivityConfigurationsClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ConnectivityConfigurationsClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ConnectivityConfigurationsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/constants.go index ad3170b2d..6ec8bdb32 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/constants.go @@ -9,8 +9,8 @@ package armnetwork const ( - moduleName = "armnetwork" - moduleVersion = "v4.2.0" + moduleName = "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork" + moduleVersion = "v4.3.0" ) // Access - Access to be allowed or denied. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/customipprefixes_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/customipprefixes_client.go index 8cead915f..52935bf86 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/customipprefixes_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/customipprefixes_client.go @@ -33,7 +33,7 @@ type CustomIPPrefixesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewCustomIPPrefixesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*CustomIPPrefixesClient, error) { - cl, err := arm.NewClient(moduleName+".CustomIPPrefixesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *CustomIPPrefixesClient) BeginCreateOrUpdate(ctx context.Context, r } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[CustomIPPrefixesClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[CustomIPPrefixesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[CustomIPPrefixesClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *CustomIPPrefixesClient) BeginCreateOrUpdate(ctx context.Context, r // Generated from API version 2023-05-01 func (client *CustomIPPrefixesClient) createOrUpdate(ctx context.Context, resourceGroupName string, customIPPrefixName string, parameters CustomIPPrefix, options *CustomIPPrefixesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "CustomIPPrefixesClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, customIPPrefixName, parameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *CustomIPPrefixesClient) BeginDelete(ctx context.Context, resourceG } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[CustomIPPrefixesClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[CustomIPPrefixesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[CustomIPPrefixesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *CustomIPPrefixesClient) BeginDelete(ctx context.Context, resourceG // Generated from API version 2023-05-01 func (client *CustomIPPrefixesClient) deleteOperation(ctx context.Context, resourceGroupName string, customIPPrefixName string, options *CustomIPPrefixesClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "CustomIPPrefixesClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, customIPPrefixName, options) if err != nil { return nil, err @@ -197,6 +211,10 @@ func (client *CustomIPPrefixesClient) deleteCreateRequest(ctx context.Context, r // - options - CustomIPPrefixesClientGetOptions contains the optional parameters for the CustomIPPrefixesClient.Get method. 
func (client *CustomIPPrefixesClient) Get(ctx context.Context, resourceGroupName string, customIPPrefixName string, options *CustomIPPrefixesClientGetOptions) (CustomIPPrefixesClientGetResponse, error) { var err error + const operationName = "CustomIPPrefixesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, customIPPrefixName, options) if err != nil { return CustomIPPrefixesClientGetResponse{}, err @@ -263,25 +281,20 @@ func (client *CustomIPPrefixesClient) NewListPager(resourceGroupName string, opt return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *CustomIPPrefixesClientListResponse) (CustomIPPrefixesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return CustomIPPrefixesClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "CustomIPPrefixesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return CustomIPPrefixesClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return CustomIPPrefixesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -327,25 +340,20 @@ func (client *CustomIPPrefixesClient) NewListAllPager(options *CustomIPPrefixesC return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *CustomIPPrefixesClientListAllResponse) (CustomIPPrefixesClientListAllResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listAllCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "CustomIPPrefixesClient.NewListAllPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAllCreateRequest(ctx, options) + }, nil) if err != nil { return CustomIPPrefixesClientListAllResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return CustomIPPrefixesClientListAllResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return CustomIPPrefixesClientListAllResponse{}, runtime.NewResponseError(resp) - } return client.listAllHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -387,6 +395,10 @@ func (client *CustomIPPrefixesClient) listAllHandleResponse(resp *http.Response) // method. 
func (client *CustomIPPrefixesClient) UpdateTags(ctx context.Context, resourceGroupName string, customIPPrefixName string, parameters TagsObject, options *CustomIPPrefixesClientUpdateTagsOptions) (CustomIPPrefixesClientUpdateTagsResponse, error) { var err error + const operationName = "CustomIPPrefixesClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, customIPPrefixName, parameters, options) if err != nil { return CustomIPPrefixesClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/ddoscustompolicies_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/ddoscustompolicies_client.go index fd0568c90..97593c1f1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/ddoscustompolicies_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/ddoscustompolicies_client.go @@ -33,7 +33,7 @@ type DdosCustomPoliciesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewDdosCustomPoliciesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*DdosCustomPoliciesClient, error) { - cl, err := arm.NewClient(moduleName+".DdosCustomPoliciesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *DdosCustomPoliciesClient) BeginCreateOrUpdate(ctx context.Context, } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[DdosCustomPoliciesClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[DdosCustomPoliciesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[DdosCustomPoliciesClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *DdosCustomPoliciesClient) BeginCreateOrUpdate(ctx context.Context, // Generated from API version 2023-05-01 func (client *DdosCustomPoliciesClient) createOrUpdate(ctx context.Context, resourceGroupName string, ddosCustomPolicyName string, parameters DdosCustomPolicy, options *DdosCustomPoliciesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "DdosCustomPoliciesClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, ddosCustomPolicyName, parameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *DdosCustomPoliciesClient) BeginDelete(ctx context.Context, resourc } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[DdosCustomPoliciesClientDeleteResponse]{ 
FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[DdosCustomPoliciesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[DdosCustomPoliciesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *DdosCustomPoliciesClient) BeginDelete(ctx context.Context, resourc // Generated from API version 2023-05-01 func (client *DdosCustomPoliciesClient) deleteOperation(ctx context.Context, resourceGroupName string, ddosCustomPolicyName string, options *DdosCustomPoliciesClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "DdosCustomPoliciesClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, ddosCustomPolicyName, options) if err != nil { return nil, err @@ -197,6 +211,10 @@ func (client *DdosCustomPoliciesClient) deleteCreateRequest(ctx context.Context, // - options - DdosCustomPoliciesClientGetOptions contains the optional parameters for the DdosCustomPoliciesClient.Get method. func (client *DdosCustomPoliciesClient) Get(ctx context.Context, resourceGroupName string, ddosCustomPolicyName string, options *DdosCustomPoliciesClientGetOptions) (DdosCustomPoliciesClientGetResponse, error) { var err error + const operationName = "DdosCustomPoliciesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, ddosCustomPolicyName, options) if err != nil { return DdosCustomPoliciesClientGetResponse{}, err @@ -259,6 +277,10 @@ func (client *DdosCustomPoliciesClient) getHandleResponse(resp *http.Response) ( // method. func (client *DdosCustomPoliciesClient) UpdateTags(ctx context.Context, resourceGroupName string, ddosCustomPolicyName string, parameters TagsObject, options *DdosCustomPoliciesClientUpdateTagsOptions) (DdosCustomPoliciesClientUpdateTagsResponse, error) { var err error + const operationName = "DdosCustomPoliciesClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, ddosCustomPolicyName, parameters, options) if err != nil { return DdosCustomPoliciesClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/ddosprotectionplans_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/ddosprotectionplans_client.go index d8fc5263a..24e240859 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/ddosprotectionplans_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/ddosprotectionplans_client.go @@ -33,7 +33,7 @@ type DdosProtectionPlansClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. 
// - options - pass nil to accept the default values. func NewDdosProtectionPlansClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*DdosProtectionPlansClient, error) { - cl, err := arm.NewClient(moduleName+".DdosProtectionPlansClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *DdosProtectionPlansClient) BeginCreateOrUpdate(ctx context.Context } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[DdosProtectionPlansClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[DdosProtectionPlansClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[DdosProtectionPlansClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *DdosProtectionPlansClient) BeginCreateOrUpdate(ctx context.Context // Generated from API version 2023-05-01 func (client *DdosProtectionPlansClient) createOrUpdate(ctx context.Context, resourceGroupName string, ddosProtectionPlanName string, parameters DdosProtectionPlan, options *DdosProtectionPlansClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "DdosProtectionPlansClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, ddosProtectionPlanName, parameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *DdosProtectionPlansClient) BeginDelete(ctx context.Context, resour } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[DdosProtectionPlansClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[DdosProtectionPlansClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[DdosProtectionPlansClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *DdosProtectionPlansClient) BeginDelete(ctx context.Context, resour // Generated from API version 2023-05-01 func (client *DdosProtectionPlansClient) deleteOperation(ctx context.Context, resourceGroupName string, ddosProtectionPlanName string, options *DdosProtectionPlansClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "DdosProtectionPlansClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, ddosProtectionPlanName, options) if err != nil { return nil, err @@ -197,6 +211,10 @@ func (client *DdosProtectionPlansClient) deleteCreateRequest(ctx context.Context // - 
options - DdosProtectionPlansClientGetOptions contains the optional parameters for the DdosProtectionPlansClient.Get method. func (client *DdosProtectionPlansClient) Get(ctx context.Context, resourceGroupName string, ddosProtectionPlanName string, options *DdosProtectionPlansClientGetOptions) (DdosProtectionPlansClientGetResponse, error) { var err error + const operationName = "DdosProtectionPlansClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, ddosProtectionPlanName, options) if err != nil { return DdosProtectionPlansClientGetResponse{}, err @@ -259,25 +277,20 @@ func (client *DdosProtectionPlansClient) NewListPager(options *DdosProtectionPla return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *DdosProtectionPlansClientListResponse) (DdosProtectionPlansClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return DdosProtectionPlansClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "DdosProtectionPlansClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) if err != nil { return DdosProtectionPlansClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return DdosProtectionPlansClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -320,25 +333,20 @@ func (client *DdosProtectionPlansClient) NewListByResourceGroupPager(resourceGro return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *DdosProtectionPlansClientListByResourceGroupResponse) (DdosProtectionPlansClientListByResourceGroupResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "DdosProtectionPlansClient.NewListByResourceGroupPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return DdosProtectionPlansClientListByResourceGroupResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return DdosProtectionPlansClientListByResourceGroupResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return DdosProtectionPlansClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) - } return client.listByResourceGroupHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -384,6 +392,10 @@ func (client *DdosProtectionPlansClient) listByResourceGroupHandleResponse(resp 
// method. func (client *DdosProtectionPlansClient) UpdateTags(ctx context.Context, resourceGroupName string, ddosProtectionPlanName string, parameters TagsObject, options *DdosProtectionPlansClientUpdateTagsOptions) (DdosProtectionPlansClientUpdateTagsResponse, error) { var err error + const operationName = "DdosProtectionPlansClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, ddosProtectionPlanName, parameters, options) if err != nil { return DdosProtectionPlansClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/defaultsecurityrules_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/defaultsecurityrules_client.go index 50b21fe78..3ebcfd56e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/defaultsecurityrules_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/defaultsecurityrules_client.go @@ -33,7 +33,7 @@ type DefaultSecurityRulesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewDefaultSecurityRulesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*DefaultSecurityRulesClient, error) { - cl, err := arm.NewClient(moduleName+".DefaultSecurityRulesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -55,6 +55,10 @@ func NewDefaultSecurityRulesClient(subscriptionID string, credential azcore.Toke // method. 
func (client *DefaultSecurityRulesClient) Get(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, defaultSecurityRuleName string, options *DefaultSecurityRulesClientGetOptions) (DefaultSecurityRulesClientGetResponse, error) { var err error + const operationName = "DefaultSecurityRulesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, networkSecurityGroupName, defaultSecurityRuleName, options) if err != nil { return DefaultSecurityRulesClientGetResponse{}, err @@ -123,25 +127,20 @@ func (client *DefaultSecurityRulesClient) NewListPager(resourceGroupName string, return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *DefaultSecurityRulesClientListResponse) (DefaultSecurityRulesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, networkSecurityGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "DefaultSecurityRulesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, networkSecurityGroupName, options) + }, nil) if err != nil { return DefaultSecurityRulesClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return DefaultSecurityRulesClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return DefaultSecurityRulesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/dscpconfiguration_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/dscpconfiguration_client.go index 4394ca4bd..733734d3c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/dscpconfiguration_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/dscpconfiguration_client.go @@ -33,7 +33,7 @@ type DscpConfigurationClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewDscpConfigurationClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*DscpConfigurationClient, error) { - cl, err := arm.NewClient(moduleName+".DscpConfigurationClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *DscpConfigurationClient) BeginCreateOrUpdate(ctx context.Context, } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[DscpConfigurationClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[DscpConfigurationClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[DscpConfigurationClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *DscpConfigurationClient) BeginCreateOrUpdate(ctx context.Context, // Generated from API version 2023-05-01 func (client *DscpConfigurationClient) createOrUpdate(ctx context.Context, resourceGroupName string, dscpConfigurationName string, parameters DscpConfiguration, options *DscpConfigurationClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "DscpConfigurationClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, dscpConfigurationName, parameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *DscpConfigurationClient) BeginDelete(ctx context.Context, resource } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[DscpConfigurationClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[DscpConfigurationClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[DscpConfigurationClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *DscpConfigurationClient) BeginDelete(ctx context.Context, resource // Generated from API version 2023-05-01 func (client *DscpConfigurationClient) deleteOperation(ctx context.Context, resourceGroupName string, dscpConfigurationName string, options *DscpConfigurationClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "DscpConfigurationClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, dscpConfigurationName, options) if err != nil { return nil, err @@ -197,6 +211,10 @@ func (client *DscpConfigurationClient) deleteCreateRequest(ctx context.Context, // - options - DscpConfigurationClientGetOptions contains the optional parameters for the 
DscpConfigurationClient.Get method. func (client *DscpConfigurationClient) Get(ctx context.Context, resourceGroupName string, dscpConfigurationName string, options *DscpConfigurationClientGetOptions) (DscpConfigurationClientGetResponse, error) { var err error + const operationName = "DscpConfigurationClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, dscpConfigurationName, options) if err != nil { return DscpConfigurationClientGetResponse{}, err @@ -260,25 +278,20 @@ func (client *DscpConfigurationClient) NewListPager(resourceGroupName string, op return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *DscpConfigurationClientListResponse) (DscpConfigurationClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return DscpConfigurationClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "DscpConfigurationClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return DscpConfigurationClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return DscpConfigurationClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -324,25 +337,20 @@ func (client *DscpConfigurationClient) NewListAllPager(options *DscpConfiguratio return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *DscpConfigurationClientListAllResponse) (DscpConfigurationClientListAllResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listAllCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "DscpConfigurationClient.NewListAllPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAllCreateRequest(ctx, options) + }, nil) if err != nil { return DscpConfigurationClientListAllResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return DscpConfigurationClientListAllResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return DscpConfigurationClientListAllResponse{}, runtime.NewResponseError(resp) - } return client.listAllHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecircuitauthorizations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecircuitauthorizations_client.go index dd4058dd3..91d2d8c29 100644 --- 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecircuitauthorizations_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecircuitauthorizations_client.go @@ -33,7 +33,7 @@ type ExpressRouteCircuitAuthorizationsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewExpressRouteCircuitAuthorizationsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ExpressRouteCircuitAuthorizationsClient, error) { - cl, err := arm.NewClient(moduleName+".ExpressRouteCircuitAuthorizationsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *ExpressRouteCircuitAuthorizationsClient) BeginCreateOrUpdate(ctx c } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRouteCircuitAuthorizationsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRouteCircuitAuthorizationsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRouteCircuitAuthorizationsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *ExpressRouteCircuitAuthorizationsClient) BeginCreateOrUpdate(ctx c // Generated from API version 2023-05-01 func (client *ExpressRouteCircuitAuthorizationsClient) createOrUpdate(ctx context.Context, resourceGroupName string, circuitName string, authorizationName string, authorizationParameters ExpressRouteCircuitAuthorization, options *ExpressRouteCircuitAuthorizationsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "ExpressRouteCircuitAuthorizationsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, circuitName, authorizationName, authorizationParameters, options) if err != nil { return nil, err @@ -140,10 +147,13 @@ func (client *ExpressRouteCircuitAuthorizationsClient) BeginDelete(ctx context.C } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRouteCircuitAuthorizationsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRouteCircuitAuthorizationsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRouteCircuitAuthorizationsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -153,6 +163,10 @@ func (client *ExpressRouteCircuitAuthorizationsClient) BeginDelete(ctx context.C // Generated from API version 2023-05-01 func (client *ExpressRouteCircuitAuthorizationsClient) 
deleteOperation(ctx context.Context, resourceGroupName string, circuitName string, authorizationName string, options *ExpressRouteCircuitAuthorizationsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "ExpressRouteCircuitAuthorizationsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, circuitName, authorizationName, options) if err != nil { return nil, err @@ -209,6 +223,10 @@ func (client *ExpressRouteCircuitAuthorizationsClient) deleteCreateRequest(ctx c // method. func (client *ExpressRouteCircuitAuthorizationsClient) Get(ctx context.Context, resourceGroupName string, circuitName string, authorizationName string, options *ExpressRouteCircuitAuthorizationsClientGetOptions) (ExpressRouteCircuitAuthorizationsClientGetResponse, error) { var err error + const operationName = "ExpressRouteCircuitAuthorizationsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, circuitName, authorizationName, options) if err != nil { return ExpressRouteCircuitAuthorizationsClientGetResponse{}, err @@ -277,25 +295,20 @@ func (client *ExpressRouteCircuitAuthorizationsClient) NewListPager(resourceGrou return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ExpressRouteCircuitAuthorizationsClientListResponse) (ExpressRouteCircuitAuthorizationsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, circuitName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return ExpressRouteCircuitAuthorizationsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ExpressRouteCircuitAuthorizationsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, circuitName, options) + }, nil) if err != nil { return ExpressRouteCircuitAuthorizationsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ExpressRouteCircuitAuthorizationsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecircuitconnections_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecircuitconnections_client.go index 6418a6761..e07aaa0b0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecircuitconnections_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecircuitconnections_client.go @@ -33,7 +33,7 @@ type ExpressRouteCircuitConnectionsClient struct { // - credential - used to authorize requests. 
Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewExpressRouteCircuitConnectionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ExpressRouteCircuitConnectionsClient, error) { - cl, err := arm.NewClient(moduleName+".ExpressRouteCircuitConnectionsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -64,10 +64,13 @@ func (client *ExpressRouteCircuitConnectionsClient) BeginCreateOrUpdate(ctx cont } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRouteCircuitConnectionsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRouteCircuitConnectionsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRouteCircuitConnectionsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -77,6 +80,10 @@ func (client *ExpressRouteCircuitConnectionsClient) BeginCreateOrUpdate(ctx cont // Generated from API version 2023-05-01 func (client *ExpressRouteCircuitConnectionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, connectionName string, expressRouteCircuitConnectionParameters ExpressRouteCircuitConnection, options *ExpressRouteCircuitConnectionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "ExpressRouteCircuitConnectionsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, circuitName, peeringName, connectionName, expressRouteCircuitConnectionParameters, options) if err != nil { return nil, err @@ -147,10 +154,13 @@ func (client *ExpressRouteCircuitConnectionsClient) BeginDelete(ctx context.Cont } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRouteCircuitConnectionsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRouteCircuitConnectionsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRouteCircuitConnectionsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -160,6 +170,10 @@ func (client *ExpressRouteCircuitConnectionsClient) BeginDelete(ctx context.Cont // Generated from API version 2023-05-01 func (client *ExpressRouteCircuitConnectionsClient) deleteOperation(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, connectionName string, options *ExpressRouteCircuitConnectionsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "ExpressRouteCircuitConnectionsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + 
ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, circuitName, peeringName, connectionName, options) if err != nil { return nil, err @@ -221,6 +235,10 @@ func (client *ExpressRouteCircuitConnectionsClient) deleteCreateRequest(ctx cont // method. func (client *ExpressRouteCircuitConnectionsClient) Get(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, connectionName string, options *ExpressRouteCircuitConnectionsClientGetOptions) (ExpressRouteCircuitConnectionsClientGetResponse, error) { var err error + const operationName = "ExpressRouteCircuitConnectionsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, circuitName, peeringName, connectionName, options) if err != nil { return ExpressRouteCircuitConnectionsClientGetResponse{}, err @@ -294,25 +312,20 @@ func (client *ExpressRouteCircuitConnectionsClient) NewListPager(resourceGroupNa return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ExpressRouteCircuitConnectionsClientListResponse) (ExpressRouteCircuitConnectionsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, circuitName, peeringName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return ExpressRouteCircuitConnectionsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ExpressRouteCircuitConnectionsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, circuitName, peeringName, options) + }, nil) if err != nil { return ExpressRouteCircuitConnectionsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ExpressRouteCircuitConnectionsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecircuitpeerings_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecircuitpeerings_client.go index af49b8922..4d849efaf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecircuitpeerings_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecircuitpeerings_client.go @@ -33,7 +33,7 @@ type ExpressRouteCircuitPeeringsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewExpressRouteCircuitPeeringsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ExpressRouteCircuitPeeringsClient, error) { - cl, err := arm.NewClient(moduleName+".ExpressRouteCircuitPeeringsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *ExpressRouteCircuitPeeringsClient) BeginCreateOrUpdate(ctx context } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRouteCircuitPeeringsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRouteCircuitPeeringsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRouteCircuitPeeringsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *ExpressRouteCircuitPeeringsClient) BeginCreateOrUpdate(ctx context // Generated from API version 2023-05-01 func (client *ExpressRouteCircuitPeeringsClient) createOrUpdate(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, peeringParameters ExpressRouteCircuitPeering, options *ExpressRouteCircuitPeeringsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "ExpressRouteCircuitPeeringsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, circuitName, peeringName, peeringParameters, options) if err != nil { return nil, err @@ -140,10 +147,13 @@ func (client *ExpressRouteCircuitPeeringsClient) BeginDelete(ctx context.Context } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRouteCircuitPeeringsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRouteCircuitPeeringsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRouteCircuitPeeringsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -153,6 +163,10 @@ func (client *ExpressRouteCircuitPeeringsClient) BeginDelete(ctx context.Context // Generated from API version 2023-05-01 func (client *ExpressRouteCircuitPeeringsClient) deleteOperation(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, options *ExpressRouteCircuitPeeringsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "ExpressRouteCircuitPeeringsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, circuitName, peeringName, options) if err != nil { return nil, err 
@@ -209,6 +223,10 @@ func (client *ExpressRouteCircuitPeeringsClient) deleteCreateRequest(ctx context // method. func (client *ExpressRouteCircuitPeeringsClient) Get(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, options *ExpressRouteCircuitPeeringsClientGetOptions) (ExpressRouteCircuitPeeringsClientGetResponse, error) { var err error + const operationName = "ExpressRouteCircuitPeeringsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, circuitName, peeringName, options) if err != nil { return ExpressRouteCircuitPeeringsClientGetResponse{}, err @@ -277,25 +295,20 @@ func (client *ExpressRouteCircuitPeeringsClient) NewListPager(resourceGroupName return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ExpressRouteCircuitPeeringsClientListResponse) (ExpressRouteCircuitPeeringsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, circuitName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return ExpressRouteCircuitPeeringsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ExpressRouteCircuitPeeringsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, circuitName, options) + }, nil) if err != nil { return ExpressRouteCircuitPeeringsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ExpressRouteCircuitPeeringsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecircuits_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecircuits_client.go index 689c1024d..e106f07bc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecircuits_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecircuits_client.go @@ -33,7 +33,7 @@ type ExpressRouteCircuitsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewExpressRouteCircuitsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ExpressRouteCircuitsClient, error) { - cl, err := arm.NewClient(moduleName+".ExpressRouteCircuitsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *ExpressRouteCircuitsClient) BeginCreateOrUpdate(ctx context.Contex } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRouteCircuitsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRouteCircuitsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRouteCircuitsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *ExpressRouteCircuitsClient) BeginCreateOrUpdate(ctx context.Contex // Generated from API version 2023-05-01 func (client *ExpressRouteCircuitsClient) createOrUpdate(ctx context.Context, resourceGroupName string, circuitName string, parameters ExpressRouteCircuit, options *ExpressRouteCircuitsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "ExpressRouteCircuitsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, circuitName, parameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *ExpressRouteCircuitsClient) BeginDelete(ctx context.Context, resou } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRouteCircuitsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRouteCircuitsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRouteCircuitsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *ExpressRouteCircuitsClient) BeginDelete(ctx context.Context, resou // Generated from API version 2023-05-01 func (client *ExpressRouteCircuitsClient) deleteOperation(ctx context.Context, resourceGroupName string, circuitName string, options *ExpressRouteCircuitsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "ExpressRouteCircuitsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, circuitName, options) if err != nil { return nil, err @@ -198,6 +212,10 @@ func (client *ExpressRouteCircuitsClient) deleteCreateRequest(ctx context.Contex // method. 
func (client *ExpressRouteCircuitsClient) Get(ctx context.Context, resourceGroupName string, circuitName string, options *ExpressRouteCircuitsClientGetOptions) (ExpressRouteCircuitsClientGetResponse, error) { var err error + const operationName = "ExpressRouteCircuitsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, circuitName, options) if err != nil { return ExpressRouteCircuitsClientGetResponse{}, err @@ -260,6 +278,10 @@ func (client *ExpressRouteCircuitsClient) getHandleResponse(resp *http.Response) // method. func (client *ExpressRouteCircuitsClient) GetPeeringStats(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, options *ExpressRouteCircuitsClientGetPeeringStatsOptions) (ExpressRouteCircuitsClientGetPeeringStatsResponse, error) { var err error + const operationName = "ExpressRouteCircuitsClient.GetPeeringStats" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getPeeringStatsCreateRequest(ctx, resourceGroupName, circuitName, peeringName, options) if err != nil { return ExpressRouteCircuitsClientGetPeeringStatsResponse{}, err @@ -325,6 +347,10 @@ func (client *ExpressRouteCircuitsClient) getPeeringStatsHandleResponse(resp *ht // method. func (client *ExpressRouteCircuitsClient) GetStats(ctx context.Context, resourceGroupName string, circuitName string, options *ExpressRouteCircuitsClientGetStatsOptions) (ExpressRouteCircuitsClientGetStatsResponse, error) { var err error + const operationName = "ExpressRouteCircuitsClient.GetStats" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getStatsCreateRequest(ctx, resourceGroupName, circuitName, options) if err != nil { return ExpressRouteCircuitsClientGetStatsResponse{}, err @@ -388,25 +414,20 @@ func (client *ExpressRouteCircuitsClient) NewListPager(resourceGroupName string, return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ExpressRouteCircuitsClientListResponse) (ExpressRouteCircuitsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ExpressRouteCircuitsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return ExpressRouteCircuitsClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ExpressRouteCircuitsClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ExpressRouteCircuitsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -452,25 +473,20 @@ 
func (client *ExpressRouteCircuitsClient) NewListAllPager(options *ExpressRouteC return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ExpressRouteCircuitsClientListAllResponse) (ExpressRouteCircuitsClientListAllResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listAllCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ExpressRouteCircuitsClient.NewListAllPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAllCreateRequest(ctx, options) + }, nil) if err != nil { return ExpressRouteCircuitsClientListAllResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ExpressRouteCircuitsClientListAllResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ExpressRouteCircuitsClientListAllResponse{}, runtime.NewResponseError(resp) - } return client.listAllHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -519,10 +535,13 @@ func (client *ExpressRouteCircuitsClient) BeginListArpTable(ctx context.Context, } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRouteCircuitsClientListArpTableResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRouteCircuitsClientListArpTableResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRouteCircuitsClientListArpTableResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -532,6 +551,10 @@ func (client *ExpressRouteCircuitsClient) BeginListArpTable(ctx context.Context, // Generated from API version 2023-05-01 func (client *ExpressRouteCircuitsClient) listArpTable(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, devicePath string, options *ExpressRouteCircuitsClientBeginListArpTableOptions) (*http.Response, error) { var err error + const operationName = "ExpressRouteCircuitsClient.BeginListArpTable" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listArpTableCreateRequest(ctx, resourceGroupName, circuitName, peeringName, devicePath, options) if err != nil { return nil, err @@ -600,10 +623,13 @@ func (client *ExpressRouteCircuitsClient) BeginListRoutesTable(ctx context.Conte } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRouteCircuitsClientListRoutesTableResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRouteCircuitsClientListRoutesTableResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRouteCircuitsClientListRoutesTableResponse]{ + Tracer: client.internal.Tracer(), + }) } } 
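A second pattern repeated through these hunks is the resume-token branch of each Begin* method: the nil options argument to runtime.NewPollerFromResumeToken is replaced by an options struct that forwards the client tracer, so resumed pollers are traced like fresh ones. A hedged sketch under a hypothetical WidgetsClientCreateResponse response type, assuming only the azcore/runtime calls shown in the diff:

package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing"
)

// WidgetsClientCreateResponse is a hypothetical LRO response type standing in
// for the generated *Response types in this diff.
type WidgetsClientCreateResponse struct{}

// resumeCreate shows the new resume-token path: the options struct that used
// to be nil now carries the tracer used by the rest of the client.
func resumeCreate(token string, pl runtime.Pipeline, tracer tracing.Tracer) (*runtime.Poller[WidgetsClientCreateResponse], error) {
	return runtime.NewPollerFromResumeToken(token, pl, &runtime.NewPollerFromResumeTokenOptions[WidgetsClientCreateResponse]{
		Tracer: tracer,
	})
}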
@@ -613,6 +639,10 @@ func (client *ExpressRouteCircuitsClient) BeginListRoutesTable(ctx context.Conte // Generated from API version 2023-05-01 func (client *ExpressRouteCircuitsClient) listRoutesTable(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, devicePath string, options *ExpressRouteCircuitsClientBeginListRoutesTableOptions) (*http.Response, error) { var err error + const operationName = "ExpressRouteCircuitsClient.BeginListRoutesTable" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listRoutesTableCreateRequest(ctx, resourceGroupName, circuitName, peeringName, devicePath, options) if err != nil { return nil, err @@ -681,10 +711,13 @@ func (client *ExpressRouteCircuitsClient) BeginListRoutesTableSummary(ctx contex } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRouteCircuitsClientListRoutesTableSummaryResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRouteCircuitsClientListRoutesTableSummaryResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRouteCircuitsClientListRoutesTableSummaryResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -695,6 +728,10 @@ func (client *ExpressRouteCircuitsClient) BeginListRoutesTableSummary(ctx contex // Generated from API version 2023-05-01 func (client *ExpressRouteCircuitsClient) listRoutesTableSummary(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, devicePath string, options *ExpressRouteCircuitsClientBeginListRoutesTableSummaryOptions) (*http.Response, error) { var err error + const operationName = "ExpressRouteCircuitsClient.BeginListRoutesTableSummary" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listRoutesTableSummaryCreateRequest(ctx, resourceGroupName, circuitName, peeringName, devicePath, options) if err != nil { return nil, err @@ -755,6 +792,10 @@ func (client *ExpressRouteCircuitsClient) listRoutesTableSummaryCreateRequest(ct // method. 
func (client *ExpressRouteCircuitsClient) UpdateTags(ctx context.Context, resourceGroupName string, circuitName string, parameters TagsObject, options *ExpressRouteCircuitsClientUpdateTagsOptions) (ExpressRouteCircuitsClientUpdateTagsResponse, error) { var err error + const operationName = "ExpressRouteCircuitsClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, circuitName, parameters, options) if err != nil { return ExpressRouteCircuitsClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteconnections_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteconnections_client.go index b96b91628..40d454090 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteconnections_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteconnections_client.go @@ -33,7 +33,7 @@ type ExpressRouteConnectionsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewExpressRouteConnectionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ExpressRouteConnectionsClient, error) { - cl, err := arm.NewClient(moduleName+".ExpressRouteConnectionsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *ExpressRouteConnectionsClient) BeginCreateOrUpdate(ctx context.Con } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRouteConnectionsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRouteConnectionsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRouteConnectionsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *ExpressRouteConnectionsClient) BeginCreateOrUpdate(ctx context.Con // Generated from API version 2023-05-01 func (client *ExpressRouteConnectionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, expressRouteGatewayName string, connectionName string, putExpressRouteConnectionParameters ExpressRouteConnection, options *ExpressRouteConnectionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "ExpressRouteConnectionsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, expressRouteGatewayName, connectionName, putExpressRouteConnectionParameters, options) if err != nil { return nil, err @@ -140,10 +147,13 @@ func (client *ExpressRouteConnectionsClient) 
BeginDelete(ctx context.Context, re } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRouteConnectionsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRouteConnectionsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRouteConnectionsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -153,6 +163,10 @@ func (client *ExpressRouteConnectionsClient) BeginDelete(ctx context.Context, re // Generated from API version 2023-05-01 func (client *ExpressRouteConnectionsClient) deleteOperation(ctx context.Context, resourceGroupName string, expressRouteGatewayName string, connectionName string, options *ExpressRouteConnectionsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "ExpressRouteConnectionsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, expressRouteGatewayName, connectionName, options) if err != nil { return nil, err @@ -209,6 +223,10 @@ func (client *ExpressRouteConnectionsClient) deleteCreateRequest(ctx context.Con // method. func (client *ExpressRouteConnectionsClient) Get(ctx context.Context, resourceGroupName string, expressRouteGatewayName string, connectionName string, options *ExpressRouteConnectionsClientGetOptions) (ExpressRouteConnectionsClientGetResponse, error) { var err error + const operationName = "ExpressRouteConnectionsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, expressRouteGatewayName, connectionName, options) if err != nil { return ExpressRouteConnectionsClientGetResponse{}, err @@ -274,6 +292,10 @@ func (client *ExpressRouteConnectionsClient) getHandleResponse(resp *http.Respon // method. 
func (client *ExpressRouteConnectionsClient) List(ctx context.Context, resourceGroupName string, expressRouteGatewayName string, options *ExpressRouteConnectionsClientListOptions) (ExpressRouteConnectionsClientListResponse, error) { var err error + const operationName = "ExpressRouteConnectionsClient.List" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listCreateRequest(ctx, resourceGroupName, expressRouteGatewayName, options) if err != nil { return ExpressRouteConnectionsClientListResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecrossconnectionpeerings_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecrossconnectionpeerings_client.go index 7fe9b5b69..b3eb6901b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecrossconnectionpeerings_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecrossconnectionpeerings_client.go @@ -33,7 +33,7 @@ type ExpressRouteCrossConnectionPeeringsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewExpressRouteCrossConnectionPeeringsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ExpressRouteCrossConnectionPeeringsClient, error) { - cl, err := arm.NewClient(moduleName+".ExpressRouteCrossConnectionPeeringsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *ExpressRouteCrossConnectionPeeringsClient) BeginCreateOrUpdate(ctx } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRouteCrossConnectionPeeringsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRouteCrossConnectionPeeringsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRouteCrossConnectionPeeringsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *ExpressRouteCrossConnectionPeeringsClient) BeginCreateOrUpdate(ctx // Generated from API version 2023-05-01 func (client *ExpressRouteCrossConnectionPeeringsClient) createOrUpdate(ctx context.Context, resourceGroupName string, crossConnectionName string, peeringName string, peeringParameters ExpressRouteCrossConnectionPeering, options *ExpressRouteCrossConnectionPeeringsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "ExpressRouteCrossConnectionPeeringsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, crossConnectionName, peeringName, peeringParameters, options) if 
err != nil { return nil, err @@ -140,10 +147,13 @@ func (client *ExpressRouteCrossConnectionPeeringsClient) BeginDelete(ctx context } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRouteCrossConnectionPeeringsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRouteCrossConnectionPeeringsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRouteCrossConnectionPeeringsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -153,6 +163,10 @@ func (client *ExpressRouteCrossConnectionPeeringsClient) BeginDelete(ctx context // Generated from API version 2023-05-01 func (client *ExpressRouteCrossConnectionPeeringsClient) deleteOperation(ctx context.Context, resourceGroupName string, crossConnectionName string, peeringName string, options *ExpressRouteCrossConnectionPeeringsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "ExpressRouteCrossConnectionPeeringsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, crossConnectionName, peeringName, options) if err != nil { return nil, err @@ -209,6 +223,10 @@ func (client *ExpressRouteCrossConnectionPeeringsClient) deleteCreateRequest(ctx // method. func (client *ExpressRouteCrossConnectionPeeringsClient) Get(ctx context.Context, resourceGroupName string, crossConnectionName string, peeringName string, options *ExpressRouteCrossConnectionPeeringsClientGetOptions) (ExpressRouteCrossConnectionPeeringsClientGetResponse, error) { var err error + const operationName = "ExpressRouteCrossConnectionPeeringsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, crossConnectionName, peeringName, options) if err != nil { return ExpressRouteCrossConnectionPeeringsClientGetResponse{}, err @@ -277,25 +295,20 @@ func (client *ExpressRouteCrossConnectionPeeringsClient) NewListPager(resourceGr return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ExpressRouteCrossConnectionPeeringsClientListResponse) (ExpressRouteCrossConnectionPeeringsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, crossConnectionName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return ExpressRouteCrossConnectionPeeringsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ExpressRouteCrossConnectionPeeringsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, 
crossConnectionName, options) + }, nil) if err != nil { return ExpressRouteCrossConnectionPeeringsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ExpressRouteCrossConnectionPeeringsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecrossconnections_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecrossconnections_client.go index 8594fdff7..0419a5555 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecrossconnections_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecrossconnections_client.go @@ -33,7 +33,7 @@ type ExpressRouteCrossConnectionsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewExpressRouteCrossConnectionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ExpressRouteCrossConnectionsClient, error) { - cl, err := arm.NewClient(moduleName+".ExpressRouteCrossConnectionsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *ExpressRouteCrossConnectionsClient) BeginCreateOrUpdate(ctx contex } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRouteCrossConnectionsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRouteCrossConnectionsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRouteCrossConnectionsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *ExpressRouteCrossConnectionsClient) BeginCreateOrUpdate(ctx contex // Generated from API version 2023-05-01 func (client *ExpressRouteCrossConnectionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, crossConnectionName string, parameters ExpressRouteCrossConnection, options *ExpressRouteCrossConnectionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "ExpressRouteCrossConnectionsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, crossConnectionName, parameters, options) if err != nil { return nil, err @@ -128,6 +135,10 @@ func (client *ExpressRouteCrossConnectionsClient) createOrUpdateCreateRequest(ct // method. 
func (client *ExpressRouteCrossConnectionsClient) Get(ctx context.Context, resourceGroupName string, crossConnectionName string, options *ExpressRouteCrossConnectionsClientGetOptions) (ExpressRouteCrossConnectionsClientGetResponse, error) { var err error + const operationName = "ExpressRouteCrossConnectionsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, crossConnectionName, options) if err != nil { return ExpressRouteCrossConnectionsClientGetResponse{}, err @@ -190,25 +201,20 @@ func (client *ExpressRouteCrossConnectionsClient) NewListPager(options *ExpressR return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ExpressRouteCrossConnectionsClientListResponse) (ExpressRouteCrossConnectionsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return ExpressRouteCrossConnectionsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ExpressRouteCrossConnectionsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) if err != nil { return ExpressRouteCrossConnectionsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ExpressRouteCrossConnectionsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -258,10 +264,13 @@ func (client *ExpressRouteCrossConnectionsClient) BeginListArpTable(ctx context. } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRouteCrossConnectionsClientListArpTableResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRouteCrossConnectionsClientListArpTableResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRouteCrossConnectionsClientListArpTableResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -272,6 +281,10 @@ func (client *ExpressRouteCrossConnectionsClient) BeginListArpTable(ctx context. 
// Generated from API version 2023-05-01 func (client *ExpressRouteCrossConnectionsClient) listArpTable(ctx context.Context, resourceGroupName string, crossConnectionName string, peeringName string, devicePath string, options *ExpressRouteCrossConnectionsClientBeginListArpTableOptions) (*http.Response, error) { var err error + const operationName = "ExpressRouteCrossConnectionsClient.BeginListArpTable" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listArpTableCreateRequest(ctx, resourceGroupName, crossConnectionName, peeringName, devicePath, options) if err != nil { return nil, err @@ -333,25 +346,20 @@ func (client *ExpressRouteCrossConnectionsClient) NewListByResourceGroupPager(re return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ExpressRouteCrossConnectionsClientListByResourceGroupResponse) (ExpressRouteCrossConnectionsClientListByResourceGroupResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ExpressRouteCrossConnectionsClient.NewListByResourceGroupPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return ExpressRouteCrossConnectionsClientListByResourceGroupResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ExpressRouteCrossConnectionsClientListByResourceGroupResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ExpressRouteCrossConnectionsClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) - } return client.listByResourceGroupHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -405,10 +413,13 @@ func (client *ExpressRouteCrossConnectionsClient) BeginListRoutesTable(ctx conte } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRouteCrossConnectionsClientListRoutesTableResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRouteCrossConnectionsClientListRoutesTableResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRouteCrossConnectionsClientListRoutesTableResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -419,6 +430,10 @@ func (client *ExpressRouteCrossConnectionsClient) BeginListRoutesTable(ctx conte // Generated from API version 2023-05-01 func (client *ExpressRouteCrossConnectionsClient) listRoutesTable(ctx context.Context, resourceGroupName string, crossConnectionName string, peeringName string, devicePath string, options *ExpressRouteCrossConnectionsClientBeginListRoutesTableOptions) (*http.Response, error) { var err error + const operationName = "ExpressRouteCrossConnectionsClient.BeginListRoutesTable" + ctx = context.WithValue(ctx, 
runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listRoutesTableCreateRequest(ctx, resourceGroupName, crossConnectionName, peeringName, devicePath, options) if err != nil { return nil, err @@ -487,10 +502,13 @@ func (client *ExpressRouteCrossConnectionsClient) BeginListRoutesTableSummary(ct } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRouteCrossConnectionsClientListRoutesTableSummaryResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRouteCrossConnectionsClientListRoutesTableSummaryResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRouteCrossConnectionsClientListRoutesTableSummaryResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -501,6 +519,10 @@ func (client *ExpressRouteCrossConnectionsClient) BeginListRoutesTableSummary(ct // Generated from API version 2023-05-01 func (client *ExpressRouteCrossConnectionsClient) listRoutesTableSummary(ctx context.Context, resourceGroupName string, crossConnectionName string, peeringName string, devicePath string, options *ExpressRouteCrossConnectionsClientBeginListRoutesTableSummaryOptions) (*http.Response, error) { var err error + const operationName = "ExpressRouteCrossConnectionsClient.BeginListRoutesTableSummary" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listRoutesTableSummaryCreateRequest(ctx, resourceGroupName, crossConnectionName, peeringName, devicePath, options) if err != nil { return nil, err @@ -561,6 +583,10 @@ func (client *ExpressRouteCrossConnectionsClient) listRoutesTableSummaryCreateRe // method. func (client *ExpressRouteCrossConnectionsClient) UpdateTags(ctx context.Context, resourceGroupName string, crossConnectionName string, crossConnectionParameters TagsObject, options *ExpressRouteCrossConnectionsClientUpdateTagsOptions) (ExpressRouteCrossConnectionsClientUpdateTagsResponse, error) { var err error + const operationName = "ExpressRouteCrossConnectionsClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, crossConnectionName, crossConnectionParameters, options) if err != nil { return ExpressRouteCrossConnectionsClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutegateways_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutegateways_client.go index 0bb66be65..e2b0b5e7a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutegateways_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutegateways_client.go @@ -33,7 +33,7 @@ type ExpressRouteGatewaysClient struct { // - credential - used to authorize requests. 
Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewExpressRouteGatewaysClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ExpressRouteGatewaysClient, error) { - cl, err := arm.NewClient(moduleName+".ExpressRouteGatewaysClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *ExpressRouteGatewaysClient) BeginCreateOrUpdate(ctx context.Contex } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRouteGatewaysClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRouteGatewaysClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRouteGatewaysClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *ExpressRouteGatewaysClient) BeginCreateOrUpdate(ctx context.Contex // Generated from API version 2023-05-01 func (client *ExpressRouteGatewaysClient) createOrUpdate(ctx context.Context, resourceGroupName string, expressRouteGatewayName string, putExpressRouteGatewayParameters ExpressRouteGateway, options *ExpressRouteGatewaysClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "ExpressRouteGatewaysClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, expressRouteGatewayName, putExpressRouteGatewayParameters, options) if err != nil { return nil, err @@ -135,10 +142,13 @@ func (client *ExpressRouteGatewaysClient) BeginDelete(ctx context.Context, resou } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRouteGatewaysClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRouteGatewaysClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRouteGatewaysClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -149,6 +159,10 @@ func (client *ExpressRouteGatewaysClient) BeginDelete(ctx context.Context, resou // Generated from API version 2023-05-01 func (client *ExpressRouteGatewaysClient) deleteOperation(ctx context.Context, resourceGroupName string, expressRouteGatewayName string, options *ExpressRouteGatewaysClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "ExpressRouteGatewaysClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, expressRouteGatewayName, options) if err != nil { return nil, err @@ 
-200,6 +214,10 @@ func (client *ExpressRouteGatewaysClient) deleteCreateRequest(ctx context.Contex // method. func (client *ExpressRouteGatewaysClient) Get(ctx context.Context, resourceGroupName string, expressRouteGatewayName string, options *ExpressRouteGatewaysClientGetOptions) (ExpressRouteGatewaysClientGetResponse, error) { var err error + const operationName = "ExpressRouteGatewaysClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, expressRouteGatewayName, options) if err != nil { return ExpressRouteGatewaysClientGetResponse{}, err @@ -260,6 +278,10 @@ func (client *ExpressRouteGatewaysClient) getHandleResponse(resp *http.Response) // method. func (client *ExpressRouteGatewaysClient) ListByResourceGroup(ctx context.Context, resourceGroupName string, options *ExpressRouteGatewaysClientListByResourceGroupOptions) (ExpressRouteGatewaysClientListByResourceGroupResponse, error) { var err error + const operationName = "ExpressRouteGatewaysClient.ListByResourceGroup" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) if err != nil { return ExpressRouteGatewaysClientListByResourceGroupResponse{}, err @@ -315,6 +337,10 @@ func (client *ExpressRouteGatewaysClient) listByResourceGroupHandleResponse(resp // method. func (client *ExpressRouteGatewaysClient) ListBySubscription(ctx context.Context, options *ExpressRouteGatewaysClientListBySubscriptionOptions) (ExpressRouteGatewaysClientListBySubscriptionResponse, error) { var err error + const operationName = "ExpressRouteGatewaysClient.ListBySubscription" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listBySubscriptionCreateRequest(ctx, options) if err != nil { return ExpressRouteGatewaysClientListBySubscriptionResponse{}, err @@ -375,10 +401,13 @@ func (client *ExpressRouteGatewaysClient) BeginUpdateTags(ctx context.Context, r } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRouteGatewaysClientUpdateTagsResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRouteGatewaysClientUpdateTagsResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRouteGatewaysClientUpdateTagsResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -388,6 +417,10 @@ func (client *ExpressRouteGatewaysClient) BeginUpdateTags(ctx context.Context, r // Generated from API version 2023-05-01 func (client *ExpressRouteGatewaysClient) updateTags(ctx context.Context, resourceGroupName string, expressRouteGatewayName string, expressRouteGatewayParameters TagsObject, options *ExpressRouteGatewaysClientBeginUpdateTagsOptions) (*http.Response, error) { var err error + const operationName = "ExpressRouteGatewaysClient.BeginUpdateTags" + ctx = context.WithValue(ctx, 
runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, expressRouteGatewayName, expressRouteGatewayParameters, options) if err != nil { return nil, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutelinks_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutelinks_client.go index e1343453c..99ea0d4b6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutelinks_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutelinks_client.go @@ -33,7 +33,7 @@ type ExpressRouteLinksClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewExpressRouteLinksClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ExpressRouteLinksClient, error) { - cl, err := arm.NewClient(moduleName+".ExpressRouteLinksClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -54,6 +54,10 @@ func NewExpressRouteLinksClient(subscriptionID string, credential azcore.TokenCr // - options - ExpressRouteLinksClientGetOptions contains the optional parameters for the ExpressRouteLinksClient.Get method. func (client *ExpressRouteLinksClient) Get(ctx context.Context, resourceGroupName string, expressRoutePortName string, linkName string, options *ExpressRouteLinksClientGetOptions) (ExpressRouteLinksClientGetResponse, error) { var err error + const operationName = "ExpressRouteLinksClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, expressRoutePortName, linkName, options) if err != nil { return ExpressRouteLinksClientGetResponse{}, err @@ -122,25 +126,20 @@ func (client *ExpressRouteLinksClient) NewListPager(resourceGroupName string, ex return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ExpressRouteLinksClientListResponse) (ExpressRouteLinksClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, expressRoutePortName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ExpressRouteLinksClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, expressRoutePortName, options) + }, nil) if err != nil { return ExpressRouteLinksClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ExpressRouteLinksClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ExpressRouteLinksClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, 
+ Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteportauthorizations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteportauthorizations_client.go index 53fcb6126..ec6e1737b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteportauthorizations_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteportauthorizations_client.go @@ -33,7 +33,7 @@ type ExpressRoutePortAuthorizationsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewExpressRoutePortAuthorizationsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ExpressRoutePortAuthorizationsClient, error) { - cl, err := arm.NewClient(moduleName+".ExpressRoutePortAuthorizationsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *ExpressRoutePortAuthorizationsClient) BeginCreateOrUpdate(ctx cont } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRoutePortAuthorizationsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRoutePortAuthorizationsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRoutePortAuthorizationsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *ExpressRoutePortAuthorizationsClient) BeginCreateOrUpdate(ctx cont // Generated from API version 2023-05-01 func (client *ExpressRoutePortAuthorizationsClient) createOrUpdate(ctx context.Context, resourceGroupName string, expressRoutePortName string, authorizationName string, authorizationParameters ExpressRoutePortAuthorization, options *ExpressRoutePortAuthorizationsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "ExpressRoutePortAuthorizationsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, expressRoutePortName, authorizationName, authorizationParameters, options) if err != nil { return nil, err @@ -140,10 +147,13 @@ func (client *ExpressRoutePortAuthorizationsClient) BeginDelete(ctx context.Cont } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRoutePortAuthorizationsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRoutePortAuthorizationsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), 
&runtime.NewPollerFromResumeTokenOptions[ExpressRoutePortAuthorizationsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -153,6 +163,10 @@ func (client *ExpressRoutePortAuthorizationsClient) BeginDelete(ctx context.Cont // Generated from API version 2023-05-01 func (client *ExpressRoutePortAuthorizationsClient) deleteOperation(ctx context.Context, resourceGroupName string, expressRoutePortName string, authorizationName string, options *ExpressRoutePortAuthorizationsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "ExpressRoutePortAuthorizationsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, expressRoutePortName, authorizationName, options) if err != nil { return nil, err @@ -209,6 +223,10 @@ func (client *ExpressRoutePortAuthorizationsClient) deleteCreateRequest(ctx cont // method. func (client *ExpressRoutePortAuthorizationsClient) Get(ctx context.Context, resourceGroupName string, expressRoutePortName string, authorizationName string, options *ExpressRoutePortAuthorizationsClientGetOptions) (ExpressRoutePortAuthorizationsClientGetResponse, error) { var err error + const operationName = "ExpressRoutePortAuthorizationsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, expressRoutePortName, authorizationName, options) if err != nil { return ExpressRoutePortAuthorizationsClientGetResponse{}, err @@ -277,25 +295,20 @@ func (client *ExpressRoutePortAuthorizationsClient) NewListPager(resourceGroupNa return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ExpressRoutePortAuthorizationsClientListResponse) (ExpressRoutePortAuthorizationsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, expressRoutePortName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return ExpressRoutePortAuthorizationsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ExpressRoutePortAuthorizationsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, expressRoutePortName, options) + }, nil) if err != nil { return ExpressRoutePortAuthorizationsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ExpressRoutePortAuthorizationsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteports_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteports_client.go index 9fea55ab2..4811abea7 100644 --- 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteports_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteports_client.go @@ -33,7 +33,7 @@ type ExpressRoutePortsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewExpressRoutePortsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ExpressRoutePortsClient, error) { - cl, err := arm.NewClient(moduleName+".ExpressRoutePortsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *ExpressRoutePortsClient) BeginCreateOrUpdate(ctx context.Context, } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRoutePortsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRoutePortsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRoutePortsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *ExpressRoutePortsClient) BeginCreateOrUpdate(ctx context.Context, // Generated from API version 2023-05-01 func (client *ExpressRoutePortsClient) createOrUpdate(ctx context.Context, resourceGroupName string, expressRoutePortName string, parameters ExpressRoutePort, options *ExpressRoutePortsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "ExpressRoutePortsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, expressRoutePortName, parameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *ExpressRoutePortsClient) BeginDelete(ctx context.Context, resource } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExpressRoutePortsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ExpressRoutePortsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExpressRoutePortsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *ExpressRoutePortsClient) BeginDelete(ctx context.Context, resource // Generated from API version 2023-05-01 func (client *ExpressRoutePortsClient) deleteOperation(ctx context.Context, resourceGroupName string, expressRoutePortName string, options *ExpressRoutePortsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "ExpressRoutePortsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := 
runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, expressRoutePortName, options) if err != nil { return nil, err @@ -199,6 +213,10 @@ func (client *ExpressRoutePortsClient) deleteCreateRequest(ctx context.Context, // method. func (client *ExpressRoutePortsClient) GenerateLOA(ctx context.Context, resourceGroupName string, expressRoutePortName string, request GenerateExpressRoutePortsLOARequest, options *ExpressRoutePortsClientGenerateLOAOptions) (ExpressRoutePortsClientGenerateLOAResponse, error) { var err error + const operationName = "ExpressRoutePortsClient.GenerateLOA" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.generateLOACreateRequest(ctx, resourceGroupName, expressRoutePortName, request, options) if err != nil { return ExpressRoutePortsClientGenerateLOAResponse{}, err @@ -262,6 +280,10 @@ func (client *ExpressRoutePortsClient) generateLOAHandleResponse(resp *http.Resp // - options - ExpressRoutePortsClientGetOptions contains the optional parameters for the ExpressRoutePortsClient.Get method. func (client *ExpressRoutePortsClient) Get(ctx context.Context, resourceGroupName string, expressRoutePortName string, options *ExpressRoutePortsClientGetOptions) (ExpressRoutePortsClientGetResponse, error) { var err error + const operationName = "ExpressRoutePortsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, expressRoutePortName, options) if err != nil { return ExpressRoutePortsClientGetResponse{}, err @@ -324,25 +346,20 @@ func (client *ExpressRoutePortsClient) NewListPager(options *ExpressRoutePortsCl return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ExpressRoutePortsClientListResponse) (ExpressRoutePortsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return ExpressRoutePortsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ExpressRoutePortsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) if err != nil { return ExpressRoutePortsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ExpressRoutePortsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -385,25 +402,20 @@ func (client *ExpressRoutePortsClient) NewListByResourceGroupPager(resourceGroup return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ExpressRoutePortsClientListByResourceGroupResponse) (ExpressRoutePortsClientListByResourceGroupResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err 
= client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ExpressRoutePortsClient.NewListByResourceGroupPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return ExpressRoutePortsClientListByResourceGroupResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ExpressRoutePortsClientListByResourceGroupResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ExpressRoutePortsClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) - } return client.listByResourceGroupHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -449,6 +461,10 @@ func (client *ExpressRoutePortsClient) listByResourceGroupHandleResponse(resp *h // method. func (client *ExpressRoutePortsClient) UpdateTags(ctx context.Context, resourceGroupName string, expressRoutePortName string, parameters TagsObject, options *ExpressRoutePortsClientUpdateTagsOptions) (ExpressRoutePortsClientUpdateTagsResponse, error) { var err error + const operationName = "ExpressRoutePortsClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, expressRoutePortName, parameters, options) if err != nil { return ExpressRoutePortsClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteportslocations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteportslocations_client.go index cf95f3272..60221bb4a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteportslocations_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteportslocations_client.go @@ -33,7 +33,7 @@ type ExpressRoutePortsLocationsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewExpressRoutePortsLocationsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ExpressRoutePortsLocationsClient, error) { - cl, err := arm.NewClient(moduleName+".ExpressRoutePortsLocationsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -54,6 +54,10 @@ func NewExpressRoutePortsLocationsClient(subscriptionID string, credential azcor // method. 
func (client *ExpressRoutePortsLocationsClient) Get(ctx context.Context, locationName string, options *ExpressRoutePortsLocationsClientGetOptions) (ExpressRoutePortsLocationsClientGetResponse, error) { var err error + const operationName = "ExpressRoutePortsLocationsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, locationName, options) if err != nil { return ExpressRoutePortsLocationsClientGetResponse{}, err @@ -113,25 +117,20 @@ func (client *ExpressRoutePortsLocationsClient) NewListPager(options *ExpressRou return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ExpressRoutePortsLocationsClientListResponse) (ExpressRoutePortsLocationsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ExpressRoutePortsLocationsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) if err != nil { return ExpressRoutePortsLocationsClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ExpressRoutePortsLocationsClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ExpressRoutePortsLocationsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteproviderportslocation_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteproviderportslocation_client.go index 078531a8f..b3b55c5fa 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteproviderportslocation_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteproviderportslocation_client.go @@ -33,7 +33,7 @@ type ExpressRouteProviderPortsLocationClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewExpressRouteProviderPortsLocationClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ExpressRouteProviderPortsLocationClient, error) { - cl, err := arm.NewClient(moduleName+".ExpressRouteProviderPortsLocationClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -52,6 +52,10 @@ func NewExpressRouteProviderPortsLocationClient(subscriptionID string, credentia // method. 
func (client *ExpressRouteProviderPortsLocationClient) List(ctx context.Context, options *ExpressRouteProviderPortsLocationClientListOptions) (ExpressRouteProviderPortsLocationClientListResponse, error) { var err error + const operationName = "ExpressRouteProviderPortsLocationClient.List" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listCreateRequest(ctx, options) if err != nil { return ExpressRouteProviderPortsLocationClientListResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteserviceproviders_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteserviceproviders_client.go index 35769d711..5efbab718 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteserviceproviders_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteserviceproviders_client.go @@ -33,7 +33,7 @@ type ExpressRouteServiceProvidersClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewExpressRouteServiceProvidersClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ExpressRouteServiceProvidersClient, error) { - cl, err := arm.NewClient(moduleName+".ExpressRouteServiceProvidersClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -55,25 +55,20 @@ func (client *ExpressRouteServiceProvidersClient) NewListPager(options *ExpressR return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ExpressRouteServiceProvidersClientListResponse) (ExpressRouteServiceProvidersClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ExpressRouteServiceProvidersClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) if err != nil { return ExpressRouteServiceProvidersClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ExpressRouteServiceProvidersClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ExpressRouteServiceProvidersClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicies_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicies_client.go index a76c5c665..8c6460375 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicies_client.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicies_client.go @@ -33,7 +33,7 @@ type FirewallPoliciesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewFirewallPoliciesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*FirewallPoliciesClient, error) { - cl, err := arm.NewClient(moduleName+".FirewallPoliciesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *FirewallPoliciesClient) BeginCreateOrUpdate(ctx context.Context, r } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[FirewallPoliciesClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[FirewallPoliciesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[FirewallPoliciesClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *FirewallPoliciesClient) BeginCreateOrUpdate(ctx context.Context, r // Generated from API version 2023-05-01 func (client *FirewallPoliciesClient) createOrUpdate(ctx context.Context, resourceGroupName string, firewallPolicyName string, parameters FirewallPolicy, options *FirewallPoliciesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "FirewallPoliciesClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, firewallPolicyName, parameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *FirewallPoliciesClient) BeginDelete(ctx context.Context, resourceG } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[FirewallPoliciesClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[FirewallPoliciesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[FirewallPoliciesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *FirewallPoliciesClient) BeginDelete(ctx context.Context, resourceG // Generated from API version 2023-05-01 func (client *FirewallPoliciesClient) deleteOperation(ctx context.Context, resourceGroupName string, firewallPolicyName string, options *FirewallPoliciesClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "FirewallPoliciesClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, 
resourceGroupName, firewallPolicyName, options) if err != nil { return nil, err @@ -197,6 +211,10 @@ func (client *FirewallPoliciesClient) deleteCreateRequest(ctx context.Context, r // - options - FirewallPoliciesClientGetOptions contains the optional parameters for the FirewallPoliciesClient.Get method. func (client *FirewallPoliciesClient) Get(ctx context.Context, resourceGroupName string, firewallPolicyName string, options *FirewallPoliciesClientGetOptions) (FirewallPoliciesClientGetResponse, error) { var err error + const operationName = "FirewallPoliciesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, firewallPolicyName, options) if err != nil { return FirewallPoliciesClientGetResponse{}, err @@ -263,25 +281,20 @@ func (client *FirewallPoliciesClient) NewListPager(resourceGroupName string, opt return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *FirewallPoliciesClientListResponse) (FirewallPoliciesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return FirewallPoliciesClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "FirewallPoliciesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return FirewallPoliciesClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return FirewallPoliciesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -327,25 +340,20 @@ func (client *FirewallPoliciesClient) NewListAllPager(options *FirewallPoliciesC return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *FirewallPoliciesClientListAllResponse) (FirewallPoliciesClientListAllResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listAllCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "FirewallPoliciesClient.NewListAllPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAllCreateRequest(ctx, options) + }, nil) if err != nil { return FirewallPoliciesClientListAllResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return FirewallPoliciesClientListAllResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return FirewallPoliciesClientListAllResponse{}, runtime.NewResponseError(resp) - } return client.listAllHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -387,6 +395,10 @@ func (client *FirewallPoliciesClient) listAllHandleResponse(resp 
*http.Response) // method. func (client *FirewallPoliciesClient) UpdateTags(ctx context.Context, resourceGroupName string, firewallPolicyName string, parameters TagsObject, options *FirewallPoliciesClientUpdateTagsOptions) (FirewallPoliciesClientUpdateTagsResponse, error) { var err error + const operationName = "FirewallPoliciesClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, firewallPolicyName, parameters, options) if err != nil { return FirewallPoliciesClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicyidpssignatures_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicyidpssignatures_client.go index a86af8905..db57d8830 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicyidpssignatures_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicyidpssignatures_client.go @@ -33,7 +33,7 @@ type FirewallPolicyIdpsSignaturesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewFirewallPolicyIdpsSignaturesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*FirewallPolicyIdpsSignaturesClient, error) { - cl, err := arm.NewClient(moduleName+".FirewallPolicyIdpsSignaturesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -54,6 +54,10 @@ func NewFirewallPolicyIdpsSignaturesClient(subscriptionID string, credential azc // method. func (client *FirewallPolicyIdpsSignaturesClient) List(ctx context.Context, resourceGroupName string, firewallPolicyName string, parameters IDPSQueryObject, options *FirewallPolicyIdpsSignaturesClientListOptions) (FirewallPolicyIdpsSignaturesClientListResponse, error) { var err error + const operationName = "FirewallPolicyIdpsSignaturesClient.List" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listCreateRequest(ctx, resourceGroupName, firewallPolicyName, parameters, options) if err != nil { return FirewallPolicyIdpsSignaturesClientListResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicyidpssignaturesfiltervalues_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicyidpssignaturesfiltervalues_client.go index a7b396d7c..2b23adb7b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicyidpssignaturesfiltervalues_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicyidpssignaturesfiltervalues_client.go @@ -33,7 +33,7 @@ type FirewallPolicyIdpsSignaturesFilterValuesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewFirewallPolicyIdpsSignaturesFilterValuesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*FirewallPolicyIdpsSignaturesFilterValuesClient, error) { - cl, err := arm.NewClient(moduleName+".FirewallPolicyIdpsSignaturesFilterValuesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -54,6 +54,10 @@ func NewFirewallPolicyIdpsSignaturesFilterValuesClient(subscriptionID string, cr // method. func (client *FirewallPolicyIdpsSignaturesFilterValuesClient) List(ctx context.Context, resourceGroupName string, firewallPolicyName string, parameters SignatureOverridesFilterValuesQuery, options *FirewallPolicyIdpsSignaturesFilterValuesClientListOptions) (FirewallPolicyIdpsSignaturesFilterValuesClientListResponse, error) { var err error + const operationName = "FirewallPolicyIdpsSignaturesFilterValuesClient.List" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listCreateRequest(ctx, resourceGroupName, firewallPolicyName, parameters, options) if err != nil { return FirewallPolicyIdpsSignaturesFilterValuesClientListResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicyidpssignaturesoverrides_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicyidpssignaturesoverrides_client.go index e71a33e51..18a77c798 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicyidpssignaturesoverrides_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicyidpssignaturesoverrides_client.go @@ -33,7 +33,7 @@ type FirewallPolicyIdpsSignaturesOverridesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewFirewallPolicyIdpsSignaturesOverridesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*FirewallPolicyIdpsSignaturesOverridesClient, error) { - cl, err := arm.NewClient(moduleName+".FirewallPolicyIdpsSignaturesOverridesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -54,6 +54,10 @@ func NewFirewallPolicyIdpsSignaturesOverridesClient(subscriptionID string, crede // method. func (client *FirewallPolicyIdpsSignaturesOverridesClient) Get(ctx context.Context, resourceGroupName string, firewallPolicyName string, options *FirewallPolicyIdpsSignaturesOverridesClientGetOptions) (FirewallPolicyIdpsSignaturesOverridesClientGetResponse, error) { var err error + const operationName = "FirewallPolicyIdpsSignaturesOverridesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, firewallPolicyName, options) if err != nil { return FirewallPolicyIdpsSignaturesOverridesClientGetResponse{}, err @@ -115,6 +119,10 @@ func (client *FirewallPolicyIdpsSignaturesOverridesClient) getHandleResponse(res // method. 
func (client *FirewallPolicyIdpsSignaturesOverridesClient) List(ctx context.Context, resourceGroupName string, firewallPolicyName string, options *FirewallPolicyIdpsSignaturesOverridesClientListOptions) (FirewallPolicyIdpsSignaturesOverridesClientListResponse, error) { var err error + const operationName = "FirewallPolicyIdpsSignaturesOverridesClient.List" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listCreateRequest(ctx, resourceGroupName, firewallPolicyName, options) if err != nil { return FirewallPolicyIdpsSignaturesOverridesClientListResponse{}, err @@ -177,6 +185,10 @@ func (client *FirewallPolicyIdpsSignaturesOverridesClient) listHandleResponse(re // method. func (client *FirewallPolicyIdpsSignaturesOverridesClient) Patch(ctx context.Context, resourceGroupName string, firewallPolicyName string, parameters SignaturesOverrides, options *FirewallPolicyIdpsSignaturesOverridesClientPatchOptions) (FirewallPolicyIdpsSignaturesOverridesClientPatchResponse, error) { var err error + const operationName = "FirewallPolicyIdpsSignaturesOverridesClient.Patch" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.patchCreateRequest(ctx, resourceGroupName, firewallPolicyName, parameters, options) if err != nil { return FirewallPolicyIdpsSignaturesOverridesClientPatchResponse{}, err @@ -242,6 +254,10 @@ func (client *FirewallPolicyIdpsSignaturesOverridesClient) patchHandleResponse(r // method. func (client *FirewallPolicyIdpsSignaturesOverridesClient) Put(ctx context.Context, resourceGroupName string, firewallPolicyName string, parameters SignaturesOverrides, options *FirewallPolicyIdpsSignaturesOverridesClientPutOptions) (FirewallPolicyIdpsSignaturesOverridesClientPutResponse, error) { var err error + const operationName = "FirewallPolicyIdpsSignaturesOverridesClient.Put" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.putCreateRequest(ctx, resourceGroupName, firewallPolicyName, parameters, options) if err != nil { return FirewallPolicyIdpsSignaturesOverridesClientPutResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicyrulecollectiongroups_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicyrulecollectiongroups_client.go index da03590ef..beaba2afd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicyrulecollectiongroups_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicyrulecollectiongroups_client.go @@ -33,7 +33,7 @@ type FirewallPolicyRuleCollectionGroupsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewFirewallPolicyRuleCollectionGroupsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*FirewallPolicyRuleCollectionGroupsClient, error) { - cl, err := arm.NewClient(moduleName+".FirewallPolicyRuleCollectionGroupsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *FirewallPolicyRuleCollectionGroupsClient) BeginCreateOrUpdate(ctx } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[FirewallPolicyRuleCollectionGroupsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[FirewallPolicyRuleCollectionGroupsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[FirewallPolicyRuleCollectionGroupsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *FirewallPolicyRuleCollectionGroupsClient) BeginCreateOrUpdate(ctx // Generated from API version 2023-05-01 func (client *FirewallPolicyRuleCollectionGroupsClient) createOrUpdate(ctx context.Context, resourceGroupName string, firewallPolicyName string, ruleCollectionGroupName string, parameters FirewallPolicyRuleCollectionGroup, options *FirewallPolicyRuleCollectionGroupsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "FirewallPolicyRuleCollectionGroupsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, firewallPolicyName, ruleCollectionGroupName, parameters, options) if err != nil { return nil, err @@ -140,10 +147,13 @@ func (client *FirewallPolicyRuleCollectionGroupsClient) BeginDelete(ctx context. } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[FirewallPolicyRuleCollectionGroupsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[FirewallPolicyRuleCollectionGroupsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[FirewallPolicyRuleCollectionGroupsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -153,6 +163,10 @@ func (client *FirewallPolicyRuleCollectionGroupsClient) BeginDelete(ctx context. 
// Generated from API version 2023-05-01 func (client *FirewallPolicyRuleCollectionGroupsClient) deleteOperation(ctx context.Context, resourceGroupName string, firewallPolicyName string, ruleCollectionGroupName string, options *FirewallPolicyRuleCollectionGroupsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "FirewallPolicyRuleCollectionGroupsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, firewallPolicyName, ruleCollectionGroupName, options) if err != nil { return nil, err @@ -209,6 +223,10 @@ func (client *FirewallPolicyRuleCollectionGroupsClient) deleteCreateRequest(ctx // method. func (client *FirewallPolicyRuleCollectionGroupsClient) Get(ctx context.Context, resourceGroupName string, firewallPolicyName string, ruleCollectionGroupName string, options *FirewallPolicyRuleCollectionGroupsClientGetOptions) (FirewallPolicyRuleCollectionGroupsClientGetResponse, error) { var err error + const operationName = "FirewallPolicyRuleCollectionGroupsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, firewallPolicyName, ruleCollectionGroupName, options) if err != nil { return FirewallPolicyRuleCollectionGroupsClientGetResponse{}, err @@ -277,25 +295,20 @@ func (client *FirewallPolicyRuleCollectionGroupsClient) NewListPager(resourceGro return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *FirewallPolicyRuleCollectionGroupsClientListResponse) (FirewallPolicyRuleCollectionGroupsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, firewallPolicyName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return FirewallPolicyRuleCollectionGroupsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "FirewallPolicyRuleCollectionGroupsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, firewallPolicyName, options) + }, nil) if err != nil { return FirewallPolicyRuleCollectionGroupsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return FirewallPolicyRuleCollectionGroupsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/flowlogs_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/flowlogs_client.go index 465e86008..25a9005f8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/flowlogs_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/flowlogs_client.go @@ -33,7 +33,7 @@ type 
FlowLogsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewFlowLogsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*FlowLogsClient, error) { - cl, err := arm.NewClient(moduleName+".FlowLogsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *FlowLogsClient) BeginCreateOrUpdate(ctx context.Context, resourceG } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[FlowLogsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[FlowLogsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[FlowLogsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *FlowLogsClient) BeginCreateOrUpdate(ctx context.Context, resourceG // Generated from API version 2023-05-01 func (client *FlowLogsClient) createOrUpdate(ctx context.Context, resourceGroupName string, networkWatcherName string, flowLogName string, parameters FlowLog, options *FlowLogsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "FlowLogsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, networkWatcherName, flowLogName, parameters, options) if err != nil { return nil, err @@ -139,10 +146,13 @@ func (client *FlowLogsClient) BeginDelete(ctx context.Context, resourceGroupName } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[FlowLogsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[FlowLogsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[FlowLogsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -152,6 +162,10 @@ func (client *FlowLogsClient) BeginDelete(ctx context.Context, resourceGroupName // Generated from API version 2023-05-01 func (client *FlowLogsClient) deleteOperation(ctx context.Context, resourceGroupName string, networkWatcherName string, flowLogName string, options *FlowLogsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "FlowLogsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, networkWatcherName, flowLogName, options) if err != nil { return nil, err @@ -207,6 +221,10 @@ func (client *FlowLogsClient) deleteCreateRequest(ctx context.Context, resourceG // - options - 
FlowLogsClientGetOptions contains the optional parameters for the FlowLogsClient.Get method. func (client *FlowLogsClient) Get(ctx context.Context, resourceGroupName string, networkWatcherName string, flowLogName string, options *FlowLogsClientGetOptions) (FlowLogsClientGetResponse, error) { var err error + const operationName = "FlowLogsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, networkWatcherName, flowLogName, options) if err != nil { return FlowLogsClientGetResponse{}, err @@ -274,25 +292,20 @@ func (client *FlowLogsClient) NewListPager(resourceGroupName string, networkWatc return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *FlowLogsClientListResponse) (FlowLogsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, networkWatcherName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return FlowLogsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "FlowLogsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, networkWatcherName, options) + }, nil) if err != nil { return FlowLogsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return FlowLogsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -342,6 +355,10 @@ func (client *FlowLogsClient) listHandleResponse(resp *http.Response) (FlowLogsC // - options - FlowLogsClientUpdateTagsOptions contains the optional parameters for the FlowLogsClient.UpdateTags method. func (client *FlowLogsClient) UpdateTags(ctx context.Context, resourceGroupName string, networkWatcherName string, flowLogName string, parameters TagsObject, options *FlowLogsClientUpdateTagsOptions) (FlowLogsClientUpdateTagsResponse, error) { var err error + const operationName = "FlowLogsClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, networkWatcherName, flowLogName, parameters, options) if err != nil { return FlowLogsClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/groups_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/groups_client.go index 7c1521aae..113bcc3bb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/groups_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/groups_client.go @@ -34,7 +34,7 @@ type GroupsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewGroupsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*GroupsClient, error) { - cl, err := arm.NewClient(moduleName+".GroupsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -56,6 +56,10 @@ func NewGroupsClient(subscriptionID string, credential azcore.TokenCredential, o // - options - GroupsClientCreateOrUpdateOptions contains the optional parameters for the GroupsClient.CreateOrUpdate method. func (client *GroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkManagerName string, networkGroupName string, parameters Group, options *GroupsClientCreateOrUpdateOptions) (GroupsClientCreateOrUpdateResponse, error) { var err error + const operationName = "GroupsClient.CreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, networkManagerName, networkGroupName, parameters, options) if err != nil { return GroupsClientCreateOrUpdateResponse{}, err @@ -136,10 +140,13 @@ func (client *GroupsClient) BeginDelete(ctx context.Context, resourceGroupName s } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[GroupsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[GroupsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[GroupsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -149,6 +156,10 @@ func (client *GroupsClient) BeginDelete(ctx context.Context, resourceGroupName s // Generated from API version 2023-05-01 func (client *GroupsClient) deleteOperation(ctx context.Context, resourceGroupName string, networkManagerName string, networkGroupName string, options *GroupsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "GroupsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, networkManagerName, networkGroupName, options) if err != nil { return nil, err @@ -207,6 +218,10 @@ func (client *GroupsClient) deleteCreateRequest(ctx context.Context, resourceGro // - options - GroupsClientGetOptions contains the optional parameters for the GroupsClient.Get method. 
func (client *GroupsClient) Get(ctx context.Context, resourceGroupName string, networkManagerName string, networkGroupName string, options *GroupsClientGetOptions) (GroupsClientGetResponse, error) { var err error + const operationName = "GroupsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, networkManagerName, networkGroupName, options) if err != nil { return GroupsClientGetResponse{}, err @@ -274,25 +289,20 @@ func (client *GroupsClient) NewListPager(resourceGroupName string, networkManage return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *GroupsClientListResponse) (GroupsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, networkManagerName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "GroupsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, networkManagerName, options) + }, nil) if err != nil { return GroupsClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return GroupsClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return GroupsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/hubroutetables_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/hubroutetables_client.go index 831c44840..d906077a5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/hubroutetables_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/hubroutetables_client.go @@ -33,7 +33,7 @@ type HubRouteTablesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewHubRouteTablesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*HubRouteTablesClient, error) { - cl, err := arm.NewClient(moduleName+".HubRouteTablesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *HubRouteTablesClient) BeginCreateOrUpdate(ctx context.Context, res } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[HubRouteTablesClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[HubRouteTablesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[HubRouteTablesClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *HubRouteTablesClient) BeginCreateOrUpdate(ctx context.Context, res // Generated from API version 2023-05-01 func (client *HubRouteTablesClient) createOrUpdate(ctx context.Context, resourceGroupName string, virtualHubName string, routeTableName string, routeTableParameters HubRouteTable, options *HubRouteTablesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "HubRouteTablesClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, virtualHubName, routeTableName, routeTableParameters, options) if err != nil { return nil, err @@ -140,10 +147,13 @@ func (client *HubRouteTablesClient) BeginDelete(ctx context.Context, resourceGro } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[HubRouteTablesClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[HubRouteTablesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[HubRouteTablesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -153,6 +163,10 @@ func (client *HubRouteTablesClient) BeginDelete(ctx context.Context, resourceGro // Generated from API version 2023-05-01 func (client *HubRouteTablesClient) deleteOperation(ctx context.Context, resourceGroupName string, virtualHubName string, routeTableName string, options *HubRouteTablesClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "HubRouteTablesClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, virtualHubName, routeTableName, options) if err != nil { return nil, err @@ -208,6 +222,10 @@ func (client *HubRouteTablesClient) deleteCreateRequest(ctx context.Context, res // - options - HubRouteTablesClientGetOptions contains the optional 
parameters for the HubRouteTablesClient.Get method. func (client *HubRouteTablesClient) Get(ctx context.Context, resourceGroupName string, virtualHubName string, routeTableName string, options *HubRouteTablesClientGetOptions) (HubRouteTablesClientGetResponse, error) { var err error + const operationName = "HubRouteTablesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, virtualHubName, routeTableName, options) if err != nil { return HubRouteTablesClientGetResponse{}, err @@ -275,25 +293,20 @@ func (client *HubRouteTablesClient) NewListPager(resourceGroupName string, virtu return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *HubRouteTablesClientListResponse) (HubRouteTablesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, virtualHubName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return HubRouteTablesClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "HubRouteTablesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, virtualHubName, options) + }, nil) if err != nil { return HubRouteTablesClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return HubRouteTablesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/hubvirtualnetworkconnections_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/hubvirtualnetworkconnections_client.go index d967eea8c..e47c8bb56 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/hubvirtualnetworkconnections_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/hubvirtualnetworkconnections_client.go @@ -33,7 +33,7 @@ type HubVirtualNetworkConnectionsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewHubVirtualNetworkConnectionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*HubVirtualNetworkConnectionsClient, error) { - cl, err := arm.NewClient(moduleName+".HubVirtualNetworkConnectionsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *HubVirtualNetworkConnectionsClient) BeginCreateOrUpdate(ctx contex } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[HubVirtualNetworkConnectionsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[HubVirtualNetworkConnectionsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[HubVirtualNetworkConnectionsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *HubVirtualNetworkConnectionsClient) BeginCreateOrUpdate(ctx contex // Generated from API version 2023-05-01 func (client *HubVirtualNetworkConnectionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, virtualHubName string, connectionName string, hubVirtualNetworkConnectionParameters HubVirtualNetworkConnection, options *HubVirtualNetworkConnectionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "HubVirtualNetworkConnectionsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, virtualHubName, connectionName, hubVirtualNetworkConnectionParameters, options) if err != nil { return nil, err @@ -140,10 +147,13 @@ func (client *HubVirtualNetworkConnectionsClient) BeginDelete(ctx context.Contex } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[HubVirtualNetworkConnectionsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[HubVirtualNetworkConnectionsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[HubVirtualNetworkConnectionsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -153,6 +163,10 @@ func (client *HubVirtualNetworkConnectionsClient) BeginDelete(ctx context.Contex // Generated from API version 2023-05-01 func (client *HubVirtualNetworkConnectionsClient) deleteOperation(ctx context.Context, resourceGroupName string, virtualHubName string, connectionName string, options *HubVirtualNetworkConnectionsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "HubVirtualNetworkConnectionsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, 
resourceGroupName, virtualHubName, connectionName, options) if err != nil { return nil, err @@ -209,6 +223,10 @@ func (client *HubVirtualNetworkConnectionsClient) deleteCreateRequest(ctx contex // method. func (client *HubVirtualNetworkConnectionsClient) Get(ctx context.Context, resourceGroupName string, virtualHubName string, connectionName string, options *HubVirtualNetworkConnectionsClientGetOptions) (HubVirtualNetworkConnectionsClientGetResponse, error) { var err error + const operationName = "HubVirtualNetworkConnectionsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, virtualHubName, connectionName, options) if err != nil { return HubVirtualNetworkConnectionsClientGetResponse{}, err @@ -277,25 +295,20 @@ func (client *HubVirtualNetworkConnectionsClient) NewListPager(resourceGroupName return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *HubVirtualNetworkConnectionsClientListResponse) (HubVirtualNetworkConnectionsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, virtualHubName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return HubVirtualNetworkConnectionsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "HubVirtualNetworkConnectionsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, virtualHubName, options) + }, nil) if err != nil { return HubVirtualNetworkConnectionsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return HubVirtualNetworkConnectionsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/inboundnatrules_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/inboundnatrules_client.go index 89906fcbc..464d80dc0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/inboundnatrules_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/inboundnatrules_client.go @@ -33,7 +33,7 @@ type InboundNatRulesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewInboundNatRulesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*InboundNatRulesClient, error) { - cl, err := arm.NewClient(moduleName+".InboundNatRulesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *InboundNatRulesClient) BeginCreateOrUpdate(ctx context.Context, re } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[InboundNatRulesClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[InboundNatRulesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[InboundNatRulesClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *InboundNatRulesClient) BeginCreateOrUpdate(ctx context.Context, re // Generated from API version 2023-05-01 func (client *InboundNatRulesClient) createOrUpdate(ctx context.Context, resourceGroupName string, loadBalancerName string, inboundNatRuleName string, inboundNatRuleParameters InboundNatRule, options *InboundNatRulesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "InboundNatRulesClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, loadBalancerName, inboundNatRuleName, inboundNatRuleParameters, options) if err != nil { return nil, err @@ -140,10 +147,13 @@ func (client *InboundNatRulesClient) BeginDelete(ctx context.Context, resourceGr } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[InboundNatRulesClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[InboundNatRulesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[InboundNatRulesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -153,6 +163,10 @@ func (client *InboundNatRulesClient) BeginDelete(ctx context.Context, resourceGr // Generated from API version 2023-05-01 func (client *InboundNatRulesClient) deleteOperation(ctx context.Context, resourceGroupName string, loadBalancerName string, inboundNatRuleName string, options *InboundNatRulesClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "InboundNatRulesClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, loadBalancerName, inboundNatRuleName, options) if err != nil { return nil, err @@ -208,6 +222,10 @@ func (client *InboundNatRulesClient) deleteCreateRequest(ctx context.Context, re // - options - 
InboundNatRulesClientGetOptions contains the optional parameters for the InboundNatRulesClient.Get method. func (client *InboundNatRulesClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string, inboundNatRuleName string, options *InboundNatRulesClientGetOptions) (InboundNatRulesClientGetResponse, error) { var err error + const operationName = "InboundNatRulesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, loadBalancerName, inboundNatRuleName, options) if err != nil { return InboundNatRulesClientGetResponse{}, err @@ -279,25 +297,20 @@ func (client *InboundNatRulesClient) NewListPager(resourceGroupName string, load return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *InboundNatRulesClientListResponse) (InboundNatRulesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, loadBalancerName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return InboundNatRulesClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "InboundNatRulesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, loadBalancerName, options) + }, nil) if err != nil { return InboundNatRulesClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return InboundNatRulesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/inboundsecurityrule_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/inboundsecurityrule_client.go index ca53f620f..41108ace5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/inboundsecurityrule_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/inboundsecurityrule_client.go @@ -33,7 +33,7 @@ type InboundSecurityRuleClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewInboundSecurityRuleClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*InboundSecurityRuleClient, error) { - cl, err := arm.NewClient(moduleName+".InboundSecurityRuleClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *InboundSecurityRuleClient) BeginCreateOrUpdate(ctx context.Context } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[InboundSecurityRuleClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[InboundSecurityRuleClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[InboundSecurityRuleClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *InboundSecurityRuleClient) BeginCreateOrUpdate(ctx context.Context // Generated from API version 2023-05-01 func (client *InboundSecurityRuleClient) createOrUpdate(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, ruleCollectionName string, parameters InboundSecurityRule, options *InboundSecurityRuleClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "InboundSecurityRuleClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, networkVirtualApplianceName, ruleCollectionName, parameters, options) if err != nil { return nil, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/interfaceipconfigurations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/interfaceipconfigurations_client.go index 26a48e5b4..805f961c7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/interfaceipconfigurations_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/interfaceipconfigurations_client.go @@ -33,7 +33,7 @@ type InterfaceIPConfigurationsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewInterfaceIPConfigurationsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*InterfaceIPConfigurationsClient, error) { - cl, err := arm.NewClient(moduleName+".InterfaceIPConfigurationsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -55,6 +55,10 @@ func NewInterfaceIPConfigurationsClient(subscriptionID string, credential azcore // method. 
func (client *InterfaceIPConfigurationsClient) Get(ctx context.Context, resourceGroupName string, networkInterfaceName string, ipConfigurationName string, options *InterfaceIPConfigurationsClientGetOptions) (InterfaceIPConfigurationsClientGetResponse, error) { var err error + const operationName = "InterfaceIPConfigurationsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, networkInterfaceName, ipConfigurationName, options) if err != nil { return InterfaceIPConfigurationsClientGetResponse{}, err @@ -123,25 +127,20 @@ func (client *InterfaceIPConfigurationsClient) NewListPager(resourceGroupName st return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *InterfaceIPConfigurationsClientListResponse) (InterfaceIPConfigurationsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, networkInterfaceName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "InterfaceIPConfigurationsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, networkInterfaceName, options) + }, nil) if err != nil { return InterfaceIPConfigurationsClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return InterfaceIPConfigurationsClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return InterfaceIPConfigurationsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/interfaceloadbalancers_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/interfaceloadbalancers_client.go index 3db51e4ba..dd7e24733 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/interfaceloadbalancers_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/interfaceloadbalancers_client.go @@ -33,7 +33,7 @@ type InterfaceLoadBalancersClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewInterfaceLoadBalancersClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*InterfaceLoadBalancersClient, error) { - cl, err := arm.NewClient(moduleName+".InterfaceLoadBalancersClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -57,25 +57,20 @@ func (client *InterfaceLoadBalancersClient) NewListPager(resourceGroupName strin return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *InterfaceLoadBalancersClientListResponse) (InterfaceLoadBalancersClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, networkInterfaceName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "InterfaceLoadBalancersClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, networkInterfaceName, options) + }, nil) if err != nil { return InterfaceLoadBalancersClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return InterfaceLoadBalancersClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return InterfaceLoadBalancersClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/interfaces_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/interfaces_client.go index 43e9d6843..e28e01c0b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/interfaces_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/interfaces_client.go @@ -33,7 +33,7 @@ type InterfacesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewInterfacesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*InterfacesClient, error) { - cl, err := arm.NewClient(moduleName+".InterfacesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *InterfacesClient) BeginCreateOrUpdate(ctx context.Context, resourc } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[InterfacesClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[InterfacesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[InterfacesClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *InterfacesClient) BeginCreateOrUpdate(ctx context.Context, resourc // Generated from API version 2023-05-01 func (client *InterfacesClient) createOrUpdate(ctx context.Context, resourceGroupName string, networkInterfaceName string, parameters Interface, options *InterfacesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "InterfacesClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, networkInterfaceName, parameters, options) if err != nil { return nil, err @@ -133,10 +140,13 @@ func (client *InterfacesClient) BeginDelete(ctx context.Context, resourceGroupNa } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[InterfacesClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[InterfacesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[InterfacesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -146,6 +156,10 @@ func (client *InterfacesClient) BeginDelete(ctx context.Context, resourceGroupNa // Generated from API version 2023-05-01 func (client *InterfacesClient) deleteOperation(ctx context.Context, resourceGroupName string, networkInterfaceName string, options *InterfacesClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "InterfacesClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, networkInterfaceName, options) if err != nil { return nil, err @@ -196,6 +210,10 @@ func (client *InterfacesClient) deleteCreateRequest(ctx context.Context, resourc // - options - InterfacesClientGetOptions contains the optional parameters for the InterfacesClient.Get method. 
func (client *InterfacesClient) Get(ctx context.Context, resourceGroupName string, networkInterfaceName string, options *InterfacesClientGetOptions) (InterfacesClientGetResponse, error) { var err error + const operationName = "InterfacesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, networkInterfaceName, options) if err != nil { return InterfacesClientGetResponse{}, err @@ -262,6 +280,10 @@ func (client *InterfacesClient) getHandleResponse(resp *http.Response) (Interfac // method. func (client *InterfacesClient) GetCloudServiceNetworkInterface(ctx context.Context, resourceGroupName string, cloudServiceName string, roleInstanceName string, networkInterfaceName string, options *InterfacesClientGetCloudServiceNetworkInterfaceOptions) (InterfacesClientGetCloudServiceNetworkInterfaceResponse, error) { var err error + const operationName = "InterfacesClient.GetCloudServiceNetworkInterface" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCloudServiceNetworkInterfaceCreateRequest(ctx, resourceGroupName, cloudServiceName, roleInstanceName, networkInterfaceName, options) if err != nil { return InterfacesClientGetCloudServiceNetworkInterfaceResponse{}, err @@ -340,10 +362,13 @@ func (client *InterfacesClient) BeginGetEffectiveRouteTable(ctx context.Context, } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[InterfacesClientGetEffectiveRouteTableResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[InterfacesClientGetEffectiveRouteTableResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[InterfacesClientGetEffectiveRouteTableResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -353,6 +378,10 @@ func (client *InterfacesClient) BeginGetEffectiveRouteTable(ctx context.Context, // Generated from API version 2023-05-01 func (client *InterfacesClient) getEffectiveRouteTable(ctx context.Context, resourceGroupName string, networkInterfaceName string, options *InterfacesClientBeginGetEffectiveRouteTableOptions) (*http.Response, error) { var err error + const operationName = "InterfacesClient.BeginGetEffectiveRouteTable" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getEffectiveRouteTableCreateRequest(ctx, resourceGroupName, networkInterfaceName, options) if err != nil { return nil, err @@ -408,6 +437,10 @@ func (client *InterfacesClient) getEffectiveRouteTableCreateRequest(ctx context. // method. 
func (client *InterfacesClient) GetVirtualMachineScaleSetIPConfiguration(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, ipConfigurationName string, options *InterfacesClientGetVirtualMachineScaleSetIPConfigurationOptions) (InterfacesClientGetVirtualMachineScaleSetIPConfigurationResponse, error) { var err error + const operationName = "InterfacesClient.GetVirtualMachineScaleSetIPConfiguration" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getVirtualMachineScaleSetIPConfigurationCreateRequest(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, ipConfigurationName, options) if err != nil { return InterfacesClientGetVirtualMachineScaleSetIPConfigurationResponse{}, err @@ -486,6 +519,10 @@ func (client *InterfacesClient) getVirtualMachineScaleSetIPConfigurationHandleRe // method. func (client *InterfacesClient) GetVirtualMachineScaleSetNetworkInterface(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, options *InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceOptions) (InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceResponse, error) { var err error + const operationName = "InterfacesClient.GetVirtualMachineScaleSetNetworkInterface" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getVirtualMachineScaleSetNetworkInterfaceCreateRequest(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, options) if err != nil { return InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceResponse{}, err @@ -559,25 +596,20 @@ func (client *InterfacesClient) NewListPager(resourceGroupName string, options * return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *InterfacesClientListResponse) (InterfacesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return InterfacesClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "InterfacesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return InterfacesClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return InterfacesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -622,25 +654,20 @@ func (client *InterfacesClient) NewListAllPager(options *InterfacesClientListAll return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *InterfacesClientListAllResponse) (InterfacesClientListAllResponse, error) { - var req 
*policy.Request - var err error - if page == nil { - req, err = client.listAllCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return InterfacesClientListAllResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "InterfacesClient.NewListAllPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAllCreateRequest(ctx, options) + }, nil) if err != nil { return InterfacesClientListAllResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return InterfacesClientListAllResponse{}, runtime.NewResponseError(resp) - } return client.listAllHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -684,25 +711,20 @@ func (client *InterfacesClient) NewListCloudServiceNetworkInterfacesPager(resour return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *InterfacesClientListCloudServiceNetworkInterfacesResponse) (InterfacesClientListCloudServiceNetworkInterfacesResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCloudServiceNetworkInterfacesCreateRequest(ctx, resourceGroupName, cloudServiceName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return InterfacesClientListCloudServiceNetworkInterfacesResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "InterfacesClient.NewListCloudServiceNetworkInterfacesPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCloudServiceNetworkInterfacesCreateRequest(ctx, resourceGroupName, cloudServiceName, options) + }, nil) if err != nil { return InterfacesClientListCloudServiceNetworkInterfacesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return InterfacesClientListCloudServiceNetworkInterfacesResponse{}, runtime.NewResponseError(resp) - } return client.listCloudServiceNetworkInterfacesHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -756,25 +778,20 @@ func (client *InterfacesClient) NewListCloudServiceRoleInstanceNetworkInterfaces return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *InterfacesClientListCloudServiceRoleInstanceNetworkInterfacesResponse) (InterfacesClientListCloudServiceRoleInstanceNetworkInterfacesResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCloudServiceRoleInstanceNetworkInterfacesCreateRequest(ctx, resourceGroupName, cloudServiceName, roleInstanceName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return InterfacesClientListCloudServiceRoleInstanceNetworkInterfacesResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "InterfacesClient.NewListCloudServiceRoleInstanceNetworkInterfacesPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), 
nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCloudServiceRoleInstanceNetworkInterfacesCreateRequest(ctx, resourceGroupName, cloudServiceName, roleInstanceName, options) + }, nil) if err != nil { return InterfacesClientListCloudServiceRoleInstanceNetworkInterfacesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return InterfacesClientListCloudServiceRoleInstanceNetworkInterfacesResponse{}, runtime.NewResponseError(resp) - } return client.listCloudServiceRoleInstanceNetworkInterfacesHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -833,10 +850,13 @@ func (client *InterfacesClient) BeginListEffectiveNetworkSecurityGroups(ctx cont } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[InterfacesClientListEffectiveNetworkSecurityGroupsResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[InterfacesClientListEffectiveNetworkSecurityGroupsResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[InterfacesClientListEffectiveNetworkSecurityGroupsResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -846,6 +866,10 @@ func (client *InterfacesClient) BeginListEffectiveNetworkSecurityGroups(ctx cont // Generated from API version 2023-05-01 func (client *InterfacesClient) listEffectiveNetworkSecurityGroups(ctx context.Context, resourceGroupName string, networkInterfaceName string, options *InterfacesClientBeginListEffectiveNetworkSecurityGroupsOptions) (*http.Response, error) { var err error + const operationName = "InterfacesClient.BeginListEffectiveNetworkSecurityGroups" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listEffectiveNetworkSecurityGroupsCreateRequest(ctx, resourceGroupName, networkInterfaceName, options) if err != nil { return nil, err @@ -903,25 +927,20 @@ func (client *InterfacesClient) NewListVirtualMachineScaleSetIPConfigurationsPag return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *InterfacesClientListVirtualMachineScaleSetIPConfigurationsResponse) (InterfacesClientListVirtualMachineScaleSetIPConfigurationsResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listVirtualMachineScaleSetIPConfigurationsCreateRequest(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "InterfacesClient.NewListVirtualMachineScaleSetIPConfigurationsPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listVirtualMachineScaleSetIPConfigurationsCreateRequest(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, options) + }, nil) if err != nil { return InterfacesClientListVirtualMachineScaleSetIPConfigurationsResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != 
nil { - return InterfacesClientListVirtualMachineScaleSetIPConfigurationsResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return InterfacesClientListVirtualMachineScaleSetIPConfigurationsResponse{}, runtime.NewResponseError(resp) - } return client.listVirtualMachineScaleSetIPConfigurationsHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -984,25 +1003,20 @@ func (client *InterfacesClient) NewListVirtualMachineScaleSetNetworkInterfacesPa return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *InterfacesClientListVirtualMachineScaleSetNetworkInterfacesResponse) (InterfacesClientListVirtualMachineScaleSetNetworkInterfacesResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listVirtualMachineScaleSetNetworkInterfacesCreateRequest(ctx, resourceGroupName, virtualMachineScaleSetName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "InterfacesClient.NewListVirtualMachineScaleSetNetworkInterfacesPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listVirtualMachineScaleSetNetworkInterfacesCreateRequest(ctx, resourceGroupName, virtualMachineScaleSetName, options) + }, nil) if err != nil { return InterfacesClientListVirtualMachineScaleSetNetworkInterfacesResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return InterfacesClientListVirtualMachineScaleSetNetworkInterfacesResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return InterfacesClientListVirtualMachineScaleSetNetworkInterfacesResponse{}, runtime.NewResponseError(resp) - } return client.listVirtualMachineScaleSetNetworkInterfacesHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -1056,25 +1070,20 @@ func (client *InterfacesClient) NewListVirtualMachineScaleSetVMNetworkInterfaces return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *InterfacesClientListVirtualMachineScaleSetVMNetworkInterfacesResponse) (InterfacesClientListVirtualMachineScaleSetVMNetworkInterfacesResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listVirtualMachineScaleSetVMNetworkInterfacesCreateRequest(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "InterfacesClient.NewListVirtualMachineScaleSetVMNetworkInterfacesPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listVirtualMachineScaleSetVMNetworkInterfacesCreateRequest(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, options) + }, nil) if err != nil { return InterfacesClientListVirtualMachineScaleSetVMNetworkInterfacesResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return InterfacesClientListVirtualMachineScaleSetVMNetworkInterfacesResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return 
InterfacesClientListVirtualMachineScaleSetVMNetworkInterfacesResponse{}, runtime.NewResponseError(resp) - } return client.listVirtualMachineScaleSetVMNetworkInterfacesHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -1127,6 +1136,10 @@ func (client *InterfacesClient) listVirtualMachineScaleSetVMNetworkInterfacesHan // - options - InterfacesClientUpdateTagsOptions contains the optional parameters for the InterfacesClient.UpdateTags method. func (client *InterfacesClient) UpdateTags(ctx context.Context, resourceGroupName string, networkInterfaceName string, parameters TagsObject, options *InterfacesClientUpdateTagsOptions) (InterfacesClientUpdateTagsResponse, error) { var err error + const operationName = "InterfacesClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, networkInterfaceName, parameters, options) if err != nil { return InterfacesClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/interfacetapconfigurations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/interfacetapconfigurations_client.go index eb334b62a..fdff9756b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/interfacetapconfigurations_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/interfacetapconfigurations_client.go @@ -33,7 +33,7 @@ type InterfaceTapConfigurationsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewInterfaceTapConfigurationsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*InterfaceTapConfigurationsClient, error) { - cl, err := arm.NewClient(moduleName+".InterfaceTapConfigurationsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *InterfaceTapConfigurationsClient) BeginCreateOrUpdate(ctx context. } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[InterfaceTapConfigurationsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[InterfaceTapConfigurationsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[InterfaceTapConfigurationsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *InterfaceTapConfigurationsClient) BeginCreateOrUpdate(ctx context. 
// Generated from API version 2023-05-01 func (client *InterfaceTapConfigurationsClient) createOrUpdate(ctx context.Context, resourceGroupName string, networkInterfaceName string, tapConfigurationName string, tapConfigurationParameters InterfaceTapConfiguration, options *InterfaceTapConfigurationsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "InterfaceTapConfigurationsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, networkInterfaceName, tapConfigurationName, tapConfigurationParameters, options) if err != nil { return nil, err @@ -140,10 +147,13 @@ func (client *InterfaceTapConfigurationsClient) BeginDelete(ctx context.Context, } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[InterfaceTapConfigurationsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[InterfaceTapConfigurationsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[InterfaceTapConfigurationsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -153,6 +163,10 @@ func (client *InterfaceTapConfigurationsClient) BeginDelete(ctx context.Context, // Generated from API version 2023-05-01 func (client *InterfaceTapConfigurationsClient) deleteOperation(ctx context.Context, resourceGroupName string, networkInterfaceName string, tapConfigurationName string, options *InterfaceTapConfigurationsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "InterfaceTapConfigurationsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, networkInterfaceName, tapConfigurationName, options) if err != nil { return nil, err @@ -209,6 +223,10 @@ func (client *InterfaceTapConfigurationsClient) deleteCreateRequest(ctx context. // method. 
func (client *InterfaceTapConfigurationsClient) Get(ctx context.Context, resourceGroupName string, networkInterfaceName string, tapConfigurationName string, options *InterfaceTapConfigurationsClientGetOptions) (InterfaceTapConfigurationsClientGetResponse, error) { var err error + const operationName = "InterfaceTapConfigurationsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, networkInterfaceName, tapConfigurationName, options) if err != nil { return InterfaceTapConfigurationsClientGetResponse{}, err @@ -277,25 +295,20 @@ func (client *InterfaceTapConfigurationsClient) NewListPager(resourceGroupName s return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *InterfaceTapConfigurationsClientListResponse) (InterfaceTapConfigurationsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, networkInterfaceName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return InterfaceTapConfigurationsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "InterfaceTapConfigurationsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, networkInterfaceName, options) + }, nil) if err != nil { return InterfaceTapConfigurationsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return InterfaceTapConfigurationsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/ipallocations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/ipallocations_client.go index 4bed5730b..3aff2a84d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/ipallocations_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/ipallocations_client.go @@ -33,7 +33,7 @@ type IPAllocationsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewIPAllocationsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*IPAllocationsClient, error) { - cl, err := arm.NewClient(moduleName+".IPAllocationsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *IPAllocationsClient) BeginCreateOrUpdate(ctx context.Context, reso } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[IPAllocationsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[IPAllocationsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[IPAllocationsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *IPAllocationsClient) BeginCreateOrUpdate(ctx context.Context, reso // Generated from API version 2023-05-01 func (client *IPAllocationsClient) createOrUpdate(ctx context.Context, resourceGroupName string, ipAllocationName string, parameters IPAllocation, options *IPAllocationsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "IPAllocationsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, ipAllocationName, parameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *IPAllocationsClient) BeginDelete(ctx context.Context, resourceGrou } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[IPAllocationsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[IPAllocationsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[IPAllocationsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *IPAllocationsClient) BeginDelete(ctx context.Context, resourceGrou // Generated from API version 2023-05-01 func (client *IPAllocationsClient) deleteOperation(ctx context.Context, resourceGroupName string, ipAllocationName string, options *IPAllocationsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "IPAllocationsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, ipAllocationName, options) if err != nil { return nil, err @@ -197,6 +211,10 @@ func (client *IPAllocationsClient) deleteCreateRequest(ctx context.Context, reso // - options - IPAllocationsClientGetOptions contains the optional parameters for the IPAllocationsClient.Get method. 
func (client *IPAllocationsClient) Get(ctx context.Context, resourceGroupName string, ipAllocationName string, options *IPAllocationsClientGetOptions) (IPAllocationsClientGetResponse, error) { var err error + const operationName = "IPAllocationsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, ipAllocationName, options) if err != nil { return IPAllocationsClientGetResponse{}, err @@ -261,25 +279,20 @@ func (client *IPAllocationsClient) NewListPager(options *IPAllocationsClientList return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *IPAllocationsClientListResponse) (IPAllocationsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return IPAllocationsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "IPAllocationsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) if err != nil { return IPAllocationsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return IPAllocationsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -322,25 +335,20 @@ func (client *IPAllocationsClient) NewListByResourceGroupPager(resourceGroupName return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *IPAllocationsClientListByResourceGroupResponse) (IPAllocationsClientListByResourceGroupResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "IPAllocationsClient.NewListByResourceGroupPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return IPAllocationsClientListByResourceGroupResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return IPAllocationsClientListByResourceGroupResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return IPAllocationsClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) - } return client.listByResourceGroupHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -386,6 +394,10 @@ func (client *IPAllocationsClient) listByResourceGroupHandleResponse(resp *http. // method. 
func (client *IPAllocationsClient) UpdateTags(ctx context.Context, resourceGroupName string, ipAllocationName string, parameters TagsObject, options *IPAllocationsClientUpdateTagsOptions) (IPAllocationsClientUpdateTagsResponse, error) { var err error + const operationName = "IPAllocationsClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, ipAllocationName, parameters, options) if err != nil { return IPAllocationsClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/ipgroups_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/ipgroups_client.go index c64351ce1..e64510aa5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/ipgroups_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/ipgroups_client.go @@ -33,7 +33,7 @@ type IPGroupsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewIPGroupsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*IPGroupsClient, error) { - cl, err := arm.NewClient(moduleName+".IPGroupsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *IPGroupsClient) BeginCreateOrUpdate(ctx context.Context, resourceG } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[IPGroupsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[IPGroupsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[IPGroupsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *IPGroupsClient) BeginCreateOrUpdate(ctx context.Context, resourceG // Generated from API version 2023-05-01 func (client *IPGroupsClient) createOrUpdate(ctx context.Context, resourceGroupName string, ipGroupsName string, parameters IPGroup, options *IPGroupsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "IPGroupsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, ipGroupsName, parameters, options) if err != nil { return nil, err @@ -133,10 +140,13 @@ func (client *IPGroupsClient) BeginDelete(ctx context.Context, resourceGroupName } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[IPGroupsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return 
runtime.NewPollerFromResumeToken[IPGroupsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[IPGroupsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -146,6 +156,10 @@ func (client *IPGroupsClient) BeginDelete(ctx context.Context, resourceGroupName // Generated from API version 2023-05-01 func (client *IPGroupsClient) deleteOperation(ctx context.Context, resourceGroupName string, ipGroupsName string, options *IPGroupsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "IPGroupsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, ipGroupsName, options) if err != nil { return nil, err @@ -196,6 +210,10 @@ func (client *IPGroupsClient) deleteCreateRequest(ctx context.Context, resourceG // - options - IPGroupsClientGetOptions contains the optional parameters for the IPGroupsClient.Get method. func (client *IPGroupsClient) Get(ctx context.Context, resourceGroupName string, ipGroupsName string, options *IPGroupsClientGetOptions) (IPGroupsClientGetResponse, error) { var err error + const operationName = "IPGroupsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, ipGroupsName, options) if err != nil { return IPGroupsClientGetResponse{}, err @@ -260,25 +278,20 @@ func (client *IPGroupsClient) NewListPager(options *IPGroupsClientListOptions) * return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *IPGroupsClientListResponse) (IPGroupsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return IPGroupsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "IPGroupsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) if err != nil { return IPGroupsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return IPGroupsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -321,25 +334,20 @@ func (client *IPGroupsClient) NewListByResourceGroupPager(resourceGroupName stri return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *IPGroupsClientListByResourceGroupResponse) (IPGroupsClientListByResourceGroupResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, 
runtime.CtxAPINameKey{}, "IPGroupsClient.NewListByResourceGroupPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return IPGroupsClientListByResourceGroupResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return IPGroupsClientListByResourceGroupResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return IPGroupsClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) - } return client.listByResourceGroupHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -384,6 +392,10 @@ func (client *IPGroupsClient) listByResourceGroupHandleResponse(resp *http.Respo // - options - IPGroupsClientUpdateGroupsOptions contains the optional parameters for the IPGroupsClient.UpdateGroups method. func (client *IPGroupsClient) UpdateGroups(ctx context.Context, resourceGroupName string, ipGroupsName string, parameters TagsObject, options *IPGroupsClientUpdateGroupsOptions) (IPGroupsClientUpdateGroupsResponse, error) { var err error + const operationName = "IPGroupsClient.UpdateGroups" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateGroupsCreateRequest(ctx, resourceGroupName, ipGroupsName, parameters, options) if err != nil { return IPGroupsClientUpdateGroupsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancerbackendaddresspools_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancerbackendaddresspools_client.go index 1ce4047e6..f0c7b034f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancerbackendaddresspools_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancerbackendaddresspools_client.go @@ -33,7 +33,7 @@ type LoadBalancerBackendAddressPoolsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewLoadBalancerBackendAddressPoolsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*LoadBalancerBackendAddressPoolsClient, error) { - cl, err := arm.NewClient(moduleName+".LoadBalancerBackendAddressPoolsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *LoadBalancerBackendAddressPoolsClient) BeginCreateOrUpdate(ctx con } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[LoadBalancerBackendAddressPoolsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[LoadBalancerBackendAddressPoolsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[LoadBalancerBackendAddressPoolsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *LoadBalancerBackendAddressPoolsClient) BeginCreateOrUpdate(ctx con // Generated from API version 2023-05-01 func (client *LoadBalancerBackendAddressPoolsClient) createOrUpdate(ctx context.Context, resourceGroupName string, loadBalancerName string, backendAddressPoolName string, parameters BackendAddressPool, options *LoadBalancerBackendAddressPoolsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "LoadBalancerBackendAddressPoolsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, loadBalancerName, backendAddressPoolName, parameters, options) if err != nil { return nil, err @@ -140,10 +147,13 @@ func (client *LoadBalancerBackendAddressPoolsClient) BeginDelete(ctx context.Con } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[LoadBalancerBackendAddressPoolsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[LoadBalancerBackendAddressPoolsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[LoadBalancerBackendAddressPoolsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -153,6 +163,10 @@ func (client *LoadBalancerBackendAddressPoolsClient) BeginDelete(ctx context.Con // Generated from API version 2023-05-01 func (client *LoadBalancerBackendAddressPoolsClient) deleteOperation(ctx context.Context, resourceGroupName string, loadBalancerName string, backendAddressPoolName string, options *LoadBalancerBackendAddressPoolsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "LoadBalancerBackendAddressPoolsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, 
resourceGroupName, loadBalancerName, backendAddressPoolName, options) if err != nil { return nil, err @@ -209,6 +223,10 @@ func (client *LoadBalancerBackendAddressPoolsClient) deleteCreateRequest(ctx con // method. func (client *LoadBalancerBackendAddressPoolsClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string, backendAddressPoolName string, options *LoadBalancerBackendAddressPoolsClientGetOptions) (LoadBalancerBackendAddressPoolsClientGetResponse, error) { var err error + const operationName = "LoadBalancerBackendAddressPoolsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, loadBalancerName, backendAddressPoolName, options) if err != nil { return LoadBalancerBackendAddressPoolsClientGetResponse{}, err @@ -277,25 +295,20 @@ func (client *LoadBalancerBackendAddressPoolsClient) NewListPager(resourceGroupN return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *LoadBalancerBackendAddressPoolsClientListResponse) (LoadBalancerBackendAddressPoolsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, loadBalancerName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return LoadBalancerBackendAddressPoolsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "LoadBalancerBackendAddressPoolsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, loadBalancerName, options) + }, nil) if err != nil { return LoadBalancerBackendAddressPoolsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return LoadBalancerBackendAddressPoolsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancerfrontendipconfigurations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancerfrontendipconfigurations_client.go index 6a09ff1d3..8d996375c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancerfrontendipconfigurations_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancerfrontendipconfigurations_client.go @@ -33,7 +33,7 @@ type LoadBalancerFrontendIPConfigurationsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewLoadBalancerFrontendIPConfigurationsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*LoadBalancerFrontendIPConfigurationsClient, error) { - cl, err := arm.NewClient(moduleName+".LoadBalancerFrontendIPConfigurationsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -55,6 +55,10 @@ func NewLoadBalancerFrontendIPConfigurationsClient(subscriptionID string, creden // method. func (client *LoadBalancerFrontendIPConfigurationsClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string, frontendIPConfigurationName string, options *LoadBalancerFrontendIPConfigurationsClientGetOptions) (LoadBalancerFrontendIPConfigurationsClientGetResponse, error) { var err error + const operationName = "LoadBalancerFrontendIPConfigurationsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, loadBalancerName, frontendIPConfigurationName, options) if err != nil { return LoadBalancerFrontendIPConfigurationsClientGetResponse{}, err @@ -123,25 +127,20 @@ func (client *LoadBalancerFrontendIPConfigurationsClient) NewListPager(resourceG return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *LoadBalancerFrontendIPConfigurationsClientListResponse) (LoadBalancerFrontendIPConfigurationsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, loadBalancerName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "LoadBalancerFrontendIPConfigurationsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, loadBalancerName, options) + }, nil) if err != nil { return LoadBalancerFrontendIPConfigurationsClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return LoadBalancerFrontendIPConfigurationsClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return LoadBalancerFrontendIPConfigurationsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancerloadbalancingrules_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancerloadbalancingrules_client.go index 6e07bbafc..2e3039096 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancerloadbalancingrules_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancerloadbalancingrules_client.go @@ -33,7 +33,7 @@ type LoadBalancerLoadBalancingRulesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewLoadBalancerLoadBalancingRulesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*LoadBalancerLoadBalancingRulesClient, error) { - cl, err := arm.NewClient(moduleName+".LoadBalancerLoadBalancingRulesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -55,6 +55,10 @@ func NewLoadBalancerLoadBalancingRulesClient(subscriptionID string, credential a // method. func (client *LoadBalancerLoadBalancingRulesClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string, loadBalancingRuleName string, options *LoadBalancerLoadBalancingRulesClientGetOptions) (LoadBalancerLoadBalancingRulesClientGetResponse, error) { var err error + const operationName = "LoadBalancerLoadBalancingRulesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, loadBalancerName, loadBalancingRuleName, options) if err != nil { return LoadBalancerLoadBalancingRulesClientGetResponse{}, err @@ -123,25 +127,20 @@ func (client *LoadBalancerLoadBalancingRulesClient) NewListPager(resourceGroupNa return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *LoadBalancerLoadBalancingRulesClientListResponse) (LoadBalancerLoadBalancingRulesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, loadBalancerName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "LoadBalancerLoadBalancingRulesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, loadBalancerName, options) + }, nil) if err != nil { return LoadBalancerLoadBalancingRulesClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return LoadBalancerLoadBalancingRulesClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return LoadBalancerLoadBalancingRulesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancernetworkinterfaces_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancernetworkinterfaces_client.go index cf44ee9f9..379161ac5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancernetworkinterfaces_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancernetworkinterfaces_client.go @@ -33,7 +33,7 @@ type LoadBalancerNetworkInterfacesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewLoadBalancerNetworkInterfacesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*LoadBalancerNetworkInterfacesClient, error) { - cl, err := arm.NewClient(moduleName+".LoadBalancerNetworkInterfacesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -57,25 +57,20 @@ func (client *LoadBalancerNetworkInterfacesClient) NewListPager(resourceGroupNam return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *LoadBalancerNetworkInterfacesClientListResponse) (LoadBalancerNetworkInterfacesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, loadBalancerName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "LoadBalancerNetworkInterfacesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, loadBalancerName, options) + }, nil) if err != nil { return LoadBalancerNetworkInterfacesClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return LoadBalancerNetworkInterfacesClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return LoadBalancerNetworkInterfacesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalanceroutboundrules_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalanceroutboundrules_client.go index 538ae3abb..1c4826487 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalanceroutboundrules_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalanceroutboundrules_client.go @@ -33,7 +33,7 @@ type LoadBalancerOutboundRulesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewLoadBalancerOutboundRulesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*LoadBalancerOutboundRulesClient, error) { - cl, err := arm.NewClient(moduleName+".LoadBalancerOutboundRulesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -55,6 +55,10 @@ func NewLoadBalancerOutboundRulesClient(subscriptionID string, credential azcore // method. 
func (client *LoadBalancerOutboundRulesClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string, outboundRuleName string, options *LoadBalancerOutboundRulesClientGetOptions) (LoadBalancerOutboundRulesClientGetResponse, error) { var err error + const operationName = "LoadBalancerOutboundRulesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, loadBalancerName, outboundRuleName, options) if err != nil { return LoadBalancerOutboundRulesClientGetResponse{}, err @@ -123,25 +127,20 @@ func (client *LoadBalancerOutboundRulesClient) NewListPager(resourceGroupName st return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *LoadBalancerOutboundRulesClientListResponse) (LoadBalancerOutboundRulesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, loadBalancerName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "LoadBalancerOutboundRulesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, loadBalancerName, options) + }, nil) if err != nil { return LoadBalancerOutboundRulesClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return LoadBalancerOutboundRulesClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return LoadBalancerOutboundRulesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancerprobes_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancerprobes_client.go index 573246dff..8ab5b64e8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancerprobes_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancerprobes_client.go @@ -33,7 +33,7 @@ type LoadBalancerProbesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewLoadBalancerProbesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*LoadBalancerProbesClient, error) { - cl, err := arm.NewClient(moduleName+".LoadBalancerProbesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -54,6 +54,10 @@ func NewLoadBalancerProbesClient(subscriptionID string, credential azcore.TokenC // - options - LoadBalancerProbesClientGetOptions contains the optional parameters for the LoadBalancerProbesClient.Get method. 
func (client *LoadBalancerProbesClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string, probeName string, options *LoadBalancerProbesClientGetOptions) (LoadBalancerProbesClientGetResponse, error) { var err error + const operationName = "LoadBalancerProbesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, loadBalancerName, probeName, options) if err != nil { return LoadBalancerProbesClientGetResponse{}, err @@ -122,25 +126,20 @@ func (client *LoadBalancerProbesClient) NewListPager(resourceGroupName string, l return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *LoadBalancerProbesClientListResponse) (LoadBalancerProbesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, loadBalancerName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "LoadBalancerProbesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, loadBalancerName, options) + }, nil) if err != nil { return LoadBalancerProbesClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return LoadBalancerProbesClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return LoadBalancerProbesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancers_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancers_client.go index 7ebced80b..3dc0f757c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancers_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancers_client.go @@ -33,7 +33,7 @@ type LoadBalancersClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewLoadBalancersClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*LoadBalancersClient, error) { - cl, err := arm.NewClient(moduleName+".LoadBalancersClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *LoadBalancersClient) BeginCreateOrUpdate(ctx context.Context, reso } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[LoadBalancersClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[LoadBalancersClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[LoadBalancersClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *LoadBalancersClient) BeginCreateOrUpdate(ctx context.Context, reso // Generated from API version 2023-05-01 func (client *LoadBalancersClient) createOrUpdate(ctx context.Context, resourceGroupName string, loadBalancerName string, parameters LoadBalancer, options *LoadBalancersClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "LoadBalancersClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, loadBalancerName, parameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *LoadBalancersClient) BeginDelete(ctx context.Context, resourceGrou } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[LoadBalancersClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[LoadBalancersClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[LoadBalancersClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *LoadBalancersClient) BeginDelete(ctx context.Context, resourceGrou // Generated from API version 2023-05-01 func (client *LoadBalancersClient) deleteOperation(ctx context.Context, resourceGroupName string, loadBalancerName string, options *LoadBalancersClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "LoadBalancersClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, loadBalancerName, options) if err != nil { return nil, err @@ -197,6 +211,10 @@ func (client *LoadBalancersClient) deleteCreateRequest(ctx context.Context, reso // - options - LoadBalancersClientGetOptions contains the optional parameters for the LoadBalancersClient.Get method. 
func (client *LoadBalancersClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string, options *LoadBalancersClientGetOptions) (LoadBalancersClientGetResponse, error) { var err error + const operationName = "LoadBalancersClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, loadBalancerName, options) if err != nil { return LoadBalancersClientGetResponse{}, err @@ -262,25 +280,20 @@ func (client *LoadBalancersClient) NewListPager(resourceGroupName string, option return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *LoadBalancersClientListResponse) (LoadBalancersClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return LoadBalancersClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "LoadBalancersClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return LoadBalancersClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return LoadBalancersClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -326,25 +339,20 @@ func (client *LoadBalancersClient) NewListAllPager(options *LoadBalancersClientL return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *LoadBalancersClientListAllResponse) (LoadBalancersClientListAllResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listAllCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "LoadBalancersClient.NewListAllPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAllCreateRequest(ctx, options) + }, nil) if err != nil { return LoadBalancersClientListAllResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return LoadBalancersClientListAllResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return LoadBalancersClientListAllResponse{}, runtime.NewResponseError(resp) - } return client.listAllHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -393,10 +401,13 @@ func (client *LoadBalancersClient) BeginListInboundNatRulePortMappings(ctx conte } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[LoadBalancersClientListInboundNatRulePortMappingsResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return 
runtime.NewPollerFromResumeToken[LoadBalancersClientListInboundNatRulePortMappingsResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[LoadBalancersClientListInboundNatRulePortMappingsResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -406,6 +417,10 @@ func (client *LoadBalancersClient) BeginListInboundNatRulePortMappings(ctx conte // Generated from API version 2023-05-01 func (client *LoadBalancersClient) listInboundNatRulePortMappings(ctx context.Context, groupName string, loadBalancerName string, backendPoolName string, parameters QueryInboundNatRulePortMappingRequest, options *LoadBalancersClientBeginListInboundNatRulePortMappingsOptions) (*http.Response, error) { var err error + const operationName = "LoadBalancersClient.BeginListInboundNatRulePortMappings" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listInboundNatRulePortMappingsCreateRequest(ctx, groupName, loadBalancerName, backendPoolName, parameters, options) if err != nil { return nil, err @@ -464,6 +479,10 @@ func (client *LoadBalancersClient) listInboundNatRulePortMappingsCreateRequest(c // method. func (client *LoadBalancersClient) MigrateToIPBased(ctx context.Context, groupName string, loadBalancerName string, options *LoadBalancersClientMigrateToIPBasedOptions) (LoadBalancersClientMigrateToIPBasedResponse, error) { var err error + const operationName = "LoadBalancersClient.MigrateToIPBased" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.migrateToIPBasedCreateRequest(ctx, groupName, loadBalancerName, options) if err != nil { return LoadBalancersClientMigrateToIPBasedResponse{}, err @@ -537,10 +556,13 @@ func (client *LoadBalancersClient) BeginSwapPublicIPAddresses(ctx context.Contex } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[LoadBalancersClientSwapPublicIPAddressesResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[LoadBalancersClientSwapPublicIPAddressesResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[LoadBalancersClientSwapPublicIPAddressesResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -550,6 +572,10 @@ func (client *LoadBalancersClient) BeginSwapPublicIPAddresses(ctx context.Contex // Generated from API version 2023-05-01 func (client *LoadBalancersClient) swapPublicIPAddresses(ctx context.Context, location string, parameters LoadBalancerVipSwapRequest, options *LoadBalancersClientBeginSwapPublicIPAddressesOptions) (*http.Response, error) { var err error + const operationName = "LoadBalancersClient.BeginSwapPublicIPAddresses" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.swapPublicIPAddressesCreateRequest(ctx, location, parameters, options) if err != nil { return nil, 
err @@ -601,6 +627,10 @@ func (client *LoadBalancersClient) swapPublicIPAddressesCreateRequest(ctx contex // method. func (client *LoadBalancersClient) UpdateTags(ctx context.Context, resourceGroupName string, loadBalancerName string, parameters TagsObject, options *LoadBalancersClientUpdateTagsOptions) (LoadBalancersClientUpdateTagsResponse, error) { var err error + const operationName = "LoadBalancersClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, loadBalancerName, parameters, options) if err != nil { return LoadBalancersClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/localnetworkgateways_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/localnetworkgateways_client.go index e4424dbb5..9f5a5e7c1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/localnetworkgateways_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/localnetworkgateways_client.go @@ -33,7 +33,7 @@ type LocalNetworkGatewaysClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewLocalNetworkGatewaysClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*LocalNetworkGatewaysClient, error) { - cl, err := arm.NewClient(moduleName+".LocalNetworkGatewaysClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *LocalNetworkGatewaysClient) BeginCreateOrUpdate(ctx context.Contex } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[LocalNetworkGatewaysClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[LocalNetworkGatewaysClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[LocalNetworkGatewaysClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *LocalNetworkGatewaysClient) BeginCreateOrUpdate(ctx context.Contex // Generated from API version 2023-05-01 func (client *LocalNetworkGatewaysClient) createOrUpdate(ctx context.Context, resourceGroupName string, localNetworkGatewayName string, parameters LocalNetworkGateway, options *LocalNetworkGatewaysClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "LocalNetworkGatewaysClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, localNetworkGatewayName, parameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *LocalNetworkGatewaysClient) BeginDelete(ctx context.Context, resou } 
poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[LocalNetworkGatewaysClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[LocalNetworkGatewaysClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[LocalNetworkGatewaysClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *LocalNetworkGatewaysClient) BeginDelete(ctx context.Context, resou // Generated from API version 2023-05-01 func (client *LocalNetworkGatewaysClient) deleteOperation(ctx context.Context, resourceGroupName string, localNetworkGatewayName string, options *LocalNetworkGatewaysClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "LocalNetworkGatewaysClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, localNetworkGatewayName, options) if err != nil { return nil, err @@ -198,6 +212,10 @@ func (client *LocalNetworkGatewaysClient) deleteCreateRequest(ctx context.Contex // method. func (client *LocalNetworkGatewaysClient) Get(ctx context.Context, resourceGroupName string, localNetworkGatewayName string, options *LocalNetworkGatewaysClientGetOptions) (LocalNetworkGatewaysClientGetResponse, error) { var err error + const operationName = "LocalNetworkGatewaysClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, localNetworkGatewayName, options) if err != nil { return LocalNetworkGatewaysClientGetResponse{}, err @@ -261,25 +279,20 @@ func (client *LocalNetworkGatewaysClient) NewListPager(resourceGroupName string, return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *LocalNetworkGatewaysClientListResponse) (LocalNetworkGatewaysClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return LocalNetworkGatewaysClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "LocalNetworkGatewaysClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return LocalNetworkGatewaysClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return LocalNetworkGatewaysClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -325,6 +338,10 @@ func (client *LocalNetworkGatewaysClient) listHandleResponse(resp *http.Response // method. 
func (client *LocalNetworkGatewaysClient) UpdateTags(ctx context.Context, resourceGroupName string, localNetworkGatewayName string, parameters TagsObject, options *LocalNetworkGatewaysClientUpdateTagsOptions) (LocalNetworkGatewaysClientUpdateTagsResponse, error) { var err error + const operationName = "LocalNetworkGatewaysClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, localNetworkGatewayName, parameters, options) if err != nil { return LocalNetworkGatewaysClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/management_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/management_client.go index b2c5e9510..2955b415a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/management_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/management_client.go @@ -34,7 +34,7 @@ type ManagementClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewManagementClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ManagementClient, error) { - cl, err := arm.NewClient(moduleName+".ManagementClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -55,6 +55,10 @@ func NewManagementClient(subscriptionID string, credential azcore.TokenCredentia // method. 
func (client *ManagementClient) CheckDNSNameAvailability(ctx context.Context, location string, domainNameLabel string, options *ManagementClientCheckDNSNameAvailabilityOptions) (ManagementClientCheckDNSNameAvailabilityResponse, error) { var err error + const operationName = "ManagementClient.CheckDNSNameAvailability" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.checkDNSNameAvailabilityCreateRequest(ctx, location, domainNameLabel, options) if err != nil { return ManagementClientCheckDNSNameAvailabilityResponse{}, err @@ -120,10 +124,13 @@ func (client *ManagementClient) BeginDeleteBastionShareableLink(ctx context.Cont } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ManagementClientDeleteBastionShareableLinkResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ManagementClientDeleteBastionShareableLinkResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ManagementClientDeleteBastionShareableLinkResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -133,6 +140,10 @@ func (client *ManagementClient) BeginDeleteBastionShareableLink(ctx context.Cont // Generated from API version 2023-05-01 func (client *ManagementClient) deleteBastionShareableLink(ctx context.Context, resourceGroupName string, bastionHostName string, bslRequest BastionShareableLinkListRequest, options *ManagementClientBeginDeleteBastionShareableLinkOptions) (*http.Response, error) { var err error + const operationName = "ManagementClient.BeginDeleteBastionShareableLink" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteBastionShareableLinkCreateRequest(ctx, resourceGroupName, bastionHostName, bslRequest, options) if err != nil { return nil, err @@ -191,25 +202,20 @@ func (client *ManagementClient) NewDisconnectActiveSessionsPager(resourceGroupNa return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ManagementClientDisconnectActiveSessionsResponse) (ManagementClientDisconnectActiveSessionsResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.disconnectActiveSessionsCreateRequest(ctx, resourceGroupName, bastionHostName, sessionIDs, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ManagementClient.NewDisconnectActiveSessionsPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.disconnectActiveSessionsCreateRequest(ctx, resourceGroupName, bastionHostName, sessionIDs, options) + }, nil) if err != nil { return ManagementClientDisconnectActiveSessionsResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ManagementClientDisconnectActiveSessionsResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - 
return ManagementClientDisconnectActiveSessionsResponse{}, runtime.NewResponseError(resp) - } return client.disconnectActiveSessionsHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -260,6 +266,10 @@ func (client *ManagementClient) disconnectActiveSessionsHandleResponse(resp *htt // method. func (client *ManagementClient) ExpressRouteProviderPort(ctx context.Context, providerport string, options *ManagementClientExpressRouteProviderPortOptions) (ManagementClientExpressRouteProviderPortResponse, error) { var err error + const operationName = "ManagementClient.ExpressRouteProviderPort" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.expressRouteProviderPortCreateRequest(ctx, providerport, options) if err != nil { return ManagementClientExpressRouteProviderPortResponse{}, err @@ -325,10 +335,13 @@ func (client *ManagementClient) BeginGeneratevirtualwanvpnserverconfigurationvpn } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ManagementClientGeneratevirtualwanvpnserverconfigurationvpnprofileResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ManagementClientGeneratevirtualwanvpnserverconfigurationvpnprofileResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ManagementClientGeneratevirtualwanvpnserverconfigurationvpnprofileResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -339,6 +352,10 @@ func (client *ManagementClient) BeginGeneratevirtualwanvpnserverconfigurationvpn // Generated from API version 2023-05-01 func (client *ManagementClient) generatevirtualwanvpnserverconfigurationvpnprofile(ctx context.Context, resourceGroupName string, virtualWANName string, vpnClientParams VirtualWanVPNProfileParameters, options *ManagementClientBeginGeneratevirtualwanvpnserverconfigurationvpnprofileOptions) (*http.Response, error) { var err error + const operationName = "ManagementClient.BeginGeneratevirtualwanvpnserverconfigurationvpnprofile" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.generatevirtualwanvpnserverconfigurationvpnprofileCreateRequest(ctx, resourceGroupName, virtualWANName, vpnClientParams, options) if err != nil { return nil, err @@ -396,19 +413,16 @@ func (client *ManagementClient) BeginGetActiveSessions(ctx context.Context, reso return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ManagementClientGetActiveSessionsResponse) (ManagementClientGetActiveSessionsResponse, error) { - req, err := runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - if err != nil { - return ManagementClientGetActiveSessionsResponse{}, err - } - resp, err := client.internal.Pipeline().Do(req) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ManagementClient.BeginGetActiveSessions") + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), *page.NextLink, func(ctx context.Context) (*policy.Request, error) { + return client.getActiveSessionsCreateRequest(ctx, resourceGroupName, bastionHostName, 
options) + }, nil) if err != nil { return ManagementClientGetActiveSessionsResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ManagementClientGetActiveSessionsResponse{}, runtime.NewResponseError(resp) - } return client.getActiveSessionsHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) if options == nil || options.ResumeToken == "" { resp, err := client.getActiveSessions(ctx, resourceGroupName, bastionHostName, options) @@ -418,11 +432,13 @@ func (client *ManagementClient) BeginGetActiveSessions(ctx context.Context, reso poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[*runtime.Pager[ManagementClientGetActiveSessionsResponse]]{ FinalStateVia: runtime.FinalStateViaLocation, Response: &pager, + Tracer: client.internal.Tracer(), }) return poller, err } else { return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[*runtime.Pager[ManagementClientGetActiveSessionsResponse]]{ Response: &pager, + Tracer: client.internal.Tracer(), }) } } @@ -432,6 +448,10 @@ func (client *ManagementClient) BeginGetActiveSessions(ctx context.Context, reso // Generated from API version 2023-05-01 func (client *ManagementClient) getActiveSessions(ctx context.Context, resourceGroupName string, bastionHostName string, options *ManagementClientBeginGetActiveSessionsOptions) (*http.Response, error) { var err error + const operationName = "ManagementClient.BeginGetActiveSessions" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getActiveSessionsCreateRequest(ctx, resourceGroupName, bastionHostName, options) if err != nil { return nil, err @@ -496,25 +516,20 @@ func (client *ManagementClient) NewGetBastionShareableLinkPager(resourceGroupNam return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ManagementClientGetBastionShareableLinkResponse) (ManagementClientGetBastionShareableLinkResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.getBastionShareableLinkCreateRequest(ctx, resourceGroupName, bastionHostName, bslRequest, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ManagementClient.NewGetBastionShareableLinkPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.getBastionShareableLinkCreateRequest(ctx, resourceGroupName, bastionHostName, bslRequest, options) + }, nil) if err != nil { return ManagementClientGetBastionShareableLinkResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ManagementClientGetBastionShareableLinkResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ManagementClientGetBastionShareableLinkResponse{}, runtime.NewResponseError(resp) - } return client.getBastionShareableLinkHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -567,6 +582,10 @@ func (client *ManagementClient) getBastionShareableLinkHandleResponse(resp *http // method. 
func (client *ManagementClient) ListActiveConnectivityConfigurations(ctx context.Context, resourceGroupName string, networkManagerName string, parameters ActiveConfigurationParameter, options *ManagementClientListActiveConnectivityConfigurationsOptions) (ManagementClientListActiveConnectivityConfigurationsResponse, error) { var err error + const operationName = "ManagementClient.ListActiveConnectivityConfigurations" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listActiveConnectivityConfigurationsCreateRequest(ctx, resourceGroupName, networkManagerName, parameters, options) if err != nil { return ManagementClientListActiveConnectivityConfigurationsResponse{}, err @@ -635,6 +654,10 @@ func (client *ManagementClient) listActiveConnectivityConfigurationsHandleRespon // method. func (client *ManagementClient) ListActiveSecurityAdminRules(ctx context.Context, resourceGroupName string, networkManagerName string, parameters ActiveConfigurationParameter, options *ManagementClientListActiveSecurityAdminRulesOptions) (ManagementClientListActiveSecurityAdminRulesResponse, error) { var err error + const operationName = "ManagementClient.ListActiveSecurityAdminRules" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listActiveSecurityAdminRulesCreateRequest(ctx, resourceGroupName, networkManagerName, parameters, options) if err != nil { return ManagementClientListActiveSecurityAdminRulesResponse{}, err @@ -704,6 +727,10 @@ func (client *ManagementClient) listActiveSecurityAdminRulesHandleResponse(resp // for the ManagementClient.ListNetworkManagerEffectiveConnectivityConfigurations method. func (client *ManagementClient) ListNetworkManagerEffectiveConnectivityConfigurations(ctx context.Context, resourceGroupName string, virtualNetworkName string, parameters QueryRequestOptions, options *ManagementClientListNetworkManagerEffectiveConnectivityConfigurationsOptions) (ManagementClientListNetworkManagerEffectiveConnectivityConfigurationsResponse, error) { var err error + const operationName = "ManagementClient.ListNetworkManagerEffectiveConnectivityConfigurations" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listNetworkManagerEffectiveConnectivityConfigurationsCreateRequest(ctx, resourceGroupName, virtualNetworkName, parameters, options) if err != nil { return ManagementClientListNetworkManagerEffectiveConnectivityConfigurationsResponse{}, err @@ -772,6 +799,10 @@ func (client *ManagementClient) listNetworkManagerEffectiveConnectivityConfigura // ManagementClient.ListNetworkManagerEffectiveSecurityAdminRules method. 
func (client *ManagementClient) ListNetworkManagerEffectiveSecurityAdminRules(ctx context.Context, resourceGroupName string, virtualNetworkName string, parameters QueryRequestOptions, options *ManagementClientListNetworkManagerEffectiveSecurityAdminRulesOptions) (ManagementClientListNetworkManagerEffectiveSecurityAdminRulesResponse, error) { var err error + const operationName = "ManagementClient.ListNetworkManagerEffectiveSecurityAdminRules" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listNetworkManagerEffectiveSecurityAdminRulesCreateRequest(ctx, resourceGroupName, virtualNetworkName, parameters, options) if err != nil { return ManagementClientListNetworkManagerEffectiveSecurityAdminRulesResponse{}, err @@ -843,19 +874,16 @@ func (client *ManagementClient) BeginPutBastionShareableLink(ctx context.Context return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ManagementClientPutBastionShareableLinkResponse) (ManagementClientPutBastionShareableLinkResponse, error) { - req, err := runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ManagementClient.BeginPutBastionShareableLink") + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), *page.NextLink, func(ctx context.Context) (*policy.Request, error) { + return client.putBastionShareableLinkCreateRequest(ctx, resourceGroupName, bastionHostName, bslRequest, options) + }, nil) if err != nil { return ManagementClientPutBastionShareableLinkResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ManagementClientPutBastionShareableLinkResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ManagementClientPutBastionShareableLinkResponse{}, runtime.NewResponseError(resp) - } return client.putBastionShareableLinkHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) if options == nil || options.ResumeToken == "" { resp, err := client.putBastionShareableLink(ctx, resourceGroupName, bastionHostName, bslRequest, options) @@ -865,11 +893,13 @@ func (client *ManagementClient) BeginPutBastionShareableLink(ctx context.Context poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[*runtime.Pager[ManagementClientPutBastionShareableLinkResponse]]{ FinalStateVia: runtime.FinalStateViaLocation, Response: &pager, + Tracer: client.internal.Tracer(), }) return poller, err } else { return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[*runtime.Pager[ManagementClientPutBastionShareableLinkResponse]]{ Response: &pager, + Tracer: client.internal.Tracer(), }) } } @@ -879,6 +909,10 @@ func (client *ManagementClient) BeginPutBastionShareableLink(ctx context.Context // Generated from API version 2023-05-01 func (client *ManagementClient) putBastionShareableLink(ctx context.Context, resourceGroupName string, bastionHostName string, bslRequest BastionShareableLinkListRequest, options *ManagementClientBeginPutBastionShareableLinkOptions) (*http.Response, error) { var err error + const operationName = "ManagementClient.BeginPutBastionShareableLink" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + 
defer func() { endSpan(err) }() req, err := client.putBastionShareableLinkCreateRequest(ctx, resourceGroupName, bastionHostName, bslRequest, options) if err != nil { return nil, err @@ -942,6 +976,10 @@ func (client *ManagementClient) putBastionShareableLinkHandleResponse(resp *http // method. func (client *ManagementClient) SupportedSecurityProviders(ctx context.Context, resourceGroupName string, virtualWANName string, options *ManagementClientSupportedSecurityProvidersOptions) (ManagementClientSupportedSecurityProvidersResponse, error) { var err error + const operationName = "ManagementClient.SupportedSecurityProviders" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.supportedSecurityProvidersCreateRequest(ctx, resourceGroupName, virtualWANName, options) if err != nil { return ManagementClientSupportedSecurityProvidersResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/managementgroupnetworkmanagerconnections_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/managementgroupnetworkmanagerconnections_client.go index 5af5d9aec..a672f6e34 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/managementgroupnetworkmanagerconnections_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/managementgroupnetworkmanagerconnections_client.go @@ -31,7 +31,7 @@ type ManagementGroupNetworkManagerConnectionsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewManagementGroupNetworkManagerConnectionsClient(credential azcore.TokenCredential, options *arm.ClientOptions) (*ManagementGroupNetworkManagerConnectionsClient, error) { - cl, err := arm.NewClient(moduleName+".ManagementGroupNetworkManagerConnectionsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -52,6 +52,10 @@ func NewManagementGroupNetworkManagerConnectionsClient(credential azcore.TokenCr // ManagementGroupNetworkManagerConnectionsClient.CreateOrUpdate method. func (client *ManagementGroupNetworkManagerConnectionsClient) CreateOrUpdate(ctx context.Context, managementGroupID string, networkManagerConnectionName string, parameters ManagerConnection, options *ManagementGroupNetworkManagerConnectionsClientCreateOrUpdateOptions) (ManagementGroupNetworkManagerConnectionsClientCreateOrUpdateResponse, error) { var err error + const operationName = "ManagementGroupNetworkManagerConnectionsClient.CreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, managementGroupID, networkManagerConnectionName, parameters, options) if err != nil { return ManagementGroupNetworkManagerConnectionsClientCreateOrUpdateResponse{}, err @@ -112,6 +116,10 @@ func (client *ManagementGroupNetworkManagerConnectionsClient) createOrUpdateHand // method. 
func (client *ManagementGroupNetworkManagerConnectionsClient) Delete(ctx context.Context, managementGroupID string, networkManagerConnectionName string, options *ManagementGroupNetworkManagerConnectionsClientDeleteOptions) (ManagementGroupNetworkManagerConnectionsClientDeleteResponse, error) { var err error + const operationName = "ManagementGroupNetworkManagerConnectionsClient.Delete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, managementGroupID, networkManagerConnectionName, options) if err != nil { return ManagementGroupNetworkManagerConnectionsClientDeleteResponse{}, err @@ -159,6 +167,10 @@ func (client *ManagementGroupNetworkManagerConnectionsClient) deleteCreateReques // method. func (client *ManagementGroupNetworkManagerConnectionsClient) Get(ctx context.Context, managementGroupID string, networkManagerConnectionName string, options *ManagementGroupNetworkManagerConnectionsClientGetOptions) (ManagementGroupNetworkManagerConnectionsClientGetResponse, error) { var err error + const operationName = "ManagementGroupNetworkManagerConnectionsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, managementGroupID, networkManagerConnectionName, options) if err != nil { return ManagementGroupNetworkManagerConnectionsClientGetResponse{}, err @@ -218,25 +230,20 @@ func (client *ManagementGroupNetworkManagerConnectionsClient) NewListPager(manag return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ManagementGroupNetworkManagerConnectionsClientListResponse) (ManagementGroupNetworkManagerConnectionsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, managementGroupID, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ManagementGroupNetworkManagerConnectionsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, managementGroupID, options) + }, nil) if err != nil { return ManagementGroupNetworkManagerConnectionsClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ManagementGroupNetworkManagerConnectionsClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ManagementGroupNetworkManagerConnectionsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/managercommits_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/managercommits_client.go index 1b99c05cd..664336970 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/managercommits_client.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/managercommits_client.go @@ -33,7 +33,7 @@ type ManagerCommitsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewManagerCommitsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ManagerCommitsClient, error) { - cl, err := arm.NewClient(moduleName+".ManagerCommitsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *ManagerCommitsClient) BeginPost(ctx context.Context, resourceGroup } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ManagerCommitsClientPostResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ManagerCommitsClientPostResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ManagerCommitsClientPostResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *ManagerCommitsClient) BeginPost(ctx context.Context, resourceGroup // Generated from API version 2023-05-01 func (client *ManagerCommitsClient) post(ctx context.Context, resourceGroupName string, networkManagerName string, parameters ManagerCommit, options *ManagerCommitsClientBeginPostOptions) (*http.Response, error) { var err error + const operationName = "ManagerCommitsClient.BeginPost" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.postCreateRequest(ctx, resourceGroupName, networkManagerName, parameters, options) if err != nil { return nil, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/managerdeploymentstatus_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/managerdeploymentstatus_client.go index 896c44131..d4d5b8580 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/managerdeploymentstatus_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/managerdeploymentstatus_client.go @@ -34,7 +34,7 @@ type ManagerDeploymentStatusClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewManagerDeploymentStatusClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ManagerDeploymentStatusClient, error) { - cl, err := arm.NewClient(moduleName+".ManagerDeploymentStatusClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -56,6 +56,10 @@ func NewManagerDeploymentStatusClient(subscriptionID string, credential azcore.T // method. 
func (client *ManagerDeploymentStatusClient) List(ctx context.Context, resourceGroupName string, networkManagerName string, parameters ManagerDeploymentStatusParameter, options *ManagerDeploymentStatusClientListOptions) (ManagerDeploymentStatusClientListResponse, error) { var err error + const operationName = "ManagerDeploymentStatusClient.List" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listCreateRequest(ctx, resourceGroupName, networkManagerName, parameters, options) if err != nil { return ManagerDeploymentStatusClientListResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/managers_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/managers_client.go index 77237d574..4bb745c86 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/managers_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/managers_client.go @@ -34,7 +34,7 @@ type ManagersClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewManagersClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ManagersClient, error) { - cl, err := arm.NewClient(moduleName+".ManagersClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -55,6 +55,10 @@ func NewManagersClient(subscriptionID string, credential azcore.TokenCredential, // - options - ManagersClientCreateOrUpdateOptions contains the optional parameters for the ManagersClient.CreateOrUpdate method. 
func (client *ManagersClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkManagerName string, parameters Manager, options *ManagersClientCreateOrUpdateOptions) (ManagersClientCreateOrUpdateResponse, error) { var err error + const operationName = "ManagersClient.CreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, networkManagerName, parameters, options) if err != nil { return ManagersClientCreateOrUpdateResponse{}, err @@ -124,10 +128,13 @@ func (client *ManagersClient) BeginDelete(ctx context.Context, resourceGroupName } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ManagersClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ManagersClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ManagersClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -137,6 +144,10 @@ func (client *ManagersClient) BeginDelete(ctx context.Context, resourceGroupName // Generated from API version 2023-05-01 func (client *ManagersClient) deleteOperation(ctx context.Context, resourceGroupName string, networkManagerName string, options *ManagersClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "ManagersClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, networkManagerName, options) if err != nil { return nil, err @@ -190,6 +201,10 @@ func (client *ManagersClient) deleteCreateRequest(ctx context.Context, resourceG // - options - ManagersClientGetOptions contains the optional parameters for the ManagersClient.Get method. 
func (client *ManagersClient) Get(ctx context.Context, resourceGroupName string, networkManagerName string, options *ManagersClientGetOptions) (ManagersClientGetResponse, error) { var err error + const operationName = "ManagersClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, networkManagerName, options) if err != nil { return ManagersClientGetResponse{}, err @@ -252,25 +267,20 @@ func (client *ManagersClient) NewListPager(resourceGroupName string, options *Ma return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ManagersClientListResponse) (ManagersClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ManagersClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return ManagersClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ManagersClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ManagersClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -322,25 +332,20 @@ func (client *ManagersClient) NewListBySubscriptionPager(options *ManagersClient return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ManagersClientListBySubscriptionResponse) (ManagersClientListBySubscriptionResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listBySubscriptionCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ManagersClient.NewListBySubscriptionPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listBySubscriptionCreateRequest(ctx, options) + }, nil) if err != nil { return ManagersClientListBySubscriptionResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ManagersClientListBySubscriptionResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ManagersClientListBySubscriptionResponse{}, runtime.NewResponseError(resp) - } return client.listBySubscriptionHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -387,6 +392,10 @@ func (client *ManagersClient) listBySubscriptionHandleResponse(resp *http.Respon // - options - ManagersClientPatchOptions contains the optional parameters for the ManagersClient.Patch method. 
func (client *ManagersClient) Patch(ctx context.Context, resourceGroupName string, networkManagerName string, parameters PatchObject, options *ManagersClientPatchOptions) (ManagersClientPatchResponse, error) { var err error + const operationName = "ManagersClient.Patch" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.patchCreateRequest(ctx, resourceGroupName, networkManagerName, parameters, options) if err != nil { return ManagersClientPatchResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/models_serde.go index 125e0c5cf..affbe8f63 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/models_serde.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/models_serde.go @@ -84,7 +84,7 @@ func (a *Action) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type ActiveBaseSecurityAdminRule. func (a ActiveBaseSecurityAdminRule) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populateTimeRFC3339(objectMap, "commitTime", a.CommitTime) + populateDateTimeRFC3339(objectMap, "commitTime", a.CommitTime) populate(objectMap, "configurationDescription", a.ConfigurationDescription) populate(objectMap, "id", a.ID) objectMap["kind"] = a.Kind @@ -105,7 +105,7 @@ func (a *ActiveBaseSecurityAdminRule) UnmarshalJSON(data []byte) error { var err error switch key { case "commitTime": - err = unpopulateTimeRFC3339(val, "CommitTime", &a.CommitTime) + err = unpopulateDateTimeRFC3339(val, "CommitTime", &a.CommitTime) delete(rawMsg, key) case "configurationDescription": err = unpopulate(val, "ConfigurationDescription", &a.ConfigurationDescription) @@ -170,7 +170,7 @@ func (a *ActiveConfigurationParameter) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type ActiveConnectivityConfiguration. func (a ActiveConnectivityConfiguration) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populateTimeRFC3339(objectMap, "commitTime", a.CommitTime) + populateDateTimeRFC3339(objectMap, "commitTime", a.CommitTime) populate(objectMap, "configurationGroups", a.ConfigurationGroups) populate(objectMap, "id", a.ID) populate(objectMap, "properties", a.Properties) @@ -188,7 +188,7 @@ func (a *ActiveConnectivityConfiguration) UnmarshalJSON(data []byte) error { var err error switch key { case "commitTime": - err = unpopulateTimeRFC3339(val, "CommitTime", &a.CommitTime) + err = unpopulateDateTimeRFC3339(val, "CommitTime", &a.CommitTime) delete(rawMsg, key) case "configurationGroups": err = unpopulate(val, "ConfigurationGroups", &a.ConfigurationGroups) @@ -244,7 +244,7 @@ func (a *ActiveConnectivityConfigurationsListResult) UnmarshalJSON(data []byte) // MarshalJSON implements the json.Marshaller interface for type ActiveDefaultSecurityAdminRule. 
func (a ActiveDefaultSecurityAdminRule) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populateTimeRFC3339(objectMap, "commitTime", a.CommitTime) + populateDateTimeRFC3339(objectMap, "commitTime", a.CommitTime) populate(objectMap, "configurationDescription", a.ConfigurationDescription) populate(objectMap, "id", a.ID) objectMap["kind"] = EffectiveAdminRuleKindDefault @@ -266,7 +266,7 @@ func (a *ActiveDefaultSecurityAdminRule) UnmarshalJSON(data []byte) error { var err error switch key { case "commitTime": - err = unpopulateTimeRFC3339(val, "CommitTime", &a.CommitTime) + err = unpopulateDateTimeRFC3339(val, "CommitTime", &a.CommitTime) delete(rawMsg, key) case "configurationDescription": err = unpopulate(val, "ConfigurationDescription", &a.ConfigurationDescription) @@ -303,7 +303,7 @@ func (a *ActiveDefaultSecurityAdminRule) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type ActiveSecurityAdminRule. func (a ActiveSecurityAdminRule) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populateTimeRFC3339(objectMap, "commitTime", a.CommitTime) + populateDateTimeRFC3339(objectMap, "commitTime", a.CommitTime) populate(objectMap, "configurationDescription", a.ConfigurationDescription) populate(objectMap, "id", a.ID) objectMap["kind"] = EffectiveAdminRuleKindCustom @@ -325,7 +325,7 @@ func (a *ActiveSecurityAdminRule) UnmarshalJSON(data []byte) error { var err error switch key { case "commitTime": - err = unpopulateTimeRFC3339(val, "CommitTime", &a.CommitTime) + err = unpopulateDateTimeRFC3339(val, "CommitTime", &a.CommitTime) delete(rawMsg, key) case "configurationDescription": err = unpopulate(val, "ConfigurationDescription", &a.ConfigurationDescription) @@ -6488,7 +6488,7 @@ func (a *AzureReachabilityReportItem) UnmarshalJSON(data []byte) error { func (a AzureReachabilityReportLatencyInfo) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "score", a.Score) - populateTimeRFC3339(objectMap, "timeStamp", a.TimeStamp) + populateDateTimeRFC3339(objectMap, "timeStamp", a.TimeStamp) return json.Marshal(objectMap) } @@ -6505,7 +6505,7 @@ func (a *AzureReachabilityReportLatencyInfo) UnmarshalJSON(data []byte) error { err = unpopulate(val, "Score", &a.Score) delete(rawMsg, key) case "timeStamp": - err = unpopulateTimeRFC3339(val, "TimeStamp", &a.TimeStamp) + err = unpopulateDateTimeRFC3339(val, "TimeStamp", &a.TimeStamp) delete(rawMsg, key) } if err != nil { @@ -6554,10 +6554,10 @@ func (a *AzureReachabilityReportLocation) UnmarshalJSON(data []byte) error { func (a AzureReachabilityReportParameters) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "azureLocations", a.AzureLocations) - populateTimeRFC3339(objectMap, "endTime", a.EndTime) + populateDateTimeRFC3339(objectMap, "endTime", a.EndTime) populate(objectMap, "providerLocation", a.ProviderLocation) populate(objectMap, "providers", a.Providers) - populateTimeRFC3339(objectMap, "startTime", a.StartTime) + populateDateTimeRFC3339(objectMap, "startTime", a.StartTime) return json.Marshal(objectMap) } @@ -6574,7 +6574,7 @@ func (a *AzureReachabilityReportParameters) UnmarshalJSON(data []byte) error { err = unpopulate(val, "AzureLocations", &a.AzureLocations) delete(rawMsg, key) case "endTime": - err = unpopulateTimeRFC3339(val, "EndTime", &a.EndTime) + err = unpopulateDateTimeRFC3339(val, "EndTime", &a.EndTime) delete(rawMsg, key) case "providerLocation": err = unpopulate(val, "ProviderLocation", 
&a.ProviderLocation) @@ -6583,7 +6583,7 @@ func (a *AzureReachabilityReportParameters) UnmarshalJSON(data []byte) error { err = unpopulate(val, "Providers", &a.Providers) delete(rawMsg, key) case "startTime": - err = unpopulateTimeRFC3339(val, "StartTime", &a.StartTime) + err = unpopulateDateTimeRFC3339(val, "StartTime", &a.StartTime) delete(rawMsg, key) } if err != nil { @@ -8598,7 +8598,7 @@ func (c ConnectionMonitorResultProperties) MarshalJSON() ([]byte, error) { populate(objectMap, "outputs", c.Outputs) populate(objectMap, "provisioningState", c.ProvisioningState) populate(objectMap, "source", c.Source) - populateTimeRFC3339(objectMap, "startTime", c.StartTime) + populateDateTimeRFC3339(objectMap, "startTime", c.StartTime) populate(objectMap, "testConfigurations", c.TestConfigurations) populate(objectMap, "testGroups", c.TestGroups) return json.Marshal(objectMap) @@ -8644,7 +8644,7 @@ func (c *ConnectionMonitorResultProperties) UnmarshalJSON(data []byte) error { err = unpopulate(val, "Source", &c.Source) delete(rawMsg, key) case "startTime": - err = unpopulateTimeRFC3339(val, "StartTime", &c.StartTime) + err = unpopulateDateTimeRFC3339(val, "StartTime", &c.StartTime) delete(rawMsg, key) case "testConfigurations": err = unpopulate(val, "TestConfigurations", &c.TestConfigurations) @@ -8945,14 +8945,14 @@ func (c ConnectionStateSnapshot) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "avgLatencyInMs", c.AvgLatencyInMs) populate(objectMap, "connectionState", c.ConnectionState) - populateTimeRFC3339(objectMap, "endTime", c.EndTime) + populateDateTimeRFC3339(objectMap, "endTime", c.EndTime) populate(objectMap, "evaluationState", c.EvaluationState) populate(objectMap, "hops", c.Hops) populate(objectMap, "maxLatencyInMs", c.MaxLatencyInMs) populate(objectMap, "minLatencyInMs", c.MinLatencyInMs) populate(objectMap, "probesFailed", c.ProbesFailed) populate(objectMap, "probesSent", c.ProbesSent) - populateTimeRFC3339(objectMap, "startTime", c.StartTime) + populateDateTimeRFC3339(objectMap, "startTime", c.StartTime) return json.Marshal(objectMap) } @@ -8972,7 +8972,7 @@ func (c *ConnectionStateSnapshot) UnmarshalJSON(data []byte) error { err = unpopulate(val, "ConnectionState", &c.ConnectionState) delete(rawMsg, key) case "endTime": - err = unpopulateTimeRFC3339(val, "EndTime", &c.EndTime) + err = unpopulateDateTimeRFC3339(val, "EndTime", &c.EndTime) delete(rawMsg, key) case "evaluationState": err = unpopulate(val, "EvaluationState", &c.EvaluationState) @@ -8993,7 +8993,7 @@ func (c *ConnectionStateSnapshot) UnmarshalJSON(data []byte) error { err = unpopulate(val, "ProbesSent", &c.ProbesSent) delete(rawMsg, key) case "startTime": - err = unpopulateTimeRFC3339(val, "StartTime", &c.StartTime) + err = unpopulateDateTimeRFC3339(val, "StartTime", &c.StartTime) delete(rawMsg, key) } if err != nil { @@ -20217,7 +20217,7 @@ func (m *ManagerConnectionProperties) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type ManagerDeploymentStatus. 
func (m ManagerDeploymentStatus) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populateTimeRFC3339(objectMap, "commitTime", m.CommitTime) + populateDateTimeRFC3339(objectMap, "commitTime", m.CommitTime) populate(objectMap, "configurationIds", m.ConfigurationIDs) populate(objectMap, "deploymentStatus", m.DeploymentStatus) populate(objectMap, "deploymentType", m.DeploymentType) @@ -20236,7 +20236,7 @@ func (m *ManagerDeploymentStatus) UnmarshalJSON(data []byte) error { var err error switch key { case "commitTime": - err = unpopulateTimeRFC3339(val, "CommitTime", &m.CommitTime) + err = unpopulateDateTimeRFC3339(val, "CommitTime", &m.CommitTime) delete(rawMsg, key) case "configurationIds": err = unpopulate(val, "ConfigurationIDs", &m.ConfigurationIDs) @@ -22013,7 +22013,7 @@ func (p *PacketCaptureParameters) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type PacketCaptureQueryStatusResult. func (p PacketCaptureQueryStatusResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populateTimeRFC3339(objectMap, "captureStartTime", p.CaptureStartTime) + populateDateTimeRFC3339(objectMap, "captureStartTime", p.CaptureStartTime) populate(objectMap, "id", p.ID) populate(objectMap, "name", p.Name) populate(objectMap, "packetCaptureError", p.PacketCaptureError) @@ -22032,7 +22032,7 @@ func (p *PacketCaptureQueryStatusResult) UnmarshalJSON(data []byte) error { var err error switch key { case "captureStartTime": - err = unpopulateTimeRFC3339(val, "CaptureStartTime", &p.CaptureStartTime) + err = unpopulateDateTimeRFC3339(val, "CaptureStartTime", &p.CaptureStartTime) delete(rawMsg, key) case "id": err = unpopulate(val, "ID", &p.ID) @@ -28056,10 +28056,10 @@ func (s *SwapResourceProperties) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type SystemData. func (s SystemData) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populateTimeRFC3339(objectMap, "createdAt", s.CreatedAt) + populateDateTimeRFC3339(objectMap, "createdAt", s.CreatedAt) populate(objectMap, "createdBy", s.CreatedBy) populate(objectMap, "createdByType", s.CreatedByType) - populateTimeRFC3339(objectMap, "lastModifiedAt", s.LastModifiedAt) + populateDateTimeRFC3339(objectMap, "lastModifiedAt", s.LastModifiedAt) populate(objectMap, "lastModifiedBy", s.LastModifiedBy) populate(objectMap, "lastModifiedByType", s.LastModifiedByType) return json.Marshal(objectMap) @@ -28075,7 +28075,7 @@ func (s *SystemData) UnmarshalJSON(data []byte) error { var err error switch key { case "createdAt": - err = unpopulateTimeRFC3339(val, "CreatedAt", &s.CreatedAt) + err = unpopulateDateTimeRFC3339(val, "CreatedAt", &s.CreatedAt) delete(rawMsg, key) case "createdBy": err = unpopulate(val, "CreatedBy", &s.CreatedBy) @@ -28084,7 +28084,7 @@ func (s *SystemData) UnmarshalJSON(data []byte) error { err = unpopulate(val, "CreatedByType", &s.CreatedByType) delete(rawMsg, key) case "lastModifiedAt": - err = unpopulateTimeRFC3339(val, "LastModifiedAt", &s.LastModifiedAt) + err = unpopulateDateTimeRFC3339(val, "LastModifiedAt", &s.LastModifiedAt) delete(rawMsg, key) case "lastModifiedBy": err = unpopulate(val, "LastModifiedBy", &s.LastModifiedBy) @@ -28130,9 +28130,9 @@ func (t *TagsObject) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type Topology. 
func (t Topology) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populateTimeRFC3339(objectMap, "createdDateTime", t.CreatedDateTime) + populateDateTimeRFC3339(objectMap, "createdDateTime", t.CreatedDateTime) populate(objectMap, "id", t.ID) - populateTimeRFC3339(objectMap, "lastModified", t.LastModified) + populateDateTimeRFC3339(objectMap, "lastModified", t.LastModified) populate(objectMap, "resources", t.Resources) return json.Marshal(objectMap) } @@ -28147,13 +28147,13 @@ func (t *Topology) UnmarshalJSON(data []byte) error { var err error switch key { case "createdDateTime": - err = unpopulateTimeRFC3339(val, "CreatedDateTime", &t.CreatedDateTime) + err = unpopulateDateTimeRFC3339(val, "CreatedDateTime", &t.CreatedDateTime) delete(rawMsg, key) case "id": err = unpopulate(val, "ID", &t.ID) delete(rawMsg, key) case "lastModified": - err = unpopulateTimeRFC3339(val, "LastModified", &t.LastModified) + err = unpopulateDateTimeRFC3339(val, "LastModified", &t.LastModified) delete(rawMsg, key) case "resources": err = unpopulate(val, "Resources", &t.Resources) @@ -28524,9 +28524,9 @@ func (t *TroubleshootingRecommendedActions) UnmarshalJSON(data []byte) error { func (t TroubleshootingResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "code", t.Code) - populateTimeRFC3339(objectMap, "endTime", t.EndTime) + populateDateTimeRFC3339(objectMap, "endTime", t.EndTime) populate(objectMap, "results", t.Results) - populateTimeRFC3339(objectMap, "startTime", t.StartTime) + populateDateTimeRFC3339(objectMap, "startTime", t.StartTime) return json.Marshal(objectMap) } @@ -28543,13 +28543,13 @@ func (t *TroubleshootingResult) UnmarshalJSON(data []byte) error { err = unpopulate(val, "Code", &t.Code) delete(rawMsg, key) case "endTime": - err = unpopulateTimeRFC3339(val, "EndTime", &t.EndTime) + err = unpopulateDateTimeRFC3339(val, "EndTime", &t.EndTime) delete(rawMsg, key) case "results": err = unpopulate(val, "Results", &t.Results) delete(rawMsg, key) case "startTime": - err = unpopulateTimeRFC3339(val, "StartTime", &t.StartTime) + err = unpopulateDateTimeRFC3339(val, "StartTime", &t.StartTime) delete(rawMsg, key) } if err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/natgateways_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/natgateways_client.go index 01ebb2333..d7cf68098 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/natgateways_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/natgateways_client.go @@ -33,7 +33,7 @@ type NatGatewaysClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewNatGatewaysClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*NatGatewaysClient, error) { - cl, err := arm.NewClient(moduleName+".NatGatewaysClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *NatGatewaysClient) BeginCreateOrUpdate(ctx context.Context, resour } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[NatGatewaysClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[NatGatewaysClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[NatGatewaysClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *NatGatewaysClient) BeginCreateOrUpdate(ctx context.Context, resour // Generated from API version 2023-05-01 func (client *NatGatewaysClient) createOrUpdate(ctx context.Context, resourceGroupName string, natGatewayName string, parameters NatGateway, options *NatGatewaysClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "NatGatewaysClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, natGatewayName, parameters, options) if err != nil { return nil, err @@ -133,10 +140,13 @@ func (client *NatGatewaysClient) BeginDelete(ctx context.Context, resourceGroupN } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[NatGatewaysClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[NatGatewaysClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[NatGatewaysClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -146,6 +156,10 @@ func (client *NatGatewaysClient) BeginDelete(ctx context.Context, resourceGroupN // Generated from API version 2023-05-01 func (client *NatGatewaysClient) deleteOperation(ctx context.Context, resourceGroupName string, natGatewayName string, options *NatGatewaysClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "NatGatewaysClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, natGatewayName, options) if err != nil { return nil, err @@ -196,6 +210,10 @@ func (client *NatGatewaysClient) deleteCreateRequest(ctx context.Context, resour // - options - NatGatewaysClientGetOptions contains the optional parameters for the NatGatewaysClient.Get method. 
func (client *NatGatewaysClient) Get(ctx context.Context, resourceGroupName string, natGatewayName string, options *NatGatewaysClientGetOptions) (NatGatewaysClientGetResponse, error) { var err error + const operationName = "NatGatewaysClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, natGatewayName, options) if err != nil { return NatGatewaysClientGetResponse{}, err @@ -261,25 +279,20 @@ func (client *NatGatewaysClient) NewListPager(resourceGroupName string, options return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *NatGatewaysClientListResponse) (NatGatewaysClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return NatGatewaysClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "NatGatewaysClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return NatGatewaysClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return NatGatewaysClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -324,25 +337,20 @@ func (client *NatGatewaysClient) NewListAllPager(options *NatGatewaysClientListA return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *NatGatewaysClientListAllResponse) (NatGatewaysClientListAllResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listAllCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "NatGatewaysClient.NewListAllPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAllCreateRequest(ctx, options) + }, nil) if err != nil { return NatGatewaysClientListAllResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return NatGatewaysClientListAllResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return NatGatewaysClientListAllResponse{}, runtime.NewResponseError(resp) - } return client.listAllHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -383,6 +391,10 @@ func (client *NatGatewaysClient) listAllHandleResponse(resp *http.Response) (Nat // - options - NatGatewaysClientUpdateTagsOptions contains the optional parameters for the NatGatewaysClient.UpdateTags method. 
func (client *NatGatewaysClient) UpdateTags(ctx context.Context, resourceGroupName string, natGatewayName string, parameters TagsObject, options *NatGatewaysClientUpdateTagsOptions) (NatGatewaysClientUpdateTagsResponse, error) { var err error + const operationName = "NatGatewaysClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, natGatewayName, parameters, options) if err != nil { return NatGatewaysClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/natrules_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/natrules_client.go index 3ba041629..3bfc0b6ae 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/natrules_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/natrules_client.go @@ -33,7 +33,7 @@ type NatRulesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewNatRulesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*NatRulesClient, error) { - cl, err := arm.NewClient(moduleName+".NatRulesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *NatRulesClient) BeginCreateOrUpdate(ctx context.Context, resourceG } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[NatRulesClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[NatRulesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[NatRulesClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *NatRulesClient) BeginCreateOrUpdate(ctx context.Context, resourceG // Generated from API version 2023-05-01 func (client *NatRulesClient) createOrUpdate(ctx context.Context, resourceGroupName string, gatewayName string, natRuleName string, natRuleParameters VPNGatewayNatRule, options *NatRulesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "NatRulesClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, gatewayName, natRuleName, natRuleParameters, options) if err != nil { return nil, err @@ -139,10 +146,13 @@ func (client *NatRulesClient) BeginDelete(ctx context.Context, resourceGroupName } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[NatRulesClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return 
runtime.NewPollerFromResumeToken[NatRulesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[NatRulesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -152,6 +162,10 @@ func (client *NatRulesClient) BeginDelete(ctx context.Context, resourceGroupName // Generated from API version 2023-05-01 func (client *NatRulesClient) deleteOperation(ctx context.Context, resourceGroupName string, gatewayName string, natRuleName string, options *NatRulesClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "NatRulesClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, gatewayName, natRuleName, options) if err != nil { return nil, err @@ -207,6 +221,10 @@ func (client *NatRulesClient) deleteCreateRequest(ctx context.Context, resourceG // - options - NatRulesClientGetOptions contains the optional parameters for the NatRulesClient.Get method. func (client *NatRulesClient) Get(ctx context.Context, resourceGroupName string, gatewayName string, natRuleName string, options *NatRulesClientGetOptions) (NatRulesClientGetResponse, error) { var err error + const operationName = "NatRulesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, gatewayName, natRuleName, options) if err != nil { return NatRulesClientGetResponse{}, err @@ -275,25 +293,20 @@ func (client *NatRulesClient) NewListByVPNGatewayPager(resourceGroupName string, return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *NatRulesClientListByVPNGatewayResponse) (NatRulesClientListByVPNGatewayResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByVPNGatewayCreateRequest(ctx, resourceGroupName, gatewayName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return NatRulesClientListByVPNGatewayResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "NatRulesClient.NewListByVPNGatewayPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByVPNGatewayCreateRequest(ctx, resourceGroupName, gatewayName, options) + }, nil) if err != nil { return NatRulesClientListByVPNGatewayResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return NatRulesClientListByVPNGatewayResponse{}, runtime.NewResponseError(resp) - } return client.listByVPNGatewayHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/operations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/operations_client.go index 49c25ba18..3312c5ba4 100644 --- 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/operations_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/operations_client.go @@ -27,7 +27,7 @@ type OperationsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewOperationsClient(credential azcore.TokenCredential, options *arm.ClientOptions) (*OperationsClient, error) { - cl, err := arm.NewClient(moduleName+".OperationsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -47,25 +47,20 @@ func (client *OperationsClient) NewListPager(options *OperationsClientListOption return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *OperationsClientListResponse) (OperationsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "OperationsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) if err != nil { return OperationsClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return OperationsClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return OperationsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/p2svpngateways_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/p2svpngateways_client.go index 546cf35c6..112e1e5c9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/p2svpngateways_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/p2svpngateways_client.go @@ -33,7 +33,7 @@ type P2SVPNGatewaysClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewP2SVPNGatewaysClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*P2SVPNGatewaysClient, error) { - cl, err := arm.NewClient(moduleName+".P2SVPNGatewaysClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *P2SVPNGatewaysClient) BeginCreateOrUpdate(ctx context.Context, res } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[P2SVPNGatewaysClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[P2SVPNGatewaysClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[P2SVPNGatewaysClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *P2SVPNGatewaysClient) BeginCreateOrUpdate(ctx context.Context, res // Generated from API version 2023-05-01 func (client *P2SVPNGatewaysClient) createOrUpdate(ctx context.Context, resourceGroupName string, gatewayName string, p2SVPNGatewayParameters P2SVPNGateway, options *P2SVPNGatewaysClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "P2SVPNGatewaysClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, gatewayName, p2SVPNGatewayParameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *P2SVPNGatewaysClient) BeginDelete(ctx context.Context, resourceGro } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[P2SVPNGatewaysClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[P2SVPNGatewaysClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[P2SVPNGatewaysClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *P2SVPNGatewaysClient) BeginDelete(ctx context.Context, resourceGro // Generated from API version 2023-05-01 func (client *P2SVPNGatewaysClient) deleteOperation(ctx context.Context, resourceGroupName string, gatewayName string, options *P2SVPNGatewaysClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "P2SVPNGatewaysClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, gatewayName, options) if err != nil { return nil, err @@ -206,10 +220,13 @@ func (client *P2SVPNGatewaysClient) BeginDisconnectP2SVPNConnections(ctx context } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[P2SVPNGatewaysClientDisconnectP2SVPNConnectionsResponse]{ 
FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[P2SVPNGatewaysClientDisconnectP2SVPNConnectionsResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[P2SVPNGatewaysClientDisconnectP2SVPNConnectionsResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -220,6 +237,10 @@ func (client *P2SVPNGatewaysClient) BeginDisconnectP2SVPNConnections(ctx context // Generated from API version 2023-05-01 func (client *P2SVPNGatewaysClient) disconnectP2SVPNConnections(ctx context.Context, resourceGroupName string, p2SVPNGatewayName string, request P2SVPNConnectionRequest, options *P2SVPNGatewaysClientBeginDisconnectP2SVPNConnectionsOptions) (*http.Response, error) { var err error + const operationName = "P2SVPNGatewaysClient.BeginDisconnectP2SVPNConnections" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.disconnectP2SVPNConnectionsCreateRequest(ctx, resourceGroupName, p2SVPNGatewayName, request, options) if err != nil { return nil, err @@ -281,10 +302,13 @@ func (client *P2SVPNGatewaysClient) BeginGenerateVPNProfile(ctx context.Context, } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[P2SVPNGatewaysClientGenerateVPNProfileResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[P2SVPNGatewaysClientGenerateVPNProfileResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[P2SVPNGatewaysClientGenerateVPNProfileResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -294,6 +318,10 @@ func (client *P2SVPNGatewaysClient) BeginGenerateVPNProfile(ctx context.Context, // Generated from API version 2023-05-01 func (client *P2SVPNGatewaysClient) generateVPNProfile(ctx context.Context, resourceGroupName string, gatewayName string, parameters P2SVPNProfileParameters, options *P2SVPNGatewaysClientBeginGenerateVPNProfileOptions) (*http.Response, error) { var err error + const operationName = "P2SVPNGatewaysClient.BeginGenerateVPNProfile" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.generateVPNProfileCreateRequest(ctx, resourceGroupName, gatewayName, parameters, options) if err != nil { return nil, err @@ -347,6 +375,10 @@ func (client *P2SVPNGatewaysClient) generateVPNProfileCreateRequest(ctx context. // - options - P2SVPNGatewaysClientGetOptions contains the optional parameters for the P2SVPNGatewaysClient.Get method. 
func (client *P2SVPNGatewaysClient) Get(ctx context.Context, resourceGroupName string, gatewayName string, options *P2SVPNGatewaysClientGetOptions) (P2SVPNGatewaysClientGetResponse, error) { var err error + const operationName = "P2SVPNGatewaysClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, gatewayName, options) if err != nil { return P2SVPNGatewaysClientGetResponse{}, err @@ -415,10 +447,13 @@ func (client *P2SVPNGatewaysClient) BeginGetP2SVPNConnectionHealth(ctx context.C } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[P2SVPNGatewaysClientGetP2SVPNConnectionHealthResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[P2SVPNGatewaysClientGetP2SVPNConnectionHealthResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[P2SVPNGatewaysClientGetP2SVPNConnectionHealthResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -429,6 +464,10 @@ func (client *P2SVPNGatewaysClient) BeginGetP2SVPNConnectionHealth(ctx context.C // Generated from API version 2023-05-01 func (client *P2SVPNGatewaysClient) getP2SVPNConnectionHealth(ctx context.Context, resourceGroupName string, gatewayName string, options *P2SVPNGatewaysClientBeginGetP2SVPNConnectionHealthOptions) (*http.Response, error) { var err error + const operationName = "P2SVPNGatewaysClient.BeginGetP2SVPNConnectionHealth" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getP2SVPNConnectionHealthCreateRequest(ctx, resourceGroupName, gatewayName, options) if err != nil { return nil, err @@ -488,10 +527,13 @@ func (client *P2SVPNGatewaysClient) BeginGetP2SVPNConnectionHealthDetailed(ctx c } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[P2SVPNGatewaysClientGetP2SVPNConnectionHealthDetailedResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[P2SVPNGatewaysClientGetP2SVPNConnectionHealthDetailedResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[P2SVPNGatewaysClientGetP2SVPNConnectionHealthDetailedResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -502,6 +544,10 @@ func (client *P2SVPNGatewaysClient) BeginGetP2SVPNConnectionHealthDetailed(ctx c // Generated from API version 2023-05-01 func (client *P2SVPNGatewaysClient) getP2SVPNConnectionHealthDetailed(ctx context.Context, resourceGroupName string, gatewayName string, request P2SVPNConnectionHealthRequest, options *P2SVPNGatewaysClientBeginGetP2SVPNConnectionHealthDetailedOptions) (*http.Response, error) { var err error + const operationName = "P2SVPNGatewaysClient.BeginGetP2SVPNConnectionHealthDetailed" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, 
client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getP2SVPNConnectionHealthDetailedCreateRequest(ctx, resourceGroupName, gatewayName, request, options) if err != nil { return nil, err @@ -556,25 +602,20 @@ func (client *P2SVPNGatewaysClient) NewListPager(options *P2SVPNGatewaysClientLi return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *P2SVPNGatewaysClientListResponse) (P2SVPNGatewaysClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return P2SVPNGatewaysClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "P2SVPNGatewaysClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) if err != nil { return P2SVPNGatewaysClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return P2SVPNGatewaysClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -617,25 +658,20 @@ func (client *P2SVPNGatewaysClient) NewListByResourceGroupPager(resourceGroupNam return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *P2SVPNGatewaysClientListByResourceGroupResponse) (P2SVPNGatewaysClientListByResourceGroupResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return P2SVPNGatewaysClientListByResourceGroupResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "P2SVPNGatewaysClient.NewListByResourceGroupPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return P2SVPNGatewaysClientListByResourceGroupResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return P2SVPNGatewaysClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) - } return client.listByResourceGroupHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -686,10 +722,13 @@ func (client *P2SVPNGatewaysClient) BeginReset(ctx context.Context, resourceGrou } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[P2SVPNGatewaysClientResetResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[P2SVPNGatewaysClientResetResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[P2SVPNGatewaysClientResetResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -699,6 
+738,10 @@ func (client *P2SVPNGatewaysClient) BeginReset(ctx context.Context, resourceGrou // Generated from API version 2023-05-01 func (client *P2SVPNGatewaysClient) reset(ctx context.Context, resourceGroupName string, gatewayName string, options *P2SVPNGatewaysClientBeginResetOptions) (*http.Response, error) { var err error + const operationName = "P2SVPNGatewaysClient.BeginReset" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.resetCreateRequest(ctx, resourceGroupName, gatewayName, options) if err != nil { return nil, err @@ -757,10 +800,13 @@ func (client *P2SVPNGatewaysClient) BeginUpdateTags(ctx context.Context, resourc } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[P2SVPNGatewaysClientUpdateTagsResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[P2SVPNGatewaysClientUpdateTagsResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[P2SVPNGatewaysClientUpdateTagsResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -770,6 +816,10 @@ func (client *P2SVPNGatewaysClient) BeginUpdateTags(ctx context.Context, resourc // Generated from API version 2023-05-01 func (client *P2SVPNGatewaysClient) updateTags(ctx context.Context, resourceGroupName string, gatewayName string, p2SVPNGatewayParameters TagsObject, options *P2SVPNGatewaysClientBeginUpdateTagsOptions) (*http.Response, error) { var err error + const operationName = "P2SVPNGatewaysClient.BeginUpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, gatewayName, p2SVPNGatewayParameters, options) if err != nil { return nil, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/packetcaptures_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/packetcaptures_client.go index 913af957f..3b19453fb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/packetcaptures_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/packetcaptures_client.go @@ -33,7 +33,7 @@ type PacketCapturesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewPacketCapturesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*PacketCapturesClient, error) { - cl, err := arm.NewClient(moduleName+".PacketCapturesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *PacketCapturesClient) BeginCreate(ctx context.Context, resourceGro } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PacketCapturesClientCreateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[PacketCapturesClientCreateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[PacketCapturesClientCreateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *PacketCapturesClient) BeginCreate(ctx context.Context, resourceGro // Generated from API version 2023-05-01 func (client *PacketCapturesClient) create(ctx context.Context, resourceGroupName string, networkWatcherName string, packetCaptureName string, parameters PacketCapture, options *PacketCapturesClientBeginCreateOptions) (*http.Response, error) { var err error + const operationName = "PacketCapturesClient.BeginCreate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createCreateRequest(ctx, resourceGroupName, networkWatcherName, packetCaptureName, parameters, options) if err != nil { return nil, err @@ -140,10 +147,13 @@ func (client *PacketCapturesClient) BeginDelete(ctx context.Context, resourceGro } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PacketCapturesClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[PacketCapturesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[PacketCapturesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -153,6 +163,10 @@ func (client *PacketCapturesClient) BeginDelete(ctx context.Context, resourceGro // Generated from API version 2023-05-01 func (client *PacketCapturesClient) deleteOperation(ctx context.Context, resourceGroupName string, networkWatcherName string, packetCaptureName string, options *PacketCapturesClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "PacketCapturesClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, networkWatcherName, packetCaptureName, options) if err != nil { return nil, err @@ -208,6 +222,10 @@ func (client *PacketCapturesClient) deleteCreateRequest(ctx context.Context, res // - options - PacketCapturesClientGetOptions contains the optional parameters for the PacketCapturesClient.Get method. 
func (client *PacketCapturesClient) Get(ctx context.Context, resourceGroupName string, networkWatcherName string, packetCaptureName string, options *PacketCapturesClientGetOptions) (PacketCapturesClientGetResponse, error) { var err error + const operationName = "PacketCapturesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, networkWatcherName, packetCaptureName, options) if err != nil { return PacketCapturesClientGetResponse{}, err @@ -280,10 +298,13 @@ func (client *PacketCapturesClient) BeginGetStatus(ctx context.Context, resource } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PacketCapturesClientGetStatusResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[PacketCapturesClientGetStatusResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[PacketCapturesClientGetStatusResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -293,6 +314,10 @@ func (client *PacketCapturesClient) BeginGetStatus(ctx context.Context, resource // Generated from API version 2023-05-01 func (client *PacketCapturesClient) getStatus(ctx context.Context, resourceGroupName string, networkWatcherName string, packetCaptureName string, options *PacketCapturesClientBeginGetStatusOptions) (*http.Response, error) { var err error + const operationName = "PacketCapturesClient.BeginGetStatus" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getStatusCreateRequest(ctx, resourceGroupName, networkWatcherName, packetCaptureName, options) if err != nil { return nil, err @@ -350,6 +375,7 @@ func (client *PacketCapturesClient) NewListPager(resourceGroupName string, netwo return false }, Fetcher: func(ctx context.Context, page *PacketCapturesClientListResponse) (PacketCapturesClientListResponse, error) { + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "PacketCapturesClient.NewListPager") req, err := client.listCreateRequest(ctx, resourceGroupName, networkWatcherName, options) if err != nil { return PacketCapturesClientListResponse{}, err @@ -363,6 +389,7 @@ func (client *PacketCapturesClient) NewListPager(resourceGroupName string, netwo } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -418,10 +445,13 @@ func (client *PacketCapturesClient) BeginStop(ctx context.Context, resourceGroup } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PacketCapturesClientStopResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[PacketCapturesClientStopResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[PacketCapturesClientStopResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -431,6 +461,10 @@ func (client *PacketCapturesClient) 
BeginStop(ctx context.Context, resourceGroup
 // Generated from API version 2023-05-01
 func (client *PacketCapturesClient) stop(ctx context.Context, resourceGroupName string, networkWatcherName string, packetCaptureName string, options *PacketCapturesClientBeginStopOptions) (*http.Response, error) {
 	var err error
+	const operationName = "PacketCapturesClient.BeginStop"
+	ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName)
+	ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil)
+	defer func() { endSpan(err) }()
 	req, err := client.stopCreateRequest(ctx, resourceGroupName, networkWatcherName, packetCaptureName, options)
 	if err != nil {
 		return nil, err
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/peerexpressroutecircuitconnections_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/peerexpressroutecircuitconnections_client.go
index b768b6bd5..d8a87d68c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/peerexpressroutecircuitconnections_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/peerexpressroutecircuitconnections_client.go
@@ -33,7 +33,7 @@ type PeerExpressRouteCircuitConnectionsClient struct {
 //   - credential - used to authorize requests. Usually a credential from azidentity.
 //   - options - pass nil to accept the default values.
 func NewPeerExpressRouteCircuitConnectionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*PeerExpressRouteCircuitConnectionsClient, error) {
-	cl, err := arm.NewClient(moduleName+".PeerExpressRouteCircuitConnectionsClient", moduleVersion, credential, options)
+	cl, err := arm.NewClient(moduleName, moduleVersion, credential, options)
 	if err != nil {
 		return nil, err
 	}
@@ -56,6 +56,10 @@ func NewPeerExpressRouteCircuitConnectionsClient(subscriptionID string, credenti
 // method.
func (client *PeerExpressRouteCircuitConnectionsClient) Get(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, connectionName string, options *PeerExpressRouteCircuitConnectionsClientGetOptions) (PeerExpressRouteCircuitConnectionsClientGetResponse, error) { var err error + const operationName = "PeerExpressRouteCircuitConnectionsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, circuitName, peeringName, connectionName, options) if err != nil { return PeerExpressRouteCircuitConnectionsClientGetResponse{}, err @@ -129,25 +133,20 @@ func (client *PeerExpressRouteCircuitConnectionsClient) NewListPager(resourceGro return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *PeerExpressRouteCircuitConnectionsClientListResponse) (PeerExpressRouteCircuitConnectionsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, circuitName, peeringName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "PeerExpressRouteCircuitConnectionsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, circuitName, peeringName, options) + }, nil) if err != nil { return PeerExpressRouteCircuitConnectionsClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return PeerExpressRouteCircuitConnectionsClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PeerExpressRouteCircuitConnectionsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/privatednszonegroups_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/privatednszonegroups_client.go index 917f1d200..e4177612b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/privatednszonegroups_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/privatednszonegroups_client.go @@ -33,7 +33,7 @@ type PrivateDNSZoneGroupsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewPrivateDNSZoneGroupsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*PrivateDNSZoneGroupsClient, error) { - cl, err := arm.NewClient(moduleName+".PrivateDNSZoneGroupsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *PrivateDNSZoneGroupsClient) BeginCreateOrUpdate(ctx context.Contex } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PrivateDNSZoneGroupsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[PrivateDNSZoneGroupsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[PrivateDNSZoneGroupsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *PrivateDNSZoneGroupsClient) BeginCreateOrUpdate(ctx context.Contex // Generated from API version 2023-05-01 func (client *PrivateDNSZoneGroupsClient) createOrUpdate(ctx context.Context, resourceGroupName string, privateEndpointName string, privateDNSZoneGroupName string, parameters PrivateDNSZoneGroup, options *PrivateDNSZoneGroupsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "PrivateDNSZoneGroupsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, privateEndpointName, privateDNSZoneGroupName, parameters, options) if err != nil { return nil, err @@ -140,10 +147,13 @@ func (client *PrivateDNSZoneGroupsClient) BeginDelete(ctx context.Context, resou } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PrivateDNSZoneGroupsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[PrivateDNSZoneGroupsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[PrivateDNSZoneGroupsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -153,6 +163,10 @@ func (client *PrivateDNSZoneGroupsClient) BeginDelete(ctx context.Context, resou // Generated from API version 2023-05-01 func (client *PrivateDNSZoneGroupsClient) deleteOperation(ctx context.Context, resourceGroupName string, privateEndpointName string, privateDNSZoneGroupName string, options *PrivateDNSZoneGroupsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "PrivateDNSZoneGroupsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, privateEndpointName, privateDNSZoneGroupName, options) if err != nil { return nil, err @@ -209,6 +223,10 @@ func (client 
*PrivateDNSZoneGroupsClient) deleteCreateRequest(ctx context.Contex // method. func (client *PrivateDNSZoneGroupsClient) Get(ctx context.Context, resourceGroupName string, privateEndpointName string, privateDNSZoneGroupName string, options *PrivateDNSZoneGroupsClientGetOptions) (PrivateDNSZoneGroupsClientGetResponse, error) { var err error + const operationName = "PrivateDNSZoneGroupsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, privateEndpointName, privateDNSZoneGroupName, options) if err != nil { return PrivateDNSZoneGroupsClientGetResponse{}, err @@ -277,25 +295,20 @@ func (client *PrivateDNSZoneGroupsClient) NewListPager(privateEndpointName strin return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *PrivateDNSZoneGroupsClientListResponse) (PrivateDNSZoneGroupsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, privateEndpointName, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return PrivateDNSZoneGroupsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "PrivateDNSZoneGroupsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, privateEndpointName, resourceGroupName, options) + }, nil) if err != nil { return PrivateDNSZoneGroupsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PrivateDNSZoneGroupsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/privateendpoints_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/privateendpoints_client.go index eda505d6c..496153f08 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/privateendpoints_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/privateendpoints_client.go @@ -33,7 +33,7 @@ type PrivateEndpointsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewPrivateEndpointsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*PrivateEndpointsClient, error) { - cl, err := arm.NewClient(moduleName+".PrivateEndpointsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *PrivateEndpointsClient) BeginCreateOrUpdate(ctx context.Context, r } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PrivateEndpointsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[PrivateEndpointsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[PrivateEndpointsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *PrivateEndpointsClient) BeginCreateOrUpdate(ctx context.Context, r // Generated from API version 2023-05-01 func (client *PrivateEndpointsClient) createOrUpdate(ctx context.Context, resourceGroupName string, privateEndpointName string, parameters PrivateEndpoint, options *PrivateEndpointsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "PrivateEndpointsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, privateEndpointName, parameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *PrivateEndpointsClient) BeginDelete(ctx context.Context, resourceG } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PrivateEndpointsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[PrivateEndpointsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[PrivateEndpointsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *PrivateEndpointsClient) BeginDelete(ctx context.Context, resourceG // Generated from API version 2023-05-01 func (client *PrivateEndpointsClient) deleteOperation(ctx context.Context, resourceGroupName string, privateEndpointName string, options *PrivateEndpointsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "PrivateEndpointsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, privateEndpointName, options) if err != nil { return nil, err @@ -197,6 +211,10 @@ func (client *PrivateEndpointsClient) deleteCreateRequest(ctx context.Context, r // - options - PrivateEndpointsClientGetOptions contains the optional parameters for the PrivateEndpointsClient.Get 
method. func (client *PrivateEndpointsClient) Get(ctx context.Context, resourceGroupName string, privateEndpointName string, options *PrivateEndpointsClientGetOptions) (PrivateEndpointsClientGetResponse, error) { var err error + const operationName = "PrivateEndpointsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, privateEndpointName, options) if err != nil { return PrivateEndpointsClientGetResponse{}, err @@ -263,25 +281,20 @@ func (client *PrivateEndpointsClient) NewListPager(resourceGroupName string, opt return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *PrivateEndpointsClientListResponse) (PrivateEndpointsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return PrivateEndpointsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "PrivateEndpointsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return PrivateEndpointsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PrivateEndpointsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -327,25 +340,20 @@ func (client *PrivateEndpointsClient) NewListBySubscriptionPager(options *Privat return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *PrivateEndpointsClientListBySubscriptionResponse) (PrivateEndpointsClientListBySubscriptionResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listBySubscriptionCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "PrivateEndpointsClient.NewListBySubscriptionPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listBySubscriptionCreateRequest(ctx, options) + }, nil) if err != nil { return PrivateEndpointsClientListBySubscriptionResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return PrivateEndpointsClientListBySubscriptionResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PrivateEndpointsClientListBySubscriptionResponse{}, runtime.NewResponseError(resp) - } return client.listBySubscriptionHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/privatelinkservices_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/privatelinkservices_client.go index b8574fe6f..703fda2d2 100644 --- 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/privatelinkservices_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/privatelinkservices_client.go @@ -33,7 +33,7 @@ type PrivateLinkServicesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewPrivateLinkServicesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*PrivateLinkServicesClient, error) { - cl, err := arm.NewClient(moduleName+".PrivateLinkServicesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -60,10 +60,13 @@ func (client *PrivateLinkServicesClient) BeginCheckPrivateLinkServiceVisibility( } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PrivateLinkServicesClientCheckPrivateLinkServiceVisibilityResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[PrivateLinkServicesClientCheckPrivateLinkServiceVisibilityResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[PrivateLinkServicesClientCheckPrivateLinkServiceVisibilityResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -73,6 +76,10 @@ func (client *PrivateLinkServicesClient) BeginCheckPrivateLinkServiceVisibility( // Generated from API version 2023-05-01 func (client *PrivateLinkServicesClient) checkPrivateLinkServiceVisibility(ctx context.Context, location string, parameters CheckPrivateLinkServiceVisibilityRequest, options *PrivateLinkServicesClientBeginCheckPrivateLinkServiceVisibilityOptions) (*http.Response, error) { var err error + const operationName = "PrivateLinkServicesClient.BeginCheckPrivateLinkServiceVisibility" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.checkPrivateLinkServiceVisibilityCreateRequest(ctx, location, parameters, options) if err != nil { return nil, err @@ -131,10 +138,13 @@ func (client *PrivateLinkServicesClient) BeginCheckPrivateLinkServiceVisibilityB } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PrivateLinkServicesClientCheckPrivateLinkServiceVisibilityByResourceGroupResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[PrivateLinkServicesClientCheckPrivateLinkServiceVisibilityByResourceGroupResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[PrivateLinkServicesClientCheckPrivateLinkServiceVisibilityByResourceGroupResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -145,6 +155,10 @@ func (client *PrivateLinkServicesClient) BeginCheckPrivateLinkServiceVisibilityB // Generated from API version 2023-05-01 func (client *PrivateLinkServicesClient) checkPrivateLinkServiceVisibilityByResourceGroup(ctx context.Context, location string, 
resourceGroupName string, parameters CheckPrivateLinkServiceVisibilityRequest, options *PrivateLinkServicesClientBeginCheckPrivateLinkServiceVisibilityByResourceGroupOptions) (*http.Response, error) { var err error + const operationName = "PrivateLinkServicesClient.BeginCheckPrivateLinkServiceVisibilityByResourceGroup" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.checkPrivateLinkServiceVisibilityByResourceGroupCreateRequest(ctx, location, resourceGroupName, parameters, options) if err != nil { return nil, err @@ -206,10 +220,13 @@ func (client *PrivateLinkServicesClient) BeginCreateOrUpdate(ctx context.Context } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PrivateLinkServicesClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[PrivateLinkServicesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[PrivateLinkServicesClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -219,6 +236,10 @@ func (client *PrivateLinkServicesClient) BeginCreateOrUpdate(ctx context.Context // Generated from API version 2023-05-01 func (client *PrivateLinkServicesClient) createOrUpdate(ctx context.Context, resourceGroupName string, serviceName string, parameters PrivateLinkService, options *PrivateLinkServicesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "PrivateLinkServicesClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, serviceName, parameters, options) if err != nil { return nil, err @@ -279,10 +300,13 @@ func (client *PrivateLinkServicesClient) BeginDelete(ctx context.Context, resour } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PrivateLinkServicesClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[PrivateLinkServicesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[PrivateLinkServicesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -292,6 +316,10 @@ func (client *PrivateLinkServicesClient) BeginDelete(ctx context.Context, resour // Generated from API version 2023-05-01 func (client *PrivateLinkServicesClient) deleteOperation(ctx context.Context, resourceGroupName string, serviceName string, options *PrivateLinkServicesClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "PrivateLinkServicesClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err 
:= client.deleteCreateRequest(ctx, resourceGroupName, serviceName, options) if err != nil { return nil, err @@ -350,10 +378,13 @@ func (client *PrivateLinkServicesClient) BeginDeletePrivateEndpointConnection(ct } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PrivateLinkServicesClientDeletePrivateEndpointConnectionResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[PrivateLinkServicesClientDeletePrivateEndpointConnectionResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[PrivateLinkServicesClientDeletePrivateEndpointConnectionResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -363,6 +394,10 @@ func (client *PrivateLinkServicesClient) BeginDeletePrivateEndpointConnection(ct // Generated from API version 2023-05-01 func (client *PrivateLinkServicesClient) deletePrivateEndpointConnection(ctx context.Context, resourceGroupName string, serviceName string, peConnectionName string, options *PrivateLinkServicesClientBeginDeletePrivateEndpointConnectionOptions) (*http.Response, error) { var err error + const operationName = "PrivateLinkServicesClient.BeginDeletePrivateEndpointConnection" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deletePrivateEndpointConnectionCreateRequest(ctx, resourceGroupName, serviceName, peConnectionName, options) if err != nil { return nil, err @@ -417,6 +452,10 @@ func (client *PrivateLinkServicesClient) deletePrivateEndpointConnectionCreateRe // - options - PrivateLinkServicesClientGetOptions contains the optional parameters for the PrivateLinkServicesClient.Get method. func (client *PrivateLinkServicesClient) Get(ctx context.Context, resourceGroupName string, serviceName string, options *PrivateLinkServicesClientGetOptions) (PrivateLinkServicesClientGetResponse, error) { var err error + const operationName = "PrivateLinkServicesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, serviceName, options) if err != nil { return PrivateLinkServicesClientGetResponse{}, err @@ -483,6 +522,10 @@ func (client *PrivateLinkServicesClient) getHandleResponse(resp *http.Response) // method. 
func (client *PrivateLinkServicesClient) GetPrivateEndpointConnection(ctx context.Context, resourceGroupName string, serviceName string, peConnectionName string, options *PrivateLinkServicesClientGetPrivateEndpointConnectionOptions) (PrivateLinkServicesClientGetPrivateEndpointConnectionResponse, error) { var err error + const operationName = "PrivateLinkServicesClient.GetPrivateEndpointConnection" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getPrivateEndpointConnectionCreateRequest(ctx, resourceGroupName, serviceName, peConnectionName, options) if err != nil { return PrivateLinkServicesClientGetPrivateEndpointConnectionResponse{}, err @@ -553,25 +596,20 @@ func (client *PrivateLinkServicesClient) NewListPager(resourceGroupName string, return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *PrivateLinkServicesClientListResponse) (PrivateLinkServicesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "PrivateLinkServicesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return PrivateLinkServicesClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return PrivateLinkServicesClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PrivateLinkServicesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -619,25 +657,20 @@ func (client *PrivateLinkServicesClient) NewListAutoApprovedPrivateLinkServicesP return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *PrivateLinkServicesClientListAutoApprovedPrivateLinkServicesResponse) (PrivateLinkServicesClientListAutoApprovedPrivateLinkServicesResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listAutoApprovedPrivateLinkServicesCreateRequest(ctx, location, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return PrivateLinkServicesClientListAutoApprovedPrivateLinkServicesResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "PrivateLinkServicesClient.NewListAutoApprovedPrivateLinkServicesPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAutoApprovedPrivateLinkServicesCreateRequest(ctx, location, options) + }, nil) if err != nil { return PrivateLinkServicesClientListAutoApprovedPrivateLinkServicesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PrivateLinkServicesClientListAutoApprovedPrivateLinkServicesResponse{}, runtime.NewResponseError(resp) - } return 
client.listAutoApprovedPrivateLinkServicesHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -686,25 +719,20 @@ func (client *PrivateLinkServicesClient) NewListAutoApprovedPrivateLinkServicesB return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *PrivateLinkServicesClientListAutoApprovedPrivateLinkServicesByResourceGroupResponse) (PrivateLinkServicesClientListAutoApprovedPrivateLinkServicesByResourceGroupResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listAutoApprovedPrivateLinkServicesByResourceGroupCreateRequest(ctx, location, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "PrivateLinkServicesClient.NewListAutoApprovedPrivateLinkServicesByResourceGroupPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAutoApprovedPrivateLinkServicesByResourceGroupCreateRequest(ctx, location, resourceGroupName, options) + }, nil) if err != nil { return PrivateLinkServicesClientListAutoApprovedPrivateLinkServicesByResourceGroupResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return PrivateLinkServicesClientListAutoApprovedPrivateLinkServicesByResourceGroupResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PrivateLinkServicesClientListAutoApprovedPrivateLinkServicesByResourceGroupResponse{}, runtime.NewResponseError(resp) - } return client.listAutoApprovedPrivateLinkServicesByResourceGroupHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -754,25 +782,20 @@ func (client *PrivateLinkServicesClient) NewListBySubscriptionPager(options *Pri return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *PrivateLinkServicesClientListBySubscriptionResponse) (PrivateLinkServicesClientListBySubscriptionResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listBySubscriptionCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return PrivateLinkServicesClientListBySubscriptionResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "PrivateLinkServicesClient.NewListBySubscriptionPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listBySubscriptionCreateRequest(ctx, options) + }, nil) if err != nil { return PrivateLinkServicesClientListBySubscriptionResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PrivateLinkServicesClientListBySubscriptionResponse{}, runtime.NewResponseError(resp) - } return client.listBySubscriptionHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -816,25 +839,20 @@ func (client *PrivateLinkServicesClient) NewListPrivateEndpointConnectionsPager( return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *PrivateLinkServicesClientListPrivateEndpointConnectionsResponse) (PrivateLinkServicesClientListPrivateEndpointConnectionsResponse, 
error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listPrivateEndpointConnectionsCreateRequest(ctx, resourceGroupName, serviceName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return PrivateLinkServicesClientListPrivateEndpointConnectionsResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "PrivateLinkServicesClient.NewListPrivateEndpointConnectionsPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listPrivateEndpointConnectionsCreateRequest(ctx, resourceGroupName, serviceName, options) + }, nil) if err != nil { return PrivateLinkServicesClientListPrivateEndpointConnectionsResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PrivateLinkServicesClientListPrivateEndpointConnectionsResponse{}, runtime.NewResponseError(resp) - } return client.listPrivateEndpointConnectionsHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -885,6 +903,10 @@ func (client *PrivateLinkServicesClient) listPrivateEndpointConnectionsHandleRes // method. func (client *PrivateLinkServicesClient) UpdatePrivateEndpointConnection(ctx context.Context, resourceGroupName string, serviceName string, peConnectionName string, parameters PrivateEndpointConnection, options *PrivateLinkServicesClientUpdatePrivateEndpointConnectionOptions) (PrivateLinkServicesClientUpdatePrivateEndpointConnectionResponse, error) { var err error + const operationName = "PrivateLinkServicesClient.UpdatePrivateEndpointConnection" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updatePrivateEndpointConnectionCreateRequest(ctx, resourceGroupName, serviceName, peConnectionName, parameters, options) if err != nil { return PrivateLinkServicesClientUpdatePrivateEndpointConnectionResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/profiles_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/profiles_client.go index eb4a9d028..729f1c036 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/profiles_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/profiles_client.go @@ -33,7 +33,7 @@ type ProfilesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewProfilesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ProfilesClient, error) { - cl, err := arm.NewClient(moduleName+".ProfilesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -54,6 +54,10 @@ func NewProfilesClient(subscriptionID string, credential azcore.TokenCredential, // - options - ProfilesClientCreateOrUpdateOptions contains the optional parameters for the ProfilesClient.CreateOrUpdate method. 
func (client *ProfilesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkProfileName string, parameters Profile, options *ProfilesClientCreateOrUpdateOptions) (ProfilesClientCreateOrUpdateResponse, error) { var err error + const operationName = "ProfilesClient.CreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, networkProfileName, parameters, options) if err != nil { return ProfilesClientCreateOrUpdateResponse{}, err @@ -123,10 +127,13 @@ func (client *ProfilesClient) BeginDelete(ctx context.Context, resourceGroupName } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ProfilesClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ProfilesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ProfilesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -136,6 +143,10 @@ func (client *ProfilesClient) BeginDelete(ctx context.Context, resourceGroupName // Generated from API version 2023-05-01 func (client *ProfilesClient) deleteOperation(ctx context.Context, resourceGroupName string, networkProfileName string, options *ProfilesClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "ProfilesClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, networkProfileName, options) if err != nil { return nil, err @@ -186,6 +197,10 @@ func (client *ProfilesClient) deleteCreateRequest(ctx context.Context, resourceG // - options - ProfilesClientGetOptions contains the optional parameters for the ProfilesClient.Get method. 
func (client *ProfilesClient) Get(ctx context.Context, resourceGroupName string, networkProfileName string, options *ProfilesClientGetOptions) (ProfilesClientGetResponse, error) { var err error + const operationName = "ProfilesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, networkProfileName, options) if err != nil { return ProfilesClientGetResponse{}, err @@ -251,25 +266,20 @@ func (client *ProfilesClient) NewListPager(resourceGroupName string, options *Pr return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ProfilesClientListResponse) (ProfilesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ProfilesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return ProfilesClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ProfilesClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ProfilesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -314,25 +324,20 @@ func (client *ProfilesClient) NewListAllPager(options *ProfilesClientListAllOpti return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ProfilesClientListAllResponse) (ProfilesClientListAllResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listAllCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ProfilesClient.NewListAllPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAllCreateRequest(ctx, options) + }, nil) if err != nil { return ProfilesClientListAllResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ProfilesClientListAllResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ProfilesClientListAllResponse{}, runtime.NewResponseError(resp) - } return client.listAllHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -373,6 +378,10 @@ func (client *ProfilesClient) listAllHandleResponse(resp *http.Response) (Profil // - options - ProfilesClientUpdateTagsOptions contains the optional parameters for the ProfilesClient.UpdateTags method. 
func (client *ProfilesClient) UpdateTags(ctx context.Context, resourceGroupName string, networkProfileName string, parameters TagsObject, options *ProfilesClientUpdateTagsOptions) (ProfilesClientUpdateTagsResponse, error) { var err error + const operationName = "ProfilesClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, networkProfileName, parameters, options) if err != nil { return ProfilesClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/publicipaddresses_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/publicipaddresses_client.go index bdfaa98b7..62d60e062 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/publicipaddresses_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/publicipaddresses_client.go @@ -33,7 +33,7 @@ type PublicIPAddressesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewPublicIPAddressesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*PublicIPAddressesClient, error) { - cl, err := arm.NewClient(moduleName+".PublicIPAddressesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *PublicIPAddressesClient) BeginCreateOrUpdate(ctx context.Context, } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PublicIPAddressesClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[PublicIPAddressesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[PublicIPAddressesClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *PublicIPAddressesClient) BeginCreateOrUpdate(ctx context.Context, // Generated from API version 2023-05-01 func (client *PublicIPAddressesClient) createOrUpdate(ctx context.Context, resourceGroupName string, publicIPAddressName string, parameters PublicIPAddress, options *PublicIPAddressesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "PublicIPAddressesClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, publicIPAddressName, parameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *PublicIPAddressesClient) BeginDdosProtectionStatus(ctx context.Con } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PublicIPAddressesClientDdosProtectionStatusResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + 
Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[PublicIPAddressesClientDdosProtectionStatusResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[PublicIPAddressesClientDdosProtectionStatusResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *PublicIPAddressesClient) BeginDdosProtectionStatus(ctx context.Con // Generated from API version 2023-05-01 func (client *PublicIPAddressesClient) ddosProtectionStatus(ctx context.Context, resourceGroupName string, publicIPAddressName string, options *PublicIPAddressesClientBeginDdosProtectionStatusOptions) (*http.Response, error) { var err error + const operationName = "PublicIPAddressesClient.BeginDdosProtectionStatus" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.ddosProtectionStatusCreateRequest(ctx, resourceGroupName, publicIPAddressName, options) if err != nil { return nil, err @@ -204,10 +218,13 @@ func (client *PublicIPAddressesClient) BeginDelete(ctx context.Context, resource } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PublicIPAddressesClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[PublicIPAddressesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[PublicIPAddressesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -217,6 +234,10 @@ func (client *PublicIPAddressesClient) BeginDelete(ctx context.Context, resource // Generated from API version 2023-05-01 func (client *PublicIPAddressesClient) deleteOperation(ctx context.Context, resourceGroupName string, publicIPAddressName string, options *PublicIPAddressesClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "PublicIPAddressesClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, publicIPAddressName, options) if err != nil { return nil, err @@ -267,6 +288,10 @@ func (client *PublicIPAddressesClient) deleteCreateRequest(ctx context.Context, // - options - PublicIPAddressesClientGetOptions contains the optional parameters for the PublicIPAddressesClient.Get method. 
func (client *PublicIPAddressesClient) Get(ctx context.Context, resourceGroupName string, publicIPAddressName string, options *PublicIPAddressesClientGetOptions) (PublicIPAddressesClientGetResponse, error) { var err error + const operationName = "PublicIPAddressesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, publicIPAddressName, options) if err != nil { return PublicIPAddressesClientGetResponse{}, err @@ -335,6 +360,10 @@ func (client *PublicIPAddressesClient) getHandleResponse(resp *http.Response) (P // method. func (client *PublicIPAddressesClient) GetCloudServicePublicIPAddress(ctx context.Context, resourceGroupName string, cloudServiceName string, roleInstanceName string, networkInterfaceName string, ipConfigurationName string, publicIPAddressName string, options *PublicIPAddressesClientGetCloudServicePublicIPAddressOptions) (PublicIPAddressesClientGetCloudServicePublicIPAddressResponse, error) { var err error + const operationName = "PublicIPAddressesClient.GetCloudServicePublicIPAddress" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCloudServicePublicIPAddressCreateRequest(ctx, resourceGroupName, cloudServiceName, roleInstanceName, networkInterfaceName, ipConfigurationName, publicIPAddressName, options) if err != nil { return PublicIPAddressesClientGetCloudServicePublicIPAddressResponse{}, err @@ -419,6 +448,10 @@ func (client *PublicIPAddressesClient) getCloudServicePublicIPAddressHandleRespo // PublicIPAddressesClient.GetVirtualMachineScaleSetPublicIPAddress method. 
func (client *PublicIPAddressesClient) GetVirtualMachineScaleSetPublicIPAddress(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, ipConfigurationName string, publicIPAddressName string, options *PublicIPAddressesClientGetVirtualMachineScaleSetPublicIPAddressOptions) (PublicIPAddressesClientGetVirtualMachineScaleSetPublicIPAddressResponse, error) { var err error + const operationName = "PublicIPAddressesClient.GetVirtualMachineScaleSetPublicIPAddress" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getVirtualMachineScaleSetPublicIPAddressCreateRequest(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, ipConfigurationName, publicIPAddressName, options) if err != nil { return PublicIPAddressesClientGetVirtualMachineScaleSetPublicIPAddressResponse{}, err @@ -501,25 +534,20 @@ func (client *PublicIPAddressesClient) NewListPager(resourceGroupName string, op return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *PublicIPAddressesClientListResponse) (PublicIPAddressesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "PublicIPAddressesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return PublicIPAddressesClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return PublicIPAddressesClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PublicIPAddressesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -565,25 +593,20 @@ func (client *PublicIPAddressesClient) NewListAllPager(options *PublicIPAddresse return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *PublicIPAddressesClientListAllResponse) (PublicIPAddressesClientListAllResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listAllCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "PublicIPAddressesClient.NewListAllPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAllCreateRequest(ctx, options) + }, nil) if err != nil { return PublicIPAddressesClientListAllResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return PublicIPAddressesClientListAllResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PublicIPAddressesClientListAllResponse{}, runtime.NewResponseError(resp) - } return 
client.listAllHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -627,25 +650,20 @@ func (client *PublicIPAddressesClient) NewListCloudServicePublicIPAddressesPager return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *PublicIPAddressesClientListCloudServicePublicIPAddressesResponse) (PublicIPAddressesClientListCloudServicePublicIPAddressesResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCloudServicePublicIPAddressesCreateRequest(ctx, resourceGroupName, cloudServiceName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return PublicIPAddressesClientListCloudServicePublicIPAddressesResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "PublicIPAddressesClient.NewListCloudServicePublicIPAddressesPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCloudServicePublicIPAddressesCreateRequest(ctx, resourceGroupName, cloudServiceName, options) + }, nil) if err != nil { return PublicIPAddressesClientListCloudServicePublicIPAddressesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PublicIPAddressesClientListCloudServicePublicIPAddressesResponse{}, runtime.NewResponseError(resp) - } return client.listCloudServicePublicIPAddressesHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -701,25 +719,20 @@ func (client *PublicIPAddressesClient) NewListCloudServiceRoleInstancePublicIPAd return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *PublicIPAddressesClientListCloudServiceRoleInstancePublicIPAddressesResponse) (PublicIPAddressesClientListCloudServiceRoleInstancePublicIPAddressesResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCloudServiceRoleInstancePublicIPAddressesCreateRequest(ctx, resourceGroupName, cloudServiceName, roleInstanceName, networkInterfaceName, ipConfigurationName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return PublicIPAddressesClientListCloudServiceRoleInstancePublicIPAddressesResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "PublicIPAddressesClient.NewListCloudServiceRoleInstancePublicIPAddressesPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCloudServiceRoleInstancePublicIPAddressesCreateRequest(ctx, resourceGroupName, cloudServiceName, roleInstanceName, networkInterfaceName, ipConfigurationName, options) + }, nil) if err != nil { return PublicIPAddressesClientListCloudServiceRoleInstancePublicIPAddressesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PublicIPAddressesClientListCloudServiceRoleInstancePublicIPAddressesResponse{}, runtime.NewResponseError(resp) - } return client.listCloudServiceRoleInstancePublicIPAddressesHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -784,25 +797,20 @@ func (client *PublicIPAddressesClient) 
NewListVirtualMachineScaleSetPublicIPAddr return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *PublicIPAddressesClientListVirtualMachineScaleSetPublicIPAddressesResponse) (PublicIPAddressesClientListVirtualMachineScaleSetPublicIPAddressesResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listVirtualMachineScaleSetPublicIPAddressesCreateRequest(ctx, resourceGroupName, virtualMachineScaleSetName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "PublicIPAddressesClient.NewListVirtualMachineScaleSetPublicIPAddressesPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listVirtualMachineScaleSetPublicIPAddressesCreateRequest(ctx, resourceGroupName, virtualMachineScaleSetName, options) + }, nil) if err != nil { return PublicIPAddressesClientListVirtualMachineScaleSetPublicIPAddressesResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return PublicIPAddressesClientListVirtualMachineScaleSetPublicIPAddressesResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PublicIPAddressesClientListVirtualMachineScaleSetPublicIPAddressesResponse{}, runtime.NewResponseError(resp) - } return client.listVirtualMachineScaleSetPublicIPAddressesHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -858,25 +866,20 @@ func (client *PublicIPAddressesClient) NewListVirtualMachineScaleSetVMPublicIPAd return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *PublicIPAddressesClientListVirtualMachineScaleSetVMPublicIPAddressesResponse) (PublicIPAddressesClientListVirtualMachineScaleSetVMPublicIPAddressesResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listVirtualMachineScaleSetVMPublicIPAddressesCreateRequest(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, ipConfigurationName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return PublicIPAddressesClientListVirtualMachineScaleSetVMPublicIPAddressesResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "PublicIPAddressesClient.NewListVirtualMachineScaleSetVMPublicIPAddressesPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listVirtualMachineScaleSetVMPublicIPAddressesCreateRequest(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, ipConfigurationName, options) + }, nil) if err != nil { return PublicIPAddressesClientListVirtualMachineScaleSetVMPublicIPAddressesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PublicIPAddressesClientListVirtualMachineScaleSetVMPublicIPAddressesResponse{}, runtime.NewResponseError(resp) - } return client.listVirtualMachineScaleSetVMPublicIPAddressesHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -938,6 +941,10 @@ func (client *PublicIPAddressesClient) 
listVirtualMachineScaleSetVMPublicIPAddre // method. func (client *PublicIPAddressesClient) UpdateTags(ctx context.Context, resourceGroupName string, publicIPAddressName string, parameters TagsObject, options *PublicIPAddressesClientUpdateTagsOptions) (PublicIPAddressesClientUpdateTagsResponse, error) { var err error + const operationName = "PublicIPAddressesClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, publicIPAddressName, parameters, options) if err != nil { return PublicIPAddressesClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/publicipprefixes_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/publicipprefixes_client.go index 4e832283e..992e20200 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/publicipprefixes_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/publicipprefixes_client.go @@ -33,7 +33,7 @@ type PublicIPPrefixesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewPublicIPPrefixesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*PublicIPPrefixesClient, error) { - cl, err := arm.NewClient(moduleName+".PublicIPPrefixesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *PublicIPPrefixesClient) BeginCreateOrUpdate(ctx context.Context, r } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PublicIPPrefixesClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[PublicIPPrefixesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[PublicIPPrefixesClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *PublicIPPrefixesClient) BeginCreateOrUpdate(ctx context.Context, r // Generated from API version 2023-05-01 func (client *PublicIPPrefixesClient) createOrUpdate(ctx context.Context, resourceGroupName string, publicIPPrefixName string, parameters PublicIPPrefix, options *PublicIPPrefixesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "PublicIPPrefixesClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, publicIPPrefixName, parameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *PublicIPPrefixesClient) BeginDelete(ctx context.Context, resourceG } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), 
&runtime.NewPollerOptions[PublicIPPrefixesClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[PublicIPPrefixesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[PublicIPPrefixesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *PublicIPPrefixesClient) BeginDelete(ctx context.Context, resourceG // Generated from API version 2023-05-01 func (client *PublicIPPrefixesClient) deleteOperation(ctx context.Context, resourceGroupName string, publicIPPrefixName string, options *PublicIPPrefixesClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "PublicIPPrefixesClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, publicIPPrefixName, options) if err != nil { return nil, err @@ -197,6 +211,10 @@ func (client *PublicIPPrefixesClient) deleteCreateRequest(ctx context.Context, r // - options - PublicIPPrefixesClientGetOptions contains the optional parameters for the PublicIPPrefixesClient.Get method. func (client *PublicIPPrefixesClient) Get(ctx context.Context, resourceGroupName string, publicIPPrefixName string, options *PublicIPPrefixesClientGetOptions) (PublicIPPrefixesClientGetResponse, error) { var err error + const operationName = "PublicIPPrefixesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, publicIPPrefixName, options) if err != nil { return PublicIPPrefixesClientGetResponse{}, err @@ -263,25 +281,20 @@ func (client *PublicIPPrefixesClient) NewListPager(resourceGroupName string, opt return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *PublicIPPrefixesClientListResponse) (PublicIPPrefixesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return PublicIPPrefixesClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "PublicIPPrefixesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return PublicIPPrefixesClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PublicIPPrefixesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -327,25 +340,20 @@ func (client *PublicIPPrefixesClient) NewListAllPager(options *PublicIPPrefixesC return page.NextLink != nil && len(*page.NextLink) > 0 }, 
Fetcher: func(ctx context.Context, page *PublicIPPrefixesClientListAllResponse) (PublicIPPrefixesClientListAllResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listAllCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "PublicIPPrefixesClient.NewListAllPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAllCreateRequest(ctx, options) + }, nil) if err != nil { return PublicIPPrefixesClientListAllResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return PublicIPPrefixesClientListAllResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PublicIPPrefixesClientListAllResponse{}, runtime.NewResponseError(resp) - } return client.listAllHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -387,6 +395,10 @@ func (client *PublicIPPrefixesClient) listAllHandleResponse(resp *http.Response) // method. func (client *PublicIPPrefixesClient) UpdateTags(ctx context.Context, resourceGroupName string, publicIPPrefixName string, parameters TagsObject, options *PublicIPPrefixesClientUpdateTagsOptions) (PublicIPPrefixesClientUpdateTagsResponse, error) { var err error + const operationName = "PublicIPPrefixesClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, publicIPPrefixName, parameters, options) if err != nil { return PublicIPPrefixesClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/resourcenavigationlinks_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/resourcenavigationlinks_client.go index 2c92c2a5d..9968fd01f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/resourcenavigationlinks_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/resourcenavigationlinks_client.go @@ -33,7 +33,7 @@ type ResourceNavigationLinksClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewResourceNavigationLinksClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ResourceNavigationLinksClient, error) { - cl, err := arm.NewClient(moduleName+".ResourceNavigationLinksClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -55,6 +55,10 @@ func NewResourceNavigationLinksClient(subscriptionID string, credential azcore.T // method. 
func (client *ResourceNavigationLinksClient) List(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, options *ResourceNavigationLinksClientListOptions) (ResourceNavigationLinksClientListResponse, error) { var err error + const operationName = "ResourceNavigationLinksClient.List" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listCreateRequest(ctx, resourceGroupName, virtualNetworkName, subnetName, options) if err != nil { return ResourceNavigationLinksClientListResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routefilterrules_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routefilterrules_client.go index 5badce1f1..641b9f566 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routefilterrules_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routefilterrules_client.go @@ -33,7 +33,7 @@ type RouteFilterRulesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewRouteFilterRulesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*RouteFilterRulesClient, error) { - cl, err := arm.NewClient(moduleName+".RouteFilterRulesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *RouteFilterRulesClient) BeginCreateOrUpdate(ctx context.Context, r } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RouteFilterRulesClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[RouteFilterRulesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[RouteFilterRulesClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *RouteFilterRulesClient) BeginCreateOrUpdate(ctx context.Context, r // Generated from API version 2023-05-01 func (client *RouteFilterRulesClient) createOrUpdate(ctx context.Context, resourceGroupName string, routeFilterName string, ruleName string, routeFilterRuleParameters RouteFilterRule, options *RouteFilterRulesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "RouteFilterRulesClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, routeFilterName, ruleName, routeFilterRuleParameters, options) if err != nil { return nil, err @@ -140,10 +147,13 @@ func (client *RouteFilterRulesClient) BeginDelete(ctx context.Context, resourceG } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), 
&runtime.NewPollerOptions[RouteFilterRulesClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[RouteFilterRulesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[RouteFilterRulesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -153,6 +163,10 @@ func (client *RouteFilterRulesClient) BeginDelete(ctx context.Context, resourceG // Generated from API version 2023-05-01 func (client *RouteFilterRulesClient) deleteOperation(ctx context.Context, resourceGroupName string, routeFilterName string, ruleName string, options *RouteFilterRulesClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "RouteFilterRulesClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, routeFilterName, ruleName, options) if err != nil { return nil, err @@ -208,6 +222,10 @@ func (client *RouteFilterRulesClient) deleteCreateRequest(ctx context.Context, r // - options - RouteFilterRulesClientGetOptions contains the optional parameters for the RouteFilterRulesClient.Get method. func (client *RouteFilterRulesClient) Get(ctx context.Context, resourceGroupName string, routeFilterName string, ruleName string, options *RouteFilterRulesClientGetOptions) (RouteFilterRulesClientGetResponse, error) { var err error + const operationName = "RouteFilterRulesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, routeFilterName, ruleName, options) if err != nil { return RouteFilterRulesClientGetResponse{}, err @@ -276,25 +294,20 @@ func (client *RouteFilterRulesClient) NewListByRouteFilterPager(resourceGroupNam return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *RouteFilterRulesClientListByRouteFilterResponse) (RouteFilterRulesClientListByRouteFilterResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByRouteFilterCreateRequest(ctx, resourceGroupName, routeFilterName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return RouteFilterRulesClientListByRouteFilterResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "RouteFilterRulesClient.NewListByRouteFilterPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByRouteFilterCreateRequest(ctx, resourceGroupName, routeFilterName, options) + }, nil) if err != nil { return RouteFilterRulesClientListByRouteFilterResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return RouteFilterRulesClientListByRouteFilterResponse{}, runtime.NewResponseError(resp) - } return client.listByRouteFilterHandleResponse(resp) }, + Tracer: 
client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routefilters_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routefilters_client.go index edccbbfdf..58b6b7c29 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routefilters_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routefilters_client.go @@ -33,7 +33,7 @@ type RouteFiltersClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewRouteFiltersClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*RouteFiltersClient, error) { - cl, err := arm.NewClient(moduleName+".RouteFiltersClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *RouteFiltersClient) BeginCreateOrUpdate(ctx context.Context, resou } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RouteFiltersClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[RouteFiltersClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[RouteFiltersClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *RouteFiltersClient) BeginCreateOrUpdate(ctx context.Context, resou // Generated from API version 2023-05-01 func (client *RouteFiltersClient) createOrUpdate(ctx context.Context, resourceGroupName string, routeFilterName string, routeFilterParameters RouteFilter, options *RouteFiltersClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "RouteFiltersClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, routeFilterName, routeFilterParameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *RouteFiltersClient) BeginDelete(ctx context.Context, resourceGroup } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RouteFiltersClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[RouteFiltersClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[RouteFiltersClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *RouteFiltersClient) BeginDelete(ctx context.Context, resourceGroup // Generated from API version 2023-05-01 func (client *RouteFiltersClient) deleteOperation(ctx context.Context, resourceGroupName string, routeFilterName string, options 
*RouteFiltersClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "RouteFiltersClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, routeFilterName, options) if err != nil { return nil, err @@ -197,6 +211,10 @@ func (client *RouteFiltersClient) deleteCreateRequest(ctx context.Context, resou // - options - RouteFiltersClientGetOptions contains the optional parameters for the RouteFiltersClient.Get method. func (client *RouteFiltersClient) Get(ctx context.Context, resourceGroupName string, routeFilterName string, options *RouteFiltersClientGetOptions) (RouteFiltersClientGetResponse, error) { var err error + const operationName = "RouteFiltersClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, routeFilterName, options) if err != nil { return RouteFiltersClientGetResponse{}, err @@ -261,25 +279,20 @@ func (client *RouteFiltersClient) NewListPager(options *RouteFiltersClientListOp return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *RouteFiltersClientListResponse) (RouteFiltersClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return RouteFiltersClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "RouteFiltersClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) if err != nil { return RouteFiltersClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return RouteFiltersClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -322,25 +335,20 @@ func (client *RouteFiltersClient) NewListByResourceGroupPager(resourceGroupName return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *RouteFiltersClientListByResourceGroupResponse) (RouteFiltersClientListByResourceGroupResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "RouteFiltersClient.NewListByResourceGroupPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return RouteFiltersClientListByResourceGroupResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - 
return RouteFiltersClientListByResourceGroupResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return RouteFiltersClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) - } return client.listByResourceGroupHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -385,6 +393,10 @@ func (client *RouteFiltersClient) listByResourceGroupHandleResponse(resp *http.R // - options - RouteFiltersClientUpdateTagsOptions contains the optional parameters for the RouteFiltersClient.UpdateTags method. func (client *RouteFiltersClient) UpdateTags(ctx context.Context, resourceGroupName string, routeFilterName string, parameters TagsObject, options *RouteFiltersClientUpdateTagsOptions) (RouteFiltersClientUpdateTagsResponse, error) { var err error + const operationName = "RouteFiltersClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, routeFilterName, parameters, options) if err != nil { return RouteFiltersClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routemaps_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routemaps_client.go index 66d3f968e..9ccc8abcd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routemaps_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routemaps_client.go @@ -33,7 +33,7 @@ type RouteMapsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewRouteMapsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*RouteMapsClient, error) { - cl, err := arm.NewClient(moduleName+".RouteMapsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *RouteMapsClient) BeginCreateOrUpdate(ctx context.Context, resource } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RouteMapsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[RouteMapsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[RouteMapsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *RouteMapsClient) BeginCreateOrUpdate(ctx context.Context, resource // Generated from API version 2023-05-01 func (client *RouteMapsClient) createOrUpdate(ctx context.Context, resourceGroupName string, virtualHubName string, routeMapName string, routeMapParameters RouteMap, options *RouteMapsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "RouteMapsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, virtualHubName, routeMapName, routeMapParameters, options) if err != nil { return nil, err @@ -139,10 +146,13 @@ func (client *RouteMapsClient) BeginDelete(ctx context.Context, resourceGroupNam } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RouteMapsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[RouteMapsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[RouteMapsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -152,6 +162,10 @@ func (client *RouteMapsClient) BeginDelete(ctx context.Context, resourceGroupNam // Generated from API version 2023-05-01 func (client *RouteMapsClient) deleteOperation(ctx context.Context, resourceGroupName string, virtualHubName string, routeMapName string, options *RouteMapsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "RouteMapsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, virtualHubName, routeMapName, options) if err != nil { return nil, err @@ -207,6 +221,10 @@ func (client *RouteMapsClient) deleteCreateRequest(ctx context.Context, resource // - options - RouteMapsClientGetOptions contains the optional parameters for the RouteMapsClient.Get method. 
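Each operation body in these regenerated files gains the same tracing preamble: the exported operation name is stored on the context under runtime.CtxAPINameKey{}, runtime.StartSpan opens a span against the client's Tracer, and the deferred endSpan closes it with whatever error the operation ultimately returns. If the consumer never configures a tracing provider the spans are no-ops, so this should be behaviour-neutral for blob-csi-driver. A condensed sketch of the pattern, with ExampleClient and its request/response helpers standing in for the generated identifiers:

    // Condensed from the hunks in this patch; ExampleClient, getCreateRequest and
    // getHandleResponse are placeholders, not real armnetwork identifiers.
    func (client *ExampleClient) Get(ctx context.Context) (ExampleGetResponse, error) {
        var err error
        const operationName = "ExampleClient.Get"
        // Make the public API name available to pipeline policies.
        ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName)
        // Open the span; the deferred closure reports the operation's final error.
        ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil)
        defer func() { endSpan(err) }()
        req, err := client.getCreateRequest(ctx)
        if err != nil {
            return ExampleGetResponse{}, err
        }
        httpResp, err := client.internal.Pipeline().Do(req)
        if err != nil {
            return ExampleGetResponse{}, err
        }
        if !runtime.HasStatusCode(httpResp, http.StatusOK) {
            err = runtime.NewResponseError(httpResp)
            return ExampleGetResponse{}, err
        }
        resp, err := client.getHandleResponse(httpResp)
        return resp, err
    }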
func (client *RouteMapsClient) Get(ctx context.Context, resourceGroupName string, virtualHubName string, routeMapName string, options *RouteMapsClientGetOptions) (RouteMapsClientGetResponse, error) { var err error + const operationName = "RouteMapsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, virtualHubName, routeMapName, options) if err != nil { return RouteMapsClientGetResponse{}, err @@ -274,25 +292,20 @@ func (client *RouteMapsClient) NewListPager(resourceGroupName string, virtualHub return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *RouteMapsClientListResponse) (RouteMapsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, virtualHubName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return RouteMapsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "RouteMapsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, virtualHubName, options) + }, nil) if err != nil { return RouteMapsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return RouteMapsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routes_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routes_client.go index 8342ff62d..1574341d5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routes_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routes_client.go @@ -33,7 +33,7 @@ type RoutesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewRoutesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*RoutesClient, error) { - cl, err := arm.NewClient(moduleName+".RoutesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *RoutesClient) BeginCreateOrUpdate(ctx context.Context, resourceGro } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RoutesClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[RoutesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[RoutesClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *RoutesClient) BeginCreateOrUpdate(ctx context.Context, resourceGro // Generated from API version 2023-05-01 func (client *RoutesClient) createOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, routeParameters Route, options *RoutesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "RoutesClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, routeTableName, routeName, routeParameters, options) if err != nil { return nil, err @@ -139,10 +146,13 @@ func (client *RoutesClient) BeginDelete(ctx context.Context, resourceGroupName s } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RoutesClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[RoutesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[RoutesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -152,6 +162,10 @@ func (client *RoutesClient) BeginDelete(ctx context.Context, resourceGroupName s // Generated from API version 2023-05-01 func (client *RoutesClient) deleteOperation(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, options *RoutesClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "RoutesClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, routeTableName, routeName, options) if err != nil { return nil, err @@ -207,6 +221,10 @@ func (client *RoutesClient) deleteCreateRequest(ctx context.Context, resourceGro // - options - RoutesClientGetOptions contains the optional parameters for the RoutesClient.Get method. 
func (client *RoutesClient) Get(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, options *RoutesClientGetOptions) (RoutesClientGetResponse, error) { var err error + const operationName = "RoutesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, routeTableName, routeName, options) if err != nil { return RoutesClientGetResponse{}, err @@ -274,25 +292,20 @@ func (client *RoutesClient) NewListPager(resourceGroupName string, routeTableNam return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *RoutesClientListResponse) (RoutesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, routeTableName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return RoutesClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "RoutesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, routeTableName, options) + }, nil) if err != nil { return RoutesClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return RoutesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routetables_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routetables_client.go index 8559d9359..c3da88450 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routetables_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routetables_client.go @@ -33,7 +33,7 @@ type RouteTablesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewRouteTablesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*RouteTablesClient, error) { - cl, err := arm.NewClient(moduleName+".RouteTablesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *RouteTablesClient) BeginCreateOrUpdate(ctx context.Context, resour } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RouteTablesClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[RouteTablesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[RouteTablesClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *RouteTablesClient) BeginCreateOrUpdate(ctx context.Context, resour // Generated from API version 2023-05-01 func (client *RouteTablesClient) createOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, parameters RouteTable, options *RouteTablesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "RouteTablesClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, routeTableName, parameters, options) if err != nil { return nil, err @@ -133,10 +140,13 @@ func (client *RouteTablesClient) BeginDelete(ctx context.Context, resourceGroupN } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RouteTablesClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[RouteTablesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[RouteTablesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -146,6 +156,10 @@ func (client *RouteTablesClient) BeginDelete(ctx context.Context, resourceGroupN // Generated from API version 2023-05-01 func (client *RouteTablesClient) deleteOperation(ctx context.Context, resourceGroupName string, routeTableName string, options *RouteTablesClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "RouteTablesClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, routeTableName, options) if err != nil { return nil, err @@ -196,6 +210,10 @@ func (client *RouteTablesClient) deleteCreateRequest(ctx context.Context, resour // - options - RouteTablesClientGetOptions contains the optional parameters for the RouteTablesClient.Get method. 
func (client *RouteTablesClient) Get(ctx context.Context, resourceGroupName string, routeTableName string, options *RouteTablesClientGetOptions) (RouteTablesClientGetResponse, error) { var err error + const operationName = "RouteTablesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, routeTableName, options) if err != nil { return RouteTablesClientGetResponse{}, err @@ -261,25 +279,20 @@ func (client *RouteTablesClient) NewListPager(resourceGroupName string, options return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *RouteTablesClientListResponse) (RouteTablesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return RouteTablesClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "RouteTablesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return RouteTablesClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return RouteTablesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -324,25 +337,20 @@ func (client *RouteTablesClient) NewListAllPager(options *RouteTablesClientListA return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *RouteTablesClientListAllResponse) (RouteTablesClientListAllResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listAllCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "RouteTablesClient.NewListAllPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAllCreateRequest(ctx, options) + }, nil) if err != nil { return RouteTablesClientListAllResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return RouteTablesClientListAllResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return RouteTablesClientListAllResponse{}, runtime.NewResponseError(resp) - } return client.listAllHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -383,6 +391,10 @@ func (client *RouteTablesClient) listAllHandleResponse(resp *http.Response) (Rou // - options - RouteTablesClientUpdateTagsOptions contains the optional parameters for the RouteTablesClient.UpdateTags method. 
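The pager hunks above (NewListPager, NewListAllPager, and their equivalents in the other files) all make the same swap: the manual first-page/next-link branching, Pipeline().Do call, and HasStatusCode check are replaced by runtime.FetcherForNextLink, which issues the GET for a non-empty next link, falls back to the supplied first-page request builder otherwise, and surfaces unexpected status codes as errors; the paging handler also now carries the client's Tracer. A condensed sketch of the resulting shape (ExampleClient and its list helpers are placeholders):

    // Condensed pager; ExampleClient, ExampleListResponse, listCreateRequest and
    // listHandleResponse stand in for the generated identifiers shown above.
    func (client *ExampleClient) NewListPager(options *ExampleListOptions) *runtime.Pager[ExampleListResponse] {
        return runtime.NewPager(runtime.PagingHandler[ExampleListResponse]{
            More: func(page ExampleListResponse) bool {
                return page.NextLink != nil && len(*page.NextLink) > 0
            },
            Fetcher: func(ctx context.Context, page *ExampleListResponse) (ExampleListResponse, error) {
                ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ExampleClient.NewListPager")
                nextLink := ""
                if page != nil {
                    nextLink = *page.NextLink
                }
                // First page: build the initial request; later pages: GET nextLink.
                resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink,
                    func(ctx context.Context) (*policy.Request, error) {
                        return client.listCreateRequest(ctx, options)
                    }, nil)
                if err != nil {
                    return ExampleListResponse{}, err
                }
                return client.listHandleResponse(resp)
            },
            Tracer: client.internal.Tracer(),
        })
    }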
func (client *RouteTablesClient) UpdateTags(ctx context.Context, resourceGroupName string, routeTableName string, parameters TagsObject, options *RouteTablesClientUpdateTagsOptions) (RouteTablesClientUpdateTagsResponse, error) { var err error + const operationName = "RouteTablesClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, routeTableName, parameters, options) if err != nil { return RouteTablesClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routingintent_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routingintent_client.go index 103d8fc9b..14c690fe7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routingintent_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routingintent_client.go @@ -33,7 +33,7 @@ type RoutingIntentClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewRoutingIntentClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*RoutingIntentClient, error) { - cl, err := arm.NewClient(moduleName+".RoutingIntentClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *RoutingIntentClient) BeginCreateOrUpdate(ctx context.Context, reso } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RoutingIntentClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[RoutingIntentClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[RoutingIntentClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *RoutingIntentClient) BeginCreateOrUpdate(ctx context.Context, reso // Generated from API version 2023-05-01 func (client *RoutingIntentClient) createOrUpdate(ctx context.Context, resourceGroupName string, virtualHubName string, routingIntentName string, routingIntentParameters RoutingIntent, options *RoutingIntentClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "RoutingIntentClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, virtualHubName, routingIntentName, routingIntentParameters, options) if err != nil { return nil, err @@ -140,10 +147,13 @@ func (client *RoutingIntentClient) BeginDelete(ctx context.Context, resourceGrou } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[RoutingIntentClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: 
client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[RoutingIntentClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[RoutingIntentClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -153,6 +163,10 @@ func (client *RoutingIntentClient) BeginDelete(ctx context.Context, resourceGrou // Generated from API version 2023-05-01 func (client *RoutingIntentClient) deleteOperation(ctx context.Context, resourceGroupName string, virtualHubName string, routingIntentName string, options *RoutingIntentClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "RoutingIntentClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, virtualHubName, routingIntentName, options) if err != nil { return nil, err @@ -208,6 +222,10 @@ func (client *RoutingIntentClient) deleteCreateRequest(ctx context.Context, reso // - options - RoutingIntentClientGetOptions contains the optional parameters for the RoutingIntentClient.Get method. func (client *RoutingIntentClient) Get(ctx context.Context, resourceGroupName string, virtualHubName string, routingIntentName string, options *RoutingIntentClientGetOptions) (RoutingIntentClientGetResponse, error) { var err error + const operationName = "RoutingIntentClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, virtualHubName, routingIntentName, options) if err != nil { return RoutingIntentClientGetResponse{}, err @@ -275,25 +293,20 @@ func (client *RoutingIntentClient) NewListPager(resourceGroupName string, virtua return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *RoutingIntentClientListResponse) (RoutingIntentClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, virtualHubName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return RoutingIntentClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "RoutingIntentClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, virtualHubName, options) + }, nil) if err != nil { return RoutingIntentClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return RoutingIntentClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/scopeconnections_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/scopeconnections_client.go 
index 24f9a5a2f..eed7c65cc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/scopeconnections_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/scopeconnections_client.go @@ -34,7 +34,7 @@ type ScopeConnectionsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewScopeConnectionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ScopeConnectionsClient, error) { - cl, err := arm.NewClient(moduleName+".ScopeConnectionsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -57,6 +57,10 @@ func NewScopeConnectionsClient(subscriptionID string, credential azcore.TokenCre // method. func (client *ScopeConnectionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkManagerName string, scopeConnectionName string, parameters ScopeConnection, options *ScopeConnectionsClientCreateOrUpdateOptions) (ScopeConnectionsClientCreateOrUpdateResponse, error) { var err error + const operationName = "ScopeConnectionsClient.CreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, networkManagerName, scopeConnectionName, parameters, options) if err != nil { return ScopeConnectionsClientCreateOrUpdateResponse{}, err @@ -125,6 +129,10 @@ func (client *ScopeConnectionsClient) createOrUpdateHandleResponse(resp *http.Re // - options - ScopeConnectionsClientDeleteOptions contains the optional parameters for the ScopeConnectionsClient.Delete method. func (client *ScopeConnectionsClient) Delete(ctx context.Context, resourceGroupName string, networkManagerName string, scopeConnectionName string, options *ScopeConnectionsClientDeleteOptions) (ScopeConnectionsClientDeleteResponse, error) { var err error + const operationName = "ScopeConnectionsClient.Delete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, networkManagerName, scopeConnectionName, options) if err != nil { return ScopeConnectionsClientDeleteResponse{}, err @@ -180,6 +188,10 @@ func (client *ScopeConnectionsClient) deleteCreateRequest(ctx context.Context, r // - options - ScopeConnectionsClientGetOptions contains the optional parameters for the ScopeConnectionsClient.Get method. 
func (client *ScopeConnectionsClient) Get(ctx context.Context, resourceGroupName string, networkManagerName string, scopeConnectionName string, options *ScopeConnectionsClientGetOptions) (ScopeConnectionsClientGetResponse, error) { var err error + const operationName = "ScopeConnectionsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, networkManagerName, scopeConnectionName, options) if err != nil { return ScopeConnectionsClientGetResponse{}, err @@ -248,25 +260,20 @@ func (client *ScopeConnectionsClient) NewListPager(resourceGroupName string, net return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ScopeConnectionsClientListResponse) (ScopeConnectionsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, networkManagerName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ScopeConnectionsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, networkManagerName, options) + }, nil) if err != nil { return ScopeConnectionsClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ScopeConnectionsClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ScopeConnectionsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/securityadminconfigurations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/securityadminconfigurations_client.go index a16ab5c06..65a685236 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/securityadminconfigurations_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/securityadminconfigurations_client.go @@ -34,7 +34,7 @@ type SecurityAdminConfigurationsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewSecurityAdminConfigurationsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*SecurityAdminConfigurationsClient, error) { - cl, err := arm.NewClient(moduleName+".SecurityAdminConfigurationsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -57,6 +57,10 @@ func NewSecurityAdminConfigurationsClient(subscriptionID string, credential azco // method. 
func (client *SecurityAdminConfigurationsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkManagerName string, configurationName string, securityAdminConfiguration SecurityAdminConfiguration, options *SecurityAdminConfigurationsClientCreateOrUpdateOptions) (SecurityAdminConfigurationsClientCreateOrUpdateResponse, error) { var err error + const operationName = "SecurityAdminConfigurationsClient.CreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, networkManagerName, configurationName, securityAdminConfiguration, options) if err != nil { return SecurityAdminConfigurationsClientCreateOrUpdateResponse{}, err @@ -132,10 +136,13 @@ func (client *SecurityAdminConfigurationsClient) BeginDelete(ctx context.Context } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[SecurityAdminConfigurationsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[SecurityAdminConfigurationsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[SecurityAdminConfigurationsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -145,6 +152,10 @@ func (client *SecurityAdminConfigurationsClient) BeginDelete(ctx context.Context // Generated from API version 2023-05-01 func (client *SecurityAdminConfigurationsClient) deleteOperation(ctx context.Context, resourceGroupName string, networkManagerName string, configurationName string, options *SecurityAdminConfigurationsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "SecurityAdminConfigurationsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, networkManagerName, configurationName, options) if err != nil { return nil, err @@ -204,6 +215,10 @@ func (client *SecurityAdminConfigurationsClient) deleteCreateRequest(ctx context // method. 
func (client *SecurityAdminConfigurationsClient) Get(ctx context.Context, resourceGroupName string, networkManagerName string, configurationName string, options *SecurityAdminConfigurationsClientGetOptions) (SecurityAdminConfigurationsClientGetResponse, error) { var err error + const operationName = "SecurityAdminConfigurationsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, networkManagerName, configurationName, options) if err != nil { return SecurityAdminConfigurationsClientGetResponse{}, err @@ -272,25 +287,20 @@ func (client *SecurityAdminConfigurationsClient) NewListPager(resourceGroupName return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *SecurityAdminConfigurationsClientListResponse) (SecurityAdminConfigurationsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, networkManagerName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "SecurityAdminConfigurationsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, networkManagerName, options) + }, nil) if err != nil { return SecurityAdminConfigurationsClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return SecurityAdminConfigurationsClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return SecurityAdminConfigurationsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/securitygroups_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/securitygroups_client.go index 11f1848cb..4465ab507 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/securitygroups_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/securitygroups_client.go @@ -33,7 +33,7 @@ type SecurityGroupsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewSecurityGroupsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*SecurityGroupsClient, error) { - cl, err := arm.NewClient(moduleName+".SecurityGroupsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *SecurityGroupsClient) BeginCreateOrUpdate(ctx context.Context, res } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[SecurityGroupsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[SecurityGroupsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[SecurityGroupsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *SecurityGroupsClient) BeginCreateOrUpdate(ctx context.Context, res // Generated from API version 2023-05-01 func (client *SecurityGroupsClient) createOrUpdate(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, parameters SecurityGroup, options *SecurityGroupsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "SecurityGroupsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, networkSecurityGroupName, parameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *SecurityGroupsClient) BeginDelete(ctx context.Context, resourceGro } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[SecurityGroupsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[SecurityGroupsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[SecurityGroupsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *SecurityGroupsClient) BeginDelete(ctx context.Context, resourceGro // Generated from API version 2023-05-01 func (client *SecurityGroupsClient) deleteOperation(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, options *SecurityGroupsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "SecurityGroupsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, networkSecurityGroupName, options) if err != nil { return nil, err @@ -197,6 +211,10 @@ func (client *SecurityGroupsClient) deleteCreateRequest(ctx context.Context, res // - options - SecurityGroupsClientGetOptions contains the optional parameters for the SecurityGroupsClient.Get method. 
func (client *SecurityGroupsClient) Get(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, options *SecurityGroupsClientGetOptions) (SecurityGroupsClientGetResponse, error) { var err error + const operationName = "SecurityGroupsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, networkSecurityGroupName, options) if err != nil { return SecurityGroupsClientGetResponse{}, err @@ -262,25 +280,20 @@ func (client *SecurityGroupsClient) NewListPager(resourceGroupName string, optio return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *SecurityGroupsClientListResponse) (SecurityGroupsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return SecurityGroupsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "SecurityGroupsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return SecurityGroupsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return SecurityGroupsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -326,25 +339,20 @@ func (client *SecurityGroupsClient) NewListAllPager(options *SecurityGroupsClien return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *SecurityGroupsClientListAllResponse) (SecurityGroupsClientListAllResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listAllCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "SecurityGroupsClient.NewListAllPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAllCreateRequest(ctx, options) + }, nil) if err != nil { return SecurityGroupsClientListAllResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return SecurityGroupsClientListAllResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return SecurityGroupsClientListAllResponse{}, runtime.NewResponseError(resp) - } return client.listAllHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -386,6 +394,10 @@ func (client *SecurityGroupsClient) listAllHandleResponse(resp *http.Response) ( // method. 
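Long-running operations get the same treatment on both paths: a freshly started poller receives the client's Tracer through runtime.NewPollerOptions, and resuming from a token now goes through NewPollerFromResumeToken with an explicit NewPollerFromResumeTokenOptions value instead of a nil options argument, so a rehydrated poller traces its polling loop the same way. A condensed Begin* shape, assuming the usual resume-token guard that the hunk context above elides (ExampleClient and deleteOperation are placeholders; the final-state mode follows the hunks above):

    // Condensed LRO entry point mirroring the BeginDelete hunks in this patch.
    func (client *ExampleClient) BeginDelete(ctx context.Context, options *ExampleBeginDeleteOptions) (*runtime.Poller[ExampleDeleteResponse], error) {
        if options == nil || options.ResumeToken == "" {
            resp, err := client.deleteOperation(ctx, options) // placeholder LRO kickoff
            if err != nil {
                return nil, err
            }
            poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ExampleDeleteResponse]{
                FinalStateVia: runtime.FinalStateViaLocation,
                Tracer:        client.internal.Tracer(),
            })
            return poller, err
        }
        // Resume path: passing the options struct (rather than nil) keeps the
        // rehydrated poller wired to the same Tracer.
        return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ExampleDeleteResponse]{
            Tracer: client.internal.Tracer(),
        })
    }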
func (client *SecurityGroupsClient) UpdateTags(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, parameters TagsObject, options *SecurityGroupsClientUpdateTagsOptions) (SecurityGroupsClientUpdateTagsResponse, error) { var err error + const operationName = "SecurityGroupsClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, networkSecurityGroupName, parameters, options) if err != nil { return SecurityGroupsClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/securitypartnerproviders_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/securitypartnerproviders_client.go index ea42ace56..08f18ad20 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/securitypartnerproviders_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/securitypartnerproviders_client.go @@ -33,7 +33,7 @@ type SecurityPartnerProvidersClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewSecurityPartnerProvidersClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*SecurityPartnerProvidersClient, error) { - cl, err := arm.NewClient(moduleName+".SecurityPartnerProvidersClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *SecurityPartnerProvidersClient) BeginCreateOrUpdate(ctx context.Co } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[SecurityPartnerProvidersClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[SecurityPartnerProvidersClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[SecurityPartnerProvidersClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *SecurityPartnerProvidersClient) BeginCreateOrUpdate(ctx context.Co // Generated from API version 2023-05-01 func (client *SecurityPartnerProvidersClient) createOrUpdate(ctx context.Context, resourceGroupName string, securityPartnerProviderName string, parameters SecurityPartnerProvider, options *SecurityPartnerProvidersClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "SecurityPartnerProvidersClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, securityPartnerProviderName, parameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *SecurityPartnerProvidersClient) BeginDelete(ctx context.Context, r } poller, err := 
runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[SecurityPartnerProvidersClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[SecurityPartnerProvidersClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[SecurityPartnerProvidersClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *SecurityPartnerProvidersClient) BeginDelete(ctx context.Context, r // Generated from API version 2023-05-01 func (client *SecurityPartnerProvidersClient) deleteOperation(ctx context.Context, resourceGroupName string, securityPartnerProviderName string, options *SecurityPartnerProvidersClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "SecurityPartnerProvidersClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, securityPartnerProviderName, options) if err != nil { return nil, err @@ -198,6 +212,10 @@ func (client *SecurityPartnerProvidersClient) deleteCreateRequest(ctx context.Co // method. func (client *SecurityPartnerProvidersClient) Get(ctx context.Context, resourceGroupName string, securityPartnerProviderName string, options *SecurityPartnerProvidersClientGetOptions) (SecurityPartnerProvidersClientGetResponse, error) { var err error + const operationName = "SecurityPartnerProvidersClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, securityPartnerProviderName, options) if err != nil { return SecurityPartnerProvidersClientGetResponse{}, err @@ -260,25 +278,20 @@ func (client *SecurityPartnerProvidersClient) NewListPager(options *SecurityPart return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *SecurityPartnerProvidersClientListResponse) (SecurityPartnerProvidersClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return SecurityPartnerProvidersClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "SecurityPartnerProvidersClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) if err != nil { return SecurityPartnerProvidersClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return SecurityPartnerProvidersClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -321,25 +334,20 @@ func (client *SecurityPartnerProvidersClient) 
NewListByResourceGroupPager(resour return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *SecurityPartnerProvidersClientListByResourceGroupResponse) (SecurityPartnerProvidersClientListByResourceGroupResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "SecurityPartnerProvidersClient.NewListByResourceGroupPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return SecurityPartnerProvidersClientListByResourceGroupResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return SecurityPartnerProvidersClientListByResourceGroupResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return SecurityPartnerProvidersClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) - } return client.listByResourceGroupHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -385,6 +393,10 @@ func (client *SecurityPartnerProvidersClient) listByResourceGroupHandleResponse( // method. func (client *SecurityPartnerProvidersClient) UpdateTags(ctx context.Context, resourceGroupName string, securityPartnerProviderName string, parameters TagsObject, options *SecurityPartnerProvidersClientUpdateTagsOptions) (SecurityPartnerProvidersClientUpdateTagsResponse, error) { var err error + const operationName = "SecurityPartnerProvidersClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, securityPartnerProviderName, parameters, options) if err != nil { return SecurityPartnerProvidersClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/securityrules_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/securityrules_client.go index 77f8bb4f4..bf51f10ba 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/securityrules_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/securityrules_client.go @@ -33,7 +33,7 @@ type SecurityRulesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewSecurityRulesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*SecurityRulesClient, error) { - cl, err := arm.NewClient(moduleName+".SecurityRulesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *SecurityRulesClient) BeginCreateOrUpdate(ctx context.Context, reso } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[SecurityRulesClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[SecurityRulesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[SecurityRulesClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *SecurityRulesClient) BeginCreateOrUpdate(ctx context.Context, reso // Generated from API version 2023-05-01 func (client *SecurityRulesClient) createOrUpdate(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, securityRuleName string, securityRuleParameters SecurityRule, options *SecurityRulesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "SecurityRulesClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, networkSecurityGroupName, securityRuleName, securityRuleParameters, options) if err != nil { return nil, err @@ -140,10 +147,13 @@ func (client *SecurityRulesClient) BeginDelete(ctx context.Context, resourceGrou } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[SecurityRulesClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[SecurityRulesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[SecurityRulesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -153,6 +163,10 @@ func (client *SecurityRulesClient) BeginDelete(ctx context.Context, resourceGrou // Generated from API version 2023-05-01 func (client *SecurityRulesClient) deleteOperation(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, securityRuleName string, options *SecurityRulesClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "SecurityRulesClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, networkSecurityGroupName, securityRuleName, options) if err != nil { return nil, err @@ -208,6 +222,10 @@ func (client *SecurityRulesClient) deleteCreateRequest(ctx context.Context, reso // - options - 
SecurityRulesClientGetOptions contains the optional parameters for the SecurityRulesClient.Get method. func (client *SecurityRulesClient) Get(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, securityRuleName string, options *SecurityRulesClientGetOptions) (SecurityRulesClientGetResponse, error) { var err error + const operationName = "SecurityRulesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, networkSecurityGroupName, securityRuleName, options) if err != nil { return SecurityRulesClientGetResponse{}, err @@ -275,25 +293,20 @@ func (client *SecurityRulesClient) NewListPager(resourceGroupName string, networ return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *SecurityRulesClientListResponse) (SecurityRulesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, networkSecurityGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return SecurityRulesClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "SecurityRulesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, networkSecurityGroupName, options) + }, nil) if err != nil { return SecurityRulesClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return SecurityRulesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/serviceassociationlinks_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/serviceassociationlinks_client.go index 8e21c4fec..da40a3502 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/serviceassociationlinks_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/serviceassociationlinks_client.go @@ -33,7 +33,7 @@ type ServiceAssociationLinksClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewServiceAssociationLinksClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ServiceAssociationLinksClient, error) { - cl, err := arm.NewClient(moduleName+".ServiceAssociationLinksClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -55,6 +55,10 @@ func NewServiceAssociationLinksClient(subscriptionID string, credential azcore.T // method. 
func (client *ServiceAssociationLinksClient) List(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, options *ServiceAssociationLinksClientListOptions) (ServiceAssociationLinksClientListResponse, error) { var err error + const operationName = "ServiceAssociationLinksClient.List" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listCreateRequest(ctx, resourceGroupName, virtualNetworkName, subnetName, options) if err != nil { return ServiceAssociationLinksClientListResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/serviceendpointpolicies_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/serviceendpointpolicies_client.go index d0ea81755..8609cd22a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/serviceendpointpolicies_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/serviceendpointpolicies_client.go @@ -33,7 +33,7 @@ type ServiceEndpointPoliciesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewServiceEndpointPoliciesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ServiceEndpointPoliciesClient, error) { - cl, err := arm.NewClient(moduleName+".ServiceEndpointPoliciesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *ServiceEndpointPoliciesClient) BeginCreateOrUpdate(ctx context.Con } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ServiceEndpointPoliciesClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ServiceEndpointPoliciesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ServiceEndpointPoliciesClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *ServiceEndpointPoliciesClient) BeginCreateOrUpdate(ctx context.Con // Generated from API version 2023-05-01 func (client *ServiceEndpointPoliciesClient) createOrUpdate(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, parameters ServiceEndpointPolicy, options *ServiceEndpointPoliciesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "ServiceEndpointPoliciesClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, serviceEndpointPolicyName, parameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *ServiceEndpointPoliciesClient) BeginDelete(ctx context.Context, re } poller, err := runtime.NewPoller(resp, 
client.internal.Pipeline(), &runtime.NewPollerOptions[ServiceEndpointPoliciesClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ServiceEndpointPoliciesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ServiceEndpointPoliciesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *ServiceEndpointPoliciesClient) BeginDelete(ctx context.Context, re // Generated from API version 2023-05-01 func (client *ServiceEndpointPoliciesClient) deleteOperation(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, options *ServiceEndpointPoliciesClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "ServiceEndpointPoliciesClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, serviceEndpointPolicyName, options) if err != nil { return nil, err @@ -198,6 +212,10 @@ func (client *ServiceEndpointPoliciesClient) deleteCreateRequest(ctx context.Con // method. func (client *ServiceEndpointPoliciesClient) Get(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, options *ServiceEndpointPoliciesClientGetOptions) (ServiceEndpointPoliciesClientGetResponse, error) { var err error + const operationName = "ServiceEndpointPoliciesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, serviceEndpointPolicyName, options) if err != nil { return ServiceEndpointPoliciesClientGetResponse{}, err @@ -263,25 +281,20 @@ func (client *ServiceEndpointPoliciesClient) NewListPager(options *ServiceEndpoi return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ServiceEndpointPoliciesClientListResponse) (ServiceEndpointPoliciesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return ServiceEndpointPoliciesClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ServiceEndpointPoliciesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) if err != nil { return ServiceEndpointPoliciesClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceEndpointPoliciesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -324,25 +337,20 @@ func (client *ServiceEndpointPoliciesClient) NewListByResourceGroupPager(resourc return page.NextLink != nil && 
len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ServiceEndpointPoliciesClientListByResourceGroupResponse) (ServiceEndpointPoliciesClientListByResourceGroupResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ServiceEndpointPoliciesClient.NewListByResourceGroupPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return ServiceEndpointPoliciesClientListByResourceGroupResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ServiceEndpointPoliciesClientListByResourceGroupResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceEndpointPoliciesClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) - } return client.listByResourceGroupHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -388,6 +396,10 @@ func (client *ServiceEndpointPoliciesClient) listByResourceGroupHandleResponse(r // method. func (client *ServiceEndpointPoliciesClient) UpdateTags(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, parameters TagsObject, options *ServiceEndpointPoliciesClientUpdateTagsOptions) (ServiceEndpointPoliciesClientUpdateTagsResponse, error) { var err error + const operationName = "ServiceEndpointPoliciesClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, serviceEndpointPolicyName, parameters, options) if err != nil { return ServiceEndpointPoliciesClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/serviceendpointpolicydefinitions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/serviceendpointpolicydefinitions_client.go index 62f6a730b..ac6597319 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/serviceendpointpolicydefinitions_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/serviceendpointpolicydefinitions_client.go @@ -33,7 +33,7 @@ type ServiceEndpointPolicyDefinitionsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewServiceEndpointPolicyDefinitionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ServiceEndpointPolicyDefinitionsClient, error) { - cl, err := arm.NewClient(moduleName+".ServiceEndpointPolicyDefinitionsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *ServiceEndpointPolicyDefinitionsClient) BeginCreateOrUpdate(ctx co } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ServiceEndpointPolicyDefinitionsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ServiceEndpointPolicyDefinitionsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ServiceEndpointPolicyDefinitionsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *ServiceEndpointPolicyDefinitionsClient) BeginCreateOrUpdate(ctx co // Generated from API version 2023-05-01 func (client *ServiceEndpointPolicyDefinitionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, serviceEndpointPolicyDefinitionName string, serviceEndpointPolicyDefinitions ServiceEndpointPolicyDefinition, options *ServiceEndpointPolicyDefinitionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "ServiceEndpointPolicyDefinitionsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, serviceEndpointPolicyName, serviceEndpointPolicyDefinitionName, serviceEndpointPolicyDefinitions, options) if err != nil { return nil, err @@ -140,10 +147,13 @@ func (client *ServiceEndpointPolicyDefinitionsClient) BeginDelete(ctx context.Co } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ServiceEndpointPolicyDefinitionsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[ServiceEndpointPolicyDefinitionsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ServiceEndpointPolicyDefinitionsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -153,6 +163,10 @@ func (client *ServiceEndpointPolicyDefinitionsClient) BeginDelete(ctx context.Co // Generated from API version 2023-05-01 func (client *ServiceEndpointPolicyDefinitionsClient) deleteOperation(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, serviceEndpointPolicyDefinitionName string, options *ServiceEndpointPolicyDefinitionsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "ServiceEndpointPolicyDefinitionsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := 
runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, serviceEndpointPolicyName, serviceEndpointPolicyDefinitionName, options) if err != nil { return nil, err @@ -209,6 +223,10 @@ func (client *ServiceEndpointPolicyDefinitionsClient) deleteCreateRequest(ctx co // method. func (client *ServiceEndpointPolicyDefinitionsClient) Get(ctx context.Context, resourceGroupName string, serviceEndpointPolicyName string, serviceEndpointPolicyDefinitionName string, options *ServiceEndpointPolicyDefinitionsClientGetOptions) (ServiceEndpointPolicyDefinitionsClientGetResponse, error) { var err error + const operationName = "ServiceEndpointPolicyDefinitionsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, serviceEndpointPolicyName, serviceEndpointPolicyDefinitionName, options) if err != nil { return ServiceEndpointPolicyDefinitionsClientGetResponse{}, err @@ -277,25 +295,20 @@ func (client *ServiceEndpointPolicyDefinitionsClient) NewListByResourceGroupPage return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ServiceEndpointPolicyDefinitionsClientListByResourceGroupResponse) (ServiceEndpointPolicyDefinitionsClientListByResourceGroupResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, serviceEndpointPolicyName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return ServiceEndpointPolicyDefinitionsClientListByResourceGroupResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ServiceEndpointPolicyDefinitionsClient.NewListByResourceGroupPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByResourceGroupCreateRequest(ctx, resourceGroupName, serviceEndpointPolicyName, options) + }, nil) if err != nil { return ServiceEndpointPolicyDefinitionsClientListByResourceGroupResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceEndpointPolicyDefinitionsClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) - } return client.listByResourceGroupHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/servicetaginformation_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/servicetaginformation_client.go index 3379a4eb9..49d03beab 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/servicetaginformation_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/servicetaginformation_client.go @@ -34,7 +34,7 @@ type ServiceTagInformationClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewServiceTagInformationClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ServiceTagInformationClient, error) { - cl, err := arm.NewClient(moduleName+".ServiceTagInformationClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -59,25 +59,20 @@ func (client *ServiceTagInformationClient) NewListPager(location string, options return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *ServiceTagInformationClientListResponse) (ServiceTagInformationClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, location, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ServiceTagInformationClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, location, options) + }, nil) if err != nil { return ServiceTagInformationClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ServiceTagInformationClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceTagInformationClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/servicetags_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/servicetags_client.go index fa1369cd4..f0ecb696d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/servicetags_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/servicetags_client.go @@ -33,7 +33,7 @@ type ServiceTagsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewServiceTagsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ServiceTagsClient, error) { - cl, err := arm.NewClient(moduleName+".ServiceTagsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -54,6 +54,10 @@ func NewServiceTagsClient(subscriptionID string, credential azcore.TokenCredenti // - options - ServiceTagsClientListOptions contains the optional parameters for the ServiceTagsClient.List method. 
func (client *ServiceTagsClient) List(ctx context.Context, location string, options *ServiceTagsClientListOptions) (ServiceTagsClientListResponse, error) { var err error + const operationName = "ServiceTagsClient.List" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listCreateRequest(ctx, location, options) if err != nil { return ServiceTagsClientListResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/staticmembers_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/staticmembers_client.go index 79f39d40b..6e5e7ed02 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/staticmembers_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/staticmembers_client.go @@ -34,7 +34,7 @@ type StaticMembersClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewStaticMembersClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*StaticMembersClient, error) { - cl, err := arm.NewClient(moduleName+".StaticMembersClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -58,6 +58,10 @@ func NewStaticMembersClient(subscriptionID string, credential azcore.TokenCreden // method. func (client *StaticMembersClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkManagerName string, networkGroupName string, staticMemberName string, parameters StaticMember, options *StaticMembersClientCreateOrUpdateOptions) (StaticMembersClientCreateOrUpdateResponse, error) { var err error + const operationName = "StaticMembersClient.CreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, networkManagerName, networkGroupName, staticMemberName, parameters, options) if err != nil { return StaticMembersClientCreateOrUpdateResponse{}, err @@ -131,6 +135,10 @@ func (client *StaticMembersClient) createOrUpdateHandleResponse(resp *http.Respo // - options - StaticMembersClientDeleteOptions contains the optional parameters for the StaticMembersClient.Delete method. 
func (client *StaticMembersClient) Delete(ctx context.Context, resourceGroupName string, networkManagerName string, networkGroupName string, staticMemberName string, options *StaticMembersClientDeleteOptions) (StaticMembersClientDeleteResponse, error) { var err error + const operationName = "StaticMembersClient.Delete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, networkManagerName, networkGroupName, staticMemberName, options) if err != nil { return StaticMembersClientDeleteResponse{}, err @@ -191,6 +199,10 @@ func (client *StaticMembersClient) deleteCreateRequest(ctx context.Context, reso // - options - StaticMembersClientGetOptions contains the optional parameters for the StaticMembersClient.Get method. func (client *StaticMembersClient) Get(ctx context.Context, resourceGroupName string, networkManagerName string, networkGroupName string, staticMemberName string, options *StaticMembersClientGetOptions) (StaticMembersClientGetResponse, error) { var err error + const operationName = "StaticMembersClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, networkManagerName, networkGroupName, staticMemberName, options) if err != nil { return StaticMembersClientGetResponse{}, err @@ -263,25 +275,20 @@ func (client *StaticMembersClient) NewListPager(resourceGroupName string, networ return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *StaticMembersClientListResponse) (StaticMembersClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, networkManagerName, networkGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "StaticMembersClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, networkManagerName, networkGroupName, options) + }, nil) if err != nil { return StaticMembersClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return StaticMembersClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return StaticMembersClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/subnets_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/subnets_client.go index 31977cadc..6a73e8b6f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/subnets_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/subnets_client.go @@ -33,7 +33,7 @@ type SubnetsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. 
// - options - pass nil to accept the default values. func NewSubnetsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*SubnetsClient, error) { - cl, err := arm.NewClient(moduleName+".SubnetsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *SubnetsClient) BeginCreateOrUpdate(ctx context.Context, resourceGr } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[SubnetsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[SubnetsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[SubnetsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *SubnetsClient) BeginCreateOrUpdate(ctx context.Context, resourceGr // Generated from API version 2023-05-01 func (client *SubnetsClient) createOrUpdate(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters Subnet, options *SubnetsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "SubnetsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, virtualNetworkName, subnetName, subnetParameters, options) if err != nil { return nil, err @@ -139,10 +146,13 @@ func (client *SubnetsClient) BeginDelete(ctx context.Context, resourceGroupName } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[SubnetsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[SubnetsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[SubnetsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -152,6 +162,10 @@ func (client *SubnetsClient) BeginDelete(ctx context.Context, resourceGroupName // Generated from API version 2023-05-01 func (client *SubnetsClient) deleteOperation(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, options *SubnetsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "SubnetsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, virtualNetworkName, subnetName, options) if err != nil { return nil, err @@ -207,6 +221,10 @@ func (client *SubnetsClient) deleteCreateRequest(ctx context.Context, resourceGr // - options - SubnetsClientGetOptions contains the optional parameters for the SubnetsClient.Get method. 
func (client *SubnetsClient) Get(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, options *SubnetsClientGetOptions) (SubnetsClientGetResponse, error) { var err error + const operationName = "SubnetsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, virtualNetworkName, subnetName, options) if err != nil { return SubnetsClientGetResponse{}, err @@ -277,25 +295,20 @@ func (client *SubnetsClient) NewListPager(resourceGroupName string, virtualNetwo return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *SubnetsClientListResponse) (SubnetsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, virtualNetworkName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return SubnetsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "SubnetsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, virtualNetworkName, options) + }, nil) if err != nil { return SubnetsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return SubnetsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -352,10 +365,13 @@ func (client *SubnetsClient) BeginPrepareNetworkPolicies(ctx context.Context, re } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[SubnetsClientPrepareNetworkPoliciesResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[SubnetsClientPrepareNetworkPoliciesResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[SubnetsClientPrepareNetworkPoliciesResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -365,6 +381,10 @@ func (client *SubnetsClient) BeginPrepareNetworkPolicies(ctx context.Context, re // Generated from API version 2023-05-01 func (client *SubnetsClient) prepareNetworkPolicies(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, prepareNetworkPoliciesRequestParameters PrepareNetworkPoliciesRequest, options *SubnetsClientBeginPrepareNetworkPoliciesOptions) (*http.Response, error) { var err error + const operationName = "SubnetsClient.BeginPrepareNetworkPolicies" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.prepareNetworkPoliciesCreateRequest(ctx, resourceGroupName, virtualNetworkName, subnetName, prepareNetworkPoliciesRequestParameters, options) if err != nil { return nil, err @@ -431,10 +451,13 @@ func (client *SubnetsClient) 
BeginUnprepareNetworkPolicies(ctx context.Context, } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[SubnetsClientUnprepareNetworkPoliciesResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[SubnetsClientUnprepareNetworkPoliciesResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[SubnetsClientUnprepareNetworkPoliciesResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -444,6 +467,10 @@ func (client *SubnetsClient) BeginUnprepareNetworkPolicies(ctx context.Context, // Generated from API version 2023-05-01 func (client *SubnetsClient) unprepareNetworkPolicies(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, unprepareNetworkPoliciesRequestParameters UnprepareNetworkPoliciesRequest, options *SubnetsClientBeginUnprepareNetworkPoliciesOptions) (*http.Response, error) { var err error + const operationName = "SubnetsClient.BeginUnprepareNetworkPolicies" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.unprepareNetworkPoliciesCreateRequest(ctx, resourceGroupName, virtualNetworkName, subnetName, unprepareNetworkPoliciesRequestParameters, options) if err != nil { return nil, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/subscriptionnetworkmanagerconnections_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/subscriptionnetworkmanagerconnections_client.go index f756bd114..0c5b7d5a6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/subscriptionnetworkmanagerconnections_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/subscriptionnetworkmanagerconnections_client.go @@ -34,7 +34,7 @@ type SubscriptionNetworkManagerConnectionsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewSubscriptionNetworkManagerConnectionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*SubscriptionNetworkManagerConnectionsClient, error) { - cl, err := arm.NewClient(moduleName+".SubscriptionNetworkManagerConnectionsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -55,6 +55,10 @@ func NewSubscriptionNetworkManagerConnectionsClient(subscriptionID string, crede // method. 
func (client *SubscriptionNetworkManagerConnectionsClient) CreateOrUpdate(ctx context.Context, networkManagerConnectionName string, parameters ManagerConnection, options *SubscriptionNetworkManagerConnectionsClientCreateOrUpdateOptions) (SubscriptionNetworkManagerConnectionsClientCreateOrUpdateResponse, error) { var err error + const operationName = "SubscriptionNetworkManagerConnectionsClient.CreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, networkManagerConnectionName, parameters, options) if err != nil { return SubscriptionNetworkManagerConnectionsClientCreateOrUpdateResponse{}, err @@ -114,6 +118,10 @@ func (client *SubscriptionNetworkManagerConnectionsClient) createOrUpdateHandleR // method. func (client *SubscriptionNetworkManagerConnectionsClient) Delete(ctx context.Context, networkManagerConnectionName string, options *SubscriptionNetworkManagerConnectionsClientDeleteOptions) (SubscriptionNetworkManagerConnectionsClientDeleteResponse, error) { var err error + const operationName = "SubscriptionNetworkManagerConnectionsClient.Delete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, networkManagerConnectionName, options) if err != nil { return SubscriptionNetworkManagerConnectionsClientDeleteResponse{}, err @@ -160,6 +168,10 @@ func (client *SubscriptionNetworkManagerConnectionsClient) deleteCreateRequest(c // method. func (client *SubscriptionNetworkManagerConnectionsClient) Get(ctx context.Context, networkManagerConnectionName string, options *SubscriptionNetworkManagerConnectionsClientGetOptions) (SubscriptionNetworkManagerConnectionsClientGetResponse, error) { var err error + const operationName = "SubscriptionNetworkManagerConnectionsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, networkManagerConnectionName, options) if err != nil { return SubscriptionNetworkManagerConnectionsClientGetResponse{}, err @@ -218,25 +230,20 @@ func (client *SubscriptionNetworkManagerConnectionsClient) NewListPager(options return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *SubscriptionNetworkManagerConnectionsClientListResponse) (SubscriptionNetworkManagerConnectionsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "SubscriptionNetworkManagerConnectionsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) if err != nil { return SubscriptionNetworkManagerConnectionsClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return 
SubscriptionNetworkManagerConnectionsClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return SubscriptionNetworkManagerConnectionsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/time_rfc3339.go index 851a2002c..ffdedf87e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/time_rfc3339.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/time_rfc3339.go @@ -18,50 +18,50 @@ import ( "time" ) -const ( - utcLayoutJSON = `"2006-01-02T15:04:05.999999999"` - utcLayout = "2006-01-02T15:04:05.999999999" - rfc3339JSON = `"` + time.RFC3339Nano + `"` -) - // Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) -type timeRFC3339 time.Time +const ( + utcDateTimeJSON = `"2006-01-02T15:04:05.999999999"` + utcDateTime = "2006-01-02T15:04:05.999999999" + dateTimeJSON = `"` + time.RFC3339Nano + `"` +) + +type dateTimeRFC3339 time.Time -func (t timeRFC3339) MarshalJSON() (json []byte, err error) { +func (t dateTimeRFC3339) MarshalJSON() ([]byte, error) { tt := time.Time(t) return tt.MarshalJSON() } -func (t timeRFC3339) MarshalText() (text []byte, err error) { +func (t dateTimeRFC3339) MarshalText() ([]byte, error) { tt := time.Time(t) return tt.MarshalText() } -func (t *timeRFC3339) UnmarshalJSON(data []byte) error { - layout := utcLayoutJSON +func (t *dateTimeRFC3339) UnmarshalJSON(data []byte) error { + layout := utcDateTimeJSON if tzOffsetRegex.Match(data) { - layout = rfc3339JSON + layout = dateTimeJSON } return t.Parse(layout, string(data)) } -func (t *timeRFC3339) UnmarshalText(data []byte) (err error) { - layout := utcLayout +func (t *dateTimeRFC3339) UnmarshalText(data []byte) error { + layout := utcDateTime if tzOffsetRegex.Match(data) { layout = time.RFC3339Nano } return t.Parse(layout, string(data)) } -func (t *timeRFC3339) Parse(layout, value string) error { +func (t *dateTimeRFC3339) Parse(layout, value string) error { p, err := time.Parse(layout, strings.ToUpper(value)) - *t = timeRFC3339(p) + *t = dateTimeRFC3339(p) return err } -func populateTimeRFC3339(m map[string]any, k string, t *time.Time) { +func populateDateTimeRFC3339(m map[string]any, k string, t *time.Time) { if t == nil { return } else if azcore.IsNullValue(t) { @@ -70,14 +70,14 @@ func populateTimeRFC3339(m map[string]any, k string, t *time.Time) { } else if reflect.ValueOf(t).IsNil() { return } - m[k] = (*timeRFC3339)(t) + m[k] = (*dateTimeRFC3339)(t) } -func unpopulateTimeRFC3339(data json.RawMessage, fn string, t **time.Time) error { +func unpopulateDateTimeRFC3339(data json.RawMessage, fn string, t **time.Time) error { if data == nil || strings.EqualFold(string(data), "null") { return nil } - var aux timeRFC3339 + var aux dateTimeRFC3339 if err := json.Unmarshal(data, &aux); err != nil { return fmt.Errorf("struct field %s: %v", fn, err) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/usages_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/usages_client.go index 0368efe46..ebacd3e6a 100644 --- 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/usages_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/usages_client.go @@ -33,7 +33,7 @@ type UsagesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewUsagesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*UsagesClient, error) { - cl, err := arm.NewClient(moduleName+".UsagesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -55,25 +55,20 @@ func (client *UsagesClient) NewListPager(location string, options *UsagesClientL return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *UsagesClientListResponse) (UsagesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, location, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "UsagesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, location, options) + }, nil) if err != nil { return UsagesClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return UsagesClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return UsagesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vipswap_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vipswap_client.go index d317d341d..ac92776c0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vipswap_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vipswap_client.go @@ -33,7 +33,7 @@ type VipSwapClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewVipSwapClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VipSwapClient, error) { - cl, err := arm.NewClient(moduleName+".VipSwapClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -58,10 +58,14 @@ func (client *VipSwapClient) BeginCreate(ctx context.Context, groupName string, if err != nil { return nil, err } - poller, err := runtime.NewPoller[VipSwapClientCreateResponse](resp, client.internal.Pipeline(), nil) + poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VipSwapClientCreateResponse]{ + Tracer: client.internal.Tracer(), + }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VipSwapClientCreateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VipSwapClientCreateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -71,6 +75,10 @@ func (client *VipSwapClient) BeginCreate(ctx context.Context, groupName string, // Generated from API version 2023-05-01 func (client *VipSwapClient) create(ctx context.Context, groupName string, resourceName string, parameters SwapResource, options *VipSwapClientBeginCreateOptions) (*http.Response, error) { var err error + const operationName = "VipSwapClient.BeginCreate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createCreateRequest(ctx, groupName, resourceName, parameters, options) if err != nil { return nil, err @@ -126,6 +134,10 @@ func (client *VipSwapClient) createCreateRequest(ctx context.Context, groupName // - options - VipSwapClientGetOptions contains the optional parameters for the VipSwapClient.Get method. func (client *VipSwapClient) Get(ctx context.Context, groupName string, resourceName string, options *VipSwapClientGetOptions) (VipSwapClientGetResponse, error) { var err error + const operationName = "VipSwapClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, groupName, resourceName, options) if err != nil { return VipSwapClientGetResponse{}, err @@ -188,6 +200,10 @@ func (client *VipSwapClient) getHandleResponse(resp *http.Response) (VipSwapClie // - options - VipSwapClientListOptions contains the optional parameters for the VipSwapClient.List method. 
func (client *VipSwapClient) List(ctx context.Context, groupName string, resourceName string, options *VipSwapClientListOptions) (VipSwapClientListResponse, error) { var err error + const operationName = "VipSwapClient.List" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listCreateRequest(ctx, groupName, resourceName, options) if err != nil { return VipSwapClientListResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualapplianceconnections_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualapplianceconnections_client.go index 96661b620..9ecd1b48c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualapplianceconnections_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualapplianceconnections_client.go @@ -33,7 +33,7 @@ type VirtualApplianceConnectionsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewVirtualApplianceConnectionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualApplianceConnectionsClient, error) { - cl, err := arm.NewClient(moduleName+".VirtualApplianceConnectionsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -63,10 +63,13 @@ func (client *VirtualApplianceConnectionsClient) BeginCreateOrUpdate(ctx context } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualApplianceConnectionsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualApplianceConnectionsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualApplianceConnectionsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -76,6 +79,10 @@ func (client *VirtualApplianceConnectionsClient) BeginCreateOrUpdate(ctx context // Generated from API version 2023-05-01 func (client *VirtualApplianceConnectionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, connectionName string, networkVirtualApplianceConnectionParameters VirtualApplianceConnection, options *VirtualApplianceConnectionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "VirtualApplianceConnectionsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, networkVirtualApplianceName, connectionName, networkVirtualApplianceConnectionParameters, options) if err != nil { return nil, err @@ -141,10 +148,13 @@ func (client *VirtualApplianceConnectionsClient) BeginDelete(ctx context.Context } poller, err := 
runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualApplianceConnectionsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualApplianceConnectionsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualApplianceConnectionsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -154,6 +164,10 @@ func (client *VirtualApplianceConnectionsClient) BeginDelete(ctx context.Context // Generated from API version 2023-05-01 func (client *VirtualApplianceConnectionsClient) deleteOperation(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, connectionName string, options *VirtualApplianceConnectionsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "VirtualApplianceConnectionsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, networkVirtualApplianceName, connectionName, options) if err != nil { return nil, err @@ -210,6 +224,10 @@ func (client *VirtualApplianceConnectionsClient) deleteCreateRequest(ctx context // method. func (client *VirtualApplianceConnectionsClient) Get(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, connectionName string, options *VirtualApplianceConnectionsClientGetOptions) (VirtualApplianceConnectionsClientGetResponse, error) { var err error + const operationName = "VirtualApplianceConnectionsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, networkVirtualApplianceName, connectionName, options) if err != nil { return VirtualApplianceConnectionsClientGetResponse{}, err @@ -278,25 +296,20 @@ func (client *VirtualApplianceConnectionsClient) NewListPager(resourceGroupName return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualApplianceConnectionsClientListResponse) (VirtualApplianceConnectionsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, networkVirtualApplianceName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return VirtualApplianceConnectionsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualApplianceConnectionsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, networkVirtualApplianceName, options) + }, nil) if err != nil { return VirtualApplianceConnectionsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return 
VirtualApplianceConnectionsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualappliances_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualappliances_client.go index 7d2ed1b5f..01fa3cfef 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualappliances_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualappliances_client.go @@ -33,7 +33,7 @@ type VirtualAppliancesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewVirtualAppliancesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualAppliancesClient, error) { - cl, err := arm.NewClient(moduleName+".VirtualAppliancesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *VirtualAppliancesClient) BeginCreateOrUpdate(ctx context.Context, } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualAppliancesClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualAppliancesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualAppliancesClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *VirtualAppliancesClient) BeginCreateOrUpdate(ctx context.Context, // Generated from API version 2023-05-01 func (client *VirtualAppliancesClient) createOrUpdate(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, parameters VirtualAppliance, options *VirtualAppliancesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "VirtualAppliancesClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, networkVirtualApplianceName, parameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *VirtualAppliancesClient) BeginDelete(ctx context.Context, resource } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualAppliancesClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualAppliancesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualAppliancesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *VirtualAppliancesClient) 
BeginDelete(ctx context.Context, resource // Generated from API version 2023-05-01 func (client *VirtualAppliancesClient) deleteOperation(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, options *VirtualAppliancesClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "VirtualAppliancesClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, networkVirtualApplianceName, options) if err != nil { return nil, err @@ -197,6 +211,10 @@ func (client *VirtualAppliancesClient) deleteCreateRequest(ctx context.Context, // - options - VirtualAppliancesClientGetOptions contains the optional parameters for the VirtualAppliancesClient.Get method. func (client *VirtualAppliancesClient) Get(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, options *VirtualAppliancesClientGetOptions) (VirtualAppliancesClientGetResponse, error) { var err error + const operationName = "VirtualAppliancesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, networkVirtualApplianceName, options) if err != nil { return VirtualAppliancesClientGetResponse{}, err @@ -262,25 +280,20 @@ func (client *VirtualAppliancesClient) NewListPager(options *VirtualAppliancesCl return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualAppliancesClientListResponse) (VirtualAppliancesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return VirtualAppliancesClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualAppliancesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) if err != nil { return VirtualAppliancesClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualAppliancesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -323,25 +336,20 @@ func (client *VirtualAppliancesClient) NewListByResourceGroupPager(resourceGroup return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualAppliancesClientListByResourceGroupResponse) (VirtualAppliancesClientListByResourceGroupResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualAppliancesClient.NewListByResourceGroupPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := 
runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return VirtualAppliancesClientListByResourceGroupResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return VirtualAppliancesClientListByResourceGroupResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualAppliancesClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) - } return client.listByResourceGroupHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -387,6 +395,10 @@ func (client *VirtualAppliancesClient) listByResourceGroupHandleResponse(resp *h // method. func (client *VirtualAppliancesClient) UpdateTags(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, parameters TagsObject, options *VirtualAppliancesClientUpdateTagsOptions) (VirtualAppliancesClientUpdateTagsResponse, error) { var err error + const operationName = "VirtualAppliancesClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, networkVirtualApplianceName, parameters, options) if err != nil { return VirtualAppliancesClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualappliancesites_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualappliancesites_client.go index e75bd2ce8..af8f589f8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualappliancesites_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualappliancesites_client.go @@ -33,7 +33,7 @@ type VirtualApplianceSitesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewVirtualApplianceSitesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualApplianceSitesClient, error) { - cl, err := arm.NewClient(moduleName+".VirtualApplianceSitesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *VirtualApplianceSitesClient) BeginCreateOrUpdate(ctx context.Conte } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualApplianceSitesClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualApplianceSitesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualApplianceSitesClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *VirtualApplianceSitesClient) BeginCreateOrUpdate(ctx context.Conte // Generated from API version 2023-05-01 func (client *VirtualApplianceSitesClient) createOrUpdate(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, siteName string, parameters VirtualApplianceSite, options *VirtualApplianceSitesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "VirtualApplianceSitesClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, networkVirtualApplianceName, siteName, parameters, options) if err != nil { return nil, err @@ -140,10 +147,13 @@ func (client *VirtualApplianceSitesClient) BeginDelete(ctx context.Context, reso } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualApplianceSitesClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualApplianceSitesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualApplianceSitesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -153,6 +163,10 @@ func (client *VirtualApplianceSitesClient) BeginDelete(ctx context.Context, reso // Generated from API version 2023-05-01 func (client *VirtualApplianceSitesClient) deleteOperation(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, siteName string, options *VirtualApplianceSitesClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "VirtualApplianceSitesClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, networkVirtualApplianceName, siteName, options) if err != nil { return nil, err @@ -209,6 +223,10 @@ func (client 
*VirtualApplianceSitesClient) deleteCreateRequest(ctx context.Conte // method. func (client *VirtualApplianceSitesClient) Get(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, siteName string, options *VirtualApplianceSitesClientGetOptions) (VirtualApplianceSitesClientGetResponse, error) { var err error + const operationName = "VirtualApplianceSitesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, networkVirtualApplianceName, siteName, options) if err != nil { return VirtualApplianceSitesClientGetResponse{}, err @@ -277,25 +295,20 @@ func (client *VirtualApplianceSitesClient) NewListPager(resourceGroupName string return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualApplianceSitesClientListResponse) (VirtualApplianceSitesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, networkVirtualApplianceName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return VirtualApplianceSitesClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualApplianceSitesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, networkVirtualApplianceName, options) + }, nil) if err != nil { return VirtualApplianceSitesClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualApplianceSitesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualapplianceskus_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualapplianceskus_client.go index 49f709be5..2247d3c9c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualapplianceskus_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualapplianceskus_client.go @@ -33,7 +33,7 @@ type VirtualApplianceSKUsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewVirtualApplianceSKUsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualApplianceSKUsClient, error) { - cl, err := arm.NewClient(moduleName+".VirtualApplianceSKUsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -53,6 +53,10 @@ func NewVirtualApplianceSKUsClient(subscriptionID string, credential azcore.Toke // method. 
func (client *VirtualApplianceSKUsClient) Get(ctx context.Context, skuName string, options *VirtualApplianceSKUsClientGetOptions) (VirtualApplianceSKUsClientGetResponse, error) { var err error + const operationName = "VirtualApplianceSKUsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, skuName, options) if err != nil { return VirtualApplianceSKUsClientGetResponse{}, err @@ -111,25 +115,20 @@ func (client *VirtualApplianceSKUsClient) NewListPager(options *VirtualAppliance return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualApplianceSKUsClientListResponse) (VirtualApplianceSKUsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualApplianceSKUsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) if err != nil { return VirtualApplianceSKUsClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return VirtualApplianceSKUsClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualApplianceSKUsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubbgpconnection_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubbgpconnection_client.go index b4952595a..1334b44f4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubbgpconnection_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubbgpconnection_client.go @@ -33,7 +33,7 @@ type VirtualHubBgpConnectionClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewVirtualHubBgpConnectionClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualHubBgpConnectionClient, error) { - cl, err := arm.NewClient(moduleName+".VirtualHubBgpConnectionClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *VirtualHubBgpConnectionClient) BeginCreateOrUpdate(ctx context.Con } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualHubBgpConnectionClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualHubBgpConnectionClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualHubBgpConnectionClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *VirtualHubBgpConnectionClient) BeginCreateOrUpdate(ctx context.Con // Generated from API version 2023-05-01 func (client *VirtualHubBgpConnectionClient) createOrUpdate(ctx context.Context, resourceGroupName string, virtualHubName string, connectionName string, parameters BgpConnection, options *VirtualHubBgpConnectionClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "VirtualHubBgpConnectionClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, virtualHubName, connectionName, parameters, options) if err != nil { return nil, err @@ -140,10 +147,13 @@ func (client *VirtualHubBgpConnectionClient) BeginDelete(ctx context.Context, re } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualHubBgpConnectionClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualHubBgpConnectionClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualHubBgpConnectionClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -153,6 +163,10 @@ func (client *VirtualHubBgpConnectionClient) BeginDelete(ctx context.Context, re // Generated from API version 2023-05-01 func (client *VirtualHubBgpConnectionClient) deleteOperation(ctx context.Context, resourceGroupName string, virtualHubName string, connectionName string, options *VirtualHubBgpConnectionClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "VirtualHubBgpConnectionClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, virtualHubName, connectionName, options) if err != nil { return nil, err @@ -209,6 +223,10 @@ func (client 
*VirtualHubBgpConnectionClient) deleteCreateRequest(ctx context.Con // method. func (client *VirtualHubBgpConnectionClient) Get(ctx context.Context, resourceGroupName string, virtualHubName string, connectionName string, options *VirtualHubBgpConnectionClientGetOptions) (VirtualHubBgpConnectionClientGetResponse, error) { var err error + const operationName = "VirtualHubBgpConnectionClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, virtualHubName, connectionName, options) if err != nil { return VirtualHubBgpConnectionClientGetResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubbgpconnections_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubbgpconnections_client.go index fda2975fe..80b64897a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubbgpconnections_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubbgpconnections_client.go @@ -33,7 +33,7 @@ type VirtualHubBgpConnectionsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewVirtualHubBgpConnectionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualHubBgpConnectionsClient, error) { - cl, err := arm.NewClient(moduleName+".VirtualHubBgpConnectionsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -57,25 +57,20 @@ func (client *VirtualHubBgpConnectionsClient) NewListPager(resourceGroupName str return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualHubBgpConnectionsClientListResponse) (VirtualHubBgpConnectionsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, virtualHubName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualHubBgpConnectionsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, virtualHubName, options) + }, nil) if err != nil { return VirtualHubBgpConnectionsClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return VirtualHubBgpConnectionsClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualHubBgpConnectionsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -131,10 +126,13 @@ func (client *VirtualHubBgpConnectionsClient) BeginListAdvertisedRoutes(ctx cont } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualHubBgpConnectionsClientListAdvertisedRoutesResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: 
client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualHubBgpConnectionsClientListAdvertisedRoutesResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualHubBgpConnectionsClientListAdvertisedRoutesResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -144,6 +142,10 @@ func (client *VirtualHubBgpConnectionsClient) BeginListAdvertisedRoutes(ctx cont // Generated from API version 2023-05-01 func (client *VirtualHubBgpConnectionsClient) listAdvertisedRoutes(ctx context.Context, resourceGroupName string, hubName string, connectionName string, options *VirtualHubBgpConnectionsClientBeginListAdvertisedRoutesOptions) (*http.Response, error) { var err error + const operationName = "VirtualHubBgpConnectionsClient.BeginListAdvertisedRoutes" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listAdvertisedRoutesCreateRequest(ctx, resourceGroupName, hubName, connectionName, options) if err != nil { return nil, err @@ -206,10 +208,13 @@ func (client *VirtualHubBgpConnectionsClient) BeginListLearnedRoutes(ctx context } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualHubBgpConnectionsClientListLearnedRoutesResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualHubBgpConnectionsClientListLearnedRoutesResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualHubBgpConnectionsClientListLearnedRoutesResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -219,6 +224,10 @@ func (client *VirtualHubBgpConnectionsClient) BeginListLearnedRoutes(ctx context // Generated from API version 2023-05-01 func (client *VirtualHubBgpConnectionsClient) listLearnedRoutes(ctx context.Context, resourceGroupName string, hubName string, connectionName string, options *VirtualHubBgpConnectionsClientBeginListLearnedRoutesOptions) (*http.Response, error) { var err error + const operationName = "VirtualHubBgpConnectionsClient.BeginListLearnedRoutes" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listLearnedRoutesCreateRequest(ctx, resourceGroupName, hubName, connectionName, options) if err != nil { return nil, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubipconfiguration_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubipconfiguration_client.go index 0b5195a65..2bd36082f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubipconfiguration_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubipconfiguration_client.go @@ -33,7 +33,7 @@ type VirtualHubIPConfigurationClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. 
// - options - pass nil to accept the default values. func NewVirtualHubIPConfigurationClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualHubIPConfigurationClient, error) { - cl, err := arm.NewClient(moduleName+".VirtualHubIPConfigurationClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *VirtualHubIPConfigurationClient) BeginCreateOrUpdate(ctx context.C } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualHubIPConfigurationClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualHubIPConfigurationClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualHubIPConfigurationClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *VirtualHubIPConfigurationClient) BeginCreateOrUpdate(ctx context.C // Generated from API version 2023-05-01 func (client *VirtualHubIPConfigurationClient) createOrUpdate(ctx context.Context, resourceGroupName string, virtualHubName string, ipConfigName string, parameters HubIPConfiguration, options *VirtualHubIPConfigurationClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "VirtualHubIPConfigurationClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, virtualHubName, ipConfigName, parameters, options) if err != nil { return nil, err @@ -140,10 +147,13 @@ func (client *VirtualHubIPConfigurationClient) BeginDelete(ctx context.Context, } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualHubIPConfigurationClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualHubIPConfigurationClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualHubIPConfigurationClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -153,6 +163,10 @@ func (client *VirtualHubIPConfigurationClient) BeginDelete(ctx context.Context, // Generated from API version 2023-05-01 func (client *VirtualHubIPConfigurationClient) deleteOperation(ctx context.Context, resourceGroupName string, virtualHubName string, ipConfigName string, options *VirtualHubIPConfigurationClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "VirtualHubIPConfigurationClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, virtualHubName, ipConfigName, options) if err != nil { 
return nil, err @@ -209,6 +223,10 @@ func (client *VirtualHubIPConfigurationClient) deleteCreateRequest(ctx context.C // method. func (client *VirtualHubIPConfigurationClient) Get(ctx context.Context, resourceGroupName string, virtualHubName string, ipConfigName string, options *VirtualHubIPConfigurationClientGetOptions) (VirtualHubIPConfigurationClientGetResponse, error) { var err error + const operationName = "VirtualHubIPConfigurationClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, virtualHubName, ipConfigName, options) if err != nil { return VirtualHubIPConfigurationClientGetResponse{}, err @@ -277,25 +295,20 @@ func (client *VirtualHubIPConfigurationClient) NewListPager(resourceGroupName st return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualHubIPConfigurationClientListResponse) (VirtualHubIPConfigurationClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, virtualHubName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return VirtualHubIPConfigurationClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualHubIPConfigurationClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, virtualHubName, options) + }, nil) if err != nil { return VirtualHubIPConfigurationClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualHubIPConfigurationClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubroutetablev2s_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubroutetablev2s_client.go index a3affc392..421996273 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubroutetablev2s_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubroutetablev2s_client.go @@ -33,7 +33,7 @@ type VirtualHubRouteTableV2SClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewVirtualHubRouteTableV2SClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualHubRouteTableV2SClient, error) { - cl, err := arm.NewClient(moduleName+".VirtualHubRouteTableV2SClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *VirtualHubRouteTableV2SClient) BeginCreateOrUpdate(ctx context.Con } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualHubRouteTableV2SClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualHubRouteTableV2SClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualHubRouteTableV2SClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *VirtualHubRouteTableV2SClient) BeginCreateOrUpdate(ctx context.Con // Generated from API version 2023-05-01 func (client *VirtualHubRouteTableV2SClient) createOrUpdate(ctx context.Context, resourceGroupName string, virtualHubName string, routeTableName string, virtualHubRouteTableV2Parameters VirtualHubRouteTableV2, options *VirtualHubRouteTableV2SClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "VirtualHubRouteTableV2SClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, virtualHubName, routeTableName, virtualHubRouteTableV2Parameters, options) if err != nil { return nil, err @@ -140,10 +147,13 @@ func (client *VirtualHubRouteTableV2SClient) BeginDelete(ctx context.Context, re } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualHubRouteTableV2SClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualHubRouteTableV2SClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualHubRouteTableV2SClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -153,6 +163,10 @@ func (client *VirtualHubRouteTableV2SClient) BeginDelete(ctx context.Context, re // Generated from API version 2023-05-01 func (client *VirtualHubRouteTableV2SClient) deleteOperation(ctx context.Context, resourceGroupName string, virtualHubName string, routeTableName string, options *VirtualHubRouteTableV2SClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "VirtualHubRouteTableV2SClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, virtualHubName, routeTableName, options) if err != nil { return nil, err @@ -209,6 
+223,10 @@ func (client *VirtualHubRouteTableV2SClient) deleteCreateRequest(ctx context.Con // method. func (client *VirtualHubRouteTableV2SClient) Get(ctx context.Context, resourceGroupName string, virtualHubName string, routeTableName string, options *VirtualHubRouteTableV2SClientGetOptions) (VirtualHubRouteTableV2SClientGetResponse, error) { var err error + const operationName = "VirtualHubRouteTableV2SClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, virtualHubName, routeTableName, options) if err != nil { return VirtualHubRouteTableV2SClientGetResponse{}, err @@ -277,25 +295,20 @@ func (client *VirtualHubRouteTableV2SClient) NewListPager(resourceGroupName stri return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualHubRouteTableV2SClientListResponse) (VirtualHubRouteTableV2SClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, virtualHubName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return VirtualHubRouteTableV2SClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualHubRouteTableV2SClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, virtualHubName, options) + }, nil) if err != nil { return VirtualHubRouteTableV2SClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualHubRouteTableV2SClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubs_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubs_client.go index c473beaef..3cb2a8d30 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubs_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubs_client.go @@ -33,7 +33,7 @@ type VirtualHubsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewVirtualHubsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualHubsClient, error) { - cl, err := arm.NewClient(moduleName+".VirtualHubsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *VirtualHubsClient) BeginCreateOrUpdate(ctx context.Context, resour } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualHubsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualHubsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualHubsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *VirtualHubsClient) BeginCreateOrUpdate(ctx context.Context, resour // Generated from API version 2023-05-01 func (client *VirtualHubsClient) createOrUpdate(ctx context.Context, resourceGroupName string, virtualHubName string, virtualHubParameters VirtualHub, options *VirtualHubsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "VirtualHubsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, virtualHubName, virtualHubParameters, options) if err != nil { return nil, err @@ -133,10 +140,13 @@ func (client *VirtualHubsClient) BeginDelete(ctx context.Context, resourceGroupN } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualHubsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualHubsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualHubsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -146,6 +156,10 @@ func (client *VirtualHubsClient) BeginDelete(ctx context.Context, resourceGroupN // Generated from API version 2023-05-01 func (client *VirtualHubsClient) deleteOperation(ctx context.Context, resourceGroupName string, virtualHubName string, options *VirtualHubsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "VirtualHubsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, virtualHubName, options) if err != nil { return nil, err @@ -196,6 +210,10 @@ func (client *VirtualHubsClient) deleteCreateRequest(ctx context.Context, resour // - options - VirtualHubsClientGetOptions contains the optional parameters for the VirtualHubsClient.Get method. 
func (client *VirtualHubsClient) Get(ctx context.Context, resourceGroupName string, virtualHubName string, options *VirtualHubsClientGetOptions) (VirtualHubsClientGetResponse, error) { var err error + const operationName = "VirtualHubsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, virtualHubName, options) if err != nil { return VirtualHubsClientGetResponse{}, err @@ -264,10 +282,13 @@ func (client *VirtualHubsClient) BeginGetEffectiveVirtualHubRoutes(ctx context.C } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualHubsClientGetEffectiveVirtualHubRoutesResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualHubsClientGetEffectiveVirtualHubRoutesResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualHubsClientGetEffectiveVirtualHubRoutesResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -278,6 +299,10 @@ func (client *VirtualHubsClient) BeginGetEffectiveVirtualHubRoutes(ctx context.C // Generated from API version 2023-05-01 func (client *VirtualHubsClient) getEffectiveVirtualHubRoutes(ctx context.Context, resourceGroupName string, virtualHubName string, options *VirtualHubsClientBeginGetEffectiveVirtualHubRoutesOptions) (*http.Response, error) { var err error + const operationName = "VirtualHubsClient.BeginGetEffectiveVirtualHubRoutes" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getEffectiveVirtualHubRoutesCreateRequest(ctx, resourceGroupName, virtualHubName, options) if err != nil { return nil, err @@ -342,10 +367,13 @@ func (client *VirtualHubsClient) BeginGetInboundRoutes(ctx context.Context, reso } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualHubsClientGetInboundRoutesResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualHubsClientGetInboundRoutesResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualHubsClientGetInboundRoutesResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -355,6 +383,10 @@ func (client *VirtualHubsClient) BeginGetInboundRoutes(ctx context.Context, reso // Generated from API version 2023-05-01 func (client *VirtualHubsClient) getInboundRoutes(ctx context.Context, resourceGroupName string, virtualHubName string, getInboundRoutesParameters GetInboundRoutesParameters, options *VirtualHubsClientBeginGetInboundRoutesOptions) (*http.Response, error) { var err error + const operationName = "VirtualHubsClient.BeginGetInboundRoutes" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := 
client.getInboundRoutesCreateRequest(ctx, resourceGroupName, virtualHubName, getInboundRoutesParameters, options) if err != nil { return nil, err @@ -416,10 +448,13 @@ func (client *VirtualHubsClient) BeginGetOutboundRoutes(ctx context.Context, res } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualHubsClientGetOutboundRoutesResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualHubsClientGetOutboundRoutesResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualHubsClientGetOutboundRoutesResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -429,6 +464,10 @@ func (client *VirtualHubsClient) BeginGetOutboundRoutes(ctx context.Context, res // Generated from API version 2023-05-01 func (client *VirtualHubsClient) getOutboundRoutes(ctx context.Context, resourceGroupName string, virtualHubName string, getOutboundRoutesParameters GetOutboundRoutesParameters, options *VirtualHubsClientBeginGetOutboundRoutesOptions) (*http.Response, error) { var err error + const operationName = "VirtualHubsClient.BeginGetOutboundRoutes" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getOutboundRoutesCreateRequest(ctx, resourceGroupName, virtualHubName, getOutboundRoutesParameters, options) if err != nil { return nil, err @@ -483,25 +522,20 @@ func (client *VirtualHubsClient) NewListPager(options *VirtualHubsClientListOpti return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualHubsClientListResponse) (VirtualHubsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualHubsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) if err != nil { return VirtualHubsClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return VirtualHubsClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualHubsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -544,25 +578,20 @@ func (client *VirtualHubsClient) NewListByResourceGroupPager(resourceGroupName s return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualHubsClientListByResourceGroupResponse) (VirtualHubsClientListByResourceGroupResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return VirtualHubsClientListByResourceGroupResponse{}, err + ctx = 
context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualHubsClient.NewListByResourceGroupPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return VirtualHubsClientListByResourceGroupResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualHubsClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) - } return client.listByResourceGroupHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -607,6 +636,10 @@ func (client *VirtualHubsClient) listByResourceGroupHandleResponse(resp *http.Re // - options - VirtualHubsClientUpdateTagsOptions contains the optional parameters for the VirtualHubsClient.UpdateTags method. func (client *VirtualHubsClient) UpdateTags(ctx context.Context, resourceGroupName string, virtualHubName string, virtualHubParameters TagsObject, options *VirtualHubsClientUpdateTagsOptions) (VirtualHubsClientUpdateTagsResponse, error) { var err error + const operationName = "VirtualHubsClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, virtualHubName, virtualHubParameters, options) if err != nil { return VirtualHubsClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworkgatewayconnections_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworkgatewayconnections_client.go index 29e424903..f1bdd90b3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworkgatewayconnections_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworkgatewayconnections_client.go @@ -33,7 +33,7 @@ type VirtualNetworkGatewayConnectionsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewVirtualNetworkGatewayConnectionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualNetworkGatewayConnectionsClient, error) { - cl, err := arm.NewClient(moduleName+".VirtualNetworkGatewayConnectionsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *VirtualNetworkGatewayConnectionsClient) BeginCreateOrUpdate(ctx co } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewayConnectionsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewayConnectionsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewayConnectionsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *VirtualNetworkGatewayConnectionsClient) BeginCreateOrUpdate(ctx co // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewayConnectionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters VirtualNetworkGatewayConnection, options *VirtualNetworkGatewayConnectionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewayConnectionsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayConnectionName, parameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *VirtualNetworkGatewayConnectionsClient) BeginDelete(ctx context.Co } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewayConnectionsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewayConnectionsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewayConnectionsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *VirtualNetworkGatewayConnectionsClient) BeginDelete(ctx context.Co // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewayConnectionsClient) deleteOperation(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, options *VirtualNetworkGatewayConnectionsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewayConnectionsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, 
resourceGroupName, virtualNetworkGatewayConnectionName, options) if err != nil { return nil, err @@ -198,6 +212,10 @@ func (client *VirtualNetworkGatewayConnectionsClient) deleteCreateRequest(ctx co // method. func (client *VirtualNetworkGatewayConnectionsClient) Get(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, options *VirtualNetworkGatewayConnectionsClientGetOptions) (VirtualNetworkGatewayConnectionsClientGetResponse, error) { var err error + const operationName = "VirtualNetworkGatewayConnectionsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayConnectionName, options) if err != nil { return VirtualNetworkGatewayConnectionsClientGetResponse{}, err @@ -265,10 +283,13 @@ func (client *VirtualNetworkGatewayConnectionsClient) BeginGetIkeSas(ctx context } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewayConnectionsClientGetIkeSasResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewayConnectionsClientGetIkeSasResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewayConnectionsClientGetIkeSasResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -278,6 +299,10 @@ func (client *VirtualNetworkGatewayConnectionsClient) BeginGetIkeSas(ctx context // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewayConnectionsClient) getIkeSas(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, options *VirtualNetworkGatewayConnectionsClientBeginGetIkeSasOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewayConnectionsClient.BeginGetIkeSas" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getIkeSasCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayConnectionName, options) if err != nil { return nil, err @@ -330,6 +355,10 @@ func (client *VirtualNetworkGatewayConnectionsClient) getIkeSasCreateRequest(ctx // method. 
func (client *VirtualNetworkGatewayConnectionsClient) GetSharedKey(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, options *VirtualNetworkGatewayConnectionsClientGetSharedKeyOptions) (VirtualNetworkGatewayConnectionsClientGetSharedKeyResponse, error) { var err error + const operationName = "VirtualNetworkGatewayConnectionsClient.GetSharedKey" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getSharedKeyCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayConnectionName, options) if err != nil { return VirtualNetworkGatewayConnectionsClientGetSharedKeyResponse{}, err @@ -394,25 +423,20 @@ func (client *VirtualNetworkGatewayConnectionsClient) NewListPager(resourceGroup return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualNetworkGatewayConnectionsClientListResponse) (VirtualNetworkGatewayConnectionsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualNetworkGatewayConnectionsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return VirtualNetworkGatewayConnectionsClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return VirtualNetworkGatewayConnectionsClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualNetworkGatewayConnectionsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -463,10 +487,13 @@ func (client *VirtualNetworkGatewayConnectionsClient) BeginResetConnection(ctx c } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewayConnectionsClientResetConnectionResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewayConnectionsClientResetConnectionResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewayConnectionsClientResetConnectionResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -476,6 +503,10 @@ func (client *VirtualNetworkGatewayConnectionsClient) BeginResetConnection(ctx c // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewayConnectionsClient) resetConnection(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, options *VirtualNetworkGatewayConnectionsClientBeginResetConnectionOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewayConnectionsClient.BeginResetConnection" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, 
operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.resetConnectionCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayConnectionName, options) if err != nil { return nil, err @@ -537,10 +568,13 @@ func (client *VirtualNetworkGatewayConnectionsClient) BeginResetSharedKey(ctx co } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewayConnectionsClientResetSharedKeyResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewayConnectionsClientResetSharedKeyResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewayConnectionsClientResetSharedKeyResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -552,6 +586,10 @@ func (client *VirtualNetworkGatewayConnectionsClient) BeginResetSharedKey(ctx co // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewayConnectionsClient) resetSharedKey(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionResetSharedKey, options *VirtualNetworkGatewayConnectionsClientBeginResetSharedKeyOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewayConnectionsClient.BeginResetSharedKey" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.resetSharedKeyCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayConnectionName, parameters, options) if err != nil { return nil, err @@ -616,10 +654,13 @@ func (client *VirtualNetworkGatewayConnectionsClient) BeginSetSharedKey(ctx cont } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewayConnectionsClientSetSharedKeyResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewayConnectionsClientSetSharedKeyResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewayConnectionsClientSetSharedKeyResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -631,6 +672,10 @@ func (client *VirtualNetworkGatewayConnectionsClient) BeginSetSharedKey(ctx cont // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewayConnectionsClient) setSharedKey(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionSharedKey, options *VirtualNetworkGatewayConnectionsClientBeginSetSharedKeyOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewayConnectionsClient.BeginSetSharedKey" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.setSharedKeyCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayConnectionName, parameters, options) if err != nil { return nil, err @@ 
-691,10 +736,13 @@ func (client *VirtualNetworkGatewayConnectionsClient) BeginStartPacketCapture(ct } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewayConnectionsClientStartPacketCaptureResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewayConnectionsClientStartPacketCaptureResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewayConnectionsClientStartPacketCaptureResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -704,6 +752,10 @@ func (client *VirtualNetworkGatewayConnectionsClient) BeginStartPacketCapture(ct // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewayConnectionsClient) startPacketCapture(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, options *VirtualNetworkGatewayConnectionsClientBeginStartPacketCaptureOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewayConnectionsClient.BeginStartPacketCapture" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.startPacketCaptureCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayConnectionName, options) if err != nil { return nil, err @@ -768,10 +820,13 @@ func (client *VirtualNetworkGatewayConnectionsClient) BeginStopPacketCapture(ctx } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewayConnectionsClientStopPacketCaptureResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewayConnectionsClientStopPacketCaptureResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewayConnectionsClientStopPacketCaptureResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -781,6 +836,10 @@ func (client *VirtualNetworkGatewayConnectionsClient) BeginStopPacketCapture(ctx // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewayConnectionsClient) stopPacketCapture(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters VPNPacketCaptureStopParameters, options *VirtualNetworkGatewayConnectionsClientBeginStopPacketCaptureOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewayConnectionsClient.BeginStopPacketCapture" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.stopPacketCaptureCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayConnectionName, parameters, options) if err != nil { return nil, err @@ -842,10 +901,13 @@ func (client *VirtualNetworkGatewayConnectionsClient) BeginUpdateTags(ctx contex } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), 
&runtime.NewPollerOptions[VirtualNetworkGatewayConnectionsClientUpdateTagsResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewayConnectionsClientUpdateTagsResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewayConnectionsClientUpdateTagsResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -855,6 +917,10 @@ func (client *VirtualNetworkGatewayConnectionsClient) BeginUpdateTags(ctx contex // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewayConnectionsClient) updateTags(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters TagsObject, options *VirtualNetworkGatewayConnectionsClientBeginUpdateTagsOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewayConnectionsClient.BeginUpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayConnectionName, parameters, options) if err != nil { return nil, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworkgatewaynatrules_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworkgatewaynatrules_client.go index f3754932e..28dd7a3b7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworkgatewaynatrules_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworkgatewaynatrules_client.go @@ -33,7 +33,7 @@ type VirtualNetworkGatewayNatRulesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewVirtualNetworkGatewayNatRulesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualNetworkGatewayNatRulesClient, error) { - cl, err := arm.NewClient(moduleName+".VirtualNetworkGatewayNatRulesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -63,10 +63,13 @@ func (client *VirtualNetworkGatewayNatRulesClient) BeginCreateOrUpdate(ctx conte } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewayNatRulesClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewayNatRulesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewayNatRulesClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -77,6 +80,10 @@ func (client *VirtualNetworkGatewayNatRulesClient) BeginCreateOrUpdate(ctx conte // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewayNatRulesClient) createOrUpdate(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, natRuleName string, natRuleParameters VirtualNetworkGatewayNatRule, options *VirtualNetworkGatewayNatRulesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewayNatRulesClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, natRuleName, natRuleParameters, options) if err != nil { return nil, err @@ -142,10 +149,13 @@ func (client *VirtualNetworkGatewayNatRulesClient) BeginDelete(ctx context.Conte } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewayNatRulesClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewayNatRulesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewayNatRulesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -155,6 +165,10 @@ func (client *VirtualNetworkGatewayNatRulesClient) BeginDelete(ctx context.Conte // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewayNatRulesClient) deleteOperation(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, natRuleName string, options *VirtualNetworkGatewayNatRulesClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewayNatRulesClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, 
resourceGroupName, virtualNetworkGatewayName, natRuleName, options) if err != nil { return nil, err @@ -211,6 +225,10 @@ func (client *VirtualNetworkGatewayNatRulesClient) deleteCreateRequest(ctx conte // method. func (client *VirtualNetworkGatewayNatRulesClient) Get(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, natRuleName string, options *VirtualNetworkGatewayNatRulesClientGetOptions) (VirtualNetworkGatewayNatRulesClientGetResponse, error) { var err error + const operationName = "VirtualNetworkGatewayNatRulesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, natRuleName, options) if err != nil { return VirtualNetworkGatewayNatRulesClientGetResponse{}, err @@ -279,25 +297,20 @@ func (client *VirtualNetworkGatewayNatRulesClient) NewListByVirtualNetworkGatewa return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualNetworkGatewayNatRulesClientListByVirtualNetworkGatewayResponse) (VirtualNetworkGatewayNatRulesClientListByVirtualNetworkGatewayResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByVirtualNetworkGatewayCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return VirtualNetworkGatewayNatRulesClientListByVirtualNetworkGatewayResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualNetworkGatewayNatRulesClient.NewListByVirtualNetworkGatewayPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByVirtualNetworkGatewayCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, options) + }, nil) if err != nil { return VirtualNetworkGatewayNatRulesClientListByVirtualNetworkGatewayResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualNetworkGatewayNatRulesClientListByVirtualNetworkGatewayResponse{}, runtime.NewResponseError(resp) - } return client.listByVirtualNetworkGatewayHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworkgateways_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworkgateways_client.go index 2ff9abf98..e1bf690f8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworkgateways_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworkgateways_client.go @@ -33,7 +33,7 @@ type VirtualNetworkGatewaysClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewVirtualNetworkGatewaysClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualNetworkGatewaysClient, error) { - cl, err := arm.NewClient(moduleName+".VirtualNetworkGatewaysClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *VirtualNetworkGatewaysClient) BeginCreateOrUpdate(ctx context.Cont } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewaysClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewaysClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewaysClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *VirtualNetworkGatewaysClient) BeginCreateOrUpdate(ctx context.Cont // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewaysClient) createOrUpdate(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway, options *VirtualNetworkGatewaysClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewaysClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, parameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *VirtualNetworkGatewaysClient) BeginDelete(ctx context.Context, res } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewaysClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewaysClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewaysClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *VirtualNetworkGatewaysClient) BeginDelete(ctx context.Context, res // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewaysClient) deleteOperation(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, options *VirtualNetworkGatewaysClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewaysClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, options) if err != nil { return nil, err @@ -206,10 +220,13 @@ func (client *VirtualNetworkGatewaysClient) BeginDisconnectVirtualNetworkGateway 
} poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewaysClientDisconnectVirtualNetworkGatewayVPNConnectionsResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewaysClientDisconnectVirtualNetworkGatewayVPNConnectionsResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewaysClientDisconnectVirtualNetworkGatewayVPNConnectionsResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -220,6 +237,10 @@ func (client *VirtualNetworkGatewaysClient) BeginDisconnectVirtualNetworkGateway // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewaysClient) disconnectVirtualNetworkGatewayVPNConnections(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, request P2SVPNConnectionRequest, options *VirtualNetworkGatewaysClientBeginDisconnectVirtualNetworkGatewayVPNConnectionsOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewaysClient.BeginDisconnectVirtualNetworkGatewayVPNConnections" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.disconnectVirtualNetworkGatewayVPNConnectionsCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, request, options) if err != nil { return nil, err @@ -282,10 +303,13 @@ func (client *VirtualNetworkGatewaysClient) BeginGenerateVPNProfile(ctx context. } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewaysClientGenerateVPNProfileResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewaysClientGenerateVPNProfileResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewaysClientGenerateVPNProfileResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -296,6 +320,10 @@ func (client *VirtualNetworkGatewaysClient) BeginGenerateVPNProfile(ctx context. 
// Generated from API version 2023-05-01 func (client *VirtualNetworkGatewaysClient) generateVPNProfile(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, parameters VPNClientParameters, options *VirtualNetworkGatewaysClientBeginGenerateVPNProfileOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewaysClient.BeginGenerateVPNProfile" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.generateVPNProfileCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, parameters, options) if err != nil { return nil, err @@ -358,10 +386,13 @@ func (client *VirtualNetworkGatewaysClient) BeginGeneratevpnclientpackage(ctx co } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewaysClientGeneratevpnclientpackageResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewaysClientGeneratevpnclientpackageResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewaysClientGeneratevpnclientpackageResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -372,6 +403,10 @@ func (client *VirtualNetworkGatewaysClient) BeginGeneratevpnclientpackage(ctx co // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewaysClient) generatevpnclientpackage(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, parameters VPNClientParameters, options *VirtualNetworkGatewaysClientBeginGeneratevpnclientpackageOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewaysClient.BeginGeneratevpnclientpackage" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.generatevpnclientpackageCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, parameters, options) if err != nil { return nil, err @@ -426,6 +461,10 @@ func (client *VirtualNetworkGatewaysClient) generatevpnclientpackageCreateReques // method. 
func (client *VirtualNetworkGatewaysClient) Get(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, options *VirtualNetworkGatewaysClientGetOptions) (VirtualNetworkGatewaysClientGetResponse, error) { var err error + const operationName = "VirtualNetworkGatewaysClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, options) if err != nil { return VirtualNetworkGatewaysClientGetResponse{}, err @@ -495,10 +534,13 @@ func (client *VirtualNetworkGatewaysClient) BeginGetAdvertisedRoutes(ctx context } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewaysClientGetAdvertisedRoutesResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewaysClientGetAdvertisedRoutesResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewaysClientGetAdvertisedRoutesResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -509,6 +551,10 @@ func (client *VirtualNetworkGatewaysClient) BeginGetAdvertisedRoutes(ctx context // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewaysClient) getAdvertisedRoutes(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, peer string, options *VirtualNetworkGatewaysClientBeginGetAdvertisedRoutesOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewaysClient.BeginGetAdvertisedRoutes" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getAdvertisedRoutesCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, peer, options) if err != nil { return nil, err @@ -567,10 +613,13 @@ func (client *VirtualNetworkGatewaysClient) BeginGetBgpPeerStatus(ctx context.Co } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewaysClientGetBgpPeerStatusResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewaysClientGetBgpPeerStatusResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewaysClientGetBgpPeerStatusResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -580,6 +629,10 @@ func (client *VirtualNetworkGatewaysClient) BeginGetBgpPeerStatus(ctx context.Co // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewaysClient) getBgpPeerStatus(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, options *VirtualNetworkGatewaysClientBeginGetBgpPeerStatusOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewaysClient.BeginGetBgpPeerStatus" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan 
:= runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getBgpPeerStatusCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, options) if err != nil { return nil, err @@ -641,10 +694,13 @@ func (client *VirtualNetworkGatewaysClient) BeginGetLearnedRoutes(ctx context.Co } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewaysClientGetLearnedRoutesResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewaysClientGetLearnedRoutesResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewaysClientGetLearnedRoutesResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -655,6 +711,10 @@ func (client *VirtualNetworkGatewaysClient) BeginGetLearnedRoutes(ctx context.Co // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewaysClient) getLearnedRoutes(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, options *VirtualNetworkGatewaysClientBeginGetLearnedRoutesOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewaysClient.BeginGetLearnedRoutes" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getLearnedRoutesCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, options) if err != nil { return nil, err @@ -713,10 +773,13 @@ func (client *VirtualNetworkGatewaysClient) BeginGetVPNProfilePackageURL(ctx con } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewaysClientGetVPNProfilePackageURLResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewaysClientGetVPNProfilePackageURLResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewaysClientGetVPNProfilePackageURLResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -727,6 +790,10 @@ func (client *VirtualNetworkGatewaysClient) BeginGetVPNProfilePackageURL(ctx con // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewaysClient) getVPNProfilePackageURL(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, options *VirtualNetworkGatewaysClientBeginGetVPNProfilePackageURLOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewaysClient.BeginGetVPNProfilePackageURL" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getVPNProfilePackageURLCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, options) if err != nil { return nil, err @@ -785,10 +852,13 @@ func (client *VirtualNetworkGatewaysClient) BeginGetVpnclientConnectionHealth(ct } poller, err := runtime.NewPoller(resp, 
client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewaysClientGetVpnclientConnectionHealthResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewaysClientGetVpnclientConnectionHealthResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewaysClientGetVpnclientConnectionHealthResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -799,6 +869,10 @@ func (client *VirtualNetworkGatewaysClient) BeginGetVpnclientConnectionHealth(ct // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewaysClient) getVpnclientConnectionHealth(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, options *VirtualNetworkGatewaysClientBeginGetVpnclientConnectionHealthOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewaysClient.BeginGetVpnclientConnectionHealth" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getVpnclientConnectionHealthCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, options) if err != nil { return nil, err @@ -858,10 +932,13 @@ func (client *VirtualNetworkGatewaysClient) BeginGetVpnclientIPSecParameters(ctx } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewaysClientGetVpnclientIPSecParametersResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewaysClientGetVpnclientIPSecParametersResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewaysClientGetVpnclientIPSecParametersResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -873,6 +950,10 @@ func (client *VirtualNetworkGatewaysClient) BeginGetVpnclientIPSecParameters(ctx // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewaysClient) getVpnclientIPSecParameters(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, options *VirtualNetworkGatewaysClientBeginGetVpnclientIPSecParametersOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewaysClient.BeginGetVpnclientIPSecParameters" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getVpnclientIPSecParametersCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, options) if err != nil { return nil, err @@ -926,25 +1007,20 @@ func (client *VirtualNetworkGatewaysClient) NewListPager(resourceGroupName strin return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualNetworkGatewaysClientListResponse) (VirtualNetworkGatewaysClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, options) - } else 
{ - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualNetworkGatewaysClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return VirtualNetworkGatewaysClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return VirtualNetworkGatewaysClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualNetworkGatewaysClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -992,25 +1068,20 @@ func (client *VirtualNetworkGatewaysClient) NewListConnectionsPager(resourceGrou return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualNetworkGatewaysClientListConnectionsResponse) (VirtualNetworkGatewaysClientListConnectionsResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listConnectionsCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return VirtualNetworkGatewaysClientListConnectionsResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualNetworkGatewaysClient.NewListConnectionsPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listConnectionsCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, options) + }, nil) if err != nil { return VirtualNetworkGatewaysClientListConnectionsResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualNetworkGatewaysClientListConnectionsResponse{}, runtime.NewResponseError(resp) - } return client.listConnectionsHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -1065,10 +1136,13 @@ func (client *VirtualNetworkGatewaysClient) BeginReset(ctx context.Context, reso } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewaysClientResetResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewaysClientResetResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewaysClientResetResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -1078,6 +1152,10 @@ func (client *VirtualNetworkGatewaysClient) BeginReset(ctx context.Context, reso // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewaysClient) reset(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, options *VirtualNetworkGatewaysClientBeginResetOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewaysClient.BeginReset" + ctx = context.WithValue(ctx, 
runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.resetCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, options) if err != nil { return nil, err @@ -1139,10 +1217,13 @@ func (client *VirtualNetworkGatewaysClient) BeginResetVPNClientSharedKey(ctx con } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewaysClientResetVPNClientSharedKeyResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewaysClientResetVPNClientSharedKeyResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewaysClientResetVPNClientSharedKeyResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -1152,6 +1233,10 @@ func (client *VirtualNetworkGatewaysClient) BeginResetVPNClientSharedKey(ctx con // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewaysClient) resetVPNClientSharedKey(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, options *VirtualNetworkGatewaysClientBeginResetVPNClientSharedKeyOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewaysClient.BeginResetVPNClientSharedKey" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.resetVPNClientSharedKeyCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, options) if err != nil { return nil, err @@ -1212,10 +1297,13 @@ func (client *VirtualNetworkGatewaysClient) BeginSetVpnclientIPSecParameters(ctx } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewaysClientSetVpnclientIPSecParametersResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewaysClientSetVpnclientIPSecParametersResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewaysClientSetVpnclientIPSecParametersResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -1226,6 +1314,10 @@ func (client *VirtualNetworkGatewaysClient) BeginSetVpnclientIPSecParameters(ctx // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewaysClient) setVpnclientIPSecParameters(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, vpnclientIPSecParams VPNClientIPsecParameters, options *VirtualNetworkGatewaysClientBeginSetVpnclientIPSecParametersOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewaysClient.BeginSetVpnclientIPSecParameters" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.setVpnclientIPSecParametersCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, vpnclientIPSecParams, 
options) if err != nil { return nil, err @@ -1286,10 +1378,13 @@ func (client *VirtualNetworkGatewaysClient) BeginStartPacketCapture(ctx context. } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewaysClientStartPacketCaptureResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewaysClientStartPacketCaptureResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewaysClientStartPacketCaptureResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -1299,6 +1394,10 @@ func (client *VirtualNetworkGatewaysClient) BeginStartPacketCapture(ctx context. // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewaysClient) startPacketCapture(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, options *VirtualNetworkGatewaysClientBeginStartPacketCaptureOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewaysClient.BeginStartPacketCapture" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.startPacketCaptureCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, options) if err != nil { return nil, err @@ -1363,10 +1462,13 @@ func (client *VirtualNetworkGatewaysClient) BeginStopPacketCapture(ctx context.C } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewaysClientStopPacketCaptureResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewaysClientStopPacketCaptureResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewaysClientStopPacketCaptureResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -1376,6 +1478,10 @@ func (client *VirtualNetworkGatewaysClient) BeginStopPacketCapture(ctx context.C // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewaysClient) stopPacketCapture(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, parameters VPNPacketCaptureStopParameters, options *VirtualNetworkGatewaysClientBeginStopPacketCaptureOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewaysClient.BeginStopPacketCapture" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.stopPacketCaptureCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, parameters, options) if err != nil { return nil, err @@ -1430,6 +1536,10 @@ func (client *VirtualNetworkGatewaysClient) stopPacketCaptureCreateRequest(ctx c // method. 
func (client *VirtualNetworkGatewaysClient) SupportedVPNDevices(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, options *VirtualNetworkGatewaysClientSupportedVPNDevicesOptions) (VirtualNetworkGatewaysClientSupportedVPNDevicesResponse, error) { var err error + const operationName = "VirtualNetworkGatewaysClient.SupportedVPNDevices" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.supportedVPNDevicesCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, options) if err != nil { return VirtualNetworkGatewaysClientSupportedVPNDevicesResponse{}, err @@ -1498,10 +1608,13 @@ func (client *VirtualNetworkGatewaysClient) BeginUpdateTags(ctx context.Context, } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkGatewaysClientUpdateTagsResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkGatewaysClientUpdateTagsResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkGatewaysClientUpdateTagsResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -1511,6 +1624,10 @@ func (client *VirtualNetworkGatewaysClient) BeginUpdateTags(ctx context.Context, // Generated from API version 2023-05-01 func (client *VirtualNetworkGatewaysClient) updateTags(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, parameters TagsObject, options *VirtualNetworkGatewaysClientBeginUpdateTagsOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkGatewaysClient.BeginUpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayName, parameters, options) if err != nil { return nil, err @@ -1567,6 +1684,10 @@ func (client *VirtualNetworkGatewaysClient) updateTagsCreateRequest(ctx context. // method. 
func (client *VirtualNetworkGatewaysClient) VPNDeviceConfigurationScript(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters VPNDeviceScriptParameters, options *VirtualNetworkGatewaysClientVPNDeviceConfigurationScriptOptions) (VirtualNetworkGatewaysClientVPNDeviceConfigurationScriptResponse, error) { var err error + const operationName = "VirtualNetworkGatewaysClient.VPNDeviceConfigurationScript" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.vpnDeviceConfigurationScriptCreateRequest(ctx, resourceGroupName, virtualNetworkGatewayConnectionName, parameters, options) if err != nil { return VirtualNetworkGatewaysClientVPNDeviceConfigurationScriptResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworkpeerings_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworkpeerings_client.go index 923ddf890..eaa5d8314 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworkpeerings_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworkpeerings_client.go @@ -33,7 +33,7 @@ type VirtualNetworkPeeringsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewVirtualNetworkPeeringsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualNetworkPeeringsClient, error) { - cl, err := arm.NewClient(moduleName+".VirtualNetworkPeeringsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *VirtualNetworkPeeringsClient) BeginCreateOrUpdate(ctx context.Cont } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkPeeringsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkPeeringsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkPeeringsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *VirtualNetworkPeeringsClient) BeginCreateOrUpdate(ctx context.Cont // Generated from API version 2023-05-01 func (client *VirtualNetworkPeeringsClient) createOrUpdate(ctx context.Context, resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string, virtualNetworkPeeringParameters VirtualNetworkPeering, options *VirtualNetworkPeeringsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkPeeringsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, virtualNetworkName, 
virtualNetworkPeeringName, virtualNetworkPeeringParameters, options) if err != nil { return nil, err @@ -143,10 +150,13 @@ func (client *VirtualNetworkPeeringsClient) BeginDelete(ctx context.Context, res } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkPeeringsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkPeeringsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkPeeringsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -156,6 +166,10 @@ func (client *VirtualNetworkPeeringsClient) BeginDelete(ctx context.Context, res // Generated from API version 2023-05-01 func (client *VirtualNetworkPeeringsClient) deleteOperation(ctx context.Context, resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string, options *VirtualNetworkPeeringsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkPeeringsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, virtualNetworkName, virtualNetworkPeeringName, options) if err != nil { return nil, err @@ -212,6 +226,10 @@ func (client *VirtualNetworkPeeringsClient) deleteCreateRequest(ctx context.Cont // method. func (client *VirtualNetworkPeeringsClient) Get(ctx context.Context, resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string, options *VirtualNetworkPeeringsClientGetOptions) (VirtualNetworkPeeringsClientGetResponse, error) { var err error + const operationName = "VirtualNetworkPeeringsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, virtualNetworkName, virtualNetworkPeeringName, options) if err != nil { return VirtualNetworkPeeringsClientGetResponse{}, err @@ -280,25 +298,20 @@ func (client *VirtualNetworkPeeringsClient) NewListPager(resourceGroupName strin return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualNetworkPeeringsClientListResponse) (VirtualNetworkPeeringsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, virtualNetworkName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return VirtualNetworkPeeringsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualNetworkPeeringsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, virtualNetworkName, options) + }, nil) if err != nil { return 
VirtualNetworkPeeringsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualNetworkPeeringsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworks_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworks_client.go index bfd38e8fb..6feedb751 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworks_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworks_client.go @@ -34,7 +34,7 @@ type VirtualNetworksClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewVirtualNetworksClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualNetworksClient, error) { - cl, err := arm.NewClient(moduleName+".VirtualNetworksClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -56,6 +56,10 @@ func NewVirtualNetworksClient(subscriptionID string, credential azcore.TokenCred // method. func (client *VirtualNetworksClient) CheckIPAddressAvailability(ctx context.Context, resourceGroupName string, virtualNetworkName string, ipAddress string, options *VirtualNetworksClientCheckIPAddressAvailabilityOptions) (VirtualNetworksClientCheckIPAddressAvailabilityResponse, error) { var err error + const operationName = "VirtualNetworksClient.CheckIPAddressAvailability" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.checkIPAddressAvailabilityCreateRequest(ctx, resourceGroupName, virtualNetworkName, ipAddress, options) if err != nil { return VirtualNetworksClientCheckIPAddressAvailabilityResponse{}, err @@ -125,10 +129,13 @@ func (client *VirtualNetworksClient) BeginCreateOrUpdate(ctx context.Context, re } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworksClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworksClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworksClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -138,6 +145,10 @@ func (client *VirtualNetworksClient) BeginCreateOrUpdate(ctx context.Context, re // Generated from API version 2023-05-01 func (client *VirtualNetworksClient) createOrUpdate(ctx context.Context, resourceGroupName string, virtualNetworkName string, parameters VirtualNetwork, options *VirtualNetworksClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworksClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) 
+ defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, virtualNetworkName, parameters, options) if err != nil { return nil, err @@ -198,10 +209,13 @@ func (client *VirtualNetworksClient) BeginDelete(ctx context.Context, resourceGr } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworksClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworksClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworksClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -211,6 +225,10 @@ func (client *VirtualNetworksClient) BeginDelete(ctx context.Context, resourceGr // Generated from API version 2023-05-01 func (client *VirtualNetworksClient) deleteOperation(ctx context.Context, resourceGroupName string, virtualNetworkName string, options *VirtualNetworksClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworksClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, virtualNetworkName, options) if err != nil { return nil, err @@ -261,6 +279,10 @@ func (client *VirtualNetworksClient) deleteCreateRequest(ctx context.Context, re // - options - VirtualNetworksClientGetOptions contains the optional parameters for the VirtualNetworksClient.Get method. 
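Every operation method touched in these hunks gains the same tracing preamble: the operation name is stored on the context under runtime.CtxAPINameKey, a span is opened against the client's Tracer via runtime.StartSpan, and the deferred endSpan call closes that span with whatever error the method ends up assigning. Distilled out of the generated code, the pattern looks roughly like the sketch below; fooClient and the request URL are placeholder names of mine, not types from this patch, and the sketch only uses the azcore/runtime calls that the diff itself relies on.

    package example

    import (
        "context"
        "net/http"

        "github.com/Azure/azure-sdk-for-go/sdk/azcore"
        "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
    )

    // fooClient stands in for any of the generated armnetwork clients.
    type fooClient struct {
        internal *azcore.Client
    }

    // get mirrors the preamble added to each generated operation method.
    func (c *fooClient) get(ctx context.Context, url string) (*http.Response, error) {
        var err error
        const operationName = "fooClient.Get"
        // The API name rides on the context so pipeline policies can attribute the request.
        ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName)
        // StartSpan is effectively a no-op unless a TracingProvider is configured on the client options.
        ctx, endSpan := runtime.StartSpan(ctx, operationName, c.internal.Tracer(), nil)
        // The closure captures err, so the span is closed with the method's final outcome.
        defer func() { endSpan(err) }()

        req, err := runtime.NewRequest(ctx, http.MethodGet, url)
        if err != nil {
            return nil, err
        }
        resp, err := c.internal.Pipeline().Do(req)
        if err != nil {
            return nil, err
        }
        if !runtime.HasStatusCode(resp, http.StatusOK) {
            err = runtime.NewResponseError(resp)
            return nil, err
        }
        return resp, nil
    }

Because err is shared with the deferred closure, every early return (request construction, pipeline failure, unexpected status) is reflected on the span without any extra bookkeeping.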
func (client *VirtualNetworksClient) Get(ctx context.Context, resourceGroupName string, virtualNetworkName string, options *VirtualNetworksClientGetOptions) (VirtualNetworksClientGetResponse, error) { var err error + const operationName = "VirtualNetworksClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, virtualNetworkName, options) if err != nil { return VirtualNetworksClientGetResponse{}, err @@ -327,25 +349,20 @@ func (client *VirtualNetworksClient) NewListPager(resourceGroupName string, opti return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualNetworksClientListResponse) (VirtualNetworksClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return VirtualNetworksClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualNetworksClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return VirtualNetworksClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualNetworksClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -391,25 +408,20 @@ func (client *VirtualNetworksClient) NewListAllPager(options *VirtualNetworksCli return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualNetworksClientListAllResponse) (VirtualNetworksClientListAllResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listAllCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return VirtualNetworksClientListAllResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualNetworksClient.NewListAllPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAllCreateRequest(ctx, options) + }, nil) if err != nil { return VirtualNetworksClientListAllResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualNetworksClientListAllResponse{}, runtime.NewResponseError(resp) - } return client.listAllHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -453,19 +465,16 @@ func (client *VirtualNetworksClient) BeginListDdosProtectionStatus(ctx context.C return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualNetworksClientListDdosProtectionStatusResponse) (VirtualNetworksClientListDdosProtectionStatusResponse, error) { - req, err := runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = 
context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualNetworksClient.BeginListDdosProtectionStatus") + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), *page.NextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listDdosProtectionStatusCreateRequest(ctx, resourceGroupName, virtualNetworkName, options) + }, nil) if err != nil { return VirtualNetworksClientListDdosProtectionStatusResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return VirtualNetworksClientListDdosProtectionStatusResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualNetworksClientListDdosProtectionStatusResponse{}, runtime.NewResponseError(resp) - } return client.listDdosProtectionStatusHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) if options == nil || options.ResumeToken == "" { resp, err := client.listDdosProtectionStatus(ctx, resourceGroupName, virtualNetworkName, options) @@ -475,11 +484,13 @@ func (client *VirtualNetworksClient) BeginListDdosProtectionStatus(ctx context.C poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[*runtime.Pager[VirtualNetworksClientListDdosProtectionStatusResponse]]{ FinalStateVia: runtime.FinalStateViaLocation, Response: &pager, + Tracer: client.internal.Tracer(), }) return poller, err } else { return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[*runtime.Pager[VirtualNetworksClientListDdosProtectionStatusResponse]]{ Response: &pager, + Tracer: client.internal.Tracer(), }) } } @@ -489,6 +500,10 @@ func (client *VirtualNetworksClient) BeginListDdosProtectionStatus(ctx context.C // Generated from API version 2023-05-01 func (client *VirtualNetworksClient) listDdosProtectionStatus(ctx context.Context, resourceGroupName string, virtualNetworkName string, options *VirtualNetworksClientBeginListDdosProtectionStatusOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworksClient.BeginListDdosProtectionStatus" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listDdosProtectionStatusCreateRequest(ctx, resourceGroupName, virtualNetworkName, options) if err != nil { return nil, err @@ -558,25 +573,20 @@ func (client *VirtualNetworksClient) NewListUsagePager(resourceGroupName string, return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualNetworksClientListUsageResponse) (VirtualNetworksClientListUsageResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listUsageCreateRequest(ctx, resourceGroupName, virtualNetworkName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualNetworksClient.NewListUsagePager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listUsageCreateRequest(ctx, resourceGroupName, virtualNetworkName, options) + }, nil) if err != nil { return VirtualNetworksClientListUsageResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return 
VirtualNetworksClientListUsageResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualNetworksClientListUsageResponse{}, runtime.NewResponseError(resp) - } return client.listUsageHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -626,6 +636,10 @@ func (client *VirtualNetworksClient) listUsageHandleResponse(resp *http.Response // method. func (client *VirtualNetworksClient) UpdateTags(ctx context.Context, resourceGroupName string, virtualNetworkName string, parameters TagsObject, options *VirtualNetworksClientUpdateTagsOptions) (VirtualNetworksClientUpdateTagsResponse, error) { var err error + const operationName = "VirtualNetworksClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, virtualNetworkName, parameters, options) if err != nil { return VirtualNetworksClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworktaps_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworktaps_client.go index 3963dc911..3ab3dd130 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworktaps_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworktaps_client.go @@ -33,7 +33,7 @@ type VirtualNetworkTapsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
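The pager rewrites in these hunks (NewListUsagePager above, and the other List pagers throughout the package) collapse the hand-rolled first-page/next-link branching, the explicit Pipeline().Do call, and the inline HTTP 200 check into a single runtime.FetcherForNextLink call, and they attach the client's Tracer to the PagingHandler. A compact sketch of the resulting shape follows; fooClient, listResponse, and firstPageURL are illustrative stand-ins of mine for the generated types and request builders.

    package example

    import (
        "context"
        "net/http"

        "github.com/Azure/azure-sdk-for-go/sdk/azcore"
        "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
        "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
    )

    // listResponse stands in for the generated *ClientList*Response types.
    type listResponse struct {
        NextLink *string
        Value    []*string
    }

    type fooClient struct {
        internal *azcore.Client
    }

    // NewListPager shows the shape the generated pagers take after this change.
    func (c *fooClient) NewListPager(firstPageURL string) *runtime.Pager[listResponse] {
        return runtime.NewPager(runtime.PagingHandler[listResponse]{
            More: func(page listResponse) bool {
                return page.NextLink != nil && len(*page.NextLink) > 0
            },
            Fetcher: func(ctx context.Context, page *listResponse) (listResponse, error) {
                ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "fooClient.NewListPager")
                nextLink := ""
                if page != nil {
                    nextLink = *page.NextLink
                }
                // FetcherForNextLink builds the first-page request through the callback and
                // follows nextLink on later pages, including the status-code check the old
                // generated code performed inline.
                resp, err := runtime.FetcherForNextLink(ctx, c.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) {
                    return runtime.NewRequest(ctx, http.MethodGet, firstPageURL)
                }, nil)
                if err != nil {
                    return listResponse{}, err
                }
                return c.listHandleResponse(resp)
            },
            Tracer: c.internal.Tracer(),
        })
    }

    func (c *fooClient) listHandleResponse(resp *http.Response) (listResponse, error) {
        var result listResponse
        err := runtime.UnmarshalAsJSON(resp, &result)
        return result, err
    }

With the boilerplate delegated, each generated fetcher shrinks to tagging the context with the pager's API name and handing the response to its existing listHandleResponse helper.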
func NewVirtualNetworkTapsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualNetworkTapsClient, error) { - cl, err := arm.NewClient(moduleName+".VirtualNetworkTapsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *VirtualNetworkTapsClient) BeginCreateOrUpdate(ctx context.Context, } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkTapsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkTapsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkTapsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *VirtualNetworkTapsClient) BeginCreateOrUpdate(ctx context.Context, // Generated from API version 2023-05-01 func (client *VirtualNetworkTapsClient) createOrUpdate(ctx context.Context, resourceGroupName string, tapName string, parameters VirtualNetworkTap, options *VirtualNetworkTapsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkTapsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, tapName, parameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *VirtualNetworkTapsClient) BeginDelete(ctx context.Context, resourc } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualNetworkTapsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualNetworkTapsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualNetworkTapsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *VirtualNetworkTapsClient) BeginDelete(ctx context.Context, resourc // Generated from API version 2023-05-01 func (client *VirtualNetworkTapsClient) deleteOperation(ctx context.Context, resourceGroupName string, tapName string, options *VirtualNetworkTapsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "VirtualNetworkTapsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, tapName, options) if err != nil { return nil, err @@ -197,6 +211,10 @@ func (client *VirtualNetworkTapsClient) deleteCreateRequest(ctx context.Context, // - options - VirtualNetworkTapsClientGetOptions contains the optional parameters for the VirtualNetworkTapsClient.Get method. 
func (client *VirtualNetworkTapsClient) Get(ctx context.Context, resourceGroupName string, tapName string, options *VirtualNetworkTapsClientGetOptions) (VirtualNetworkTapsClientGetResponse, error) { var err error + const operationName = "VirtualNetworkTapsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, tapName, options) if err != nil { return VirtualNetworkTapsClientGetResponse{}, err @@ -259,25 +277,20 @@ func (client *VirtualNetworkTapsClient) NewListAllPager(options *VirtualNetworkT return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualNetworkTapsClientListAllResponse) (VirtualNetworkTapsClientListAllResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listAllCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return VirtualNetworkTapsClientListAllResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualNetworkTapsClient.NewListAllPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAllCreateRequest(ctx, options) + }, nil) if err != nil { return VirtualNetworkTapsClientListAllResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualNetworkTapsClientListAllResponse{}, runtime.NewResponseError(resp) - } return client.listAllHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -320,25 +333,20 @@ func (client *VirtualNetworkTapsClient) NewListByResourceGroupPager(resourceGrou return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualNetworkTapsClientListByResourceGroupResponse) (VirtualNetworkTapsClientListByResourceGroupResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualNetworkTapsClient.NewListByResourceGroupPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return VirtualNetworkTapsClientListByResourceGroupResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return VirtualNetworkTapsClientListByResourceGroupResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualNetworkTapsClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) - } return client.listByResourceGroupHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -384,6 +392,10 @@ func (client *VirtualNetworkTapsClient) listByResourceGroupHandleResponse(resp * // method. 
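Both branches of the Begin* long-running operations now carry the client's Tracer: the fresh-start path adds it to NewPollerOptions, and the resume path trades the bare NewPollerFromResumeToken[T](token, pipeline, nil) call for one that passes NewPollerFromResumeTokenOptions, so a poller rebuilt from a resume token participates in tracing the same way a freshly started one does. Caller code is unaffected; the sketch below walks through saving and resuming an LRO against one of these clients, with the subscription ID, resource names, empty tap payload on the resumed call, and error handling all being illustrative assumptions rather than code from this repository.

    package main

    import (
        "context"
        "log"

        "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
        "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
        "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
    )

    func main() {
        cred, err := azidentity.NewDefaultAzureCredential(nil)
        if err != nil {
            log.Fatal(err)
        }
        client, err := armnetwork.NewVirtualNetworkTapsClient("<subscription-id>", cred, nil)
        if err != nil {
            log.Fatal(err)
        }
        ctx := context.Background()

        // Start the operation and capture its resume token.
        poller, err := client.BeginCreateOrUpdate(ctx, "example-rg", "example-tap",
            armnetwork.VirtualNetworkTap{Location: to.Ptr("eastus")}, nil)
        if err != nil {
            log.Fatal(err)
        }
        token, err := poller.ResumeToken()
        if err != nil {
            log.Fatal(err)
        }

        // Rebuild the poller from the token, possibly in another process; after this
        // change the resumed poller carries the client's Tracer as well.
        resumed, err := client.BeginCreateOrUpdate(ctx, "example-rg", "example-tap",
            armnetwork.VirtualNetworkTap{},
            &armnetwork.VirtualNetworkTapsClientBeginCreateOrUpdateOptions{ResumeToken: token})
        if err != nil {
            log.Fatal(err)
        }
        if _, err = resumed.PollUntilDone(ctx, nil); err != nil {
            log.Fatal(err)
        }
    }

When ResumeToken is set, the generated method goes straight to the resume branch and never reads the parameters argument, which is why the zero-value VirtualNetworkTap is sufficient on the resumed call.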
func (client *VirtualNetworkTapsClient) UpdateTags(ctx context.Context, resourceGroupName string, tapName string, tapParameters TagsObject, options *VirtualNetworkTapsClientUpdateTagsOptions) (VirtualNetworkTapsClientUpdateTagsResponse, error) { var err error + const operationName = "VirtualNetworkTapsClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, tapName, tapParameters, options) if err != nil { return VirtualNetworkTapsClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualrouterpeerings_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualrouterpeerings_client.go index e43884136..984e683b7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualrouterpeerings_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualrouterpeerings_client.go @@ -33,7 +33,7 @@ type VirtualRouterPeeringsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewVirtualRouterPeeringsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualRouterPeeringsClient, error) { - cl, err := arm.NewClient(moduleName+".VirtualRouterPeeringsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *VirtualRouterPeeringsClient) BeginCreateOrUpdate(ctx context.Conte } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualRouterPeeringsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualRouterPeeringsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualRouterPeeringsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *VirtualRouterPeeringsClient) BeginCreateOrUpdate(ctx context.Conte // Generated from API version 2023-05-01 func (client *VirtualRouterPeeringsClient) createOrUpdate(ctx context.Context, resourceGroupName string, virtualRouterName string, peeringName string, parameters VirtualRouterPeering, options *VirtualRouterPeeringsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "VirtualRouterPeeringsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, virtualRouterName, peeringName, parameters, options) if err != nil { return nil, err @@ -140,10 +147,13 @@ func (client *VirtualRouterPeeringsClient) BeginDelete(ctx context.Context, reso } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), 
&runtime.NewPollerOptions[VirtualRouterPeeringsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualRouterPeeringsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualRouterPeeringsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -153,6 +163,10 @@ func (client *VirtualRouterPeeringsClient) BeginDelete(ctx context.Context, reso // Generated from API version 2023-05-01 func (client *VirtualRouterPeeringsClient) deleteOperation(ctx context.Context, resourceGroupName string, virtualRouterName string, peeringName string, options *VirtualRouterPeeringsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "VirtualRouterPeeringsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, virtualRouterName, peeringName, options) if err != nil { return nil, err @@ -209,6 +223,10 @@ func (client *VirtualRouterPeeringsClient) deleteCreateRequest(ctx context.Conte // method. func (client *VirtualRouterPeeringsClient) Get(ctx context.Context, resourceGroupName string, virtualRouterName string, peeringName string, options *VirtualRouterPeeringsClientGetOptions) (VirtualRouterPeeringsClientGetResponse, error) { var err error + const operationName = "VirtualRouterPeeringsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, virtualRouterName, peeringName, options) if err != nil { return VirtualRouterPeeringsClientGetResponse{}, err @@ -277,25 +295,20 @@ func (client *VirtualRouterPeeringsClient) NewListPager(resourceGroupName string return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualRouterPeeringsClientListResponse) (VirtualRouterPeeringsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, virtualRouterName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return VirtualRouterPeeringsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualRouterPeeringsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, virtualRouterName, options) + }, nil) if err != nil { return VirtualRouterPeeringsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualRouterPeeringsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualrouters_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualrouters_client.go index 0dcb79ad9..8b803a5bf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualrouters_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualrouters_client.go @@ -33,7 +33,7 @@ type VirtualRoutersClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewVirtualRoutersClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualRoutersClient, error) { - cl, err := arm.NewClient(moduleName+".VirtualRoutersClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *VirtualRoutersClient) BeginCreateOrUpdate(ctx context.Context, res } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualRoutersClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualRoutersClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualRoutersClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *VirtualRoutersClient) BeginCreateOrUpdate(ctx context.Context, res // Generated from API version 2023-05-01 func (client *VirtualRoutersClient) createOrUpdate(ctx context.Context, resourceGroupName string, virtualRouterName string, parameters VirtualRouter, options *VirtualRoutersClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "VirtualRoutersClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, virtualRouterName, parameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *VirtualRoutersClient) BeginDelete(ctx context.Context, resourceGro } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualRoutersClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualRoutersClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualRoutersClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *VirtualRoutersClient) BeginDelete(ctx context.Context, resourceGro // Generated from API version 2023-05-01 func (client *VirtualRoutersClient) deleteOperation(ctx context.Context, resourceGroupName string, virtualRouterName string, options *VirtualRoutersClientBeginDeleteOptions) 
(*http.Response, error) { var err error + const operationName = "VirtualRoutersClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, virtualRouterName, options) if err != nil { return nil, err @@ -197,6 +211,10 @@ func (client *VirtualRoutersClient) deleteCreateRequest(ctx context.Context, res // - options - VirtualRoutersClientGetOptions contains the optional parameters for the VirtualRoutersClient.Get method. func (client *VirtualRoutersClient) Get(ctx context.Context, resourceGroupName string, virtualRouterName string, options *VirtualRoutersClientGetOptions) (VirtualRoutersClientGetResponse, error) { var err error + const operationName = "VirtualRoutersClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, virtualRouterName, options) if err != nil { return VirtualRoutersClientGetResponse{}, err @@ -261,25 +279,20 @@ func (client *VirtualRoutersClient) NewListPager(options *VirtualRoutersClientLi return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualRoutersClientListResponse) (VirtualRoutersClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return VirtualRoutersClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualRoutersClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) if err != nil { return VirtualRoutersClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualRoutersClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -322,25 +335,20 @@ func (client *VirtualRoutersClient) NewListByResourceGroupPager(resourceGroupNam return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualRoutersClientListByResourceGroupResponse) (VirtualRoutersClientListByResourceGroupResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualRoutersClient.NewListByResourceGroupPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return VirtualRoutersClientListByResourceGroupResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil 
{ - return VirtualRoutersClientListByResourceGroupResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualRoutersClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) - } return client.listByResourceGroupHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualwans_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualwans_client.go index f3e6988b9..3b5210fc9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualwans_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualwans_client.go @@ -33,7 +33,7 @@ type VirtualWansClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewVirtualWansClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualWansClient, error) { - cl, err := arm.NewClient(moduleName+".VirtualWansClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *VirtualWansClient) BeginCreateOrUpdate(ctx context.Context, resour } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualWansClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualWansClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualWansClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *VirtualWansClient) BeginCreateOrUpdate(ctx context.Context, resour // Generated from API version 2023-05-01 func (client *VirtualWansClient) createOrUpdate(ctx context.Context, resourceGroupName string, virtualWANName string, wanParameters VirtualWAN, options *VirtualWansClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "VirtualWansClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, virtualWANName, wanParameters, options) if err != nil { return nil, err @@ -133,10 +140,13 @@ func (client *VirtualWansClient) BeginDelete(ctx context.Context, resourceGroupN } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualWansClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VirtualWansClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualWansClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -146,6 +156,10 @@ func (client 
*VirtualWansClient) BeginDelete(ctx context.Context, resourceGroupN // Generated from API version 2023-05-01 func (client *VirtualWansClient) deleteOperation(ctx context.Context, resourceGroupName string, virtualWANName string, options *VirtualWansClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "VirtualWansClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, virtualWANName, options) if err != nil { return nil, err @@ -196,6 +210,10 @@ func (client *VirtualWansClient) deleteCreateRequest(ctx context.Context, resour // - options - VirtualWansClientGetOptions contains the optional parameters for the VirtualWansClient.Get method. func (client *VirtualWansClient) Get(ctx context.Context, resourceGroupName string, virtualWANName string, options *VirtualWansClientGetOptions) (VirtualWansClientGetResponse, error) { var err error + const operationName = "VirtualWansClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, virtualWANName, options) if err != nil { return VirtualWansClientGetResponse{}, err @@ -257,25 +275,20 @@ func (client *VirtualWansClient) NewListPager(options *VirtualWansClientListOpti return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualWansClientListResponse) (VirtualWansClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return VirtualWansClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualWansClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) if err != nil { return VirtualWansClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualWansClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -318,25 +331,20 @@ func (client *VirtualWansClient) NewListByResourceGroupPager(resourceGroupName s return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VirtualWansClientListByResourceGroupResponse) (VirtualWansClientListByResourceGroupResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VirtualWansClient.NewListByResourceGroupPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return 
client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return VirtualWansClientListByResourceGroupResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return VirtualWansClientListByResourceGroupResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VirtualWansClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) - } return client.listByResourceGroupHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -381,6 +389,10 @@ func (client *VirtualWansClient) listByResourceGroupHandleResponse(resp *http.Re // - options - VirtualWansClientUpdateTagsOptions contains the optional parameters for the VirtualWansClient.UpdateTags method. func (client *VirtualWansClient) UpdateTags(ctx context.Context, resourceGroupName string, virtualWANName string, wanParameters TagsObject, options *VirtualWansClientUpdateTagsOptions) (VirtualWansClientUpdateTagsResponse, error) { var err error + const operationName = "VirtualWansClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, virtualWANName, wanParameters, options) if err != nil { return VirtualWansClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnconnections_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnconnections_client.go index a82b7f75e..a18a74883 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnconnections_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnconnections_client.go @@ -33,7 +33,7 @@ type VPNConnectionsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewVPNConnectionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VPNConnectionsClient, error) { - cl, err := arm.NewClient(moduleName+".VPNConnectionsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -63,10 +63,13 @@ func (client *VPNConnectionsClient) BeginCreateOrUpdate(ctx context.Context, res } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VPNConnectionsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VPNConnectionsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VPNConnectionsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -76,6 +79,10 @@ func (client *VPNConnectionsClient) BeginCreateOrUpdate(ctx context.Context, res // Generated from API version 2023-05-01 func (client *VPNConnectionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, gatewayName string, connectionName string, vpnConnectionParameters VPNConnection, options *VPNConnectionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "VPNConnectionsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, gatewayName, connectionName, vpnConnectionParameters, options) if err != nil { return nil, err @@ -141,10 +148,13 @@ func (client *VPNConnectionsClient) BeginDelete(ctx context.Context, resourceGro } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VPNConnectionsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VPNConnectionsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VPNConnectionsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -154,6 +164,10 @@ func (client *VPNConnectionsClient) BeginDelete(ctx context.Context, resourceGro // Generated from API version 2023-05-01 func (client *VPNConnectionsClient) deleteOperation(ctx context.Context, resourceGroupName string, gatewayName string, connectionName string, options *VPNConnectionsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "VPNConnectionsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, gatewayName, connectionName, options) if err != nil { return nil, err @@ -209,6 +223,10 @@ func (client *VPNConnectionsClient) deleteCreateRequest(ctx context.Context, res // - options - VPNConnectionsClientGetOptions contains the optional parameters 
for the VPNConnectionsClient.Get method. func (client *VPNConnectionsClient) Get(ctx context.Context, resourceGroupName string, gatewayName string, connectionName string, options *VPNConnectionsClientGetOptions) (VPNConnectionsClientGetResponse, error) { var err error + const operationName = "VPNConnectionsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, gatewayName, connectionName, options) if err != nil { return VPNConnectionsClientGetResponse{}, err @@ -277,25 +295,20 @@ func (client *VPNConnectionsClient) NewListByVPNGatewayPager(resourceGroupName s return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VPNConnectionsClientListByVPNGatewayResponse) (VPNConnectionsClientListByVPNGatewayResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByVPNGatewayCreateRequest(ctx, resourceGroupName, gatewayName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return VPNConnectionsClientListByVPNGatewayResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VPNConnectionsClient.NewListByVPNGatewayPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByVPNGatewayCreateRequest(ctx, resourceGroupName, gatewayName, options) + }, nil) if err != nil { return VPNConnectionsClientListByVPNGatewayResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VPNConnectionsClientListByVPNGatewayResponse{}, runtime.NewResponseError(resp) - } return client.listByVPNGatewayHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -351,10 +364,13 @@ func (client *VPNConnectionsClient) BeginStartPacketCapture(ctx context.Context, } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VPNConnectionsClientStartPacketCaptureResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VPNConnectionsClientStartPacketCaptureResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VPNConnectionsClientStartPacketCaptureResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -364,6 +380,10 @@ func (client *VPNConnectionsClient) BeginStartPacketCapture(ctx context.Context, // Generated from API version 2023-05-01 func (client *VPNConnectionsClient) startPacketCapture(ctx context.Context, resourceGroupName string, gatewayName string, vpnConnectionName string, options *VPNConnectionsClientBeginStartPacketCaptureOptions) (*http.Response, error) { var err error + const operationName = "VPNConnectionsClient.BeginStartPacketCapture" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.startPacketCaptureCreateRequest(ctx, resourceGroupName, gatewayName, 
vpnConnectionName, options) if err != nil { return nil, err @@ -432,10 +452,13 @@ func (client *VPNConnectionsClient) BeginStopPacketCapture(ctx context.Context, } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VPNConnectionsClientStopPacketCaptureResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VPNConnectionsClientStopPacketCaptureResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VPNConnectionsClientStopPacketCaptureResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -445,6 +468,10 @@ func (client *VPNConnectionsClient) BeginStopPacketCapture(ctx context.Context, // Generated from API version 2023-05-01 func (client *VPNConnectionsClient) stopPacketCapture(ctx context.Context, resourceGroupName string, gatewayName string, vpnConnectionName string, options *VPNConnectionsClientBeginStopPacketCaptureOptions) (*http.Response, error) { var err error + const operationName = "VPNConnectionsClient.BeginStopPacketCapture" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.stopPacketCaptureCreateRequest(ctx, resourceGroupName, gatewayName, vpnConnectionName, options) if err != nil { return nil, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpngateways_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpngateways_client.go index a44104994..5d28c7988 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpngateways_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpngateways_client.go @@ -33,7 +33,7 @@ type VPNGatewaysClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewVPNGatewaysClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VPNGatewaysClient, error) { - cl, err := arm.NewClient(moduleName+".VPNGatewaysClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *VPNGatewaysClient) BeginCreateOrUpdate(ctx context.Context, resour } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VPNGatewaysClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VPNGatewaysClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VPNGatewaysClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *VPNGatewaysClient) BeginCreateOrUpdate(ctx context.Context, resour // Generated from API version 2023-05-01 func (client *VPNGatewaysClient) createOrUpdate(ctx context.Context, resourceGroupName string, gatewayName string, vpnGatewayParameters VPNGateway, options *VPNGatewaysClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "VPNGatewaysClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, gatewayName, vpnGatewayParameters, options) if err != nil { return nil, err @@ -133,10 +140,13 @@ func (client *VPNGatewaysClient) BeginDelete(ctx context.Context, resourceGroupN } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VPNGatewaysClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VPNGatewaysClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VPNGatewaysClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -146,6 +156,10 @@ func (client *VPNGatewaysClient) BeginDelete(ctx context.Context, resourceGroupN // Generated from API version 2023-05-01 func (client *VPNGatewaysClient) deleteOperation(ctx context.Context, resourceGroupName string, gatewayName string, options *VPNGatewaysClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "VPNGatewaysClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, gatewayName, options) if err != nil { return nil, err @@ -196,6 +210,10 @@ func (client *VPNGatewaysClient) deleteCreateRequest(ctx context.Context, resour // - options - VPNGatewaysClientGetOptions contains the optional parameters for the VPNGatewaysClient.Get method. 
func (client *VPNGatewaysClient) Get(ctx context.Context, resourceGroupName string, gatewayName string, options *VPNGatewaysClientGetOptions) (VPNGatewaysClientGetResponse, error) { var err error + const operationName = "VPNGatewaysClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, gatewayName, options) if err != nil { return VPNGatewaysClientGetResponse{}, err @@ -257,25 +275,20 @@ func (client *VPNGatewaysClient) NewListPager(options *VPNGatewaysClientListOpti return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VPNGatewaysClientListResponse) (VPNGatewaysClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VPNGatewaysClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) if err != nil { return VPNGatewaysClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return VPNGatewaysClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VPNGatewaysClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -318,25 +331,20 @@ func (client *VPNGatewaysClient) NewListByResourceGroupPager(resourceGroupName s return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VPNGatewaysClientListByResourceGroupResponse) (VPNGatewaysClientListByResourceGroupResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return VPNGatewaysClientListByResourceGroupResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VPNGatewaysClient.NewListByResourceGroupPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return VPNGatewaysClientListByResourceGroupResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VPNGatewaysClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) - } return client.listByResourceGroupHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -386,10 +394,13 @@ func (client *VPNGatewaysClient) BeginReset(ctx context.Context, resourceGroupNa } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VPNGatewaysClientResetResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return 
runtime.NewPollerFromResumeToken[VPNGatewaysClientResetResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VPNGatewaysClientResetResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -399,6 +410,10 @@ func (client *VPNGatewaysClient) BeginReset(ctx context.Context, resourceGroupNa // Generated from API version 2023-05-01 func (client *VPNGatewaysClient) reset(ctx context.Context, resourceGroupName string, gatewayName string, options *VPNGatewaysClientBeginResetOptions) (*http.Response, error) { var err error + const operationName = "VPNGatewaysClient.BeginReset" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.resetCreateRequest(ctx, resourceGroupName, gatewayName, options) if err != nil { return nil, err @@ -459,10 +474,13 @@ func (client *VPNGatewaysClient) BeginStartPacketCapture(ctx context.Context, re } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VPNGatewaysClientStartPacketCaptureResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VPNGatewaysClientStartPacketCaptureResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VPNGatewaysClientStartPacketCaptureResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -472,6 +490,10 @@ func (client *VPNGatewaysClient) BeginStartPacketCapture(ctx context.Context, re // Generated from API version 2023-05-01 func (client *VPNGatewaysClient) startPacketCapture(ctx context.Context, resourceGroupName string, gatewayName string, options *VPNGatewaysClientBeginStartPacketCaptureOptions) (*http.Response, error) { var err error + const operationName = "VPNGatewaysClient.BeginStartPacketCapture" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.startPacketCaptureCreateRequest(ctx, resourceGroupName, gatewayName, options) if err != nil { return nil, err @@ -535,10 +557,13 @@ func (client *VPNGatewaysClient) BeginStopPacketCapture(ctx context.Context, res } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VPNGatewaysClientStopPacketCaptureResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VPNGatewaysClientStopPacketCaptureResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VPNGatewaysClientStopPacketCaptureResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -548,6 +573,10 @@ func (client *VPNGatewaysClient) BeginStopPacketCapture(ctx context.Context, res // Generated from API version 2023-05-01 func (client *VPNGatewaysClient) stopPacketCapture(ctx context.Context, resourceGroupName string, gatewayName string, options *VPNGatewaysClientBeginStopPacketCaptureOptions) (*http.Response, 
error) { var err error + const operationName = "VPNGatewaysClient.BeginStopPacketCapture" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.stopPacketCaptureCreateRequest(ctx, resourceGroupName, gatewayName, options) if err != nil { return nil, err @@ -612,10 +641,13 @@ func (client *VPNGatewaysClient) BeginUpdateTags(ctx context.Context, resourceGr } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VPNGatewaysClientUpdateTagsResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VPNGatewaysClientUpdateTagsResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VPNGatewaysClientUpdateTagsResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -625,6 +657,10 @@ func (client *VPNGatewaysClient) BeginUpdateTags(ctx context.Context, resourceGr // Generated from API version 2023-05-01 func (client *VPNGatewaysClient) updateTags(ctx context.Context, resourceGroupName string, gatewayName string, vpnGatewayParameters TagsObject, options *VPNGatewaysClientBeginUpdateTagsOptions) (*http.Response, error) { var err error + const operationName = "VPNGatewaysClient.BeginUpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, gatewayName, vpnGatewayParameters, options) if err != nil { return nil, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnlinkconnections_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnlinkconnections_client.go index 86bd6c0bd..c9e13cc5f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnlinkconnections_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnlinkconnections_client.go @@ -33,7 +33,7 @@ type VPNLinkConnectionsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
func NewVPNLinkConnectionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VPNLinkConnectionsClient, error) { - cl, err := arm.NewClient(moduleName+".VPNLinkConnectionsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *VPNLinkConnectionsClient) BeginGetIkeSas(ctx context.Context, reso } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VPNLinkConnectionsClientGetIkeSasResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VPNLinkConnectionsClientGetIkeSasResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VPNLinkConnectionsClientGetIkeSasResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -75,6 +78,10 @@ func (client *VPNLinkConnectionsClient) BeginGetIkeSas(ctx context.Context, reso // Generated from API version 2023-05-01 func (client *VPNLinkConnectionsClient) getIkeSas(ctx context.Context, resourceGroupName string, gatewayName string, connectionName string, linkConnectionName string, options *VPNLinkConnectionsClientBeginGetIkeSasOptions) (*http.Response, error) { var err error + const operationName = "VPNLinkConnectionsClient.BeginGetIkeSas" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getIkeSasCreateRequest(ctx, resourceGroupName, gatewayName, connectionName, linkConnectionName, options) if err != nil { return nil, err @@ -138,25 +145,20 @@ func (client *VPNLinkConnectionsClient) NewListByVPNConnectionPager(resourceGrou return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VPNLinkConnectionsClientListByVPNConnectionResponse) (VPNLinkConnectionsClientListByVPNConnectionResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByVPNConnectionCreateRequest(ctx, resourceGroupName, gatewayName, connectionName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return VPNLinkConnectionsClientListByVPNConnectionResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VPNLinkConnectionsClient.NewListByVPNConnectionPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByVPNConnectionCreateRequest(ctx, resourceGroupName, gatewayName, connectionName, options) + }, nil) if err != nil { return VPNLinkConnectionsClientListByVPNConnectionResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VPNLinkConnectionsClientListByVPNConnectionResponse{}, runtime.NewResponseError(resp) - } return client.listByVPNConnectionHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -217,10 +219,13 @@ func (client *VPNLinkConnectionsClient) BeginResetConnection(ctx context.Context } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), 
&runtime.NewPollerOptions[VPNLinkConnectionsClientResetConnectionResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VPNLinkConnectionsClientResetConnectionResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VPNLinkConnectionsClientResetConnectionResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -230,6 +235,10 @@ func (client *VPNLinkConnectionsClient) BeginResetConnection(ctx context.Context // Generated from API version 2023-05-01 func (client *VPNLinkConnectionsClient) resetConnection(ctx context.Context, resourceGroupName string, gatewayName string, connectionName string, linkConnectionName string, options *VPNLinkConnectionsClientBeginResetConnectionOptions) (*http.Response, error) { var err error + const operationName = "VPNLinkConnectionsClient.BeginResetConnection" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.resetConnectionCreateRequest(ctx, resourceGroupName, gatewayName, connectionName, linkConnectionName, options) if err != nil { return nil, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnserverconfigurations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnserverconfigurations_client.go index feed90e56..1cb954690 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnserverconfigurations_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnserverconfigurations_client.go @@ -33,7 +33,7 @@ type VPNServerConfigurationsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
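Note (reading aid, not part of the vendored patch): the long-running operations change in lockstep across these files. Fresh pollers gain a Tracer option next to FinalStateVia, and resumed pollers swap the bare nil for a NewPollerFromResumeTokenOptions carrying the same tracer. A condensed sketch of the new shape follows, assuming the generated package's imports; beginExample is a hypothetical wrapper, while the response type is the real one from this file.

// Illustrative sketch only: how the regenerated Begin* methods wire the tracer
// into both fresh and resumed pollers.
func (client *VPNLinkConnectionsClient) beginExample(resp *http.Response, resumeToken string) (*runtime.Poller[VPNLinkConnectionsClientResetConnectionResponse], error) {
	if resumeToken == "" {
		// Fresh poller: options now include the client's tracer.
		return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VPNLinkConnectionsClientResetConnectionResponse]{
			FinalStateVia: runtime.FinalStateViaLocation,
			Tracer:        client.internal.Tracer(),
		})
	}
	// Resumed poller: the former nil options argument becomes an options struct
	// carrying the same tracer.
	return runtime.NewPollerFromResumeToken(resumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VPNLinkConnectionsClientResetConnectionResponse]{
		Tracer: client.internal.Tracer(),
	})
}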
func NewVPNServerConfigurationsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VPNServerConfigurationsClient, error) { - cl, err := arm.NewClient(moduleName+".VPNServerConfigurationsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *VPNServerConfigurationsClient) BeginCreateOrUpdate(ctx context.Con } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VPNServerConfigurationsClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VPNServerConfigurationsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VPNServerConfigurationsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *VPNServerConfigurationsClient) BeginCreateOrUpdate(ctx context.Con // Generated from API version 2023-05-01 func (client *VPNServerConfigurationsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vpnServerConfigurationName string, vpnServerConfigurationParameters VPNServerConfiguration, options *VPNServerConfigurationsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "VPNServerConfigurationsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, vpnServerConfigurationName, vpnServerConfigurationParameters, options) if err != nil { return nil, err @@ -134,10 +141,13 @@ func (client *VPNServerConfigurationsClient) BeginDelete(ctx context.Context, re } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VPNServerConfigurationsClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VPNServerConfigurationsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VPNServerConfigurationsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -147,6 +157,10 @@ func (client *VPNServerConfigurationsClient) BeginDelete(ctx context.Context, re // Generated from API version 2023-05-01 func (client *VPNServerConfigurationsClient) deleteOperation(ctx context.Context, resourceGroupName string, vpnServerConfigurationName string, options *VPNServerConfigurationsClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "VPNServerConfigurationsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, vpnServerConfigurationName, options) if err != nil { return nil, err @@ -198,6 +212,10 @@ func (client 
*VPNServerConfigurationsClient) deleteCreateRequest(ctx context.Con // method. func (client *VPNServerConfigurationsClient) Get(ctx context.Context, resourceGroupName string, vpnServerConfigurationName string, options *VPNServerConfigurationsClientGetOptions) (VPNServerConfigurationsClientGetResponse, error) { var err error + const operationName = "VPNServerConfigurationsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, vpnServerConfigurationName, options) if err != nil { return VPNServerConfigurationsClientGetResponse{}, err @@ -260,25 +278,20 @@ func (client *VPNServerConfigurationsClient) NewListPager(options *VPNServerConf return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VPNServerConfigurationsClientListResponse) (VPNServerConfigurationsClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return VPNServerConfigurationsClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VPNServerConfigurationsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) if err != nil { return VPNServerConfigurationsClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VPNServerConfigurationsClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -321,25 +334,20 @@ func (client *VPNServerConfigurationsClient) NewListByResourceGroupPager(resourc return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VPNServerConfigurationsClientListByResourceGroupResponse) (VPNServerConfigurationsClientListByResourceGroupResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VPNServerConfigurationsClient.NewListByResourceGroupPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return VPNServerConfigurationsClientListByResourceGroupResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return VPNServerConfigurationsClientListByResourceGroupResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VPNServerConfigurationsClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) - } return client.listByResourceGroupHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -385,6 +393,10 @@ func (client *VPNServerConfigurationsClient) 
listByResourceGroupHandleResponse(r // method. func (client *VPNServerConfigurationsClient) UpdateTags(ctx context.Context, resourceGroupName string, vpnServerConfigurationName string, vpnServerConfigurationParameters TagsObject, options *VPNServerConfigurationsClientUpdateTagsOptions) (VPNServerConfigurationsClientUpdateTagsResponse, error) { var err error + const operationName = "VPNServerConfigurationsClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, vpnServerConfigurationName, vpnServerConfigurationParameters, options) if err != nil { return VPNServerConfigurationsClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnserverconfigurationsassociatedwithvirtualwan_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnserverconfigurationsassociatedwithvirtualwan_client.go index b8feb7ff1..f7e5490f7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnserverconfigurationsassociatedwithvirtualwan_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnserverconfigurationsassociatedwithvirtualwan_client.go @@ -33,7 +33,7 @@ type VPNServerConfigurationsAssociatedWithVirtualWanClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewVPNServerConfigurationsAssociatedWithVirtualWanClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VPNServerConfigurationsAssociatedWithVirtualWanClient, error) { - cl, err := arm.NewClient(moduleName+".VPNServerConfigurationsAssociatedWithVirtualWanClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -60,10 +60,13 @@ func (client *VPNServerConfigurationsAssociatedWithVirtualWanClient) BeginList(c } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VPNServerConfigurationsAssociatedWithVirtualWanClientListResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VPNServerConfigurationsAssociatedWithVirtualWanClientListResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VPNServerConfigurationsAssociatedWithVirtualWanClientListResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -73,6 +76,10 @@ func (client *VPNServerConfigurationsAssociatedWithVirtualWanClient) BeginList(c // Generated from API version 2023-05-01 func (client *VPNServerConfigurationsAssociatedWithVirtualWanClient) listOperation(ctx context.Context, resourceGroupName string, virtualWANName string, options *VPNServerConfigurationsAssociatedWithVirtualWanClientBeginListOptions) (*http.Response, error) { var err error + const operationName = "VPNServerConfigurationsAssociatedWithVirtualWanClient.BeginList" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, 
client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listCreateRequest(ctx, resourceGroupName, virtualWANName, options) if err != nil { return nil, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnsitelinkconnections_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnsitelinkconnections_client.go index 4c0c45d18..ce9c64340 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnsitelinkconnections_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnsitelinkconnections_client.go @@ -33,7 +33,7 @@ type VPNSiteLinkConnectionsClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewVPNSiteLinkConnectionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VPNSiteLinkConnectionsClient, error) { - cl, err := arm.NewClient(moduleName+".VPNSiteLinkConnectionsClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -56,6 +56,10 @@ func NewVPNSiteLinkConnectionsClient(subscriptionID string, credential azcore.To // method. func (client *VPNSiteLinkConnectionsClient) Get(ctx context.Context, resourceGroupName string, gatewayName string, connectionName string, linkConnectionName string, options *VPNSiteLinkConnectionsClientGetOptions) (VPNSiteLinkConnectionsClientGetResponse, error) { var err error + const operationName = "VPNSiteLinkConnectionsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, gatewayName, connectionName, linkConnectionName, options) if err != nil { return VPNSiteLinkConnectionsClientGetResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnsitelinks_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnsitelinks_client.go index 124ae6ecb..ae77b690b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnsitelinks_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnsitelinks_client.go @@ -33,7 +33,7 @@ type VPNSiteLinksClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewVPNSiteLinksClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VPNSiteLinksClient, error) { - cl, err := arm.NewClient(moduleName+".VPNSiteLinksClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -54,6 +54,10 @@ func NewVPNSiteLinksClient(subscriptionID string, credential azcore.TokenCredent // - options - VPNSiteLinksClientGetOptions contains the optional parameters for the VPNSiteLinksClient.Get method. 
func (client *VPNSiteLinksClient) Get(ctx context.Context, resourceGroupName string, vpnSiteName string, vpnSiteLinkName string, options *VPNSiteLinksClientGetOptions) (VPNSiteLinksClientGetResponse, error) { var err error + const operationName = "VPNSiteLinksClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, vpnSiteName, vpnSiteLinkName, options) if err != nil { return VPNSiteLinksClientGetResponse{}, err @@ -122,25 +126,20 @@ func (client *VPNSiteLinksClient) NewListByVPNSitePager(resourceGroupName string return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VPNSiteLinksClientListByVPNSiteResponse) (VPNSiteLinksClientListByVPNSiteResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByVPNSiteCreateRequest(ctx, resourceGroupName, vpnSiteName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VPNSiteLinksClient.NewListByVPNSitePager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByVPNSiteCreateRequest(ctx, resourceGroupName, vpnSiteName, options) + }, nil) if err != nil { return VPNSiteLinksClientListByVPNSiteResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return VPNSiteLinksClientListByVPNSiteResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VPNSiteLinksClientListByVPNSiteResponse{}, runtime.NewResponseError(resp) - } return client.listByVPNSiteHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnsites_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnsites_client.go index bbdbcef64..ea91d8bec 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnsites_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnsites_client.go @@ -33,7 +33,7 @@ type VPNSitesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. 
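Note (reading aid, not part of the vendored patch): the pagers follow the third repeated pattern in this diff. The manual next-link branch and status check are replaced by runtime.FetcherForNextLink, and the paging handler gains a Tracer. A trimmed, hedged sketch using the VPNSiteLinksClient types referenced above; exampleListPager is a hypothetical name and the options arguments are left nil for brevity.

// Illustrative sketch only: FetcherForNextLink issues the first-page request via
// the callback, follows NextLink on later pages, and handles the success-status
// check the old code did by hand, which is why the explicit HasStatusCode branch is gone.
func (client *VPNSiteLinksClient) exampleListPager(resourceGroupName, vpnSiteName string) *runtime.Pager[VPNSiteLinksClientListByVPNSiteResponse] {
	return runtime.NewPager(runtime.PagingHandler[VPNSiteLinksClientListByVPNSiteResponse]{
		More: func(page VPNSiteLinksClientListByVPNSiteResponse) bool {
			return page.NextLink != nil && len(*page.NextLink) > 0
		},
		Fetcher: func(ctx context.Context, page *VPNSiteLinksClientListByVPNSiteResponse) (VPNSiteLinksClientListByVPNSiteResponse, error) {
			// Name the paging operation for tracing, as the generated code does.
			ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VPNSiteLinksClient.NewListByVPNSitePager")
			nextLink := ""
			if page != nil {
				nextLink = *page.NextLink
			}
			resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) {
				return client.listByVPNSiteCreateRequest(ctx, resourceGroupName, vpnSiteName, nil)
			}, nil)
			if err != nil {
				return VPNSiteLinksClientListByVPNSiteResponse{}, err
			}
			return client.listByVPNSiteHandleResponse(resp)
		},
		Tracer: client.internal.Tracer(),
	})
}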
func NewVPNSitesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VPNSitesClient, error) { - cl, err := arm.NewClient(moduleName+".VPNSitesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *VPNSitesClient) BeginCreateOrUpdate(ctx context.Context, resourceG } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VPNSitesClientCreateOrUpdateResponse]{ FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VPNSitesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VPNSitesClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *VPNSitesClient) BeginCreateOrUpdate(ctx context.Context, resourceG // Generated from API version 2023-05-01 func (client *VPNSitesClient) createOrUpdate(ctx context.Context, resourceGroupName string, vpnSiteName string, vpnSiteParameters VPNSite, options *VPNSitesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error + const operationName = "VPNSitesClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, vpnSiteName, vpnSiteParameters, options) if err != nil { return nil, err @@ -133,10 +140,13 @@ func (client *VPNSitesClient) BeginDelete(ctx context.Context, resourceGroupName } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VPNSitesClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VPNSitesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VPNSitesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -146,6 +156,10 @@ func (client *VPNSitesClient) BeginDelete(ctx context.Context, resourceGroupName // Generated from API version 2023-05-01 func (client *VPNSitesClient) deleteOperation(ctx context.Context, resourceGroupName string, vpnSiteName string, options *VPNSitesClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "VPNSitesClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, vpnSiteName, options) if err != nil { return nil, err @@ -196,6 +210,10 @@ func (client *VPNSitesClient) deleteCreateRequest(ctx context.Context, resourceG // - options - VPNSitesClientGetOptions contains the optional parameters for the VPNSitesClient.Get method. 
func (client *VPNSitesClient) Get(ctx context.Context, resourceGroupName string, vpnSiteName string, options *VPNSitesClientGetOptions) (VPNSitesClientGetResponse, error) { var err error + const operationName = "VPNSitesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, vpnSiteName, options) if err != nil { return VPNSitesClientGetResponse{}, err @@ -257,25 +275,20 @@ func (client *VPNSitesClient) NewListPager(options *VPNSitesClientListOptions) * return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VPNSitesClientListResponse) (VPNSitesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) - } - if err != nil { - return VPNSitesClientListResponse{}, err + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VPNSitesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) if err != nil { return VPNSitesClientListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VPNSitesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -318,25 +331,20 @@ func (client *VPNSitesClient) NewListByResourceGroupPager(resourceGroupName stri return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *VPNSitesClientListByResourceGroupResponse) (VPNSitesClientListByResourceGroupResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "VPNSitesClient.NewListByResourceGroupPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return VPNSitesClientListByResourceGroupResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return VPNSitesClientListByResourceGroupResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return VPNSitesClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) - } return client.listByResourceGroupHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -381,6 +389,10 @@ func (client *VPNSitesClient) listByResourceGroupHandleResponse(resp *http.Respo // - options - VPNSitesClientUpdateTagsOptions contains the optional parameters for the VPNSitesClient.UpdateTags method. 
func (client *VPNSitesClient) UpdateTags(ctx context.Context, resourceGroupName string, vpnSiteName string, vpnSiteParameters TagsObject, options *VPNSitesClientUpdateTagsOptions) (VPNSitesClientUpdateTagsResponse, error) { var err error + const operationName = "VPNSitesClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, vpnSiteName, vpnSiteParameters, options) if err != nil { return VPNSitesClientUpdateTagsResponse{}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnsitesconfiguration_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnsitesconfiguration_client.go index 92f858074..ae821016a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnsitesconfiguration_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnsitesconfiguration_client.go @@ -33,7 +33,7 @@ type VPNSitesConfigurationClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewVPNSitesConfigurationClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VPNSitesConfigurationClient, error) { - cl, err := arm.NewClient(moduleName+".VPNSitesConfigurationClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -61,10 +61,13 @@ func (client *VPNSitesConfigurationClient) BeginDownload(ctx context.Context, re } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VPNSitesConfigurationClientDownloadResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[VPNSitesConfigurationClientDownloadResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VPNSitesConfigurationClientDownloadResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -74,6 +77,10 @@ func (client *VPNSitesConfigurationClient) BeginDownload(ctx context.Context, re // Generated from API version 2023-05-01 func (client *VPNSitesConfigurationClient) download(ctx context.Context, resourceGroupName string, virtualWANName string, request GetVPNSitesConfigurationRequest, options *VPNSitesConfigurationClientBeginDownloadOptions) (*http.Response, error) { var err error + const operationName = "VPNSitesConfigurationClient.BeginDownload" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.downloadCreateRequest(ctx, resourceGroupName, virtualWANName, request, options) if err != nil { return nil, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/watchers_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/watchers_client.go index 0dfd46432..b29eebfb1 100644 --- 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/watchers_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/watchers_client.go @@ -33,7 +33,7 @@ type WatchersClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewWatchersClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*WatchersClient, error) { - cl, err := arm.NewClient(moduleName+".WatchersClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -62,10 +62,13 @@ func (client *WatchersClient) BeginCheckConnectivity(ctx context.Context, resour } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[WatchersClientCheckConnectivityResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[WatchersClientCheckConnectivityResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[WatchersClientCheckConnectivityResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -76,6 +79,10 @@ func (client *WatchersClient) BeginCheckConnectivity(ctx context.Context, resour // Generated from API version 2023-05-01 func (client *WatchersClient) checkConnectivity(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters ConnectivityParameters, options *WatchersClientBeginCheckConnectivityOptions) (*http.Response, error) { var err error + const operationName = "WatchersClient.BeginCheckConnectivity" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.checkConnectivityCreateRequest(ctx, resourceGroupName, networkWatcherName, parameters, options) if err != nil { return nil, err @@ -130,6 +137,10 @@ func (client *WatchersClient) checkConnectivityCreateRequest(ctx context.Context // - options - WatchersClientCreateOrUpdateOptions contains the optional parameters for the WatchersClient.CreateOrUpdate method. 
func (client *WatchersClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters Watcher, options *WatchersClientCreateOrUpdateOptions) (WatchersClientCreateOrUpdateResponse, error) { var err error + const operationName = "WatchersClient.CreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, networkWatcherName, parameters, options) if err != nil { return WatchersClientCreateOrUpdateResponse{}, err @@ -199,10 +210,13 @@ func (client *WatchersClient) BeginDelete(ctx context.Context, resourceGroupName } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[WatchersClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[WatchersClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[WatchersClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -212,6 +226,10 @@ func (client *WatchersClient) BeginDelete(ctx context.Context, resourceGroupName // Generated from API version 2023-05-01 func (client *WatchersClient) deleteOperation(ctx context.Context, resourceGroupName string, networkWatcherName string, options *WatchersClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "WatchersClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, networkWatcherName, options) if err != nil { return nil, err @@ -262,6 +280,10 @@ func (client *WatchersClient) deleteCreateRequest(ctx context.Context, resourceG // - options - WatchersClientGetOptions contains the optional parameters for the WatchersClient.Get method. 
func (client *WatchersClient) Get(ctx context.Context, resourceGroupName string, networkWatcherName string, options *WatchersClientGetOptions) (WatchersClientGetResponse, error) { var err error + const operationName = "WatchersClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, networkWatcherName, options) if err != nil { return WatchersClientGetResponse{}, err @@ -331,10 +353,13 @@ func (client *WatchersClient) BeginGetAzureReachabilityReport(ctx context.Contex } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[WatchersClientGetAzureReachabilityReportResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[WatchersClientGetAzureReachabilityReportResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[WatchersClientGetAzureReachabilityReportResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -345,6 +370,10 @@ func (client *WatchersClient) BeginGetAzureReachabilityReport(ctx context.Contex // Generated from API version 2023-05-01 func (client *WatchersClient) getAzureReachabilityReport(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters AzureReachabilityReportParameters, options *WatchersClientBeginGetAzureReachabilityReportOptions) (*http.Response, error) { var err error + const operationName = "WatchersClient.BeginGetAzureReachabilityReport" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getAzureReachabilityReportCreateRequest(ctx, resourceGroupName, networkWatcherName, parameters, options) if err != nil { return nil, err @@ -406,10 +435,13 @@ func (client *WatchersClient) BeginGetFlowLogStatus(ctx context.Context, resourc } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[WatchersClientGetFlowLogStatusResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[WatchersClientGetFlowLogStatusResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[WatchersClientGetFlowLogStatusResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -419,6 +451,10 @@ func (client *WatchersClient) BeginGetFlowLogStatus(ctx context.Context, resourc // Generated from API version 2023-05-01 func (client *WatchersClient) getFlowLogStatus(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters FlowLogStatusParameters, options *WatchersClientBeginGetFlowLogStatusOptions) (*http.Response, error) { var err error + const operationName = "WatchersClient.BeginGetFlowLogStatus" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := 
client.getFlowLogStatusCreateRequest(ctx, resourceGroupName, networkWatcherName, parameters, options) if err != nil { return nil, err @@ -484,10 +520,13 @@ func (client *WatchersClient) BeginGetNetworkConfigurationDiagnostic(ctx context } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[WatchersClientGetNetworkConfigurationDiagnosticResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[WatchersClientGetNetworkConfigurationDiagnosticResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[WatchersClientGetNetworkConfigurationDiagnosticResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -501,6 +540,10 @@ func (client *WatchersClient) BeginGetNetworkConfigurationDiagnostic(ctx context // Generated from API version 2023-05-01 func (client *WatchersClient) getNetworkConfigurationDiagnostic(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters ConfigurationDiagnosticParameters, options *WatchersClientBeginGetNetworkConfigurationDiagnosticOptions) (*http.Response, error) { var err error + const operationName = "WatchersClient.BeginGetNetworkConfigurationDiagnostic" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getNetworkConfigurationDiagnosticCreateRequest(ctx, resourceGroupName, networkWatcherName, parameters, options) if err != nil { return nil, err @@ -562,10 +605,13 @@ func (client *WatchersClient) BeginGetNextHop(ctx context.Context, resourceGroup } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[WatchersClientGetNextHopResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[WatchersClientGetNextHopResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[WatchersClientGetNextHopResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -575,6 +621,10 @@ func (client *WatchersClient) BeginGetNextHop(ctx context.Context, resourceGroup // Generated from API version 2023-05-01 func (client *WatchersClient) getNextHop(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters NextHopParameters, options *WatchersClientBeginGetNextHopOptions) (*http.Response, error) { var err error + const operationName = "WatchersClient.BeginGetNextHop" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getNextHopCreateRequest(ctx, resourceGroupName, networkWatcherName, parameters, options) if err != nil { return nil, err @@ -629,6 +679,10 @@ func (client *WatchersClient) getNextHopCreateRequest(ctx context.Context, resou // - options - WatchersClientGetTopologyOptions contains the optional parameters for the WatchersClient.GetTopology method. 
func (client *WatchersClient) GetTopology(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters TopologyParameters, options *WatchersClientGetTopologyOptions) (WatchersClientGetTopologyResponse, error) { var err error + const operationName = "WatchersClient.GetTopology" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getTopologyCreateRequest(ctx, resourceGroupName, networkWatcherName, parameters, options) if err != nil { return WatchersClientGetTopologyResponse{}, err @@ -700,10 +754,13 @@ func (client *WatchersClient) BeginGetTroubleshooting(ctx context.Context, resou } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[WatchersClientGetTroubleshootingResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[WatchersClientGetTroubleshootingResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[WatchersClientGetTroubleshootingResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -713,6 +770,10 @@ func (client *WatchersClient) BeginGetTroubleshooting(ctx context.Context, resou // Generated from API version 2023-05-01 func (client *WatchersClient) getTroubleshooting(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters TroubleshootingParameters, options *WatchersClientBeginGetTroubleshootingOptions) (*http.Response, error) { var err error + const operationName = "WatchersClient.BeginGetTroubleshooting" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getTroubleshootingCreateRequest(ctx, resourceGroupName, networkWatcherName, parameters, options) if err != nil { return nil, err @@ -774,10 +835,13 @@ func (client *WatchersClient) BeginGetTroubleshootingResult(ctx context.Context, } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[WatchersClientGetTroubleshootingResultResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[WatchersClientGetTroubleshootingResultResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[WatchersClientGetTroubleshootingResultResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -787,6 +851,10 @@ func (client *WatchersClient) BeginGetTroubleshootingResult(ctx context.Context, // Generated from API version 2023-05-01 func (client *WatchersClient) getTroubleshootingResult(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters QueryTroubleshootingParameters, options *WatchersClientBeginGetTroubleshootingResultOptions) (*http.Response, error) { var err error + const operationName = "WatchersClient.BeginGetTroubleshootingResult" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), 
nil) + defer func() { endSpan(err) }() req, err := client.getTroubleshootingResultCreateRequest(ctx, resourceGroupName, networkWatcherName, parameters, options) if err != nil { return nil, err @@ -848,10 +916,13 @@ func (client *WatchersClient) BeginGetVMSecurityRules(ctx context.Context, resou } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[WatchersClientGetVMSecurityRulesResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[WatchersClientGetVMSecurityRulesResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[WatchersClientGetVMSecurityRulesResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -861,6 +932,10 @@ func (client *WatchersClient) BeginGetVMSecurityRules(ctx context.Context, resou // Generated from API version 2023-05-01 func (client *WatchersClient) getVMSecurityRules(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters SecurityGroupViewParameters, options *WatchersClientBeginGetVMSecurityRulesOptions) (*http.Response, error) { var err error + const operationName = "WatchersClient.BeginGetVMSecurityRules" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getVMSecurityRulesCreateRequest(ctx, resourceGroupName, networkWatcherName, parameters, options) if err != nil { return nil, err @@ -916,6 +991,7 @@ func (client *WatchersClient) NewListPager(resourceGroupName string, options *Wa return false }, Fetcher: func(ctx context.Context, page *WatchersClientListResponse) (WatchersClientListResponse, error) { + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "WatchersClient.NewListPager") req, err := client.listCreateRequest(ctx, resourceGroupName, options) if err != nil { return WatchersClientListResponse{}, err @@ -929,6 +1005,7 @@ func (client *WatchersClient) NewListPager(resourceGroupName string, options *Wa } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -973,6 +1050,7 @@ func (client *WatchersClient) NewListAllPager(options *WatchersClientListAllOpti return false }, Fetcher: func(ctx context.Context, page *WatchersClientListAllResponse) (WatchersClientListAllResponse, error) { + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "WatchersClient.NewListAllPager") req, err := client.listAllCreateRequest(ctx, options) if err != nil { return WatchersClientListAllResponse{}, err @@ -986,6 +1064,7 @@ func (client *WatchersClient) NewListAllPager(options *WatchersClientListAllOpti } return client.listAllHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -1034,10 +1113,13 @@ func (client *WatchersClient) BeginListAvailableProviders(ctx context.Context, r } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[WatchersClientListAvailableProvidersResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[WatchersClientListAvailableProvidersResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), 
&runtime.NewPollerFromResumeTokenOptions[WatchersClientListAvailableProvidersResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -1048,6 +1130,10 @@ func (client *WatchersClient) BeginListAvailableProviders(ctx context.Context, r // Generated from API version 2023-05-01 func (client *WatchersClient) listAvailableProviders(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters AvailableProvidersListParameters, options *WatchersClientBeginListAvailableProvidersOptions) (*http.Response, error) { var err error + const operationName = "WatchersClient.BeginListAvailableProviders" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.listAvailableProvidersCreateRequest(ctx, resourceGroupName, networkWatcherName, parameters, options) if err != nil { return nil, err @@ -1109,10 +1195,13 @@ func (client *WatchersClient) BeginSetFlowLogConfiguration(ctx context.Context, } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[WatchersClientSetFlowLogConfigurationResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[WatchersClientSetFlowLogConfigurationResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[WatchersClientSetFlowLogConfigurationResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -1122,6 +1211,10 @@ func (client *WatchersClient) BeginSetFlowLogConfiguration(ctx context.Context, // Generated from API version 2023-05-01 func (client *WatchersClient) setFlowLogConfiguration(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters FlowLogInformation, options *WatchersClientBeginSetFlowLogConfigurationOptions) (*http.Response, error) { var err error + const operationName = "WatchersClient.BeginSetFlowLogConfiguration" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.setFlowLogConfigurationCreateRequest(ctx, resourceGroupName, networkWatcherName, parameters, options) if err != nil { return nil, err @@ -1176,6 +1269,10 @@ func (client *WatchersClient) setFlowLogConfigurationCreateRequest(ctx context.C // - options - WatchersClientUpdateTagsOptions contains the optional parameters for the WatchersClient.UpdateTags method. 
func (client *WatchersClient) UpdateTags(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters TagsObject, options *WatchersClientUpdateTagsOptions) (WatchersClientUpdateTagsResponse, error) { var err error + const operationName = "WatchersClient.UpdateTags" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.updateTagsCreateRequest(ctx, resourceGroupName, networkWatcherName, parameters, options) if err != nil { return WatchersClientUpdateTagsResponse{}, err @@ -1247,10 +1344,13 @@ func (client *WatchersClient) BeginVerifyIPFlow(ctx context.Context, resourceGro } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[WatchersClientVerifyIPFlowResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[WatchersClientVerifyIPFlowResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[WatchersClientVerifyIPFlowResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -1260,6 +1360,10 @@ func (client *WatchersClient) BeginVerifyIPFlow(ctx context.Context, resourceGro // Generated from API version 2023-05-01 func (client *WatchersClient) verifyIPFlow(ctx context.Context, resourceGroupName string, networkWatcherName string, parameters VerificationIPFlowParameters, options *WatchersClientBeginVerifyIPFlowOptions) (*http.Response, error) { var err error + const operationName = "WatchersClient.BeginVerifyIPFlow" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.verifyIPFlowCreateRequest(ctx, resourceGroupName, networkWatcherName, parameters, options) if err != nil { return nil, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/webapplicationfirewallpolicies_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/webapplicationfirewallpolicies_client.go index afd9d1a13..eb2154b45 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/webapplicationfirewallpolicies_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/webapplicationfirewallpolicies_client.go @@ -33,7 +33,7 @@ type WebApplicationFirewallPoliciesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewWebApplicationFirewallPoliciesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*WebApplicationFirewallPoliciesClient, error) { - cl, err := arm.NewClient(moduleName+".WebApplicationFirewallPoliciesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -55,6 +55,10 @@ func NewWebApplicationFirewallPoliciesClient(subscriptionID string, credential a // method. 
func (client *WebApplicationFirewallPoliciesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, policyName string, parameters WebApplicationFirewallPolicy, options *WebApplicationFirewallPoliciesClientCreateOrUpdateOptions) (WebApplicationFirewallPoliciesClientCreateOrUpdateResponse, error) { var err error + const operationName = "WebApplicationFirewallPoliciesClient.CreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, policyName, parameters, options) if err != nil { return WebApplicationFirewallPoliciesClientCreateOrUpdateResponse{}, err @@ -125,10 +129,13 @@ func (client *WebApplicationFirewallPoliciesClient) BeginDelete(ctx context.Cont } poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[WebApplicationFirewallPoliciesClientDeleteResponse]{ FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), }) return poller, err } else { - return runtime.NewPollerFromResumeToken[WebApplicationFirewallPoliciesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[WebApplicationFirewallPoliciesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) } } @@ -138,6 +145,10 @@ func (client *WebApplicationFirewallPoliciesClient) BeginDelete(ctx context.Cont // Generated from API version 2023-05-01 func (client *WebApplicationFirewallPoliciesClient) deleteOperation(ctx context.Context, resourceGroupName string, policyName string, options *WebApplicationFirewallPoliciesClientBeginDeleteOptions) (*http.Response, error) { var err error + const operationName = "WebApplicationFirewallPoliciesClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.deleteCreateRequest(ctx, resourceGroupName, policyName, options) if err != nil { return nil, err @@ -189,6 +200,10 @@ func (client *WebApplicationFirewallPoliciesClient) deleteCreateRequest(ctx cont // method. 
func (client *WebApplicationFirewallPoliciesClient) Get(ctx context.Context, resourceGroupName string, policyName string, options *WebApplicationFirewallPoliciesClientGetOptions) (WebApplicationFirewallPoliciesClientGetResponse, error) { var err error + const operationName = "WebApplicationFirewallPoliciesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, resourceGroupName, policyName, options) if err != nil { return WebApplicationFirewallPoliciesClientGetResponse{}, err @@ -252,25 +267,20 @@ func (client *WebApplicationFirewallPoliciesClient) NewListPager(resourceGroupNa return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *WebApplicationFirewallPoliciesClientListResponse) (WebApplicationFirewallPoliciesClientListResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listCreateRequest(ctx, resourceGroupName, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "WebApplicationFirewallPoliciesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, options) + }, nil) if err != nil { return WebApplicationFirewallPoliciesClientListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return WebApplicationFirewallPoliciesClientListResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return WebApplicationFirewallPoliciesClientListResponse{}, runtime.NewResponseError(resp) - } return client.listHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } @@ -316,25 +326,20 @@ func (client *WebApplicationFirewallPoliciesClient) NewListAllPager(options *Web return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *WebApplicationFirewallPoliciesClientListAllResponse) (WebApplicationFirewallPoliciesClientListAllResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listAllCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "WebApplicationFirewallPoliciesClient.NewListAllPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAllCreateRequest(ctx, options) + }, nil) if err != nil { return WebApplicationFirewallPoliciesClientListAllResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return WebApplicationFirewallPoliciesClientListAllResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return WebApplicationFirewallPoliciesClientListAllResponse{}, runtime.NewResponseError(resp) - } return client.listAllHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/webcategories_client.go 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/webcategories_client.go index a59062096..26f7c27a7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/webcategories_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/webcategories_client.go @@ -33,7 +33,7 @@ type WebCategoriesClient struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewWebCategoriesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*WebCategoriesClient, error) { - cl, err := arm.NewClient(moduleName+".WebCategoriesClient", moduleVersion, credential, options) + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } @@ -52,6 +52,10 @@ func NewWebCategoriesClient(subscriptionID string, credential azcore.TokenCreden // - options - WebCategoriesClientGetOptions contains the optional parameters for the WebCategoriesClient.Get method. func (client *WebCategoriesClient) Get(ctx context.Context, name string, options *WebCategoriesClientGetOptions) (WebCategoriesClientGetResponse, error) { var err error + const operationName = "WebCategoriesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() req, err := client.getCreateRequest(ctx, name, options) if err != nil { return WebCategoriesClientGetResponse{}, err @@ -113,25 +117,20 @@ func (client *WebCategoriesClient) NewListBySubscriptionPager(options *WebCatego return page.NextLink != nil && len(*page.NextLink) > 0 }, Fetcher: func(ctx context.Context, page *WebCategoriesClientListBySubscriptionResponse) (WebCategoriesClientListBySubscriptionResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.listBySubscriptionCreateRequest(ctx, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "WebCategoriesClient.NewListBySubscriptionPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listBySubscriptionCreateRequest(ctx, options) + }, nil) if err != nil { return WebCategoriesClientListBySubscriptionResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return WebCategoriesClientListBySubscriptionResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return WebCategoriesClientListBySubscriptionResponse{}, runtime.NewResponseError(resp) - } return client.listBySubscriptionHandleResponse(resp) }, + Tracer: client.internal.Tracer(), }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md index e84555fbd..169e46972 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md @@ -1,5 +1,84 @@ # Release History +## 1.1.0 (2023-07-13) + +### Features Added + +* Added [Blob Batch API](https://learn.microsoft.com/rest/api/storageservices/blob-batch). 
+* Added support for bearer challenge for identity based managed disks. +* Added support for GetAccountInfo to container and blob level clients. +* Added [UploadBlobFromURL API](https://learn.microsoft.com/rest/api/storageservices/put-blob-from-url). +* Added support for CopySourceAuthorization to appendblob.AppendBlockFromURL +* Added support for tag permission in Container SAS. + +### Bugs Fixed + +* Fixed time formatting for the conditional request headers. Fixes [#20475](https://github.com/Azure/azure-sdk-for-go/issues/20475). +* Fixed an issue where passing a blob tags map of length 0 would result in the x-ms-tags header to be sent to the service with an empty string as value. + +* Fixed block size and number of blocks calculation in `UploadBuffer` and `UploadFile`. Fixes [#20735](https://github.com/Azure/azure-sdk-for-go/issues/20735). + +### Other Changes + +* Add `dragonfly` to the list of build constraints for `blockblob`. +* Updating version of azcore to 1.6.0 and azidentity to 1.3.0 + +## 1.1.0-beta.1 (2023-05-09) + +### Features Added + +* Added [Blob Batch API](https://learn.microsoft.com/rest/api/storageservices/blob-batch). +* Added support for bearer challenge for identity based managed disks. +* Added support for GetAccountInfo to container and blob level clients. +* Added [UploadBlobFromURL API](https://learn.microsoft.com/rest/api/storageservices/put-blob-from-url). +* Added support for CopySourceAuthorization to appendblob.AppendBlockFromURL +* Added support for tag permission in Container SAS. + +### Bugs Fixed + +* Fixed time formatting for the conditional request headers. Fixes [#20475](https://github.com/Azure/azure-sdk-for-go/issues/20475). +* Fixed an issue where passing a blob tags map of length 0 would result in the x-ms-tags header to be sent to the service with an empty string as value. + +## 1.0.0 (2023-02-07) + +### Features Added + +* Add support to log calculated block size and count during uploads +* Added MissingSharedKeyCredential error type for cleaner UX. Related to [#19864](https://github.com/Azure/azure-sdk-for-go/issues/19864). + +### Breaking Changes + +* Changed API signatures to map correctly to Azure Storage REST APIs, These changes impact: + * `blob.GetSASURL()` + * `blockblob.StageBlockFromURL()` + * `container.SetAccessPolicy()` + * `container.GetSASURL()` + * `service.GetSASURL()` + * `service.FilterBlobs()` + * `lease.AcquireLease()` (blobs and containers) + * `lease.ChangeLease()` (blobs and containers) +* Type name changes: + * `CpkInfo` -> `CPKInfo` + * `CpkScopeInfo` -> `CPKScopeInfo` + * `RuleId` -> `RuleID` + * `PolicyId` -> `PolicyID` + * `CorsRule` -> `CORSRule` +* Remove `AccountServices` it is now hardcoded to blobs + +### Bugs Fixed + +* Fixed encoding issues seen in FilterBlobs. Fixes [#17421](https://github.com/Azure/azure-sdk-for-go/issues/17421). +* Fixing inconsistency seen with Metadata and ORS response. Fixes [#19688](https://github.com/Azure/azure-sdk-for-go/issues/19688). +* Fixed endless loop during pagination issue [#19773](https://github.com/Azure/azure-sdk-for-go/pull/19773). + +### Other Changes + +* Exported some missing types in the `blob`, `container` and `service` packages. Fixes [#19775](https://github.com/Azure/azure-sdk-for-go/issues/19775). 
+* SAS changes [#19781](https://github.com/Azure/azure-sdk-for-go/pull/19781): + * AccountSASPermissions: SetImmutabilityPolicy support + * ContainerSASPermissions: Move support + * Validations to ensure correct sas perm ordering + ## 0.6.1 (2022-12-09) ### Bugs Fixed diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md index 467fe36cd..2759340f5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md @@ -1,47 +1,51 @@ -# Azure Blob Storage SDK for Go +# Azure Blob Storage module for Go -> Server Version: 2020-10-02 +> Service Version: 2020-10-02 -Azure Blob storage is Microsoft's object storage solution for the cloud. Blob -storage is optimized for storing massive amounts of unstructured data. -Unstructured data is data that does not adhere to a particular data model or -definition, such as text or binary data. +Azure Blob Storage is Microsoft's object storage solution for the cloud. Blob +Storage is optimized for storing massive amounts of unstructured data - data that does not adhere to a particular data model or +definition, such as text or binary data. For more information, see [Introduction to Azure Blob Storage](https://learn.microsoft.com/azure/storage/blobs/storage-blobs-introduction). -[Source code][source] | [API reference documentation][docs] | [REST API documentation][rest_docs] | [Product documentation][product_docs] +Use the Azure Blob Storage client module `github.com/Azure/azure-sdk-for-go/sdk/storage/azblob` to: -## Getting started - -### Install the package +* Authenticate clients with Azure Blob Storage +* Manipulate containers and blobs in an Azure storage account -Install the Azure Blob Storage SDK for Go with [go get][goget]: +Key links: -```Powershell -go get github.com/Azure/azure-sdk-for-go/sdk/storage/azblob -``` +[Source code][source] | [API reference documentation][docs] | [REST API documentation][rest_docs] | [Product documentation][product_docs] | [Samples][go_samples] -If you're going to authenticate with Azure Active Directory (recommended), install the [azidentity][azidentity] module. -```Powershell -go get github.com/Azure/azure-sdk-for-go/sdk/azidentity -``` +## Getting started ### Prerequisites -A supported [Go][godevdl] version (the Azure SDK supports the two most recent Go releases). - -You need an [Azure subscription][azure_sub] and a -[Storage Account][storage_account_docs] to use this package. - -To create a new Storage Account, you can use the [Azure Portal][storage_account_create_portal], +- Go, version 1.18 or higher - [Install Go](https://go.dev/doc/install) +- Azure subscription - [Create a free account](https://azure.microsoft.com/free/) +- Azure storage account - To create a storage account, use tools including the [Azure portal][storage_account_create_portal], [Azure PowerShell][storage_account_create_ps], or the [Azure CLI][storage_account_create_cli]. Here's an example using the Azure CLI: -```Powershell +```bash az storage account create --name MyStorageAccount --resource-group MyResourceGroup --location westus --sku Standard_LRS ``` +### Install the package + +Install the Azure Blob Storage client module for Go with [go get][goget]: + +```bash +go get github.com/Azure/azure-sdk-for-go/sdk/storage/azblob +``` + +If you plan to authenticate with Azure Active Directory (recommended), also install the [azidentity][azidentity] module. 
+ +```bash +go get github.com/Azure/azure-sdk-for-go/sdk/azidentity +``` + ### Authenticate the client -In order to interact with the Azure Blob Storage service, you'll need to create an instance of the `azblob.Client` type. The [azidentity][azidentity] module makes it easy to add Azure Active Directory support for authenticating Azure SDK clients with their corresponding Azure services. +To interact with the Azure Blob Storage service, you'll need to create an instance of the `azblob.Client` type. The [azidentity][azidentity] module makes it easy to add Azure Active Directory support for authenticating Azure SDK clients with their corresponding Azure services. ```go // create a credential for authenticating with Azure Active Directory @@ -53,11 +57,17 @@ client, err := azblob.NewClient("https://MYSTORAGEACCOUNT.blob.core.windows.net/ // TODO: handle err ``` -Learn more about enabling Azure Active Directory for authentication with Azure Storage in [our documentation][storage_ad] and [our samples](#next-steps). +Learn more about enabling Azure Active Directory for authentication with Azure Storage: + +* [Authorize access to blobs using Azure Active Directory][storage_ad] + +Other options for authentication include connection strings, shared key, shared access signatures (SAS), and anonymous public access. Use the appropriate client constructor function for the authentication mechanism you wish to use. For examples, see: + +* [Blob samples][samples] ## Key concepts -Blob storage is designed for: +Blob Storage is designed for: - Serving images or documents directly to a browser. - Storing files for distributed access. @@ -66,23 +76,41 @@ Blob storage is designed for: - Storing data for backup and restore, disaster recovery, and archiving. - Storing data for analysis by an on-premises or Azure-hosted service. -Blob storage offers three types of resources: +Blob Storage offers three types of resources: - The _storage account_ - One or more _containers_ in a storage account -- One ore more _blobs_ in a container +- One or more _blobs_ in a container Instances of the `azblob.Client` type provide methods for manipulating containers and blobs within a storage account. The storage account is specified when the `azblob.Client` is constructed. -Use the appropriate client constructor function for the authentication mechanism you wish to use. -Learn more about options for authentication _(including Connection Strings, Shared Key, Shared Access Signatures (SAS), Azure Active Directory (AAD), and anonymous public access)_ [in our examples.](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage/azblob/examples_test.go) +### Specialized clients + +The Azure Blob Storage client module for Go also provides specialized clients in various subpackages. Use these clients when you need to interact with a specific kind of blob. Learn more about [block blobs, append blobs, and page blobs](https://learn.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs). + +- [appendblob][append_blob] +- [blockblob][block_blob] +- [pageblob][page_blob] + +The [blob][blob] package contains APIs common to all blob types. This includes APIs for deleting and undeleting a blob, setting metadata, and more. + +The [lease][lease] package contains clients for managing leases on blobs and containers. See the [REST API reference](https://learn.microsoft.com/rest/api/storageservices/lease-blob#remarks) for general information on leases. 
+ +The [container][container] package contains APIs specific to containers. This includes APIs for setting access policies or properties, and more. + +The [service][service] package contains APIs specific to the Blob service. This includes APIs for manipulating containers, retrieving account information, and more. + +The [sas][sas] package contains utilities to aid in the creation and manipulation of shared access signature (SAS) tokens. +See the package's documentation for more information. ### Goroutine safety -We guarantee that all client instance methods are goroutine-safe and independent of each other ([guideline](https://azure.github.io/azure-sdk/golang_introduction.html#thread-safety)). This ensures that the recommendation of reusing client instances is always safe, even across goroutines. -### About blob metadata -Blob metadata name/value pairs are valid HTTP headers and should adhere to all restrictions governing HTTP headers. Metadata names must be valid HTTP header names, may contain only ASCII characters, and should be treated as case-insensitive. Base64-encode or URL-encode metadata values containing non-ASCII characters. +We guarantee that all client instance methods are goroutine-safe and independent of each other (see [guideline](https://azure.github.io/azure-sdk/golang_introduction.html#thread-safety)). This ensures that the recommendation to reuse client instances is always safe, even across goroutines. + +### Blob metadata + +Blob metadata name-value pairs are valid HTTP headers and should adhere to all restrictions governing HTTP headers. Metadata names must be valid HTTP header names, may contain only ASCII characters, and should be treated as case-insensitive. Base64-encode or URL-encode metadata values containing non-ASCII characters. ### Additional concepts @@ -94,7 +122,7 @@ Blob metadata name/value pairs are valid HTTP headers and should adhere to all r ## Examples -### Uploading a blob +### Upload a blob ```go const ( @@ -122,7 +150,7 @@ _, err = client.UploadFile(context.TODO(), containerName, blobName, file, nil) // TODO: handle error ``` -### Downloading a blob +### Download a blob ```go // this example accesses a public blob via anonymous access, so no credentials are required @@ -139,7 +167,7 @@ _, err = client.DownloadFile(context.TODO(), "samples", "cloud.jpg", file, nil) // TODO: handle error ``` -### Enumerating blobs +### Enumerate blobs ```go const ( @@ -177,7 +205,7 @@ All Blob service operations will return an [*azcore.ResponseError][azcore_response_error] on failure with a populated `ErrorCode` field. Many of these errors are recoverable. The [bloberror][blob_error] package provides the possible Storage error codes -along with various helper facilities for error handling. +along with helper facilities for error handling. ```go const ( @@ -201,28 +229,7 @@ if bloberror.HasCode(err, bloberror.ContainerBeingDeleted, bloberror.ContainerNo ## Next steps -Get started with our [Blob samples][samples]. They contain complete examples of the above snippets and more. - -### Specialized clients - -The Azure Blob Storage SDK for Go also provides specialized clients in various subpackages. -Use these clients when you need to interact with a specific kind of blob. -Learn more about the various types of blobs from the following links. 
- -- [appendblob][append_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-append-blobs) -- [blockblob][block_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-block-blobs) -- [pageblob][page_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-page-blobs) - -The [blob][blob] package contains APIs common to all blob types. This includes APIs for deleting and undeleting a blob, setting metadata, and more. - -The [lease][lease] package contains clients for managing leases on blobs and containers. Please see the [reference docs](https://docs.microsoft.com/rest/api/storageservices/lease-blob#remarks) for general information on leases. - -The [container][container] package contains APIs specific to containers. This includes APIs setting access policies or properties, and more. - -The [service][service] package contains APIs specific to blob service. This includes APIs for manipulating containers, retrieving account information, and more. - -The [sas][sas] package contains utilities to aid in the creation and manipulation of Shared Access Signature tokens. -See the package's documentation for more information. +Get started with our [Blob samples][samples]. They contain complete examples of the above snippets and more. ## Contributing @@ -243,19 +250,20 @@ additional questions or comments. [source]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob -[docs]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob -[rest_docs]: https://docs.microsoft.com/rest/api/storageservices/blob-service-rest-api -[product_docs]: https://docs.microsoft.com/azure/storage/blobs/storage-blobs-overview +[docs]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob#section_documentation +[rest_docs]: https://learn.microsoft.com/rest/api/storageservices/blob-service-rest-api +[product_docs]: https://learn.microsoft.com/azure/storage/blobs/storage-blobs-overview [godevdl]: https://go.dev/dl/ [goget]: https://pkg.go.dev/cmd/go#hdr-Add_dependencies_to_current_module_and_install_them -[storage_account_docs]: https://docs.microsoft.com/azure/storage/common/storage-account-overview -[storage_account_create_ps]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-powershell -[storage_account_create_cli]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-cli -[storage_account_create_portal]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal -[azure_cli]: https://docs.microsoft.com/cli/azure +[go_samples]: https://github.com/Azure-Samples/azure-sdk-for-go-samples/tree/main +[storage_account_docs]: https://learn.microsoft.com/azure/storage/common/storage-account-overview +[storage_account_create_ps]: https://learn.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-powershell +[storage_account_create_cli]: https://learn.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-cli +[storage_account_create_portal]: https://learn.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal +[azure_cli]: https://learn.microsoft.com/cli/azure [azure_sub]: https://azure.microsoft.com/free/ [azidentity]: 
https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity -[storage_ad]: https://docs.microsoft.com/azure/storage/common/storage-auth-aad +[storage_ad]: https://learn.microsoft.com/azure/storage/common/storage-auth-aad [azcore_response_error]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError [samples]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage/azblob/examples_test.go [append_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/appendblob/client.go diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go index b9778e948..fc96b67de 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go @@ -8,6 +8,7 @@ package appendblob import ( "context" + "errors" "io" "os" "time" @@ -22,9 +23,7 @@ import ( ) // ClientOptions contains the optional parameters when creating a Client. -type ClientOptions struct { - azcore.ClientOptions -} +type ClientOptions base.ClientOptions // Client represents a client to an Azure Storage append blob; type Client base.CompositeClient[generated.BlobClient, generated.AppendBlobClient] @@ -34,10 +33,12 @@ type Client base.CompositeClient[generated.BlobClient, generated.AppendBlobClien // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + authPolicy := shared.NewStorageChallengePolicy(cred) conOptions := shared.GetClientOptions(options) conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + pl := runtime.NewPipeline(exported.ModuleName, + exported.ModuleVersion, runtime.PipelineOptions{}, + &conOptions.ClientOptions) return (*Client)(base.NewAppendBlobClient(blobURL, pl, nil)), nil } @@ -48,7 +49,10 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + pl := runtime.NewPipeline(exported.ModuleName, + exported.ModuleVersion, + runtime.PipelineOptions{}, + &conOptions.ClientOptions) return (*Client)(base.NewAppendBlobClient(blobURL, pl, nil)), nil } @@ -61,7 +65,10 @@ func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCreden authPolicy := exported.NewSharedKeyCredPolicy(cred) conOptions := shared.GetClientOptions(options) conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + pl := runtime.NewPipeline(exported.ModuleName, + exported.ModuleVersion, + runtime.PipelineOptions{}, + &conOptions.ClientOptions) return (*Client)(base.NewAppendBlobClient(blobURL, pl, cred)), nil } @@ -166,7 +173,15 @@ 
func (ab *Client) AppendBlock(ctx context.Context, body io.ReadSeekCloser, o *Ap } } - resp, err := ab.generated().AppendBlock(ctx, count, body, appendOptions, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions) + resp, err := ab.generated().AppendBlock(ctx, + count, + body, + appendOptions, + leaseAccessConditions, + appendPositionAccessConditions, + cpkInfo, + cpkScope, + modifiedAccessConditions) return resp, err } @@ -174,11 +189,25 @@ func (ab *Client) AppendBlock(ctx context.Context, body io.ReadSeekCloser, o *Ap // AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob. // For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url. func (ab *Client) AppendBlockFromURL(ctx context.Context, source string, o *AppendBlockFromURLOptions) (AppendBlockFromURLResponse, error) { - appendBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions := o.format() + appendBlockFromURLOptions, + cpkInfo, + cpkScopeInfo, + leaseAccessConditions, + appendPositionAccessConditions, + modifiedAccessConditions, + sourceModifiedAccessConditions := o.format() // content length should be 0 on * from URL. always. It's a 400 if it isn't. - resp, err := ab.generated().AppendBlockFromURL(ctx, source, 0, appendBlockFromURLOptions, cpkInfo, cpkScopeInfo, - leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + resp, err := ab.generated().AppendBlockFromURL(ctx, + source, + 0, + appendBlockFromURLOptions, + cpkInfo, + cpkScopeInfo, + leaseAccessConditions, + appendPositionAccessConditions, + modifiedAccessConditions, + sourceModifiedAccessConditions) return resp, err } @@ -186,7 +215,11 @@ func (ab *Client) AppendBlockFromURL(ctx context.Context, source string, o *Appe // https://docs.microsoft.com/en-us/rest/api/storageservices/append-blob-seal func (ab *Client) Seal(ctx context.Context, o *SealOptions) (SealResponse, error) { leaseAccessConditions, modifiedAccessConditions, positionAccessConditions := o.format() - resp, err := ab.generated().Seal(ctx, nil, leaseAccessConditions, modifiedAccessConditions, positionAccessConditions) + resp, err := ab.generated().Seal(ctx, + nil, + leaseAccessConditions, + modifiedAccessConditions, + positionAccessConditions) return resp, err } @@ -221,14 +254,10 @@ func (ab *Client) SetLegalHold(ctx context.Context, legalHold bool, options *blo return ab.BlobClient().SetLegalHold(ctx, legalHold, options) } -// SetTier operation sets the tier on a blob. The operation is allowed on a page -// blob in a premium storage account and on a block blob in a blob storage account (locally -// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and -// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation -// does not update the blob's ETag. -// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. +// SetTier +// Deprecated: SetTier only works for page blob in premium storage account and block blob in blob storage account. 
func (ab *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) { - return ab.BlobClient().SetTier(ctx, tier, o) + return blob.SetTierResponse{}, errors.New("operation will not work on this blob type. SetTier only works for page blob in premium storage account and block blob in blob storage account") } // SetExpiry operation sets an expiry time on an existing blob. This operation is only allowed on Hierarchical Namespace enabled accounts. @@ -248,6 +277,12 @@ func (ab *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOption return ab.BlobClient().GetProperties(ctx, o) } +// GetAccountInfo provides account level information +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. +func (ab *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOptions) (blob.GetAccountInfoResponse, error) { + return ab.BlobClient().GetAccountInfo(ctx, o) +} + // SetHTTPHeaders changes a blob's HTTP headers. // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. func (ab *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { @@ -256,7 +291,7 @@ func (ab *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeade // SetMetadata changes a blob's metadata. // https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. -func (ab *Client) SetMetadata(ctx context.Context, metadata map[string]string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { +func (ab *Client) SetMetadata(ctx context.Context, metadata map[string]*string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { return ab.BlobClient().SetMetadata(ctx, metadata, o) } @@ -292,10 +327,10 @@ func (ab *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.Get return ab.BlobClient().GetTags(ctx, o) } -// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. +// CopyFromURL +// Deprecated: CopyFromURL works only with block blob func (ab *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) { - return ab.BlobClient().CopyFromURL(ctx, copySource, o) + return blob.CopyFromURLResponse{}, errors.New("operation will not work on this blob type. CopyFromURL works only with block blob") } // Concurrent Download Functions ----------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go index 91c498c0c..0834743f0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go @@ -37,9 +37,9 @@ type CreateOptions struct { HTTPHeaders *blob.HTTPHeaders - CpkInfo *blob.CpkInfo + CPKInfo *blob.CPKInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKScopeInfo *blob.CPKScopeInfo // Optional. Used to set blob tags in various blob operations. 
Tags map[string]string @@ -49,10 +49,10 @@ type CreateOptions struct { // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. // See Naming and Referencing Containers, Blobs, and Metadata for more information. - Metadata map[string]string + Metadata map[string]*string } -func (o *CreateOptions) format() (*generated.AppendBlobClientCreateOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions) { +func (o *CreateOptions) format() (*generated.AppendBlobClientCreateOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil, nil } @@ -66,7 +66,7 @@ func (o *CreateOptions) format() (*generated.AppendBlobClientCreateOptions, *gen } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return &options, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions + return &options, o.HTTPHeaders, leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions } // --------------------------------------------------------------------------------------------------------------------- @@ -79,35 +79,38 @@ type AppendBlockOptions struct { AppendPositionAccessConditions *AppendPositionAccessConditions - CpkInfo *blob.CpkInfo + CPKInfo *blob.CPKInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKScopeInfo *blob.CPKScopeInfo AccessConditions *blob.AccessConditions } func (o *AppendBlockOptions) format() (*generated.AppendBlobClientAppendBlockOptions, *generated.AppendPositionAccessConditions, - *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { + *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { if o == nil { return nil, nil, nil, nil, nil, nil } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return &generated.AppendBlobClientAppendBlockOptions{}, o.AppendPositionAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions, leaseAccessConditions + return &generated.AppendBlobClientAppendBlockOptions{}, o.AppendPositionAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions, leaseAccessConditions } // --------------------------------------------------------------------------------------------------------------------- // AppendBlockFromURLOptions contains the optional parameters for the Client.AppendBlockFromURL method. type AppendBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // SourceContentValidation contains the validation mechanism used on the range of bytes read from the source. 
SourceContentValidation blob.SourceContentValidationType AppendPositionAccessConditions *AppendPositionAccessConditions - CpkInfo *blob.CpkInfo + CPKInfo *blob.CPKInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKScopeInfo *blob.CPKScopeInfo SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions @@ -117,15 +120,16 @@ type AppendBlockFromURLOptions struct { Range blob.HTTPRange } -func (o *AppendBlockFromURLOptions) format() (*generated.AppendBlobClientAppendBlockFromURLOptions, *generated.CpkInfo, - *generated.CpkScopeInfo, *generated.LeaseAccessConditions, *generated.AppendPositionAccessConditions, +func (o *AppendBlockFromURLOptions) format() (*generated.AppendBlobClientAppendBlockFromURLOptions, *generated.CPKInfo, + *generated.CPKScopeInfo, *generated.LeaseAccessConditions, *generated.AppendPositionAccessConditions, *generated.ModifiedAccessConditions, *generated.SourceModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil, nil, nil } options := &generated.AppendBlobClientAppendBlockFromURLOptions{ - SourceRange: exported.FormatHTTPRange(o.Range), + SourceRange: exported.FormatHTTPRange(o.Range), + CopySourceAuthorization: o.CopySourceAuthorization, } if o.SourceContentValidation != nil { @@ -133,7 +137,7 @@ func (o *AppendBlockFromURLOptions) format() (*generated.AppendBlobClientAppendB } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return options, o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, o.AppendPositionAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions + return options, o.CPKInfo, o.CPKScopeInfo, leaseAccessConditions, o.AppendPositionAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions } // --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go index 952e556fc..e6851237c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go @@ -22,5 +22,5 @@ type AppendBlockFromURLResponse = generated.AppendBlobClientAppendBlockFromURLRe // SealResponse contains the response from method Client.Seal. type SealResponse = generated.AppendBlobClientSealResponse -// SetExpiryResponse contains the response from method BlobClient.SetExpiry. +// SetExpiryResponse contains the response from method Client.SetExpiry. 
type SetExpiryResponse = generated.BlobClientSetExpiryResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json new file mode 100644 index 000000000..4db1d7209 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "go", + "TagPrefix": "go/storage/azblob", + "Tag": "go/storage/azblob_a772b9c866" +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go index 6576b5005..98c166aaf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go @@ -8,7 +8,7 @@ package blob import ( "context" - "errors" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "io" "os" "sync" @@ -25,9 +25,7 @@ import ( ) // ClientOptions contains the optional parameters when creating a Client. -type ClientOptions struct { - azcore.ClientOptions -} +type ClientOptions base.ClientOptions // Client represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob. type Client base.Client[generated.BlobClient] @@ -37,12 +35,12 @@ type Client base.Client[generated.BlobClient] // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + authPolicy := shared.NewStorageChallengePolicy(cred) conOptions := shared.GetClientOptions(options) conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - return (*Client)(base.NewBlobClient(blobURL, pl, nil)), nil + return (*Client)(base.NewBlobClient(blobURL, pl, &cred)), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -100,6 +98,10 @@ func (b *Client) sharedKey() *SharedKeyCredential { return base.SharedKey((*base.Client[generated.BlobClient])(b)) } +func (b *Client) credential() any { + return base.Credential((*base.Client[generated.BlobClient])(b)) +} + // URL returns the URL endpoint used by the Client object. func (b *Client) URL() string { return b.generated().Endpoint() @@ -114,7 +116,7 @@ func (b *Client) WithSnapshot(snapshot string) (*Client, error) { } p.Snapshot = snapshot - return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.sharedKey())), nil + return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.credential())), nil } // WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. @@ -126,7 +128,7 @@ func (b *Client) WithVersionID(versionID string) (*Client, error) { } p.VersionID = versionID - return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.sharedKey())), nil + return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.credential())), nil } // Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. 
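Several of the azblob hunks in this patch (the appendblob client above and the blob client hunks that follow) change metadata parameters from map[string]string to map[string]*string. The sketch below shows what a migrated call site might look like; it is illustrative only, assuming the to.Ptr helper from sdk/azcore/to, azidentity for authentication, and a placeholder blob URL, and it is not taken from the SDK's own samples.

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func main() {
	// Placeholder URL; substitute a real storage account, container, and blob.
	const blobURL = "https://MYSTORAGEACCOUNT.blob.core.windows.net/mycontainer/myblob.txt"

	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}

	client, err := blob.NewClient(blobURL, cred, nil)
	if err != nil {
		log.Fatal(err)
	}

	// SetMetadata now takes map[string]*string, so values are wrapped with to.Ptr
	// instead of being passed as plain strings.
	metadata := map[string]*string{
		"environment": to.Ptr("dev"),
		"owner":       to.Ptr("storage-team"),
	}
	if _, err := client.SetMetadata(context.TODO(), metadata, nil); err != nil {
		log.Fatal(err)
	}
}
```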
@@ -148,10 +150,10 @@ func (b *Client) Undelete(ctx context.Context, o *UndeleteOptions) (UndeleteResp // SetTier operation sets the tier on a blob. The operation is allowed on a page // blob in a premium storage account and on a block blob in a blob storage account (locally -// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and +// redundant storage only). A premium page blob's tier determines the allowed size, IOPs, and // bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation // does not update the blob's ETag. -// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. +// For detailed information about block blob level tiers see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. func (b *Client) SetTier(ctx context.Context, tier AccessTier, o *SetTierOptions) (SetTierResponse, error) { opts, leaseAccessConditions, modifiedAccessConditions := o.format() resp, err := b.generated().SetTier(ctx, tier, opts, leaseAccessConditions, modifiedAccessConditions) @@ -176,7 +178,7 @@ func (b *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders HTTPHeaders, o // SetMetadata changes a blob's metadata. // https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. -func (b *Client) SetMetadata(ctx context.Context, metadata map[string]string, o *SetMetadataOptions) (SetMetadataResponse, error) { +func (b *Client) SetMetadata(ctx context.Context, metadata map[string]*string, o *SetMetadataOptions) (SetMetadataResponse, error) { basics := generated.BlobClientSetMetadataOptions{Metadata: metadata} leaseAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions := o.format() resp, err := b.generated().SetMetadata(ctx, &basics, leaseAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions) @@ -264,11 +266,19 @@ func (b *Client) CopyFromURL(ctx context.Context, copySource string, options *Co return resp, err } +// GetAccountInfo provides account level information +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. +func (b *Client) GetAccountInfo(ctx context.Context, o *GetAccountInfoOptions) (GetAccountInfoResponse, error) { + getAccountInfoOptions := o.format() + resp, err := b.generated().GetAccountInfo(ctx, getAccountInfoOptions) + return resp, err +} + // GetSASURL is a convenience method for generating a SAS token for the currently pointed at blob. // It can only be used if the credential supplied during creation was a SharedKeyCredential. -func (b *Client) GetSASURL(permissions sas.BlobPermissions, start time.Time, expiry time.Time) (string, error) { +func (b *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *GetSASURLOptions) (string, error) { if b.sharedKey() == nil { - return "", errors.New("credential is not a SharedKeyCredential. 
SAS can only be signed with a SharedKeyCredential") + return "", bloberror.MissingSharedKeyCredential } urlParts, err := ParseURL(b.URL()) @@ -281,17 +291,16 @@ func (b *Client) GetSASURL(permissions sas.BlobPermissions, start time.Time, exp if err != nil { t = time.Time{} } + st := o.format() qps, err := sas.BlobSignatureValues{ ContainerName: urlParts.ContainerName, BlobName: urlParts.BlobName, SnapshotTime: t, Version: sas.Version, - - Permissions: permissions.String(), - - StartTime: start.UTC(), - ExpiryTime: expiry.UTC(), + Permissions: permissions.String(), + StartTime: st, + ExpiryTime: expiry.UTC(), }.SignWithSharedKey(b.sharedKey()) if err != nil { @@ -314,12 +323,11 @@ func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOpt count := o.Range.Count if count == CountToEnd { // If size not specified, calculate it // If we don't have the length at all, get it - downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{}, nil) - dr, err := b.DownloadStream(ctx, downloadBlobOptions) + gr, err := b.GetProperties(ctx, o.getBlobPropertiesOptions()) if err != nil { return 0, err } - count = *dr.ContentLength - o.Range.Offset + count = *gr.ContentLength - o.Range.Offset } if count <= 0 { @@ -387,12 +395,12 @@ func (b *Client) DownloadStream(ctx context.Context, o *DownloadStreamOptions) ( } return DownloadStreamResponse{ - client: b, - BlobClientDownloadResponse: dr, - getInfo: httpGetterInfo{Range: o.Range, ETag: dr.ETag}, - ObjectReplicationRules: deserializeORSPolicies(dr.ObjectReplicationRules), - cpkInfo: o.CpkInfo, - cpkScope: o.CpkScopeInfo, + client: b, + DownloadResponse: dr, + getInfo: httpGetterInfo{Range: o.Range, ETag: dr.ETag}, + ObjectReplicationRules: deserializeORSPolicies(dr.ObjectReplicationRules), + cpkInfo: o.CPKInfo, + cpkScope: o.CPKScopeInfo, }, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go index 9cd65bb4b..c15635448 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go @@ -47,7 +47,7 @@ func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { return generated.PossibleDeleteSnapshotsOptionTypeValues() } -// AccessTier defines values for Blob Access Tier +// AccessTier defines values for Blob Access Tier. type AccessTier = generated.AccessTier const ( @@ -129,7 +129,7 @@ func PossibleCopyStatusTypeValues() []CopyStatusType { return generated.PossibleCopyStatusTypeValues() } -// EncryptionAlgorithmType defines values for EncryptionAlgorithmType +// EncryptionAlgorithmType defines values for EncryptionAlgorithmType. type EncryptionAlgorithmType = generated.EncryptionAlgorithmType const ( @@ -142,7 +142,7 @@ func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { return generated.PossibleEncryptionAlgorithmTypeValues() } -// ArchiveStatus defines values for ArchiveStatus +// ArchiveStatus defines values for ArchiveStatus. type ArchiveStatus = generated.ArchiveStatus const ( @@ -155,7 +155,7 @@ func PossibleArchiveStatusValues() []ArchiveStatus { return generated.PossibleArchiveStatusValues() } -// DeleteType defines values for DeleteType +// DeleteType defines values for DeleteType. 
type DeleteType = generated.DeleteType const ( @@ -204,7 +204,7 @@ type SourceContentValidationType interface { notPubliclyImplementable() } -// SourceContentValidationTypeCRC64 is a SourceContentValidationType used to provided a precomputed CRC64. +// SourceContentValidationTypeCRC64 is a SourceContentValidationType used to provide a precomputed CRC64. type SourceContentValidationTypeCRC64 []byte // Apply implements the SourceContentValidationType interface for type SourceContentValidationTypeCRC64. @@ -216,7 +216,7 @@ func (SourceContentValidationTypeCRC64) notPubliclyImplementable() {} var _ SourceContentValidationType = (SourceContentValidationTypeCRC64)(nil) -// SourceContentValidationTypeMD5 is a SourceContentValidationType used to provided a precomputed MD5. +// SourceContentValidationTypeMD5 is a SourceContentValidationType used to provide a precomputed MD5. type SourceContentValidationTypeMD5 []byte // Apply implements the SourceContentValidationType interface for type SourceContentValidationTypeMD5. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go index bc0f4533c..fea2f1ec5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go @@ -34,11 +34,11 @@ type LeaseAccessConditions = exported.LeaseAccessConditions // ModifiedAccessConditions contains a group of parameters for specifying access conditions. type ModifiedAccessConditions = exported.ModifiedAccessConditions -// CpkInfo contains a group of parameters for client provided encryption key. -type CpkInfo = generated.CpkInfo +// CPKInfo contains a group of parameters for client provided encryption key. +type CPKInfo = generated.CPKInfo -// CpkScopeInfo contains a group of parameters for client provided encryption scope. -type CpkScopeInfo = generated.CpkScopeInfo +// CPKScopeInfo contains a group of parameters for client provided encryption scope. +type CPKScopeInfo = generated.CPKScopeInfo // HTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. type HTTPHeaders = generated.BlobHTTPHeaders @@ -66,11 +66,11 @@ type DownloadStreamOptions struct { Range HTTPRange AccessConditions *AccessConditions - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo + CPKInfo *CPKInfo + CPKScopeInfo *CPKScopeInfo } -func (o *DownloadStreamOptions) format() (*generated.BlobClientDownloadOptions, *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.ModifiedAccessConditions) { +func (o *DownloadStreamOptions) format() (*generated.BlobClientDownloadOptions, *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil } @@ -81,7 +81,7 @@ func (o *DownloadStreamOptions) format() (*generated.BlobClientDownloadOptions, } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return &basics, leaseAccessConditions, o.CpkInfo, modifiedAccessConditions + return &basics, leaseAccessConditions, o.CPKInfo, modifiedAccessConditions } // --------------------------------------------------------------------------------------------------------------------- @@ -101,10 +101,10 @@ type downloadOptions struct { AccessConditions *AccessConditions // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. 
- CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo + CPKInfo *CPKInfo + CPKScopeInfo *CPKScopeInfo - // Concurrency indicates the maximum number of blocks to download in parallel (0=default) + // Concurrency indicates the maximum number of blocks to download in parallel (0=default). Concurrency uint16 // RetryReaderOptionsPerBlock is used when downloading each block. @@ -117,7 +117,7 @@ func (o *downloadOptions) getBlobPropertiesOptions() *GetPropertiesOptions { } return &GetPropertiesOptions{ AccessConditions: o.AccessConditions, - CpkInfo: o.CpkInfo, + CPKInfo: o.CPKInfo, } } @@ -127,8 +127,8 @@ func (o *downloadOptions) getDownloadBlobOptions(rnge HTTPRange, rangeGetContent } return &DownloadStreamOptions{ AccessConditions: o.AccessConditions, - CpkInfo: o.CpkInfo, - CpkScopeInfo: o.CpkScopeInfo, + CPKInfo: o.CPKInfo, + CPKScopeInfo: o.CPKScopeInfo, Range: rnge, RangeGetContentMD5: rangeGetContentMD5, } @@ -148,13 +148,13 @@ type DownloadBufferOptions struct { // BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob. AccessConditions *AccessConditions - // CpkInfo contains a group of parameters for client provided encryption key. - CpkInfo *CpkInfo + // CPKInfo contains a group of parameters for client provided encryption key. + CPKInfo *CPKInfo - // CpkScopeInfo contains a group of parameters for client provided encryption scope. - CpkScopeInfo *CpkScopeInfo + // CPKScopeInfo contains a group of parameters for client provided encryption scope. + CPKScopeInfo *CPKScopeInfo - // Concurrency indicates the maximum number of blocks to download in parallel (0=default) + // Concurrency indicates the maximum number of blocks to download in parallel (0=default). Concurrency uint16 // RetryReaderOptionsPerBlock is used when downloading each block. @@ -176,8 +176,8 @@ type DownloadFileOptions struct { AccessConditions *AccessConditions // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo + CPKInfo *CPKInfo + CPKScopeInfo *CPKScopeInfo // Concurrency indicates the maximum number of blocks to download in parallel. The default value is 5. Concurrency uint16 @@ -191,7 +191,7 @@ type DownloadFileOptions struct { // DeleteOptions contains the optional parameters for the Client.Delete method. type DeleteOptions struct { // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob - // and all of its snapshots. only: Delete only the blob's snapshots and not the blob itself + // and all of its snapshots. only: Delete only the blob's snapshots and not the blob itself. DeleteSnapshots *DeleteSnapshotsOptionType AccessConditions *AccessConditions // Setting DeleteType to DeleteTypePermanent will permanently delete soft-delete snapshot and/or version blobs. 
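// Illustrative sketch, not part of the patch: metadata maps change from map[string]string
// to map[string]*string in this release, so callers wrap values with to.Ptr when calling
// SetMetadata, CreateSnapshot, StartCopyFromURL and similar methods. The key/value pairs
// below are placeholders.
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func setMetadata(ctx context.Context, client *blob.Client) error {
	metadata := map[string]*string{
		"environment": to.Ptr("test"),
		"owner":       to.Ptr("blob-csi-driver"),
	}
	_, err := client.SetMetadata(ctx, metadata, nil)
	return err
}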
@@ -253,17 +253,17 @@ func (o *SetTierOptions) format() (*generated.BlobClientSetTierOptions, *generat // GetPropertiesOptions contains the optional parameters for the Client.GetProperties method type GetPropertiesOptions struct { AccessConditions *AccessConditions - CpkInfo *CpkInfo + CPKInfo *CPKInfo } func (o *GetPropertiesOptions) format() (*generated.BlobClientGetPropertiesOptions, - *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.ModifiedAccessConditions) { + *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return nil, leaseAccessConditions, o.CpkInfo, modifiedAccessConditions + return nil, leaseAccessConditions, o.CPKInfo, modifiedAccessConditions } // --------------------------------------------------------------------------------------------------------------------- @@ -287,32 +287,32 @@ func (o *SetHTTPHeadersOptions) format() (*generated.BlobClientSetHTTPHeadersOpt // SetMetadataOptions provides set of configurations for Set Metadata on blob operation type SetMetadataOptions struct { AccessConditions *AccessConditions - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo + CPKInfo *CPKInfo + CPKScopeInfo *CPKScopeInfo } -func (o *SetMetadataOptions) format() (*generated.LeaseAccessConditions, *CpkInfo, - *CpkScopeInfo, *ModifiedAccessConditions) { +func (o *SetMetadataOptions) format() (*generated.LeaseAccessConditions, *CPKInfo, + *CPKScopeInfo, *ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions + return leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions } // --------------------------------------------------------------------------------------------------------------------- // CreateSnapshotOptions contains the optional parameters for the Client.CreateSnapshot method. type CreateSnapshotOptions struct { - Metadata map[string]string + Metadata map[string]*string AccessConditions *AccessConditions - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo + CPKInfo *CPKInfo + CPKScopeInfo *CPKScopeInfo } -func (o *CreateSnapshotOptions) format() (*generated.BlobClientCreateSnapshotOptions, *generated.CpkInfo, - *generated.CpkScopeInfo, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { +func (o *CreateSnapshotOptions) format() (*generated.BlobClientCreateSnapshotOptions, *generated.CPKInfo, + *generated.CPKScopeInfo, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { if o == nil { return nil, nil, nil, nil, nil } @@ -321,7 +321,7 @@ func (o *CreateSnapshotOptions) format() (*generated.BlobClientCreateSnapshotOpt return &generated.BlobClientCreateSnapshotOptions{ Metadata: o.Metadata, - }, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions, leaseAccessConditions + }, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions, leaseAccessConditions } // --------------------------------------------------------------------------------------------------------------------- @@ -341,7 +341,7 @@ type StartCopyFromURLOptions struct { // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source // blob or file. 
Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. // See Naming and Referencing Containers, Blobs, and Metadata for more information. - Metadata map[string]string + Metadata map[string]*string // Optional: Indicates the priority with which to rehydrate an archived blob. RehydratePriority *RehydratePriority // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer. @@ -496,6 +496,27 @@ func (o *SetLegalHoldOptions) format() *generated.BlobClientSetLegalHoldOptions // --------------------------------------------------------------------------------------------------------------------- +// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. +type GetSASURLOptions struct { + StartTime *time.Time +} + +func (o *GetSASURLOptions) format() time.Time { + if o == nil { + return time.Time{} + } + + var st time.Time + if o.StartTime != nil { + st = o.StartTime.UTC() + } else { + st = time.Time{} + } + return st +} + +// --------------------------------------------------------------------------------------------------------------------- + // CopyFromURLOptions contains the optional parameters for the Client.CopyFromURL method. type CopyFromURLOptions struct { // Optional. Used to set blob tags in various blob operations. @@ -514,7 +535,7 @@ type CopyFromURLOptions struct { // is not copied from the source blob or file. Note that beginning with // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, // Blobs, and Metadata for more information. - Metadata map[string]string + Metadata map[string]*string // Specify the md5 calculated for the range of bytes that must be read from the copy source. SourceContentMD5 []byte // Optional. Indicates the tier to be set on the blob. @@ -544,3 +565,14 @@ func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions, leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.BlobAccessConditions) return options, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions } + +// --------------------------------------------------------------------------------------------------------------------- + +// GetAccountInfoOptions provides set of options for Client.GetAccountInfo +type GetAccountInfoOptions struct { + // placeholder for future options +} + +func (o *GetAccountInfoOptions) format() *generated.BlobClientGetAccountInfoOptions { + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go index 6da411b78..352d97526 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go @@ -13,22 +13,25 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" ) +// DownloadResponse contains the response from method BlobClient.Download. +type DownloadResponse = generated.BlobClientDownloadResponse + // DownloadStreamResponse contains the response from the DownloadStream method. // To read from the stream, read from the Body field, or call the NewRetryReader method. 
type DownloadStreamResponse struct { - generated.BlobClientDownloadResponse + DownloadResponse ObjectReplicationRules []ObjectReplicationPolicy client *Client getInfo httpGetterInfo - cpkInfo *CpkInfo - cpkScope *CpkScopeInfo + cpkInfo *CPKInfo + cpkScope *CPKScopeInfo } // NewRetryReader constructs new RetryReader stream for reading data. If a connection fails while // reading, it will make additional requests to reestablish a connection and continue reading. // Pass nil for options to accept the default options. -// Callers of this method should not access the DowloadStreamResponse.Body field. +// Callers of this method should not access the DownloadStreamResponse.Body field. func (r *DownloadStreamResponse) NewRetryReader(ctx context.Context, options *RetryReaderOptions) *RetryReader { if options == nil { options = &RetryReaderOptions{} @@ -41,8 +44,8 @@ func (r *DownloadStreamResponse) NewRetryReader(ctx context.Context, options *Re options := DownloadStreamOptions{ Range: getInfo.Range, AccessConditions: accessConditions, - CpkInfo: r.cpkInfo, - CpkScopeInfo: r.cpkScope, + CPKInfo: r.cpkInfo, + CPKScopeInfo: r.cpkScope, } resp, err := r.client.DownloadStream(ctx, &options) if err != nil { @@ -97,6 +100,9 @@ type SetLegalHoldResponse = generated.BlobClientSetLegalHoldResponse // CopyFromURLResponse contains the response from method BlobClient.CopyFromURL. type CopyFromURLResponse = generated.BlobClientCopyFromURLResponse +// GetAccountInfoResponse contains the response from method BlobClient.GetAccountInfo. +type GetAccountInfoResponse = generated.BlobClientGetAccountInfoResponse + // AcquireLeaseResponse contains the response from method BlobClient.AcquireLease. type AcquireLeaseResponse = generated.BlobClientAcquireLeaseResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go index cc1b1365d..1deedb590 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go @@ -58,7 +58,7 @@ type RetryReaderOptions struct { injectedError error } -// RetryReader attempts to read from response, and if there is retriable network error +// RetryReader attempts to read from response, and if there is a retry-able network error // returned during reading, it will retry according to retry reader option through executing // user defined action with provided data to get a new response, and continue the overall reading process // through reading from the new response. @@ -167,7 +167,7 @@ func (s *RetryReader) Read(p []byte) (n int, err error) { // net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors" // which is exactly the behaviour we want. // NOTE: that if caller has forced an early Close from a separate goroutine (separate from the Read) -// then there are two different types of error that may happen - either the one one we check for here, +// then there are two different types of error that may happen - either the one we check for here, // or a net.Error (due to closure of connection). Which one happens depends on timing. We only need this routine // to check for one, since the other is a net.Error, which our main Read retry loop is already handing. 
func (s *RetryReader) wasRetryableEarlyClose(err error) bool { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go index 7b4c8e248..c2d517d8a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go @@ -14,25 +14,25 @@ import ( // ObjectReplicationRules struct type ObjectReplicationRules struct { - RuleId string + RuleID string Status string } -// ObjectReplicationPolicy are deserialized attributes +// ObjectReplicationPolicy are deserialized attributes. type ObjectReplicationPolicy struct { - PolicyId *string + PolicyID *string Rules *[]ObjectReplicationRules } -// deserializeORSPolicies is utility function to deserialize ORS Policies -func deserializeORSPolicies(policies map[string]string) (objectReplicationPolicies []ObjectReplicationPolicy) { +// deserializeORSPolicies is utility function to deserialize ORS Policies. +func deserializeORSPolicies(policies map[string]*string) (objectReplicationPolicies []ObjectReplicationPolicy) { if policies == nil { return nil } // For source blobs (blobs that have policy ids and rule ids applied to them), // the header will be formatted as "x-ms-or-_: {Complete, Failed}". // The value of this header is the status of the replication. - orPolicyStatusHeader := make(map[string]string) + orPolicyStatusHeader := make(map[string]*string) for key, value := range policies { if strings.Contains(key, "or-") && key != "x-ms-or-policy-id" { orPolicyStatusHeader[key] = value @@ -44,19 +44,19 @@ func deserializeORSPolicies(policies map[string]string) (objectReplicationPolici policyAndRuleIDs := strings.Split(strings.Split(key, "or-")[1], "_") policyId, ruleId := policyAndRuleIDs[0], policyAndRuleIDs[1] - parsedResult[policyId] = append(parsedResult[policyId], ObjectReplicationRules{RuleId: ruleId, Status: value}) + parsedResult[policyId] = append(parsedResult[policyId], ObjectReplicationRules{RuleID: ruleId, Status: *value}) } for policyId, rules := range parsedResult { objectReplicationPolicies = append(objectReplicationPolicies, ObjectReplicationPolicy{ - PolicyId: &policyId, + PolicyID: &policyId, Rules: &rules, }) } return } -// ParseHTTPHeaders parses GetPropertiesResponse and returns HTTPHeaders +// ParseHTTPHeaders parses GetPropertiesResponse and returns HTTPHeaders. func ParseHTTPHeaders(resp GetPropertiesResponse) HTTPHeaders { return HTTPHeaders{ BlobContentType: resp.ContentType, diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go new file mode 100644 index 000000000..ad653c1c4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go @@ -0,0 +1,156 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package bloberror + +import ( + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// HasCode returns true if the provided error is an *azcore.ResponseError +// with its ErrorCode field equal to one of the specified Codes. 
+func HasCode(err error, codes ...Code) bool { + var respErr *azcore.ResponseError + if !errors.As(err, &respErr) { + return false + } + + for _, code := range codes { + if respErr.ErrorCode == string(code) { + return true + } + } + + return false +} + +// Code - Error codes returned by the service +type Code = generated.StorageErrorCode + +const ( + AccountAlreadyExists Code = "AccountAlreadyExists" + AccountBeingCreated Code = "AccountBeingCreated" + AccountIsDisabled Code = "AccountIsDisabled" + AppendPositionConditionNotMet Code = "AppendPositionConditionNotMet" + AuthenticationFailed Code = "AuthenticationFailed" + AuthorizationFailure Code = "AuthorizationFailure" + AuthorizationPermissionMismatch Code = "AuthorizationPermissionMismatch" + AuthorizationProtocolMismatch Code = "AuthorizationProtocolMismatch" + AuthorizationResourceTypeMismatch Code = "AuthorizationResourceTypeMismatch" + AuthorizationServiceMismatch Code = "AuthorizationServiceMismatch" + AuthorizationSourceIPMismatch Code = "AuthorizationSourceIPMismatch" + BlobAlreadyExists Code = "BlobAlreadyExists" + BlobArchived Code = "BlobArchived" + BlobBeingRehydrated Code = "BlobBeingRehydrated" + BlobImmutableDueToPolicy Code = "BlobImmutableDueToPolicy" + BlobNotArchived Code = "BlobNotArchived" + BlobNotFound Code = "BlobNotFound" + BlobOverwritten Code = "BlobOverwritten" + BlobTierInadequateForContentLength Code = "BlobTierInadequateForContentLength" + BlobUsesCustomerSpecifiedEncryption Code = "BlobUsesCustomerSpecifiedEncryption" + BlockCountExceedsLimit Code = "BlockCountExceedsLimit" + BlockListTooLong Code = "BlockListTooLong" + CannotChangeToLowerTier Code = "CannotChangeToLowerTier" + CannotVerifyCopySource Code = "CannotVerifyCopySource" + ConditionHeadersNotSupported Code = "ConditionHeadersNotSupported" + ConditionNotMet Code = "ConditionNotMet" + ContainerAlreadyExists Code = "ContainerAlreadyExists" + ContainerBeingDeleted Code = "ContainerBeingDeleted" + ContainerDisabled Code = "ContainerDisabled" + ContainerNotFound Code = "ContainerNotFound" + ContentLengthLargerThanTierLimit Code = "ContentLengthLargerThanTierLimit" + CopyAcrossAccountsNotSupported Code = "CopyAcrossAccountsNotSupported" + CopyIDMismatch Code = "CopyIdMismatch" + EmptyMetadataKey Code = "EmptyMetadataKey" + FeatureVersionMismatch Code = "FeatureVersionMismatch" + IncrementalCopyBlobMismatch Code = "IncrementalCopyBlobMismatch" + IncrementalCopyOfEralierVersionSnapshotNotAllowed Code = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" + IncrementalCopySourceMustBeSnapshot Code = "IncrementalCopySourceMustBeSnapshot" + InfiniteLeaseDurationRequired Code = "InfiniteLeaseDurationRequired" + InsufficientAccountPermissions Code = "InsufficientAccountPermissions" + InternalError Code = "InternalError" + InvalidAuthenticationInfo Code = "InvalidAuthenticationInfo" + InvalidBlobOrBlock Code = "InvalidBlobOrBlock" + InvalidBlobTier Code = "InvalidBlobTier" + InvalidBlobType Code = "InvalidBlobType" + InvalidBlockID Code = "InvalidBlockId" + InvalidBlockList Code = "InvalidBlockList" + InvalidHTTPVerb Code = "InvalidHttpVerb" + InvalidHeaderValue Code = "InvalidHeaderValue" + InvalidInput Code = "InvalidInput" + InvalidMD5 Code = "InvalidMd5" + InvalidMetadata Code = "InvalidMetadata" + InvalidOperation Code = "InvalidOperation" + InvalidPageRange Code = "InvalidPageRange" + InvalidQueryParameterValue Code = "InvalidQueryParameterValue" + InvalidRange Code = "InvalidRange" + InvalidResourceName Code = "InvalidResourceName" + 
InvalidSourceBlobType Code = "InvalidSourceBlobType" + InvalidSourceBlobURL Code = "InvalidSourceBlobUrl" + InvalidURI Code = "InvalidUri" + InvalidVersionForPageBlobOperation Code = "InvalidVersionForPageBlobOperation" + InvalidXMLDocument Code = "InvalidXmlDocument" + InvalidXMLNodeValue Code = "InvalidXmlNodeValue" + LeaseAlreadyBroken Code = "LeaseAlreadyBroken" + LeaseAlreadyPresent Code = "LeaseAlreadyPresent" + LeaseIDMismatchWithBlobOperation Code = "LeaseIdMismatchWithBlobOperation" + LeaseIDMismatchWithContainerOperation Code = "LeaseIdMismatchWithContainerOperation" + LeaseIDMismatchWithLeaseOperation Code = "LeaseIdMismatchWithLeaseOperation" + LeaseIDMissing Code = "LeaseIdMissing" + LeaseIsBreakingAndCannotBeAcquired Code = "LeaseIsBreakingAndCannotBeAcquired" + LeaseIsBreakingAndCannotBeChanged Code = "LeaseIsBreakingAndCannotBeChanged" + LeaseIsBrokenAndCannotBeRenewed Code = "LeaseIsBrokenAndCannotBeRenewed" + LeaseLost Code = "LeaseLost" + LeaseNotPresentWithBlobOperation Code = "LeaseNotPresentWithBlobOperation" + LeaseNotPresentWithContainerOperation Code = "LeaseNotPresentWithContainerOperation" + LeaseNotPresentWithLeaseOperation Code = "LeaseNotPresentWithLeaseOperation" + MD5Mismatch Code = "Md5Mismatch" + CRC64Mismatch Code = "Crc64Mismatch" + MaxBlobSizeConditionNotMet Code = "MaxBlobSizeConditionNotMet" + MetadataTooLarge Code = "MetadataTooLarge" + MissingContentLengthHeader Code = "MissingContentLengthHeader" + MissingRequiredHeader Code = "MissingRequiredHeader" + MissingRequiredQueryParameter Code = "MissingRequiredQueryParameter" + MissingRequiredXMLNode Code = "MissingRequiredXmlNode" + MultipleConditionHeadersNotSupported Code = "MultipleConditionHeadersNotSupported" + NoAuthenticationInformation Code = "NoAuthenticationInformation" + NoPendingCopyOperation Code = "NoPendingCopyOperation" + OperationNotAllowedOnIncrementalCopyBlob Code = "OperationNotAllowedOnIncrementalCopyBlob" + OperationTimedOut Code = "OperationTimedOut" + OutOfRangeInput Code = "OutOfRangeInput" + OutOfRangeQueryParameterValue Code = "OutOfRangeQueryParameterValue" + PendingCopyOperation Code = "PendingCopyOperation" + PreviousSnapshotCannotBeNewer Code = "PreviousSnapshotCannotBeNewer" + PreviousSnapshotNotFound Code = "PreviousSnapshotNotFound" + PreviousSnapshotOperationNotSupported Code = "PreviousSnapshotOperationNotSupported" + RequestBodyTooLarge Code = "RequestBodyTooLarge" + RequestURLFailedToParse Code = "RequestUrlFailedToParse" + ResourceAlreadyExists Code = "ResourceAlreadyExists" + ResourceNotFound Code = "ResourceNotFound" + ResourceTypeMismatch Code = "ResourceTypeMismatch" + SequenceNumberConditionNotMet Code = "SequenceNumberConditionNotMet" + SequenceNumberIncrementTooLarge Code = "SequenceNumberIncrementTooLarge" + ServerBusy Code = "ServerBusy" + SnapshotCountExceeded Code = "SnapshotCountExceeded" + SnapshotOperationRateExceeded Code = "SnapshotOperationRateExceeded" + SnapshotsPresent Code = "SnapshotsPresent" + SourceConditionNotMet Code = "SourceConditionNotMet" + SystemInUse Code = "SystemInUse" + TargetConditionNotMet Code = "TargetConditionNotMet" + UnauthorizedBlobOverwrite Code = "UnauthorizedBlobOverwrite" + UnsupportedHTTPVerb Code = "UnsupportedHttpVerb" + UnsupportedHeader Code = "UnsupportedHeader" + UnsupportedQueryParameter Code = "UnsupportedQueryParameter" + UnsupportedXMLNode Code = "UnsupportedXmlNode" +) + +var ( + // MissingSharedKeyCredential - Error is returned when SAS URL is being created without SharedKeyCredential. 
+ MissingSharedKeyCredential = errors.New("SAS can only be signed with a SharedKeyCredential") +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go index 1168fe68c..d7e5aa0dc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go @@ -12,6 +12,7 @@ import ( "encoding/base64" "errors" "io" + "math" "os" "sync" "time" @@ -20,6 +21,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" @@ -29,9 +31,7 @@ import ( ) // ClientOptions contains the optional parameters when creating a Client. -type ClientOptions struct { - azcore.ClientOptions -} +type ClientOptions base.ClientOptions // Client defines a set of operations applicable to block blobs. type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient] @@ -41,7 +41,7 @@ type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + authPolicy := shared.NewStorageChallengePolicy(cred) conOptions := shared.GetClientOptions(options) conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) @@ -164,6 +164,19 @@ func (bb *Client) Upload(ctx context.Context, body io.ReadSeekCloser, options *U return resp, err } +// UploadBlobFromURL - The Put Blob from URL operation creates a new Block Blob where the contents of the blob are read from +// a given URL. Partial updates are not supported with Put Blob from URL; the content of an existing blob is overwritten +// with the content of the new blob. To perform partial updates to a block blob’s contents using a source URL, use the Put +// Block from URL API in conjunction with Put Block List. +// For more information, see https://learn.microsoft.com/rest/api/storageservices/put-blob-from-url +func (bb *Client) UploadBlobFromURL(ctx context.Context, copySource string, options *UploadBlobFromURLOptions) (UploadBlobFromURLResponse, error) { + opts, httpHeaders, leaseAccessConditions, cpkInfo, cpkSourceInfo, modifiedAccessConditions, sourceModifiedConditions := options.format() + + resp, err := bb.generated().PutBlobFromURL(ctx, int64(0), copySource, opts, httpHeaders, leaseAccessConditions, cpkInfo, cpkSourceInfo, modifiedAccessConditions, sourceModifiedConditions) + + return resp, err +} + // StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList. // Note that the http client closes the body stream after the request is sent to the service. // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block. 
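// Illustrative sketch, not part of the patch: GetSASURL now takes the expiry plus an
// options struct carrying the optional start time, and returns the sentinel
// bloberror.MissingSharedKeyCredential instead of an ad-hoc error string, so callers
// can test for it with errors.Is or bloberror.HasCode.
package example

import (
	"errors"
	"fmt"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)

func blobSAS(client *blob.Client) (string, error) {
	start := time.Now().UTC()
	url, err := client.GetSASURL(
		sas.BlobPermissions{Read: true},
		start.Add(time.Hour), // expiry
		&blob.GetSASURLOptions{StartTime: &start},
	)
	if errors.Is(err, bloberror.MissingSharedKeyCredential) {
		return "", fmt.Errorf("client was not created with a shared key credential: %w", err)
	}
	return url, err
}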
@@ -189,12 +202,11 @@ func (bb *Client) StageBlock(ctx context.Context, base64BlockID string, body io. // StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList. // If count is CountToEnd (0), then data is read from specified offset to the end. // For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url. -func (bb *Client) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL string, - contentLength int64, options *StageBlockFromURLOptions) (StageBlockFromURLResponse, error) { +func (bb *Client) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL string, options *StageBlockFromURLOptions) (StageBlockFromURLResponse, error) { stageBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions := options.format() - resp, err := bb.generated().StageBlockFromURL(ctx, base64BlockID, contentLength, sourceURL, stageBlockFromURLOptions, + resp, err := bb.generated().StageBlockFromURL(ctx, base64BlockID, 0, sourceURL, stageBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions) return resp, err @@ -218,8 +230,8 @@ func (bb *Client) CommitBlockList(ctx context.Context, base64BlockIDs []string, var commitOptions *generated.BlockBlobClientCommitBlockListOptions var headers *generated.BlobHTTPHeaders var leaseAccess *blob.LeaseAccessConditions - var cpkInfo *generated.CpkInfo - var cpkScope *generated.CpkScopeInfo + var cpkInfo *generated.CPKInfo + var cpkScope *generated.CPKScopeInfo var modifiedAccess *generated.ModifiedAccessConditions if options != nil { @@ -238,8 +250,8 @@ func (bb *Client) CommitBlockList(ctx context.Context, base64BlockIDs []string, headers = options.HTTPHeaders leaseAccess, modifiedAccess = exported.FormatBlobAccessConditions(options.AccessConditions) - cpkInfo = options.CpkInfo - cpkScope = options.CpkScopeInfo + cpkInfo = options.CPKInfo + cpkScope = options.CPKScopeInfo } resp, err := bb.generated().CommitBlockList(ctx, blockLookupList, commitOptions, headers, leaseAccess, cpkInfo, cpkScope, modifiedAccess) @@ -291,7 +303,7 @@ func (bb *Client) SetLegalHold(ctx context.Context, legalHold bool, options *blo // SetTier operation sets the tier on a blob. The operation is allowed on a page // blob in a premium storage account and on a block blob in a blob storage account (locally -// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and +// redundant storage only). A premium page blob's tier determines the allowed size, IOPs, and // bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation // does not update the blob's ETag. // For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. @@ -316,6 +328,12 @@ func (bb *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOption return bb.BlobClient().GetProperties(ctx, o) } +// GetAccountInfo provides account level information +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. +func (bb *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOptions) (blob.GetAccountInfoResponse, error) { + return bb.BlobClient().GetAccountInfo(ctx, o) +} + // SetHTTPHeaders changes a blob's HTTP headers. 
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. func (bb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { @@ -324,7 +342,7 @@ func (bb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeade // SetMetadata changes a blob's metadata. // https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. -func (bb *Client) SetMetadata(ctx context.Context, metadata map[string]string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { +func (bb *Client) SetMetadata(ctx context.Context, metadata map[string]*string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { return bb.BlobClient().SetMetadata(ctx, metadata, o) } @@ -370,31 +388,26 @@ func (bb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.Co // uploadFromReader uploads a buffer in blocks to a block blob. func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, actualSize int64, o *uploadFromReaderOptions) (uploadFromReaderResponse, error) { - readerSize := actualSize if o.BlockSize == 0 { // If bufferSize > (MaxStageBlockBytes * MaxBlocks), then error - if readerSize > MaxStageBlockBytes*MaxBlocks { + if actualSize > MaxStageBlockBytes*MaxBlocks { return uploadFromReaderResponse{}, errors.New("buffer is too large to upload to a block blob") } // If bufferSize <= MaxUploadBlobBytes, then Upload should be used with just 1 I/O request - if readerSize <= MaxUploadBlobBytes { + if actualSize <= MaxUploadBlobBytes { o.BlockSize = MaxUploadBlobBytes // Default if unspecified } else { - if remainder := readerSize % MaxBlocks; remainder > 0 { - // ensure readerSize is a multiple of MaxBlocks - readerSize += (MaxBlocks - remainder) - } - o.BlockSize = readerSize / MaxBlocks // buffer / max blocks = block size to use all 50,000 blocks - if o.BlockSize < blob.DefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB + o.BlockSize = int64(math.Ceil(float64(actualSize) / MaxBlocks)) // ceil(buffer / max blocks) = block size to use all 50,000 blocks + if o.BlockSize < blob.DefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB o.BlockSize = blob.DefaultDownloadBlockSize } // StageBlock will be called with blockSize blocks and a Concurrency of (BufferSize / BlockSize). 
} } - if readerSize <= MaxUploadBlobBytes { + if actualSize <= MaxUploadBlobBytes { // If the size can fit in 1 Upload call, do it this way - var body io.ReadSeeker = io.NewSectionReader(reader, 0, readerSize) + var body io.ReadSeeker = io.NewSectionReader(reader, 0, actualSize) if o.Progress != nil { body = streaming.NewRequestProgress(shared.NopCloser(body), o.Progress) } @@ -405,19 +418,27 @@ func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, actu return toUploadReaderAtResponseFromUploadResponse(resp), err } - var numBlocks = uint16(((readerSize - 1) / o.BlockSize) + 1) + var numBlocks = uint16(((actualSize - 1) / o.BlockSize) + 1) if numBlocks > MaxBlocks { // prevent any math bugs from attempting to upload too many blocks which will always fail return uploadFromReaderResponse{}, errors.New("block limit exceeded") } + if log.Should(exported.EventUpload) { + urlparts, err := blob.ParseURL(bb.generated().Endpoint()) + if err == nil { + log.Writef(exported.EventUpload, "blob name %s actual size %v block-size %v block-count %v", + urlparts.BlobName, actualSize, o.BlockSize, numBlocks) + } + } + blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs progress := int64(0) progressLock := &sync.Mutex{} err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{ OperationName: "uploadFromReader", - TransferSize: readerSize, + TransferSize: actualSize, ChunkSize: o.BlockSize, Concurrency: o.Concurrency, Operation: func(ctx context.Context, offset int64, chunkSize int64) error { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go index 6a7796f44..cb1162640 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go @@ -9,7 +9,7 @@ package blockblob import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" const ( - // CountToEnd specifies the end of the file + // CountToEnd specifies the end of the file. CountToEnd = 0 _1MiB = 1024 * 1024 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_unix.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_unix.go index dcccc37c0..5f7f4d828 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_unix.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_unix.go @@ -1,6 +1,6 @@ -//go:build go1.18 && (linux || darwin || freebsd || openbsd || netbsd || solaris) +//go:build go1.18 && (linux || darwin || dragonfly || freebsd || openbsd || netbsd || solaris || aix) // +build go1.18 -// +build linux darwin freebsd openbsd netbsd solaris +// +build linux darwin dragonfly freebsd openbsd netbsd solaris aix // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. 
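// Illustrative arithmetic, not part of the patch: per the comments in the uploadFromReader
// hunk above, the block count limit is 50,000 and the minimum block size is the 4 MiB
// DefaultDownloadBlockSize. With the math.Ceil change, a 10 GiB reader computes ~210 KiB
// and is rounded up to 4 MiB (2,560 blocks), while a 1 TiB reader computes ~21 MiB and
// uses all 50,000 blocks. A quick check:
package main

import (
	"fmt"
	"math"
)

func main() {
	const maxBlocks = 50_000
	const minBlock = 4 * 1024 * 1024 // 4 MiB floor
	for _, size := range []int64{10 << 30, 1 << 40} { // 10 GiB, 1 TiB
		blockSize := int64(math.Ceil(float64(size) / maxBlocks))
		if blockSize < minBlock {
			blockSize = minBlock
		}
		numBlocks := (size-1)/blockSize + 1
		fmt.Printf("size=%d blockSize=%d blocks=%d\n", size, blockSize, numBlocks)
	}
}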
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_windows.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_windows.go index 2acef3a72..3f966d65b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_windows.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_windows.go @@ -26,7 +26,9 @@ func newMMB(size int64) (mmb, error) { if err != nil { return nil, os.NewSyscallError("CreateFileMapping", err) } - defer syscall.CloseHandle(hMMF) + defer func() { + _ = syscall.CloseHandle(hMMF) + }() addr, err := syscall.MapViewOfFile(hMMF, access, 0, 0, uintptr(size)) if err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go index 9da768bdb..ba1b9ee9f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go @@ -20,7 +20,7 @@ import ( // Block - Represents a single block in a block blob. It describes the block's ID and size. type Block = generated.Block -// BlockList - type of blocklist (committed/uncommitted) +// BlockList - can be uncommitted or committed blocks (committed/uncommitted) type BlockList = generated.BlockList // Request Model Declaration ------------------------------------------------------------------------------------------- @@ -31,7 +31,7 @@ type UploadOptions struct { Tags map[string]string // Optional. Specifies a user-defined name-value pair associated with the blob. - Metadata map[string]string + Metadata map[string]*string // Optional. Indicates the tier to be set on the blob. Tier *blob.AccessTier @@ -40,8 +40,8 @@ type UploadOptions struct { TransactionalContentMD5 []byte HTTPHeaders *blob.HTTPHeaders - CpkInfo *blob.CpkInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo AccessConditions *blob.AccessConditions LegalHold *bool ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting @@ -49,7 +49,7 @@ type UploadOptions struct { } func (o *UploadOptions) format() (*generated.BlockBlobClientUploadOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions, - *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions) { + *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil, nil } @@ -65,16 +65,66 @@ func (o *UploadOptions) format() (*generated.BlockBlobClientUploadOptions, *gene } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return &basics, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions + return &basics, o.HTTPHeaders, leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UploadBlobFromURLOptions contains the optional parameters for the Client.UploadBlobFromURL method. +type UploadBlobFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + Tags map[string]string + + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Optional, default is true. 
Indicates if properties from the source blob should be copied. + CopySourceBlobProperties *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. + Metadata map[string]*string + + // Optional. Specifies the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Optional. Indicates the tier to be set on the blob. + Tier *blob.AccessTier + + // Additional optional headers + HTTPHeaders *blob.HTTPHeaders + AccessConditions *blob.AccessConditions + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo + SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions +} + +func (o *UploadBlobFromURLOptions) format() (*generated.BlockBlobClientPutBlobFromURLOptions, *generated.BlobHTTPHeaders, + *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions, + *generated.SourceModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil, nil + } + + options := generated.BlockBlobClientPutBlobFromURLOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), + CopySourceAuthorization: o.CopySourceAuthorization, + CopySourceBlobProperties: o.CopySourceBlobProperties, + Metadata: o.Metadata, + SourceContentMD5: o.SourceContentMD5, + Tier: o.Tier, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &options, o.HTTPHeaders, leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions, o.SourceModifiedAccessConditions } // --------------------------------------------------------------------------------------------------------------------- // StageBlockOptions contains the optional parameters for the Client.StageBlock method. type StageBlockOptions struct { - CpkInfo *blob.CpkInfo + CPKInfo *blob.CPKInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKScopeInfo *blob.CPKScopeInfo LeaseAccessConditions *blob.LeaseAccessConditions @@ -84,12 +134,12 @@ type StageBlockOptions struct { } // StageBlockOptions contains the optional parameters for the Client.StageBlock method. -func (o *StageBlockOptions) format() (*generated.BlockBlobClientStageBlockOptions, *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.CpkScopeInfo) { +func (o *StageBlockOptions) format() (*generated.BlockBlobClientStageBlockOptions, *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.CPKScopeInfo) { if o == nil { return nil, nil, nil, nil } - return &generated.BlockBlobClientStageBlockOptions{}, o.LeaseAccessConditions, o.CpkInfo, o.CpkScopeInfo + return &generated.BlockBlobClientStageBlockOptions{}, o.LeaseAccessConditions, o.CPKInfo, o.CPKScopeInfo } // --------------------------------------------------------------------------------------------------------------------- @@ -109,12 +159,12 @@ type StageBlockFromURLOptions struct { // Range specifies a range of bytes. The default value is all bytes. 
Range blob.HTTPRange - CpkInfo *blob.CpkInfo + CPKInfo *blob.CPKInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKScopeInfo *blob.CPKScopeInfo } -func (o *StageBlockFromURLOptions) format() (*generated.BlockBlobClientStageBlockFromURLOptions, *generated.CpkInfo, *generated.CpkScopeInfo, *generated.LeaseAccessConditions, *generated.SourceModifiedAccessConditions) { +func (o *StageBlockFromURLOptions) format() (*generated.BlockBlobClientStageBlockFromURLOptions, *generated.CPKInfo, *generated.CPKScopeInfo, *generated.LeaseAccessConditions, *generated.SourceModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil } @@ -128,7 +178,7 @@ func (o *StageBlockFromURLOptions) format() (*generated.BlockBlobClientStageBloc o.SourceContentValidation.Apply(options) } - return options, o.CpkInfo, o.CpkScopeInfo, o.LeaseAccessConditions, o.SourceModifiedAccessConditions + return options, o.CPKInfo, o.CPKScopeInfo, o.LeaseAccessConditions, o.SourceModifiedAccessConditions } // --------------------------------------------------------------------------------------------------------------------- @@ -136,15 +186,15 @@ func (o *StageBlockFromURLOptions) format() (*generated.BlockBlobClientStageBloc // CommitBlockListOptions contains the optional parameters for Client.CommitBlockList method. type CommitBlockListOptions struct { Tags map[string]string - Metadata map[string]string + Metadata map[string]*string RequestID *string Tier *blob.AccessTier Timeout *int32 TransactionalContentCRC64 []byte TransactionalContentMD5 []byte HTTPHeaders *blob.HTTPHeaders - CpkInfo *blob.CpkInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo AccessConditions *blob.AccessConditions LegalHold *bool ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting @@ -183,7 +233,7 @@ type uploadFromReaderOptions struct { HTTPHeaders *blob.HTTPHeaders // Metadata indicates the metadata to be associated with the blob when PutBlockList is called. - Metadata map[string]string + Metadata map[string]*string // AccessConditions indicates the access conditions for the block blob. AccessConditions *blob.AccessConditions @@ -195,8 +245,8 @@ type uploadFromReaderOptions struct { Tags map[string]string // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. - CpkInfo *blob.CpkInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo // Concurrency indicates the maximum number of blocks to upload in parallel (0=default) Concurrency uint16 @@ -209,17 +259,17 @@ type uploadFromReaderOptions struct { TransactionalContentMD5 []byte } -// UploadBufferOptions provides set of configurations for UploadBuffer operation +// UploadBufferOptions provides set of configurations for UploadBuffer operation. type UploadBufferOptions = uploadFromReaderOptions -// UploadFileOptions provides set of configurations for UploadFile operation +// UploadFileOptions provides set of configurations for UploadFile operation. 
type UploadFileOptions = uploadFromReaderOptions func (o *uploadFromReaderOptions) getStageBlockOptions() *StageBlockOptions { leaseAccessConditions, _ := exported.FormatBlobAccessConditions(o.AccessConditions) return &StageBlockOptions{ - CpkInfo: o.CpkInfo, - CpkScopeInfo: o.CpkScopeInfo, + CPKInfo: o.CPKInfo, + CPKScopeInfo: o.CPKScopeInfo, LeaseAccessConditions: leaseAccessConditions, TransactionalValidation: o.TransactionalValidation, @@ -233,8 +283,8 @@ func (o *uploadFromReaderOptions) getUploadBlockBlobOptions() *UploadOptions { Tier: o.AccessTier, HTTPHeaders: o.HTTPHeaders, AccessConditions: o.AccessConditions, - CpkInfo: o.CpkInfo, - CpkScopeInfo: o.CpkScopeInfo, + CPKInfo: o.CPKInfo, + CPKScopeInfo: o.CPKScopeInfo, } } @@ -244,16 +294,16 @@ func (o *uploadFromReaderOptions) getCommitBlockListOptions() *CommitBlockListOp Metadata: o.Metadata, Tier: o.AccessTier, HTTPHeaders: o.HTTPHeaders, - CpkInfo: o.CpkInfo, - CpkScopeInfo: o.CpkScopeInfo, + CPKInfo: o.CPKInfo, + CPKScopeInfo: o.CPKScopeInfo, } } // --------------------------------------------------------------------------------------------------------------------- -// UploadStreamOptions provides set of configurations for UploadStream operation +// UploadStreamOptions provides set of configurations for UploadStream operation. type UploadStreamOptions struct { - // BlockSize defines the size of the buffer used during upload. The default and mimimum value is 1 MiB. + // BlockSize defines the size of the buffer used during upload. The default and minimum value is 1 MiB. BlockSize int64 // Concurrency defines the max number of concurrent uploads to be performed to upload the file. @@ -263,12 +313,12 @@ type UploadStreamOptions struct { TransactionalValidation blob.TransferValidationType HTTPHeaders *blob.HTTPHeaders - Metadata map[string]string + Metadata map[string]*string AccessConditions *blob.AccessConditions AccessTier *blob.AccessTier Tags map[string]string - CpkInfo *blob.CpkInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo } func (u *UploadStreamOptions) setDefaults() { @@ -289,8 +339,8 @@ func (u *UploadStreamOptions) getStageBlockOptions() *StageBlockOptions { leaseAccessConditions, _ := exported.FormatBlobAccessConditions(u.AccessConditions) return &StageBlockOptions{ TransactionalValidation: u.TransactionalValidation, - CpkInfo: u.CpkInfo, - CpkScopeInfo: u.CpkScopeInfo, + CPKInfo: u.CPKInfo, + CPKScopeInfo: u.CPKScopeInfo, LeaseAccessConditions: leaseAccessConditions, } } @@ -305,8 +355,8 @@ func (u *UploadStreamOptions) getCommitBlockListOptions() *CommitBlockListOption Metadata: u.Metadata, Tier: u.AccessTier, HTTPHeaders: u.HTTPHeaders, - CpkInfo: u.CpkInfo, - CpkScopeInfo: u.CpkScopeInfo, + CPKInfo: u.CPKInfo, + CPKScopeInfo: u.CPKScopeInfo, AccessConditions: u.AccessConditions, } } @@ -321,27 +371,27 @@ func (u *UploadStreamOptions) getUploadOptions() *UploadOptions { Metadata: u.Metadata, Tier: u.AccessTier, HTTPHeaders: u.HTTPHeaders, - CpkInfo: u.CpkInfo, - CpkScopeInfo: u.CpkScopeInfo, + CPKInfo: u.CPKInfo, + CPKScopeInfo: u.CPKScopeInfo, AccessConditions: u.AccessConditions, } } // --------------------------------------------------------------------------------------------------------------------- -// ExpiryType defines values for ExpiryType +// ExpiryType defines values for ExpiryType. type ExpiryType = exported.ExpiryType -// ExpiryTypeAbsolute defines the absolute time for the blob expiry +// ExpiryTypeAbsolute defines the absolute time for the blob expiry. 
type ExpiryTypeAbsolute = exported.ExpiryTypeAbsolute -// ExpiryTypeRelativeToNow defines the duration relative to now for the blob expiry +// ExpiryTypeRelativeToNow defines the duration relative to now for the blob expiry. type ExpiryTypeRelativeToNow = exported.ExpiryTypeRelativeToNow -// ExpiryTypeRelativeToCreation defines the duration relative to creation for the blob expiry +// ExpiryTypeRelativeToCreation defines the duration relative to creation for the blob expiry. type ExpiryTypeRelativeToCreation = exported.ExpiryTypeRelativeToCreation -// ExpiryTypeNever defines that the blob will be set to never expire +// ExpiryTypeNever defines that the blob will be set to never expire. type ExpiryTypeNever = exported.ExpiryTypeNever // SetExpiryOptions contains the optional parameters for the Client.SetExpiry method. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go index de2ac0106..917f71809 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go @@ -16,6 +16,9 @@ import ( // UploadResponse contains the response from method Client.Upload. type UploadResponse = generated.BlockBlobClientUploadResponse +// UploadBlobFromURLResponse contains the response from the method Client.UploadBlobFromURL +type UploadBlobFromURLResponse = generated.BlockBlobClientPutBlobFromURLResponse + // StageBlockResponse contains the response from method Client.StageBlock. type StageBlockResponse = generated.BlockBlobClientStageBlockResponse @@ -110,5 +113,5 @@ type UploadBufferResponse = uploadFromReaderResponse // UploadStreamResponse contains the response from method Client.CommitBlockList. type UploadStreamResponse = CommitBlockListResponse -// SetExpiryResponse contains the response from method BlobClient.SetExpiry. +// SetExpiryResponse contains the response from method Client.SetExpiry. type SetExpiryResponse = generated.BlobClientSetExpiryResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml index e0623f50e..f5100e131 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml @@ -26,3 +26,8 @@ stages: parameters: ServiceDirectory: 'storage/azblob' RunLiveTests: true + EnvVars: + AZURE_CLIENT_ID: $(AZBLOB_CLIENT_ID) + AZURE_TENANT_ID: $(AZBLOB_TENANT_ID) + AZURE_CLIENT_SECRET: $(AZBLOB_CLIENT_SECRET) + AZURE_SUBSCRIPTION_ID: $(AZBLOB_SUBSCRIPTION_ID) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go index 59299acb6..5c4b719c4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go @@ -13,14 +13,13 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" ) // ClientOptions contains the optional parameters when creating a Client. 
-type ClientOptions struct { - azcore.ClientOptions -} +type ClientOptions base.ClientOptions // Client represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob. type Client struct { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go index d4c3262d9..c42fcdec7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go @@ -10,7 +10,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" ) -// PublicAccessType defines values for AccessType - private (default) or blob or container +// PublicAccessType defines values for AccessType - private (default) or blob or container. type PublicAccessType = generated.PublicAccessType const ( @@ -23,7 +23,7 @@ func PossiblePublicAccessTypeValues() []PublicAccessType { return generated.PossiblePublicAccessTypeValues() } -// DeleteSnapshotsOptionType defines values for DeleteSnapshotsOptionType +// DeleteSnapshotsOptionType defines values for DeleteSnapshotsOptionType. type DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionType const ( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/batch_builder.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/batch_builder.go new file mode 100644 index 000000000..83edea72b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/batch_builder.go @@ -0,0 +1,94 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package container + +import ( + "context" + "fmt" + "net/url" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" +) + +// BatchBuilder is used for creating the batch operations list. It contains the list of either delete or set tier sub-requests. +// NOTE: All sub-requests in the batch must be of the same type, either delete or set tier. +type BatchBuilder struct { + endpoint string + authPolicy policy.Policy + subRequests []*policy.Request + operationType *exported.BlobBatchOperationType +} + +func (bb *BatchBuilder) checkOperationType(operationType exported.BlobBatchOperationType) error { + if bb.operationType == nil { + bb.operationType = &operationType + return nil + } + if *bb.operationType != operationType { + return fmt.Errorf("BlobBatch only supports one operation type per batch and is already being used for %s operations", *bb.operationType) + } + return nil +} + +// Delete operation is used to add delete sub-request to the batch builder. 
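+// It returns an error if the builder is already being used for set tier sub-requests.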
+func (bb *BatchBuilder) Delete(blobName string, options *BatchDeleteOptions) error { + err := bb.checkOperationType(exported.BatchDeleteOperationType) + if err != nil { + return err + } + + blobName = url.PathEscape(blobName) + blobURL := runtime.JoinPaths(bb.endpoint, blobName) + + blobClient, err := blob.NewClientWithNoCredential(blobURL, nil) + if err != nil { + return err + } + + deleteOptions, leaseInfo, accessConditions := options.format() + req, err := getGeneratedBlobClient(blobClient).DeleteCreateRequest(context.TODO(), deleteOptions, leaseInfo, accessConditions) + if err != nil { + return err + } + + // remove x-ms-version header + exported.UpdateSubRequestHeaders(req) + + bb.subRequests = append(bb.subRequests, req) + return nil +} + +// SetTier operation is used to add set tier sub-request to the batch builder. +func (bb *BatchBuilder) SetTier(blobName string, accessTier blob.AccessTier, options *BatchSetTierOptions) error { + err := bb.checkOperationType(exported.BatchSetTierOperationType) + if err != nil { + return err + } + + blobName = url.PathEscape(blobName) + blobURL := runtime.JoinPaths(bb.endpoint, blobName) + + blobClient, err := blob.NewClientWithNoCredential(blobURL, nil) + if err != nil { + return err + } + + setTierOptions, leaseInfo, accessConditions := options.format() + req, err := getGeneratedBlobClient(blobClient).SetTierCreateRequest(context.TODO(), accessTier, setTierOptions, leaseInfo, accessConditions) + if err != nil { + return err + } + + // remove x-ms-version header + exported.UpdateSubRequestHeaders(req) + + bb.subRequests = append(bb.subRequests, req) + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go index 091dab85e..2ac5a820a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go @@ -7,8 +7,12 @@ package container import ( + "bytes" "context" "errors" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "net/http" "net/url" "time" @@ -28,9 +32,7 @@ import ( ) // ClientOptions contains the optional parameters when creating a Client. -type ClientOptions struct { - azcore.ClientOptions -} +type ClientOptions base.ClientOptions // Client represents a URL to the Azure Storage container allowing you to manipulate its blobs. type Client base.Client[generated.ContainerClient] @@ -40,12 +42,12 @@ type Client base.Client[generated.ContainerClient] // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + authPolicy := shared.NewStorageChallengePolicy(cred) conOptions := shared.GetClientOptions(options) conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - return (*Client)(base.NewContainerClient(containerURL, pl, nil)), nil + return (*Client)(base.NewContainerClient(containerURL, pl, &cred)), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. 
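The batch builder is used together with Client.NewBatchBuilder and Client.SubmitBatch; a minimal sketch, assuming an existing *container.Client named client and the handle helper used in the package examples:

	bb, err := client.NewBatchBuilder()
	handle(err)

	// all sub-requests in a single builder must share one operation type (delete here)
	err = bb.Delete("blob1.txt", nil)
	handle(err)
	err = bb.Delete("blob2.txt", nil)
	handle(err)

	resp, err := client.SubmitBatch(context.TODO(), bb, nil)
	handle(err)

	// each sub-request gets its own result; a nil Error means it succeeded
	for _, item := range resp.Responses {
		if item.Error != nil {
			fmt.Println("sub-request failed:", item.Error)
		}
	}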
@@ -102,6 +104,15 @@ func (c *Client) sharedKey() *SharedKeyCredential { return base.SharedKey((*base.Client[generated.ContainerClient])(c)) } +func (c *Client) credential() any { + return base.Credential((*base.Client[generated.ContainerClient])(c)) +} + +// helper method to return the generated.BlobClient which is used for creating the sub-requests +func getGeneratedBlobClient(b *blob.Client) *generated.BlobClient { + return base.InnerClient((*base.Client[generated.BlobClient])(b)) +} + // URL returns the URL endpoint used by the Client object. func (c *Client) URL() string { return c.generated().Endpoint() @@ -113,7 +124,7 @@ func (c *Client) URL() string { func (c *Client) NewBlobClient(blobName string) *blob.Client { blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey())) + return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().Pipeline(), c.credential())) } // NewAppendBlobClient creates a new appendblob.Client object by concatenating blobName to the end of @@ -147,13 +158,13 @@ func (c *Client) NewPageBlobClient(blobName string) *pageblob.Client { // For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container. func (c *Client) Create(ctx context.Context, options *CreateOptions) (CreateResponse, error) { var opts *generated.ContainerClientCreateOptions - var cpkScopes *generated.ContainerCpkScopeInfo + var cpkScopes *generated.ContainerCPKScopeInfo if options != nil { opts = &generated.ContainerClientCreateOptions{ Access: options.Access, Metadata: options.Metadata, } - cpkScopes = options.CpkScopeInfo + cpkScopes = options.CPKScopeInfo } resp, err := c.generated().Create(ctx, opts, cpkScopes) @@ -190,7 +201,7 @@ func (c *Client) Restore(ctx context.Context, deletedContainerVersion string, op // For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata. func (c *Client) GetProperties(ctx context.Context, o *GetPropertiesOptions) (GetPropertiesResponse, error) { // NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties. - // This allows us to not expose a GetProperties method at all simplifying the API. + // This allows us to not expose a GetMetadata method at all simplifying the API. // The optionals are nil, like they were in track 1.5 opts, leaseAccessConditions := o.format() @@ -217,15 +228,20 @@ func (c *Client) GetAccessPolicy(ctx context.Context, o *GetAccessPolicyOptions) // SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly. // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl. 
-func (c *Client) SetAccessPolicy(ctx context.Context, containerACL []*SignedIdentifier, o *SetAccessPolicyOptions) (SetAccessPolicyResponse, error) { - accessPolicy, mac, lac := o.format() - for _, c := range containerACL { - err := formatTime(c) - if err != nil { - return SetAccessPolicyResponse{}, err - } +func (c *Client) SetAccessPolicy(ctx context.Context, o *SetAccessPolicyOptions) (SetAccessPolicyResponse, error) { + accessPolicy, mac, lac, acl, err := o.format() + if err != nil { + return SetAccessPolicyResponse{}, err } - resp, err := c.generated().SetAccessPolicy(ctx, containerACL, accessPolicy, mac, lac) + resp, err := c.generated().SetAccessPolicy(ctx, acl, accessPolicy, mac, lac) + return resp, err +} + +// GetAccountInfo provides account level information +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. +func (c *Client) GetAccountInfo(ctx context.Context, o *GetAccountInfoOptions) (GetAccountInfoResponse, error) { + getAccountInfoOptions := o.format() + resp, err := c.generated().GetAccountInfo(ctx, getAccountInfoOptions) return resp, err } @@ -271,12 +287,9 @@ func (c *Client) NewListBlobsFlatPager(o *ListBlobsFlatOptions) *runtime.Pager[L // NewListBlobsHierarchyPager returns a channel of blobs starting from the specified Marker. Use an empty // Marker to start enumeration from the beginning. Blob names are returned in lexicographic order. -// After getting a segment, process it, and then call ListBlobsHierarchicalSegment again (passing the the +// After getting a segment, process it, and then call ListBlobsHierarchicalSegment again (passing the // previously-returned Marker) to get the next segment. // For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs. -// AutoPagerTimeout specifies the amount of time with no read operations before the channel times out and closes. Specify no time and it will be ignored. -// AutoPagerBufferSize specifies the channel's buffer size. -// Both the blob item channel and error channel should be watched. Only one error will be released via this channel (or a nil error, to register a clean exit.) func (c *Client) NewListBlobsHierarchyPager(delimiter string, o *ListBlobsHierarchyOptions) *runtime.Pager[ListBlobsHierarchyResponse] { listOptions := o.format() return runtime.NewPager(runtime.PagingHandler[ListBlobsHierarchyResponse]{ @@ -309,23 +322,22 @@ func (c *Client) NewListBlobsHierarchyPager(delimiter string, o *ListBlobsHierar // GetSASURL is a convenience method for generating a SAS token for the currently pointed at container. // It can only be used if the credential supplied during creation was a SharedKeyCredential. -func (c *Client) GetSASURL(permissions sas.ContainerPermissions, start time.Time, expiry time.Time) (string, error) { +func (c *Client) GetSASURL(permissions sas.ContainerPermissions, expiry time.Time, o *GetSASURLOptions) (string, error) { if c.sharedKey() == nil { - return "", errors.New("SAS can only be signed with a SharedKeyCredential") + return "", bloberror.MissingSharedKeyCredential } - + st := o.format() urlParts, err := blob.ParseURL(c.URL()) if err != nil { return "", err } - // Containers do not have snapshots, nor versions. 
qps, err := sas.BlobSignatureValues{ Version: sas.Version, Protocol: sas.ProtocolHTTPS, ContainerName: urlParts.ContainerName, Permissions: permissions.String(), - StartTime: start.UTC(), + StartTime: st, ExpiryTime: expiry.UTC(), }.SignWithSharedKey(c.sharedKey()) if err != nil { @@ -336,3 +348,67 @@ func (c *Client) GetSASURL(permissions sas.ContainerPermissions, start time.Time return endpoint, nil } + +// NewBatchBuilder creates an instance of BatchBuilder using the same auth policy as the client. +// BatchBuilder is used to build the batch consisting of either delete or set tier sub-requests. +// All sub-requests in the batch must be of the same type, either delete or set tier. +func (c *Client) NewBatchBuilder() (*BatchBuilder, error) { + var authPolicy policy.Policy + + switch cred := c.credential().(type) { + case *azcore.TokenCredential: + authPolicy = shared.NewStorageChallengePolicy(*cred) + case *SharedKeyCredential: + authPolicy = exported.NewSharedKeyCredPolicy(cred) + case nil: + // for authentication using SAS + authPolicy = nil + default: + return nil, fmt.Errorf("unrecognised authentication type %T", cred) + } + + return &BatchBuilder{ + endpoint: c.URL(), + authPolicy: authPolicy, + }, nil +} + +// SubmitBatch operation allows multiple API calls to be embedded into a single HTTP request. +// It builds the request body using the BatchBuilder object passed. +// BatchBuilder contains the list of operations to be submitted. It supports up to 256 sub-requests in a single batch. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/blob-batch. +func (c *Client) SubmitBatch(ctx context.Context, bb *BatchBuilder, options *SubmitBatchOptions) (SubmitBatchResponse, error) { + if bb == nil || len(bb.subRequests) == 0 { + return SubmitBatchResponse{}, errors.New("batch builder is empty") + } + + // create the request body + batchReq, batchID, err := exported.CreateBatchRequest(&exported.BlobBatchBuilder{ + AuthPolicy: bb.authPolicy, + SubRequests: bb.subRequests, + }) + if err != nil { + return SubmitBatchResponse{}, err + } + + reader := bytes.NewReader(batchReq) + rsc := streaming.NopCloser(reader) + multipartContentType := "multipart/mixed; boundary=" + batchID + + resp, err := c.generated().SubmitBatch(ctx, int64(len(batchReq)), multipartContentType, rsc, options.format()) + if err != nil { + return SubmitBatchResponse{}, err + } + + batchResponses, err := exported.ParseBlobBatchResponse(resp.Body, resp.ContentType, bb.subRequests) + if err != nil { + return SubmitBatchResponse{}, err + } + + return SubmitBatchResponse{ + Responses: batchResponses, + ContentType: resp.ContentType, + RequestID: resp.RequestID, + Version: resp.Version, + }, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go index b1439e6f3..09a8e8ed3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go @@ -34,7 +34,7 @@ func PossibleAccessTierValues() []AccessTier { return generated.PossibleAccessTierValues() } -// PublicAccessType defines values for AccessType - private (default) or blob or container +// PublicAccessType defines values for AccessType - private (default) or blob or container. 
type PublicAccessType = generated.PublicAccessType const ( @@ -47,7 +47,7 @@ func PossiblePublicAccessTypeValues() []PublicAccessType { return generated.PossiblePublicAccessTypeValues() } -// SKUName defines values for SkuName - LRS, GRS, RAGRS, ZRS, Premium LRS +// SKUName defines values for SkuName - LRS, GRS, RAGRS, ZRS, Premium LRS. type SKUName = generated.SKUName const ( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go index 1e2e73f11..f1724861e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go @@ -7,6 +7,7 @@ package container import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "reflect" "time" @@ -25,19 +26,28 @@ func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredentia // Request Model Declaration ------------------------------------------------------------------------------------------- -// CpkScopeInfo contains a group of parameters for the ContainerClient.Create method. -type CpkScopeInfo = generated.ContainerCpkScopeInfo +// CPKScopeInfo contains a group of parameters for the ContainerClient.Create method. +type CPKScopeInfo = generated.ContainerCPKScopeInfo -// BlobProperties - Properties of a blob +// BlobFlatListSegment - List of BlobItem. +type BlobFlatListSegment = generated.BlobFlatListSegment + +// BlobHierarchyListSegment - List of BlobItem and BlobPrefix. +type BlobHierarchyListSegment = generated.BlobHierarchyListSegment + +// BlobProperties - Properties of a blob. type BlobProperties = generated.BlobProperties -// BlobItem - An Azure Storage blob +// BlobItem - An Azure Storage blob. type BlobItem = generated.BlobItem +// BlobTags - Blob tags. +type BlobTags = generated.BlobTags + // BlobPrefix is a blob's prefix when hierarchically listing blobs. type BlobPrefix = generated.BlobPrefix -// BlobTag - a key/value pair on a blob +// BlobTag - a key/value pair on a blob. type BlobTag = generated.BlobTag // AccessConditions identifies container-specific access conditions which you optionally set. @@ -49,28 +59,28 @@ type LeaseAccessConditions = exported.LeaseAccessConditions // ModifiedAccessConditions contains a group of parameters for specifying access conditions. type ModifiedAccessConditions = exported.ModifiedAccessConditions -// AccessPolicy - An Access policy +// AccessPolicy - An Access policy. type AccessPolicy = generated.AccessPolicy // AccessPolicyPermission type simplifies creating the permissions string for a container's access policy. // Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field. type AccessPolicyPermission = exported.AccessPolicyPermission -// SignedIdentifier - signed identifier +// SignedIdentifier - signed identifier. type SignedIdentifier = generated.SignedIdentifier // Request Model Declaration ------------------------------------------------------------------------------------------- // CreateOptions contains the optional parameters for the Client.Create method. type CreateOptions struct { - // Specifies whether data in the container may be accessed publicly and the level of access + // Specifies whether data in the container may be accessed publicly and the level of access. Access *PublicAccessType // Optional. Specifies a user-defined name-value pair associated with the blob. 
- Metadata map[string]string + Metadata map[string]*string // Optional. Specifies the encryption scope settings to set on the container. - CpkScopeInfo *CpkScopeInfo + CPKScopeInfo *CPKScopeInfo } // --------------------------------------------------------------------------------------------------------------------- @@ -170,11 +180,11 @@ type ListBlobsFlatOptions struct { // as the value for the marker parameter in a subsequent call to request the next // page of list items. The marker value is opaque to the client. Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // Specifies the maximum number of containers to return. If the request does not specify MaxResults, or specifies a value // greater than 5000, the server will return up to 5000 items. Note that if the // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. + // return fewer results than specified by MaxResults, or than the default of 5000. MaxResults *int32 // Filters the results to return only containers whose name begins with the specified prefix. Prefix *string @@ -192,11 +202,11 @@ type ListBlobsHierarchyOptions struct { // as the value for the marker parameter in a subsequent call to request the next // page of list items. The marker value is opaque to the client. Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // Specifies the maximum number of containers to return. If the request does not specify MaxResults, or specifies a value // greater than 5000, the server will return up to 5000 items. Note that if the // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. + // return fewer results than specified by MaxResults, or than the default of 5000. MaxResults *int32 // Filters the results to return only containers whose name begins with the specified prefix. Prefix *string @@ -218,9 +228,30 @@ func (o *ListBlobsHierarchyOptions) format() generated.ContainerClientListBlobHi // --------------------------------------------------------------------------------------------------------------------- +// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. +type GetSASURLOptions struct { + StartTime *time.Time +} + +func (o *GetSASURLOptions) format() time.Time { + if o == nil { + return time.Time{} + } + + var st time.Time + if o.StartTime != nil { + st = o.StartTime.UTC() + } else { + st = time.Time{} + } + return st +} + +// --------------------------------------------------------------------------------------------------------------------- + // SetMetadataOptions contains the optional parameters for the Client.SetMetadata method. 
type SetMetadataOptions struct { - Metadata map[string]string + Metadata map[string]*string LeaseAccessConditions *LeaseAccessConditions ModifiedAccessConditions *ModifiedAccessConditions } @@ -250,23 +281,31 @@ func (o *GetAccessPolicyOptions) format() (*generated.ContainerClientGetAccessPo // --------------------------------------------------------------------------------------------------------------------- -// SetAccessPolicyOptions provides set of configurations for ContainerClient.SetAccessPolicy operation +// SetAccessPolicyOptions provides set of configurations for ContainerClient.SetAccessPolicy operation. type SetAccessPolicyOptions struct { - // Specifies whether data in the container may be accessed publicly and the level of access - Access *PublicAccessType - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. + // Specifies whether data in the container may be accessed publicly and the level of access. + // If this header is not included in the request, container data is private to the account owner. + Access *PublicAccessType AccessConditions *AccessConditions + ContainerACL []*SignedIdentifier } -func (o *SetAccessPolicyOptions) format() (*generated.ContainerClientSetAccessPolicyOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { +func (o *SetAccessPolicyOptions) format() (*generated.ContainerClientSetAccessPolicyOptions, *LeaseAccessConditions, *ModifiedAccessConditions, []*SignedIdentifier, error) { if o == nil { - return nil, nil, nil + return nil, nil, nil, nil, nil + } + if o.ContainerACL != nil { + for _, c := range o.ContainerACL { + err := formatTime(c) + if err != nil { + return nil, nil, nil, nil, err + } + } } lac, mac := exported.FormatContainerAccessConditions(o.AccessConditions) return &generated.ContainerClientSetAccessPolicyOptions{ Access: o.Access, - }, lac, mac + }, lac, mac, o.ContainerACL, nil } func formatTime(c *SignedIdentifier) error { @@ -291,3 +330,70 @@ func formatTime(c *SignedIdentifier) error { return nil } + +// --------------------------------------------------------------------------------------------------------------------- + +// GetAccountInfoOptions provides set of options for Client.GetAccountInfo +type GetAccountInfoOptions struct { + // placeholder for future options +} + +func (o *GetAccountInfoOptions) format() *generated.ContainerClientGetAccountInfoOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BatchDeleteOptions contains the optional parameters for the BatchBuilder.Delete method. +type BatchDeleteOptions struct { + blob.DeleteOptions + VersionID *string + Snapshot *string +} + +func (o *BatchDeleteOptions) format() (*generated.BlobClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + basics := generated.BlobClientDeleteOptions{ + DeleteSnapshots: o.DeleteSnapshots, + DeleteType: o.BlobDeleteType, // None by default + Snapshot: o.Snapshot, + VersionID: o.VersionID, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, leaseAccessConditions, modifiedAccessConditions +} + +// BatchSetTierOptions contains the optional parameters for the BatchBuilder.SetTier method. 
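+// It embeds blob.SetTierOptions and can additionally target a specific blob snapshot or version.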
+type BatchSetTierOptions struct { + blob.SetTierOptions + VersionID *string + Snapshot *string +} + +func (o *BatchSetTierOptions) format() (*generated.BlobClientSetTierOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + basics := generated.BlobClientSetTierOptions{ + RehydratePriority: o.RehydratePriority, + Snapshot: o.Snapshot, + VersionID: o.VersionID, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, leaseAccessConditions, modifiedAccessConditions +} + +// SubmitBatchOptions contains the optional parameters for the Client.SubmitBatch method. +type SubmitBatchOptions struct { + // placeholder for future options +} + +func (o *SubmitBatchOptions) format() *generated.ContainerClientSubmitBatchOptions { + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go index 9d8672b13..4d1e406ea 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go @@ -7,6 +7,7 @@ package container import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" ) @@ -25,9 +26,15 @@ type GetPropertiesResponse = generated.ContainerClientGetPropertiesResponse // ListBlobsFlatResponse contains the response from method Client.ListBlobFlatSegment. type ListBlobsFlatResponse = generated.ContainerClientListBlobFlatSegmentResponse +// ListBlobsFlatSegmentResponse - An enumeration of blobs +type ListBlobsFlatSegmentResponse = generated.ListBlobsFlatSegmentResponse + // ListBlobsHierarchyResponse contains the response from method Client.ListBlobHierarchySegment. type ListBlobsHierarchyResponse = generated.ContainerClientListBlobHierarchySegmentResponse +// ListBlobsHierarchySegmentResponse - An enumeration of blobs +type ListBlobsHierarchySegmentResponse = generated.ListBlobsHierarchySegmentResponse + // SetMetadataResponse contains the response from method Client.SetMetadata. type SetMetadataResponse = generated.ContainerClientSetMetadataResponse @@ -36,3 +43,24 @@ type GetAccessPolicyResponse = generated.ContainerClientGetAccessPolicyResponse // SetAccessPolicyResponse contains the response from method Client.SetAccessPolicy. type SetAccessPolicyResponse = generated.ContainerClientSetAccessPolicyResponse + +// GetAccountInfoResponse contains the response from method Client.GetAccountInfo. +type GetAccountInfoResponse = generated.ContainerClientGetAccountInfoResponse + +// SubmitBatchResponse contains the response from method Client.SubmitBatch. +type SubmitBatchResponse struct { + // Responses contains the responses of the sub-requests in the batch + Responses []*BatchResponseItem + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BatchResponseItem contains the response for the individual sub-requests. 
+type BatchResponseItem = exported.BatchResponseItem diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go index d5b6ed6a7..9a4806c57 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go @@ -51,7 +51,7 @@ Use the key as the credential parameter to authenticate the client: cred, err := azblob.NewSharedKeyCredential(accountName, accountKey) handle(err) - serviceClient, err := azblob.NewServiceClientWithSharedKey(serviceURL, cred, nil) + serviceClient, err := azblob.NewClientWithSharedKeyCredential(serviceURL, cred, nil) handle(err) fmt.Println(serviceClient.URL()) @@ -59,11 +59,12 @@ Use the key as the credential parameter to authenticate the client: Using a Connection String Depending on your use case and authorization method, you may prefer to initialize a client instance with a connection string instead of providing the account URL and credential separately. -To do this, pass the connection string to the service client's `NewServiceClientFromConnectionString` method. +To do this, pass the connection string to the service client's `NewClientFromConnectionString` method. The connection string can be found in your storage account in the Azure Portal under the "Access Keys" section. connStr := "DefaultEndpointsProtocol=https;AccountName=;AccountKey=;EndpointSuffix=core.windows.net" - serviceClient, err := azblob.NewServiceClientFromConnectionString(connStr, nil) + serviceClient, err := azblob.NewClientFromConnectionString(connStr, nil) + handle(err) Using a Shared Access Signature (SAS) Token @@ -82,20 +83,20 @@ You can generate a SAS token from the Azure Portal under Shared Access Signature cred, err := azblob.NewSharedKeyCredential(accountName, accountKey) handle(err) - serviceClient, err := azblob.NewServiceClientWithSharedKey(serviceURL, cred, nil) + serviceClient, err := azblob.NewClientWithSharedKeyCredential(serviceURL, cred, nil) handle(err) fmt.Println(serviceClient.URL()) // Alternatively, you can create SAS on the fly - resources := azblob.AccountSASResourceTypes{Service: true} - permission := azblob.AccountSASPermissions{Read: true} + resources := sas.AccountResourceTypes{Service: true} + permission := sas.AccountPermissions{Read: true} start := time.Now() expiry := start.AddDate(0, 0, 1) - serviceURLWithSAS, err := serviceClient.GetSASURL(resources, permission, start, expiry) + serviceURLWithSAS, err := serviceClient.ServiceClient().GetSASURL(resources, permission, expiry, &service.GetSASURLOptions{StartTime: &start}) handle(err) - serviceClientWithSAS, err := azblob.NewServiceClientWithNoCredential(serviceURLWithSAS, nil) + serviceClientWithSAS, err := azblob.NewClientWithNoCredential(serviceURLWithSAS, nil) handle(err) fmt.Println(serviceClientWithSAS.URL()) @@ -135,13 +136,13 @@ Examples handle(err) // The service URL for blob endpoints is usually in the form: http(s)://.blob.core.windows.net/ - serviceClient, err := azblob.NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil) + serviceClient, err := azblob.NewClientWithSharedKeyCredential(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil) handle(err) // ===== 1. 
Create a container ===== // First, create a container client, and use the Create method to create a new container in your account - containerClient, err := serviceClient.NewContainerClient("testcontainer") + containerClient := serviceClient.ServiceClient().NewContainerClient("testcontainer") handle(err) // All APIs have an options' bag struct as a parameter. @@ -154,13 +155,13 @@ Examples uploadData := "Hello world!" // Create a new blockBlobClient from the containerClient - blockBlobClient, err := containerClient.NewBlockBlobClient("HelloWorld.txt") + blockBlobClient := containerClient.NewBlockBlobClient("HelloWorld.txt") handle(err) // Upload data to the block blob - blockBlobUploadOptions := azblob.BlockBlobUploadOptions{ - Metadata: map[string]string{"Foo": "Bar"}, - TagsMap: map[string]string{"Year": "2022"}, + blockBlobUploadOptions := blockblob.UploadOptions{ + Metadata: map[string]*string{"Foo": to.Ptr("Bar")}, + Tags: map[string]string{"Year": "2022"}, } _, err = blockBlobClient.Upload(context.TODO(), streaming.NopCloser(strings.NewReader(uploadData)), &blockBlobUploadOptions) handle(err) @@ -175,10 +176,9 @@ Examples downloadData, err := io.ReadAll(reader) handle(err) if string(downloadData) != uploadData { - handle(errors.New("Uploaded data should be same as downloaded data")) + handle(errors.New("uploaded data should be same as downloaded data")) } - if err = reader.Close(); err != nil { handle(err) return @@ -189,18 +189,15 @@ Examples // To iterate over a page use the NextPage(context.Context) to fetch the next page of results. // PageResponse() can be used to iterate over the results of the specific page. // Always check the Err() method after paging to see if an error was returned by the pager. A pager will return either an error or the page of results. - pager := containerClient.ListBlobsFlat(nil) - for pager.NextPage(context.TODO()) { - resp := pager.PageResponse() + pager := containerClient.NewListBlobsFlatPager(nil) + for pager.More() { + resp, err := pager.NextPage(context.TODO()) + handle(err) for _, v := range resp.Segment.BlobItems { fmt.Println(*v.Name) } } - if err = pager.Err(); err != nil { - handle(err) - } - // Delete the blob. _, err = blockBlobClient.Delete(context.TODO(), nil) handle(err) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go index 16e6cac06..6c3cc5c93 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go @@ -7,14 +7,20 @@ package base import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" ) +// ClientOptions contains the optional parameters when creating a Client. 
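+// Package-level ClientOptions types (for example azblob.ClientOptions and container.ClientOptions) are aliases of this struct.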
+type ClientOptions struct { + azcore.ClientOptions +} + type Client[T any] struct { - inner *T - sharedKey *exported.SharedKeyCredential + inner *T + credential any } func InnerClient[T any](client *Client[T]) *T { @@ -22,31 +28,40 @@ func InnerClient[T any](client *Client[T]) *T { } func SharedKey[T any](client *Client[T]) *exported.SharedKeyCredential { - return client.sharedKey + switch cred := client.credential.(type) { + case *exported.SharedKeyCredential: + return cred + default: + return nil + } +} + +func Credential[T any](client *Client[T]) any { + return client.credential } func NewClient[T any](inner *T) *Client[T] { return &Client[T]{inner: inner} } -func NewServiceClient(containerURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.ServiceClient] { +func NewServiceClient(containerURL string, pipeline runtime.Pipeline, credential any) *Client[generated.ServiceClient] { return &Client[generated.ServiceClient]{ - inner: generated.NewServiceClient(containerURL, pipeline), - sharedKey: sharedKey, + inner: generated.NewServiceClient(containerURL, pipeline), + credential: credential, } } -func NewContainerClient(containerURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.ContainerClient] { +func NewContainerClient(containerURL string, pipeline runtime.Pipeline, credential any) *Client[generated.ContainerClient] { return &Client[generated.ContainerClient]{ - inner: generated.NewContainerClient(containerURL, pipeline), - sharedKey: sharedKey, + inner: generated.NewContainerClient(containerURL, pipeline), + credential: credential, } } -func NewBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.BlobClient] { +func NewBlobClient(blobURL string, pipeline runtime.Pipeline, credential any) *Client[generated.BlobClient] { return &Client[generated.BlobClient]{ - inner: generated.NewBlobClient(blobURL, pipeline), - sharedKey: sharedKey, + inner: generated.NewBlobClient(blobURL, pipeline), + credential: credential, } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go new file mode 100644 index 000000000..64a88688a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go @@ -0,0 +1,279 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package exported + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "io" + "mime" + "mime/multipart" + "net/http" + "net/textproto" + "strconv" + "strings" +) + +const ( + batchIdPrefix = "batch_" + httpVersion = "HTTP/1.1" + httpNewline = "\r\n" +) + +// createBatchID is used for creating a new batch id which is used as batch boundary in the request body +func createBatchID() (string, error) { + batchID, err := uuid.New() + if err != nil { + return "", err + } + + return batchIdPrefix + batchID.String(), nil +} + +// buildSubRequest is used for building the sub-request. Example: +// DELETE /container0/blob0 HTTP/1.1 +// x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT +// Authorization: SharedKey account:G4jjBXA7LI/RnWKIOQ8i9xH4p76pAQ+4Fs4R1VxasaE= +// Content-Length: 0 +func buildSubRequest(req *policy.Request) []byte { + var batchSubRequest strings.Builder + blobPath := req.Raw().URL.Path + if len(req.Raw().URL.RawQuery) > 0 { + blobPath += "?" + req.Raw().URL.RawQuery + } + + batchSubRequest.WriteString(fmt.Sprintf("%s %s %s%s", req.Raw().Method, blobPath, httpVersion, httpNewline)) + + for k, v := range req.Raw().Header { + if strings.EqualFold(k, shared.HeaderXmsVersion) { + continue + } + if len(v) > 0 { + batchSubRequest.WriteString(fmt.Sprintf("%v: %v%v", k, v[0], httpNewline)) + } + } + + batchSubRequest.WriteString(httpNewline) + return []byte(batchSubRequest.String()) +} + +// CreateBatchRequest creates a new batch request using the sub-requests present in the BlobBatchBuilder. 
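+// Each sub-request is authorized with the builder's auth policy (when one is set) and written as its own MIME part, with the batch ID serving as the multipart boundary.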
+// +// Example of a sub-request in the batch request body: +// +// --batch_357de4f7-6d0b-4e02-8cd2-6361411a9525 +// Content-Type: application/http +// Content-Transfer-Encoding: binary +// Content-ID: 0 +// +// DELETE /container0/blob0 HTTP/1.1 +// x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT +// Authorization: SharedKey account:G4jjBXA7LI/RnWKIOQ8i9xH4p76pAQ+4Fs4R1VxasaE= +// Content-Length: 0 +func CreateBatchRequest(bb *BlobBatchBuilder) ([]byte, string, error) { + batchID, err := createBatchID() + if err != nil { + return nil, "", err + } + + // Create a new multipart buffer + reqBody := &bytes.Buffer{} + writer := multipart.NewWriter(reqBody) + + // Set the boundary + err = writer.SetBoundary(batchID) + if err != nil { + return nil, "", err + } + + partHeaders := make(textproto.MIMEHeader) + partHeaders["Content-Type"] = []string{"application/http"} + partHeaders["Content-Transfer-Encoding"] = []string{"binary"} + var partWriter io.Writer + + for i, req := range bb.SubRequests { + if bb.AuthPolicy != nil { + _, err := bb.AuthPolicy.Do(req) + if err != nil && !strings.EqualFold(err.Error(), "no more policies") { + if log.Should(EventSubmitBatch) { + log.Writef(EventSubmitBatch, "failed to authorize sub-request for %v.\nError: %v", req.Raw().URL.Path, err.Error()) + } + return nil, "", err + } + } + + partHeaders["Content-ID"] = []string{fmt.Sprintf("%v", i)} + partWriter, err = writer.CreatePart(partHeaders) + if err != nil { + return nil, "", err + } + + _, err = partWriter.Write(buildSubRequest(req)) + if err != nil { + return nil, "", err + } + } + + // Close the multipart writer + err = writer.Close() + if err != nil { + return nil, "", err + } + + return reqBody.Bytes(), batchID, nil +} + +// UpdateSubRequestHeaders updates the sub-request headers. +// Removes x-ms-version header. +func UpdateSubRequestHeaders(req *policy.Request) { + // remove x-ms-version header from the request header + for k := range req.Raw().Header { + if strings.EqualFold(k, shared.HeaderXmsVersion) { + delete(req.Raw().Header, k) + } + } +} + +// BatchResponseItem contains the response for the individual sub-requests. +type BatchResponseItem struct { + ContentID *int + ContainerName *string + BlobName *string + RequestID *string + Version *string + Error error // nil error indicates that the batch sub-request operation is successful +} + +func getResponseBoundary(contentType *string) (string, error) { + if contentType == nil { + return "", fmt.Errorf("Content-Type returned in SubmitBatch response is nil") + } + + _, params, err := mime.ParseMediaType(*contentType) + if err != nil { + return "", err + } + + if val, ok := params["boundary"]; ok { + return val, nil + } else { + return "", fmt.Errorf("batch boundary not present in Content-Type header of the SubmitBatch response.\nContent-Type: %v", *contentType) + } +} + +func getContentID(part *multipart.Part) (*int, error) { + contentID := part.Header.Get("Content-ID") + if contentID == "" { + return nil, nil + } + + val, err := strconv.Atoi(strings.TrimSpace(contentID)) + if err != nil { + return nil, err + } + return &val, nil +} + +func getResponseHeader(key string, resp *http.Response) *string { + val := resp.Header.Get(key) + if val == "" { + return nil + } + return &val +} + +// ParseBlobBatchResponse is used for parsing the batch response body into individual sub-responses for each item in the batch. 
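+// Sub-responses are matched to their sub-requests by Content-ID, and a failed sub-request is reported through the Error field of its BatchResponseItem.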
+func ParseBlobBatchResponse(respBody io.ReadCloser, contentType *string, subRequests []*policy.Request) ([]*BatchResponseItem, error) { + boundary, err := getResponseBoundary(contentType) + if err != nil { + return nil, err + } + + respReader := multipart.NewReader(respBody, boundary) + var responses []*BatchResponseItem + + for { + part, err := respReader.NextPart() + if errors.Is(err, io.EOF) { + break + } else if err != nil { + return nil, err + } + + batchSubResponse := &BatchResponseItem{} + batchSubResponse.ContentID, err = getContentID(part) + if err != nil { + return nil, err + } + + if batchSubResponse.ContentID != nil { + path := strings.Trim(subRequests[*batchSubResponse.ContentID].Raw().URL.Path, "/") + p := strings.Split(path, "/") + batchSubResponse.ContainerName = to.Ptr(p[0]) + batchSubResponse.BlobName = to.Ptr(strings.Join(p[1:], "/")) + } + + respBytes, err := io.ReadAll(part) + if err != nil { + return nil, err + } + respBytes = append(respBytes, byte('\n')) + buf := bytes.NewBuffer(respBytes) + resp, err := http.ReadResponse(bufio.NewReader(buf), nil) + // sub-response parsing error + if err != nil { + return nil, err + } + + batchSubResponse.RequestID = getResponseHeader(shared.HeaderXmsRequestID, resp) + batchSubResponse.Version = getResponseHeader(shared.HeaderXmsVersion, resp) + + // sub-response failure + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + if len(responses) == 0 && batchSubResponse.ContentID == nil { + // this case can happen when the parent request fails. + // For example, batch request having more than 256 sub-requests. + return nil, fmt.Errorf("%v", string(respBytes)) + } + + resp.Request = subRequests[*batchSubResponse.ContentID].Raw() + batchSubResponse.Error = runtime.NewResponseError(resp) + } + + responses = append(responses, batchSubResponse) + } + + if len(responses) != len(subRequests) { + return nil, fmt.Errorf("expected %v responses, got %v for the batch ID: %v", len(subRequests), len(responses), boundary) + } + + return responses, nil +} + +// not exported but used for batch request creation + +// BlobBatchBuilder is used for creating the blob batch request +type BlobBatchBuilder struct { + AuthPolicy policy.Policy + SubRequests []*policy.Request +} + +// BlobBatchOperationType defines the operation of the blob batch sub-requests. +type BlobBatchOperationType string + +const ( + BatchDeleteOperationType BlobBatchOperationType = "delete" + BatchSetTierOperationType BlobBatchOperationType = "set tier" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/log_events.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/log_events.go new file mode 100644 index 000000000..d775fb5c8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/log_events.go @@ -0,0 +1,20 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// NOTE: these are publicly exported via type-aliasing in azblob/log.go +const ( + // EventUpload is used when we compute number of blocks to upload and size of each block. + EventUpload log.Event = "azblob.Upload" + + // EventSubmitBatch is used for logging events related to submit blob batch operation. 
+ EventSubmitBatch log.Event = "azblob.SubmitBatch" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go index 5ed5e382f..f27f52983 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go @@ -8,5 +8,5 @@ package exported const ( ModuleName = "azblob" - ModuleVersion = "v0.6.1" + ModuleVersion = "v1.1.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md index 5a3520160..3044a608a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md @@ -19,7 +19,7 @@ modelerfour: seal-single-value-enum-by-default: true lenient-model-deduplication: true export-clients: true -use: "@autorest/go@4.0.0-preview.43" +use: "@autorest/go@4.0.0-preview.45" ``` ### Remove pager methods and export various generated methods in container client @@ -30,7 +30,7 @@ directive: where: $ transform: >- return $. - replace(/func \(client \*ContainerClient\) NewListBlobFlatSegmentPager\(.+\/\/ listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request/s, `// listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request`). + replace(/func \(client \*ContainerClient\) NewListBlobFlatSegmentPager\(.+\/\/ listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request/s, `//\n// listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request`). replace(/\(client \*ContainerClient\) listBlobFlatSegmentCreateRequest\(/, `(client *ContainerClient) ListBlobFlatSegmentCreateRequest(`). replace(/\(client \*ContainerClient\) listBlobFlatSegmentHandleResponse\(/, `(client *ContainerClient) ListBlobFlatSegmentHandleResponse(`); ``` @@ -43,7 +43,7 @@ directive: where: $ transform: >- return $. - replace(/func \(client \*ServiceClient\) NewListContainersSegmentPager\(.+\/\/ listContainersSegmentCreateRequest creates the ListContainersSegment request/s, `// listContainersSegmentCreateRequest creates the ListContainersSegment request`). + replace(/func \(client \*ServiceClient\) NewListContainersSegmentPager\(.+\/\/ listContainersSegmentCreateRequest creates the ListContainersSegment request/s, `//\n// listContainersSegmentCreateRequest creates the ListContainersSegment request`). replace(/\(client \*ServiceClient\) listContainersSegmentCreateRequest\(/, `(client *ServiceClient) ListContainersSegmentCreateRequest(`). replace(/\(client \*ServiceClient\) listContainersSegmentHandleResponse\(/, `(client *ServiceClient) ListContainersSegmentHandleResponse(`); ``` @@ -321,3 +321,117 @@ directive: from: BlobPropertiesInternal to: BlobProperties ``` + +### Updating encoding URL, Golang adds '+' which disrupts encoding with service + +``` yaml +directive: + - from: zz_service_client.go + where: $ + transform: >- + return $. 
+ replace(/req.Raw\(\).URL.RawQuery \= reqQP.Encode\(\)/, `req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)`) +``` + +### Change `where` parameter in blob filtering to be required + +``` yaml +directive: +- from: swagger-document + where: $.parameters.FilterBlobsWhere + transform: > + $.required = true; +``` + +### Change `Duration` parameter in leases to be required + +``` yaml +directive: +- from: swagger-document + where: $.parameters.LeaseDuration + transform: > + $.required = true; +``` + +### Change CPK acronym to be all caps + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/Cpk/g, "CPK"); +``` + +### Change CORS acronym to be all caps + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/Cors/g, "CORS"); +``` + +### Change cors xml to be correct + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/xml:"CORS>CORSRule"/g, "xml:\"Cors>CorsRule\""); +``` + +### Fix Content-Type header in submit batch request + +``` yaml +directive: +- from: + - zz_container_client.go + - zz_service_client.go + where: $ + transform: >- + return $. + replace (/req.SetBody\(body\,\s+\"application\/xml\"\)/g, `req.SetBody(body, multipartContentType)`); +``` + +### Fix response status code check in submit batch request + +``` yaml +directive: +- from: zz_service_client.go + where: $ + transform: >- + return $. + replace(/if\s+!runtime\.HasStatusCode\(resp,\s+http\.StatusOK\)\s+\{\s*\n\t\treturn\s+ServiceClientSubmitBatchResponse\{\}\,\s+runtime\.NewResponseError\(resp\)\s*\n\t\}/g, + `if !runtime.HasStatusCode(resp, http.StatusAccepted) {\n\t\treturn ServiceClientSubmitBatchResponse{}, runtime.NewResponseError(resp)\n\t}`); +``` + +### Convert time to GMT for If-Modified-Since and If-Unmodified-Since request headers + +``` yaml +directive: +- from: + - zz_container_client.go + - zz_blob_client.go + - zz_appendblob_client.go + - zz_blockblob_client.go + - zz_pageblob_client.go + where: $ + transform: >- + return $. + replace (/req\.Raw\(\)\.Header\[\"If-Modified-Since\"\]\s+=\s+\[\]string\{modifiedAccessConditions\.IfModifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}`). + replace (/req\.Raw\(\)\.Header\[\"If-Unmodified-Since\"\]\s+=\s+\[\]string\{modifiedAccessConditions\.IfUnmodifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}`). + replace (/req\.Raw\(\)\.Header\[\"x-ms-source-if-modified-since\"\]\s+=\s+\[\]string\{sourceModifiedAccessConditions\.SourceIfModifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}`). + replace (/req\.Raw\(\)\.Header\[\"x-ms-source-if-unmodified-since\"\]\s+=\s+\[\]string\{sourceModifiedAccessConditions\.SourceIfUnmodifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}`). 
+ replace (/req\.Raw\(\)\.Header\[\"x-ms-immutability-policy-until-date\"\]\s+=\s+\[\]string\{options\.ImmutabilityPolicyExpiry\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}`); + diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go index c3d3c2607..478962de1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go @@ -6,7 +6,15 @@ package generated -import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "time" +) + +// used to convert times from UTC to GMT before sending across the wire +var gmt = time.FixedZone("GMT", 0) func (client *BlobClient) Endpoint() string { return client.endpoint @@ -15,3 +23,11 @@ func (client *BlobClient) Endpoint() string { func (client *BlobClient) Pipeline() runtime.Pipeline { return client.pl } + +func (client *BlobClient) DeleteCreateRequest(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + return client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) +} + +func (client *BlobClient) SetTierCreateRequest(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + return client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go index 221ca2951..d0ddb9064 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go @@ -29,8 +29,8 @@ type AppendBlobClient struct { } // NewAppendBlobClient creates a new instance of AppendBlobClient with the specified values. -// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// pl - the pipeline used for sending requests and handling responses. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - pl - the pipeline used for sending requests and handling responses. func NewAppendBlobClient(endpoint string, pl runtime.Pipeline) *AppendBlobClient { client := &AppendBlobClient{ endpoint: endpoint, @@ -43,17 +43,18 @@ func NewAppendBlobClient(endpoint string, pl runtime.Pipeline) *AppendBlobClient // Block operation is permitted only if the blob was created with x-ms-blob-type set to // AppendBlob. Append Block is supported only on version 2015-02-21 version or later. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// contentLength - The length of the request. 
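As an aside on the RawQuery directive above: Go's `url.Values.Encode` escapes spaces as `+`, and per the directive's description that disrupts how the service decodes the query, so the generated service client is patched to substitute `%20` after encoding. A minimal standalone sketch of the difference (the parameter name and filter value are illustrative only):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	// url.Values.Encode uses '+' for spaces; the directive rewrites the
	// generated code to replace it with "%20" before the query is sent.
	q := url.Values{}
	q.Set("where", "Name = 'photo 1'") // illustrative filter expression
	fmt.Println(q.Encode())
	// where=Name+%3D+%27photo+1%27
	fmt.Println(strings.Replace(q.Encode(), "+", "%20", -1))
	// where=Name%20%3D%20%27photo%201%27
}
```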
-// body - Initial data -// options - AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock -// method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *AppendBlobClient) AppendBlock(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientAppendBlockResponse, error) { +// - contentLength - The length of the request. +// - body - Initial data +// - options - AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock +// method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *AppendBlobClient) AppendBlock(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientAppendBlockResponse, error) { req, err := client.appendBlockCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return AppendBlobClientAppendBlockResponse{}, err @@ -69,7 +70,7 @@ func (client *AppendBlobClient) AppendBlock(ctx context.Context, contentLength i } // appendBlockCreateRequest creates the AppendBlock request. 
-func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -109,10 +110,10 @@ func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, co req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -205,20 +206,21 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( // the contents are read from a source url. The Append Block operation is permitted only if the blob was // created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// sourceURL - Specify a URL to the copy source. -// contentLength - The length of the request. -// options - AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL -// method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL -// method. 
-func (client *AppendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (AppendBlobClientAppendBlockFromURLResponse, error) { +// - sourceURL - Specify a URL to the copy source. +// - contentLength - The length of the request. +// - options - AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL +// method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +func (client *AppendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (AppendBlobClientAppendBlockFromURLResponse, error) { req, err := client.appendBlockFromURLCreateRequest(ctx, sourceURL, contentLength, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) if err != nil { return AppendBlobClientAppendBlockFromURLResponse{}, err @@ -234,7 +236,7 @@ func (client *AppendBlobClient) AppendBlockFromURL(ctx context.Context, sourceUR } // appendBlockFromURLCreateRequest creates the AppendBlockFromURL request. 
-func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { +func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -281,10 +283,10 @@ func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -296,10 +298,10 @@ func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} @@ -387,15 +389,16 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp // Create - The Create Append Blob operation creates a new append blob. // If the operation fails it returns an *azcore.ResponseError type. 
+// // Generated from API version 2020-10-02 -// contentLength - The length of the request. -// options - AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. -// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *AppendBlobClient) Create(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientCreateResponse, error) { +// - contentLength - The length of the request. +// - options - AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *AppendBlobClient) Create(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientCreateResponse, error) { req, err := client.createCreateRequest(ctx, contentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return AppendBlobClientCreateResponse{}, err @@ -411,7 +414,7 @@ func (client *AppendBlobClient) Create(ctx context.Context, contentLength int64, } // createCreateRequest creates the Create request. 
-func (client *AppendBlobClient) createCreateRequest(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *AppendBlobClient) createCreateRequest(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -440,7 +443,9 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content } if options != nil && options.Metadata != nil { for k, v := range options.Metadata { - req.Raw().Header["x-ms-meta-"+k] = []string{v} + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } } } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { @@ -462,10 +467,10 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -484,7 +489,7 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} } if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} @@ -554,12 +559,13 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen // Seal - The Seal operation seals the Append Blob to make it read-only. Seal is supported only on version 2019-12-12 version // or later. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
-// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock -// method. +// - options - AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock +// method. func (client *AppendBlobClient) Seal(ctx context.Context, options *AppendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (AppendBlobClientSealResponse, error) { req, err := client.sealCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions, appendPositionAccessConditions) if err != nil { @@ -595,10 +601,10 @@ func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options * req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go index 2a52dfef7..67402b5a5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go @@ -15,6 +15,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "net/http" "strconv" "strings" @@ -29,8 +30,8 @@ type BlobClient struct { } // NewBlobClient creates a new instance of BlobClient with the specified values. -// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// pl - the pipeline used for sending requests and handling responses. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - pl - the pipeline used for sending requests and handling responses. 
func NewBlobClient(endpoint string, pl runtime.Pipeline) *BlobClient { client := &BlobClient{ endpoint: endpoint, @@ -42,10 +43,11 @@ func NewBlobClient(endpoint string, pl runtime.Pipeline) *BlobClient { // AbortCopyFromURL - The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination // blob with zero length and full metadata. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// copyID - The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. -// options - BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - copyID - The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. +// - options - BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *BlobClient) AbortCopyFromURL(ctx context.Context, copyID string, options *BlobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (BlobClientAbortCopyFromURLResponse, error) { req, err := client.abortCopyFromURLCreateRequest(ctx, copyID, options, leaseAccessConditions) if err != nil { @@ -110,11 +112,15 @@ func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (B // AcquireLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *BlobClient) AcquireLease(ctx context.Context, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientAcquireLeaseResponse, error) { - req, err := client.acquireLeaseCreateRequest(ctx, options, modifiedAccessConditions) +// - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite +// lease can be between 15 and 60 seconds. A lease duration cannot be changed using +// renew or change. +// - options - BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) AcquireLease(ctx context.Context, duration int32, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientAcquireLeaseResponse, error) { + req, err := client.acquireLeaseCreateRequest(ctx, duration, options, modifiedAccessConditions) if err != nil { return BlobClientAcquireLeaseResponse{}, err } @@ -129,7 +135,7 @@ func (client *BlobClient) AcquireLease(ctx context.Context, options *BlobClientA } // acquireLeaseCreateRequest creates the AcquireLease request. 
-func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, duration int32, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -141,17 +147,15 @@ func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, options } req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} - if options != nil && options.Duration != nil { - req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(*options.Duration), 10)} - } + req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)} if options != nil && options.ProposedLeaseID != nil { req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -207,9 +211,10 @@ func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobC // BreakLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - options - BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
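The hunk above also changes the generated lease API: the lease duration moves from an optional field to a required `int32` argument, and the `x-ms-lease-duration` header is now always set. A rough sketch of the new call shape; the `generated` package is internal to the SDK, so this is illustrative pseudocode of the signature rather than importable code, with client construction and option values made up for the example:

```go
// Illustrative only: generated.BlobClient lives in an internal SDK package.
func acquireShortLease(ctx context.Context, client *generated.BlobClient, proposedID string) error {
	// duration is now a required argument: 15 to 60 seconds, or -1 for a lease
	// that never expires; it can no longer be omitted via the options struct.
	_, err := client.AcquireLease(ctx, 15,
		&generated.BlobClientAcquireLeaseOptions{ProposedLeaseID: &proposedID},
		nil, // *generated.ModifiedAccessConditions
	)
	return err
}
```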
func (client *BlobClient) BreakLease(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientBreakLeaseResponse, error) { req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { @@ -242,10 +247,10 @@ func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options * req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -306,13 +311,14 @@ func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobCli // ChangeLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// leaseID - Specifies the current lease ID on the resource. -// proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed -// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID -// string formats. -// options - BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - leaseID - Specifies the current lease ID on the resource. +// - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed +// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID +// string formats. +// - options - BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *BlobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *BlobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientChangeLeaseResponse, error) { req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) if err != nil { @@ -344,10 +350,10 @@ func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID req.Raw().Header["x-ms-lease-id"] = []string{leaseID} req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -404,15 +410,16 @@ func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobCl // CopyFromURL - The Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a response // until the copy is complete. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies -// a page blob snapshot. The value should be URL-encoded as it would appear in a request -// URI. The source blob must either be public or must be authenticated via a shared access signature. -// options - BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. -// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// - options - BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
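The recurring `.In(gmt).Format(time.RFC1123)` edits in these hunks rely on the `gmt = time.FixedZone("GMT", 0)` zone added in blob_client.go: formatting a `time.UTC` value with `time.RFC1123` prints a `UTC` suffix, while HTTP date headers such as `If-Modified-Since` must end in `GMT`. A small standalone illustration:

```go
package main

import (
	"fmt"
	"time"
)

// Same trick as the generated clients: a fixed zone named "GMT" at UTC+0.
var gmt = time.FixedZone("GMT", 0)

func main() {
	t := time.Date(2020, 10, 2, 12, 0, 0, 0, time.UTC)
	fmt.Println(t.Format(time.RFC1123))         // Fri, 02 Oct 2020 12:00:00 UTC
	fmt.Println(t.In(gmt).Format(time.RFC1123)) // Fri, 02 Oct 2020 12:00:00 GMT
}
```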
func (client *BlobClient) CopyFromURL(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCopyFromURLResponse, error) { req, err := client.copyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) if err != nil { @@ -442,17 +449,19 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour req.Raw().Header["x-ms-requires-sync"] = []string{"true"} if options != nil && options.Metadata != nil { for k, v := range options.Metadata { - req.Raw().Header["x-ms-meta-"+k] = []string{v} + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } } } if options != nil && options.Tier != nil { req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} @@ -461,10 +470,10 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -490,7 +499,7 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} } if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } if options != nil && 
options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} @@ -562,13 +571,14 @@ func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobCl // CreateSnapshot - The Create Snapshot operation creates a read-only snapshot of a blob // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -func (client *BlobClient) CreateSnapshot(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCreateSnapshotResponse, error) { +// - options - BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *BlobClient) CreateSnapshot(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCreateSnapshotResponse, error) { req, err := client.createSnapshotCreateRequest(ctx, options, cpkInfo, cpkScopeInfo, modifiedAccessConditions, leaseAccessConditions) if err != nil { return BlobClientCreateSnapshotResponse{}, err @@ -584,7 +594,7 @@ func (client *BlobClient) CreateSnapshot(ctx context.Context, options *BlobClien } // createSnapshotCreateRequest creates the CreateSnapshot request. 
-func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { +func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -597,7 +607,9 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio req.Raw().URL.RawQuery = reqQP.Encode() if options != nil && options.Metadata != nil { for k, v := range options.Metadata { - req.Raw().Header["x-ms-meta-"+k] = []string{v} + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } } } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { @@ -613,10 +625,10 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -695,10 +707,11 @@ func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (Blo // All other operations on a soft-deleted blob or snapshot causes the service to // return an HTTP status code of 404 (ResourceNotFound). // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - options - BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
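The metadata loops in these hunks reflect the SDK's move from `map[string]string` to `map[string]*string`: values are now pointers, and nil entries are skipped when the `x-ms-meta-*` headers are built. A standalone sketch of the new shape (key and value are made up for illustration):

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

func main() {
	metadata := map[string]*string{
		"owner":   to.Ptr("blobfuse"), // to.Ptr produces the *string values the SDK now expects
		"ignored": nil,                // nil values are dropped by the request builders
	}
	for k, v := range metadata {
		if v != nil { // mirrors the "if v != nil" guard added in the generated code
			fmt.Printf("x-ms-meta-%s: %s\n", k, *v)
		}
	}
}
```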
func (client *BlobClient) Delete(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDeleteResponse, error) { req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { @@ -741,10 +754,10 @@ func (client *BlobClient) deleteCreateRequest(ctx context.Context, options *Blob req.Raw().Header["x-ms-delete-snapshots"] = []string{string(*options.DeleteSnapshots)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -787,9 +800,10 @@ func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientD // DeleteImmutabilityPolicy - The Delete Immutability Policy operation deletes the immutability policy on the blob // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy -// method. +// - options - BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy +// method. func (client *BlobClient) DeleteImmutabilityPolicy(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (BlobClientDeleteImmutabilityPolicyResponse, error) { req, err := client.deleteImmutabilityPolicyCreateRequest(ctx, options) if err != nil { @@ -850,12 +864,13 @@ func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp // Download - The Download operation reads or downloads a blob from the system, including its metadata and properties. You // can also call Download to read a snapshot. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *BlobClient) Download(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDownloadResponse, error) { +// - options - BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. 
+// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) Download(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDownloadResponse, error) { req, err := client.downloadCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) if err != nil { return BlobClientDownloadResponse{}, err @@ -871,7 +886,7 @@ func (client *BlobClient) Download(ctx context.Context, options *BlobClientDownl } // downloadCreateRequest creates the Download request. -func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err @@ -910,10 +925,10 @@ func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *Bl req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -945,9 +960,9 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien for hh := range resp.Header { if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { if result.Metadata == nil { - result.Metadata = map[string]string{} + result.Metadata = map[string]*string{} } - result.Metadata[hh[len("x-ms-meta-"):]] = resp.Header.Get(hh) + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) } } if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { @@ -956,9 +971,9 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien for hh := range resp.Header { if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { if result.Metadata == nil { - result.Metadata = map[string]string{} + result.Metadata = map[string]*string{} } - result.Metadata[hh[len("x-ms-or-"):]] = resp.Header.Get(hh) + result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) } } if 
val := resp.Header.Get("Content-Length"); val != "" { @@ -1147,8 +1162,9 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. +// - options - BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. func (client *BlobClient) GetAccountInfo(ctx context.Context, options *BlobClientGetAccountInfoOptions) (BlobClientGetAccountInfoResponse, error) { req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { @@ -1210,12 +1226,13 @@ func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (Blo // GetProperties - The Get Properties operation returns all user-defined metadata, standard HTTP properties, and system properties // for the blob. It does not return the content of the blob. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *BlobClient) GetProperties(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientGetPropertiesResponse, error) { +// - options - BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) GetProperties(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientGetPropertiesResponse, error) { req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) if err != nil { return BlobClientGetPropertiesResponse{}, err @@ -1231,7 +1248,7 @@ func (client *BlobClient) GetProperties(ctx context.Context, options *BlobClient } // getPropertiesCreateRequest creates the GetProperties request. 
-func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodHead, client.endpoint) if err != nil { return nil, err @@ -1260,10 +1277,10 @@ func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, option req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -1302,9 +1319,9 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob for hh := range resp.Header { if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { if result.Metadata == nil { - result.Metadata = map[string]string{} + result.Metadata = map[string]*string{} } - result.Metadata[hh[len("x-ms-meta-"):]] = resp.Header.Get(hh) + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) } } if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { @@ -1313,9 +1330,9 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob for hh := range resp.Header { if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { if result.Metadata == nil { - result.Metadata = map[string]string{} + result.Metadata = map[string]*string{} } - result.Metadata[hh[len("x-ms-or-"):]] = resp.Header.Get(hh) + result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) } } if val := resp.Header.Get("x-ms-blob-type"); val != "" { @@ -1524,10 +1541,11 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob // GetTags - The Get Tags operation enables users to get the tags associated with a blob. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - options - BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. 
+// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *BlobClient) GetTags(ctx context.Context, options *BlobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientGetTagsResponse, error) { req, err := client.getTagsCreateRequest(ctx, options, modifiedAccessConditions, leaseAccessConditions) if err != nil { @@ -1602,12 +1620,13 @@ func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClient // Query - The Query operation enables users to select/project on blob data by providing simple query expressions. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *BlobClient) Query(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientQueryResponse, error) { +// - options - BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) Query(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientQueryResponse, error) { req, err := client.queryCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) if err != nil { return BlobClientQueryResponse{}, err @@ -1623,7 +1642,7 @@ func (client *BlobClient) Query(ctx context.Context, options *BlobClientQueryOpt } // queryCreateRequest creates the Query request. 
-func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) if err != nil { return nil, err @@ -1651,10 +1670,10 @@ func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobC req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -1689,9 +1708,9 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu for hh := range resp.Header { if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { if result.Metadata == nil { - result.Metadata = map[string]string{} + result.Metadata = map[string]*string{} } - result.Metadata[hh[len("x-ms-meta-"):]] = resp.Header.Get(hh) + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) } } if val := resp.Header.Get("Content-Length"); val != "" { @@ -1829,10 +1848,11 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu // ReleaseLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// leaseID - Specifies the current lease ID on the resource. -// options - BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - leaseID - Specifies the current lease ID on the resource. +// - options - BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
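// Illustrative sketch, not part of the patch: consuming the Metadata map, which the
// response handlers above now populate as map[string]*string via to.Ptr instead of
// map[string]string. Written as if it sat next to these generated clients; client
// construction and error handling are assumed.
package generated

import (
	"context"
	"fmt"
)

func printBlobMetadata(ctx context.Context, client *BlobClient) error {
	// GetProperties with all optional parameter groups omitted.
	resp, err := client.GetProperties(ctx, nil, nil, nil, nil)
	if err != nil {
		return err
	}
	for k, v := range resp.Metadata {
		if v != nil { // values are *string after this change and may be nil
			fmt.Printf("%s=%s\n", k, *v)
		}
	}
	return nil
}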
func (client *BlobClient) ReleaseLease(ctx context.Context, leaseID string, options *BlobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientReleaseLeaseResponse, error) { req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { @@ -1863,10 +1883,10 @@ func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID req.Raw().Header["x-ms-lease-action"] = []string{"release"} req.Raw().Header["x-ms-lease-id"] = []string{leaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -1919,10 +1939,11 @@ func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobC // RenewLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// leaseID - Specifies the current lease ID on the resource. -// options - BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - leaseID - Specifies the current lease ID on the resource. +// - options - BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *BlobClient) RenewLease(ctx context.Context, leaseID string, options *BlobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientRenewLeaseResponse, error) { req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { @@ -1953,10 +1974,10 @@ func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID s req.Raw().Header["x-ms-lease-action"] = []string{"renew"} req.Raw().Header["x-ms-lease-id"] = []string{leaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -2012,9 +2033,10 @@ func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobCli // SetExpiry - Sets the time a blob will expire and be deleted. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// expiryOptions - Required. Indicates mode of the expiry time -// options - BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. +// - expiryOptions - Required. Indicates mode of the expiry time +// - options - BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. func (client *BlobClient) SetExpiry(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (BlobClientSetExpiryResponse, error) { req, err := client.setExpiryCreateRequest(ctx, expiryOptions, options) if err != nil { @@ -2088,11 +2110,12 @@ func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClie // SetHTTPHeaders - The Set HTTP Headers operation sets system properties on the blob // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. -// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - options - BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *BlobClient) SetHTTPHeaders(ctx context.Context, options *BlobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetHTTPHeadersResponse, error) { req, err := client.setHTTPHeadersCreateRequest(ctx, options, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions) if err != nil { @@ -2139,10 +2162,10 @@ func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, optio req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -2205,10 +2228,11 @@ func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (Blo // SetImmutabilityPolicy - The Set Immutability Policy operation sets the immutability policy on the blob // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - options - BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *BlobClient) SetImmutabilityPolicy(ctx context.Context, options *BlobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetImmutabilityPolicyResponse, error) { req, err := client.setImmutabilityPolicyCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { @@ -2241,10 +2265,10 @@ func (client *BlobClient) setImmutabilityPolicyCreateRequest(ctx context.Context req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} @@ -2287,9 +2311,10 @@ func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Respons // SetLegalHold - The Set Legal Hold operation sets a legal hold on the blob. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// legalHold - Specified if a legal hold should be set on the blob. -// options - BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. +// - legalHold - Specified if a legal hold should be set on the blob. +// - options - BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. func (client *BlobClient) SetLegalHold(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (BlobClientSetLegalHoldResponse, error) { req, err := client.setLegalHoldCreateRequest(ctx, legalHold, options) if err != nil { @@ -2358,13 +2383,14 @@ func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobC // SetMetadata - The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more name-value // pairs // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
-func (client *BlobClient) SetMetadata(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetMetadataResponse, error) { +// - options - BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) SetMetadata(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetMetadataResponse, error) { req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return BlobClientSetMetadataResponse{}, err @@ -2380,7 +2406,7 @@ func (client *BlobClient) SetMetadata(ctx context.Context, options *BlobClientSe } // setMetadataCreateRequest creates the SetMetadata request. -func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -2393,7 +2419,9 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options req.Raw().URL.RawQuery = reqQP.Encode() if options != nil && options.Metadata != nil { for k, v := range options.Metadata { - req.Raw().Header["x-ms-meta-"+k] = []string{v} + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } } } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { @@ -2412,10 +2440,10 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = 
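// Illustrative sketch, not part of the patch: populating metadata for the updated
// SetMetadata above. Values are now *string and setMetadataCreateRequest emits only
// non-nil entries as x-ms-meta-* headers. to.Ptr comes from
// github.com/Azure/azure-sdk-for-go/sdk/azcore/to; the client, context, and metadata
// values here are assumptions for illustration.
package generated

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

func tagOwnership(ctx context.Context, client *BlobClient) error {
	opts := &BlobClientSetMetadataOptions{
		Metadata: map[string]*string{
			"project": to.Ptr("blobfuse"), // becomes x-ms-meta-project
			"owner":   nil,                // nil entries are skipped by the request builder
		},
	}
	_, err := client.SetMetadata(ctx, opts, nil, nil, nil, nil)
	return err
}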
[]string{string(*modifiedAccessConditions.IfMatch)} @@ -2484,11 +2512,12 @@ func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobCl // SetTags - The Set Tags operation enables users to set tags on a blob. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// tags - Blob tags -// options - BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - tags - Blob tags +// - options - BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *BlobClient) SetTags(ctx context.Context, tags BlobTags, options *BlobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientSetTagsResponse, error) { req, err := client.setTagsCreateRequest(ctx, tags, options, modifiedAccessConditions, leaseAccessConditions) if err != nil { @@ -2566,11 +2595,12 @@ func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClient // premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive // storage type. This operation does not update the blob's ETag. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// tier - Indicates the tier to be set on the blob. -// options - BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - tier - Indicates the tier to be set on the blob. +// - options - BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) SetTier(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetTierResponse, error) { req, err := client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { @@ -2639,15 +2669,16 @@ func (client *BlobClient) setTierHandleResponse(resp *http.Response) (BlobClient // StartCopyFromURL - The Start Copy From URL operation copies a blob or an internet resource to a new blob. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// copySource - Specifies the name of the source page blob snapshot. 
This value is a URL of up to 2 KB in length that specifies -// a page blob snapshot. The value should be URL-encoded as it would appear in a request -// URI. The source blob must either be public or must be authenticated via a shared access signature. -// options - BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method. -// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// - options - BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *BlobClient) StartCopyFromURL(ctx context.Context, copySource string, options *BlobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientStartCopyFromURLResponse, error) { req, err := client.startCopyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) if err != nil { @@ -2676,7 +2707,9 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop req.Raw().URL.RawQuery = reqQP.Encode() if options != nil && options.Metadata != nil { for k, v := range options.Metadata { - req.Raw().Header["x-ms-meta-"+k] = []string{v} + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } } } if options != nil && options.Tier != nil { @@ -2686,10 +2719,10 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if 
sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} @@ -2701,10 +2734,10 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -2730,7 +2763,7 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop req.Raw().Header["x-ms-seal-blob"] = []string{strconv.FormatBool(*options.SealBlob)} } if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} @@ -2785,8 +2818,9 @@ func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (B // Undelete - Undelete a blob that was previously soft deleted // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. +// - options - BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. func (client *BlobClient) Undelete(ctx context.Context, options *BlobClientUndeleteOptions) (BlobClientUndeleteResponse, error) { req, err := client.undeleteCreateRequest(ctx, options) if err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go index a1391befc..7e81d1540 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go @@ -29,8 +29,8 @@ type BlockBlobClient struct { } // NewBlockBlobClient creates a new instance of BlockBlobClient with the specified values. -// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// pl - the pipeline used for sending requests and handling responses. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. 
+// - pl - the pipeline used for sending requests and handling responses. func NewBlockBlobClient(endpoint string, pl runtime.Pipeline) *BlockBlobClient { client := &BlockBlobClient{ endpoint: endpoint, @@ -47,16 +47,17 @@ func NewBlockBlobClient(endpoint string, pl runtime.Pipeline) *BlockBlobClient { // the most recently uploaded version of the block, whichever list it may // belong to. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// blocks - Blob Blocks. -// options - BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList -// method. -// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *BlockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientCommitBlockListResponse, error) { +// - blocks - Blob Blocks. +// - options - BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList +// method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientCommitBlockListResponse, error) { req, err := client.commitBlockListCreateRequest(ctx, blocks, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err @@ -72,7 +73,7 @@ func (client *BlockBlobClient) CommitBlockList(ctx context.Context, blocks Block } // commitBlockListCreateRequest creates the CommitBlockList request. 
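// Illustrative sketch, not part of the patch: the CpkInfo/CpkScopeInfo renames above only
// change type names on the caller side; code that does not use customer-provided keys can
// keep passing nil. Block IDs are omitted here, so this is a shape-only example.
package generated

import "context"

func commitWithoutCPK(ctx context.Context, client *BlockBlobClient, blocks BlockLookupList) error {
	var cpk *CPKInfo           // was *CpkInfo before this change
	var cpkScope *CPKScopeInfo // was *CpkScopeInfo
	_, err := client.CommitBlockList(ctx, blocks, nil, nil, nil, cpk, cpkScope, nil)
	return err
}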
-func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -106,7 +107,9 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, } if options != nil && options.Metadata != nil { for k, v := range options.Metadata { - req.Raw().Header["x-ms-meta-"+k] = []string{v} + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } } } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { @@ -131,10 +134,10 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -153,7 +156,7 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} } if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} @@ -229,11 +232,12 @@ func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response // GetBlockList - The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together. -// options - BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. 
-// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together. +// - options - BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, options *BlockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientGetBlockListResponse, error) { req, err := client.getBlockListCreateRequest(ctx, listType, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { @@ -330,21 +334,22 @@ func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) ( // partial updates to a block blob’s contents using a source URL, use the Put // Block from URL API in conjunction with Put Block List. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// contentLength - The length of the request. -// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies -// a page blob snapshot. The value should be URL-encoded as it would appear in a request -// URI. The source blob must either be public or must be authenticated via a shared access signature. -// options - BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL -// method. -// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL -// method. -func (client *BlockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientPutBlobFromURLResponse, error) { +// - contentLength - The length of the request. +// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. 
The source blob must either be public or must be authenticated via a shared access signature. +// - options - BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL +// method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +func (client *BlockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientPutBlobFromURLResponse, error) { req, err := client.putBlobFromURLCreateRequest(ctx, contentLength, copySource, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, sourceModifiedAccessConditions) if err != nil { return BlockBlobClientPutBlobFromURLResponse{}, err @@ -360,7 +365,7 @@ func (client *BlockBlobClient) PutBlobFromURL(ctx context.Context, contentLength } // putBlobFromURLCreateRequest creates the PutBlobFromURL request. 
-func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { +func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -392,7 +397,9 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, } if options != nil && options.Metadata != nil { for k, v := range options.Metadata { - req.Raw().Header["x-ms-meta-"+k] = []string{v} + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } } } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { @@ -417,10 +424,10 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -432,10 +439,10 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} @@ 
-524,17 +531,18 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) // StageBlock - The Stage Block operation creates a new block to be committed as part of a blob // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal -// to 64 bytes in size. For a given blob, the length of the value specified for the blockid -// parameter must be the same size for each block. -// contentLength - The length of the request. -// body - Initial data -// options - BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -func (client *BlockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo) (BlockBlobClientStageBlockResponse, error) { +// - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal +// to 64 bytes in size. For a given blob, the length of the value specified for the blockid +// parameter must be the same size for each block. +// - contentLength - The length of the request. +// - body - Initial data +// - options - BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +func (client *BlockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo) (BlockBlobClientStageBlockResponse, error) { req, err := client.stageBlockCreateRequest(ctx, blockID, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo) if err != nil { return BlockBlobClientStageBlockResponse{}, err @@ -550,7 +558,7 @@ func (client *BlockBlobClient) StageBlock(ctx context.Context, blockID string, c } // stageBlockCreateRequest creates the StageBlock request. 
-func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo) (*policy.Request, error) { +func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -644,20 +652,21 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl // StageBlockFromURL - The Stage Block operation creates a new block to be committed as part of a blob where the contents // are read from a URL. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal -// to 64 bytes in size. For a given blob, the length of the value specified for the blockid -// parameter must be the same size for each block. -// contentLength - The length of the request. -// sourceURL - Specify a URL to the copy source. -// options - BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL -// method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL -// method. -func (client *BlockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientStageBlockFromURLResponse, error) { +// - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal +// to 64 bytes in size. For a given blob, the length of the value specified for the blockid +// parameter must be the same size for each block. +// - contentLength - The length of the request. +// - sourceURL - Specify a URL to the copy source. +// - options - BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL +// method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. 
+func (client *BlockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientStageBlockFromURLResponse, error) { req, err := client.stageBlockFromURLCreateRequest(ctx, blockID, contentLength, sourceURL, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions) if err != nil { return BlockBlobClientStageBlockFromURLResponse{}, err @@ -673,7 +682,7 @@ func (client *BlockBlobClient) StageBlockFromURL(ctx context.Context, blockID st } // stageBlockFromURLCreateRequest creates the StageBlockFromURL request. -func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { +func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -712,10 +721,10 @@ func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} @@ -788,16 +797,17 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon // Blob; the content of the existing blob is overwritten with the content of the new blob. To perform a partial update of // the content of a block blob, use the Put Block List operation. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// contentLength - The length of the request. -// body - Initial data -// options - BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. -// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. 
-// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *BlockBlobClient) Upload(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientUploadResponse, error) { +// - contentLength - The length of the request. +// - body - Initial data +// - options - BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlockBlobClient) Upload(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientUploadResponse, error) { req, err := client.uploadCreateRequest(ctx, contentLength, body, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return BlockBlobClientUploadResponse{}, err @@ -813,7 +823,7 @@ func (client *BlockBlobClient) Upload(ctx context.Context, contentLength int64, } // uploadCreateRequest creates the Upload request. 
-func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -845,7 +855,9 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL } if options != nil && options.Metadata != nil { for k, v := range options.Metadata { - req.Raw().Header["x-ms-meta-"+k] = []string{v} + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } } } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { @@ -870,10 +882,10 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -892,7 +904,7 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} } if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go index 31e671f14..5fcac5135 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go @@ -16,6 +16,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + 
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "io" "net/http" "strconv" @@ -31,8 +32,8 @@ type ContainerClient struct { } // NewContainerClient creates a new instance of ContainerClient with the specified values. -// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// pl - the pipeline used for sending requests and handling responses. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - pl - the pipeline used for sending requests and handling responses. func NewContainerClient(endpoint string, pl runtime.Pipeline) *ContainerClient { client := &ContainerClient{ endpoint: endpoint, @@ -44,11 +45,15 @@ func NewContainerClient(endpoint string, pl runtime.Pipeline) *ContainerClient { // AcquireLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *ContainerClient) AcquireLease(ctx context.Context, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientAcquireLeaseResponse, error) { - req, err := client.acquireLeaseCreateRequest(ctx, options, modifiedAccessConditions) +// - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite +// lease can be between 15 and 60 seconds. A lease duration cannot be changed using +// renew or change. +// - options - ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) AcquireLease(ctx context.Context, duration int32, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientAcquireLeaseResponse, error) { + req, err := client.acquireLeaseCreateRequest(ctx, duration, options, modifiedAccessConditions) if err != nil { return ContainerClientAcquireLeaseResponse{}, err } @@ -63,7 +68,7 @@ func (client *ContainerClient) AcquireLease(ctx context.Context, options *Contai } // acquireLeaseCreateRequest creates the AcquireLease request. 
-func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, duration int32, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -76,17 +81,15 @@ func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, op } req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} - if options != nil && options.Duration != nil { - req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(*options.Duration), 10)} - } + req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)} if options != nil && options.ProposedLeaseID != nil { req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { @@ -134,9 +137,10 @@ func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) ( // BreakLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - options - ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *ContainerClient) BreakLease(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientBreakLeaseResponse, error) { req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { @@ -170,10 +174,10 @@ func (client *ContainerClient) breakLeaseCreateRequest(ctx context.Context, opti req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { @@ -226,13 +230,14 @@ func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (Co // ChangeLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// leaseID - Specifies the current lease ID on the resource. -// proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed -// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID -// string formats. -// options - ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - leaseID - Specifies the current lease ID on the resource. +// - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed +// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID +// string formats. +// - options - ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *ContainerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *ContainerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientChangeLeaseResponse, error) { req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) if err != nil { @@ -265,10 +270,10 @@ func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, lea req.Raw().Header["x-ms-lease-id"] = []string{leaseID} req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { @@ -316,11 +321,12 @@ func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (C // Create - creates a new container under the specified account. If the container with the same name already exists, the operation // fails // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. -// ContainerCpkScopeInfo - ContainerCpkScopeInfo contains a group of parameters for the ContainerClient.Create method. -func (client *ContainerClient) Create(ctx context.Context, options *ContainerClientCreateOptions, containerCpkScopeInfo *ContainerCpkScopeInfo) (ContainerClientCreateResponse, error) { - req, err := client.createCreateRequest(ctx, options, containerCpkScopeInfo) +// - options - ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. +// - ContainerCPKScopeInfo - ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method. +func (client *ContainerClient) Create(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (ContainerClientCreateResponse, error) { + req, err := client.createCreateRequest(ctx, options, containerCPKScopeInfo) if err != nil { return ContainerClientCreateResponse{}, err } @@ -335,7 +341,7 @@ func (client *ContainerClient) Create(ctx context.Context, options *ContainerCli } // createCreateRequest creates the Create request. 
-func (client *ContainerClient) createCreateRequest(ctx context.Context, options *ContainerClientCreateOptions, containerCpkScopeInfo *ContainerCpkScopeInfo) (*policy.Request, error) { +func (client *ContainerClient) createCreateRequest(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -348,7 +354,9 @@ func (client *ContainerClient) createCreateRequest(ctx context.Context, options req.Raw().URL.RawQuery = reqQP.Encode() if options != nil && options.Metadata != nil { for k, v := range options.Metadata { - req.Raw().Header["x-ms-meta-"+k] = []string{v} + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } } } if options != nil && options.Access != nil { @@ -358,11 +366,11 @@ func (client *ContainerClient) createCreateRequest(ctx context.Context, options if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if containerCpkScopeInfo != nil && containerCpkScopeInfo.DefaultEncryptionScope != nil { - req.Raw().Header["x-ms-default-encryption-scope"] = []string{*containerCpkScopeInfo.DefaultEncryptionScope} + if containerCPKScopeInfo != nil && containerCPKScopeInfo.DefaultEncryptionScope != nil { + req.Raw().Header["x-ms-default-encryption-scope"] = []string{*containerCPKScopeInfo.DefaultEncryptionScope} } - if containerCpkScopeInfo != nil && containerCpkScopeInfo.PreventEncryptionScopeOverride != nil { - req.Raw().Header["x-ms-deny-encryption-scope-override"] = []string{strconv.FormatBool(*containerCpkScopeInfo.PreventEncryptionScopeOverride)} + if containerCPKScopeInfo != nil && containerCPKScopeInfo.PreventEncryptionScopeOverride != nil { + req.Raw().Header["x-ms-deny-encryption-scope-override"] = []string{strconv.FormatBool(*containerCPKScopeInfo.PreventEncryptionScopeOverride)} } req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil @@ -403,10 +411,11 @@ func (client *ContainerClient) createHandleResponse(resp *http.Response) (Contai // Delete - operation marks the specified container for deletion. The container and any blobs contained within it are later // deleted during garbage collection // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - options - ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *ContainerClient) Delete(ctx context.Context, options *ContainerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientDeleteResponse, error) { req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { @@ -438,10 +447,10 @@ func (client *ContainerClient) deleteCreateRequest(ctx context.Context, options req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { @@ -476,10 +485,11 @@ func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (Contai // GetAccessPolicy - gets the permissions for the specified container. The permissions indicate whether container data may // be accessed publicly. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - options - ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy +// method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *ContainerClient) GetAccessPolicy(ctx context.Context, options *ContainerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetAccessPolicyResponse, error) { req, err := client.getAccessPolicyCreateRequest(ctx, options, leaseAccessConditions) if err != nil { @@ -559,9 +569,10 @@ func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo -// method. +// - options - ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo +// method. func (client *ContainerClient) GetAccountInfo(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (ContainerClientGetAccountInfoResponse, error) { req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { @@ -623,9 +634,10 @@ func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) // GetProperties - returns all user-defined metadata and system properties for the specified container. 
The data returned // does not include the container's list of blobs // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - options - ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *ContainerClient) GetProperties(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetPropertiesResponse, error) { req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions) if err != nil { @@ -670,9 +682,9 @@ func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) for hh := range resp.Header { if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { if result.Metadata == nil { - result.Metadata = map[string]string{} + result.Metadata = map[string]*string{} } - result.Metadata[hh[len("x-ms-meta-"):]] = resp.Header.Get(hh) + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) } } if val := resp.Header.Get("ETag"); val != "" { @@ -748,10 +760,11 @@ func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) } // NewListBlobFlatSegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container -// If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.ListBlobFlatSegment -// method. +// - options - ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager +// method. +// // listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request. func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Context, options *ContainerClientListBlobFlatSegmentOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) @@ -814,13 +827,13 @@ func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Resp } // NewListBlobHierarchySegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container -// If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// delimiter - When the request includes this parameter, the operation returns a BlobPrefix element in the response body that -// acts as a placeholder for all blobs whose names begin with the same substring up to the -// appearance of the delimiter character. The delimiter may be a single character or a string. -// options - ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.ListBlobHierarchySegment -// method. 
+// - delimiter - When the request includes this parameter, the operation returns a BlobPrefix element in the response body that +// acts as a placeholder for all blobs whose names begin with the same substring up to the +// appearance of the delimiter character. The delimiter may be a single character or a string. +// - options - ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.NewListBlobHierarchySegmentPager +// method. func (client *ContainerClient) NewListBlobHierarchySegmentPager(delimiter string, options *ContainerClientListBlobHierarchySegmentOptions) *runtime.Pager[ContainerClientListBlobHierarchySegmentResponse] { return runtime.NewPager(runtime.PagingHandler[ContainerClientListBlobHierarchySegmentResponse]{ More: func(page ContainerClientListBlobHierarchySegmentResponse) bool { @@ -914,10 +927,11 @@ func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http // ReleaseLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// leaseID - Specifies the current lease ID on the resource. -// options - ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - leaseID - Specifies the current lease ID on the resource. +// - options - ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) ReleaseLease(ctx context.Context, leaseID string, options *ContainerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientReleaseLeaseResponse, error) { req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { @@ -949,10 +963,10 @@ func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, le req.Raw().Header["x-ms-lease-action"] = []string{"release"} req.Raw().Header["x-ms-lease-id"] = []string{leaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { @@ -996,9 +1010,10 @@ func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) ( // Rename - Renames an existing container. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// sourceContainerName - Required. Specifies the name of the container to rename. 
-// options - ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. +// - sourceContainerName - Required. Specifies the name of the container to rename. +// - options - ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. func (client *ContainerClient) Rename(ctx context.Context, sourceContainerName string, options *ContainerClientRenameOptions) (ContainerClientRenameResponse, error) { req, err := client.renameCreateRequest(ctx, sourceContainerName, options) if err != nil { @@ -1064,10 +1079,11 @@ func (client *ContainerClient) renameHandleResponse(resp *http.Response) (Contai // RenewLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// leaseID - Specifies the current lease ID on the resource. -// options - ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - leaseID - Specifies the current lease ID on the resource. +// - options - ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) RenewLease(ctx context.Context, leaseID string, options *ContainerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientRenewLeaseResponse, error) { req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { @@ -1099,10 +1115,10 @@ func (client *ContainerClient) renewLeaseCreateRequest(ctx context.Context, leas req.Raw().Header["x-ms-lease-action"] = []string{"renew"} req.Raw().Header["x-ms-lease-id"] = []string{leaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { @@ -1149,8 +1165,9 @@ func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (Co // Restore - Restores a previously-deleted container. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. +// - options - ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. 
func (client *ContainerClient) Restore(ctx context.Context, options *ContainerClientRestoreOptions) (ContainerClientRestoreResponse, error) { req, err := client.restoreCreateRequest(ctx, options) if err != nil { @@ -1218,12 +1235,13 @@ func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (Conta // SetAccessPolicy - sets the permissions for the specified container. The permissions indicate whether blobs in a container // may be accessed publicly. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// containerACL - the acls for the container -// options - ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - containerACL - the acls for the container +// - options - ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy +// method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) SetAccessPolicy(ctx context.Context, containerACL []*SignedIdentifier, options *ContainerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetAccessPolicyResponse, error) { req, err := client.setAccessPolicyCreateRequest(ctx, containerACL, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { @@ -1259,10 +1277,10 @@ func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context, req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { @@ -1310,10 +1328,11 @@ func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response // SetMetadata - operation sets one or more user-defined name-value pairs for the specified container. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
-// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - options - ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) SetMetadata(ctx context.Context, options *ContainerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetMetadataResponse, error) { req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { @@ -1347,11 +1366,13 @@ func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, opt } if options != nil && options.Metadata != nil { for k, v := range options.Metadata { - req.Raw().Header["x-ms-meta-"+k] = []string{v} + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } } } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { @@ -1395,12 +1416,13 @@ func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (C // SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// contentLength - The length of the request. -// multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header -// value: multipart/mixed; boundary=batch_ -// body - Initial data -// options - ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. +// - contentLength - The length of the request. +// - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header +// value: multipart/mixed; boundary=batch_ +// - body - Initial data +// - options - ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. func (client *ContainerClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ContainerClientSubmitBatchOptions) (ContainerClientSubmitBatchResponse, error) { req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) if err != nil { @@ -1437,7 +1459,7 @@ func (client *ContainerClient) submitBatchCreateRequest(ctx context.Context, con req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, "application/xml") + return req, req.SetBody(body, multipartContentType) } // submitBatchHandleResponse handles the SubmitBatch response. 
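The zz_container_client.go hunks above change two details that calling code sees directly: ContainerClient.AcquireLease now takes the lease duration as a required int32 argument instead of reading it from options.Duration, and metadata is passed as map[string]*string rather than map[string]string, with to.Ptr from azcore/to (newly imported above) used to build the pointer values. The sketch below is not part of the patch; it only illustrates those two signature changes with placeholder endpoint, pipeline, and metadata values, and it names the internal generated package directly even though real consumers would go through the public azblob wrappers.

```go
// Illustrative sketch only, not part of this patch. The endpoint, pipeline, and
// metadata values are placeholders, and real consumers would use the public
// azblob wrappers rather than importing this internal generated package.
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)

func createAndLease(ctx context.Context, pl runtime.Pipeline) error {
	client := generated.NewContainerClient("https://myaccount.blob.core.windows.net/mycontainer", pl)

	// Metadata values are now *string, so literal strings are wrapped with to.Ptr.
	opts := &generated.ContainerClientCreateOptions{
		Metadata: map[string]*string{"owner": to.Ptr("blob-csi-driver")},
	}
	if _, err := client.Create(ctx, opts, nil); err != nil {
		return err
	}

	// The lease duration moved from options.Duration to a required int32 argument.
	_, err := client.AcquireLease(ctx, 60, nil, nil)
	return err
}
```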
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go index de8b72808..022807f53 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go @@ -77,7 +77,7 @@ type AppendBlobClientCreateOptions struct { // is not copied from the source blob or file. Note that beginning with // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, // Blobs, and Metadata for more information. - Metadata map[string]string + Metadata map[string]*string // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -136,10 +136,6 @@ type BlobClientAbortCopyFromURLOptions struct { // BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. type BlobClientAcquireLeaseOptions struct { - // Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease - // can be between 15 and 60 seconds. A lease duration cannot be changed using - // renew or change. - Duration *int32 // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is // not in the correct format. See Guid Constructor (String) for a list of valid GUID // string formats. @@ -197,7 +193,7 @@ type BlobClientCopyFromURLOptions struct { // is not copied from the source blob or file. Note that beginning with // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, // Blobs, and Metadata for more information. - Metadata map[string]string + Metadata map[string]*string // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -218,7 +214,7 @@ type BlobClientCreateSnapshotOptions struct { // is not copied from the source blob or file. Note that beginning with // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, // Blobs, and Metadata for more information. - Metadata map[string]string + Metadata map[string]*string // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -415,7 +411,7 @@ type BlobClientSetMetadataOptions struct { // is not copied from the source blob or file. Note that beginning with // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, // Blobs, and Metadata for more information. - Metadata map[string]string + Metadata map[string]*string // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -476,7 +472,7 @@ type BlobClientStartCopyFromURLOptions struct { // is not copied from the source blob or file. Note that beginning with // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. 
See Naming and Referencing Containers, // Blobs, and Metadata for more information. - Metadata map[string]string + Metadata map[string]*string // Optional: Indicates the priority with which to rehydrate an archived blob. RehydratePriority *RehydratePriority // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage @@ -656,7 +652,7 @@ type BlockBlobClientCommitBlockListOptions struct { // is not copied from the source blob or file. Note that beginning with // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, // Blobs, and Metadata for more information. - Metadata map[string]string + Metadata map[string]*string // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -699,7 +695,7 @@ type BlockBlobClientPutBlobFromURLOptions struct { // is not copied from the source blob or file. Note that beginning with // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, // Blobs, and Metadata for more information. - Metadata map[string]string + Metadata map[string]*string // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -762,7 +758,7 @@ type BlockBlobClientUploadOptions struct { // is not copied from the source blob or file. Note that beginning with // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, // Blobs, and Metadata for more information. - Metadata map[string]string + Metadata map[string]*string // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -796,10 +792,6 @@ type ClearRange struct { // ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. type ContainerClientAcquireLeaseOptions struct { - // Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease - // can be between 15 and 60 seconds. A lease duration cannot be changed using - // renew or change. - Duration *int32 // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is // not in the correct format. See Guid Constructor (String) for a list of valid GUID // string formats. @@ -849,7 +841,7 @@ type ContainerClientCreateOptions struct { // is not copied from the source blob or file. Note that beginning with // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, // Blobs, and Metadata for more information. - Metadata map[string]string + Metadata map[string]*string // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. 
RequestID *string @@ -893,7 +885,7 @@ type ContainerClientGetPropertiesOptions struct { Timeout *int32 } -// ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.ListBlobFlatSegment +// ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager // method. type ContainerClientListBlobFlatSegmentOptions struct { // Include this parameter to specify one or more datasets to include in the response. @@ -920,7 +912,7 @@ type ContainerClientListBlobFlatSegmentOptions struct { Timeout *int32 } -// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.ListBlobHierarchySegment +// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.NewListBlobHierarchySegmentPager // method. type ContainerClientListBlobHierarchySegmentOptions struct { // Include this parameter to specify one or more datasets to include in the response. @@ -1013,7 +1005,7 @@ type ContainerClientSetMetadataOptions struct { // is not copied from the source blob or file. Note that beginning with // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, // Blobs, and Metadata for more information. - Metadata map[string]string + Metadata map[string]*string // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -1032,8 +1024,8 @@ type ContainerClientSubmitBatchOptions struct { Timeout *int32 } -// ContainerCpkScopeInfo contains a group of parameters for the ContainerClient.Create method. -type ContainerCpkScopeInfo struct { +// ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method. +type ContainerCPKScopeInfo struct { // Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all // future writes. DefaultEncryptionScope *string @@ -1078,11 +1070,11 @@ type ContainerProperties struct { RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` } -// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another +// CORSRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another // domain. Web browsers implement a security restriction known as same-origin policy that // prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin // domain) to call APIs in another domain -type CorsRule struct { +type CORSRule struct { // REQUIRED; the request headers that the origin domain may specify on the CORS request. AllowedHeaders *string `xml:"AllowedHeaders"` @@ -1103,8 +1095,8 @@ type CorsRule struct { MaxAgeInSeconds *int32 `xml:"MaxAgeInSeconds"` } -// CpkInfo contains a group of parameters for the BlobClient.Download method. -type CpkInfo struct { +// CPKInfo contains a group of parameters for the BlobClient.Download method. +type CPKInfo struct { // The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided // if the x-ms-encryption-key header is provided. 
EncryptionAlgorithm *EncryptionAlgorithmType @@ -1116,8 +1108,8 @@ type CpkInfo struct { EncryptionKeySHA256 *string } -// CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -type CpkScopeInfo struct { +// CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +type CPKScopeInfo struct { // Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided // in the request. If not specified, encryption is performed with the default // account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. @@ -1333,7 +1325,7 @@ type PageBlobClientCreateOptions struct { // is not copied from the source blob or file. Note that beginning with // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, // Blobs, and Metadata for more information. - Metadata map[string]string + Metadata map[string]*string // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -1344,7 +1336,8 @@ type PageBlobClientCreateOptions struct { Timeout *int32 } -// PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.GetPageRangesDiff method. +// PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager +// method. type PageBlobClientGetPageRangesDiffOptions struct { // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The // operation returns the NextMarker value within the response body if the listing @@ -1382,7 +1375,7 @@ type PageBlobClientGetPageRangesDiffOptions struct { Timeout *int32 } -// PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.GetPageRanges method. +// PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager method. type PageBlobClientGetPageRangesOptions struct { // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The // operation returns the NextMarker value within the response body if the listing @@ -1495,7 +1488,7 @@ type QueryFormat struct { JSONTextConfiguration *JSONTextConfiguration `xml:"JsonTextConfiguration"` // parquet configuration - ParquetTextConfiguration interface{} `xml:"ParquetTextConfiguration"` + ParquetTextConfiguration any `xml:"ParquetTextConfiguration"` } // QueryRequest - Groups the set of query request settings. @@ -1558,8 +1551,6 @@ type ServiceClientFilterBlobsOptions struct { // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] Timeout *int32 - // Filters the results to return only to return only blobs whose tags match the specified expression. - Where *string } // ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. 
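Most of the option structs in the zz_models.go hunks above (create, copy, snapshot, set-metadata, upload, and their container equivalents) switch their Metadata field from map[string]string to map[string]*string. Code that still holds plain string maps needs a small conversion step; the helper below is only an illustration of that step under this patch's new field type, and the function name is made up here rather than taken from the SDK.

```go
// Illustrative helper, not part of the SDK or of this patch: converts a plain
// string map into the map[string]*string shape the updated Metadata fields expect.
func toPointerMetadata(in map[string]string) map[string]*string {
	if in == nil {
		return nil
	}
	out := make(map[string]*string, len(in))
	for k, v := range in {
		v := v // take a copy so each map entry points at its own value
		out[k] = &v
	}
	return out
}
```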
@@ -1597,7 +1588,7 @@ type ServiceClientGetUserDelegationKeyOptions struct { Timeout *int32 } -// ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.ListContainersSegment +// ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager // method. type ServiceClientListContainersSegmentOptions struct { // Include this parameter to specify that the container's metadata be returned as part of the response body. @@ -1689,7 +1680,7 @@ type StorageError struct { // StorageServiceProperties - Storage Service Properties. type StorageServiceProperties struct { // The set of CORS rules. - Cors []*CorsRule `xml:"Cors>CorsRule"` + CORS []*CORSRule `xml:"Cors>CorsRule"` // The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible // values include version 2008-10-27 and all more recent versions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go index 97131f685..e5b6cda2b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go @@ -20,7 +20,7 @@ import ( ) // MarshalXML implements the xml.Marshaller interface for type AccessPolicy. -func (a AccessPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (a AccessPolicy) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias AccessPolicy aux := &struct { *alias @@ -31,11 +31,11 @@ func (a AccessPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error { Expiry: (*timeRFC3339)(a.Expiry), Start: (*timeRFC3339)(a.Start), } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // UnmarshalXML implements the xml.Unmarshaller interface for type AccessPolicy. -func (a *AccessPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { +func (a *AccessPolicy) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { type alias AccessPolicy aux := &struct { *alias @@ -44,7 +44,7 @@ func (a *AccessPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) erro }{ alias: (*alias)(a), } - if err := d.DecodeElement(aux, &start); err != nil { + if err := dec.DecodeElement(aux, &start); err != nil { return err } a.Expiry = (*time.Time)(aux.Expiry) @@ -53,7 +53,7 @@ func (a *AccessPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) erro } // MarshalXML implements the xml.Marshaller interface for type ArrowConfiguration. -func (a ArrowConfiguration) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (a ArrowConfiguration) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias ArrowConfiguration aux := &struct { *alias @@ -64,11 +64,11 @@ func (a ArrowConfiguration) MarshalXML(e *xml.Encoder, start xml.StartElement) e if a.Schema != nil { aux.Schema = &a.Schema } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // MarshalXML implements the xml.Marshaller interface for type BlobFlatListSegment. 
-func (b BlobFlatListSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (b BlobFlatListSegment) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias BlobFlatListSegment aux := &struct { *alias @@ -79,11 +79,11 @@ func (b BlobFlatListSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) if b.BlobItems != nil { aux.BlobItems = &b.BlobItems } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // MarshalXML implements the xml.Marshaller interface for type BlobHierarchyListSegment. -func (b BlobHierarchyListSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (b BlobHierarchyListSegment) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias BlobHierarchyListSegment aux := &struct { *alias @@ -98,11 +98,11 @@ func (b BlobHierarchyListSegment) MarshalXML(e *xml.Encoder, start xml.StartElem if b.BlobPrefixes != nil { aux.BlobPrefixes = &b.BlobPrefixes } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // UnmarshalXML implements the xml.Unmarshaller interface for type BlobItem. -func (b *BlobItem) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { +func (b *BlobItem) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { type alias BlobItem aux := &struct { *alias @@ -111,7 +111,7 @@ func (b *BlobItem) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { }{ alias: (*alias)(b), } - if err := d.DecodeElement(aux, &start); err != nil { + if err := dec.DecodeElement(aux, &start); err != nil { return err } b.Metadata = (map[string]*string)(aux.Metadata) @@ -120,7 +120,7 @@ func (b *BlobItem) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { } // MarshalXML implements the xml.Marshaller interface for type BlobProperties. -func (b BlobProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (b BlobProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias BlobProperties aux := &struct { *alias @@ -148,11 +148,11 @@ func (b BlobProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error encodedContentMD5 := runtime.EncodeByteArray(b.ContentMD5, runtime.Base64StdFormat) aux.ContentMD5 = &encodedContentMD5 } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // UnmarshalXML implements the xml.Unmarshaller interface for type BlobProperties. -func (b *BlobProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { +func (b *BlobProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { type alias BlobProperties aux := &struct { *alias @@ -168,7 +168,7 @@ func (b *BlobProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) er }{ alias: (*alias)(b), } - if err := d.DecodeElement(aux, &start); err != nil { + if err := dec.DecodeElement(aux, &start); err != nil { return err } b.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime) @@ -188,7 +188,7 @@ func (b *BlobProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) er } // MarshalXML implements the xml.Marshaller interface for type BlobTags. 
-func (b BlobTags) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (b BlobTags) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { start.Name.Local = "Tags" type alias BlobTags aux := &struct { @@ -200,11 +200,11 @@ func (b BlobTags) MarshalXML(e *xml.Encoder, start xml.StartElement) error { if b.BlobTagSet != nil { aux.BlobTagSet = &b.BlobTagSet } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // MarshalXML implements the xml.Marshaller interface for type BlockList. -func (b BlockList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (b BlockList) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias BlockList aux := &struct { *alias @@ -219,11 +219,11 @@ func (b BlockList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { if b.UncommittedBlocks != nil { aux.UncommittedBlocks = &b.UncommittedBlocks } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // MarshalXML implements the xml.Marshaller interface for type BlockLookupList. -func (b BlockLookupList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (b BlockLookupList) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { start.Name.Local = "BlockList" type alias BlockLookupList aux := &struct { @@ -243,11 +243,11 @@ func (b BlockLookupList) MarshalXML(e *xml.Encoder, start xml.StartElement) erro if b.Uncommitted != nil { aux.Uncommitted = &b.Uncommitted } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // UnmarshalXML implements the xml.Unmarshaller interface for type ContainerItem. -func (c *ContainerItem) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { +func (c *ContainerItem) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { type alias ContainerItem aux := &struct { *alias @@ -255,7 +255,7 @@ func (c *ContainerItem) UnmarshalXML(d *xml.Decoder, start xml.StartElement) err }{ alias: (*alias)(c), } - if err := d.DecodeElement(aux, &start); err != nil { + if err := dec.DecodeElement(aux, &start); err != nil { return err } c.Metadata = (map[string]*string)(aux.Metadata) @@ -263,7 +263,7 @@ func (c *ContainerItem) UnmarshalXML(d *xml.Decoder, start xml.StartElement) err } // MarshalXML implements the xml.Marshaller interface for type ContainerProperties. -func (c ContainerProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (c ContainerProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias ContainerProperties aux := &struct { *alias @@ -274,11 +274,11 @@ func (c ContainerProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) DeletedTime: (*timeRFC1123)(c.DeletedTime), LastModified: (*timeRFC1123)(c.LastModified), } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // UnmarshalXML implements the xml.Unmarshaller interface for type ContainerProperties. 
-func (c *ContainerProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { +func (c *ContainerProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { type alias ContainerProperties aux := &struct { *alias @@ -287,7 +287,7 @@ func (c *ContainerProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElemen }{ alias: (*alias)(c), } - if err := d.DecodeElement(aux, &start); err != nil { + if err := dec.DecodeElement(aux, &start); err != nil { return err } c.DeletedTime = (*time.Time)(aux.DeletedTime) @@ -296,7 +296,7 @@ func (c *ContainerProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElemen } // MarshalXML implements the xml.Marshaller interface for type FilterBlobSegment. -func (f FilterBlobSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (f FilterBlobSegment) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias FilterBlobSegment aux := &struct { *alias @@ -307,11 +307,11 @@ func (f FilterBlobSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) er if f.Blobs != nil { aux.Blobs = &f.Blobs } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // MarshalXML implements the xml.Marshaller interface for type GeoReplication. -func (g GeoReplication) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (g GeoReplication) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias GeoReplication aux := &struct { *alias @@ -320,11 +320,11 @@ func (g GeoReplication) MarshalXML(e *xml.Encoder, start xml.StartElement) error alias: (*alias)(&g), LastSyncTime: (*timeRFC1123)(g.LastSyncTime), } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // UnmarshalXML implements the xml.Unmarshaller interface for type GeoReplication. -func (g *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { +func (g *GeoReplication) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { type alias GeoReplication aux := &struct { *alias @@ -332,7 +332,7 @@ func (g *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) er }{ alias: (*alias)(g), } - if err := d.DecodeElement(aux, &start); err != nil { + if err := dec.DecodeElement(aux, &start); err != nil { return err } g.LastSyncTime = (*time.Time)(aux.LastSyncTime) @@ -340,7 +340,7 @@ func (g *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) er } // MarshalXML implements the xml.Marshaller interface for type ListContainersSegmentResponse. -func (l ListContainersSegmentResponse) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (l ListContainersSegmentResponse) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias ListContainersSegmentResponse aux := &struct { *alias @@ -351,11 +351,11 @@ func (l ListContainersSegmentResponse) MarshalXML(e *xml.Encoder, start xml.Star if l.ContainerItems != nil { aux.ContainerItems = &l.ContainerItems } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // MarshalXML implements the xml.Marshaller interface for type PageList. 
-func (p PageList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (p PageList) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias PageList aux := &struct { *alias @@ -370,11 +370,11 @@ func (p PageList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { if p.PageRange != nil { aux.PageRange = &p.PageRange } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // MarshalXML implements the xml.Marshaller interface for type QueryRequest. -func (q QueryRequest) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (q QueryRequest) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { start.Name.Local = "QueryRequest" type alias QueryRequest aux := &struct { @@ -382,12 +382,12 @@ func (q QueryRequest) MarshalXML(e *xml.Encoder, start xml.StartElement) error { }{ alias: (*alias)(&q), } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // MarshalJSON implements the json.Marshaller interface for type StorageError. func (s StorageError) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) + objectMap := make(map[string]any) populate(objectMap, "Message", s.Message) return json.Marshal(objectMap) } @@ -413,22 +413,22 @@ func (s *StorageError) UnmarshalJSON(data []byte) error { } // MarshalXML implements the xml.Marshaller interface for type StorageServiceProperties. -func (s StorageServiceProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (s StorageServiceProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias StorageServiceProperties aux := &struct { *alias - Cors *[]*CorsRule `xml:"Cors>CorsRule"` + CORS *[]*CORSRule `xml:"Cors>CorsRule"` }{ alias: (*alias)(&s), } - if s.Cors != nil { - aux.Cors = &s.Cors + if s.CORS != nil { + aux.CORS = &s.CORS } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // MarshalXML implements the xml.Marshaller interface for type UserDelegationKey. -func (u UserDelegationKey) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (u UserDelegationKey) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias UserDelegationKey aux := &struct { *alias @@ -439,11 +439,11 @@ func (u UserDelegationKey) MarshalXML(e *xml.Encoder, start xml.StartElement) er SignedExpiry: (*timeRFC3339)(u.SignedExpiry), SignedStart: (*timeRFC3339)(u.SignedStart), } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // UnmarshalXML implements the xml.Unmarshaller interface for type UserDelegationKey. 
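Before the remaining serde hunks, a note on the pattern every one of them follows: each Marshal/UnmarshalXML defines a local `alias` of the model type (which strips the method set and so avoids infinite recursion) and an auxiliary struct whose wrapper fields (timeRFC3339, timeRFC1123, base64 helpers) override how individual fields are encoded; the renaming of the receivers from e/d to enc/dec above is purely cosmetic. A minimal, self-contained sketch of that technique, using illustrative stand-ins rather than the vendored types and helpers:

package main

import (
	"encoding/xml"
	"fmt"
	"time"
)

// timeRFC3339 stands in for the generated wrapper type: a defined type whose
// MarshalXML renders the wrapped time in a fixed layout.
type timeRFC3339 time.Time

func (t timeRFC3339) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
	return enc.EncodeElement(time.Time(t).Format(time.RFC3339), start)
}

// AccessPolicy here is an illustrative model, not the vendored one.
type AccessPolicy struct {
	Start *time.Time
}

// MarshalXML uses the alias-struct trick: alias drops AccessPolicy's own
// MarshalXML (no recursion), and the shallower Start field on the aux struct
// overrides the promoted one, so Start is encoded via timeRFC3339.
func (a AccessPolicy) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
	type alias AccessPolicy
	aux := &struct {
		*alias
		Start *timeRFC3339 `xml:"Start"`
	}{
		alias: (*alias)(&a),
		Start: (*timeRFC3339)(a.Start),
	}
	return enc.EncodeElement(aux, start)
}

func main() {
	ts := time.Date(2023, 12, 1, 0, 0, 0, 0, time.UTC)
	out, _ := xml.Marshal(AccessPolicy{Start: &ts})
	fmt.Println(string(out)) // <AccessPolicy><Start>2023-12-01T00:00:00Z</Start></AccessPolicy>
}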
-func (u *UserDelegationKey) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { +func (u *UserDelegationKey) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { type alias UserDelegationKey aux := &struct { *alias @@ -452,7 +452,7 @@ func (u *UserDelegationKey) UnmarshalXML(d *xml.Decoder, start xml.StartElement) }{ alias: (*alias)(u), } - if err := d.DecodeElement(aux, &start); err != nil { + if err := dec.DecodeElement(aux, &start); err != nil { return err } u.SignedExpiry = (*time.Time)(aux.SignedExpiry) @@ -460,7 +460,7 @@ func (u *UserDelegationKey) UnmarshalXML(d *xml.Decoder, start xml.StartElement) return nil } -func populate(m map[string]interface{}, k string, v interface{}) { +func populate(m map[string]any, k string, v any) { if v == nil { return } else if azcore.IsNullValue(v) { @@ -470,7 +470,7 @@ func populate(m map[string]interface{}, k string, v interface{}) { } } -func unpopulate(data json.RawMessage, fn string, v interface{}) error { +func unpopulate(data json.RawMessage, fn string, v any) error { if data == nil { return nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go index 0b1e8a521..b209e99f0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go @@ -29,8 +29,8 @@ type PageBlobClient struct { } // NewPageBlobClient creates a new instance of PageBlobClient with the specified values. -// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// pl - the pipeline used for sending requests and handling responses. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - pl - the pipeline used for sending requests and handling responses. func NewPageBlobClient(endpoint string, pl runtime.Pipeline) *PageBlobClient { client := &PageBlobClient{ endpoint: endpoint, @@ -41,16 +41,17 @@ func NewPageBlobClient(endpoint string, pl runtime.Pipeline) *PageBlobClient { // ClearPages - The Clear Pages operation clears a set of pages from a page blob // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// contentLength - The length of the request. -// options - PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
-func (client *PageBlobClient) ClearPages(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientClearPagesResponse, error) { +// - contentLength - The length of the request. +// - options - PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) ClearPages(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientClearPagesResponse, error) { req, err := client.clearPagesCreateRequest(ctx, contentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) if err != nil { return PageBlobClientClearPagesResponse{}, err @@ -66,7 +67,7 @@ func (client *PageBlobClient) ClearPages(ctx context.Context, contentLength int6 } // clearPagesCreateRequest creates the ClearPages request. 
-func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -107,10 +108,10 @@ func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, conte req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -188,13 +189,14 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag // be read or copied from as usual. This API is supported since REST version // 2016-05-31. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies -// a page blob snapshot. The value should be URL-encoded as it would appear in a request -// URI. The source blob must either be public or must be authenticated via a shared access signature. -// options - PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// - options - PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *PageBlobClient) CopyIncremental(ctx context.Context, copySource string, options *PageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCopyIncrementalResponse, error) { req, err := client.copyIncrementalCreateRequest(ctx, copySource, options, modifiedAccessConditions) if err != nil { @@ -223,10 +225,10 @@ func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context, } req.Raw().URL.RawQuery = reqQP.Encode() if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -286,17 +288,18 @@ func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) // Create - The Create operation creates a new page blob. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// contentLength - The length of the request. -// blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned -// to a 512-byte boundary. -// options - PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. -// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *PageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCreateResponse, error) { +// - contentLength - The length of the request. +// - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned +// to a 512-byte boundary. +// - options - PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. 
+// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCreateResponse, error) { req, err := client.createCreateRequest(ctx, contentLength, blobContentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return PageBlobClientCreateResponse{}, err @@ -312,7 +315,7 @@ func (client *PageBlobClient) Create(ctx context.Context, contentLength int64, b } // createCreateRequest creates the Create request. -func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -344,7 +347,9 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe } if options != nil && options.Metadata != nil { for k, v := range options.Metadata { - req.Raw().Header["x-ms-meta-"+k] = []string{v} + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } } } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { @@ -366,10 +371,10 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -392,7 +397,7 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} } if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = 
[]string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} @@ -461,11 +466,12 @@ func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlo // NewGetPageRangesPager - The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot // of a page blob -// If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.GetPageRanges method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - options - PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager +// method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *PageBlobClient) NewGetPageRangesPager(options *PageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *runtime.Pager[PageBlobClientGetPageRangesResponse] { return runtime.NewPager(runtime.PagingHandler[PageBlobClientGetPageRangesResponse]{ More: func(page PageBlobClientGetPageRangesResponse) bool { @@ -522,10 +528,10 @@ func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, op req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -588,12 +594,12 @@ func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) ( // NewGetPageRangesDiffPager - The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that // were changed between target blob and previous snapshot. -// If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.GetPageRangesDiff -// method. 
-// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - options - PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager +// method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *PageBlobClient) NewGetPageRangesDiffPager(options *PageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *runtime.Pager[PageBlobClientGetPageRangesDiffResponse] { return runtime.NewPager(runtime.PagingHandler[PageBlobClientGetPageRangesDiffResponse]{ More: func(page PageBlobClientGetPageRangesDiffResponse) bool { @@ -656,10 +662,10 @@ func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -722,15 +728,16 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons // Resize - Resize the Blob // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned -// to a 512-byte boundary. -// options - PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *PageBlobClient) Resize(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientResizeResponse, error) { +// - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned +// to a 512-byte boundary. 
+// - options - PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) Resize(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientResizeResponse, error) { req, err := client.resizeCreateRequest(ctx, blobContentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return PageBlobClientResizeResponse{}, err @@ -746,7 +753,7 @@ func (client *PageBlobClient) Resize(ctx context.Context, blobContentLength int6 } // resizeCreateRequest creates the Resize request. -func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -773,10 +780,10 @@ func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobConte req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -837,13 +844,14 @@ func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo // UpdateSequenceNumber - Update the sequence number of the blob // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// sequenceNumberAction - Required if the x-ms-blob-sequence-number header is set for the request. This property applies to -// page blobs only. 
This property indicates how the service should modify the blob's sequence number -// options - PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - sequenceNumberAction - Required if the x-ms-blob-sequence-number header is set for the request. This property applies to +// page blobs only. This property indicates how the service should modify the blob's sequence number +// - options - PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber +// method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *PageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, options *PageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUpdateSequenceNumberResponse, error) { req, err := client.updateSequenceNumberCreateRequest(ctx, sequenceNumberAction, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { @@ -875,10 +883,10 @@ func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Cont req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -942,17 +950,18 @@ func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp // UploadPages - The Upload Pages operation writes a range of pages to a page blob // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// contentLength - The length of the request. -// body - Initial data -// options - PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. 
-// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *PageBlobClient) UploadPages(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUploadPagesResponse, error) { +// - contentLength - The length of the request. +// - body - Initial data +// - options - PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) UploadPages(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUploadPagesResponse, error) { req, err := client.uploadPagesCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) if err != nil { return PageBlobClientUploadPagesResponse{}, err @@ -968,7 +977,7 @@ func (client *PageBlobClient) UploadPages(ctx context.Context, contentLength int } // uploadPagesCreateRequest creates the UploadPages request. 
-func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -1015,10 +1024,10 @@ func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, cont req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -1106,24 +1115,25 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa // UploadPagesFromURL - The Upload Pages operation writes a range of pages to a page blob where the contents are read from // a URL // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// sourceURL - Specify a URL to the copy source. -// sourceRange - Bytes of source data in the specified range. The length of this range should match the ContentLength header -// and x-ms-range/Range destination range header. -// contentLength - The length of the request. -// rangeParam - The range of bytes to which the source range would be written. The range should be 512 aligned and range-end -// is required. -// options - PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL -// method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
-// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL -// method. -func (client *PageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (PageBlobClientUploadPagesFromURLResponse, error) { +// - sourceURL - Specify a URL to the copy source. +// - sourceRange - Bytes of source data in the specified range. The length of this range should match the ContentLength header +// and x-ms-range/Range destination range header. +// - contentLength - The length of the request. +// - rangeParam - The range of bytes to which the source range would be written. The range should be 512 aligned and range-end +// is required. +// - options - PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL +// method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +func (client *PageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (PageBlobClientUploadPagesFromURLResponse, error) { req, err := client.uploadPagesFromURLCreateRequest(ctx, sourceURL, sourceRange, contentLength, rangeParam, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) if err != nil { return PageBlobClientUploadPagesFromURLResponse{}, err @@ -1139,7 +1149,7 @@ func (client *PageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL } // uploadPagesFromURLCreateRequest creates the UploadPagesFromURL request. 
-func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { +func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -1186,10 +1196,10 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -1201,10 +1211,10 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go index 22264c17c..386c943e4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go @@ -456,13 +456,13 @@ type BlobClientDownloadResponse struct { LegalHold *bool // Metadata contains the information returned from the x-ms-meta header response. - Metadata map[string]string + Metadata map[string]*string // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response. ObjectReplicationPolicyID *string // ObjectReplicationRules contains the information returned from the x-ms-or header response. - ObjectReplicationRules map[string]string + ObjectReplicationRules map[string]*string // RequestID contains the information returned from the x-ms-request-id header response. RequestID *string @@ -624,13 +624,13 @@ type BlobClientGetPropertiesResponse struct { LegalHold *bool // Metadata contains the information returned from the x-ms-meta header response. - Metadata map[string]string + Metadata map[string]*string // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response. ObjectReplicationPolicyID *string // ObjectReplicationRules contains the information returned from the x-ms-or header response. - ObjectReplicationRules map[string]string + ObjectReplicationRules map[string]*string // RehydratePriority contains the information returned from the x-ms-rehydrate-priority header response. RehydratePriority *string @@ -760,7 +760,7 @@ type BlobClientQueryResponse struct { LeaseStatus *LeaseStatusType // Metadata contains the information returned from the x-ms-meta header response. - Metadata map[string]string + Metadata map[string]*string // RequestID contains the information returned from the x-ms-request-id header response. RequestID *string @@ -1401,7 +1401,7 @@ type ContainerClientGetPropertiesResponse struct { LeaseStatus *LeaseStatusType // Metadata contains the information returned from the x-ms-meta header response. - Metadata map[string]string + Metadata map[string]*string // RequestID contains the information returned from the x-ms-request-id header response. RequestID *string @@ -1410,7 +1410,7 @@ type ContainerClientGetPropertiesResponse struct { Version *string } -// ContainerClientListBlobFlatSegmentResponse contains the response from method ContainerClient.ListBlobFlatSegment. +// ContainerClientListBlobFlatSegmentResponse contains the response from method ContainerClient.NewListBlobFlatSegmentPager. type ContainerClientListBlobFlatSegmentResponse struct { ListBlobsFlatSegmentResponse // ClientRequestID contains the information returned from the x-ms-client-request-id header response. @@ -1429,7 +1429,7 @@ type ContainerClientListBlobFlatSegmentResponse struct { Version *string `xml:"Version"` } -// ContainerClientListBlobHierarchySegmentResponse contains the response from method ContainerClient.ListBlobHierarchySegment. +// ContainerClientListBlobHierarchySegmentResponse contains the response from method ContainerClient.NewListBlobHierarchySegmentPager. type ContainerClientListBlobHierarchySegmentResponse struct { ListBlobsHierarchySegmentResponse // ClientRequestID contains the information returned from the x-ms-client-request-id header response. 
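The response-type changes in the hunks above are the ones most likely to reach calling code: Metadata and ObjectReplicationRules move from map[string]string to map[string]*string, and the listing/page-range responses are re-documented against the New*Pager constructors. A caller-side sketch of coping with the pointer-valued metadata map (the helper is made up, not part of the vendored SDK); skipping nil values mirrors what the regenerated request code does when it writes x-ms-meta-* headers:

package main

import "fmt"

// flattenMetadata copies a pointer-valued metadata map (the shape fields such
// as BlobClientGetPropertiesResponse.Metadata now use) into a plain
// map[string]string, dropping nil entries. Illustrative helper only.
func flattenMetadata(md map[string]*string) map[string]string {
	out := make(map[string]string, len(md))
	for k, v := range md {
		if v != nil {
			out[k] = *v
		}
	}
	return out
}

func main() {
	owner := "blob-csi-driver"
	meta := map[string]*string{"owner": &owner, "unset": nil}
	fmt.Println(flattenMetadata(meta)) // map[owner:blob-csi-driver]
}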
@@ -1673,7 +1673,7 @@ type PageBlobClientCreateResponse struct { VersionID *string } -// PageBlobClientGetPageRangesDiffResponse contains the response from method PageBlobClient.GetPageRangesDiff. +// PageBlobClientGetPageRangesDiffResponse contains the response from method PageBlobClient.NewGetPageRangesDiffPager. type PageBlobClientGetPageRangesDiffResponse struct { PageList // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. @@ -1698,7 +1698,7 @@ type PageBlobClientGetPageRangesDiffResponse struct { Version *string `xml:"Version"` } -// PageBlobClientGetPageRangesResponse contains the response from method PageBlobClient.GetPageRanges. +// PageBlobClientGetPageRangesResponse contains the response from method PageBlobClient.NewGetPageRangesPager. type PageBlobClientGetPageRangesResponse struct { PageList // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. @@ -1931,7 +1931,7 @@ type ServiceClientGetUserDelegationKeyResponse struct { Version *string `xml:"Version"` } -// ServiceClientListContainersSegmentResponse contains the response from method ServiceClient.ListContainersSegment. +// ServiceClientListContainersSegmentResponse contains the response from method ServiceClient.NewListContainersSegmentPager. type ServiceClientListContainersSegmentResponse struct { ListContainersSegmentResponse // ClientRequestID contains the information returned from the x-ms-client-request-id header response. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go index 1cb779d84..b700211c5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go @@ -29,8 +29,8 @@ type ServiceClient struct { } // NewServiceClient creates a new instance of ServiceClient with the specified values. -// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// pl - the pipeline used for sending requests and handling responses. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - pl - the pipeline used for sending requests and handling responses. func NewServiceClient(endpoint string, pl runtime.Pipeline) *ServiceClient { client := &ServiceClient{ endpoint: endpoint, @@ -43,10 +43,12 @@ func NewServiceClient(endpoint string, pl runtime.Pipeline) *ServiceClient { // expression. Filter blobs searches across all containers within a storage account but can // be scoped within the expression to a single container. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. -func (client *ServiceClient) FilterBlobs(ctx context.Context, options *ServiceClientFilterBlobsOptions) (ServiceClientFilterBlobsResponse, error) { - req, err := client.filterBlobsCreateRequest(ctx, options) +// - where - Filters the results to return only to return only blobs whose tags match the specified expression. +// - options - ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. 
+func (client *ServiceClient) FilterBlobs(ctx context.Context, where string, options *ServiceClientFilterBlobsOptions) (ServiceClientFilterBlobsResponse, error) { + req, err := client.filterBlobsCreateRequest(ctx, where, options) if err != nil { return ServiceClientFilterBlobsResponse{}, err } @@ -61,7 +63,7 @@ func (client *ServiceClient) FilterBlobs(ctx context.Context, options *ServiceCl } // filterBlobsCreateRequest creates the FilterBlobs request. -func (client *ServiceClient) filterBlobsCreateRequest(ctx context.Context, options *ServiceClientFilterBlobsOptions) (*policy.Request, error) { +func (client *ServiceClient) filterBlobsCreateRequest(ctx context.Context, where string, options *ServiceClientFilterBlobsOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err @@ -71,16 +73,14 @@ func (client *ServiceClient) filterBlobsCreateRequest(ctx context.Context, optio if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - if options != nil && options.Where != nil { - reqQP.Set("where", *options.Where) - } + reqQP.Set("where", where) if options != nil && options.Marker != nil { reqQP.Set("marker", *options.Marker) } if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} @@ -116,8 +116,9 @@ func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (Ser // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. +// - options - ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. func (client *ServiceClient) GetAccountInfo(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (ServiceClientGetAccountInfoResponse, error) { req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { @@ -186,8 +187,9 @@ func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) ( // GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics and // CORS (Cross-Origin Resource Sharing) rules. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. +// - options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. func (client *ServiceClient) GetProperties(ctx context.Context, options *ServiceClientGetPropertiesOptions) (ServiceClientGetPropertiesResponse, error) { req, err := client.getPropertiesCreateRequest(ctx, options) if err != nil { @@ -245,8 +247,9 @@ func (client *ServiceClient) getPropertiesHandleResponse(resp *http.Response) (S // GetStatistics - Retrieves statistics related to replication for the Blob service. 
It is only available on the secondary // location endpoint when read-access geo-redundant replication is enabled for the storage account. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. +// - options - ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. func (client *ServiceClient) GetStatistics(ctx context.Context, options *ServiceClientGetStatisticsOptions) (ServiceClientGetStatisticsResponse, error) { req, err := client.getStatisticsCreateRequest(ctx, options) if err != nil { @@ -311,10 +314,11 @@ func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (S // GetUserDelegationKey - Retrieves a user delegation key for the Blob service. This is only a valid operation when using // bearer token authentication. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// keyInfo - Key information -// options - ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey -// method. +// - keyInfo - Key information +// - options - ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey +// method. func (client *ServiceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, options *ServiceClientGetUserDelegationKeyOptions) (ServiceClientGetUserDelegationKeyResponse, error) { req, err := client.getUserDelegationKeyCreateRequest(ctx, keyInfo, options) if err != nil { @@ -378,10 +382,11 @@ func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Respo // NewListContainersSegmentPager - The List Containers Segment operation returns a list of the containers under the specified // account -// If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// options - ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.ListContainersSegment -// method. +// - options - ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager +// method. +// // listContainersSegmentCreateRequest creates the ListContainersSegment request. func (client *ServiceClient) ListContainersSegmentCreateRequest(ctx context.Context, options *ServiceClientListContainersSegmentOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) @@ -435,9 +440,10 @@ func (client *ServiceClient) ListContainersSegmentHandleResponse(resp *http.Resp // SetProperties - Sets properties for a storage account's Blob service endpoint, including properties for Storage Analytics // and CORS (Cross-Origin Resource Sharing) rules // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// storageServiceProperties - The StorageService properties. -// options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. +// - storageServiceProperties - The StorageService properties. +// - options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. 
func (client *ServiceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (ServiceClientSetPropertiesResponse, error) { req, err := client.setPropertiesCreateRequest(ctx, storageServiceProperties, options) if err != nil { @@ -491,12 +497,13 @@ func (client *ServiceClient) setPropertiesHandleResponse(resp *http.Response) (S // SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. // If the operation fails it returns an *azcore.ResponseError type. +// // Generated from API version 2020-10-02 -// contentLength - The length of the request. -// multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header -// value: multipart/mixed; boundary=batch_ -// body - Initial data -// options - ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. +// - contentLength - The length of the request. +// - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header +// value: multipart/mixed; boundary=batch_ +// - body - Initial data +// - options - ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. func (client *ServiceClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceClientSubmitBatchOptions) (ServiceClientSubmitBatchResponse, error) { req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) if err != nil { @@ -506,7 +513,7 @@ func (client *ServiceClient) SubmitBatch(ctx context.Context, contentLength int6 if err != nil { return ServiceClientSubmitBatchResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { + if !runtime.HasStatusCode(resp, http.StatusAccepted) { return ServiceClientSubmitBatchResponse{}, runtime.NewResponseError(resp) } return client.submitBatchHandleResponse(resp) @@ -532,7 +539,7 @@ func (client *ServiceClient) submitBatchCreateRequest(ctx context.Context, conte req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, "application/xml") + return req, req.SetBody(body, multipartContentType) } // submitBatchHandleResponse handles the SubmitBatch response. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go new file mode 100644 index 000000000..e7c8e9213 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go @@ -0,0 +1,113 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package shared + +import ( + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "strings" +) + +type storageAuthorizer struct { + scopes []string + tenantID string +} + +func NewStorageChallengePolicy(cred azcore.TokenCredential) policy.Policy { + s := storageAuthorizer{scopes: []string{TokenScope}} + return runtime.NewBearerTokenPolicy(cred, []string{TokenScope}, &policy.BearerTokenOptions{ + AuthorizationHandler: policy.AuthorizationHandler{ + OnRequest: s.onRequest, + OnChallenge: s.onChallenge, + }, + }) +} + +func (s *storageAuthorizer) onRequest(req *policy.Request, authNZ func(policy.TokenRequestOptions) error) error { + return authNZ(policy.TokenRequestOptions{Scopes: s.scopes}) +} + +func (s *storageAuthorizer) onChallenge(req *policy.Request, resp *http.Response, authNZ func(policy.TokenRequestOptions) error) error { + // parse the challenge + err := s.parseChallenge(resp) + if err != nil { + return err + } + // TODO: Set tenantID when policy.TokenRequestOptions supports it. https://github.com/Azure/azure-sdk-for-go/issues/19841 + return authNZ(policy.TokenRequestOptions{Scopes: s.scopes}) +} + +type challengePolicyError struct { + err error +} + +func (c *challengePolicyError) Error() string { + return c.err.Error() +} + +func (*challengePolicyError) NonRetriable() { + // marker method +} + +func (c *challengePolicyError) Unwrap() error { + return c.err +} + +// parses Tenant ID from auth challenge +// https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/authorize +func parseTenant(url string) string { + if url == "" { + return "" + } + parts := strings.Split(url, "/") + if len(parts) >= 3 { + tenant := parts[3] + tenant = strings.ReplaceAll(tenant, ",", "") + return tenant + } else { + return "" + } +} + +func (s *storageAuthorizer) parseChallenge(resp *http.Response) error { + authHeader := resp.Header.Get("WWW-Authenticate") + if authHeader == "" { + return &challengePolicyError{err: errors.New("response has no WWW-Authenticate header for challenge authentication")} + } + + // Strip down to auth and resource + // Format is "Bearer authorization_uri=\"\" resource_id=\"\"" + authHeader = strings.ReplaceAll(authHeader, "Bearer ", "") + + parts := strings.Split(authHeader, " ") + + vals := map[string]string{} + for _, part := range parts { + subParts := strings.Split(part, "=") + if len(subParts) == 2 { + stripped := strings.ReplaceAll(subParts[1], "\"", "") + stripped = strings.TrimSuffix(stripped, ",") + vals[subParts[0]] = stripped + } + } + + s.tenantID = parseTenant(vals["authorization_uri"]) + + scope := vals["resource_id"] + if scope == "" { + return &challengePolicyError{err: errors.New("could not find a valid resource in the WWW-Authenticate header")} + } + + if !strings.HasSuffix(scope, "/.default") { + scope += "/.default" + } + s.scopes = []string{scope} + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go index 7751781de..02936ad03 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go @@ -38,6 +38,8 @@ const ( HeaderIfNoneMatch = "If-None-Match" HeaderIfUnmodifiedSince = "If-Unmodified-Since" HeaderRange = "Range" + 
HeaderXmsVersion = "x-ms-version" + HeaderXmsRequestID = "x-ms-request-id" ) const crc64Polynomial uint64 = 0x9A6C9329AC4BC9B5 @@ -85,22 +87,6 @@ func ParseConnectionString(connectionString string) (ParsedConnectionString, err connStrMap[parts[0]] = parts[1] } - accountName, ok := connStrMap["AccountName"] - if !ok { - return ParsedConnectionString{}, errors.New("connection string missing AccountName") - } - - accountKey, ok := connStrMap["AccountKey"] - if !ok { - sharedAccessSignature, ok := connStrMap["SharedAccessSignature"] - if !ok { - return ParsedConnectionString{}, errors.New("connection string missing AccountKey and SharedAccessSignature") - } - return ParsedConnectionString{ - ServiceURL: fmt.Sprintf("%v://%v.blob.%v/?%v", defaultScheme, accountName, defaultSuffix, sharedAccessSignature), - }, nil - } - protocol, ok := connStrMap["DefaultEndpointsProtocol"] if !ok { protocol = defaultScheme @@ -111,24 +97,45 @@ func ParseConnectionString(connectionString string) (ParsedConnectionString, err suffix = defaultSuffix } - if blobEndpoint, ok := connStrMap["BlobEndpoint"]; ok { + blobEndpoint, has_blobEndpoint := connStrMap["BlobEndpoint"] + accountName, has_accountName := connStrMap["AccountName"] + + var serviceURL string + if has_blobEndpoint { + serviceURL = blobEndpoint + } else if has_accountName { + serviceURL = fmt.Sprintf("%v://%v.blob.%v", protocol, accountName, suffix) + } else { + return ParsedConnectionString{}, errors.New("connection string needs either AccountName or BlobEndpoint") + } + + if !strings.HasSuffix(serviceURL, "/") { + // add a trailing slash to be consistent with the portal + serviceURL += "/" + } + + accountKey, has_accountKey := connStrMap["AccountKey"] + sharedAccessSignature, has_sharedAccessSignature := connStrMap["SharedAccessSignature"] + + if has_accountName && has_accountKey { return ParsedConnectionString{ - ServiceURL: blobEndpoint, + ServiceURL: serviceURL, AccountName: accountName, AccountKey: accountKey, }, nil + } else if has_sharedAccessSignature { + return ParsedConnectionString{ + ServiceURL: fmt.Sprintf("%v?%v", serviceURL, sharedAccessSignature), + }, nil + } else { + return ParsedConnectionString{}, errors.New("connection string needs either AccountKey or SharedAccessSignature") } - return ParsedConnectionString{ - ServiceURL: fmt.Sprintf("%v://%v.blob.%v", protocol, accountName, suffix), - AccountName: accountName, - AccountKey: accountKey, - }, nil } // SerializeBlobTags converts tags to generated.BlobTags func SerializeBlobTags(tagsMap map[string]string) *generated.BlobTags { - if tagsMap == nil { + if len(tagsMap) == 0 { return nil } blobTagSet := make([]*generated.BlobTag, 0) @@ -140,7 +147,7 @@ func SerializeBlobTags(tagsMap map[string]string) *generated.BlobTags { } func SerializeBlobTagsToStrPtr(tagsMap map[string]string) *string { - if tagsMap == nil { + if len(tagsMap) == 0 { return nil } tags := make([]string, 0) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/log.go new file mode 100644 index 000000000..4d26992d3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/log.go @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" +) + +const ( + // EventUpload is used for logging events related to upload operation. 
+ EventUpload = exported.EventUpload + + // EventSubmitBatch is used for logging events related to submit blob batch operation. + EventSubmitBatch = exported.EventSubmitBatch +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go index 020b45bfd..2896788e1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go @@ -50,11 +50,11 @@ type DownloadBufferOptions = blob.DownloadBufferOptions // DownloadFileOptions identifies options used by the DownloadBuffer and DownloadFile functions. type DownloadFileOptions = blob.DownloadFileOptions -// CpkInfo contains a group of parameters for client provided encryption key. -type CpkInfo = blob.CpkInfo +// CPKInfo contains a group of parameters for client provided encryption key. +type CPKInfo = blob.CPKInfo -// CpkScopeInfo contains a group of parameters for the ContainerClient.Create method. -type CpkScopeInfo = container.CpkScopeInfo +// CPKScopeInfo contains a group of parameters for the ContainerClient.Create method. +type CPKScopeInfo = container.CPKScopeInfo // AccessConditions identifies blob-specific access conditions which you optionally set. type AccessConditions = exported.BlobAccessConditions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go index ba7d8fb45..69328dbeb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go @@ -25,9 +25,7 @@ import ( ) // ClientOptions contains the optional parameters when creating a Client. -type ClientOptions struct { - azcore.ClientOptions -} +type ClientOptions base.ClientOptions // Client represents a client to an Azure Storage page blob; type Client base.CompositeClient[generated.BlobClient, generated.PageBlobClient] @@ -37,7 +35,7 @@ type Client base.CompositeClient[generated.BlobClient, generated.PageBlobClient] // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + authPolicy := shared.NewStorageChallengePolicy(cred) conOptions := shared.GetClientOptions(options) conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) @@ -150,14 +148,18 @@ func (pb *Client) Create(ctx context.Context, size int64, o *CreateOptions) (Cre // This method panics if the stream is not at position 0. // Note that the http client closes the body stream after the request is sent to the service. // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page. 
-func (pb *Client) UploadPages(ctx context.Context, body io.ReadSeekCloser, options *UploadPagesOptions) (UploadPagesResponse, error) { +func (pb *Client) UploadPages(ctx context.Context, body io.ReadSeekCloser, contentRange blob.HTTPRange, options *UploadPagesOptions) (UploadPagesResponse, error) { count, err := shared.ValidateSeekableStreamAt0AndGetCount(body) if err != nil { return UploadPagesResponse{}, err } - uploadPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format() + uploadPagesOptions := &generated.PageBlobClientUploadPagesOptions{ + Range: exported.FormatHTTPRange(contentRange), + } + + leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format() if options != nil && options.TransactionalValidation != nil { body, err = options.TransactionalValidation.Apply(body, uploadPagesOptions) @@ -345,7 +347,7 @@ func (pb *Client) SetLegalHold(ctx context.Context, legalHold bool, options *blo // SetTier operation sets the tier on a blob. The operation is allowed on a page // blob in a premium storage account and on a block blob in a blob storage account (locally -// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and +// redundant storage only). A premium page blob's tier determines the allowed size, IOPs, and // bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation // does not update the blob's ETag. // For detailed information about block blob level tier-ing see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. @@ -359,6 +361,12 @@ func (pb *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOption return pb.BlobClient().GetProperties(ctx, o) } +// GetAccountInfo provides account level information +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. +func (pb *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOptions) (blob.GetAccountInfoResponse, error) { + return pb.BlobClient().GetAccountInfo(ctx, o) +} + // SetHTTPHeaders changes a blob's HTTP headers. // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. func (pb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { @@ -367,7 +375,7 @@ func (pb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeade // SetMetadata changes a blob's metadata. // https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. 
-func (pb *Client) SetMetadata(ctx context.Context, metadata map[string]string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { +func (pb *Client) SetMetadata(ctx context.Context, metadata map[string]*string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { return pb.BlobClient().SetMetadata(ctx, metadata, o) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go index 646587cec..096a7910a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go @@ -28,7 +28,7 @@ func PossibleCopyStatusTypeValues() []CopyStatusType { return generated.PossibleCopyStatusTypeValues() } -// PremiumPageBlobAccessTier defines values for Premium PageBlob's AccessTier +// PremiumPageBlobAccessTier defines values for Premium PageBlob's AccessTier. type PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTier const ( @@ -50,7 +50,7 @@ func PossiblePremiumPageBlobAccessTierValues() []PremiumPageBlobAccessTier { return generated.PossiblePremiumPageBlobAccessTierValues() } -// SequenceNumberActionType defines values for SequenceNumberActionType +// SequenceNumberActionType defines values for SequenceNumberActionType. type SequenceNumberActionType = generated.SequenceNumberActionType const ( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go index 25f86b387..e6148f173 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go @@ -18,7 +18,7 @@ import ( // Type Declarations --------------------------------------------------------------------- -// PageList - the list of pages +// PageList - the list of pages. type PageList = generated.PageList // ClearRange defines a range of pages. @@ -46,16 +46,16 @@ type CreateOptions struct { // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. // See Naming and Referencing Containers, Blobs, and Metadata for more information. - Metadata map[string]string + Metadata map[string]*string // Optional. Indicates the tier to be set on the page blob. Tier *PremiumPageBlobAccessTier HTTPHeaders *blob.HTTPHeaders - CpkInfo *blob.CpkInfo + CPKInfo *blob.CPKInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKScopeInfo *blob.CPKScopeInfo AccessConditions *blob.AccessConditions // Specifies the date time when the blobs immutability policy is set to expire. 
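The page blob hunks above rename the client-provided-key options to the CPKInfo/CPKScopeInfo spelling and move the page range out of UploadPagesOptions into an explicit blob.HTTPRange argument on UploadPages. A minimal caller-side sketch of the new call shape follows; it assumes a pageblob.Client constructed elsewhere (for example via pageblob.NewClient with an Azure AD credential) and a 512-byte-aligned buffer, and uploadFirstPages is a hypothetical helper name, not SDK API:

package pageblobexample

import (
	"bytes"
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob"
)

// uploadFirstPages writes len(data) bytes (assumed to be a multiple of 512)
// starting at offset 0. With the updated signature the range is passed as an
// explicit blob.HTTPRange rather than via UploadPagesOptions.Range; the
// options parameter now carries only CPK, lease, sequence-number and
// modified-access conditions.
func uploadFirstPages(ctx context.Context, client *pageblob.Client, data []byte) error {
	body := streaming.NopCloser(bytes.NewReader(data))
	_, err := client.UploadPages(ctx, body, blob.HTTPRange{
		Offset: 0,
		Count:  int64(len(data)),
	}, nil)
	return err
}
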
@@ -67,7 +67,7 @@ type CreateOptions struct { } func (o *CreateOptions) format() (*generated.PageBlobClientCreateOptions, *generated.BlobHTTPHeaders, - *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions) { + *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil, nil } @@ -79,38 +79,31 @@ func (o *CreateOptions) format() (*generated.PageBlobClientCreateOptions, *gener Tier: o.Tier, } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return options, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions + return options, o.HTTPHeaders, leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions } // --------------------------------------------------------------------------------------------------------------------- // UploadPagesOptions contains the optional parameters for the Client.UploadPages method. type UploadPagesOptions struct { - // Range specifies a range of bytes. The default value is all bytes. - Range blob.HTTPRange - // TransactionalValidation specifies the transfer validation type to use. // The default is nil (no transfer validation). TransactionalValidation blob.TransferValidationType - CpkInfo *blob.CpkInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo SequenceNumberAccessConditions *SequenceNumberAccessConditions AccessConditions *blob.AccessConditions } -func (o *UploadPagesOptions) format() (*generated.PageBlobClientUploadPagesOptions, *generated.LeaseAccessConditions, - *generated.CpkInfo, *generated.CpkScopeInfo, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions) { +func (o *UploadPagesOptions) format() (*generated.LeaseAccessConditions, + *generated.CPKInfo, *generated.CPKScopeInfo, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions) { if o == nil { - return nil, nil, nil, nil, nil, nil - } - - options := &generated.PageBlobClientUploadPagesOptions{ - Range: exported.FormatHTTPRange(o.Range), + return nil, nil, nil, nil, nil } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return options, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions + return leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions } // --------------------------------------------------------------------------------------------------------------------- @@ -123,9 +116,9 @@ type UploadPagesFromURLOptions struct { // SourceContentValidation contains the validation mechanism used on the range of bytes read from the source. 
SourceContentValidation blob.SourceContentValidationType - CpkInfo *blob.CpkInfo + CPKInfo *blob.CPKInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKScopeInfo *blob.CPKScopeInfo SequenceNumberAccessConditions *SequenceNumberAccessConditions @@ -134,7 +127,7 @@ type UploadPagesFromURLOptions struct { AccessConditions *blob.AccessConditions } -func (o *UploadPagesFromURLOptions) format() (*generated.PageBlobClientUploadPagesFromURLOptions, *generated.CpkInfo, *generated.CpkScopeInfo, +func (o *UploadPagesFromURLOptions) format() (*generated.PageBlobClientUploadPagesFromURLOptions, *generated.CPKInfo, *generated.CPKScopeInfo, *generated.LeaseAccessConditions, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions, *generated.SourceModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil, nil, nil @@ -149,27 +142,27 @@ func (o *UploadPagesFromURLOptions) format() (*generated.PageBlobClientUploadPag } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return options, o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, o.SequenceNumberAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions + return options, o.CPKInfo, o.CPKScopeInfo, leaseAccessConditions, o.SequenceNumberAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions } // --------------------------------------------------------------------------------------------------------------------- // ClearPagesOptions contains the optional parameters for the Client.ClearPages operation type ClearPagesOptions struct { - CpkInfo *blob.CpkInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo SequenceNumberAccessConditions *SequenceNumberAccessConditions AccessConditions *blob.AccessConditions } -func (o *ClearPagesOptions) format() (*generated.LeaseAccessConditions, *generated.CpkInfo, - *generated.CpkScopeInfo, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions) { +func (o *ClearPagesOptions) format() (*generated.LeaseAccessConditions, *generated.CPKInfo, + *generated.CPKScopeInfo, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions + return leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions } // --------------------------------------------------------------------------------------------------------------------- @@ -177,20 +170,20 @@ func (o *ClearPagesOptions) format() (*generated.LeaseAccessConditions, *generat // GetPageRangesOptions contains the optional parameters for the Client.NewGetPageRangesPager method. type GetPageRangesOptions struct { Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // Specifies the maximum number of containers to return. If the request does not specify MaxResults, or specifies a value // greater than 5000, the server will return up to 5000 items. Note that if the // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder // of the results. 
For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. + // return fewer results than specified by MaxResults, or than the default of 5000. MaxResults *int32 // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot // of the target blob. The response will only contain pages that were changed // between the target blob and its previous snapshot. PrevSnapshotURL *string - // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response + // Optional in version 2015-07-08 and newer. The PrevSnapshot parameter is a DateTime value that specifies that the response // will contain only pages that were changed between target blob and previous // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot - // specified by prevsnapshot is the older of the two. Note that incremental + // specified by PrevSnapshot is the older of the two. Note that incremental // snapshots are currently supported only for blobs created on or after January 1, 2016. PrevSnapshot *string // Range specifies a range of bytes. The default value is all bytes. @@ -227,20 +220,20 @@ type GetPageRangesDiffOptions struct { // as the value for the marker parameter in a subsequent call to request the next // page of list items. The marker value is opaque to the client. Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // Specifies the maximum number of containers to return. If the request does not specify MaxResults, or specifies a value // greater than 5000, the server will return up to 5000 items. Note that if the // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. + // return fewer results than specified by MaxResults, or than the default of 5000. MaxResults *int32 // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot // of the target blob. The response will only contain pages that were changed // between the target blob and its previous snapshot. PrevSnapshotURL *string - // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response + // Optional in version 2015-07-08 and newer. The PrevSnapshot parameter is a DateTime value that specifies that the response // will contain only pages that were changed between target blob and previous // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot - // specified by prevsnapshot is the older of the two. Note that incremental + // specified by PrevSnapshot is the older of the two. Note that incremental // snapshots are currently supported only for blobs created on or after January 1, 2016. PrevSnapshot *string // Range specifies a range of bytes. The default value is all bytes. @@ -275,19 +268,19 @@ func (o *GetPageRangesDiffOptions) format() (*generated.PageBlobClientGetPageRan // ResizeOptions contains the optional parameters for the Client.Resize method. 
type ResizeOptions struct { - CpkInfo *blob.CpkInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo AccessConditions *blob.AccessConditions } func (o *ResizeOptions) format() (*generated.PageBlobClientResizeOptions, *generated.LeaseAccessConditions, - *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions) { + *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return nil, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions + return nil, leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions } // --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go index 7b1f6c98c..454a08cb9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go @@ -29,17 +29,16 @@ type AccountSignatureValues struct { Protocol Protocol `param:"spr"` // See the SASProtocol* constants StartTime time.Time `param:"st"` // Not specified if IsZero ExpiryTime time.Time `param:"se"` // Not specified if IsZero - Permissions string `param:"sp"` // Create by initializing a AccountSASPermissions and then call String() + Permissions string `param:"sp"` // Create by initializing AccountPermissions and then call String() IPRange IPRange `param:"sip"` - Services string `param:"ss"` // Create by initializing AccountSASServices and then call String() - ResourceTypes string `param:"srt"` // Create by initializing AccountSASResourceTypes and then call String() + ResourceTypes string `param:"srt"` // Create by initializing AccountResourceTypes and then call String() } // SignWithSharedKey uses an account's shared key credential to sign this signature values to produce // the proper SAS query parameters. 
func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) { // https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS - if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" { + if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" { return QueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType") } if v.Version == "" { @@ -51,12 +50,18 @@ func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKey } v.Permissions = perms.String() + resources, err := parseAccountResourceTypes(v.ResourceTypes) + if err != nil { + return QueryParameters{}, err + } + v.ResourceTypes = resources.String() + startTime, expiryTime, _ := formatTimesForSigning(v.StartTime, v.ExpiryTime, time.Time{}) stringToSign := strings.Join([]string{ sharedKeyCredential.AccountName(), v.Permissions, - v.Services, + "b", // blob service v.ResourceTypes, startTime, expiryTime, @@ -80,7 +85,7 @@ func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKey ipRange: v.IPRange, // Account-specific SAS parameters - services: v.Services, + services: "b", // will always be "b" resourceTypes: v.ResourceTypes, // Calculated SAS signature @@ -91,13 +96,13 @@ func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKey } // AccountPermissions type simplifies creating the permissions string for an Azure Storage Account SAS. -// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field. +// Initialize an instance of this type and then call its String method to set AccountSignatureValues' Permissions field. type AccountPermissions struct { - Read, Write, Delete, DeletePreviousVersion, List, Add, Create, Update, Process, Tag, FilterByTags, PermanentDelete bool + Read, Write, Delete, DeletePreviousVersion, PermanentDelete, List, Add, Create, Update, Process, FilterByTags, Tag, SetImmutabilityPolicy bool } // String produces the SAS permissions string for an Azure Storage account. -// Call this method to set AccountSASSignatureValues's Permissions field. +// Call this method to set AccountSignatureValues' Permissions field. func (p *AccountPermissions) String() string { var buffer bytes.Buffer if p.Read { @@ -130,16 +135,19 @@ func (p *AccountPermissions) String() string { if p.Process { buffer.WriteRune('p') } + if p.FilterByTags { + buffer.WriteRune('f') + } if p.Tag { buffer.WriteRune('t') } - if p.FilterByTags { - buffer.WriteRune('f') + if p.SetImmutabilityPolicy { + buffer.WriteRune('i') } return buffer.String() } -// Parse initializes the AccountSASPermissions' fields from a string. +// Parse initializes the AccountPermissions' fields from a string. 
func parseAccountPermissions(s string) (AccountPermissions, error) { p := AccountPermissions{} // Clear out the flags for _, r := range s { @@ -150,6 +158,8 @@ func parseAccountPermissions(s string) (AccountPermissions, error) { p.Write = true case 'd': p.Delete = true + case 'x': + p.DeletePreviousVersion = true case 'y': p.PermanentDelete = true case 'l': @@ -162,12 +172,12 @@ func parseAccountPermissions(s string) (AccountPermissions, error) { p.Update = true case 'p': p.Process = true - case 'x': - p.Process = true case 't': p.Tag = true case 'f': p.FilterByTags = true + case 'i': + p.SetImmutabilityPolicy = true default: return AccountPermissions{}, fmt.Errorf("invalid permission character: '%v'", r) } @@ -175,54 +185,14 @@ func parseAccountPermissions(s string) (AccountPermissions, error) { return p, nil } -// AccountServices type simplifies creating the services string for an Azure Storage Account SAS. -// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field. -type AccountServices struct { - Blob, Queue, File bool -} - -// String produces the SAS services string for an Azure Storage account. -// Call this method to set AccountSASSignatureValues's Services field. -func (s *AccountServices) String() string { - var buffer bytes.Buffer - if s.Blob { - buffer.WriteRune('b') - } - if s.Queue { - buffer.WriteRune('q') - } - if s.File { - buffer.WriteRune('f') - } - return buffer.String() -} - -// Parse initializes the AccountSASServices' fields from a string. -/*func parseAccountServices(str string) (AccountServices, error) { - s := AccountServices{} // Clear out the flags - for _, r := range str { - switch r { - case 'b': - s.Blob = true - case 'q': - s.Queue = true - case 'f': - s.File = true - default: - return AccountServices{}, fmt.Errorf("invalid service character: '%v'", r) - } - } - return s, nil -}*/ - // AccountResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS. -// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field. +// Initialize an instance of this type and then call its String method to set AccountSignatureValues' ResourceTypes field. type AccountResourceTypes struct { Service, Container, Object bool } // String produces the SAS resource types string for an Azure Storage account. -// Call this method to set AccountSASSignatureValues's ResourceTypes field. +// Call this method to set AccountSignatureValues' ResourceTypes field. func (rt *AccountResourceTypes) String() string { var buffer bytes.Buffer if rt.Service { @@ -237,9 +207,9 @@ func (rt *AccountResourceTypes) String() string { return buffer.String() } -// Parse initializes the AccountResourceTypes's fields from a string. -/*func parseAccountResourceTypes(s string) (AccountResourceTypes, error) { - rt := AccountResourceTypes{} // Clear out the flags +// parseAccountResourceTypes initializes the AccountResourceTypes' fields from a string. 
+func parseAccountResourceTypes(s string) (AccountResourceTypes, error) { + rt := AccountResourceTypes{} for _, r := range s { switch r { case 's': @@ -249,8 +219,8 @@ func (rt *AccountResourceTypes) String() string { case 'o': rt.Object = true default: - return AccountResourceTypes{}, fmt.Errorf("invalid resource type: '%v'", r) + return AccountResourceTypes{}, fmt.Errorf("invalid resource type character: '%v'", r) } } return rt, nil -}*/ +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go index ef4682859..4d97372da 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go @@ -34,10 +34,10 @@ var timeFormats = []string{"2006-01-02T15:04:05.0000000Z", TimeFormat, "2006-01- type Protocol string const ( - // ProtocolHTTPS can be specified for a SAS protocol + // ProtocolHTTPS can be specified for a SAS protocol. ProtocolHTTPS Protocol = "https" - // ProtocolHTTPSandHTTP can be specified for a SAS protocol + // ProtocolHTTPSandHTTP can be specified for a SAS protocol. ProtocolHTTPSandHTTP Protocol = "https,http" ) @@ -148,137 +148,137 @@ type QueryParameters struct { seTimeFormat string } -// AuthorizedObjectID returns authorizedObjectID +// AuthorizedObjectID returns authorizedObjectID. func (p *QueryParameters) AuthorizedObjectID() string { return p.authorizedObjectID } -// UnauthorizedObjectID returns unauthorizedObjectID +// UnauthorizedObjectID returns unauthorizedObjectID. func (p *QueryParameters) UnauthorizedObjectID() string { return p.unauthorizedObjectID } -// SignedCorrelationID returns signedCorrelationID +// SignedCorrelationID returns signedCorrelationID. func (p *QueryParameters) SignedCorrelationID() string { return p.correlationID } -// SignedOID returns signedOID +// SignedOID returns signedOID. func (p *QueryParameters) SignedOID() string { return p.signedOID } -// SignedTID returns signedTID +// SignedTID returns signedTID. func (p *QueryParameters) SignedTID() string { return p.signedTID } -// SignedStart returns signedStart +// SignedStart returns signedStart. func (p *QueryParameters) SignedStart() time.Time { return p.signedStart } -// SignedExpiry returns signedExpiry +// SignedExpiry returns signedExpiry. func (p *QueryParameters) SignedExpiry() time.Time { return p.signedExpiry } -// SignedService returns signedService +// SignedService returns signedService. func (p *QueryParameters) SignedService() string { return p.signedService } -// SignedVersion returns signedVersion +// SignedVersion returns signedVersion. func (p *QueryParameters) SignedVersion() string { return p.signedVersion } -// SnapshotTime returns snapshotTime +// SnapshotTime returns snapshotTime. func (p *QueryParameters) SnapshotTime() time.Time { return p.snapshotTime } -// Version returns version +// Version returns version. func (p *QueryParameters) Version() string { return p.version } -// Services returns services +// Services returns services. func (p *QueryParameters) Services() string { return p.services } -// ResourceTypes returns resourceTypes +// ResourceTypes returns resourceTypes. func (p *QueryParameters) ResourceTypes() string { return p.resourceTypes } -// Protocol returns protocol +// Protocol returns protocol. 
func (p *QueryParameters) Protocol() Protocol { return p.protocol } -// StartTime returns startTime +// StartTime returns startTime. func (p *QueryParameters) StartTime() time.Time { return p.startTime } -// ExpiryTime returns expiryTime +// ExpiryTime returns expiryTime. func (p *QueryParameters) ExpiryTime() time.Time { return p.expiryTime } -// IPRange returns ipRange +// IPRange returns ipRange. func (p *QueryParameters) IPRange() IPRange { return p.ipRange } -// Identifier returns identifier +// Identifier returns identifier. func (p *QueryParameters) Identifier() string { return p.identifier } -// Resource returns resource +// Resource returns resource. func (p *QueryParameters) Resource() string { return p.resource } -// Permissions returns permissions +// Permissions returns permissions. func (p *QueryParameters) Permissions() string { return p.permissions } -// Signature returns signature +// Signature returns signature. func (p *QueryParameters) Signature() string { return p.signature } -// CacheControl returns cacheControl +// CacheControl returns cacheControl. func (p *QueryParameters) CacheControl() string { return p.cacheControl } -// ContentDisposition returns contentDisposition +// ContentDisposition returns contentDisposition. func (p *QueryParameters) ContentDisposition() string { return p.contentDisposition } -// ContentEncoding returns contentEncoding +// ContentEncoding returns contentEncoding. func (p *QueryParameters) ContentEncoding() string { return p.contentEncoding } -// ContentLanguage returns contentLanguage +// ContentLanguage returns contentLanguage. func (p *QueryParameters) ContentLanguage() string { return p.contentLanguage } -// ContentType returns sontentType +// ContentType returns contentType. func (p *QueryParameters) ContentType() string { return p.contentType } -// SignedDirectoryDepth returns signedDirectoryDepth +// SignedDirectoryDepth returns signedDirectoryDepth. func (p *QueryParameters) SignedDirectoryDepth() string { return p.signedDirectoryDepth } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go index b07d7118e..b900e2b06 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go @@ -8,6 +8,7 @@ package sas import ( "bytes" + "errors" "fmt" "strings" "time" @@ -24,7 +25,7 @@ type BlobSignatureValues struct { StartTime time.Time `param:"st"` // Not specified if IsZero ExpiryTime time.Time `param:"se"` // Not specified if IsZero SnapshotTime time.Time - Permissions string `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String() + Permissions string `param:"sp"` // Create by initializing ContainerPermissions or BlobPermissions and then call String() IPRange IPRange `param:"sip"` Identifier string `param:"si"` ContainerName string @@ -50,8 +51,8 @@ func getDirectoryDepth(path string) string { // SignWithSharedKey uses an account's SharedKeyCredential to sign this signature values to produce the proper SAS query parameters. 
func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) { - if sharedKeyCredential == nil { - return QueryParameters{}, fmt.Errorf("cannot sign SAS query without Shared Key Credential") + if v.ExpiryTime.IsZero() || v.Permissions == "" { + return QueryParameters{}, errors.New("service SAS is missing at least one of these: ExpiryTime or Permissions") } //Make sure the permission characters are in the correct order @@ -141,13 +142,11 @@ func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *Us return QueryParameters{}, fmt.Errorf("cannot sign SAS query without User Delegation Key") } - //Make sure the permission characters are in the correct order - perms, err := parseBlobPermissions(v.Permissions) - if err != nil { - return QueryParameters{}, err + if v.ExpiryTime.IsZero() || v.Permissions == "" { + return QueryParameters{}, errors.New("user delegation SAS is missing at least one of these: ExpiryTime or Permissions") } - v.Permissions = perms.String() + // Parse the resource resource := "c" if !v.SnapshotTime.IsZero() { resource = "bs" @@ -161,6 +160,20 @@ func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *Us } else { resource = "b" } + // make sure the permission characters are in the correct order + if resource == "c" { + perms, err := parseContainerPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + } else { + perms, err := parseBlobPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + } if v.Version == "" { v.Version = Version @@ -253,15 +266,15 @@ func getCanonicalName(account string, containerName string, blobName string, dir } // ContainerPermissions type simplifies creating the permissions string for an Azure Storage container SAS. -// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field. +// Initialize an instance of this type and then call its String method to set BlobSignatureValues' Permissions field. // All permissions descriptions can be found here: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-directory-container-or-blob type ContainerPermissions struct { - Read, Add, Create, Write, Delete, DeletePreviousVersion, List, FilterByTags bool - Execute, ModifyOwnership, ModifyPermissions, SetImmutabilityPolicy bool // Hierarchical Namespace only + Read, Add, Create, Write, Delete, DeletePreviousVersion, List, Tag, FilterByTags, Move, SetImmutabilityPolicy bool + Execute, ModifyOwnership, ModifyPermissions bool // Meant for hierarchical namespace accounts } // String produces the SAS permissions string for an Azure Storage container. -// Call this method to set BlobSASSignatureValues's Permissions field. +// Call this method to set BlobSignatureValues' Permissions field. func (p *ContainerPermissions) String() string { var b bytes.Buffer if p.Read { @@ -285,9 +298,15 @@ func (p *ContainerPermissions) String() string { if p.List { b.WriteRune('l') } + if p.Tag { + b.WriteRune('t') + } if p.FilterByTags { b.WriteRune('f') } + if p.Move { + b.WriteRune('m') + } if p.Execute { b.WriteRune('e') } @@ -303,8 +322,8 @@ func (p *ContainerPermissions) String() string { return b.String() } -// Parse initializes the ContainerSASPermissions' fields from a string. 
-/*func parseContainerPermissions(s string) (ContainerPermissions, error) { +// Parse initializes ContainerPermissions' fields from a string. +func parseContainerPermissions(s string) (ContainerPermissions, error) { p := ContainerPermissions{} // Clear the flags for _, r := range s { switch r { @@ -323,28 +342,34 @@ func (p *ContainerPermissions) String() string { case 'l': p.List = true case 't': + p.Tag = true + case 'f': p.FilterByTags = true + case 'm': + p.Move = true case 'e': p.Execute = true case 'o': p.ModifyOwnership = true case 'p': p.ModifyPermissions = true + case 'i': + p.SetImmutabilityPolicy = true default: return ContainerPermissions{}, fmt.Errorf("invalid permission: '%v'", r) } } return p, nil -}*/ +} // BlobPermissions type simplifies creating the permissions string for an Azure Storage blob SAS. -// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field. +// Initialize an instance of this type and then call its String method to set BlobSignatureValues' Permissions field. type BlobPermissions struct { - Read, Add, Create, Write, Delete, DeletePreviousVersion, Tag, List, Move, Execute, Ownership, Permissions, PermanentDelete bool + Read, Add, Create, Write, Delete, DeletePreviousVersion, PermanentDelete, List, Tag, Move, Execute, Ownership, Permissions, SetImmutabilityPolicy bool } // String produces the SAS permissions string for an Azure Storage blob. -// Call this method to set BlobSignatureValues's Permissions field. +// Call this method to set BlobSignatureValues' Permissions field. func (p *BlobPermissions) String() string { var b bytes.Buffer if p.Read { @@ -386,10 +411,13 @@ func (p *BlobPermissions) String() string { if p.Permissions { b.WriteRune('p') } + if p.SetImmutabilityPolicy { + b.WriteRune('i') + } return b.String() } -// Parse initializes the BlobSASPermissions's fields from a string. +// Parse initializes BlobPermissions' fields from a string. func parseBlobPermissions(s string) (BlobPermissions, error) { p := BlobPermissions{} // Clear the flags for _, r := range s { @@ -406,12 +434,12 @@ func parseBlobPermissions(s string) (BlobPermissions, error) { p.Delete = true case 'x': p.DeletePreviousVersion = true - case 't': - p.Tag = true case 'y': p.PermanentDelete = true case 'l': p.List = true + case 't': + p.Tag = true case 'm': p.Move = true case 'e': @@ -420,6 +448,8 @@ func parseBlobPermissions(s string) (BlobPermissions, error) { p.Ownership = true case 'p': p.Permissions = true + case 'i': + p.SetImmutabilityPolicy = true default: return BlobPermissions{}, fmt.Errorf("invalid permission: '%v'", r) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/batch_builder.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/batch_builder.go new file mode 100644 index 000000000..924fd1081 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/batch_builder.go @@ -0,0 +1,94 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package service + +import ( + "context" + "fmt" + "net/url" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" +) + +// BatchBuilder is used for creating the batch operations list. It contains the list of either delete or set tier sub-requests. +// NOTE: All sub-requests in the batch must be of the same type, either delete or set tier. +type BatchBuilder struct { + endpoint string + authPolicy policy.Policy + subRequests []*policy.Request + operationType *exported.BlobBatchOperationType +} + +func (bb *BatchBuilder) checkOperationType(operationType exported.BlobBatchOperationType) error { + if bb.operationType == nil { + bb.operationType = &operationType + return nil + } + if *bb.operationType != operationType { + return fmt.Errorf("BlobBatch only supports one operation type per batch and is already being used for %s operations", *bb.operationType) + } + return nil +} + +// Delete operation is used to add delete sub-request to the batch builder. +func (bb *BatchBuilder) Delete(containerName string, blobName string, options *BatchDeleteOptions) error { + err := bb.checkOperationType(exported.BatchDeleteOperationType) + if err != nil { + return err + } + + blobName = url.PathEscape(blobName) + blobURL := runtime.JoinPaths(bb.endpoint, containerName, blobName) + + blobClient, err := blob.NewClientWithNoCredential(blobURL, nil) + if err != nil { + return err + } + + deleteOptions, leaseInfo, accessConditions := options.format() + req, err := getGeneratedBlobClient(blobClient).DeleteCreateRequest(context.TODO(), deleteOptions, leaseInfo, accessConditions) + if err != nil { + return err + } + + // remove x-ms-version header + exported.UpdateSubRequestHeaders(req) + + bb.subRequests = append(bb.subRequests, req) + return nil +} + +// SetTier operation is used to add set tier sub-request to the batch builder. 
+func (bb *BatchBuilder) SetTier(containerName string, blobName string, accessTier blob.AccessTier, options *BatchSetTierOptions) error { + err := bb.checkOperationType(exported.BatchSetTierOperationType) + if err != nil { + return err + } + + blobName = url.PathEscape(blobName) + blobURL := runtime.JoinPaths(bb.endpoint, containerName, blobName) + + blobClient, err := blob.NewClientWithNoCredential(blobURL, nil) + if err != nil { + return err + } + + setTierOptions, leaseInfo, accessConditions := options.format() + req, err := getGeneratedBlobClient(blobClient).SetTierCreateRequest(context.TODO(), accessTier, setTierOptions, leaseInfo, accessConditions) + if err != nil { + return err + } + + // remove x-ms-version header + exported.UpdateSubRequestHeaders(req) + + bb.subRequests = append(bb.subRequests, req) + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go index 2d9775d6e..fb7b8696a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go @@ -7,8 +7,13 @@ package service import ( + "bytes" "context" "errors" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "net/http" "strings" "time" @@ -18,7 +23,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" @@ -26,9 +30,7 @@ import ( ) // ClientOptions contains the optional parameters when creating a Client. -type ClientOptions struct { - azcore.ClientOptions -} +type ClientOptions base.ClientOptions // Client represents a URL to the Azure Blob Storage service allowing you to manipulate blob containers. type Client base.Client[generated.ServiceClient] @@ -38,12 +40,12 @@ type Client base.Client[generated.ServiceClient] // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + authPolicy := shared.NewStorageChallengePolicy(cred) conOptions := shared.GetClientOptions(options) conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - return (*Client)(base.NewServiceClient(serviceURL, pl, nil)), nil + return (*Client)(base.NewServiceClient(serviceURL, pl, &cred)), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. 
@@ -115,6 +117,15 @@ func (s *Client) sharedKey() *SharedKeyCredential { return base.SharedKey((*base.Client[generated.ServiceClient])(s)) } +func (s *Client) credential() any { + return base.Credential((*base.Client[generated.ServiceClient])(s)) +} + +// helper method to return the generated.BlobClient which is used for creating the sub-requests +func getGeneratedBlobClient(b *blob.Client) *generated.BlobClient { + return base.InnerClient((*base.Client[generated.BlobClient])(b)) +} + // URL returns the URL endpoint used by the Client object. func (s *Client) URL() string { return s.generated().Endpoint() @@ -124,7 +135,7 @@ func (s *Client) URL() string { // this Client's URL. The new container.Client uses the same request policy pipeline as the Client. func (s *Client) NewContainerClient(containerName string) *container.Client { containerURL := runtime.JoinPaths(s.generated().Endpoint(), containerName) - return (*container.Client)(base.NewContainerClient(containerURL, s.generated().Pipeline(), s.sharedKey())) + return (*container.Client)(base.NewContainerClient(containerURL, s.generated().Pipeline(), s.credential())) } // CreateContainer is a lifecycle method to creates a new container under the specified account. @@ -154,6 +165,7 @@ func (s *Client) RestoreContainer(ctx context.Context, deletedContainerName stri } // GetAccountInfo provides account level information +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. func (s *Client) GetAccountInfo(ctx context.Context, o *GetAccountInfoOptions) (GetAccountInfoResponse, error) { getAccountInfoOptions := o.format() resp, err := s.generated().GetAccountInfo(ctx, getAccountInfoOptions) @@ -186,7 +198,7 @@ func (s *Client) NewListContainersPager(o *ListContainersOptions) *runtime.Pager if page == nil { req, err = s.generated().ListContainersSegmentCreateRequest(ctx, &listOptions) } else { - listOptions.Marker = page.Marker + listOptions.Marker = page.NextMarker req, err = s.generated().ListContainersSegmentCreateRequest(ctx, &listOptions) } if err != nil { @@ -243,19 +255,17 @@ func (s *Client) GetStatistics(ctx context.Context, o *GetStatisticsOptions) (Ge // GetSASURL is a convenience method for generating a SAS token for the currently pointed at account. // It can only be used if the credential supplied during creation was a SharedKeyCredential. -// This validity can be checked with CanGetAccountSASToken(). -func (s *Client) GetSASURL(resources sas.AccountResourceTypes, permissions sas.AccountPermissions, services sas.AccountServices, start time.Time, expiry time.Time) (string, error) { +func (s *Client) GetSASURL(resources sas.AccountResourceTypes, permissions sas.AccountPermissions, expiry time.Time, o *GetSASURLOptions) (string, error) { if s.sharedKey() == nil { - return "", errors.New("SAS can only be signed with a SharedKeyCredential") + return "", bloberror.MissingSharedKeyCredential } - + st := o.format() qps, err := sas.AccountSignatureValues{ Version: sas.Version, Protocol: sas.ProtocolHTTPS, Permissions: permissions.String(), - Services: services.String(), ResourceTypes: resources.String(), - StartTime: start.UTC(), + StartTime: st, ExpiryTime: expiry.UTC(), }.SignWithSharedKey(s.sharedKey()) if err != nil { @@ -277,8 +287,73 @@ func (s *Client) GetSASURL(resources sas.AccountResourceTypes, permissions sas.A // https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags // eg. 
"dog='germanshepherd' and penguin='emperorpenguin'" // To specify a container, eg. "@container=’containerName’ and Name = ‘C’" -func (s *Client) FilterBlobs(ctx context.Context, o *FilterBlobsOptions) (FilterBlobsResponse, error) { +func (s *Client) FilterBlobs(ctx context.Context, where string, o *FilterBlobsOptions) (FilterBlobsResponse, error) { serviceFilterBlobsOptions := o.format() - resp, err := s.generated().FilterBlobs(ctx, serviceFilterBlobsOptions) + resp, err := s.generated().FilterBlobs(ctx, where, serviceFilterBlobsOptions) return resp, err } + +// NewBatchBuilder creates an instance of BatchBuilder using the same auth policy as the client. +// BatchBuilder is used to build the batch consisting of either delete or set tier sub-requests. +// All sub-requests in the batch must be of the same type, either delete or set tier. +// NOTE: Service level Blob Batch operation is supported only when the Client was created using SharedKeyCredential and Account SAS. +func (s *Client) NewBatchBuilder() (*BatchBuilder, error) { + var authPolicy policy.Policy + + switch cred := s.credential().(type) { + case *azcore.TokenCredential: + authPolicy = shared.NewStorageChallengePolicy(*cred) + case *SharedKeyCredential: + authPolicy = exported.NewSharedKeyCredPolicy(cred) + case nil: + // for authentication using SAS + authPolicy = nil + default: + return nil, fmt.Errorf("unrecognised authentication type %T", cred) + } + + return &BatchBuilder{ + endpoint: s.URL(), + authPolicy: authPolicy, + }, nil +} + +// SubmitBatch operation allows multiple API calls to be embedded into a single HTTP request. +// It builds the request body using the BatchBuilder object passed. +// BatchBuilder contains the list of operations to be submitted. It supports up to 256 sub-requests in a single batch. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/blob-batch. 
+func (s *Client) SubmitBatch(ctx context.Context, bb *BatchBuilder, options *SubmitBatchOptions) (SubmitBatchResponse, error) { + if bb == nil || len(bb.subRequests) == 0 { + return SubmitBatchResponse{}, errors.New("batch builder is empty") + } + + // create the request body + batchReq, batchID, err := exported.CreateBatchRequest(&exported.BlobBatchBuilder{ + AuthPolicy: bb.authPolicy, + SubRequests: bb.subRequests, + }) + if err != nil { + return SubmitBatchResponse{}, err + } + + reader := bytes.NewReader(batchReq) + rsc := streaming.NopCloser(reader) + multipartContentType := "multipart/mixed; boundary=" + batchID + + resp, err := s.generated().SubmitBatch(ctx, int64(len(batchReq)), multipartContentType, rsc, options.format()) + if err != nil { + return SubmitBatchResponse{}, err + } + + batchResponses, err := exported.ParseBlobBatchResponse(resp.Body, resp.ContentType, bb.subRequests) + if err != nil { + return SubmitBatchResponse{}, err + } + + return SubmitBatchResponse{ + Responses: batchResponses, + ContentType: resp.ContentType, + RequestID: resp.RequestID, + Version: resp.Version, + }, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go index 1988e58a7..22f49474d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go @@ -7,9 +7,12 @@ package service import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "time" ) // SharedKeyCredential contains an account's name and its primary or secondary key. @@ -30,7 +33,7 @@ type UserDelegationKey = generated.UserDelegationKey // KeyInfo contains KeyInfo struct. type KeyInfo = generated.KeyInfo -// GetUserDelegationCredentialOptions contains optional parameters for Service.GetUserDelegationKey method +// GetUserDelegationCredentialOptions contains optional parameters for Service.GetUserDelegationKey method. type GetUserDelegationCredentialOptions struct { // placeholder for future options } @@ -51,11 +54,11 @@ type ContainerItem = generated.ContainerItem // ContainerProperties - Properties of a container type ContainerProperties = generated.ContainerProperties -// CpkInfo contains a group of parameters for the BlobClient.Download method. -type CpkInfo = generated.CpkInfo +// CPKInfo contains a group of parameters for the BlobClient.Download method. +type CPKInfo = generated.CPKInfo -// CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -type CpkScopeInfo = generated.CpkScopeInfo +// CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +type CPKScopeInfo = generated.CPKScopeInfo // CreateContainerOptions contains the optional parameters for the container.Client.Create method. type CreateContainerOptions = container.CreateOptions @@ -66,28 +69,34 @@ type DeleteContainerOptions = container.DeleteOptions // RestoreContainerOptions contains the optional parameters for the container.Client.Restore method. 
type RestoreContainerOptions = container.RestoreOptions -// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another +// CORSRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another // domain. Web browsers implement a security restriction known as same-origin policy that // prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin -// domain) to call APIs in another domain -type CorsRule = generated.CorsRule +// domain) to call APIs in another domain. +type CORSRule = generated.CORSRule -// FilterBlobItem - Blob info returned from method Client.FilterBlobs +// FilterBlobSegment - The result of a Filter Blobs API call. +type FilterBlobSegment = generated.FilterBlobSegment + +// BlobTags - Blob tags +type BlobTags = generated.BlobTags + +// FilterBlobItem - Blob info returned from method Client.FilterBlobs. type FilterBlobItem = generated.FilterBlobItem -// GeoReplication - Geo-Replication information for the Secondary Storage Service +// GeoReplication - Geo-Replication information for the Secondary Storage Service. type GeoReplication = generated.GeoReplication -// RetentionPolicy - the retention policy which determines how long the associated data should persist +// RetentionPolicy - the retention policy which determines how long the associated data should persist. type RetentionPolicy = generated.RetentionPolicy -// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs +// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs. type Metrics = generated.Metrics // Logging - Azure Analytics Logging settings. type Logging = generated.Logging -// StaticWebsite - The properties that enable an account to host a static website +// StaticWebsite - The properties that enable an account to host a static website. type StaticWebsite = generated.StaticWebsite // StorageServiceProperties - Storage Service Properties. @@ -120,7 +129,7 @@ func (o *GetPropertiesOptions) format() *generated.ServiceClientGetPropertiesOpt // --------------------------------------------------------------------------------------------------------------------- -// ListContainersOptions provides set of configurations for ListContainers operation +// ListContainersOptions provides set of configurations for ListContainers operation. type ListContainersOptions struct { Include ListContainersInclude @@ -154,25 +163,28 @@ type ListContainersInclude struct { // SetPropertiesOptions provides set of options for Client.SetProperties type SetPropertiesOptions struct { // The set of CORS rules. - Cors []*CorsRule + CORS []*CORSRule // The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible - // values include version 2008-10-27 and all more recent versions + // values include version 2008-10-27 and all more recent versions. DefaultServiceVersion *string - // the retention policy which determines how long the associated data should persist + // the retention policy which determines how long the associated data should persist. DeleteRetentionPolicy *RetentionPolicy // a summary of request statistics grouped by API in hour or minute aggregates for blobs + // If version is not set - we default to "1.0" HourMetrics *Metrics // Azure Analytics Logging settings. 
+ // If version is not set - we default to "1.0" Logging *Logging // a summary of request statistics grouped by API in hour or minute aggregates for blobs + // If version is not set - we default to "1.0" MinuteMetrics *Metrics - // The properties that enable an account to host a static website + // The properties that enable an account to host a static website. StaticWebsite *StaticWebsite } @@ -181,8 +193,45 @@ func (o *SetPropertiesOptions) format() (generated.StorageServiceProperties, *ge return generated.StorageServiceProperties{}, nil } + defaultVersion := to.Ptr[string]("1.0") + defaultAge := to.Ptr[int32](0) + emptyStr := to.Ptr[string]("") + + if o.CORS != nil { + for i := 0; i < len(o.CORS); i++ { + if o.CORS[i].AllowedHeaders == nil { + o.CORS[i].AllowedHeaders = emptyStr + } + if o.CORS[i].ExposedHeaders == nil { + o.CORS[i].ExposedHeaders = emptyStr + } + if o.CORS[i].MaxAgeInSeconds == nil { + o.CORS[i].MaxAgeInSeconds = defaultAge + } + } + } + + if o.HourMetrics != nil { + if o.HourMetrics.Version == nil { + o.HourMetrics.Version = defaultVersion + } + } + + if o.Logging != nil { + if o.Logging.Version == nil { + o.Logging.Version = defaultVersion + } + } + + if o.MinuteMetrics != nil { + if o.MinuteMetrics.Version == nil { + o.MinuteMetrics.Version = defaultVersion + } + + } + return generated.StorageServiceProperties{ - Cors: o.Cors, + CORS: o.CORS, DefaultServiceVersion: o.DefaultServiceVersion, DeleteRetentionPolicy: o.DeleteRetentionPolicy, HourMetrics: o.HourMetrics, @@ -194,6 +243,27 @@ func (o *SetPropertiesOptions) format() (generated.StorageServiceProperties, *ge // --------------------------------------------------------------------------------------------------------------------- +// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. +type GetSASURLOptions struct { + StartTime *time.Time +} + +func (o *GetSASURLOptions) format() time.Time { + if o == nil { + return time.Time{} + } + + var st time.Time + if o.StartTime != nil { + st = o.StartTime.UTC() + } else { + st = time.Time{} + } + return st +} + +// --------------------------------------------------------------------------------------------------------------------- + // GetStatisticsOptions provides set of options for Client.GetStatistics type GetStatisticsOptions struct { // placeholder for future options @@ -205,7 +275,7 @@ func (o *GetStatisticsOptions) format() *generated.ServiceClientGetStatisticsOpt // --------------------------------------------------------------------------------------------------------------------- -// FilterBlobsOptions provides set of options for Client.FindBlobsByTags +// FilterBlobsOptions provides set of options for Client.FindBlobsByTags. type FilterBlobsOptions struct { // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The // operation returns the NextMarker value within the response body if the listing @@ -219,8 +289,6 @@ type FilterBlobsOptions struct { // of the results. For this reason, it is possible that the service will // return fewer results than specified by maxresults, or than the default of 5000. MaxResults *int32 - // Filters the results to return only to return only blobs whose tags match the specified expression. 
- Where *string } func (o *FilterBlobsOptions) format() *generated.ServiceClientFilterBlobsOptions { @@ -230,6 +298,61 @@ func (o *FilterBlobsOptions) format() *generated.ServiceClientFilterBlobsOptions return &generated.ServiceClientFilterBlobsOptions{ Marker: o.Marker, Maxresults: o.MaxResults, - Where: o.Where, } } + +// --------------------------------------------------------------------------------------------------------------------- + +// BatchDeleteOptions contains the optional parameters for the BatchBuilder.Delete method. +type BatchDeleteOptions struct { + blob.DeleteOptions + VersionID *string + Snapshot *string +} + +func (o *BatchDeleteOptions) format() (*generated.BlobClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + basics := generated.BlobClientDeleteOptions{ + DeleteSnapshots: o.DeleteSnapshots, + DeleteType: o.BlobDeleteType, // None by default + Snapshot: o.Snapshot, + VersionID: o.VersionID, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, leaseAccessConditions, modifiedAccessConditions +} + +// BatchSetTierOptions contains the optional parameters for the BatchBuilder.SetTier method. +type BatchSetTierOptions struct { + blob.SetTierOptions + VersionID *string + Snapshot *string +} + +func (o *BatchSetTierOptions) format() (*generated.BlobClientSetTierOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + basics := generated.BlobClientSetTierOptions{ + RehydratePriority: o.RehydratePriority, + Snapshot: o.Snapshot, + VersionID: o.VersionID, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, leaseAccessConditions, modifiedAccessConditions +} + +// SubmitBatchOptions contains the optional parameters for the Client.SubmitBatch method. +type SubmitBatchOptions struct { + // placeholder for future options +} + +func (o *SubmitBatchOptions) format() *generated.ServiceClientSubmitBatchOptions { + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go index 788514cca..2dbf97165 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go @@ -7,6 +7,7 @@ package service import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" ) @@ -25,6 +26,9 @@ type GetAccountInfoResponse = generated.ServiceClientGetAccountInfoResponse // ListContainersResponse contains the response from method Client.ListContainersSegment. type ListContainersResponse = generated.ServiceClientListContainersSegmentResponse +// ListContainersSegmentResponse - An enumeration of containers +type ListContainersSegmentResponse = generated.ListContainersSegmentResponse + // GetPropertiesResponse contains the response from method Client.GetProperties. type GetPropertiesResponse = generated.ServiceClientGetPropertiesResponse @@ -39,3 +43,21 @@ type FilterBlobsResponse = generated.ServiceClientFilterBlobsResponse // GetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey. 
type GetUserDelegationKeyResponse = generated.ServiceClientGetUserDelegationKeyResponse + +// SubmitBatchResponse contains the response from method Client.SubmitBatch. +type SubmitBatchResponse struct { + // Responses contains the responses of the sub-requests in the batch + Responses []*BatchResponseItem + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BatchResponseItem contains the response for the individual sub-requests. +type BatchResponseItem = exported.BatchResponseItem diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go index 6612feb4b..1841d146f 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go @@ -59,6 +59,8 @@ added, it doesn't exist in real life. As such I've put a PEM decoder into here. // For details see https://aka.ms/msal-net-authenticationresult type AuthResult = base.AuthResult +type AuthenticationScheme = authority.AuthenticationScheme + type Account = shared.Account // CertFromPEM converts a PEM file (.pem or .key) for use with [NewCredFromCert]. The file @@ -454,6 +456,33 @@ func WithClaims(claims string) interface { } } +// WithAuthenticationScheme is an extensibility mechanism designed to be used only by Azure Arc for proof of possession access tokens. +func WithAuthenticationScheme(authnScheme AuthenticationScheme) interface { + AcquireSilentOption + AcquireByCredentialOption + options.CallOption +} { + return struct { + AcquireSilentOption + AcquireByCredentialOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenSilentOptions: + t.authnScheme = authnScheme + case *acquireTokenByCredentialOptions: + t.authnScheme = authnScheme + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + // WithTenantID specifies a tenant for a single authentication. It may be different than the tenant set in [New]. // This option is valid for any token acquisition method. 
func WithTenantID(tenantID string) interface { @@ -499,6 +528,7 @@ func WithTenantID(tenantID string) interface { type acquireTokenSilentOptions struct { account Account claims, tenantID string + authnScheme AuthenticationScheme } // AcquireSilentOption is implemented by options for AcquireTokenSilent @@ -549,6 +579,7 @@ func (cca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts Credential: cca.cred, IsAppCache: o.account.IsZero(), TenantID: o.tenantID, + AuthnScheme: o.authnScheme, } return cca.base.AcquireTokenSilent(ctx, silentParameters) @@ -614,6 +645,7 @@ func (cca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redir // acquireTokenByCredentialOptions contains optional configuration for AcquireTokenByCredential type acquireTokenByCredentialOptions struct { claims, tenantID string + authnScheme AuthenticationScheme } // AcquireByCredentialOption is implemented by options for AcquireTokenByCredential @@ -637,7 +669,9 @@ func (cca Client) AcquireTokenByCredential(ctx context.Context, scopes []string, authParams.Scopes = scopes authParams.AuthorizationType = authority.ATClientCredentials authParams.Claims = o.claims - + if o.authnScheme != nil { + authParams.AuthnScheme = o.authnScheme + } token, err := cca.base.Token.Credential(ctx, authParams, cca.cred) if err != nil { return AuthResult{}, err diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go index 5f68384f6..09a0d92f5 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go @@ -54,6 +54,7 @@ type AcquireTokenSilentParameters struct { UserAssertion string AuthorizationType authority.AuthorizeType Claims string + AuthnScheme authority.AuthenticationScheme } // AcquireTokenAuthCodeParameters contains the parameters required to acquire an access token using the auth code flow. 
@@ -289,6 +290,9 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen authParams.AuthorizationType = silent.AuthorizationType authParams.Claims = silent.Claims authParams.UserAssertion = silent.UserAssertion + if silent.AuthnScheme != nil { + authParams.AuthnScheme = silent.AuthnScheme + } m := b.pmanager if authParams.AuthorizationType != authority.ATOnBehalfOf { @@ -313,6 +317,7 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen if silent.Claims == "" { ar, err = AuthResultFromStorage(storageTokenResponse) if err == nil { + ar.AccessToken, err = authParams.AuthnScheme.FormatAccessToken(ar.AccessToken) return ar, err } } @@ -417,6 +422,11 @@ func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.Au if err == nil && b.cacheAccessor != nil { err = b.cacheAccessor.Export(ctx, b.manager, cache.ExportHints{PartitionKey: key}) } + if err != nil { + return AuthResult{}, err + } + + ar.AccessToken, err = authParams.AuthnScheme.FormatAccessToken(ar.AccessToken) return ar, err } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go index 5d4c9f1d1..f9be90276 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go @@ -12,6 +12,7 @@ import ( internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" ) @@ -75,12 +76,14 @@ type AccessToken struct { ExtendedExpiresOn internalTime.Unix `json:"extended_expires_on,omitempty"` CachedAt internalTime.Unix `json:"cached_at,omitempty"` UserAssertionHash string `json:"user_assertion_hash,omitempty"` + TokenType string `json:"token_type,omitempty"` + AuthnSchemeKeyID string `json:"keyid,omitempty"` AdditionalFields map[string]interface{} } // NewAccessToken is the constructor for AccessToken. -func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, extendedExpiresOn time.Time, scopes, token string) AccessToken { +func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, extendedExpiresOn time.Time, scopes, token, tokenType, authnSchemeKeyID string) AccessToken { return AccessToken{ HomeAccountID: homeID, Environment: env, @@ -92,6 +95,8 @@ func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, ex CachedAt: internalTime.Unix{T: cachedAt.UTC()}, ExpiresOn: internalTime.Unix{T: expiresOn.UTC()}, ExtendedExpiresOn: internalTime.Unix{T: extendedExpiresOn.UTC()}, + TokenType: tokenType, + AuthnSchemeKeyID: authnSchemeKeyID, } } @@ -101,6 +106,11 @@ func (a AccessToken) Key() string { []string{a.HomeAccountID, a.Environment, a.CredentialType, a.ClientID, a.Realm, a.Scopes}, shared.CacheKeySeparator, ) + // add token type to key for new access tokens types. 
skip for bearer token type to + // preserve fwd and back compat between a common cache and msal clients + if !strings.EqualFold(a.TokenType, authority.AccessTokenTypeBearer) { + key = strings.Join([]string{key, a.TokenType}, shared.CacheKeySeparator) + } return strings.ToLower(key) } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go index 5e1cae0b8..c09318330 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go @@ -41,6 +41,8 @@ func (m *PartitionedManager) Read(ctx context.Context, authParameters authority. realm := authParameters.AuthorityInfo.Tenant clientID := authParameters.ClientID scopes := authParameters.Scopes + authnSchemeKeyID := authParameters.AuthnScheme.KeyID() + tokenType := authParameters.AuthnScheme.AccessTokenType() // fetch metadata if instanceDiscovery is enabled aliases := []string{authParameters.AuthorityInfo.Host} @@ -57,7 +59,7 @@ func (m *PartitionedManager) Read(ctx context.Context, authParameters authority. // errors returned by read* methods indicate a cache miss and are therefore non-fatal. We continue populating // TokenResponse fields so that e.g. lack of an ID token doesn't prevent the caller from receiving a refresh token. - accessToken, err := m.readAccessToken(aliases, realm, clientID, userAssertionHash, scopes, partitionKeyFromRequest) + accessToken, err := m.readAccessToken(aliases, realm, clientID, userAssertionHash, scopes, partitionKeyFromRequest, tokenType, authnSchemeKeyID) if err == nil { tr.AccessToken = accessToken } @@ -92,7 +94,7 @@ func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenRes target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator) userAssertionHash := authParameters.AssertionHash() cachedAt := time.Now() - + authnSchemeKeyID := authParameters.AuthnScheme.KeyID() var account shared.Account if len(tokenResponse.RefreshToken) > 0 { @@ -116,6 +118,8 @@ func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenRes tokenResponse.ExtExpiresOn.T, target, tokenResponse.AccessToken, + tokenResponse.TokenType, + authnSchemeKeyID, ) if authParameters.AuthorizationType == authority.ATOnBehalfOf { accessToken.UserAssertionHash = userAssertionHash // get Hash method on this @@ -215,7 +219,7 @@ func (m *PartitionedManager) aadMetadata(ctx context.Context, authorityInfo auth return m.aadCache[authorityInfo.Host], nil } -func (m *PartitionedManager) readAccessToken(envAliases []string, realm, clientID, userAssertionHash string, scopes []string, partitionKey string) (AccessToken, error) { +func (m *PartitionedManager) readAccessToken(envAliases []string, realm, clientID, userAssertionHash string, scopes []string, partitionKey, tokenType, authnSchemeKeyID string) (AccessToken, error) { m.contractMu.RLock() defer m.contractMu.RUnlock() if accessTokens, ok := m.contract.AccessTokensPartition[partitionKey]; ok { @@ -224,9 +228,11 @@ func (m *PartitionedManager) readAccessToken(envAliases []string, realm, clientI // an issue, however if it does become a problem then we know where to look. 
for _, at := range accessTokens { if at.Realm == realm && at.ClientID == clientID && at.UserAssertionHash == userAssertionHash { - if checkAlias(at.Environment, envAliases) { - if isMatchingScopes(scopes, at.Scopes) { - return at, nil + if at.TokenType == tokenType && at.AuthnSchemeKeyID == authnSchemeKeyID { + if checkAlias(at.Environment, envAliases) { + if isMatchingScopes(scopes, at.Scopes) { + return at, nil + } } } } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go index d3a39e005..11263822b 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go @@ -89,6 +89,8 @@ func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams) realm := authParameters.AuthorityInfo.Tenant clientID := authParameters.ClientID scopes := authParameters.Scopes + authnSchemeKeyID := authParameters.AuthnScheme.KeyID() + tokenType := authParameters.AuthnScheme.AccessTokenType() // fetch metadata if instanceDiscovery is enabled aliases := []string{authParameters.AuthorityInfo.Host} @@ -100,7 +102,7 @@ func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams) aliases = metadata.Aliases } - accessToken := m.readAccessToken(homeAccountID, aliases, realm, clientID, scopes) + accessToken := m.readAccessToken(homeAccountID, aliases, realm, clientID, scopes, tokenType, authnSchemeKeyID) tr.AccessToken = accessToken if homeAccountID == "" { @@ -140,6 +142,7 @@ func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse acces clientID := authParameters.ClientID target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator) cachedAt := time.Now() + authnSchemeKeyID := authParameters.AuthnScheme.KeyID() var account shared.Account @@ -161,6 +164,8 @@ func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse acces tokenResponse.ExtExpiresOn.T, target, tokenResponse.AccessToken, + tokenResponse.TokenType, + authnSchemeKeyID, ) // Since we have a valid access token, cache it before moving on. @@ -248,7 +253,7 @@ func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info) return m.aadCache[authorityInfo.Host], nil } -func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string) AccessToken { +func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string, tokenType, authnSchemeKeyID string) AccessToken { m.contractMu.RLock() defer m.contractMu.RUnlock() // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens. @@ -256,9 +261,11 @@ func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, cli // an issue, however if it does become a problem then we know where to look. 
for _, at := range m.contract.AccessTokens { if at.HomeAccountID == homeID && at.Realm == realm && at.ClientID == clientID { - if checkAlias(at.Environment, envAliases) { - if isMatchingScopes(scopes, at.Scopes) { - return at + if (at.TokenType == tokenType && at.AuthnSchemeKeyID == authnSchemeKeyID) || (at.TokenType == "" && (tokenType == "" || tokenType == "Bearer")) { + if checkAlias(at.Environment, envAliases) { + if isMatchingScopes(scopes, at.Scopes) { + return at + } } } } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go index ebd86e2ba..ef8d908a4 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go @@ -119,6 +119,7 @@ func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams return accesstokens.TokenResponse{}, err } return accesstokens.TokenResponse{ + TokenType: authParams.AuthnScheme.AccessTokenType(), AccessToken: tr.AccessToken, ExpiresOn: internalTime.DurationTime{ T: now.Add(time.Duration(tr.ExpiresInSeconds) * time.Second), diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go index 003d38648..a7b7b0742 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go @@ -380,6 +380,12 @@ func (c Client) FromSamlGrant(ctx context.Context, authParameters authority.Auth func (c Client) doTokenResp(ctx context.Context, authParams authority.AuthParams, qv url.Values) (TokenResponse, error) { resp := TokenResponse{} + if authParams.AuthnScheme != nil { + trParams := authParams.AuthnScheme.TokenRequestParams() + for k, v := range trParams { + qv.Set(k, v) + } + } err := c.Comm.URLFormCall(ctx, authParams.Endpoints.TokenEndpoint, qv, &resp) if err != nil { return resp, err diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go index 3dd61d5b5..3107b45c1 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go @@ -168,6 +168,7 @@ type TokenResponse struct { AccessToken string `json:"access_token"` RefreshToken string `json:"refresh_token"` + TokenType string `json:"token_type"` FamilyID string `json:"foci"` IDToken IDToken `json:"id_token"` diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go index 7b2ccb4f5..9d60734f8 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go +++ 
b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go @@ -29,6 +29,7 @@ const ( defaultAPIVersion = "2021-10-01" imdsEndpoint = "http://169.254.169.254/metadata/instance/compute/location?format=text&api-version=" + defaultAPIVersion autoDetectRegion = "TryAutoDetect" + AccessTokenTypeBearer = "Bearer" ) // These are various hosts that host AAD Instance discovery endpoints. @@ -138,6 +139,39 @@ const ( ADFS = "ADFS" ) +// AuthenticationScheme is an extensibility mechanism designed to be used only by Azure Arc for proof of possession access tokens. +type AuthenticationScheme interface { + // Extra parameters that are added to the request to the /token endpoint. + TokenRequestParams() map[string]string + // Key ID of the public / private key pair used by the encryption algorithm, if any. + // Tokens obtained by authentication schemes that use this are bound to the KeyId, i.e. + // if a different kid is presented, the access token cannot be used. + KeyID() string + // Creates the access token that goes into an Authorization HTTP header. + FormatAccessToken(accessToken string) (string, error) + //Expected to match the token_type parameter returned by ESTS. Used to disambiguate + // between ATs of different types (e.g. Bearer and PoP) when loading from cache etc. + AccessTokenType() string +} + +// default authn scheme realizing AuthenticationScheme for "Bearer" tokens +type BearerAuthenticationScheme struct{} + +var bearerAuthnScheme BearerAuthenticationScheme + +func (ba *BearerAuthenticationScheme) TokenRequestParams() map[string]string { + return nil +} +func (ba *BearerAuthenticationScheme) KeyID() string { + return "" +} +func (ba *BearerAuthenticationScheme) FormatAccessToken(accessToken string) (string, error) { + return accessToken, nil +} +func (ba *BearerAuthenticationScheme) AccessTokenType() string { + return AccessTokenTypeBearer +} + // AuthParams represents the parameters used for authorization for token acquisition. type AuthParams struct { AuthorityInfo Info @@ -180,6 +214,8 @@ type AuthParams struct { LoginHint string // DomainHint is a directive that can be used to accelerate the user to their federated IdP sign-in page DomainHint string + // AuthnScheme is an optional scheme for formatting access tokens + AuthnScheme AuthenticationScheme } // NewAuthParams creates an authorization parameters object. @@ -188,6 +224,7 @@ func NewAuthParams(clientID string, authorityInfo Info) AuthParams { ClientID: clientID, AuthorityInfo: authorityInfo, CorrelationID: uuid.New().String(), + AuthnScheme: &bearerAuthnScheme, } } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go index 2ac2d09e4..eb16b405c 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go @@ -5,4 +5,4 @@ package version // Version is the version of this client package that is communicated to the server. 
-const Version = "1.1.1" +const Version = "1.2.0" diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go index 88b217ded..2221b3d33 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go @@ -47,6 +47,8 @@ import ( // For details see https://aka.ms/msal-net-authenticationresult type AuthResult = base.AuthResult +type AuthenticationScheme = authority.AuthenticationScheme + type Account = shared.Account var errNoAccount = errors.New("no account was specified with public.WithAccount(), or the specified account is invalid") @@ -211,6 +213,33 @@ func WithClaims(claims string) interface { } } +// WithAuthenticationScheme is an extensibility mechanism designed to be used only by Azure Arc for proof of possession access tokens. +func WithAuthenticationScheme(authnScheme AuthenticationScheme) interface { + AcquireSilentOption + AcquireInteractiveOption + options.CallOption +} { + return struct { + AcquireSilentOption + AcquireInteractiveOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenSilentOptions: + t.authnScheme = authnScheme + case *interactiveAuthOptions: + t.authnScheme = authnScheme + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + // WithTenantID specifies a tenant for a single authentication. It may be different than the tenant set in [New] by [WithAuthority]. // This option is valid for any token acquisition method. func WithTenantID(tenantID string) interface { @@ -260,6 +289,7 @@ func WithTenantID(tenantID string) interface { type acquireTokenSilentOptions struct { account Account claims, tenantID string + authnScheme AuthenticationScheme } // AcquireSilentOption is implemented by options for AcquireTokenSilent @@ -310,6 +340,7 @@ func (pca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts RequestType: accesstokens.ATPublic, IsAppCache: false, TenantID: o.tenantID, + AuthnScheme: o.authnScheme, } return pca.base.AcquireTokenSilent(ctx, silentParameters) @@ -482,6 +513,7 @@ func (pca Client) RemoveAccount(ctx context.Context, account Account) error { type interactiveAuthOptions struct { claims, domainHint, loginHint, redirectURI, tenantID string openURL func(url string) error + authnScheme AuthenticationScheme } // AcquireInteractiveOption is implemented by options for AcquireTokenInteractive @@ -628,6 +660,9 @@ func (pca Client) AcquireTokenInteractive(ctx context.Context, scopes []string, authParams.DomainHint = o.domainHint authParams.State = uuid.New().String() authParams.Prompt = "select_account" + if o.authnScheme != nil { + authParams.AuthnScheme = o.authnScheme + } res, err := pca.browserLogin(ctx, redirectURL, authParams, o.openURL) if err != nil { return AuthResult{}, err diff --git a/vendor/github.com/kubernetes-csi/csi-lib-utils/protosanitizer/protosanitizer.go b/vendor/github.com/kubernetes-csi/csi-lib-utils/protosanitizer/protosanitizer.go index cad31f17f..af64a7b27 100644 --- a/vendor/github.com/kubernetes-csi/csi-lib-utils/protosanitizer/protosanitizer.go +++ b/vendor/github.com/kubernetes-csi/csi-lib-utils/protosanitizer/protosanitizer.go @@ -26,6 +26,7 @@ import ( "github.com/golang/protobuf/descriptor" "github.com/golang/protobuf/proto" + 
protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" protobufdescriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" ) @@ -55,7 +56,7 @@ func StripSecretsCSI03(msg interface{}) fmt.Stringer { type stripSecrets struct { msg interface{} - isSecretField func(field *protobufdescriptor.FieldDescriptorProto) bool + isSecretField func(field *protobuf.FieldDescriptorProto) bool } func (s *stripSecrets) String() string { @@ -109,7 +110,7 @@ func (s *stripSecrets) strip(parsed interface{}, msg interface{}) { if _, ok := parsedFields[field.GetName()]; ok { parsedFields[field.GetName()] = "***stripped***" } - } else if field.GetType() == protobufdescriptor.FieldDescriptorProto_TYPE_MESSAGE { + } else if field.GetType() == protobuf.FieldDescriptorProto_TYPE_MESSAGE { // When we get here, // the type name is something like ".csi.v1.CapacityRange" (leading dot!) // and looking up "csi.v1.CapacityRange" @@ -149,7 +150,7 @@ func (s *stripSecrets) strip(parsed interface{}, msg interface{}) { // isCSI1Secret uses the csi.E_CsiSecret extension from CSI 1.0 to // determine whether a field contains secrets. -func isCSI1Secret(field *protobufdescriptor.FieldDescriptorProto) bool { +func isCSI1Secret(field *protobuf.FieldDescriptorProto) bool { ex, err := proto.GetExtension(field.Options, e_CsiSecret) return err == nil && ex != nil && *ex.(*bool) } @@ -171,6 +172,6 @@ var e_CsiSecret = &proto.ExtensionDesc{ // isCSI03Secret relies on the naming convention in CSI <= 0.3 // to determine whether a field contains secrets. -func isCSI03Secret(field *protobufdescriptor.FieldDescriptorProto) bool { +func isCSI03Secret(field *protobuf.FieldDescriptorProto) bool { return strings.HasSuffix(field.GetName(), "_secrets") } diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go new file mode 100644 index 000000000..fbf1934a0 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/cmp.go @@ -0,0 +1,44 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// min is a version of the predeclared function from the Go 1.21 release. +func min[T constraints.Ordered](a, b T) T { + if a < b || isNaN(a) { + return a + } + return b +} + +// max is a version of the predeclared function from the Go 1.21 release. +func max[T constraints.Ordered](a, b T) T { + if a > b || isNaN(a) { + return a + } + return b +} + +// cmpLess is a copy of cmp.Less from the Go 1.21 release. +func cmpLess[T constraints.Ordered](x, y T) bool { + return (isNaN(x) && !isNaN(y)) || x < y +} + +// cmpCompare is a copy of cmp.Compare from the Go 1.21 release. +func cmpCompare[T constraints.Ordered](x, y T) int { + xNaN := isNaN(x) + yNaN := isNaN(y) + if xNaN && yNaN { + return 0 + } + if xNaN || x < y { + return -1 + } + if yNaN || x > y { + return +1 + } + return 0 +} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index 8a237c5d6..5e8158bba 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -3,23 +3,20 @@ // license that can be found in the LICENSE file. // Package slices defines various functions useful with slices of any type. -// Unless otherwise specified, these functions all apply to the elements -// of a slice at index 0 <= i < len(s). 
-// -// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a -// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings), -// or the sorting may fail to sort correctly. A common case is when sorting slices of -// floating-point numbers containing NaN values. package slices -import "golang.org/x/exp/constraints" +import ( + "unsafe" + + "golang.org/x/exp/constraints" +) // Equal reports whether two slices are equal: the same length and all // elements equal. If the lengths are different, Equal returns false. // Otherwise, the elements are compared in increasing index order, and the // comparison stops at the first unequal pair. // Floating point NaNs are not considered equal. -func Equal[E comparable](s1, s2 []E) bool { +func Equal[S ~[]E, E comparable](s1, s2 S) bool { if len(s1) != len(s2) { return false } @@ -31,12 +28,12 @@ func Equal[E comparable](s1, s2 []E) bool { return true } -// EqualFunc reports whether two slices are equal using a comparison +// EqualFunc reports whether two slices are equal using an equality // function on each pair of elements. If the lengths are different, // EqualFunc returns false. Otherwise, the elements are compared in // increasing index order, and the comparison stops at the first index // for which eq returns false. -func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { +func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool { if len(s1) != len(s2) { return false } @@ -49,45 +46,37 @@ func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { return true } -// Compare compares the elements of s1 and s2. -// The elements are compared sequentially, starting at index 0, +// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair +// of elements. The elements are compared sequentially, starting at index 0, // until one element is not equal to the other. // The result of comparing the first non-matching elements is returned. // If both slices are equal until one of them ends, the shorter slice is // considered less than the longer one. // The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. -// Comparisons involving floating point NaNs are ignored. -func Compare[E constraints.Ordered](s1, s2 []E) int { - s2len := len(s2) +func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int { for i, v1 := range s1 { - if i >= s2len { + if i >= len(s2) { return +1 } v2 := s2[i] - switch { - case v1 < v2: - return -1 - case v1 > v2: - return +1 + if c := cmpCompare(v1, v2); c != 0 { + return c } } - if len(s1) < s2len { + if len(s1) < len(s2) { return -1 } return 0 } -// CompareFunc is like Compare but uses a comparison function -// on each pair of elements. The elements are compared in increasing -// index order, and the comparisons stop after the first time cmp -// returns non-zero. +// CompareFunc is like [Compare] but uses a custom comparison function on each +// pair of elements. // The result is the first non-zero result of cmp; if cmp always // returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), // and +1 if len(s1) > len(s2). 
-func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { - s2len := len(s2) +func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int { for i, v1 := range s1 { - if i >= s2len { + if i >= len(s2) { return +1 } v2 := s2[i] @@ -95,7 +84,7 @@ func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { return c } } - if len(s1) < s2len { + if len(s1) < len(s2) { return -1 } return 0 @@ -103,9 +92,9 @@ func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { // Index returns the index of the first occurrence of v in s, // or -1 if not present. -func Index[E comparable](s []E, v E) int { - for i, vs := range s { - if v == vs { +func Index[S ~[]E, E comparable](s S, v E) int { + for i := range s { + if v == s[i] { return i } } @@ -114,9 +103,9 @@ func Index[E comparable](s []E, v E) int { // IndexFunc returns the first index i satisfying f(s[i]), // or -1 if none do. -func IndexFunc[E any](s []E, f func(E) bool) int { - for i, v := range s { - if f(v) { +func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { + for i := range s { + if f(s[i]) { return i } } @@ -124,39 +113,224 @@ func IndexFunc[E any](s []E, f func(E) bool) int { } // Contains reports whether v is present in s. -func Contains[E comparable](s []E, v E) bool { +func Contains[S ~[]E, E comparable](s S, v E) bool { return Index(s, v) >= 0 } +// ContainsFunc reports whether at least one +// element e of s satisfies f(e). +func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { + return IndexFunc(s, f) >= 0 +} + // Insert inserts the values v... into s at index i, // returning the modified slice. -// In the returned slice r, r[i] == v[0]. +// The elements at s[i:] are shifted up to make room. +// In the returned slice r, r[i] == v[0], +// and r[i+len(v)] == value originally at r[i]. // Insert panics if i is out of range. // This function is O(len(s) + len(v)). func Insert[S ~[]E, E any](s S, i int, v ...E) S { - tot := len(s) + len(v) - if tot <= cap(s) { - s2 := s[:tot] - copy(s2[i+len(v):], s[i:]) + m := len(v) + if m == 0 { + return s + } + n := len(s) + if i == n { + return append(s, v...) + } + if n+m > cap(s) { + // Use append rather than make so that we bump the size of + // the slice up to the next storage class. + // This is what Grow does but we don't call Grow because + // that might copy the values twice. + s2 := append(s[:i], make(S, n+m-i)...) copy(s2[i:], v) + copy(s2[i+m:], s[i:]) return s2 } - s2 := make(S, tot) - copy(s2, s[:i]) - copy(s2[i:], v) - copy(s2[i+len(v):], s[i:]) - return s2 + s = s[:n+m] + + // before: + // s: aaaaaaaabbbbccccccccdddd + // ^ ^ ^ ^ + // i i+m n n+m + // after: + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // + // a are the values that don't move in s. + // v are the values copied in from v. + // b and c are the values from s that are shifted up in index. + // d are the values that get overwritten, never to be seen again. + + if !overlaps(v, s[i+m:]) { + // Easy case - v does not overlap either the c or d regions. + // (It might be in some of a or b, or elsewhere entirely.) + // The data we copy up doesn't write to v at all, so just do it. + + copy(s[i+m:], s[i:]) + + // Now we have + // s: aaaaaaaabbbbbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // Note the b values are duplicated. + + copy(s[i:], v) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s + } + + // The hard case - v overlaps c or d. 
We can't just shift up + // the data because we'd move or clobber the values we're trying + // to insert. + // So instead, write v on top of d, then rotate. + copy(s[n:], v) + + // Now we have + // s: aaaaaaaabbbbccccccccvvvv + // ^ ^ ^ ^ + // i i+m n n+m + + rotateRight(s[i:], m) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s } // Delete removes the elements s[i:j] from s, returning the modified slice. // Delete panics if s[i:j] is not a valid slice of s. -// Delete modifies the contents of the slice s; it does not create a new slice. -// Delete is O(len(s)-(j-i)), so if many items must be deleted, it is better to +// Delete is O(len(s)-j), so if many items must be deleted, it is better to // make a single call deleting them all together than to delete one at a time. +// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those +// elements contain pointers you might consider zeroing those elements so that +// objects they reference can be garbage collected. func Delete[S ~[]E, E any](s S, i, j int) S { + _ = s[i:j] // bounds check + return append(s[:i], s[j:]...) } +// DeleteFunc removes any elements from s for which del returns true, +// returning the modified slice. +// When DeleteFunc removes m elements, it might not modify the elements +// s[len(s)-m:len(s)]. If those elements contain pointers you might consider +// zeroing those elements so that objects they reference can be garbage +// collected. +func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { + i := IndexFunc(s, del) + if i == -1 { + return s + } + // Don't start copying elements until we find one to delete. + for j := i + 1; j < len(s); j++ { + if v := s[j]; !del(v) { + s[i] = v + i++ + } + } + return s[:i] +} + +// Replace replaces the elements s[i:j] by the given v, and returns the +// modified slice. Replace panics if s[i:j] is not a valid slice of s. +func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { + _ = s[i:j] // verify that i:j is a valid subslice + + if i == j { + return Insert(s, i, v...) + } + if j == len(s) { + return append(s[:i], v...) + } + + tot := len(s[:i]) + len(v) + len(s[j:]) + if tot > cap(s) { + // Too big to fit, allocate and copy over. + s2 := append(s[:i], make(S, tot-i)...) // See Insert + copy(s2[i:], v) + copy(s2[i+len(v):], s[j:]) + return s2 + } + + r := s[:tot] + + if i+len(v) <= j { + // Easy, as v fits in the deleted portion. + copy(r[i:], v) + if i+len(v) != j { + copy(r[i+len(v):], s[j:]) + } + return r + } + + // We are expanding (v is bigger than j-i). + // The situation is something like this: + // (example has i=4,j=8,len(s)=16,len(v)=6) + // s: aaaaxxxxbbbbbbbbyy + // ^ ^ ^ ^ + // i j len(s) tot + // a: prefix of s + // x: deleted range + // b: more of s + // y: area to expand into + + if !overlaps(r[i+len(v):], v) { + // Easy, as v is not clobbered by the first copy. + copy(r[i+len(v):], s[j:]) + copy(r[i:], v) + return r + } + + // This is a situation where we don't have a single place to which + // we can copy v. Parts of it need to go to two different places. + // We want to copy the prefix of v into y and the suffix into x, then + // rotate |y| spots to the right. + // + // v[2:] v[:2] + // | | + // s: aaaavvvvbbbbbbbbvv + // ^ ^ ^ ^ + // i j len(s) tot + // + // If either of those two destinations don't alias v, then we're good. 
+ y := len(v) - (j - i) // length of y portion + + if !overlaps(r[i:j], v) { + copy(r[i:j], v[y:]) + copy(r[len(s):], v[:y]) + rotateRight(r[i:], y) + return r + } + if !overlaps(r[len(s):], v) { + copy(r[len(s):], v[:y]) + copy(r[i:j], v[y:]) + rotateRight(r[i:], y) + return r + } + + // Now we know that v overlaps both x and y. + // That means that the entirety of b is *inside* v. + // So we don't need to preserve b at all; instead we + // can copy v first, then copy the b part of v out of + // v to the right destination. + k := startIdx(v, s[j:]) + copy(r[i:], v) + copy(r[i+len(v):], r[i+k:]) + return r +} + // Clone returns a copy of the slice. // The elements are copied using assignment, so this is a shallow clone. func Clone[S ~[]E, E any](s S) S { @@ -169,35 +343,40 @@ func Clone[S ~[]E, E any](s S) S { // Compact replaces consecutive runs of equal elements with a single copy. // This is like the uniq command found on Unix. -// Compact modifies the contents of the slice s; it does not create a new slice. +// Compact modifies the contents of the slice s and returns the modified slice, +// which may have a smaller length. +// When Compact discards m elements in total, it might not modify the elements +// s[len(s)-m:len(s)]. If those elements contain pointers you might consider +// zeroing those elements so that objects they reference can be garbage collected. func Compact[S ~[]E, E comparable](s S) S { - if len(s) == 0 { + if len(s) < 2 { return s } i := 1 - last := s[0] - for _, v := range s[1:] { - if v != last { - s[i] = v + for k := 1; k < len(s); k++ { + if s[k] != s[k-1] { + if i != k { + s[i] = s[k] + } i++ - last = v } } return s[:i] } -// CompactFunc is like Compact but uses a comparison function. +// CompactFunc is like [Compact] but uses an equality function to compare elements. +// For runs of elements that compare equal, CompactFunc keeps the first one. func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { - if len(s) == 0 { + if len(s) < 2 { return s } i := 1 - last := s[0] - for _, v := range s[1:] { - if !eq(v, last) { - s[i] = v + for k := 1; k < len(s); k++ { + if !eq(s[k], s[k-1]) { + if i != k { + s[i] = s[k] + } i++ - last = v } } return s[:i] @@ -205,14 +384,116 @@ func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { // Grow increases the slice's capacity, if necessary, to guarantee space for // another n elements. After Grow(n), at least n elements can be appended -// to the slice without another allocation. Grow may modify elements of the -// slice between the length and the capacity. If n is negative or too large to +// to the slice without another allocation. If n is negative or too large to // allocate the memory, Grow panics. func Grow[S ~[]E, E any](s S, n int) S { - return append(s, make(S, n)...)[:len(s)] + if n < 0 { + panic("cannot be negative") + } + if n -= cap(s) - len(s); n > 0 { + // TODO(https://go.dev/issue/53888): Make using []E instead of S + // to workaround a compiler bug where the runtime.growslice optimization + // does not take effect. Revert when the compiler is fixed. + s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] + } + return s } // Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. 
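// Illustrative sketch, not part of the upstream diff: a minimal use of the
// helpers whose documentation is updated above. Insert, Delete and Compact
// return the modified slice, which callers must keep; the values are arbitrary.
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []int{1, 1, 2, 3, 3, 3}

	// Compact collapses runs of equal elements in place and returns the
	// shortened slice; the old header keeps its former length.
	s = slices.Compact(s)
	fmt.Println(s) // [1 2 3]

	// Insert shifts s[1:] up and places the new values starting at index 1.
	s = slices.Insert(s, 1, 8, 9)
	fmt.Println(s) // [1 8 9 2 3]

	// Delete removes s[1:3]; as documented above, trailing elements may be
	// left untouched, which only matters when they hold pointers.
	s = slices.Delete(s, 1, 3)
	fmt.Println(s) // [1 2 3]
}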
func Clip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] } + +// Rotation algorithm explanation: +// +// rotate left by 2 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join first parts +// 89234567 01 +// recursively rotate first left part by 2 +// 23456789 01 +// join at the end +// 2345678901 +// +// rotate left by 8 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join last parts +// 89 23456701 +// recursively rotate second part left by 6 +// 89 01234567 +// join at the end +// 8901234567 + +// TODO: There are other rotate algorithms. +// This algorithm has the desirable property that it moves each element exactly twice. +// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes. +// The follow-cycles algorithm can be 1-write but it is not very cache friendly. + +// rotateLeft rotates b left by n spaces. +// s_final[i] = s_orig[i+r], wrapping around. +func rotateLeft[E any](s []E, r int) { + for r != 0 && r != len(s) { + if r*2 <= len(s) { + swap(s[:r], s[len(s)-r:]) + s = s[:len(s)-r] + } else { + swap(s[:len(s)-r], s[r:]) + s, r = s[len(s)-r:], r*2-len(s) + } + } +} +func rotateRight[E any](s []E, r int) { + rotateLeft(s, len(s)-r) +} + +// swap swaps the contents of x and y. x and y must be equal length and disjoint. +func swap[E any](x, y []E) { + for i := 0; i < len(x); i++ { + x[i], y[i] = y[i], x[i] + } +} + +// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap. +func overlaps[E any](a, b []E) bool { + if len(a) == 0 || len(b) == 0 { + return false + } + elemSize := unsafe.Sizeof(a[0]) + if elemSize == 0 { + return false + } + // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445. + // Also see crypto/internal/alias/alias.go:AnyOverlap + return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) && + uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1) +} + +// startIdx returns the index in haystack where the needle starts. +// prerequisite: the needle must be aliased entirely inside the haystack. +func startIdx[E any](haystack, needle []E) int { + p := &needle[0] + for i := range haystack { + if p == &haystack[i] { + return i + } + } + // TODO: what if the overlap is by a non-integral number of Es? + panic("needle not found") +} + +// Reverse reverses the elements of the slice in place. +func Reverse[S ~[]E, E any](s S) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go index c22e74bd1..b67897f76 100644 --- a/vendor/golang.org/x/exp/slices/sort.go +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp + package slices import ( @@ -11,97 +13,157 @@ import ( ) // Sort sorts a slice of any ordered type in ascending order. -// Sort may fail to sort correctly when sorting slices of floating-point -// numbers containing Not-a-number (NaN) values. -// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))}) -// instead if the input may contain NaNs. 
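// Illustrative sketch, not part of the upstream diff: callers migrating to the
// x/exp/slices version vendored here must replace less-style callbacks with
// three-way cmp callbacks, as the removed and added doc comments in this hunk
// describe. The names slice and its contents are made up for the example.
package main

import (
	"fmt"
	"strings"

	"golang.org/x/exp/slices"
)

func main() {
	names := []string{"mallory", "Alice", "bob"}

	// Previously: slices.SortFunc(names, func(a, b string) bool { ... }).
	// Now the callback returns negative/zero/positive instead of a bool.
	slices.SortFunc(names, func(a, b string) int {
		return strings.Compare(strings.ToLower(a), strings.ToLower(b))
	})
	fmt.Println(names) // [Alice bob mallory]
}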
-func Sort[E constraints.Ordered](x []E) { +// When sorting floating-point numbers, NaNs are ordered before other values. +func Sort[S ~[]E, E constraints.Ordered](x S) { n := len(x) pdqsortOrdered(x, 0, n, bits.Len(uint(n))) } -// SortFunc sorts the slice x in ascending order as determined by the less function. -// This sort is not guaranteed to be stable. +// SortFunc sorts the slice x in ascending order as determined by the cmp +// function. This sort is not guaranteed to be stable. +// cmp(a, b) should return a negative number when a < b, a positive number when +// a > b and zero when a == b. // -// SortFunc requires that less is a strict weak ordering. +// SortFunc requires that cmp is a strict weak ordering. // See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. -func SortFunc[E any](x []E, less func(a, b E) bool) { +func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { n := len(x) - pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less) + pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp) } -// SortStable sorts the slice x while keeping the original order of equal -// elements, using less to compare elements. -func SortStableFunc[E any](x []E, less func(a, b E) bool) { - stableLessFunc(x, len(x), less) +// SortStableFunc sorts the slice x while keeping the original order of equal +// elements, using cmp to compare elements in the same way as [SortFunc]. +func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { + stableCmpFunc(x, len(x), cmp) } // IsSorted reports whether x is sorted in ascending order. -func IsSorted[E constraints.Ordered](x []E) bool { +func IsSorted[S ~[]E, E constraints.Ordered](x S) bool { for i := len(x) - 1; i > 0; i-- { - if x[i] < x[i-1] { + if cmpLess(x[i], x[i-1]) { return false } } return true } -// IsSortedFunc reports whether x is sorted in ascending order, with less as the -// comparison function. -func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool { +// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the +// comparison function as defined by [SortFunc]. +func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool { for i := len(x) - 1; i > 0; i-- { - if less(x[i], x[i-1]) { + if cmp(x[i], x[i-1]) < 0 { return false } } return true } +// Min returns the minimal value in x. It panics if x is empty. +// For floating-point numbers, Min propagates NaNs (any NaN value in x +// forces the output to be NaN). +func Min[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Min: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = min(m, x[i]) + } + return m +} + +// MinFunc returns the minimal value in x, using cmp to compare elements. +// It panics if x is empty. If there is more than one minimal element +// according to the cmp function, MinFunc returns the first one. +func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MinFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) < 0 { + m = x[i] + } + } + return m +} + +// Max returns the maximal value in x. It panics if x is empty. +// For floating-point E, Max propagates NaNs (any NaN value in x +// forces the output to be NaN). +func Max[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Max: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = max(m, x[i]) + } + return m +} + +// MaxFunc returns the maximal value in x, using cmp to compare elements. +// It panics if x is empty. 
If there is more than one maximal element +// according to the cmp function, MaxFunc returns the first one. +func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MaxFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) > 0 { + m = x[i] + } + } + return m +} + // BinarySearch searches for target in a sorted slice and returns the position // where target is found, or the position where target would appear in the // sort order; it also returns a bool saying whether the target is really found // in the slice. The slice must be sorted in increasing order. -func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { - // search returns the leftmost position where f returns true, or len(x) if f - // returns false for all x. This is the insertion position for target in x, - // and could point to an element that's either == target or not. - pos := search(len(x), func(i int) bool { return x[i] >= target }) - if pos >= len(x) || x[pos] != target { - return pos, false - } else { - return pos, true - } -} - -// BinarySearchFunc works like BinarySearch, but uses a custom comparison -// function. The slice must be sorted in increasing order, where "increasing" is -// defined by cmp. cmp(a, b) is expected to return an integer comparing the two -// parameters: 0 if a == b, a negative number if a < b and a positive number if -// a > b. -func BinarySearchFunc[E any](x []E, target E, cmp func(E, E) int) (int, bool) { - pos := search(len(x), func(i int) bool { return cmp(x[i], target) >= 0 }) - if pos >= len(x) || cmp(x[pos], target) != 0 { - return pos, false - } else { - return pos, true +func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) { + // Inlining is faster than calling BinarySearchFunc with a lambda. + n := len(x) + // Define x[-1] < target and x[n] >= target. + // Invariant: x[i-1] < target, x[j] >= target. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if cmpLess(x[h], target) { + i = h + 1 // preserves x[i-1] < target + } else { + j = h // preserves x[j] >= target + } } + // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. + return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target))) } -func search(n int, f func(int) bool) int { - // Define f(-1) == false and f(n) == true. - // Invariant: f(i-1) == false, f(j) == true. +// BinarySearchFunc works like [BinarySearch], but uses a custom comparison +// function. The slice must be sorted in increasing order, where "increasing" +// is defined by cmp. cmp should return 0 if the slice element matches +// the target, a negative number if the slice element precedes the target, +// or a positive number if the slice element follows the target. +// cmp must implement the same ordering as the slice, such that if +// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. +func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) { + n := len(x) + // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . + // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. 
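	// Illustrative trace, not part of the upstream source, under an assumed
	// input: x = []int{10, 20, 30, 40}, target = 30, cmp ordering ints
	// ascending. The loop below then runs:
	//   i=0, j=4: h=2, cmp(30, 30) = 0  (not < 0) -> j=2
	//   i=0, j=2: h=1, cmp(20, 30) < 0            -> i=2
	// i == j == 2 and cmp(x[2], target) == 0, so the result is (2, true).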
i, j := 0, n for i < j { h := int(uint(i+j) >> 1) // avoid overflow when computing h // i ≤ h < j - if !f(h) { - i = h + 1 // preserves f(i-1) == false + if cmp(x[h], target) < 0 { + i = h + 1 // preserves cmp(x[i - 1], target) < 0 } else { - j = h // preserves f(j) == true + j = h // preserves cmp(x[j], target) >= 0 } } - // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. - return i + // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i. + return i, i < n && cmp(x[i], target) == 0 } type sortedHint int // hint for pdqsort when choosing the pivot @@ -125,3 +187,9 @@ func (r *xorshift) Next() uint64 { func nextPowerOfTwo(length int) uint { return 1 << bits.Len(uint(length)) } + +// isNaN reports whether x is a NaN without requiring the math package. +// This will always return false if T is not floating-point. +func isNaN[T constraints.Ordered](x T) bool { + return x != x +} diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go similarity index 64% rename from vendor/golang.org/x/exp/slices/zsortfunc.go rename to vendor/golang.org/x/exp/slices/zsortanyfunc.go index 2a632476c..06f2c7a24 100644 --- a/vendor/golang.org/x/exp/slices/zsortfunc.go +++ b/vendor/golang.org/x/exp/slices/zsortanyfunc.go @@ -6,28 +6,28 @@ package slices -// insertionSortLessFunc sorts data[a:b] using insertion sort. -func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +// insertionSortCmpFunc sorts data[a:b] using insertion sort. +func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { for i := a + 1; i < b; i++ { - for j := i; j > a && less(data[j], data[j-1]); j-- { + for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- { data[j], data[j-1] = data[j-1], data[j] } } } -// siftDownLessFunc implements the heap property on data[lo:hi]. +// siftDownCmpFunc implements the heap property on data[lo:hi]. // first is an offset into the array where the root of the heap lies. -func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) { +func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) { root := lo for { child := 2*root + 1 if child >= hi { break } - if child+1 < hi && less(data[first+child], data[first+child+1]) { + if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) { child++ } - if !less(data[first+root], data[first+child]) { + if !(cmp(data[first+root], data[first+child]) < 0) { return } data[first+root], data[first+child] = data[first+child], data[first+root] @@ -35,30 +35,30 @@ func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool } } -func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { first := a lo := 0 hi := b - a // Build heap with greatest element at top. for i := (hi - 1) / 2; i >= 0; i-- { - siftDownLessFunc(data, i, hi, first, less) + siftDownCmpFunc(data, i, hi, first, cmp) } // Pop elements, largest first, into end of data. for i := hi - 1; i >= 0; i-- { data[first], data[first+i] = data[first+i], data[first] - siftDownLessFunc(data, lo, i, first, less) + siftDownCmpFunc(data, lo, i, first, cmp) } } -// pdqsortLessFunc sorts data[a:b]. +// pdqsortCmpFunc sorts data[a:b]. // The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. 
// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf // C++ implementation: https://github.com/orlp/pdqsort // Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ // limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. -func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { +func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) { const maxInsertion = 12 var ( @@ -70,25 +70,25 @@ func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { length := b - a if length <= maxInsertion { - insertionSortLessFunc(data, a, b, less) + insertionSortCmpFunc(data, a, b, cmp) return } // Fall back to heapsort if too many bad choices were made. if limit == 0 { - heapSortLessFunc(data, a, b, less) + heapSortCmpFunc(data, a, b, cmp) return } // If the last partitioning was imbalanced, we need to breaking patterns. if !wasBalanced { - breakPatternsLessFunc(data, a, b, less) + breakPatternsCmpFunc(data, a, b, cmp) limit-- } - pivot, hint := choosePivotLessFunc(data, a, b, less) + pivot, hint := choosePivotCmpFunc(data, a, b, cmp) if hint == decreasingHint { - reverseRangeLessFunc(data, a, b, less) + reverseRangeCmpFunc(data, a, b, cmp) // The chosen pivot was pivot-a elements after the start of the array. // After reversing it is pivot-a elements before the end of the array. // The idea came from Rust's implementation. @@ -98,48 +98,48 @@ func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { // The slice is likely already sorted. if wasBalanced && wasPartitioned && hint == increasingHint { - if partialInsertionSortLessFunc(data, a, b, less) { + if partialInsertionSortCmpFunc(data, a, b, cmp) { return } } // Probably the slice contains many duplicate elements, partition the slice into // elements equal to and elements greater than the pivot. - if a > 0 && !less(data[a-1], data[pivot]) { - mid := partitionEqualLessFunc(data, a, b, pivot, less) + if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) { + mid := partitionEqualCmpFunc(data, a, b, pivot, cmp) a = mid continue } - mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less) + mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp) wasPartitioned = alreadyPartitioned leftLen, rightLen := mid-a, b-mid balanceThreshold := length / 8 if leftLen < rightLen { wasBalanced = leftLen >= balanceThreshold - pdqsortLessFunc(data, a, mid, limit, less) + pdqsortCmpFunc(data, a, mid, limit, cmp) a = mid + 1 } else { wasBalanced = rightLen >= balanceThreshold - pdqsortLessFunc(data, mid+1, b, limit, less) + pdqsortCmpFunc(data, mid+1, b, limit, cmp) b = mid } } } -// partitionLessFunc does one quicksort partition. +// partitionCmpFunc does one quicksort partition. // Let p = data[pivot] // Moves elements in data[a:b] around, so that data[i]
<p and data[j]>
=p for inewpivot. // On return, data[newpivot] = p -func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) { +func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) { data[a], data[pivot] = data[pivot], data[a] i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - for i <= j && less(data[i], data[a]) { + for i <= j && (cmp(data[i], data[a]) < 0) { i++ } - for i <= j && !less(data[j], data[a]) { + for i <= j && !(cmp(data[j], data[a]) < 0) { j-- } if i > j { @@ -151,10 +151,10 @@ func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) j-- for { - for i <= j && less(data[i], data[a]) { + for i <= j && (cmp(data[i], data[a]) < 0) { i++ } - for i <= j && !less(data[j], data[a]) { + for i <= j && !(cmp(data[j], data[a]) < 0) { j-- } if i > j { @@ -168,17 +168,17 @@ func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) return j, false } -// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. // It assumed that data[a:b] does not contain elements smaller than the data[pivot]. -func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) { +func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) { data[a], data[pivot] = data[pivot], data[a] i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned for { - for i <= j && !less(data[a], data[i]) { + for i <= j && !(cmp(data[a], data[i]) < 0) { i++ } - for i <= j && less(data[a], data[j]) { + for i <= j && (cmp(data[a], data[j]) < 0) { j-- } if i > j { @@ -191,15 +191,15 @@ func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) return i } -// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end. -func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool { +// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end. +func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool { const ( maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted shortestShifting = 50 // don't shift any elements on short arrays ) i := a + 1 for j := 0; j < maxSteps; j++ { - for i < b && !less(data[i], data[i-1]) { + for i < b && !(cmp(data[i], data[i-1]) < 0) { i++ } @@ -216,7 +216,7 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b // Shift the smaller one to the left. if i-a >= 2 { for j := i - 1; j >= 1; j-- { - if !less(data[j], data[j-1]) { + if !(cmp(data[j], data[j-1]) < 0) { break } data[j], data[j-1] = data[j-1], data[j] @@ -225,7 +225,7 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b // Shift the greater one to the right. 
if b-i >= 2 { for j := i + 1; j < b; j++ { - if !less(data[j], data[j-1]) { + if !(cmp(data[j], data[j-1]) < 0) { break } data[j], data[j-1] = data[j-1], data[j] @@ -235,9 +235,9 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b return false } -// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns +// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns // that might cause imbalanced partitions in quicksort. -func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { length := b - a if length >= 8 { random := xorshift(length) @@ -253,12 +253,12 @@ func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { } } -// choosePivotLessFunc chooses a pivot in data[a:b]. +// choosePivotCmpFunc chooses a pivot in data[a:b]. // // [0,8): chooses a static pivot. // [8,shortestNinther): uses the simple median-of-three method. // [shortestNinther,∞): uses the Tukey ninther method. -func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) { +func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) { const ( shortestNinther = 50 maxSwaps = 4 * 3 @@ -276,12 +276,12 @@ func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (piv if l >= 8 { if l >= shortestNinther { // Tukey ninther method, the idea came from Rust's implementation. - i = medianAdjacentLessFunc(data, i, &swaps, less) - j = medianAdjacentLessFunc(data, j, &swaps, less) - k = medianAdjacentLessFunc(data, k, &swaps, less) + i = medianAdjacentCmpFunc(data, i, &swaps, cmp) + j = medianAdjacentCmpFunc(data, j, &swaps, cmp) + k = medianAdjacentCmpFunc(data, k, &swaps, cmp) } // Find the median among i, j, k and stores it into j. - j = medianLessFunc(data, i, j, k, &swaps, less) + j = medianCmpFunc(data, i, j, k, &swaps, cmp) } switch swaps { @@ -294,29 +294,29 @@ func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (piv } } -// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. -func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) { - if less(data[b], data[a]) { +// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) { + if cmp(data[b], data[a]) < 0 { *swaps++ return b, a } return a, b } -// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. -func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int { - a, b = order2LessFunc(data, a, b, swaps, less) - b, c = order2LessFunc(data, b, c, swaps, less) - a, b = order2LessFunc(data, a, b, swaps, less) +// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int { + a, b = order2CmpFunc(data, a, b, swaps, cmp) + b, c = order2CmpFunc(data, b, c, swaps, cmp) + a, b = order2CmpFunc(data, a, b, swaps, cmp) return b } -// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. 
-func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int { - return medianLessFunc(data, a-1, a, a+1, swaps, less) +// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int { + return medianCmpFunc(data, a-1, a, a+1, swaps, cmp) } -func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { i := a j := b - 1 for i < j { @@ -326,37 +326,37 @@ func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { } } -func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) { +func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) { for i := 0; i < n; i++ { data[a+i], data[b+i] = data[b+i], data[a+i] } } -func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { +func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) { blockSize := 20 // must be > 0 a, b := 0, blockSize for b <= n { - insertionSortLessFunc(data, a, b, less) + insertionSortCmpFunc(data, a, b, cmp) a = b b += blockSize } - insertionSortLessFunc(data, a, n, less) + insertionSortCmpFunc(data, a, n, cmp) for blockSize < n { a, b = 0, 2*blockSize for b <= n { - symMergeLessFunc(data, a, a+blockSize, b, less) + symMergeCmpFunc(data, a, a+blockSize, b, cmp) a = b b += 2 * blockSize } if m := a + blockSize; m < n { - symMergeLessFunc(data, a, m, n, less) + symMergeCmpFunc(data, a, m, n, cmp) } blockSize *= 2 } } -// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using +// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using // the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum // Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz // Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in @@ -375,7 +375,7 @@ func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { // symMerge assumes non-degenerate arguments: a < m && m < b. // Having the caller check this condition eliminates many leaf recursion calls, // which improves performance. -func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { +func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { // Avoid unnecessary recursions of symMerge // by direct insertion of data[a] into data[m:b] // if data[a:m] only contains one element. 
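// Illustrative sketch, not part of the upstream diff: SortStableFunc is the
// public entry point that drives stableCmpFunc and symMergeCmpFunc in this
// file; the example shows the stability guarantee. The pair type and the
// values are made up for the example.
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type pair struct {
	key, seq int
}

func main() {
	ps := []pair{{2, 0}, {1, 1}, {2, 2}, {1, 3}}

	// Sort by key only; keys are small, so subtraction is a safe cmp here.
	// Equal keys keep their original relative order (the seq field).
	slices.SortStableFunc(ps, func(a, b pair) int { return a.key - b.key })
	fmt.Println(ps) // [{1 1} {1 3} {2 0} {2 2}]
}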
@@ -387,7 +387,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { j := b for i < j { h := int(uint(i+j) >> 1) - if less(data[h], data[a]) { + if cmp(data[h], data[a]) < 0 { i = h + 1 } else { j = h @@ -411,7 +411,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { j := m for i < j { h := int(uint(i+j) >> 1) - if !less(data[m], data[h]) { + if !(cmp(data[m], data[h]) < 0) { i = h + 1 } else { j = h @@ -438,7 +438,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { for start < r { c := int(uint(start+r) >> 1) - if !less(data[p-c], data[c]) { + if !(cmp(data[p-c], data[c]) < 0) { start = c + 1 } else { r = c @@ -447,33 +447,33 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { end := n - start if start < m && m < end { - rotateLessFunc(data, start, m, end, less) + rotateCmpFunc(data, start, m, end, cmp) } if a < start && start < mid { - symMergeLessFunc(data, a, start, mid, less) + symMergeCmpFunc(data, a, start, mid, cmp) } if mid < end && end < b { - symMergeLessFunc(data, mid, end, b, less) + symMergeCmpFunc(data, mid, end, b, cmp) } } -// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: // Data of the form 'x u v y' is changed to 'x v u y'. // rotate performs at most b-a many calls to data.Swap, // and it assumes non-degenerate arguments: a < m && m < b. -func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { +func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { i := m - a j := b - m for i != j { if i > j { - swapRangeLessFunc(data, m-i, m, j, less) + swapRangeCmpFunc(data, m-i, m, j, cmp) i -= j } else { - swapRangeLessFunc(data, m-i, m+j-i, i, less) + swapRangeCmpFunc(data, m-i, m+j-i, i, cmp) j -= i } } // i == j - swapRangeLessFunc(data, m-i, m, i, less) + swapRangeCmpFunc(data, m-i, m, i, cmp) } diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go index efaa1c8b7..99b47c398 100644 --- a/vendor/golang.org/x/exp/slices/zsortordered.go +++ b/vendor/golang.org/x/exp/slices/zsortordered.go @@ -11,7 +11,7 @@ import "golang.org/x/exp/constraints" // insertionSortOrdered sorts data[a:b] using insertion sort. func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { for i := a + 1; i < b; i++ { - for j := i; j > a && (data[j] < data[j-1]); j-- { + for j := i; j > a && cmpLess(data[j], data[j-1]); j-- { data[j], data[j-1] = data[j-1], data[j] } } @@ -26,10 +26,10 @@ func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { if child >= hi { break } - if child+1 < hi && (data[first+child] < data[first+child+1]) { + if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) { child++ } - if !(data[first+root] < data[first+child]) { + if !cmpLess(data[first+root], data[first+child]) { return } data[first+root], data[first+child] = data[first+child], data[first+root] @@ -107,7 +107,7 @@ func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { // Probably the slice contains many duplicate elements, partition the slice into // elements equal to and elements greater than the pivot. 
- if a > 0 && !(data[a-1] < data[pivot]) { + if a > 0 && !cmpLess(data[a-1], data[pivot]) { mid := partitionEqualOrdered(data, a, b, pivot) a = mid continue @@ -138,10 +138,10 @@ func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivo data[a], data[pivot] = data[pivot], data[a] i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - for i <= j && (data[i] < data[a]) { + for i <= j && cmpLess(data[i], data[a]) { i++ } - for i <= j && !(data[j] < data[a]) { + for i <= j && !cmpLess(data[j], data[a]) { j-- } if i > j { @@ -153,10 +153,10 @@ func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivo j-- for { - for i <= j && (data[i] < data[a]) { + for i <= j && cmpLess(data[i], data[a]) { i++ } - for i <= j && !(data[j] < data[a]) { + for i <= j && !cmpLess(data[j], data[a]) { j-- } if i > j { @@ -177,10 +177,10 @@ func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (ne i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned for { - for i <= j && !(data[a] < data[i]) { + for i <= j && !cmpLess(data[a], data[i]) { i++ } - for i <= j && (data[a] < data[j]) { + for i <= j && cmpLess(data[a], data[j]) { j-- } if i > j { @@ -201,7 +201,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool ) i := a + 1 for j := 0; j < maxSteps; j++ { - for i < b && !(data[i] < data[i-1]) { + for i < b && !cmpLess(data[i], data[i-1]) { i++ } @@ -218,7 +218,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool // Shift the smaller one to the left. if i-a >= 2 { for j := i - 1; j >= 1; j-- { - if !(data[j] < data[j-1]) { + if !cmpLess(data[j], data[j-1]) { break } data[j], data[j-1] = data[j-1], data[j] @@ -227,7 +227,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool // Shift the greater one to the right. if b-i >= 2 { for j := i + 1; j < b; j++ { - if !(data[j] < data[j-1]) { + if !cmpLess(data[j], data[j-1]) { break } data[j], data[j-1] = data[j-1], data[j] @@ -298,7 +298,7 @@ func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, h // order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { - if data[b] < data[a] { + if cmpLess(data[b], data[a]) { *swaps++ return b, a } @@ -389,7 +389,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { j := b for i < j { h := int(uint(i+j) >> 1) - if data[h] < data[a] { + if cmpLess(data[h], data[a]) { i = h + 1 } else { j = h @@ -413,7 +413,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { j := m for i < j { h := int(uint(i+j) >> 1) - if !(data[m] < data[h]) { + if !cmpLess(data[m], data[h]) { i = h + 1 } else { j = h @@ -440,7 +440,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { for start < r { c := int(uint(start+r) >> 1) - if !(data[p-c] < data[c]) { + if !cmpLess(data[p-c], data[c]) { start = c + 1 } else { r = c diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go index 0c1b86793..2cb9c408f 100644 --- a/vendor/golang.org/x/net/context/go17.go +++ b/vendor/golang.org/x/net/context/go17.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
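// Note on the golang.org/x/net hunks from here on: they pair each //go:build
// line with the legacy "// +build" form. Both express the same constraint;
// //go:build is the syntax recognized by Go 1.17+ toolchains, "// +build" is
// the pre-1.17 syntax, and gofmt keeps paired lines consistent.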
//go:build go1.7 +// +build go1.7 package context diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go index e31e35a90..64d31ecc3 100644 --- a/vendor/golang.org/x/net/context/go19.go +++ b/vendor/golang.org/x/net/context/go19.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build go1.9 +// +build go1.9 package context diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go index 065ff3dfa..7b6b68511 100644 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !go1.7 +// +build !go1.7 package context diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go index ec5a63803..1f9715341 100644 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ b/vendor/golang.org/x/net/context/pre_go19.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. //go:build !go1.9 +// +build !go1.9 package context diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go index e6f55cbd1..a3067f8de 100644 --- a/vendor/golang.org/x/net/http2/databuffer.go +++ b/vendor/golang.org/x/net/http2/databuffer.go @@ -20,44 +20,41 @@ import ( // TODO: Benchmark to determine if the pools are necessary. The GC may have // improved enough that we can instead allocate chunks like this: // make([]byte, max(16<<10, expectedBytesRemaining)) -var dataChunkPools = [...]sync.Pool{ - {New: func() interface{} { return new([1 << 10]byte) }}, - {New: func() interface{} { return new([2 << 10]byte) }}, - {New: func() interface{} { return new([4 << 10]byte) }}, - {New: func() interface{} { return new([8 << 10]byte) }}, - {New: func() interface{} { return new([16 << 10]byte) }}, -} +var ( + dataChunkSizeClasses = []int{ + 1 << 10, + 2 << 10, + 4 << 10, + 8 << 10, + 16 << 10, + } + dataChunkPools = [...]sync.Pool{ + {New: func() interface{} { return make([]byte, 1<<10) }}, + {New: func() interface{} { return make([]byte, 2<<10) }}, + {New: func() interface{} { return make([]byte, 4<<10) }}, + {New: func() interface{} { return make([]byte, 8<<10) }}, + {New: func() interface{} { return make([]byte, 16<<10) }}, + } +) func getDataBufferChunk(size int64) []byte { - switch { - case size <= 1<<10: - return dataChunkPools[0].Get().(*[1 << 10]byte)[:] - case size <= 2<<10: - return dataChunkPools[1].Get().(*[2 << 10]byte)[:] - case size <= 4<<10: - return dataChunkPools[2].Get().(*[4 << 10]byte)[:] - case size <= 8<<10: - return dataChunkPools[3].Get().(*[8 << 10]byte)[:] - default: - return dataChunkPools[4].Get().(*[16 << 10]byte)[:] + i := 0 + for ; i < len(dataChunkSizeClasses)-1; i++ { + if size <= int64(dataChunkSizeClasses[i]) { + break + } } + return dataChunkPools[i].Get().([]byte) } func putDataBufferChunk(p []byte) { - switch len(p) { - case 1 << 10: - dataChunkPools[0].Put((*[1 << 10]byte)(p)) - case 2 << 10: - dataChunkPools[1].Put((*[2 << 10]byte)(p)) - case 4 << 10: - dataChunkPools[2].Put((*[4 << 10]byte)(p)) - case 8 << 10: - dataChunkPools[3].Put((*[8 << 10]byte)(p)) - case 16 << 10: - dataChunkPools[4].Put((*[16 << 10]byte)(p)) - default: - panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) + for i, n := range dataChunkSizeClasses { + if len(p) == n { + dataChunkPools[i].Put(p) + return + } } + panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) } // dataBuffer is an io.ReadWriter backed by 
a list of data chunks. diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go new file mode 100644 index 000000000..5bf62b032 --- /dev/null +++ b/vendor/golang.org/x/net/http2/go111.go @@ -0,0 +1,30 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.11 +// +build go1.11 + +package http2 + +import ( + "net/http/httptrace" + "net/textproto" +) + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { + return trace != nil && trace.WroteHeaderField != nil +} + +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(k, []string{v}) + } +} + +func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { + if trace != nil { + return trace.Got1xxResponse + } + return nil +} diff --git a/vendor/golang.org/x/net/http2/go115.go b/vendor/golang.org/x/net/http2/go115.go new file mode 100644 index 000000000..908af1ab9 --- /dev/null +++ b/vendor/golang.org/x/net/http2/go115.go @@ -0,0 +1,27 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.15 +// +build go1.15 + +package http2 + +import ( + "context" + "crypto/tls" +) + +// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS +// connection. +func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { + dialer := &tls.Dialer{ + Config: cfg, + } + cn, err := dialer.DialContext(ctx, network, addr) + if err != nil { + return nil, err + } + tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed + return tlsCn, nil +} diff --git a/vendor/golang.org/x/net/http2/go118.go b/vendor/golang.org/x/net/http2/go118.go new file mode 100644 index 000000000..aca4b2b31 --- /dev/null +++ b/vendor/golang.org/x/net/http2/go118.go @@ -0,0 +1,17 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package http2 + +import ( + "crypto/tls" + "net" +) + +func tlsUnderlyingConn(tc *tls.Conn) net.Conn { + return tc.NetConn() +} diff --git a/vendor/golang.org/x/net/http2/not_go111.go b/vendor/golang.org/x/net/http2/not_go111.go new file mode 100644 index 000000000..cc0baa819 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go111.go @@ -0,0 +1,21 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.11 +// +build !go1.11 + +package http2 + +import ( + "net/http/httptrace" + "net/textproto" +) + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false } + +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {} + +func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { + return nil +} diff --git a/vendor/golang.org/x/net/http2/not_go115.go b/vendor/golang.org/x/net/http2/not_go115.go new file mode 100644 index 000000000..e6c04cf7a --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go115.go @@ -0,0 +1,31 @@ +// Copyright 2021 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.15 +// +build !go1.15 + +package http2 + +import ( + "context" + "crypto/tls" +) + +// dialTLSWithContext opens a TLS connection. +func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { + cn, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + if err := cn.Handshake(); err != nil { + return nil, err + } + if cfg.InsecureSkipVerify { + return cn, nil + } + if err := cn.VerifyHostname(cfg.ServerName); err != nil { + return nil, err + } + return cn, nil +} diff --git a/vendor/golang.org/x/net/http2/not_go118.go b/vendor/golang.org/x/net/http2/not_go118.go new file mode 100644 index 000000000..eab532c96 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go118.go @@ -0,0 +1,17 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.18 +// +build !go1.18 + +package http2 + +import ( + "crypto/tls" + "net" +) + +func tlsUnderlyingConn(tc *tls.Conn) net.Conn { + return nil +} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index ae94c6408..02c88b6b3 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -2549,6 +2549,7 @@ type responseWriterState struct { wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. sentHeader bool // have we sent the header frame? handlerDone bool // handler has finished + dirty bool // a Write failed; don't reuse this responseWriterState sentContentLen int64 // non-zero if handler set a Content-Length header wroteBytes int64 @@ -2668,6 +2669,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { date: date, }) if err != nil { + rws.dirty = true return 0, err } if endStream { @@ -2688,6 +2690,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { if len(p) > 0 || endStream { // only send a 0 byte DATA frame if we're ending the stream. if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { + rws.dirty = true return 0, err } } @@ -2699,6 +2702,9 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { trailers: rws.trailers, endStream: true, }) + if err != nil { + rws.dirty = true + } return len(p), err } return len(p), nil @@ -2914,12 +2920,14 @@ func (rws *responseWriterState) writeHeader(code int) { h.Del("Transfer-Encoding") } - rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + if rws.conn.writeHeaders(rws.stream, &writeResHeaders{ streamID: rws.stream.id, httpResCode: code, h: h, endStream: rws.handlerDone && !rws.hasTrailers(), - }) + }) != nil { + rws.dirty = true + } return } @@ -2984,10 +2992,19 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, func (w *responseWriter) handlerDone() { rws := w.rws + dirty := rws.dirty rws.handlerDone = true w.Flush() w.rws = nil - responseWriterStatePool.Put(rws) + if !dirty { + // Only recycle the pool if all prior Write calls to + // the serverConn goroutine completed successfully. If + // they returned earlier due to resets from the peer + // there might still be write goroutines outstanding + // from the serverConn referencing the rws memory. See + // issue 20704. + responseWriterStatePool.Put(rws) + } } // Push errors. 
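// Illustrative sketch, not part of the upstream diff: a minimal, self-contained
// version of the pattern the new dirty flag implements — scratch state is only
// returned to its sync.Pool when no failed write could have left another
// goroutine holding a reference. The bufState and pool names are made up.
package main

import (
	"errors"
	"fmt"
	"sync"
)

// bufState stands in for responseWriterState: reusable per-request scratch state.
type bufState struct {
	data  []byte
	dirty bool // a write failed; something else may still reference this state
}

var pool = sync.Pool{New: func() interface{} { return new(bufState) }}

func write(st *bufState, p []byte, fail bool) error {
	if fail {
		st.dirty = true // do not let finish recycle st
		return errors.New("write failed")
	}
	st.data = append(st.data, p...)
	return nil
}

func finish(st *bufState) {
	if st.dirty {
		return // leave dirty state to the garbage collector
	}
	st.data = st.data[:0]
	pool.Put(st)
}

func main() {
	st := pool.Get().(*bufState)
	err := write(st, []byte("hello"), false)
	fmt.Println(err, string(st.data)) // <nil> hello
	finish(st)
}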
@@ -3170,7 +3187,6 @@ func (sc *serverConn) startPush(msg *startPushRequest) { panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) } - sc.curHandlers++ go sc.runHandler(rw, req, sc.handler.ServeHTTP) return promisedID, nil } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index df578b86c..4515b22c4 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -1018,7 +1018,7 @@ func (cc *ClientConn) forceCloseConn() { if !ok { return } - if nc := tc.NetConn(); nc != nil { + if nc := tlsUnderlyingConn(tc); nc != nil { nc.Close() } } @@ -3201,34 +3201,3 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) { trace.GotFirstResponseByte() } } - -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { - return trace != nil && trace.WroteHeaderField != nil -} - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { - if trace != nil && trace.WroteHeaderField != nil { - trace.WroteHeaderField(k, []string{v}) - } -} - -func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { - if trace != nil { - return trace.Got1xxResponse - } - return nil -} - -// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS -// connection. -func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { - dialer := &tls.Dialer{ - Config: cfg, - } - cn, err := dialer.DialContext(ctx, network, addr) - if err != nil { - return nil, err - } - tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed - return tlsCn, nil -} diff --git a/vendor/golang.org/x/net/idna/go118.go b/vendor/golang.org/x/net/idna/go118.go index 712f1ad83..c5c4338db 100644 --- a/vendor/golang.org/x/net/idna/go118.go +++ b/vendor/golang.org/x/net/idna/go118.go @@ -5,6 +5,7 @@ // license that can be found in the LICENSE file. //go:build go1.18 +// +build go1.18 package idna diff --git a/vendor/golang.org/x/net/idna/idna10.0.0.go b/vendor/golang.org/x/net/idna/idna10.0.0.go index 7b3717884..64ccf85fe 100644 --- a/vendor/golang.org/x/net/idna/idna10.0.0.go +++ b/vendor/golang.org/x/net/idna/idna10.0.0.go @@ -5,6 +5,7 @@ // license that can be found in the LICENSE file. //go:build go1.10 +// +build go1.10 // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to diff --git a/vendor/golang.org/x/net/idna/idna9.0.0.go b/vendor/golang.org/x/net/idna/idna9.0.0.go index cc6a892a4..ee1698cef 100644 --- a/vendor/golang.org/x/net/idna/idna9.0.0.go +++ b/vendor/golang.org/x/net/idna/idna9.0.0.go @@ -5,6 +5,7 @@ // license that can be found in the LICENSE file. //go:build !go1.10 +// +build !go1.10 // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to diff --git a/vendor/golang.org/x/net/idna/pre_go118.go b/vendor/golang.org/x/net/idna/pre_go118.go index 40e74bb3d..3aaccab1c 100644 --- a/vendor/golang.org/x/net/idna/pre_go118.go +++ b/vendor/golang.org/x/net/idna/pre_go118.go @@ -5,6 +5,7 @@ // license that can be found in the LICENSE file. 
//go:build !go1.18 +// +build !go1.18 package idna diff --git a/vendor/golang.org/x/net/idna/tables10.0.0.go b/vendor/golang.org/x/net/idna/tables10.0.0.go index c6c2bf10a..d1d62ef45 100644 --- a/vendor/golang.org/x/net/idna/tables10.0.0.go +++ b/vendor/golang.org/x/net/idna/tables10.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 +// +build go1.10,!go1.13 package idna diff --git a/vendor/golang.org/x/net/idna/tables11.0.0.go b/vendor/golang.org/x/net/idna/tables11.0.0.go index 76789393c..167efba71 100644 --- a/vendor/golang.org/x/net/idna/tables11.0.0.go +++ b/vendor/golang.org/x/net/idna/tables11.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 +// +build go1.13,!go1.14 package idna diff --git a/vendor/golang.org/x/net/idna/tables12.0.0.go b/vendor/golang.org/x/net/idna/tables12.0.0.go index 0600cd2ae..ab40f7bcc 100644 --- a/vendor/golang.org/x/net/idna/tables12.0.0.go +++ b/vendor/golang.org/x/net/idna/tables12.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 +// +build go1.14,!go1.16 package idna diff --git a/vendor/golang.org/x/net/idna/tables13.0.0.go b/vendor/golang.org/x/net/idna/tables13.0.0.go index 2fb768ef6..66701eadf 100644 --- a/vendor/golang.org/x/net/idna/tables13.0.0.go +++ b/vendor/golang.org/x/net/idna/tables13.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 +// +build go1.16,!go1.21 package idna diff --git a/vendor/golang.org/x/net/idna/tables15.0.0.go b/vendor/golang.org/x/net/idna/tables15.0.0.go index 5ff05fe1a..40033778f 100644 --- a/vendor/golang.org/x/net/idna/tables15.0.0.go +++ b/vendor/golang.org/x/net/idna/tables15.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 +// +build go1.21 package idna diff --git a/vendor/golang.org/x/net/idna/tables9.0.0.go b/vendor/golang.org/x/net/idna/tables9.0.0.go index 0f25e84ca..4074b5332 100644 --- a/vendor/golang.org/x/net/idna/tables9.0.0.go +++ b/vendor/golang.org/x/net/idna/tables9.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 +// +build !go1.10 package idna diff --git a/vendor/golang.org/x/net/idna/trie12.0.0.go b/vendor/golang.org/x/net/idna/trie12.0.0.go index 8a75b9667..bb63f904b 100644 --- a/vendor/golang.org/x/net/idna/trie12.0.0.go +++ b/vendor/golang.org/x/net/idna/trie12.0.0.go @@ -5,6 +5,7 @@ // license that can be found in the LICENSE file. //go:build !go1.16 +// +build !go1.16 package idna diff --git a/vendor/golang.org/x/net/idna/trie13.0.0.go b/vendor/golang.org/x/net/idna/trie13.0.0.go index fa45bb907..7d68a8dc1 100644 --- a/vendor/golang.org/x/net/idna/trie13.0.0.go +++ b/vendor/golang.org/x/net/idna/trie13.0.0.go @@ -5,6 +5,7 @@ // license that can be found in the LICENSE file. 
//go:build go1.16 +// +build go1.16 package idna diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go deleted file mode 100644 index d02e6bbc8..000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.12 -// source: google/api/field_info.proto - -package annotations - -import ( - reflect "reflect" - sync "sync" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - descriptorpb "google.golang.org/protobuf/types/descriptorpb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The standard format of a field value. The supported formats are all backed -// by either an RFC defined by the IETF or a Google-defined AIP. -type FieldInfo_Format int32 - -const ( - // Default, unspecified value. - FieldInfo_FORMAT_UNSPECIFIED FieldInfo_Format = 0 - // Universally Unique Identifier, version 4, value as defined by - // https://datatracker.ietf.org/doc/html/rfc4122. The value may be - // normalized to entirely lowercase letters. For example, the value - // `F47AC10B-58CC-0372-8567-0E02B2C3D479` would be normalized to - // `f47ac10b-58cc-0372-8567-0e02b2c3d479`. - FieldInfo_UUID4 FieldInfo_Format = 1 - // Internet Protocol v4 value as defined by [RFC - // 791](https://datatracker.ietf.org/doc/html/rfc791). The value may be - // condensed, with leading zeros in each octet stripped. For example, - // `001.022.233.040` would be condensed to `1.22.233.40`. - FieldInfo_IPV4 FieldInfo_Format = 2 - // Internet Protocol v6 value as defined by [RFC - // 2460](https://datatracker.ietf.org/doc/html/rfc2460). The value may be - // normalized to entirely lowercase letters, and zero-padded partial and - // empty octets. For example, the value `2001:DB8::` would be normalized to - // `2001:0db8:0:0`. - FieldInfo_IPV6 FieldInfo_Format = 3 - // An IP address in either v4 or v6 format as described by the individual - // values defined herein. See the comments on the IPV4 and IPV6 types for - // allowed normalizations of each. - FieldInfo_IPV4_OR_IPV6 FieldInfo_Format = 4 -) - -// Enum value maps for FieldInfo_Format. 
-var ( - FieldInfo_Format_name = map[int32]string{ - 0: "FORMAT_UNSPECIFIED", - 1: "UUID4", - 2: "IPV4", - 3: "IPV6", - 4: "IPV4_OR_IPV6", - } - FieldInfo_Format_value = map[string]int32{ - "FORMAT_UNSPECIFIED": 0, - "UUID4": 1, - "IPV4": 2, - "IPV6": 3, - "IPV4_OR_IPV6": 4, - } -) - -func (x FieldInfo_Format) Enum() *FieldInfo_Format { - p := new(FieldInfo_Format) - *p = x - return p -} - -func (x FieldInfo_Format) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FieldInfo_Format) Descriptor() protoreflect.EnumDescriptor { - return file_google_api_field_info_proto_enumTypes[0].Descriptor() -} - -func (FieldInfo_Format) Type() protoreflect.EnumType { - return &file_google_api_field_info_proto_enumTypes[0] -} - -func (x FieldInfo_Format) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use FieldInfo_Format.Descriptor instead. -func (FieldInfo_Format) EnumDescriptor() ([]byte, []int) { - return file_google_api_field_info_proto_rawDescGZIP(), []int{0, 0} -} - -// Rich semantic information of an API field beyond basic typing. -type FieldInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The standard format of a field value. This does not explicitly configure - // any API consumer, just documents the API's format for the field it is - // applied to. - Format FieldInfo_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.api.FieldInfo_Format" json:"format,omitempty"` -} - -func (x *FieldInfo) Reset() { - *x = FieldInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_api_field_info_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FieldInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FieldInfo) ProtoMessage() {} - -func (x *FieldInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_api_field_info_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FieldInfo.ProtoReflect.Descriptor instead. -func (*FieldInfo) Descriptor() ([]byte, []int) { - return file_google_api_field_info_proto_rawDescGZIP(), []int{0} -} - -func (x *FieldInfo) GetFormat() FieldInfo_Format { - if x != nil { - return x.Format - } - return FieldInfo_FORMAT_UNSPECIFIED -} - -var file_google_api_field_info_proto_extTypes = []protoimpl.ExtensionInfo{ - { - ExtendedType: (*descriptorpb.FieldOptions)(nil), - ExtensionType: (*FieldInfo)(nil), - Field: 291403980, - Name: "google.api.field_info", - Tag: "bytes,291403980,opt,name=field_info", - Filename: "google/api/field_info.proto", - }, -} - -// Extension fields to descriptorpb.FieldOptions. -var ( - // Rich semantic descriptor of an API field beyond the basic typing. 
- // - // Examples: - // - // string request_id = 1 [(google.api.field_info).format = UUID4]; - // string old_ip_address = 2 [(google.api.field_info).format = IPV4]; - // string new_ip_address = 3 [(google.api.field_info).format = IPV6]; - // string actual_ip_address = 4 [ - // (google.api.field_info).format = IPV4_OR_IPV6 - // ]; - // - // optional google.api.FieldInfo field_info = 291403980; - E_FieldInfo = &file_google_api_field_info_proto_extTypes[0] -) - -var File_google_api_field_info_proto protoreflect.FileDescriptor - -var file_google_api_field_info_proto_rawDesc = []byte{ - 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x09, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x34, 0x0a, 0x06, 0x66, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, - 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, - 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, - 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, - 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, - 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, - 0x10, 0x04, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, - 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x6c, 0x0a, 0x0e, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var ( - file_google_api_field_info_proto_rawDescOnce sync.Once - file_google_api_field_info_proto_rawDescData = file_google_api_field_info_proto_rawDesc -) - -func file_google_api_field_info_proto_rawDescGZIP() []byte { - file_google_api_field_info_proto_rawDescOnce.Do(func() { - 
file_google_api_field_info_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_info_proto_rawDescData) - }) - return file_google_api_field_info_proto_rawDescData -} - -var file_google_api_field_info_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_api_field_info_proto_goTypes = []interface{}{ - (FieldInfo_Format)(0), // 0: google.api.FieldInfo.Format - (*FieldInfo)(nil), // 1: google.api.FieldInfo - (*descriptorpb.FieldOptions)(nil), // 2: google.protobuf.FieldOptions -} -var file_google_api_field_info_proto_depIdxs = []int32{ - 0, // 0: google.api.FieldInfo.format:type_name -> google.api.FieldInfo.Format - 2, // 1: google.api.field_info:extendee -> google.protobuf.FieldOptions - 1, // 2: google.api.field_info:type_name -> google.api.FieldInfo - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 2, // [2:3] is the sub-list for extension type_name - 1, // [1:2] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_google_api_field_info_proto_init() } -func file_google_api_field_info_proto_init() { - if File_google_api_field_info_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_api_field_info_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FieldInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_api_field_info_proto_rawDesc, - NumEnums: 1, - NumMessages: 1, - NumExtensions: 1, - NumServices: 0, - }, - GoTypes: file_google_api_field_info_proto_goTypes, - DependencyIndexes: file_google_api_field_info_proto_depIdxs, - EnumInfos: file_google_api_field_info_proto_enumTypes, - MessageInfos: file_google_api_field_info_proto_msgTypes, - ExtensionInfos: file_google_api_field_info_proto_extTypes, - }.Build() - File_google_api_field_info_proto = out.File - file_google_api_field_info_proto_rawDesc = nil - file_google_api_field_info_proto_goTypes = nil - file_google_api_field_info_proto_depIdxs = nil -} diff --git a/vendor/k8s.io/utils/integer/integer.go b/vendor/k8s.io/utils/integer/integer.go index e4e740cad..e0811e834 100644 --- a/vendor/k8s.io/utils/integer/integer.go +++ b/vendor/k8s.io/utils/integer/integer.go @@ -16,6 +16,8 @@ limitations under the License. package integer +import "math" + // IntMax returns the maximum of the params func IntMax(a, b int) int { if b > a { @@ -65,9 +67,7 @@ func Int64Min(a, b int64) int64 { } // RoundToInt32 rounds floats into integer numbers. +// Deprecated: use math.Round() and a cast directly. func RoundToInt32(a float64) int32 { - if a < 0 { - return int32(a - 0.5) - } - return int32(a + 0.5) + return int32(math.Round(a)) } diff --git a/vendor/k8s.io/utils/pointer/pointer.go b/vendor/k8s.io/utils/pointer/pointer.go index b8103223a..b673a6425 100644 --- a/vendor/k8s.io/utils/pointer/pointer.go +++ b/vendor/k8s.io/utils/pointer/pointer.go @@ -14,12 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Deprecated: Use functions in k8s.io/utils/ptr instead: ptr.To to obtain +// a pointer, ptr.Deref to dereference a pointer, ptr.Equal to compare +// dereferenced pointers. 
package pointer import ( - "fmt" - "reflect" "time" + + "k8s.io/utils/ptr" ) // AllPtrFieldsNil tests whether all pointer fields in a struct are nil. This is useful when, @@ -28,383 +31,219 @@ import ( // // This function is only valid for structs and pointers to structs. Any other // type will cause a panic. Passing a typed nil pointer will return true. -func AllPtrFieldsNil(obj interface{}) bool { - v := reflect.ValueOf(obj) - if !v.IsValid() { - panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj)) - } - if v.Kind() == reflect.Ptr { - if v.IsNil() { - return true - } - v = v.Elem() - } - for i := 0; i < v.NumField(); i++ { - if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() { - return false - } - } - return true -} - -// Int returns a pointer to an int -func Int(i int) *int { - return &i -} +// +// Deprecated: Use ptr.AllPtrFieldsNil instead. +var AllPtrFieldsNil = ptr.AllPtrFieldsNil + +// Int returns a pointer to an int. +var Int = ptr.To[int] // IntPtr is a function variable referring to Int. // -// Deprecated: Use Int instead. +// Deprecated: Use ptr.To instead. var IntPtr = Int // for back-compat // IntDeref dereferences the int ptr and returns it if not nil, or else // returns def. -func IntDeref(ptr *int, def int) int { - if ptr != nil { - return *ptr - } - return def -} +var IntDeref = ptr.Deref[int] // IntPtrDerefOr is a function variable referring to IntDeref. // -// Deprecated: Use IntDeref instead. +// Deprecated: Use ptr.Deref instead. var IntPtrDerefOr = IntDeref // for back-compat // Int32 returns a pointer to an int32. -func Int32(i int32) *int32 { - return &i -} +var Int32 = ptr.To[int32] // Int32Ptr is a function variable referring to Int32. // -// Deprecated: Use Int32 instead. +// Deprecated: Use ptr.To instead. var Int32Ptr = Int32 // for back-compat // Int32Deref dereferences the int32 ptr and returns it if not nil, or else // returns def. -func Int32Deref(ptr *int32, def int32) int32 { - if ptr != nil { - return *ptr - } - return def -} +var Int32Deref = ptr.Deref[int32] // Int32PtrDerefOr is a function variable referring to Int32Deref. // -// Deprecated: Use Int32Deref instead. +// Deprecated: Use ptr.Deref instead. var Int32PtrDerefOr = Int32Deref // for back-compat // Int32Equal returns true if both arguments are nil or both arguments // dereference to the same value. -func Int32Equal(a, b *int32) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var Int32Equal = ptr.Equal[int32] // Uint returns a pointer to an uint -func Uint(i uint) *uint { - return &i -} +var Uint = ptr.To[uint] // UintPtr is a function variable referring to Uint. // -// Deprecated: Use Uint instead. +// Deprecated: Use ptr.To instead. var UintPtr = Uint // for back-compat // UintDeref dereferences the uint ptr and returns it if not nil, or else // returns def. -func UintDeref(ptr *uint, def uint) uint { - if ptr != nil { - return *ptr - } - return def -} +var UintDeref = ptr.Deref[uint] // UintPtrDerefOr is a function variable referring to UintDeref. // -// Deprecated: Use UintDeref instead. +// Deprecated: Use ptr.Deref instead. var UintPtrDerefOr = UintDeref // for back-compat // Uint32 returns a pointer to an uint32. -func Uint32(i uint32) *uint32 { - return &i -} +var Uint32 = ptr.To[uint32] // Uint32Ptr is a function variable referring to Uint32. // -// Deprecated: Use Uint32 instead. +// Deprecated: Use ptr.To instead. 
var Uint32Ptr = Uint32 // for back-compat // Uint32Deref dereferences the uint32 ptr and returns it if not nil, or else // returns def. -func Uint32Deref(ptr *uint32, def uint32) uint32 { - if ptr != nil { - return *ptr - } - return def -} +var Uint32Deref = ptr.Deref[uint32] // Uint32PtrDerefOr is a function variable referring to Uint32Deref. // -// Deprecated: Use Uint32Deref instead. +// Deprecated: Use ptr.Deref instead. var Uint32PtrDerefOr = Uint32Deref // for back-compat // Uint32Equal returns true if both arguments are nil or both arguments // dereference to the same value. -func Uint32Equal(a, b *uint32) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var Uint32Equal = ptr.Equal[uint32] // Int64 returns a pointer to an int64. -func Int64(i int64) *int64 { - return &i -} +var Int64 = ptr.To[int64] // Int64Ptr is a function variable referring to Int64. // -// Deprecated: Use Int64 instead. +// Deprecated: Use ptr.To instead. var Int64Ptr = Int64 // for back-compat // Int64Deref dereferences the int64 ptr and returns it if not nil, or else // returns def. -func Int64Deref(ptr *int64, def int64) int64 { - if ptr != nil { - return *ptr - } - return def -} +var Int64Deref = ptr.Deref[int64] // Int64PtrDerefOr is a function variable referring to Int64Deref. // -// Deprecated: Use Int64Deref instead. +// Deprecated: Use ptr.Deref instead. var Int64PtrDerefOr = Int64Deref // for back-compat // Int64Equal returns true if both arguments are nil or both arguments // dereference to the same value. -func Int64Equal(a, b *int64) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var Int64Equal = ptr.Equal[int64] // Uint64 returns a pointer to an uint64. -func Uint64(i uint64) *uint64 { - return &i -} +var Uint64 = ptr.To[uint64] // Uint64Ptr is a function variable referring to Uint64. // -// Deprecated: Use Uint64 instead. +// Deprecated: Use ptr.To instead. var Uint64Ptr = Uint64 // for back-compat // Uint64Deref dereferences the uint64 ptr and returns it if not nil, or else // returns def. -func Uint64Deref(ptr *uint64, def uint64) uint64 { - if ptr != nil { - return *ptr - } - return def -} +var Uint64Deref = ptr.Deref[uint64] // Uint64PtrDerefOr is a function variable referring to Uint64Deref. // -// Deprecated: Use Uint64Deref instead. +// Deprecated: Use ptr.Deref instead. var Uint64PtrDerefOr = Uint64Deref // for back-compat // Uint64Equal returns true if both arguments are nil or both arguments // dereference to the same value. -func Uint64Equal(a, b *uint64) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var Uint64Equal = ptr.Equal[uint64] // Bool returns a pointer to a bool. -func Bool(b bool) *bool { - return &b -} +var Bool = ptr.To[bool] // BoolPtr is a function variable referring to Bool. // -// Deprecated: Use Bool instead. +// Deprecated: Use ptr.To instead. var BoolPtr = Bool // for back-compat // BoolDeref dereferences the bool ptr and returns it if not nil, or else // returns def. -func BoolDeref(ptr *bool, def bool) bool { - if ptr != nil { - return *ptr - } - return def -} +var BoolDeref = ptr.Deref[bool] // BoolPtrDerefOr is a function variable referring to BoolDeref. // -// Deprecated: Use BoolDeref instead. +// Deprecated: Use ptr.Deref instead. 
var BoolPtrDerefOr = BoolDeref // for back-compat // BoolEqual returns true if both arguments are nil or both arguments // dereference to the same value. -func BoolEqual(a, b *bool) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var BoolEqual = ptr.Equal[bool] // String returns a pointer to a string. -func String(s string) *string { - return &s -} +var String = ptr.To[string] // StringPtr is a function variable referring to String. // -// Deprecated: Use String instead. +// Deprecated: Use ptr.To instead. var StringPtr = String // for back-compat // StringDeref dereferences the string ptr and returns it if not nil, or else // returns def. -func StringDeref(ptr *string, def string) string { - if ptr != nil { - return *ptr - } - return def -} +var StringDeref = ptr.Deref[string] // StringPtrDerefOr is a function variable referring to StringDeref. // -// Deprecated: Use StringDeref instead. +// Deprecated: Use ptr.Deref instead. var StringPtrDerefOr = StringDeref // for back-compat // StringEqual returns true if both arguments are nil or both arguments // dereference to the same value. -func StringEqual(a, b *string) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var StringEqual = ptr.Equal[string] // Float32 returns a pointer to a float32. -func Float32(i float32) *float32 { - return &i -} +var Float32 = ptr.To[float32] // Float32Ptr is a function variable referring to Float32. // -// Deprecated: Use Float32 instead. +// Deprecated: Use ptr.To instead. var Float32Ptr = Float32 // Float32Deref dereferences the float32 ptr and returns it if not nil, or else // returns def. -func Float32Deref(ptr *float32, def float32) float32 { - if ptr != nil { - return *ptr - } - return def -} +var Float32Deref = ptr.Deref[float32] // Float32PtrDerefOr is a function variable referring to Float32Deref. // -// Deprecated: Use Float32Deref instead. +// Deprecated: Use ptr.Deref instead. var Float32PtrDerefOr = Float32Deref // for back-compat // Float32Equal returns true if both arguments are nil or both arguments // dereference to the same value. -func Float32Equal(a, b *float32) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var Float32Equal = ptr.Equal[float32] // Float64 returns a pointer to a float64. -func Float64(i float64) *float64 { - return &i -} +var Float64 = ptr.To[float64] // Float64Ptr is a function variable referring to Float64. // -// Deprecated: Use Float64 instead. +// Deprecated: Use ptr.To instead. var Float64Ptr = Float64 // Float64Deref dereferences the float64 ptr and returns it if not nil, or else // returns def. -func Float64Deref(ptr *float64, def float64) float64 { - if ptr != nil { - return *ptr - } - return def -} +var Float64Deref = ptr.Deref[float64] // Float64PtrDerefOr is a function variable referring to Float64Deref. // -// Deprecated: Use Float64Deref instead. +// Deprecated: Use ptr.Deref instead. var Float64PtrDerefOr = Float64Deref // for back-compat // Float64Equal returns true if both arguments are nil or both arguments // dereference to the same value. -func Float64Equal(a, b *float64) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var Float64Equal = ptr.Equal[float64] // Duration returns a pointer to a time.Duration. 
-func Duration(d time.Duration) *time.Duration { - return &d -} +var Duration = ptr.To[time.Duration] // DurationDeref dereferences the time.Duration ptr and returns it if not nil, or else // returns def. -func DurationDeref(ptr *time.Duration, def time.Duration) time.Duration { - if ptr != nil { - return *ptr - } - return def -} +var DurationDeref = ptr.Deref[time.Duration] // DurationEqual returns true if both arguments are nil or both arguments // dereference to the same value. -func DurationEqual(a, b *time.Duration) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var DurationEqual = ptr.Equal[time.Duration] diff --git a/vendor/k8s.io/utils/ptr/OWNERS b/vendor/k8s.io/utils/ptr/OWNERS new file mode 100644 index 000000000..0d6392752 --- /dev/null +++ b/vendor/k8s.io/utils/ptr/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- apelisse +- stewart-yu +- thockin +reviewers: +- apelisse +- stewart-yu +- thockin diff --git a/vendor/k8s.io/utils/ptr/README.md b/vendor/k8s.io/utils/ptr/README.md new file mode 100644 index 000000000..2ca8073dc --- /dev/null +++ b/vendor/k8s.io/utils/ptr/README.md @@ -0,0 +1,3 @@ +# Pointer + +This package provides some functions for pointer-based operations. diff --git a/vendor/k8s.io/utils/ptr/ptr.go b/vendor/k8s.io/utils/ptr/ptr.go new file mode 100644 index 000000000..659ed3b9e --- /dev/null +++ b/vendor/k8s.io/utils/ptr/ptr.go @@ -0,0 +1,73 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ptr + +import ( + "fmt" + "reflect" +) + +// AllPtrFieldsNil tests whether all pointer fields in a struct are nil. This is useful when, +// for example, an API struct is handled by plugins which need to distinguish +// "no plugin accepted this spec" from "this spec is empty". +// +// This function is only valid for structs and pointers to structs. Any other +// type will cause a panic. Passing a typed nil pointer will return true. +func AllPtrFieldsNil(obj interface{}) bool { + v := reflect.ValueOf(obj) + if !v.IsValid() { + panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj)) + } + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return true + } + v = v.Elem() + } + for i := 0; i < v.NumField(); i++ { + if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() { + return false + } + } + return true +} + +// To returns a pointer to the given value. +func To[T any](v T) *T { + return &v +} + +// Deref dereferences ptr and returns the value it points to if no nil, or else +// returns def. +func Deref[T any](ptr *T, def T) T { + if ptr != nil { + return *ptr + } + return def +} + +// Equal returns true if both arguments are nil or both arguments +// dereference to the same value. 
+func Equal[T comparable](a, b *T) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 5709e22a5..cc074fdc1 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -59,7 +59,7 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontai # github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault -# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.2.0 +# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 # github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.2.0 @@ -71,11 +71,12 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources # github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage -# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.6.1 +# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base @@ -113,7 +114,7 @@ github.com/Azure/go-autorest/logger # github.com/Azure/go-autorest/tracing v0.6.0 ## explicit; go 1.12 github.com/Azure/go-autorest/tracing -# github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 +# github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 ## explicit; go 1.18 github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential @@ -307,7 +308,7 @@ github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/kubernetes-csi/csi-lib-utils v0.16.0 +# github.com/kubernetes-csi/csi-lib-utils v0.13.0 ## explicit; go 1.18 github.com/kubernetes-csi/csi-lib-utils/protosanitizer # github.com/kylelemons/godebug v1.1.0 @@ -545,12 +546,12 @@ golang.org/x/crypto/pkcs12/internal/rc2 golang.org/x/crypto/salsa20/salsa golang.org/x/crypto/ssh golang.org/x/crypto/ssh/internal/bcrypt_pbkdf -# golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e -## explicit; go 1.18 +# golang.org/x/exp v0.0.0-20230905200255-921286631fa9 +## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/slices -# golang.org/x/net v0.19.0 -## explicit; go 1.18 +# golang.org/x/net v0.17.0 +## explicit; go 1.17 golang.org/x/net/context golang.org/x/net/html golang.org/x/net/html/atom @@ -629,16 +630,16 @@ google.golang.org/appengine/internal/log google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 +# google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d ## explicit; go 1.19 google.golang.org/genproto/internal -# 
google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b +# google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d ## explicit; go 1.19 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/expr/v1alpha1 google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status @@ -1513,7 +1514,7 @@ k8s.io/mount-utils ## explicit; go 1.20 k8s.io/pod-security-admission/api k8s.io/pod-security-admission/policy -# k8s.io/utils v0.0.0-20230505201702-9f6742963106 +# k8s.io/utils v0.0.0-20231127182322-b307cd553661 ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock @@ -1530,6 +1531,7 @@ k8s.io/utils/net k8s.io/utils/nsenter k8s.io/utils/path k8s.io/utils/pointer +k8s.io/utils/ptr k8s.io/utils/strings k8s.io/utils/strings/slices k8s.io/utils/trace @@ -1539,8 +1541,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client -# sigs.k8s.io/cloud-provider-azure v1.27.1-0.20231128114432-f80321a29d3a -## explicit; go 1.20 +# sigs.k8s.io/cloud-provider-azure v1.27.1-0.20231208022044-b9ede3fc98e9 +## explicit; go 1.21 sigs.k8s.io/cloud-provider-azure/pkg/azureclients sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient sigs.k8s.io/cloud-provider-azure/pkg/azureclients/blobclient @@ -1593,7 +1595,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/retry sigs.k8s.io/cloud-provider-azure/pkg/util/deepcopy sigs.k8s.io/cloud-provider-azure/pkg/util/taints sigs.k8s.io/cloud-provider-azure/pkg/version -# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20231128114432-f80321a29d3a +# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20231205023417-1ba5a224ab0e ## explicit; go 1.20 sigs.k8s.io/cloud-provider-azure/pkg/azclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient @@ -1631,7 +1633,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient -# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20231128114432-f80321a29d3a +# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20231208022044-b9ede3fc98e9 ## explicit; go 1.20 sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader # sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/arm_conf.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/arm_conf.go index 55ddc722a..4fd05e8a9 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/arm_conf.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/arm_conf.go @@ -17,11 +17,10 @@ limitations under the License. 
package azclient import ( + "os" "strings" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" - + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" ) @@ -33,21 +32,32 @@ type ARMClientConfig struct { // ResourceManagerEndpoint is the cloud's resource manager endpoint. If set, cloud provider queries this endpoint // in order to generate an autorest.Environment instance instead of using one of the pre-defined Environments. ResourceManagerEndpoint string `json:"resourceManagerEndpoint,omitempty" yaml:"resourceManagerEndpoint,omitempty"` + // The AAD Tenant ID for the Subscription that the cluster is deployed in + TenantID string `json:"tenantId,omitempty" yaml:"tenantId,omitempty"` + // The AAD Tenant ID for the Subscription that the network resources are deployed in. + NetworkResourceTenantID string `json:"networkResourceTenantID,omitempty" yaml:"networkResourceTenantID,omitempty"` +} + +func (config *ARMClientConfig) GetTenantID() string { + // these environment variables are injected by workload identity webhook + if tenantID := os.Getenv(utils.AzureTenantID); tenantID != "" { + return tenantID + } + return config.TenantID } -func NewClientOptionFromARMClientConfig(config *ARMClientConfig) (*policy.ClientOptions, error) { +func GetAzCoreClientOption(armConfig *ARMClientConfig) (*policy.ClientOptions, error) { //Get default settings - options := utils.GetDefaultOption() - var err error - if config != nil { + azCoreClientConfig := utils.GetDefaultAzCoreClientOption() + if armConfig != nil { //update user agent header - options.ClientOptions.Telemetry.ApplicationID = strings.TrimSpace(config.UserAgent) + azCoreClientConfig.Telemetry.ApplicationID = strings.TrimSpace(armConfig.UserAgent) //set cloud - var cloudConfig *cloud.Configuration - cloudConfig, err = GetAzureCloudConfig(config) - options.ClientOptions.Cloud = *cloudConfig - } else { - options.ClientOptions.Cloud = cloud.AzurePublic + cloudConfig, err := GetAzureCloudConfig(armConfig) + if err != nil { + return nil, err + } + azCoreClientConfig.Cloud = *cloudConfig } - return options, err + return &azCoreClientConfig, nil } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth.go index 67f12af48..1bc5c5d3c 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth.go @@ -24,39 +24,11 @@ import ( "strings" "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" "golang.org/x/crypto/pkcs12" - - "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" ) -// AzureAuthConfig holds auth related part of cloud config -type AzureAuthConfig struct { - // The AAD Tenant ID for the Subscription that the cluster is deployed in - TenantID string `json:"tenantId,omitempty" yaml:"tenantId,omitempty"` - // The ClientID for an AAD application with RBAC access to talk to Azure RM APIs - AADClientID string `json:"aadClientId,omitempty" yaml:"aadClientId,omitempty"` - // The ClientSecret for an AAD application with RBAC access to talk to Azure RM APIs - AADClientSecret string `json:"aadClientSecret,omitempty" yaml:"aadClientSecret,omitempty" datapolicy:"token"` - // The path of a client certificate for an AAD application with 
RBAC access to talk to Azure RM APIs - AADClientCertPath string `json:"aadClientCertPath,omitempty" yaml:"aadClientCertPath,omitempty"` - // The password of the client certificate for an AAD application with RBAC access to talk to Azure RM APIs - AADClientCertPassword string `json:"aadClientCertPassword,omitempty" yaml:"aadClientCertPassword,omitempty" datapolicy:"password"` - // Use managed service identity for the virtual machine to access Azure ARM APIs - UseManagedIdentityExtension bool `json:"useManagedIdentityExtension,omitempty" yaml:"useManagedIdentityExtension,omitempty"` - // UserAssignedIdentityID contains the Client ID of the user assigned MSI which is assigned to the underlying VMs. If empty the user assigned identity is not used. - // More details of the user assigned identity can be found at: https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/overview - // For the user assigned identity specified here to be used, the UseManagedIdentityExtension has to be set to true. - UserAssignedIdentityID string `json:"userAssignedIdentityID,omitempty" yaml:"userAssignedIdentityID,omitempty"` - // The AAD Tenant ID for the Subscription that the network resources are deployed in. - NetworkResourceTenantID string `json:"networkResourceTenantID,omitempty" yaml:"networkResourceTenantID,omitempty"` - // The AAD federated token file - AADFederatedTokenFile string `json:"aadFederatedTokenFile,omitempty" yaml:"aadFederatedTokenFile,omitempty"` - // Use workload identity federation for the virtual machine to access Azure ARM APIs - UseFederatedWorkloadIdentityExtension bool `json:"useFederatedWorkloadIdentityExtension,omitempty" yaml:"useFederatedWorkloadIdentityExtension,omitempty"` -} - var ( // ErrorNoAuth indicates that no credentials are provided. 
ErrorNoAuth = fmt.Errorf("no credentials provided for Azure cloud provider") @@ -71,39 +43,22 @@ type AuthProvider struct { ClientCertificateCredential azcore.TokenCredential } -func GetDefaultAuthClientOption(armConfig *ARMClientConfig) (*policy.ClientOptions, error) { - //Get default settings - options, err := NewClientOptionFromARMClientConfig(armConfig) +func NewAuthProvider(armConfig *ARMClientConfig, config *AzureAuthConfig, clientOptionsMutFn ...func(option *policy.ClientOptions)) (*AuthProvider, error) { + clientOption, err := GetAzCoreClientOption(armConfig) if err != nil { return nil, err } - return options, nil -} - -func NewAuthProvider(config AzureAuthConfig, clientOption *policy.ClientOptions) (*AuthProvider, error) { - if clientOption == nil { - clientOption = &policy.ClientOptions{} - } - // these environment variables are injected by workload identity webhook - if tenantID := os.Getenv(utils.AzureTenantID); tenantID != "" { - config.TenantID = tenantID + for _, fn := range clientOptionsMutFn { + fn(clientOption) } - if clientID := os.Getenv(utils.AzureClientID); clientID != "" { - config.AADClientID = clientID - } - var err error // federatedIdentityCredential is used for workload identity federation var federatedIdentityCredential azcore.TokenCredential - if federatedTokenFile := os.Getenv(utils.AzureFederatedTokenFile); federatedTokenFile != "" { - config.AADFederatedTokenFile = federatedTokenFile - config.UseFederatedWorkloadIdentityExtension = true - } - if config.UseFederatedWorkloadIdentityExtension { + if aadFederatedTokenFile, enabled := config.GetAzureFederatedTokenFile(); enabled { federatedIdentityCredential, err = azidentity.NewWorkloadIdentityCredential(&azidentity.WorkloadIdentityCredentialOptions{ - ClientOptions: clientOption.ClientOptions, - ClientID: config.AADClientID, - TenantID: config.TenantID, - TokenFilePath: config.AADFederatedTokenFile, + ClientOptions: *clientOption, + ClientID: config.GetAADClientID(), + TenantID: armConfig.GetTenantID(), + TokenFilePath: aadFederatedTokenFile, }) if err != nil { return nil, err @@ -114,7 +69,7 @@ func NewAuthProvider(config AzureAuthConfig, clientOption *policy.ClientOptions) var managedIdentityCredential azcore.TokenCredential if config.UseManagedIdentityExtension { credOptions := &azidentity.ManagedIdentityCredentialOptions{ - ClientOptions: clientOption.ClientOptions, + ClientOptions: *clientOption, } if len(config.UserAssignedIdentityID) > 0 { if strings.Contains(strings.ToUpper(config.UserAssignedIdentityID), "/SUBSCRIPTIONS/") { @@ -133,28 +88,28 @@ func NewAuthProvider(config AzureAuthConfig, clientOption *policy.ClientOptions) var clientSecretCredential azcore.TokenCredential var networkClientSecretCredential azcore.TokenCredential var multiTenantCredential azcore.TokenCredential - if len(config.AADClientSecret) > 0 { + if len(config.GetAADClientSecret()) > 0 { credOptions := &azidentity.ClientSecretCredentialOptions{ - ClientOptions: clientOption.ClientOptions, + ClientOptions: *clientOption, } - clientSecretCredential, err = azidentity.NewClientSecretCredential(config.TenantID, config.AADClientID, config.AADClientSecret, credOptions) + clientSecretCredential, err = azidentity.NewClientSecretCredential(armConfig.GetTenantID(), config.GetAADClientID(), config.GetAADClientSecret(), credOptions) if err != nil { return nil, err } - if len(config.NetworkResourceTenantID) > 0 && !strings.EqualFold(config.NetworkResourceTenantID, config.TenantID) { + if len(armConfig.NetworkResourceTenantID) > 0 && 
!strings.EqualFold(armConfig.NetworkResourceTenantID, armConfig.GetTenantID()) { credOptions := &azidentity.ClientSecretCredentialOptions{ - ClientOptions: clientOption.ClientOptions, + ClientOptions: *clientOption, } - networkClientSecretCredential, err = azidentity.NewClientSecretCredential(config.NetworkResourceTenantID, config.AADClientID, config.AADClientSecret, credOptions) + networkClientSecretCredential, err = azidentity.NewClientSecretCredential(armConfig.NetworkResourceTenantID, config.GetAADClientID(), config.AADClientSecret, credOptions) if err != nil { return nil, err } credOptions = &azidentity.ClientSecretCredentialOptions{ - ClientOptions: clientOption.ClientOptions, - AdditionallyAllowedTenants: []string{config.NetworkResourceTenantID}, + ClientOptions: *clientOption, + AdditionallyAllowedTenants: []string{armConfig.NetworkResourceTenantID}, } - multiTenantCredential, err = azidentity.NewClientSecretCredential(config.TenantID, config.AADClientID, config.AADClientSecret, credOptions) + multiTenantCredential, err = azidentity.NewClientSecretCredential(armConfig.GetTenantID(), config.GetAADClientID(), config.GetAADClientSecret(), credOptions) if err != nil { return nil, err } @@ -166,7 +121,7 @@ func NewAuthProvider(config AzureAuthConfig, clientOption *policy.ClientOptions) var clientCertificateCredential azcore.TokenCredential if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 { credOptions := &azidentity.ClientCertificateCredentialOptions{ - ClientOptions: clientOption.ClientOptions, + ClientOptions: *clientOption, SendCertificateChain: true, } certData, err := os.ReadFile(config.AADClientCertPath) @@ -177,7 +132,7 @@ func NewAuthProvider(config AzureAuthConfig, clientOption *policy.ClientOptions) if err != nil { return nil, fmt.Errorf("decoding the client certificate: %w", err) } - clientCertificateCredential, err = azidentity.NewClientCertificateCredential(config.TenantID, config.AADClientID, []*x509.Certificate{certificate}, privateKey, credOptions) + clientCertificateCredential, err = azidentity.NewClientCertificateCredential(armConfig.GetTenantID(), config.GetAADClientID(), []*x509.Certificate{certificate}, privateKey, credOptions) if err != nil { return nil, err } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth_conf.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth_conf.go new file mode 100644 index 000000000..4deeb1bb2 --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth_conf.go @@ -0,0 +1,69 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package azclient + +import ( + "os" + + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" +) + +// AzureAuthConfig holds auth related part of cloud config +type AzureAuthConfig struct { + // The ClientID for an AAD application with RBAC access to talk to Azure RM APIs + AADClientID string `json:"aadClientId,omitempty" yaml:"aadClientId,omitempty"` + // The ClientSecret for an AAD application with RBAC access to talk to Azure RM APIs + AADClientSecret string `json:"aadClientSecret,omitempty" yaml:"aadClientSecret,omitempty" datapolicy:"token"` + // The path of a client certificate for an AAD application with RBAC access to talk to Azure RM APIs + AADClientCertPath string `json:"aadClientCertPath,omitempty" yaml:"aadClientCertPath,omitempty"` + // The password of the client certificate for an AAD application with RBAC access to talk to Azure RM APIs + AADClientCertPassword string `json:"aadClientCertPassword,omitempty" yaml:"aadClientCertPassword,omitempty" datapolicy:"password"` + // Use managed service identity for the virtual machine to access Azure ARM APIs + UseManagedIdentityExtension bool `json:"useManagedIdentityExtension,omitempty" yaml:"useManagedIdentityExtension,omitempty"` + // UserAssignedIdentityID contains the Client ID of the user assigned MSI which is assigned to the underlying VMs. If empty the user assigned identity is not used. + // More details of the user assigned identity can be found at: https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/overview + // For the user assigned identity specified here to be used, the UseManagedIdentityExtension has to be set to true. + UserAssignedIdentityID string `json:"userAssignedIdentityID,omitempty" yaml:"userAssignedIdentityID,omitempty"` + // The AAD federated token file + AADFederatedTokenFile string `json:"aadFederatedTokenFile,omitempty" yaml:"aadFederatedTokenFile,omitempty"` + // Use workload identity federation for the virtual machine to access Azure ARM APIs + UseFederatedWorkloadIdentityExtension bool `json:"useFederatedWorkloadIdentityExtension,omitempty" yaml:"useFederatedWorkloadIdentityExtension,omitempty"` +} + +func (config *AzureAuthConfig) GetAADClientID() string { + // these environment variables are injected by workload identity webhook + if clientID := os.Getenv(utils.AzureClientID); clientID != "" { + return clientID + } + return config.AADClientID +} + +func (config *AzureAuthConfig) GetAADClientSecret() string { + // these environment variables are injected by workload identity webhook + if clientSecret := os.Getenv(utils.AzureClientSecret); clientSecret != "" { + return clientSecret + } + return config.AADClientSecret +} + +func (config *AzureAuthConfig) GetAzureFederatedTokenFile() (string, bool) { + // these environment variables are injected by workload identity webhook + if clientCertPath := os.Getenv(utils.AzureFederatedTokenFile); clientCertPath != "" { + return clientCertPath, true + } + return config.AADClientCertPath, config.UseFederatedWorkloadIdentityExtension +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/cloud.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/cloud.go index dd49602e8..b88e88049 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/cloud.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/cloud.go @@ -118,21 +118,16 @@ func AzureCloudConfigOverrideFromEnv(config *cloud.Configuration) (*cloud.Config return config, nil } +// GetAzureCloudConfig returns the cloud configuration for the given 
ARMClientConfig. func GetAzureCloudConfig(armConfig *ARMClientConfig) (*cloud.Configuration, error) { - var config *cloud.Configuration - var err error if armConfig == nil { - config = &cloud.AzurePublic - } else { - config = AzureCloudConfigFromName(armConfig.Cloud) - if armConfig.ResourceManagerEndpoint != "" { - config, err = AzureCloudConfigFromURL(armConfig.ResourceManagerEndpoint) - if err != nil { - return nil, err - } - } + return &cloud.AzurePublic, nil } - return AzureCloudConfigOverrideFromEnv(config) + if armConfig.ResourceManagerEndpoint != "" { + return AzureCloudConfigFromURL(armConfig.ResourceManagerEndpoint) + } + + return AzureCloudConfigOverrideFromEnv(AzureCloudConfigFromName(armConfig.Cloud)) } // Environment represents a set of endpoints for each of Azure's Clouds. diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_conf.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_conf.go index 4b12a3cfc..f3b40edbc 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_conf.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_conf.go @@ -19,6 +19,7 @@ package azclient import ( "context" "net/http" + "strings" "sync" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy" @@ -53,17 +54,23 @@ type ClientFactoryConfig struct { } func GetDefaultResourceClientOption(armConfig *ARMClientConfig, factoryConfig *ClientFactoryConfig) (*policy.ClientOptions, error) { - //Get default settings - options, err := NewClientOptionFromARMClientConfig(armConfig) + armClientOption := policy.ClientOptions{} + options, err := GetAzCoreClientOption(armConfig) if err != nil { return nil, err } + armClientOption.ClientOptions = *options + + if armConfig != nil && armConfig.NetworkResourceTenantID != "" && !strings.EqualFold(armConfig.NetworkResourceTenantID, armConfig.GetTenantID()) { + armClientOption.AuxiliaryTenants = []string{armConfig.NetworkResourceTenantID} + } + if factoryConfig != nil { //Set retry if !factoryConfig.CloudProviderBackoff { options.Retry.MaxRetries = 0 } } - options.Transport = DefaultResourceClientTransport - return options, err + armClientOption.ClientOptions.Transport = DefaultResourceClientTransport + return &armClientOption, err } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_gen.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_gen.go index d3808b544..767b16283 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_gen.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_gen.go @@ -91,7 +91,7 @@ type ClientFactoryImpl struct { virtualnetworkclientInterface virtualnetworkclient.Interface } -func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, cred azcore.TokenCredential) (ClientFactory, error) { +func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, cred azcore.TokenCredential, clientOptionsMutFn ...func(option *arm.ClientOptions)) (ClientFactory, error) { if config == nil { config = &ClientFactoryConfig{} } @@ -107,10 +107,14 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if err != nil { return nil, err } - var ratelimitOption *ratelimit.Config var rateLimitPolicy policy.Policy + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } accountclientInterface, err := accountclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -127,6 +131,11 @@ func 
NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } availabilitysetclientInterface, err := availabilitysetclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -137,7 +146,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if err != nil { return nil, err } - + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } blobcontainerclientInterface, err := blobcontainerclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -148,7 +161,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if err != nil { return nil, err } - + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } blobservicepropertiesclientInterface, err := blobservicepropertiesclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -165,6 +182,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } deploymentclientInterface, err := deploymentclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -181,6 +203,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } diskclientInterface, err := diskclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -191,7 +218,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if err != nil { return nil, err } - + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } fileshareclientInterface, err := fileshareclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -208,6 +239,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } interfaceclientInterface, err := interfaceclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -224,6 +260,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } ipgroupclientInterface, err := ipgroupclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -240,6 +281,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if 
rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } loadbalancerclientInterface, err := loadbalancerclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -256,6 +302,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } managedclusterclientInterface, err := managedclusterclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -272,6 +323,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } privateendpointclientInterface, err := privateendpointclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -288,6 +344,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } privatelinkserviceclientInterface, err := privatelinkserviceclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -304,6 +365,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } privatezoneclientInterface, err := privatezoneclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -314,7 +380,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if err != nil { return nil, err } - + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } providerclientInterface, err := providerclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -331,6 +401,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } publicipaddressclientInterface, err := publicipaddressclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -341,7 +416,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if err != nil { return nil, err } - + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } publicipprefixclientInterface, err := publicipprefixclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -352,7 +431,11 @@ func NewClientFactory(config 
*ClientFactoryConfig, armConfig *ARMClientConfig, c if err != nil { return nil, err } - + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } registryclientInterface, err := registryclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -363,7 +446,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if err != nil { return nil, err } - + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } resourcegroupclientInterface, err := resourcegroupclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -380,6 +467,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } routetableclientInterface, err := routetableclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -390,7 +482,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if err != nil { return nil, err } - + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } secretclientInterface, err := secretclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -407,6 +503,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } securitygroupclientInterface, err := securitygroupclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -423,6 +524,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } snapshotclientInterface, err := snapshotclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -433,7 +539,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if err != nil { return nil, err } - + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } sshpublickeyresourceclientInterface, err := sshpublickeyresourceclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -450,6 +560,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } subnetclientInterface, err := subnetclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -460,7 +575,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if err != nil { return nil, err } - + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + 
} + } vaultclientInterface, err := vaultclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -477,6 +596,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } virtualmachineclientInterface, err := virtualmachineclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -493,6 +617,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } virtualmachinescalesetclientInterface, err := virtualmachinescalesetclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -503,7 +632,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if err != nil { return nil, err } - + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } virtualmachinescalesetvmclientInterface, err := virtualmachinescalesetvmclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err @@ -514,7 +647,11 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c if err != nil { return nil, err } - + for _, optionMutFn := range clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } virtualnetworkclientInterface, err := virtualnetworkclient.New(config.SubscriptionID, cred, options) if err != nil { return nil, err diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/const.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/const.go index 59ddc43f3..f21e34e7a 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/const.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/const.go @@ -18,7 +18,7 @@ package utils const ( AzureClientID = "AZURE_CLIENT_ID" - AZURECLIENTSECRET = "AZURE_CLIENT_SECRET" //nolint:gosec + AzureClientSecret = "AZURE_CLIENT_SECRET" AzureFederatedTokenFile = "AZURE_FEDERATED_TOKEN_FILE" AzureTenantID = "AZURE_TENANT_ID" ) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/options.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/options.go index 4a1378139..435a48038 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/options.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/options.go @@ -21,6 +21,7 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" @@ -33,26 +34,29 @@ const ( DefaultMaxRetries = 3 DefaultMaxRetryDelay = 60 * time.Second DefaultRetryDelay = 5 * time.Second - DefaultTryTimeout = 10 * time.Second ) func GetDefaultOption() *arm.ClientOptions { return &arm.ClientOptions{ - ClientOptions: policy.ClientOptions{ - Retry: policy.RetryOptions{ - RetryDelay: DefaultRetryDelay, - MaxRetryDelay: DefaultMaxRetryDelay, - MaxRetries: DefaultMaxRetries, - TryTimeout: DefaultTryTimeout, - StatusCodes: 
retryrepectthrottled.GetRetriableStatusCode(), - }, - PerRetryPolicies: []policy.Policy{ - retryrepectthrottled.NewThrottlingPolicy(), - }, - Transport: &http.Client{ - Transport: DefaultTransport, - }, - TracingProvider: TracingProvider, + ClientOptions: GetDefaultAzCoreClientOption(), + } +} + +func GetDefaultAzCoreClientOption() policy.ClientOptions { + return policy.ClientOptions{ + Retry: policy.RetryOptions{ + RetryDelay: DefaultRetryDelay, + MaxRetryDelay: DefaultMaxRetryDelay, + MaxRetries: DefaultMaxRetries, + StatusCodes: retryrepectthrottled.GetRetriableStatusCode(), + }, + PerRetryPolicies: []policy.Policy{ + retryrepectthrottled.NewThrottlingPolicy(), + }, + Transport: &http.Client{ + Transport: DefaultTransport, }, + TracingProvider: TracingProvider, + Cloud: cloud.AzurePublic, } } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/azure_armclient.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/azure_armclient.go index c9f6a044b..2dfb7c9ff 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/azure_armclient.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/azure_armclient.go @@ -187,7 +187,7 @@ func NormalizeAzureRegion(name string) string { } // Send sends a http request to ARM service with possible retry to regional ARM endpoint. -func (c *Client) Send(ctx context.Context, request *http.Request, decorators ...autorest.SendDecorator) (*http.Response, *retry.Error) { +func (c *Client) Send(_ context.Context, request *http.Request, decorators ...autorest.SendDecorator) (*http.Response, *retry.Error) { response, err := autorest.SendWithSender( c.client, request, @@ -286,7 +286,7 @@ func (c *Client) WaitForAsyncOperationCompletion(ctx context.Context, future *az } // WaitForAsyncOperationResult waits for an operation result. 
-func (c *Client) WaitForAsyncOperationResult(ctx context.Context, future *azure.Future, asyncOperationName string) (*http.Response, error) { +func (c *Client) WaitForAsyncOperationResult(ctx context.Context, future *azure.Future, _ string) (*http.Response, error) { if err := future.WaitForCompletionRef(ctx, c.client); err != nil { klog.V(5).Infof("Received error in WaitForAsyncOperationCompletion: '%v'", err) return nil, err @@ -439,7 +439,7 @@ func (c *Client) PutResourcesInBatches(ctx context.Context, resources map[string } if batchSize > len(resources) { - klog.V(4).Infof("PutResourcesInBatches: batch size %d, but the number of the resources is %d", batchSize, resources) + klog.V(4).Infof("PutResourcesInBatches: batch size %d, but the number of the resources is %d", batchSize, len(resources)) batchSize = len(resources) } klog.V(4).Infof("PutResourcesInBatches: send sync requests in parallel with the batch size %d", batchSize) @@ -577,7 +577,7 @@ func (c *Client) PostResource(ctx context.Context, resourceID, action string, pa } // DeleteResource deletes a resource by resource ID -func (c *Client) DeleteResource(ctx context.Context, resourceID string, decorators ...autorest.PrepareDecorator) *retry.Error { +func (c *Client) DeleteResource(ctx context.Context, resourceID string, _ ...autorest.PrepareDecorator) *retry.Error { future, clientErr := c.DeleteResourceAsync(ctx, resourceID) if clientErr != nil { klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "delete.request", resourceID, clientErr.Error()) @@ -650,7 +650,7 @@ func (c *Client) DeleteResourceAsync(ctx context.Context, resourceID string, dec } // CloseResponse closes a response -func (c *Client) CloseResponse(ctx context.Context, response *http.Response) { +func (c *Client) CloseResponse(_ context.Context, response *http.Response) { if response != nil && response.Body != nil { if err := response.Body.Close(); err != nil { klog.Errorf("Error closing the response body: %v", err) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/azure_vmclient.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/azure_vmclient.go index b429c6c9d..33576bab8 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/azure_vmclient.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/azure_vmclient.go @@ -420,7 +420,7 @@ func (c *Client) WaitForUpdateResult(ctx context.Context, future *azure.Future, } // updateVM updates a VirtualMachine. -func (c *Client) updateVM(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachineUpdate, source string) (*compute.VirtualMachine, *retry.Error) { +func (c *Client) updateVM(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachineUpdate, _ string) (*compute.VirtualMachine, *retry.Error) { resourceID := armclient.GetResourceID( c.subscriptionID, resourceGroupName, @@ -583,7 +583,7 @@ func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, V } // createOrUpdateVM creates or updates a VirtualMachine. 
-func (c *Client) createOrUpdateVM(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine, source string) *retry.Error { +func (c *Client) createOrUpdateVM(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine, _ string) *retry.Error { resourceID := armclient.GetResourceID( c.subscriptionID, resourceGroupName, diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/azure_vmssvmclient.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/azure_vmssvmclient.go index 6ab02bdce..4ad702caa 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/azure_vmssvmclient.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/azure_vmssvmclient.go @@ -525,7 +525,7 @@ func (c *Client) updateVMSSVMs(ctx context.Context, resourceGroupName string, VM continue } if strings.Contains(errMsg, consts.ParentResourceNotFoundMessageCode) { - klog.V(2).Info("The parent resource of VMSS VM %s is not found, skip updating it.", resourceID) + klog.V(2).Infof("The parent resource of VMSS VM %s is not found, skip updating it.", resourceID) continue } if strings.Contains(errMsg, consts.CannotUpdateVMBeingDeletedMessagePrefix) && diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go index 52faba826..5d1ed7351 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go @@ -108,11 +108,11 @@ var ( // See https://kubernetes.io/docs/reference/using-api/deprecation-policy/#deprecating-a-flag-or-cli // for more details. type Config struct { - ratelimitconfig.AzureAuthConfig - ratelimitconfig.CloudProviderRateLimitConfig + ratelimitconfig.AzureAuthConfig `json:",inline" yaml:",inline"` + ratelimitconfig.CloudProviderRateLimitConfig `json:",inline" yaml:",inline"` // The cloud configure type for Azure cloud provider. Supported values are file, secret and merge. - CloudConfigType cloudConfigType `json:"cloudConfigType,omitempty" yaml:"cloudConfigType,omitempty"` + CloudConfigType configloader.CloudConfigType `json:"cloudConfigType,omitempty" yaml:"cloudConfigType,omitempty"` // The name of the resource group that the cluster is deployed in ResourceGroup string `json:"resourceGroup,omitempty" yaml:"resourceGroup,omitempty"` @@ -565,12 +565,12 @@ func (az *Cloud) InitializeCloudFromConfig(ctx context.Context, config *Config, if config.CloudConfigType == "" { // The default cloud config type is cloudConfigTypeMerge. - config.CloudConfigType = cloudConfigTypeMerge + config.CloudConfigType = configloader.CloudConfigTypeMerge } else { supportedCloudConfigTypes := sets.New( - string(cloudConfigTypeMerge), - string(cloudConfigTypeFile), - string(cloudConfigTypeSecret)) + string(configloader.CloudConfigTypeMerge), + string(configloader.CloudConfigTypeFile), + string(configloader.CloudConfigTypeSecret)) if !supportedCloudConfigTypes.Has(string(config.CloudConfigType)) { return fmt.Errorf("cloudConfigType %v is not supported, supported values are %v", config.CloudConfigType, supportedCloudConfigTypes.UnsortedList()) } @@ -677,7 +677,7 @@ func (az *Cloud) InitializeCloudFromConfig(ctx context.Context, config *Config, // No credentials provided, useInstanceMetadata should be enabled for Kubelet. 
// TODO(feiskyer): print different error message for Kubelet and controller-manager, as they're // requiring different credential settings. - if !config.UseInstanceMetadata && config.CloudConfigType == cloudConfigTypeFile { + if !config.UseInstanceMetadata && config.CloudConfigType == configloader.CloudConfigTypeFile { return fmt.Errorf("useInstanceMetadata must be enabled without Azure credentials") } @@ -871,7 +871,7 @@ func (az *Cloud) setLBDefaults(config *Config) error { return nil } -func (az *Cloud) getAuthTokenInMultiTenantEnv(servicePrincipalToken *adal.ServicePrincipalToken) (*adal.MultiTenantServicePrincipalToken, *adal.ServicePrincipalToken, error) { +func (az *Cloud) getAuthTokenInMultiTenantEnv(_ *adal.ServicePrincipalToken) (*adal.MultiTenantServicePrincipalToken, *adal.ServicePrincipalToken, error) { var err error var multiTenantServicePrincipalToken *adal.MultiTenantServicePrincipalToken var networkResourceServicePrincipalToken *adal.ServicePrincipalToken @@ -1096,7 +1096,7 @@ func (az *Cloud) isStackCloud() bool { } // Initialize passes a Kubernetes clientBuilder interface to the cloud provider -func (az *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { +func (az *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, _ <-chan struct{}) { az.KubeClient = clientBuilder.ClientOrDie("azure-cloud-provider") az.eventBroadcaster = record.NewBroadcaster() az.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: az.KubeClient.CoreV1().Events("")}) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_config.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_config.go deleted file mode 100644 index ccfac7184..000000000 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_config.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package provider - -// The config type for Azure cloud provider secret. Supported values are: -// * file : The values are read from local cloud-config file. -// * secret : The values from secret would override all configures from local cloud-config file. -// * merge : The values from secret would override only configurations that are explicitly set in the secret. This is the default value. 
-type cloudConfigType string - -const ( - cloudConfigTypeFile cloudConfigType = "file" - cloudConfigTypeSecret cloudConfigType = "secret" - cloudConfigTypeMerge cloudConfigType = "merge" -) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go index 7e84e2e30..1248a57b1 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go @@ -267,7 +267,7 @@ func (c *controllerCommon) AttachDisk(ctx context.Context, async bool, diskName, return -1, err } - klog.V(2).Infof("Trying to attach volume %s lun %d to node %s, diskMap len:%d, %s", diskURI, lun, nodeName, len(diskMap), diskMap) + klog.V(2).Infof("Trying to attach volume %s lun %d to node %s, diskMap len:%d, %+v", diskURI, lun, nodeName, len(diskMap), diskMap) if len(diskMap) == 0 { if !c.DisableDiskLunCheck { // always check disk lun after disk attach complete @@ -669,20 +669,6 @@ func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.N return attached, nil } -func filterDetachingDisks(unfilteredDisks []compute.DataDisk) []compute.DataDisk { - filteredDisks := []compute.DataDisk{} - for _, disk := range unfilteredDisks { - if disk.ToBeDetached != nil && *disk.ToBeDetached { - if disk.Name != nil { - klog.V(2).Infof("Filtering disk: %s with ToBeDetached flag set.", *disk.Name) - } - } else { - filteredDisks = append(filteredDisks, disk) - } - } - return filteredDisks -} - func (c *controllerCommon) filterNonExistingDisks(ctx context.Context, unfilteredDisks []compute.DataDisk) []compute.DataDisk { filteredDisks := []compute.DataDisk{} for _, disk := range unfilteredDisks { diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go index 4ce4d9c47..0606ac4d4 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go @@ -59,9 +59,8 @@ func (as *availabilitySet) AttachDisk(ctx context.Context, nodeName types.NodeNa if *disk.Lun == opt.lun { attached = true break - } else { - return nil, fmt.Errorf("disk(%s) already attached to node(%s) on LUN(%d), but target LUN is %d", diskURI, nodeName, *disk.Lun, opt.lun) } + return nil, fmt.Errorf("disk(%s) already attached to node(%s) on LUN(%d), but target LUN is %d", diskURI, nodeName, *disk.Lun, opt.lun) } } if attached { @@ -100,11 +99,11 @@ func (as *availabilitySet) AttachDisk(ctx context.Context, nodeName types.NodeNa }, }, } - klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%s)", nodeResourceGroup, vmName, diskMap) + klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%v)", nodeResourceGroup, vmName, diskMap) future, rerr := as.VirtualMachinesClient.UpdateAsync(ctx, nodeResourceGroup, vmName, newVM, "attach_disk") if rerr != nil { - klog.Errorf("azureDisk - attach disk list(%s) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, vmName, rerr) + klog.Errorf("azureDisk - attach disk list(%v) on rg(%s) vm(%s) failed, err: %+v", diskMap, nodeResourceGroup, vmName, rerr) if rerr.HTTPStatusCode == http.StatusNotFound { klog.Errorf("azureDisk - begin to filterNonExistingDisks(%v) on rg(%s) vm(%s)", diskMap, nodeResourceGroup, vmName) disks := 
as.filterNonExistingDisks(ctx, *newVM.VirtualMachineProperties.StorageProfile.DataDisks) @@ -113,7 +112,7 @@ func (as *availabilitySet) AttachDisk(ctx context.Context, nodeName types.NodeNa } } - klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%s) returned with %v", nodeResourceGroup, vmName, diskMap, rerr) + klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%v) returned with %v", nodeResourceGroup, vmName, diskMap, rerr) if rerr != nil { return future, rerr.Error() } @@ -207,7 +206,7 @@ func (as *availabilitySet) DetachDisk(ctx context.Context, nodeName types.NodeNa }, }, } - klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk list(%s)", nodeResourceGroup, vmName, nodeName, diskMap) + klog.V(2).Infof("azureDisk - update(%s): vm(%s) node(%s)- detach disk list(%s)", nodeResourceGroup, vmName, nodeName, diskMap) var result *compute.VirtualMachine var rerr *retry.Error diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go index 2a2e2c227..6c666106b 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go @@ -65,9 +65,9 @@ func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, dis if *disk.Lun == opt.lun { attached = true break - } else { - return nil, fmt.Errorf("disk(%s) already attached to node(%s) on LUN(%d), but target LUN is %d", diskURI, nodeName, *disk.Lun, opt.lun) } + return nil, fmt.Errorf("disk(%s) already attached to node(%s) on LUN(%d), but target LUN is %d", diskURI, nodeName, *disk.Lun, opt.lun) + } } if attached { @@ -107,10 +107,10 @@ func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, dis }, } - klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%s)", nodeResourceGroup, nodeName, diskMap) + klog.V(2).Infof("azureDisk - update: rg(%s) vm(%s) - attach disk list(%+v)", nodeResourceGroup, nodeName, diskMap) future, rerr := ss.VirtualMachineScaleSetVMsClient.UpdateAsync(ctx, nodeResourceGroup, vm.VMSSName, vm.InstanceID, newVM, "attach_disk") if rerr != nil { - klog.Errorf("azureDisk - attach disk list(%s) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, nodeName, rerr) + klog.Errorf("azureDisk - attach disk list(%+v) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, nodeName, rerr) if rerr.HTTPStatusCode == http.StatusNotFound { klog.Errorf("azureDisk - begin to filterNonExistingDisks(%v) on rg(%s) vm(%s)", diskMap, nodeResourceGroup, nodeName) disks := ss.filterNonExistingDisks(ctx, *newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks) @@ -119,7 +119,7 @@ func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, dis } } - klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%s, %s) returned with %v", nodeResourceGroup, nodeName, diskMap, rerr) + klog.V(2).Infof("azureDisk - update: rg(%s) vm(%s) - attach disk list(%+v) returned with %v", nodeResourceGroup, nodeName, diskMap, rerr) if rerr != nil { return future, rerr.Error() } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go index 17c914dd4..ce4e0c3a0 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go +++ 
b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go @@ -59,9 +59,8 @@ func (fs *FlexScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, if *disk.Lun == opt.lun { attached = true break - } else { - return nil, fmt.Errorf("disk(%s) already attached to node(%s) on LUN(%d), but target LUN is %d", diskURI, nodeName, *disk.Lun, opt.lun) } + return nil, fmt.Errorf("disk(%s) already attached to node(%s) on LUN(%d), but target LUN is %d", diskURI, nodeName, *disk.Lun, opt.lun) } } if attached { @@ -101,11 +100,11 @@ func (fs *FlexScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, }, } - klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%s)", nodeResourceGroup, vmName, diskMap) + klog.V(2).Infof("azureDisk - update: rg(%s) vm(%s) - attach disk list(%+v)", nodeResourceGroup, vmName, diskMap) future, rerr := fs.VirtualMachinesClient.UpdateAsync(ctx, nodeResourceGroup, *vm.Name, newVM, "attach_disk") if rerr != nil { - klog.Errorf("azureDisk - attach disk list(%s) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, vmName, rerr) + klog.Errorf("azureDisk - attach disk list(%+v) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, vmName, rerr) if rerr.HTTPStatusCode == http.StatusNotFound { klog.Errorf("azureDisk - begin to filterNonExistingDisks(%v) on rg(%s) vm(%s)", diskMap, nodeResourceGroup, vmName) disks := fs.filterNonExistingDisks(ctx, *newVM.VirtualMachineProperties.StorageProfile.DataDisks) @@ -114,7 +113,7 @@ func (fs *FlexScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, } } - klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%s) returned with %v", nodeResourceGroup, vmName, diskMap, rerr) + klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%+v) returned with %v", nodeResourceGroup, vmName, diskMap, rerr) if rerr != nil { return future, rerr.Error() } @@ -191,7 +190,7 @@ func (fs *FlexScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, } }() - klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk list(%s)", nodeResourceGroup, vmName, nodeName, diskMap) + klog.V(2).Infof("azureDisk - update(%s): vm(%s) node(%s)- detach disk list(%s)", nodeResourceGroup, vmName, nodeName, diskMap) result, rerr = fs.VirtualMachinesClient.Update(ctx, nodeResourceGroup, *vm.Name, newVM, "detach_disk") if rerr != nil { diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go index cf58e172f..0110aa553 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go @@ -18,8 +18,8 @@ package provider import ( "context" - "fmt" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient" "sigs.k8s.io/cloud-provider-azure/pkg/provider/config" "github.com/golang/mock/gomock" @@ -43,10 +43,6 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/consts" ) -var ( - errPreconditionFailedEtagMismatch = fmt.Errorf("PreconditionFailedEtagMismatch") -) - // NewTestScaleSet creates a fake ScaleSet for unit test func NewTestScaleSet(ctrl *gomock.Controller) (*ScaleSet, error) { return newTestScaleSetWithState(ctrl) @@ -77,8 +73,11 @@ func GetTestCloud(ctrl *gomock.Controller) (az *Cloud) { az = &Cloud{ Config: Config{ AzureAuthConfig: config.AzureAuthConfig{ - TenantID: "tenant", - SubscriptionID: "subscription", + ARMClientConfig: azclient.ARMClientConfig{ + TenantID: "TenantID", + }, + 
AzureAuthConfig: azclient.AzureAuthConfig{}, + SubscriptionID: "subscription", }, ResourceGroup: "rg", VnetResourceGroup: "rg", diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instance_metadata.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instance_metadata.go index 99f4cc905..82406facf 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instance_metadata.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instance_metadata.go @@ -177,7 +177,7 @@ func (ims *InstanceMetadataService) getMetadata(key string) (interface{}, error) return instanceMetadata, nil } -func (ims *InstanceMetadataService) getInstanceMetadata(key string) (*InstanceMetadata, error) { +func (ims *InstanceMetadataService) getInstanceMetadata(_ string) (*InstanceMetadata, error) { req, err := http.NewRequest("GET", ims.imdsServer+consts.ImdsInstanceURI, nil) if err != nil { return nil, err diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instances_v1.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instances_v1.go index 8682afd3b..e8a4a76f4 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instances_v1.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instances_v1.go @@ -71,7 +71,7 @@ func (az *Cloud) addressGetter(nodeName types.NodeName) ([]v1.NodeAddress, error } // NodeAddresses returns the addresses of the specified instance. -func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) { +func (az *Cloud) NodeAddresses(_ context.Context, name types.NodeName) ([]v1.NodeAddress, error) { // Returns nil for unmanaged nodes because azure cloud provider couldn't fetch information for them. unmanaged, err := az.IsNodeUnmanaged(string(name)) if err != nil { @@ -223,7 +223,7 @@ func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID stri } // InstanceShutdownByProviderID returns true if the instance is in safe state to detach volumes -func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) { +func (az *Cloud) InstanceShutdownByProviderID(_ context.Context, providerID string) (bool, error) { if providerID == "" { return false, nil } @@ -293,7 +293,7 @@ func (az *Cloud) isCurrentInstance(name types.NodeName, metadataVMName string) ( // InstanceID returns the cloud provider ID of the specified instance. // Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound) -func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, error) { +func (az *Cloud) InstanceID(_ context.Context, name types.NodeName) (string, error) { nodeName := mapNodeNameToVMName(name) unmanaged, err := az.IsNodeUnmanaged(nodeName) if err != nil { @@ -335,7 +335,7 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e return az.VMSet.GetInstanceIDByNodeName(nodeName) } -func (az *Cloud) getLocalInstanceProviderID(metadata *InstanceMetadata, nodeName string) (string, error) { +func (az *Cloud) getLocalInstanceProviderID(metadata *InstanceMetadata, _ string) (string, error) { // Get resource group name and subscription ID. 
resourceGroup := strings.ToLower(metadata.Compute.ResourceGroup) subscriptionID := strings.ToLower(metadata.Compute.SubscriptionID) @@ -383,7 +383,7 @@ func (az *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string // Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound) // (Implementer Note): This is used by kubelet. Kubelet will label the node. Real log from kubelet: // Adding node label from cloud provider: beta.kubernetes.io/instance-type=[value] -func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string, error) { +func (az *Cloud) InstanceType(_ context.Context, name types.NodeName) (string, error) { // Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them. unmanaged, err := az.IsNodeUnmanaged(string(name)) if err != nil { @@ -432,13 +432,13 @@ func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string, // AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances // expected format for the key is standard ssh-keygen format: -func (az *Cloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error { +func (az *Cloud) AddSSHKeyToAllInstances(_ context.Context, _ string, _ []byte) error { return cloudprovider.NotImplemented } // CurrentNodeName returns the name of the node we are currently running on. // On Azure this is the hostname, so we just return the hostname. -func (az *Cloud) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) { +func (az *Cloud) CurrentNodeName(_ context.Context, hostname string) (types.NodeName, error) { return types.NodeName(hostname), nil } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go index ef4d960bb..ad2e808f4 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go @@ -85,7 +85,7 @@ func (az *Cloud) existsPip(clusterName string, service *v1.Service) bool { // Implementations must treat the *v1.Service parameter as read-only and not modify it. // Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager. // TODO: Break this up into different interfaces (LB, etc) when we have more than one type of service -func (az *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) { +func (az *Cloud) GetLoadBalancer(_ context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) { existingLBs, err := az.ListLB(service) if err != nil { return nil, az.existsPip(clusterName, service), err @@ -121,7 +121,7 @@ func getPublicIPDomainNameLabel(service *v1.Service) (string, bool) { } // reconcileService reconcile the LoadBalancer service. It returns LoadBalancerStatus on success. 
-func (az *Cloud) reconcileService(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { +func (az *Cloud) reconcileService(_ context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { serviceName := getServiceName(service) resourceBaseName := az.GetLoadBalancerName(context.TODO(), "", service) klog.V(2).Infof("reconcileService: Start reconciling Service %q with its resource basename %q", serviceName, resourceBaseName) @@ -300,7 +300,7 @@ func (az *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, ser // doesn't exist even if some part of it is still laying around. // Implementations must treat the *v1.Service parameter as read-only and not modify it. // Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager -func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error { +func (az *Cloud) EnsureLoadBalancerDeleted(_ context.Context, clusterName string, service *v1.Service) error { // Serialize service reconcile process az.serviceReconcileLock.Lock() defer az.serviceReconcileLock.Unlock() @@ -354,7 +354,7 @@ func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName stri // GetLoadBalancerName returns the name of the load balancer. Implementations must treat the // *v1.Service parameter as read-only and not modify it. -func (az *Cloud) GetLoadBalancerName(ctx context.Context, clusterName string, service *v1.Service) string { +func (az *Cloud) GetLoadBalancerName(_ context.Context, _ string, service *v1.Service) string { return cloudprovider.DefaultLoadBalancerName(service) } @@ -1561,7 +1561,7 @@ func (az *Cloud) reconcileMultipleStandardLoadBalancerConfigurations( svcs, err := az.KubeClient.CoreV1().Services("").List(context.Background(), metav1.ListOptions{}) if err != nil { - klog.Errorf("reconcileMultipleStandardLoadBalancerConfigurations: failed to list all load balancer services: %w", err) + klog.Errorf("reconcileMultipleStandardLoadBalancerConfigurations: failed to list all load balancer services: %V", err) return fmt.Errorf("failed to list all load balancer services: %w", err) } rulePrefixToSVCNameMap := make(map[string]string) @@ -3025,11 +3025,11 @@ func (az *Cloud) reconcileSecurityRules(sg network.SecurityGroup, if len(existingPrefixes) == 1 { shouldDeleteNSGRule = true break //shared nsg rule has only one entry and entry owned by deleted svc has been found. skip the rest of the entries - } else { - newDestinations := append(existingPrefixes[:addressIndex], existingPrefixes[addressIndex+1:]...) - sharedRule.DestinationAddressPrefixes = &newDestinations - updatedRules[sharedIndex] = sharedRule } + newDestinations := append(existingPrefixes[:addressIndex], existingPrefixes[addressIndex+1:]...) 
+ sharedRule.DestinationAddressPrefixes = &newDestinations + updatedRules[sharedIndex] = sharedRule + dirtySg = true } } @@ -3331,9 +3331,7 @@ func deduplicate(collection *[]string) *[]string { result := make([]string, 0, len(*collection)) for _, v := range *collection { - if seen[v] { - // skip this element - } else { + if !seen[v] { seen[v] = true result = append(result, v) } @@ -3908,27 +3906,6 @@ func useSharedSecurityRule(service *v1.Service) bool { return false } -func getServiceTags(service *v1.Service) []string { - if service == nil { - return nil - } - - if serviceTags, found := service.Annotations[consts.ServiceAnnotationAllowedServiceTags]; found { - result := []string{} - tags := strings.Split(strings.TrimSpace(serviceTags), ",") - for _, tag := range tags { - serviceTag := strings.TrimSpace(tag) - if serviceTag != "" { - result = append(result, serviceTag) - } - } - - return result - } - - return nil -} - // serviceOwnsPublicIP checks if the service owns the pip and if the pip is user-created. // The pip is user-created if and only if there is no service tags. // The service owns the pip if: @@ -4071,8 +4048,8 @@ func bindServicesToPIP(pip *network.PublicIPAddress, incomingServiceNames []stri return addedNew, nil } -func unbindServiceFromPIP(pip *network.PublicIPAddress, service *v1.Service, - serviceName, clusterName string, isUserAssignedPIP bool) error { +func unbindServiceFromPIP(pip *network.PublicIPAddress, _ *v1.Service, + serviceName, _ string, isUserAssignedPIP bool) error { if pip == nil || pip.Tags == nil { return fmt.Errorf("nil public IP or tags") } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go index d1db1e382..67f213c32 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go @@ -66,7 +66,7 @@ func newBackendPoolTypeNodeIPConfig(c *Cloud) BackendPool { return &backendPoolTypeNodeIPConfig{c} } -func (bc *backendPoolTypeNodeIPConfig) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID, vmSetName, clusterName, lbName string, backendPool network.BackendAddressPool) error { +func (bc *backendPoolTypeNodeIPConfig) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID, vmSetName, _, _ string, _ network.BackendAddressPool) error { return bc.VMSet.EnsureHostsInPool(service, nodes, backendPoolID, vmSetName) } @@ -82,7 +82,7 @@ func isLBBackendPoolsExisting(lbBackendPoolNames map[bool]string, bpName *string return found, isIPv6 } -func (bc *backendPoolTypeNodeIPConfig) CleanupVMSetFromBackendPoolByCondition(slb *network.LoadBalancer, service *v1.Service, nodes []*v1.Node, clusterName string, shouldRemoveVMSetFromSLB func(string) bool) (*network.LoadBalancer, error) { +func (bc *backendPoolTypeNodeIPConfig) CleanupVMSetFromBackendPoolByCondition(slb *network.LoadBalancer, service *v1.Service, _ []*v1.Node, clusterName string, shouldRemoveVMSetFromSLB func(string) bool) (*network.LoadBalancer, error) { v4Enabled, v6Enabled := getIPFamiliesEnabled(service) lbBackendPoolNames := getBackendPoolNames(clusterName) @@ -388,7 +388,7 @@ func newBackendPoolTypeNodeIP(c *Cloud) BackendPool { return &backendPoolTypeNodeIP{c} } -func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID, vmSetName, clusterName, lbName string, 
backendPool network.BackendAddressPool) error { +func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, _, _, clusterName, lbName string, backendPool network.BackendAddressPool) error { isIPv6 := isBackendPoolIPv6(pointer.StringDeref(backendPool.Name, "")) vnetResourceGroup := bi.ResourceGroup if len(bi.VnetResourceGroup) > 0 { @@ -507,7 +507,7 @@ func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(service *v1.Service, nodes [] return nil } -func (bi *backendPoolTypeNodeIP) CleanupVMSetFromBackendPoolByCondition(slb *network.LoadBalancer, service *v1.Service, nodes []*v1.Node, clusterName string, shouldRemoveVMSetFromSLB func(string) bool) (*network.LoadBalancer, error) { +func (bi *backendPoolTypeNodeIP) CleanupVMSetFromBackendPoolByCondition(slb *network.LoadBalancer, _ *v1.Service, nodes []*v1.Node, clusterName string, shouldRemoveVMSetFromSLB func(string) bool) (*network.LoadBalancer, error) { lbBackendPoolNames := getBackendPoolNames(clusterName) newBackendPools := make([]network.BackendAddressPool, 0) if slb.LoadBalancerPropertiesFormat != nil && slb.BackendAddressPools != nil { @@ -760,7 +760,7 @@ func (bi *backendPoolTypeNodeIP) GetBackendPrivateIPs(clusterName string, servic backendPrivateIPv6s.Insert(*ipAddress) } } else { - klog.V(4).Infof("bi.GetBackendPrivateIPs for service (%s): lb backendpool - found null private IP") + klog.V(4).Infof("bi.GetBackendPrivateIPs for service (%s): lb backendpool - found null private IP", serviceName) } } } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_healthprobe.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_healthprobe.go index 4b0f0c896..9cf7bce13 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_healthprobe.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_healthprobe.go @@ -49,7 +49,7 @@ func (az *Cloud) buildClusterServiceSharedProbe() *network.Probe { // return nil if no new probe is added func (az *Cloud) buildHealthProbeRulesForPort(serviceManifest *v1.Service, port v1.ServicePort, lbrule string, healthCheckNodePortProbe *network.Probe, useSharedProbe bool) (*network.Probe, error) { if useSharedProbe { - klog.V(4).Infof("skip creating health probe for port %s because the shared probe is used", port.Port) + klog.V(4).Infof("skip creating health probe for port %d because the shared probe is used", port.Port) return nil, nil } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_repo.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_repo.go index f92c95c5d..83ac58487 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_repo.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_repo.go @@ -303,7 +303,7 @@ func (az *Cloud) MigrateToIPBasedBackendPoolAndWaitForCompletion( } if countIPsOnBackendPool(bp) != nicsCount { - klog.V(4).Infof("MigrateToIPBasedBackendPoolAndWaitForCompletion: Expected IPs %s, current IPs %d, will retry in 5s", nicsCount, countIPsOnBackendPool(bp)) + klog.V(4).Infof("MigrateToIPBasedBackendPoolAndWaitForCompletion: Expected IPs %d, current IPs %d, will retry in 5s", nicsCount, countIPsOnBackendPool(bp)) return false, nil } succeeded[bpName] = true diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_privatelinkservice_repo.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_privatelinkservice_repo.go index 
8dfe0cb8c..db4a6e998 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_privatelinkservice_repo.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_privatelinkservice_repo.go @@ -33,7 +33,7 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) -func (az *Cloud) CreateOrUpdatePLS(service *v1.Service, resourceGroup string, pls network.PrivateLinkService) error { +func (az *Cloud) CreateOrUpdatePLS(_ *v1.Service, resourceGroup string, pls network.PrivateLinkService) error { ctx, cancel := getContextWithCancel() defer cancel() diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_routes.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_routes.go index 6d269394a..88f5de0da 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_routes.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_routes.go @@ -93,7 +93,7 @@ func (d *delayedRouteUpdater) run(ctx context.Context) { d.updateRoutes() return false, nil }) - klog.Info("delayedRouteUpdater: stopped due to %s", err.Error()) + klog.Infof("delayedRouteUpdater: stopped due to %s", err.Error()) } // updateRoutes invokes route table client to update all routes. @@ -274,11 +274,11 @@ func (d *delayedRouteUpdater) addOperation(operation batchOperation) batchOperat return operation } -func (d *delayedRouteUpdater) removeOperation(name string) {} +func (d *delayedRouteUpdater) removeOperation(_ string) {} // ListRoutes lists all managed routes that belong to the specified clusterName // implements cloudprovider.Routes.ListRoutes -func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) { +func (az *Cloud) ListRoutes(_ context.Context, clusterName string) ([]*cloudprovider.Route, error) { klog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName) routeTable, existsRouteTable, err := az.getRouteTable(azcache.CacheReadTypeDefault) routes, err := processRoutes(az.ipv6DualStackEnabled, routeTable, existsRouteTable, err) @@ -371,7 +371,7 @@ func (az *Cloud) createRouteTable() error { // route.Name will be ignored, although the cloud-provider may use nameHint // to create a more user-meaningful name. 
// implements cloudprovider.Routes.CreateRoute -func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, kubeRoute *cloudprovider.Route) error { +func (az *Cloud) CreateRoute(_ context.Context, clusterName string, _ string, kubeRoute *cloudprovider.Route) error { mc := metrics.NewMetricContext("routes", "create_route", az.ResourceGroup, az.getNetworkResourceSubscriptionID(), string(kubeRoute.TargetNode)) isOperationSucceeded := false defer func() { @@ -447,7 +447,7 @@ func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint s // DeleteRoute deletes the specified managed route // Route should be as returned by ListRoutes // implements cloudprovider.Routes.DeleteRoute -func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute *cloudprovider.Route) error { +func (az *Cloud) DeleteRoute(_ context.Context, clusterName string, kubeRoute *cloudprovider.Route) error { mc := metrics.NewMetricContext("routes", "delete_route", az.ResourceGroup, az.getNetworkResourceSubscriptionID(), string(kubeRoute.TargetNode)) isOperationSucceeded := false defer func() { diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go index 5bc0b27ee..8256dc219 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go @@ -984,7 +984,7 @@ func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.No // EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes. // backendPoolIDs are the IDs of the backendpools to be deleted. -func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolIDs []string, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) (bool, error) { +func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolIDs []string, vmSetName string, backendAddressPools *[]network.BackendAddressPool, _ bool) (bool, error) { // Returns nil if backend address pools already deleted. if backendAddressPools == nil { return false, nil @@ -1276,7 +1276,7 @@ func (as *availabilitySet) GetNodeCIDRMasksByProviderID(providerID string) (int, } // EnsureBackendPoolDeletedFromVMSets ensures the loadBalancer backendAddressPools deleted from the specified VMAS -func (as *availabilitySet) EnsureBackendPoolDeletedFromVMSets(vmasNamesMap map[string]bool, backendPoolIDs []string) error { +func (as *availabilitySet) EnsureBackendPoolDeletedFromVMSets(_ map[string]bool, _ []string) error { return nil } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go index 127c1b07c..d7602be72 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go @@ -47,6 +47,7 @@ type StorageType string const ( StorageTypeBlob StorageType = "blob" StorageTypeFile StorageType = "file" + blobNameSuffix = "-blob" ) // AccountOptions contains the fields which are used to create storage account. 
@@ -481,7 +482,7 @@ func (az *Cloud) EnsureStorageAccount(ctx context.Context, accountOptions *Accou // Create private endpoint privateEndpointName := accountName + "-pvtendpoint" if accountOptions.StorageType == StorageTypeBlob { - privateEndpointName = privateEndpointName + "-blob" + privateEndpointName = privateEndpointName + blobNameSuffix } if err := az.createPrivateEndpoint(ctx, accountName, storageAccount.ID, privateEndpointName, vnetResourceGroup, vnetName, subnetName, location, accountOptions.StorageType); err != nil { return "", "", fmt.Errorf("create private endpoint for storage account(%s), resourceGroup(%s): %w", accountName, vnetResourceGroup, err) @@ -490,7 +491,7 @@ func (az *Cloud) EnsureStorageAccount(ctx context.Context, accountOptions *Accou // Create dns zone group dnsZoneGroupName := accountName + "-dnszonegroup" if accountOptions.StorageType == StorageTypeBlob { - dnsZoneGroupName = dnsZoneGroupName + "-blob" + dnsZoneGroupName = dnsZoneGroupName + blobNameSuffix } if err := az.createPrivateDNSZoneGroup(ctx, dnsZoneGroupName, privateEndpointName, vnetResourceGroup, vnetName, privateDNSZoneName); err != nil { return "", "", fmt.Errorf("create private DNS zone group - privateEndpoint(%s), vNetName(%s), resourceGroup(%s): %w", privateEndpointName, vnetName, vnetResourceGroup, err) @@ -526,7 +527,7 @@ func (az *Cloud) createPrivateEndpoint(ctx context.Context, accountName string, //Create private endpoint privateLinkServiceConnectionName := accountName + "-pvtsvcconn" if storageType == StorageTypeBlob { - privateLinkServiceConnectionName = privateLinkServiceConnectionName + "-blob" + privateLinkServiceConnectionName = privateLinkServiceConnectionName + blobNameSuffix } privateLinkServiceConnection := network.PrivateLinkServiceConnection{ Name: &privateLinkServiceConnectionName, diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go index 9651be53b..3c069f0a2 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go @@ -322,20 +322,6 @@ func isNodeInVMSSVMCache(nodeName string, vmssVMCache azcache.Resource) bool { return isInCache } -func extractVmssVMName(name string) (string, string, error) { - split := strings.SplitAfter(name, consts.VMSSNameSeparator) - if len(split) < 2 { - klog.V(3).Infof("Failed to extract vmssVMName %q", name) - return "", "", ErrorNotVmssInstance - } - - ssName := strings.Join(split[0:len(split)-1], "") - // removing the trailing `vmssNameSeparator` since we used SplitAfter - ssName = ssName[:len(ssName)-1] - instanceID := split[len(split)-1] - return ssName, instanceID, nil -} - // isServiceDualStack checks if a Service is dual-stack or not. func isServiceDualStack(svc *v1.Service) bool { return len(svc.Spec.IPFamilies) == 2 @@ -460,7 +446,7 @@ func getResourceByIPFamily(resource string, isDualStack, isIPv6 bool) string { // isFIPIPv6 checks if the frontend IP configuration is of IPv6. // NOTICE: isFIPIPv6 assumes the FIP is owned by the Service and it is the primary Service. 
-func (az *Cloud) isFIPIPv6(service *v1.Service, pipRG string, fip *network.FrontendIPConfiguration) (bool, error) { +func (az *Cloud) isFIPIPv6(service *v1.Service, _ string, fip *network.FrontendIPConfiguration) (bool, error) { isDualStack := isServiceDualStack(service) if !isDualStack { return service.Spec.IPFamilies[0] == v1.IPv6Protocol, nil @@ -477,24 +463,6 @@ func getResourceIDPrefix(id string) string { return id[:idx] } -// fillSubnet fills subnet value into the variable. -func (az *Cloud) fillSubnet(subnet *network.Subnet, subnetName string) error { - if subnet == nil { - return fmt.Errorf("subnet is nil, should not happen") - } - if subnet.ID == nil { - curSubnet, existsSubnet, err := az.getSubnet(az.VnetName, subnetName) - if err != nil { - return err - } - if !existsSubnet { - return fmt.Errorf("failed to get subnet: %s/%s", az.VnetName, subnetName) - } - *subnet = curSubnet - } - return nil -} - func getLBNameFromBackendPoolID(backendPoolID string) (string, error) { matches := backendPoolIDRE.FindStringSubmatch(backendPoolID) if len(matches) != 2 { diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go index 5ab2b996b..d423a2dfd 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go @@ -706,16 +706,6 @@ func (ss *ScaleSet) getPrimaryInterfaceID(vm *virtualmachine.VirtualMachine) (st return "", fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", pointer.StringDeref(machine.Name, "")) } -// getVmssMachineID returns the full identifier of a vmss virtual machine. -func (az *Cloud) getVmssMachineID(subscriptionID, resourceGroup, scaleSetName, instanceID string) string { - return fmt.Sprintf( - consts.VmssMachineIDTemplate, - subscriptionID, - strings.ToLower(resourceGroup), - scaleSetName, - instanceID) -} - // machineName is composed of computerNamePrefix and 36-based instanceID. // And instanceID part if in fixed length of 6 characters. // Refer https://msftstack.wordpress.com/2017/05/10/figuring-out-azure-vm-scale-set-machine-names/. @@ -753,36 +743,6 @@ func extractResourceGroupByProviderID(providerID string) (string, error) { return matches[1], nil } -// listScaleSets lists all scale sets with orchestrationMode ScaleSetVM. -func (ss *ScaleSet) listScaleSets(resourceGroup string) ([]string, error) { - ctx, cancel := getContextWithCancel() - defer cancel() - - allScaleSets, rerr := ss.VirtualMachineScaleSetsClient.List(ctx, resourceGroup) - if rerr != nil { - klog.Errorf("VirtualMachineScaleSetsClient.List failed: %v", rerr) - return nil, rerr.Error() - } - - ssNames := make([]string, 0) - for _, vmss := range allScaleSets { - name := *vmss.Name - if vmss.Sku != nil && pointer.Int64Deref(vmss.Sku.Capacity, 0) == 0 { - klog.V(3).Infof("Capacity of VMSS %q is 0, skipping", name) - continue - } - - if vmss.VirtualMachineScaleSetProperties == nil || vmss.VirtualMachineScaleSetProperties.VirtualMachineProfile == nil { - klog.V(3).Infof("VMSS %q orchestrationMode is VirtualMachine, skipping", name) - continue - } - - ssNames = append(ssNames, name) - } - - return ssNames, nil -} - // getNodeIdentityByNodeName use the VMSS cache to find a node's resourcegroup and vmss, returned in a nodeIdentity. 
func (ss *ScaleSet) getNodeIdentityByNodeName(nodeName string, crt azcache.AzureCacheReadType) (*nodeIdentity, error) { getter := func(nodeName string, crt azcache.AzureCacheReadType) (*nodeIdentity, error) { @@ -1067,7 +1027,7 @@ func getPrimaryIPConfigFromVMSSNetworkConfig(config *compute.VirtualMachineScale // EnsureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is // participating in the specified LoadBalancer Backend Pool, which returns (resourceGroup, vmasName, instanceID, vmssVM, error). -func (ss *ScaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetNameOfLB string) (string, string, string, *compute.VirtualMachineScaleSetVM, error) { +func (ss *ScaleSet) EnsureHostInPool(_ *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetNameOfLB string) (string, string, string, *compute.VirtualMachineScaleSetVM, error) { vmName := mapNodeNameToVMName(nodeName) vm, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault) if err != nil { @@ -1199,7 +1159,7 @@ func getVmssAndResourceGroupNameByVMID(id string) (string, string, error) { return matches[1], matches[2], nil } -func (ss *ScaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetNameOfLB string) error { +func (ss *ScaleSet) ensureVMSSInPool(_ *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetNameOfLB string) error { klog.V(2).Infof("ensureVMSSInPool: ensuring VMSS with backendPoolID %s", backendPoolID) vmssNamesMap := make(map[string]bool) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex.go index 42202a5af..c1a4f8893 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex.go @@ -555,7 +555,7 @@ func (fs *FlexScaleSet) EnsureHostInPool(service *v1.Service, nodeName types.Nod } -func (fs *FlexScaleSet) ensureVMSSFlexInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetNameOfLB string) error { +func (fs *FlexScaleSet) ensureVMSSFlexInPool(_ *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetNameOfLB string) error { klog.V(2).Infof("ensureVMSSFlexInPool: ensuring VMSS Flex with backendPoolID %s", backendPoolID) vmssFlexIDsMap := make(map[string]bool) @@ -584,12 +584,12 @@ func (fs *FlexScaleSet) ensureVMSSFlexInPool(service *v1.Service, nodes []*v1.No // in this scenario the vmSetName is an empty string and the name of vmss should be obtained from the provider IDs of nodes vmssFlexID, err := fs.getNodeVmssFlexID(node.Name) if err != nil { - klog.Error("ensureVMSSFlexInPool: failed to get VMSS Flex ID of node: %s, will skip checking and continue", node.Name) + klog.Errorf("ensureVMSSFlexInPool: failed to get VMSS Flex ID of node: %s, will skip checking and continue", node.Name) continue } resourceGroupName, err := fs.GetNodeResourceGroup(node.Name) if err != nil { - klog.Error("ensureVMSSFlexInPool: failed to get resource group of node: %s, will skip checking and continue", node.Name) + klog.Errorf("ensureVMSSFlexInPool: failed to get resource group of node: %s, will skip checking and continue", node.Name) continue } @@ -601,7 +601,7 @@ func (fs *FlexScaleSet) ensureVMSSFlexInPool(service *v1.Service, nodes []*v1.No } else { vmssFlexID, err := fs.getVmssFlexIDByName(vmSetNameOfLB) if err != nil { - klog.Error("ensureVMSSFlexInPool: failed to get VMSS Flex ID of vmSet: %s, ", 
vmSetNameOfLB) + klog.Errorf("ensureVMSSFlexInPool: failed to get VMSS Flex ID of vmSet: %s", vmSetNameOfLB) return err } vmssFlexIDsMap[vmssFlexID] = true diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_zones.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_zones.go index 7260f850c..458e31f07 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_zones.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_zones.go @@ -144,7 +144,7 @@ func (az *Cloud) GetZoneID(zoneLabel string) string { // DEPRECATED: Zones is deprecated in favor of retrieving zone/region information from InstancesV2. // This interface will not be called if InstancesV2 is enabled. // If the node is not running with availability zones, then it will fall back to fault domain. -func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) { +func (az *Cloud) GetZone(_ context.Context) (cloudprovider.Zone, error) { if az.UseInstanceMetadata { metadata, err := az.Metadata.GetMetadata(azcache.CacheReadTypeUnsafe) if err != nil { @@ -211,7 +211,7 @@ func (az *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cl // does not initialize node data. // DEPRECATED: Zones is deprecated in favor of retrieving zone/region information from InstancesV2. // This interface will not be called if InstancesV2 is enabled. -func (az *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) { +func (az *Cloud) GetZoneByNodeName(_ context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) { // Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them. unmanaged, err := az.IsNodeUnmanaged(string(nodeName)) if err != nil { diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/azure_auth.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/azure_auth.go index 7a0269166..e149bcac2 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/azure_auth.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/azure_auth.go @@ -34,6 +34,7 @@ import ( "k8s.io/klog/v2" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient" "sigs.k8s.io/cloud-provider-azure/pkg/consts" ) @@ -48,40 +49,17 @@ const ( // AzureAuthConfig holds auth related part of cloud config type AzureAuthConfig struct { - // The cloud environment identifier. 
Takes values from https://github.com/Azure/go-autorest/blob/ec5f4903f77ed9927ac95b19ab8e44ada64c1356/autorest/azure/environments.go#L13 - Cloud string `json:"cloud,omitempty" yaml:"cloud,omitempty"` - // The AAD Tenant ID for the Subscription that the cluster is deployed in - TenantID string `json:"tenantId,omitempty" yaml:"tenantId,omitempty"` - // The ClientID for an AAD application with RBAC access to talk to Azure RM APIs - AADClientID string `json:"aadClientId,omitempty" yaml:"aadClientId,omitempty"` - // The ClientSecret for an AAD application with RBAC access to talk to Azure RM APIs - AADClientSecret string `json:"aadClientSecret,omitempty" yaml:"aadClientSecret,omitempty" datapolicy:"token"` - // The path of a client certificate for an AAD application with RBAC access to talk to Azure RM APIs - AADClientCertPath string `json:"aadClientCertPath,omitempty" yaml:"aadClientCertPath,omitempty"` - // The password of the client certificate for an AAD application with RBAC access to talk to Azure RM APIs - AADClientCertPassword string `json:"aadClientCertPassword,omitempty" yaml:"aadClientCertPassword,omitempty" datapolicy:"password"` - // Use managed service identity for the virtual machine to access Azure ARM APIs - UseManagedIdentityExtension bool `json:"useManagedIdentityExtension,omitempty" yaml:"useManagedIdentityExtension,omitempty"` - // UserAssignedIdentityID contains the Client ID of the user assigned MSI which is assigned to the underlying VMs. If empty the user assigned identity is not used. - // More details of the user assigned identity can be found at: https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/overview - // For the user assigned identity specified here to be used, the UseManagedIdentityExtension has to be set to true. - UserAssignedIdentityID string `json:"userAssignedIdentityID,omitempty" yaml:"userAssignedIdentityID,omitempty"` + azclient.ARMClientConfig `json:",inline" yaml:",inline"` + azclient.AzureAuthConfig `json:",inline" yaml:",inline"` + // The ID of the Azure Subscription that the cluster is deployed in SubscriptionID string `json:"subscriptionId,omitempty" yaml:"subscriptionId,omitempty"` // IdentitySystem indicates the identity provider. Relevant only to hybrid clouds (Azure Stack). // Allowed values are 'azure_ad' (default), 'adfs'. IdentitySystem string `json:"identitySystem,omitempty" yaml:"identitySystem,omitempty"` - // ResourceManagerEndpoint is the cloud's resource manager endpoint. If set, cloud provider queries this endpoint - // in order to generate an autorest.Environment instance instead of using one of the pre-defined Environments. 
- ResourceManagerEndpoint string `json:"resourceManagerEndpoint,omitempty" yaml:"resourceManagerEndpoint,omitempty"` - // The AAD Tenant ID for the Subscription that the network resources are deployed in - NetworkResourceTenantID string `json:"networkResourceTenantID,omitempty" yaml:"networkResourceTenantID,omitempty"` + // The ID of the Azure Subscription that the network resources are deployed in NetworkResourceSubscriptionID string `json:"networkResourceSubscriptionID,omitempty" yaml:"networkResourceSubscriptionID,omitempty"` - // The AAD federated token file - AADFederatedTokenFile string `json:"aadFederatedTokenFile,omitempty" yaml:"aadFederatedTokenFile,omitempty"` - // Use workload identity federation for the virtual machine to access Azure ARM APIs - UseFederatedWorkloadIdentityExtension bool `json:"useFederatedWorkloadIdentityExtension,omitempty" yaml:"useFederatedWorkloadIdentityExtension,omitempty"` } // GetServicePrincipalToken creates a new service principal token based on the configuration. diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/azure_ratelimit.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/azure_ratelimit.go index 9183142cd..ea3be3059 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/azure_ratelimit.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/azure_ratelimit.go @@ -29,7 +29,7 @@ const ( // CloudProviderRateLimitConfig indicates the rate limit config for each clients. type CloudProviderRateLimitConfig struct { // The default rate limit config options. - azclients.RateLimitConfig + azclients.RateLimitConfig `json:",inline" yaml:",inline"` // Rate limit config for each clients. Values would override default settings above. RouteRateLimit *azclients.RateLimitConfig `json:"routeRateLimit,omitempty" yaml:"routeRateLimit,omitempty"` diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_error.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_error.go index ef47fe476..81691fc89 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_error.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_error.go @@ -296,14 +296,14 @@ func GetStatusNotFoundAndForbiddenIgnoredError(resp *http.Response, err error) * // Returns nil when it is StatusNotFound error. if rerr.HTTPStatusCode == http.StatusNotFound { - klog.V(3).Infof("Ignoring StatusNotFound error: %w", rerr) + klog.V(3).Infof("Ignoring StatusNotFound error: %+v", rerr) return nil } // Returns nil if the status code is StatusForbidden. // This happens when AuthorizationFailed is reported from Azure API. if rerr.HTTPStatusCode == http.StatusForbidden { - klog.V(3).Infof("Ignoring StatusForbidden error: %w", rerr) + klog.V(3).Infof("Ignoring StatusForbidden error: %+v", rerr) return nil } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_retry.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_retry.go index 4093b6ba4..81eb23a58 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_retry.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_retry.go @@ -20,7 +20,6 @@ import ( "html" "math/rand" "net/http" - "strings" "time" "github.com/Azure/go-autorest/autorest" @@ -86,21 +85,6 @@ func (b *Backoff) WithRetriableHTTPStatusCodes(httpStatusCodes []int) *Backoff { return &newBackoff } -// isNonRetriableError returns true if the Error is one of NonRetriableErrors. 
-func (b *Backoff) isNonRetriableError(rerr *Error) bool { - if rerr == nil { - return false - } - - for _, err := range b.NonRetriableErrors { - if strings.Contains(rerr.RawError.Error(), err) { - return true - } - } - - return false -} - // Step (1) returns an amount of time to sleep determined by the // original Duration and Jitter and (2) mutates the provided Backoff // to update its Steps and Duration. @@ -161,7 +145,7 @@ func doBackoffRetry(s autorest.Sender, r *http.Request, backoff Backoff) (resp * for backoff.Steps > 0 { err = rr.Prepare() if err != nil { - return + return resp, err } resp, err = s.Do(rr.Request()) rerr := GetError(resp, err) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/deepcopy/deepcopy.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/deepcopy/deepcopy.go index 4282ad14a..9ba7d8699 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/deepcopy/deepcopy.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/deepcopy/deepcopy.go @@ -66,12 +66,12 @@ func copyNormal(src interface{}) interface{} { to := reflect.New(from.Type()).Elem() - copy(from, to) + copyCustomimpl(from, to) return to.Interface() } -func copy(from, to reflect.Value) { +func copyCustomimpl(from, to reflect.Value) { // Check if DeepCopy() is already implemented for the interface if from.CanInterface() { if deepcopy, ok := from.Interface().(deepCopyInterface); ok { @@ -88,7 +88,7 @@ func copy(from, to reflect.Value) { } to.Set(reflect.New(fromValue.Type())) - copy(fromValue, to.Elem()) + copyCustomimpl(fromValue, to.Elem()) case reflect.Interface: if from.IsNil() { @@ -97,7 +97,7 @@ func copy(from, to reflect.Value) { fromValue := from.Elem() toValue := reflect.New(fromValue.Type()).Elem() - copy(fromValue, toValue) + copyCustomimpl(fromValue, toValue) to.Set(toValue) case reflect.Struct: @@ -106,7 +106,7 @@ func copy(from, to reflect.Value) { // It is an unexported field. 
continue } - copy(from.Field(i), to.Field(i)) + copyCustomimpl(from.Field(i), to.Field(i)) } case reflect.Slice: @@ -116,7 +116,7 @@ func copy(from, to reflect.Value) { to.Set(reflect.MakeSlice(from.Type(), from.Len(), from.Cap())) for i := 0; i < from.Len(); i++ { - copy(from.Index(i), to.Index(i)) + copyCustomimpl(from.Index(i), to.Index(i)) } case reflect.Map: @@ -128,7 +128,7 @@ func copy(from, to reflect.Value) { for _, key := range from.MapKeys() { fromValue := from.MapIndex(key) toValue := reflect.New(fromValue.Type()).Elem() - copy(fromValue, toValue) + copyCustomimpl(fromValue, toValue) copiedKey := Copy(key.Interface()) to.SetMapIndex(reflect.ValueOf(copiedKey), toValue) } From f08a64b9c6060a756fd682ec633e4ddcd01d3b09 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Mon, 11 Dec 2023 08:45:47 +0000 Subject: [PATCH 009/157] test: run k8s 1.27 external e2e tests --- test/external-e2e/run.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/external-e2e/run.sh b/test/external-e2e/run.sh index 64caa2b2e..8e952f469 100755 --- a/test/external-e2e/run.sh +++ b/test/external-e2e/run.sh @@ -21,7 +21,7 @@ DRIVER="blob" setup_e2e_binaries() { # download k8s external e2e binary - curl -sL https://dl.k8s.io/release/v1.26.4/kubernetes-test-linux-amd64.tar.gz --output e2e-tests.tar.gz + curl -sL https://dl.k8s.io/release/v1.27.4/kubernetes-test-linux-amd64.tar.gz --output e2e-tests.tar.gz tar -xvf e2e-tests.tar.gz && rm e2e-tests.tar.gz export EXTRA_HELM_OPTIONS="--set driver.name=$DRIVER.csi.azure.com --set controller.name=csi-$DRIVER-controller --set node.name=csi-$DRIVER-node --set driver.azureGoSDKLogLevel=INFO" From cb9e850f384c1b8e2a45b88c10af835837308a1c Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Mon, 11 Dec 2023 10:53:18 +0000 Subject: [PATCH 010/157] test: add more external e2e tests --- test/external-e2e/testdriver-blobfuse.yaml | 2 ++ test/external-e2e/testdriver-nfs.yaml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/test/external-e2e/testdriver-blobfuse.yaml b/test/external-e2e/testdriver-blobfuse.yaml index debd66558..65da74865 100644 --- a/test/external-e2e/testdriver-blobfuse.yaml +++ b/test/external-e2e/testdriver-blobfuse.yaml @@ -18,3 +18,5 @@ DriverInfo: volumeLimits: false snapshotDataSource: false pvcDataSource: true + multiplePVsSameID: true + readWriteOncePod: true diff --git a/test/external-e2e/testdriver-nfs.yaml b/test/external-e2e/testdriver-nfs.yaml index 8a7b07ca7..18c208aa0 100644 --- a/test/external-e2e/testdriver-nfs.yaml +++ b/test/external-e2e/testdriver-nfs.yaml @@ -18,3 +18,5 @@ DriverInfo: volumeLimits: false snapshotDataSource: false pvcDataSource: true + multiplePVsSameID: true + readWriteOncePod: true From ff1af2ca511be4a8cb91ebff75a4c6cc7db424dd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Dec 2023 16:31:09 +0000 Subject: [PATCH 011/157] chore(deps): bump golang.org/x/net from 0.17.0 to 0.19.0 Bumps [golang.org/x/net](https://github.com/golang/net) from 0.17.0 to 0.19.0. - [Commits](https://github.com/golang/net/compare/v0.17.0...v0.19.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 +- vendor/golang.org/x/net/context/go17.go | 1 - vendor/golang.org/x/net/context/go19.go | 1 - vendor/golang.org/x/net/context/pre_go17.go | 1 - vendor/golang.org/x/net/context/pre_go19.go | 1 - vendor/golang.org/x/net/http2/databuffer.go | 59 ++++++++++---------- vendor/golang.org/x/net/http2/go111.go | 30 ---------- vendor/golang.org/x/net/http2/go115.go | 27 --------- vendor/golang.org/x/net/http2/go118.go | 17 ------ vendor/golang.org/x/net/http2/not_go111.go | 21 ------- vendor/golang.org/x/net/http2/not_go115.go | 31 ---------- vendor/golang.org/x/net/http2/not_go118.go | 17 ------ vendor/golang.org/x/net/http2/server.go | 24 ++------ vendor/golang.org/x/net/http2/transport.go | 33 ++++++++++- vendor/golang.org/x/net/idna/go118.go | 1 - vendor/golang.org/x/net/idna/idna10.0.0.go | 1 - vendor/golang.org/x/net/idna/idna9.0.0.go | 1 - vendor/golang.org/x/net/idna/pre_go118.go | 1 - vendor/golang.org/x/net/idna/tables10.0.0.go | 1 - vendor/golang.org/x/net/idna/tables11.0.0.go | 1 - vendor/golang.org/x/net/idna/tables12.0.0.go | 1 - vendor/golang.org/x/net/idna/tables13.0.0.go | 1 - vendor/golang.org/x/net/idna/tables15.0.0.go | 1 - vendor/golang.org/x/net/idna/tables9.0.0.go | 1 - vendor/golang.org/x/net/idna/trie12.0.0.go | 1 - vendor/golang.org/x/net/idna/trie13.0.0.go | 1 - vendor/modules.txt | 4 +- 28 files changed, 72 insertions(+), 213 deletions(-) delete mode 100644 vendor/golang.org/x/net/http2/go111.go delete mode 100644 vendor/golang.org/x/net/http2/go115.go delete mode 100644 vendor/golang.org/x/net/http2/go118.go delete mode 100644 vendor/golang.org/x/net/http2/not_go111.go delete mode 100644 vendor/golang.org/x/net/http2/not_go115.go delete mode 100644 vendor/golang.org/x/net/http2/not_go118.go diff --git a/go.mod b/go.mod index 50c8225f3..6a9e452a0 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/pborman/uuid v1.2.1 github.com/pelletier/go-toml v1.9.5 github.com/stretchr/testify v1.8.4 - golang.org/x/net v0.17.0 + golang.org/x/net v0.19.0 google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.31.0 k8s.io/api v0.28.4 diff --git a/go.sum b/go.sum index c9d87e875..24f74efc1 100644 --- a/go.sum +++ b/go.sum @@ -345,8 +345,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go index 2cb9c408f..0c1b86793 100644 --- a/vendor/golang.org/x/net/context/go17.go +++ b/vendor/golang.org/x/net/context/go17.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build go1.7 -// +build go1.7 package context diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go index 64d31ecc3..e31e35a90 100644 --- a/vendor/golang.org/x/net/context/go19.go +++ b/vendor/golang.org/x/net/context/go19.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.9 -// +build go1.9 package context diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go index 7b6b68511..065ff3dfa 100644 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.7 -// +build !go1.7 package context diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go index 1f9715341..ec5a63803 100644 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ b/vendor/golang.org/x/net/context/pre_go19.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.9 -// +build !go1.9 package context diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go index a3067f8de..e6f55cbd1 100644 --- a/vendor/golang.org/x/net/http2/databuffer.go +++ b/vendor/golang.org/x/net/http2/databuffer.go @@ -20,41 +20,44 @@ import ( // TODO: Benchmark to determine if the pools are necessary. The GC may have // improved enough that we can instead allocate chunks like this: // make([]byte, max(16<<10, expectedBytesRemaining)) -var ( - dataChunkSizeClasses = []int{ - 1 << 10, - 2 << 10, - 4 << 10, - 8 << 10, - 16 << 10, - } - dataChunkPools = [...]sync.Pool{ - {New: func() interface{} { return make([]byte, 1<<10) }}, - {New: func() interface{} { return make([]byte, 2<<10) }}, - {New: func() interface{} { return make([]byte, 4<<10) }}, - {New: func() interface{} { return make([]byte, 8<<10) }}, - {New: func() interface{} { return make([]byte, 16<<10) }}, - } -) +var dataChunkPools = [...]sync.Pool{ + {New: func() interface{} { return new([1 << 10]byte) }}, + {New: func() interface{} { return new([2 << 10]byte) }}, + {New: func() interface{} { return new([4 << 10]byte) }}, + {New: func() interface{} { return new([8 << 10]byte) }}, + {New: func() interface{} { return new([16 << 10]byte) }}, +} func getDataBufferChunk(size int64) []byte { - i := 0 - for ; i < len(dataChunkSizeClasses)-1; i++ { - if size <= int64(dataChunkSizeClasses[i]) { - break - } + switch { + case size <= 1<<10: + return dataChunkPools[0].Get().(*[1 << 10]byte)[:] + case size <= 2<<10: + return dataChunkPools[1].Get().(*[2 << 10]byte)[:] + case size <= 4<<10: + return dataChunkPools[2].Get().(*[4 << 10]byte)[:] + case size <= 8<<10: + return dataChunkPools[3].Get().(*[8 << 10]byte)[:] + default: + return dataChunkPools[4].Get().(*[16 << 10]byte)[:] } - return dataChunkPools[i].Get().([]byte) } func putDataBufferChunk(p []byte) { - for i, n := range dataChunkSizeClasses { - if len(p) == n { - dataChunkPools[i].Put(p) - return - } + switch len(p) { + case 1 << 10: + dataChunkPools[0].Put((*[1 << 10]byte)(p)) + case 2 << 10: + dataChunkPools[1].Put((*[2 << 10]byte)(p)) + case 4 << 10: + dataChunkPools[2].Put((*[4 << 10]byte)(p)) + case 8 << 10: + dataChunkPools[3].Put((*[8 << 10]byte)(p)) + case 16 << 10: + dataChunkPools[4].Put((*[16 << 10]byte)(p)) + default: + panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) } - panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) } // dataBuffer is an io.ReadWriter backed by 
a list of data chunks. diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go deleted file mode 100644 index 5bf62b032..000000000 --- a/vendor/golang.org/x/net/http2/go111.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.11 -// +build go1.11 - -package http2 - -import ( - "net/http/httptrace" - "net/textproto" -) - -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { - return trace != nil && trace.WroteHeaderField != nil -} - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { - if trace != nil && trace.WroteHeaderField != nil { - trace.WroteHeaderField(k, []string{v}) - } -} - -func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { - if trace != nil { - return trace.Got1xxResponse - } - return nil -} diff --git a/vendor/golang.org/x/net/http2/go115.go b/vendor/golang.org/x/net/http2/go115.go deleted file mode 100644 index 908af1ab9..000000000 --- a/vendor/golang.org/x/net/http2/go115.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.15 -// +build go1.15 - -package http2 - -import ( - "context" - "crypto/tls" -) - -// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS -// connection. -func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { - dialer := &tls.Dialer{ - Config: cfg, - } - cn, err := dialer.DialContext(ctx, network, addr) - if err != nil { - return nil, err - } - tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed - return tlsCn, nil -} diff --git a/vendor/golang.org/x/net/http2/go118.go b/vendor/golang.org/x/net/http2/go118.go deleted file mode 100644 index aca4b2b31..000000000 --- a/vendor/golang.org/x/net/http2/go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package http2 - -import ( - "crypto/tls" - "net" -) - -func tlsUnderlyingConn(tc *tls.Conn) net.Conn { - return tc.NetConn() -} diff --git a/vendor/golang.org/x/net/http2/not_go111.go b/vendor/golang.org/x/net/http2/not_go111.go deleted file mode 100644 index cc0baa819..000000000 --- a/vendor/golang.org/x/net/http2/not_go111.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.11 -// +build !go1.11 - -package http2 - -import ( - "net/http/httptrace" - "net/textproto" -) - -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false } - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {} - -func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { - return nil -} diff --git a/vendor/golang.org/x/net/http2/not_go115.go b/vendor/golang.org/x/net/http2/not_go115.go deleted file mode 100644 index e6c04cf7a..000000000 --- a/vendor/golang.org/x/net/http2/not_go115.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2021 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.15 -// +build !go1.15 - -package http2 - -import ( - "context" - "crypto/tls" -) - -// dialTLSWithContext opens a TLS connection. -func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { - cn, err := tls.Dial(network, addr, cfg) - if err != nil { - return nil, err - } - if err := cn.Handshake(); err != nil { - return nil, err - } - if cfg.InsecureSkipVerify { - return cn, nil - } - if err := cn.VerifyHostname(cfg.ServerName); err != nil { - return nil, err - } - return cn, nil -} diff --git a/vendor/golang.org/x/net/http2/not_go118.go b/vendor/golang.org/x/net/http2/not_go118.go deleted file mode 100644 index eab532c96..000000000 --- a/vendor/golang.org/x/net/http2/not_go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package http2 - -import ( - "crypto/tls" - "net" -) - -func tlsUnderlyingConn(tc *tls.Conn) net.Conn { - return nil -} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 02c88b6b3..ae94c6408 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -2549,7 +2549,6 @@ type responseWriterState struct { wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. sentHeader bool // have we sent the header frame? handlerDone bool // handler has finished - dirty bool // a Write failed; don't reuse this responseWriterState sentContentLen int64 // non-zero if handler set a Content-Length header wroteBytes int64 @@ -2669,7 +2668,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { date: date, }) if err != nil { - rws.dirty = true return 0, err } if endStream { @@ -2690,7 +2688,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { if len(p) > 0 || endStream { // only send a 0 byte DATA frame if we're ending the stream. if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { - rws.dirty = true return 0, err } } @@ -2702,9 +2699,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { trailers: rws.trailers, endStream: true, }) - if err != nil { - rws.dirty = true - } return len(p), err } return len(p), nil @@ -2920,14 +2914,12 @@ func (rws *responseWriterState) writeHeader(code int) { h.Del("Transfer-Encoding") } - if rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + rws.conn.writeHeaders(rws.stream, &writeResHeaders{ streamID: rws.stream.id, httpResCode: code, h: h, endStream: rws.handlerDone && !rws.hasTrailers(), - }) != nil { - rws.dirty = true - } + }) return } @@ -2992,19 +2984,10 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, func (w *responseWriter) handlerDone() { rws := w.rws - dirty := rws.dirty rws.handlerDone = true w.Flush() w.rws = nil - if !dirty { - // Only recycle the pool if all prior Write calls to - // the serverConn goroutine completed successfully. If - // they returned earlier due to resets from the peer - // there might still be write goroutines outstanding - // from the serverConn referencing the rws memory. See - // issue 20704. 
- responseWriterStatePool.Put(rws) - } + responseWriterStatePool.Put(rws) } // Push errors. @@ -3187,6 +3170,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) { panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) } + sc.curHandlers++ go sc.runHandler(rw, req, sc.handler.ServeHTTP) return promisedID, nil } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 4515b22c4..df578b86c 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -1018,7 +1018,7 @@ func (cc *ClientConn) forceCloseConn() { if !ok { return } - if nc := tlsUnderlyingConn(tc); nc != nil { + if nc := tc.NetConn(); nc != nil { nc.Close() } } @@ -3201,3 +3201,34 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) { trace.GotFirstResponseByte() } } + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { + return trace != nil && trace.WroteHeaderField != nil +} + +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(k, []string{v}) + } +} + +func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { + if trace != nil { + return trace.Got1xxResponse + } + return nil +} + +// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS +// connection. +func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { + dialer := &tls.Dialer{ + Config: cfg, + } + cn, err := dialer.DialContext(ctx, network, addr) + if err != nil { + return nil, err + } + tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed + return tlsCn, nil +} diff --git a/vendor/golang.org/x/net/idna/go118.go b/vendor/golang.org/x/net/idna/go118.go index c5c4338db..712f1ad83 100644 --- a/vendor/golang.org/x/net/idna/go118.go +++ b/vendor/golang.org/x/net/idna/go118.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build go1.18 -// +build go1.18 package idna diff --git a/vendor/golang.org/x/net/idna/idna10.0.0.go b/vendor/golang.org/x/net/idna/idna10.0.0.go index 64ccf85fe..7b3717884 100644 --- a/vendor/golang.org/x/net/idna/idna10.0.0.go +++ b/vendor/golang.org/x/net/idna/idna10.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build go1.10 -// +build go1.10 // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to diff --git a/vendor/golang.org/x/net/idna/idna9.0.0.go b/vendor/golang.org/x/net/idna/idna9.0.0.go index ee1698cef..cc6a892a4 100644 --- a/vendor/golang.org/x/net/idna/idna9.0.0.go +++ b/vendor/golang.org/x/net/idna/idna9.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build !go1.10 -// +build !go1.10 // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to diff --git a/vendor/golang.org/x/net/idna/pre_go118.go b/vendor/golang.org/x/net/idna/pre_go118.go index 3aaccab1c..40e74bb3d 100644 --- a/vendor/golang.org/x/net/idna/pre_go118.go +++ b/vendor/golang.org/x/net/idna/pre_go118.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. 
//go:build !go1.18 -// +build !go1.18 package idna diff --git a/vendor/golang.org/x/net/idna/tables10.0.0.go b/vendor/golang.org/x/net/idna/tables10.0.0.go index d1d62ef45..c6c2bf10a 100644 --- a/vendor/golang.org/x/net/idna/tables10.0.0.go +++ b/vendor/golang.org/x/net/idna/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package idna diff --git a/vendor/golang.org/x/net/idna/tables11.0.0.go b/vendor/golang.org/x/net/idna/tables11.0.0.go index 167efba71..76789393c 100644 --- a/vendor/golang.org/x/net/idna/tables11.0.0.go +++ b/vendor/golang.org/x/net/idna/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package idna diff --git a/vendor/golang.org/x/net/idna/tables12.0.0.go b/vendor/golang.org/x/net/idna/tables12.0.0.go index ab40f7bcc..0600cd2ae 100644 --- a/vendor/golang.org/x/net/idna/tables12.0.0.go +++ b/vendor/golang.org/x/net/idna/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package idna diff --git a/vendor/golang.org/x/net/idna/tables13.0.0.go b/vendor/golang.org/x/net/idna/tables13.0.0.go index 66701eadf..2fb768ef6 100644 --- a/vendor/golang.org/x/net/idna/tables13.0.0.go +++ b/vendor/golang.org/x/net/idna/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package idna diff --git a/vendor/golang.org/x/net/idna/tables15.0.0.go b/vendor/golang.org/x/net/idna/tables15.0.0.go index 40033778f..5ff05fe1a 100644 --- a/vendor/golang.org/x/net/idna/tables15.0.0.go +++ b/vendor/golang.org/x/net/idna/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 -// +build go1.21 package idna diff --git a/vendor/golang.org/x/net/idna/tables9.0.0.go b/vendor/golang.org/x/net/idna/tables9.0.0.go index 4074b5332..0f25e84ca 100644 --- a/vendor/golang.org/x/net/idna/tables9.0.0.go +++ b/vendor/golang.org/x/net/idna/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package idna diff --git a/vendor/golang.org/x/net/idna/trie12.0.0.go b/vendor/golang.org/x/net/idna/trie12.0.0.go index bb63f904b..8a75b9667 100644 --- a/vendor/golang.org/x/net/idna/trie12.0.0.go +++ b/vendor/golang.org/x/net/idna/trie12.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build !go1.16 -// +build !go1.16 package idna diff --git a/vendor/golang.org/x/net/idna/trie13.0.0.go b/vendor/golang.org/x/net/idna/trie13.0.0.go index 7d68a8dc1..fa45bb907 100644 --- a/vendor/golang.org/x/net/idna/trie13.0.0.go +++ b/vendor/golang.org/x/net/idna/trie13.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. 
//go:build go1.16 -// +build go1.16 package idna diff --git a/vendor/modules.txt b/vendor/modules.txt index cc074fdc1..7b12f03e3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -550,8 +550,8 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/slices -# golang.org/x/net v0.17.0 -## explicit; go 1.17 +# golang.org/x/net v0.19.0 +## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html golang.org/x/net/html/atom From 486e50b6492826e4f77417c351273df7451049b8 Mon Sep 17 00:00:00 2001 From: Fan Shang Xiang Date: Mon, 11 Dec 2023 16:54:06 +0800 Subject: [PATCH 012/157] Refactor: Extract kubeclient from cloud provider Signed-off-by: Fan Shang Xiang --- pkg/blob/azure.go | 42 ++------------ pkg/blob/azure_test.go | 95 ++++--------------------------- pkg/blob/blob.go | 30 ++++++++-- pkg/blob/blob_test.go | 33 ++++++++--- pkg/blob/controllerserver_test.go | 4 +- pkg/blobplugin/main.go | 70 ++++++++--------------- pkg/util/util.go | 23 ++++++++ pkg/util/util_test.go | 76 +++++++++++++++++++++++++ test/e2e/suite_test.go | 7 ++- 9 files changed, 196 insertions(+), 184 deletions(-) diff --git a/pkg/blob/azure.go b/pkg/blob/azure.go index c0bff4421..a99a59653 100644 --- a/pkg/blob/azure.go +++ b/pkg/blob/azure.go @@ -17,7 +17,6 @@ limitations under the License. package blob import ( - "errors" "fmt" "os" "strings" @@ -27,9 +26,7 @@ import ( "github.com/Azure/azure-sdk-for-go/storage" "github.com/Azure/go-autorest/autorest" "golang.org/x/net/context" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" "k8s.io/utils/pointer" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader" @@ -45,36 +42,20 @@ var ( // IsAzureStackCloud decides whether the driver is running on Azure Stack Cloud. 
func IsAzureStackCloud(cloud *azure.Cloud) bool { - return !cloud.Config.DisableAzureStackCloud && strings.EqualFold(cloud.Config.Cloud, "AZURESTACKCLOUD") + return !cloud.DisableAzureStackCloud && strings.EqualFold(cloud.Cloud, "AZURESTACKCLOUD") } // getCloudProvider get Azure Cloud Provider -func GetCloudProvider(ctx context.Context, kubeconfig, nodeID, secretName, secretNamespace, userAgent string, allowEmptyCloudConfig bool, kubeAPIQPS float64, kubeAPIBurst int) (*azure.Cloud, error) { +func GetCloudProvider(ctx context.Context, kubeClient kubernetes.Interface, nodeID, secretName, secretNamespace, userAgent string, allowEmptyCloudConfig bool) (*azure.Cloud, error) { var ( config *azure.Config - kubeClient *clientset.Clientset fromSecret bool + err error ) az := &azure.Cloud{} az.Environment.StorageEndpointSuffix = storage.DefaultBaseURL - kubeCfg, err := getKubeConfig(kubeconfig) - if err == nil && kubeCfg != nil { - // set QPS and QPS Burst as higher values - klog.V(2).Infof("set QPS(%f) and QPS Burst(%d) for driver kubeClient", float32(kubeAPIQPS), kubeAPIBurst) - kubeCfg.QPS = float32(kubeAPIQPS) - kubeCfg.Burst = kubeAPIBurst - if kubeClient, err = clientset.NewForConfig(kubeCfg); err != nil { - klog.Warningf("NewForConfig failed with error: %v", err) - } - } else { - klog.Warningf("get kubeconfig(%s) failed with error: %v", kubeconfig, err) - if !os.IsNotExist(err) && !errors.Is(err, rest.ErrNotInCluster) { - return az, fmt.Errorf("failed to get KubeClient: %w", err) - } - } - if kubeClient != nil { az.KubeClient = kubeClient klog.V(2).Infof("reading cloud config from secret %s/%s", secretNamespace, secretName) @@ -180,7 +161,7 @@ func (d *Driver) initializeKvClient() (*kv.BaseClient, error) { func (d *Driver) getKeyvaultToken() (authorizer autorest.Authorizer, err error) { env := d.cloud.Environment kvEndPoint := strings.TrimSuffix(env.KeyVaultEndpoint, "/") - servicePrincipalToken, err := providerconfig.GetServicePrincipalToken(&d.cloud.Config.AzureAuthConfig, &env, kvEndPoint) + servicePrincipalToken, err := providerconfig.GetServicePrincipalToken(&d.cloud.AzureAuthConfig, &env, kvEndPoint) if err != nil { return nil, err } @@ -255,16 +236,3 @@ func (d *Driver) updateSubnetServiceEndpoints(ctx context.Context, vnetResourceG return nil } - -func getKubeConfig(kubeconfig string) (config *rest.Config, err error) { - if kubeconfig != "" { - if config, err = clientcmd.BuildConfigFromFlags("", kubeconfig); err != nil { - return nil, err - } - } else { - if config, err = rest.InClusterConfig(); err != nil { - return nil, err - } - } - return config, err -} diff --git a/pkg/blob/azure_test.go b/pkg/blob/azure_test.go index ce093fad6..4ba69d1a7 100644 --- a/pkg/blob/azure_test.go +++ b/pkg/blob/azure_test.go @@ -31,6 +31,7 @@ import ( "github.com/Azure/go-autorest/autorest/azure" "github.com/stretchr/testify/assert" + "sigs.k8s.io/blob-csi-driver/pkg/util" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/mocksubnetclient" azureprovider "sigs.k8s.io/cloud-provider-azure/pkg/provider" @@ -114,7 +115,7 @@ users: kubeconfig: emptyKubeConfig, nodeID: "", allowEmptyCloudConfig: true, - expectedErr: fmt.Errorf("failed to get KubeClient: invalid configuration: no configuration has been provided, try setting KUBERNETES_MASTER environment variable"), + expectedErr: fmt.Errorf("invalid configuration: no configuration has been provided, try setting KUBERNETES_MASTER environment variable"), }, { desc: "[failure] out of cluster & in cluster, specify a fake kubeconfig, no credential 
file", @@ -168,7 +169,14 @@ users: } os.Setenv(DefaultAzureCredentialFileEnv, fakeCredFile) } - cloud, err := GetCloudProvider(context.Background(), test.kubeconfig, test.nodeID, "", "", test.userAgent, test.allowEmptyCloudConfig, 25.0, 50) + kubeClient, err := util.GetKubeClient(test.kubeconfig, 25.0, 50, "") + if err != nil { + if !reflect.DeepEqual(err, test.expectedErr) && test.expectedErr != nil && !strings.Contains(err.Error(), test.expectedErr.Error()) { + t.Errorf("desc: %s,\n input: %q, GetCloudProvider err: %v, expectedErr: %v", test.desc, test.kubeconfig, err, test.expectedErr) + } + continue + } + cloud, err := GetCloudProvider(context.Background(), kubeClient, test.nodeID, "", "", test.userAgent, test.allowEmptyCloudConfig) if !reflect.DeepEqual(err, test.expectedErr) && test.expectedErr != nil && !strings.Contains(err.Error(), test.expectedErr.Error()) { t.Errorf("desc: %s,\n input: %q, GetCloudProvider err: %v, expectedErr: %v", test.desc, test.kubeconfig, err, test.expectedErr) } @@ -374,86 +382,3 @@ func TestUpdateSubnetServiceEndpoints(t *testing.T) { t.Run(tc.name, tc.testFunc) } } - -func TestGetKubeConfig(t *testing.T) { - emptyKubeConfig := "empty-Kube-Config" - validKubeConfig := "valid-Kube-Config" - fakeContent := ` -apiVersion: v1 -clusters: -- cluster: - server: https://localhost:8080 - name: foo-cluster -contexts: -- context: - cluster: foo-cluster - user: foo-user - namespace: bar - name: foo-context -current-context: foo-context -kind: Config -users: -- name: foo-user - user: - exec: - apiVersion: client.authentication.k8s.io/v1beta1 - args: - - arg-1 - - arg-2 - command: foo-command -` - err := createTestFile(emptyKubeConfig) - if err != nil { - t.Error(err) - } - defer func() { - if err := os.Remove(emptyKubeConfig); err != nil { - t.Error(err) - } - }() - - err = createTestFile(validKubeConfig) - if err != nil { - t.Error(err) - } - defer func() { - if err := os.Remove(validKubeConfig); err != nil { - t.Error(err) - } - }() - - if err := os.WriteFile(validKubeConfig, []byte(fakeContent), 0666); err != nil { - t.Error(err) - } - - tests := []struct { - desc string - kubeconfig string - expectError bool - envVariableHasConfig bool - envVariableConfigIsValid bool - }{ - { - desc: "[success] valid kube config passed", - kubeconfig: validKubeConfig, - expectError: false, - envVariableHasConfig: false, - envVariableConfigIsValid: false, - }, - { - desc: "[failure] invalid kube config passed", - kubeconfig: emptyKubeConfig, - expectError: true, - envVariableHasConfig: false, - envVariableConfigIsValid: false, - }, - } - - for _, test := range tests { - _, err := getKubeConfig(test.kubeconfig) - receiveError := (err != nil) - if test.expectError != receiveError { - t.Errorf("desc: %s,\n input: %q, GetCloudProvider err: %v, expectErr: %v", test.desc, test.kubeconfig, err, test.expectError) - } - } -} diff --git a/pkg/blob/blob.go b/pkg/blob/blob.go index 41c6515b2..0f6e693e1 100644 --- a/pkg/blob/blob.go +++ b/pkg/blob/blob.go @@ -17,6 +17,8 @@ limitations under the License. 
package blob import ( + "context" + "flag" "fmt" "os" "strconv" @@ -29,8 +31,6 @@ import ( az "github.com/Azure/go-autorest/autorest/azure" "github.com/container-storage-interface/spec/lib/go/csi" "github.com/pborman/uuid" - "golang.org/x/net/context" - v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -43,6 +43,7 @@ import ( csicommon "sigs.k8s.io/blob-csi-driver/pkg/csi-common" "sigs.k8s.io/blob-csi-driver/pkg/util" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" + "sigs.k8s.io/cloud-provider-azure/pkg/provider" azure "sigs.k8s.io/cloud-provider-azure/pkg/provider" ) @@ -167,11 +168,29 @@ type DriverOptions struct { SasTokenExpirationMinutes int } +func (option *DriverOptions) AddFlags() { + flag.StringVar(&option.BlobfuseProxyEndpoint, "blobfuse-proxy-endpoint", "unix://tmp/blobfuse-proxy.sock", "blobfuse-proxy endpoint") + flag.StringVar(&option.NodeID, "nodeid", "", "node id") + flag.StringVar(&option.DriverName, "drivername", DefaultDriverName, "name of the driver") + flag.BoolVar(&option.EnableBlobfuseProxy, "enable-blobfuse-proxy", false, "using blobfuse proxy for mounts") + flag.IntVar(&option.BlobfuseProxyConnTimout, "blobfuse-proxy-connect-timeout", 5, "blobfuse proxy connection timeout(seconds)") + flag.BoolVar(&option.EnableBlobMockMount, "enable-blob-mock-mount", false, "enable mock mount(only for testing)") + flag.BoolVar(&option.EnableGetVolumeStats, "enable-get-volume-stats", false, "allow GET_VOLUME_STATS on agent node") + flag.BoolVar(&option.AppendTimeStampInCacheDir, "append-timestamp-cache-dir", false, "append timestamp into cache directory on agent node") + flag.Uint64Var(&option.MountPermissions, "mount-permissions", 0777, "mounted folder permissions") + flag.BoolVar(&option.AllowInlineVolumeKeyAccessWithIdentity, "allow-inline-volume-key-access-with-idenitity", false, "allow accessing storage account key using cluster identity for inline volume") + flag.BoolVar(&option.AppendMountErrorHelpLink, "append-mount-error-help-link", true, "Whether to include a link for help with mount errors when a mount error occurs.") + flag.BoolVar(&option.EnableAznfsMount, "enable-aznfs-mount", false, "replace nfs mount with aznfs mount") + flag.IntVar(&option.VolStatsCacheExpireInMinutes, "vol-stats-cache-expire-in-minutes", 10, "The cache expire time in minutes for volume stats cache") + flag.IntVar(&option.SasTokenExpirationMinutes, "sas-token-expiration-minutes", 1440, "sas token expiration minutes during volume cloning") +} + // Driver implements all interfaces of CSI drivers type Driver struct { csicommon.CSIDriver cloud *azure.Cloud + KubeClient kubernetes.Interface blobfuseProxyEndpoint string // enableBlobMockMount is only for testing, DO NOT set as true in non-testing scenario enableBlobMockMount bool @@ -206,7 +225,8 @@ type Driver struct { // NewDriver Creates a NewCSIDriver object. Assumes vendor version is equal to driver version & // does not support optional driver plugin info manifest field. Refer to CSI spec for more details. 
-func NewDriver(options *DriverOptions, cloud *azure.Cloud) *Driver { +func NewDriver(options *DriverOptions, kubeClient kubernetes.Interface, cloud *provider.Cloud) *Driver { + var err error d := Driver{ volLockMap: util.NewLockMap(), subnetLockMap: util.NewLockMap(), @@ -222,12 +242,13 @@ func NewDriver(options *DriverOptions, cloud *azure.Cloud) *Driver { enableAznfsMount: options.EnableAznfsMount, sasTokenExpirationMinutes: options.SasTokenExpirationMinutes, azcopy: &util.Azcopy{}, + KubeClient: kubeClient, + cloud: cloud, } d.Name = options.DriverName d.Version = driverVersion d.NodeID = options.NodeID - var err error getter := func(key string) (interface{}, error) { return nil, nil } if d.accountSearchCache, err = azcache.NewTimedCache(time.Minute, getter, false); err != nil { klog.Fatalf("%v", err) @@ -242,7 +263,6 @@ func NewDriver(options *DriverOptions, cloud *azure.Cloud) *Driver { if d.volStatsCache, err = azcache.NewTimedCache(time.Duration(options.VolStatsCacheExpireInMinutes)*time.Minute, getter, false); err != nil { klog.Fatalf("%v", err) } - d.cloud = cloud d.mounter = &mount.SafeFormatAndMount{ Interface: mount.New(""), Exec: utilexec.New(), diff --git a/pkg/blob/blob_test.go b/pkg/blob/blob_test.go index 190f42eef..6941bd191 100644 --- a/pkg/blob/blob_test.go +++ b/pkg/blob/blob_test.go @@ -19,6 +19,7 @@ package blob import ( "context" "errors" + "flag" "fmt" "os" "reflect" @@ -29,12 +30,10 @@ import ( "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" - v1api "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" - "sigs.k8s.io/blob-csi-driver/pkg/util" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/mockstorageaccountclient" azure "sigs.k8s.io/cloud-provider-azure/pkg/provider" @@ -56,7 +55,7 @@ func NewFakeDriver() *Driver { BlobfuseProxyConnTimout: 5, EnableBlobMockMount: false, } - driver := NewDriver(&driverOptions, &azure.Cloud{}) + driver := NewDriver(&driverOptions, nil, &azure.Cloud{}) driver.Name = fakeDriverName driver.Version = vendorVersion driver.subnetLockMap = util.NewLockMap() @@ -72,7 +71,7 @@ func TestNewFakeDriver(t *testing.T) { BlobfuseProxyConnTimout: 5, EnableBlobMockMount: false, } - d := NewDriver(&driverOptions, &azure.Cloud{}) + d := NewDriver(&driverOptions, nil, &azure.Cloud{}) assert.NotNil(t, d) } @@ -85,7 +84,7 @@ func TestNewDriver(t *testing.T) { BlobfuseProxyConnTimout: 5, EnableBlobMockMount: false, } - driver := NewDriver(&driverOptions, &azure.Cloud{}) + driver := NewDriver(&driverOptions, nil, &azure.Cloud{}) fakedriver := NewFakeDriver() fakedriver.Name = DefaultDriverName fakedriver.Version = driverVersion @@ -1231,7 +1230,7 @@ func TestGetSubnetResourceID(t *testing.T) { d.cloud.VnetResourceGroup = "foo" actualOutput := d.getSubnetResourceID("", "", "") expectedOutput := fmt.Sprintf(subnetTemplate, d.cloud.SubscriptionID, "foo", d.cloud.VnetName, d.cloud.SubnetName) - assert.Equal(t, actualOutput, expectedOutput, "cloud.SubscriptionID should be used as the SubID") + assert.Equal(t, actualOutput, expectedOutput, "config.SubscriptionID should be used as the SubID") }, }, { @@ -1259,7 +1258,7 @@ func TestGetSubnetResourceID(t *testing.T) { d.cloud.VnetResourceGroup = "" actualOutput := d.getSubnetResourceID("", "", "") expectedOutput := fmt.Sprintf(subnetTemplate, "bar", d.cloud.ResourceGroup, d.cloud.VnetName, d.cloud.SubnetName) - 
assert.Equal(t, actualOutput, expectedOutput, "cloud.Resourcegroup should be used as the rg") + assert.Equal(t, actualOutput, expectedOutput, "config.ResourceGroup should be used as the rg") }, }, { @@ -1273,7 +1272,7 @@ func TestGetSubnetResourceID(t *testing.T) { d.cloud.VnetResourceGroup = "fakeVnetResourceGroup" actualOutput := d.getSubnetResourceID("", "", "") expectedOutput := fmt.Sprintf(subnetTemplate, "bar", d.cloud.VnetResourceGroup, d.cloud.VnetName, d.cloud.SubnetName) - assert.Equal(t, actualOutput, expectedOutput, "cloud.VnetResourceGroup should be used as the rg") + assert.Equal(t, actualOutput, expectedOutput, "config.VnetResourceGroup should be used as the rg") }, }, { @@ -1703,3 +1702,21 @@ func TestIsNFSProtocol(t *testing.T) { } } } + +func TestDriverOptions_AddFlags(t *testing.T) { + t.Run("test options", func(t *testing.T) { + option := DriverOptions{} + option.AddFlags() + typeInfo := reflect.TypeOf(option) + numOfExpectedOptions := typeInfo.NumField() + count := 0 + flag.CommandLine.VisitAll(func(f *flag.Flag) { + if !strings.Contains(f.Name, "test") { + count++ + } + }) + if numOfExpectedOptions != count { + t.Errorf("expected %d flags, but found %d flag in DriverOptions", numOfExpectedOptions, count) + } + }) +} diff --git a/pkg/blob/controllerserver_test.go b/pkg/blob/controllerserver_test.go index 0f7e03747..7446c2012 100644 --- a/pkg/blob/controllerserver_test.go +++ b/pkg/blob/controllerserver_test.go @@ -546,8 +546,8 @@ func TestCreateVolume(t *testing.T) { testFunc: func(t *testing.T) { d := NewFakeDriver() d.cloud = &azure.Cloud{} - d.cloud.Config.DisableAzureStackCloud = false - d.cloud.Config.Cloud = "AZURESTACKCLOUD" + d.cloud.DisableAzureStackCloud = false + d.cloud.Cloud = "AZURESTACKCLOUD" d.cloud.SubscriptionID = "subID" mp := make(map[string]string) mp[storeAccountKeyField] = falseValue diff --git a/pkg/blobplugin/main.go b/pkg/blobplugin/main.go index 474bb4a4e..68d8d46cd 100644 --- a/pkg/blobplugin/main.go +++ b/pkg/blobplugin/main.go @@ -26,45 +26,37 @@ import ( "strings" "sigs.k8s.io/blob-csi-driver/pkg/blob" + "sigs.k8s.io/blob-csi-driver/pkg/util" "k8s.io/component-base/metrics/legacyregistry" "k8s.io/klog/v2" ) +var driverOptions blob.DriverOptions var ( - endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint") - blobfuseProxyEndpoint = flag.String("blobfuse-proxy-endpoint", "unix://tmp/blobfuse-proxy.sock", "blobfuse-proxy endpoint") - nodeID = flag.String("nodeid", "", "node id") - version = flag.Bool("version", false, "Print the version and exit.") - metricsAddress = flag.String("metrics-address", "", "export the metrics") - kubeconfig = flag.String("kubeconfig", "", "Absolute path to the kubeconfig file. 
Required only when running out of cluster.") - driverName = flag.String("drivername", blob.DefaultDriverName, "name of the driver") - enableBlobfuseProxy = flag.Bool("enable-blobfuse-proxy", false, "using blobfuse proxy for mounts") - blobfuseProxyConnTimout = flag.Int("blobfuse-proxy-connect-timeout", 5, "blobfuse proxy connection timeout(seconds)") - enableBlobMockMount = flag.Bool("enable-blob-mock-mount", false, "enable mock mount(only for testing)") - cloudConfigSecretName = flag.String("cloud-config-secret-name", "azure-cloud-provider", "secret name of cloud config") - cloudConfigSecretNamespace = flag.String("cloud-config-secret-namespace", "kube-system", "secret namespace of cloud config") - customUserAgent = flag.String("custom-user-agent", "", "custom userAgent") - userAgentSuffix = flag.String("user-agent-suffix", "", "userAgent suffix") - allowEmptyCloudConfig = flag.Bool("allow-empty-cloud-config", true, "allow running driver without cloud config") - enableGetVolumeStats = flag.Bool("enable-get-volume-stats", false, "allow GET_VOLUME_STATS on agent node") - appendTimeStampInCacheDir = flag.Bool("append-timestamp-cache-dir", false, "append timestamp into cache directory on agent node") - mountPermissions = flag.Uint64("mount-permissions", 0777, "mounted folder permissions") - allowInlineVolumeKeyAccessWithIdentity = flag.Bool("allow-inline-volume-key-access-with-idenitity", false, "allow accessing storage account key using cluster identity for inline volume") - kubeAPIQPS = flag.Float64("kube-api-qps", 25.0, "QPS to use while communicating with the kubernetes apiserver.") - kubeAPIBurst = flag.Int("kube-api-burst", 50, "Burst to use while communicating with the kubernetes apiserver.") - appendMountErrorHelpLink = flag.Bool("append-mount-error-help-link", true, "Whether to include a link for help with mount errors when a mount error occurs.") - enableAznfsMount = flag.Bool("enable-aznfs-mount", false, "replace nfs mount with aznfs mount") - volStatsCacheExpireInMinutes = flag.Int("vol-stats-cache-expire-in-minutes", 10, "The cache expire time in minutes for volume stats cache") - sasTokenExpirationMinutes = flag.Int("sas-token-expiration-minutes", 1440, "sas token expiration minutes during volume cloning") + metricsAddress = flag.String("metrics-address", "", "export the metrics") + version = flag.Bool("version", false, "Print the version and exit.") + endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint") + kubeconfig = flag.String("kubeconfig", "", "Absolute path to the kubeconfig file. 
Required only when running out of cluster.") + cloudConfigSecretName = flag.String("cloud-config-secret-name", "azure-cloud-provider", "secret name of cloud config") + cloudConfigSecretNamespace = flag.String("cloud-config-secret-namespace", "kube-system", "secret namespace of cloud config") + customUserAgent = flag.String("custom-user-agent", "", "custom userAgent") + userAgentSuffix = flag.String("user-agent-suffix", "", "userAgent suffix") + allowEmptyCloudConfig = flag.Bool("allow-empty-cloud-config", true, "allow running driver without cloud config") + kubeAPIQPS = flag.Float64("kube-api-qps", 25.0, "QPS to use while communicating with the kubernetes apiserver.") + kubeAPIBurst = flag.Int("kube-api-burst", 50, "Burst to use while communicating with the kubernetes apiserver.") ) +func init() { + driverOptions.AddFlags() +} + func main() { klog.InitFlags(nil) _ = flag.Set("logtostderr", "true") flag.Parse() if *version { - info, err := blob.GetVersionYAML(*driverName) + info, err := blob.GetVersionYAML(driverOptions.DriverName) if err != nil { klog.Fatalln(err) } @@ -78,33 +70,21 @@ func main() { } func handle() { - driverOptions := blob.DriverOptions{ - NodeID: *nodeID, - DriverName: *driverName, - BlobfuseProxyEndpoint: *blobfuseProxyEndpoint, - EnableBlobfuseProxy: *enableBlobfuseProxy, - BlobfuseProxyConnTimout: *blobfuseProxyConnTimout, - EnableBlobMockMount: *enableBlobMockMount, - EnableGetVolumeStats: *enableGetVolumeStats, - AppendTimeStampInCacheDir: *appendTimeStampInCacheDir, - MountPermissions: *mountPermissions, - AllowInlineVolumeKeyAccessWithIdentity: *allowInlineVolumeKeyAccessWithIdentity, - AppendMountErrorHelpLink: *appendMountErrorHelpLink, - EnableAznfsMount: *enableAznfsMount, - VolStatsCacheExpireInMinutes: *volStatsCacheExpireInMinutes, - SasTokenExpirationMinutes: *sasTokenExpirationMinutes, - } - userAgent := blob.GetUserAgent(driverOptions.DriverName, *customUserAgent, *userAgentSuffix) klog.V(2).Infof("driver userAgent: %s", userAgent) - cloud, err := blob.GetCloudProvider(context.Background(), *kubeconfig, driverOptions.NodeID, *cloudConfigSecretName, *cloudConfigSecretNamespace, userAgent, *allowEmptyCloudConfig, *kubeAPIQPS, *kubeAPIBurst) + kubeClient, err := util.GetKubeClient(*kubeconfig, *kubeAPIQPS, *kubeAPIBurst, userAgent) + if err != nil { + klog.Warningf("failed to get kubeClient, error: %v", err) + } + + cloud, err := blob.GetCloudProvider(context.Background(), kubeClient, driverOptions.NodeID, *cloudConfigSecretName, *cloudConfigSecretNamespace, userAgent, *allowEmptyCloudConfig) if err != nil { klog.Fatalf("failed to get Azure Cloud Provider, error: %v", err) } klog.V(2).Infof("cloud: %s, location: %s, rg: %s, VnetName: %s, VnetResourceGroup: %s, SubnetName: %s", cloud.Cloud, cloud.Location, cloud.ResourceGroup, cloud.VnetName, cloud.VnetResourceGroup, cloud.SubnetName) - driver := blob.NewDriver(&driverOptions, cloud) + driver := blob.NewDriver(&driverOptions, kubeClient, cloud) if driver == nil { klog.Fatalln("Failed to initialize Azure Blob Storage CSI driver") } diff --git a/pkg/util/util.go b/pkg/util/util.go index 3159ad1e3..691984027 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -26,6 +26,9 @@ import ( "github.com/go-ini/ini" "github.com/pkg/errors" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" ) @@ -305,3 +308,23 @@ func parseAzcopyJobShow(jobshow string) (AzcopyJobState, string, error) { } return AzcopyJobRunning, strings.ReplaceAll(segments[1], "\n", ""), nil 
} + +func GetKubeClient(kubeconfig string, kubeAPIQPS float64, kubeAPIBurst int, userAgent string) (kubernetes.Interface, error) { + var err error + var kubeCfg *rest.Config + if kubeCfg, err = clientcmd.BuildConfigFromFlags("", kubeconfig); err != nil { + return nil, err + } + if kubeCfg == nil { + if kubeCfg, err = rest.InClusterConfig(); err != nil { + return nil, err + } + } + //kubeCfg should not be nil + // set QPS and QPS Burst as higher values + klog.V(2).Infof("set QPS(%f) and QPS Burst(%d) for driver kubeClient", float32(kubeAPIQPS), kubeAPIBurst) + kubeCfg.QPS = float32(kubeAPIQPS) + kubeCfg.Burst = kubeAPIBurst + kubeCfg.UserAgent = userAgent + return kubernetes.NewForConfig(kubeCfg) +} diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go index dabfd77e9..06ba4a400 100644 --- a/pkg/util/util_test.go +++ b/pkg/util/util_test.go @@ -524,3 +524,79 @@ func TestParseAzcopyJobShow(t *testing.T) { } } } + +func TestGetKubeConfig(t *testing.T) { + emptyKubeConfig := "empty-Kube-Config" + validKubeConfig := "valid-Kube-Config" + nonexistingConfig := "nonexisting-config" + fakeContent := ` +apiVersion: v1 +clusters: +- cluster: + server: https://localhost:8080 + name: foo-cluster +contexts: +- context: + cluster: foo-cluster + user: foo-user + namespace: bar + name: foo-context +current-context: foo-context +kind: Config +users: +- name: foo-user + user: + exec: + apiVersion: client.authentication.k8s.io/v1beta1 + args: + - arg-1 + - arg-2 + command: foo-command +` + if err := os.WriteFile(validKubeConfig, []byte(""), 0666); err != nil { + t.Error(err) + } + defer os.Remove(emptyKubeConfig) + if err := os.WriteFile(validKubeConfig, []byte(fakeContent), 0666); err != nil { + t.Error(err) + } + defer os.Remove(validKubeConfig) + + tests := []struct { + desc string + kubeconfig string + expectError bool + envVariableHasConfig bool + envVariableConfigIsValid bool + }{ + { + desc: "[success] valid kube config passed", + kubeconfig: validKubeConfig, + expectError: false, + envVariableHasConfig: false, + envVariableConfigIsValid: false, + }, + { + desc: "[failure] invalid kube config passed", + kubeconfig: emptyKubeConfig, + expectError: true, + envVariableHasConfig: false, + envVariableConfigIsValid: false, + }, + { + desc: "[failure] invalid kube config passed", + kubeconfig: nonexistingConfig, + expectError: true, + envVariableHasConfig: false, + envVariableConfigIsValid: false, + }, + } + + for _, test := range tests { + _, err := GetKubeClient(test.kubeconfig, 25.0, 50, "") + receiveError := (err != nil) + if test.expectError != receiveError { + t.Errorf("desc: %s,\n input: %q, GetCloudProvider err: %v, expectErr: %v", test.desc, test.kubeconfig, err, test.expectError) + } + } +} diff --git a/test/e2e/suite_test.go b/test/e2e/suite_test.go index 9b51476de..c4604f0a7 100644 --- a/test/e2e/suite_test.go +++ b/test/e2e/suite_test.go @@ -39,6 +39,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework/config" _ "k8s.io/kubernetes/test/e2e/framework/debug/init" "sigs.k8s.io/blob-csi-driver/pkg/blob" + "sigs.k8s.io/blob-csi-driver/pkg/util" "sigs.k8s.io/blob-csi-driver/test/utils/azure" "sigs.k8s.io/blob-csi-driver/test/utils/credentials" "sigs.k8s.io/blob-csi-driver/test/utils/testutil" @@ -151,9 +152,11 @@ var _ = ginkgo.SynchronizedBeforeSuite(func(ctx ginkgo.SpecContext) []byte { BlobfuseProxyConnTimout: 5, EnableBlobMockMount: false, } - cloud, err := blob.GetCloudProvider(context.Background(), kubeconfig, driverOptions.NodeID, "", "", "", false, 0, 0) + kubeClient, err := 
util.GetKubeClient(kubeconfig, 25.0, 50, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) - blobDriver = blob.NewDriver(&driverOptions, cloud) + cloud, err := blob.GetCloudProvider(context.Background(), kubeClient, driverOptions.NodeID, "", "", "", false) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + blobDriver = blob.NewDriver(&driverOptions, kubeClient, cloud) go func() { blobDriver.Run(fmt.Sprintf("unix:///tmp/csi-%s.sock", uuid.NewUUID().String()), false) }() From acb1ebc607159d97a77846bec255e0c4dad22712 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Dec 2023 16:52:46 +0000 Subject: [PATCH 013/157] chore(deps): bump github.com/kubernetes-csi/csi-lib-utils Bumps [github.com/kubernetes-csi/csi-lib-utils](https://github.com/kubernetes-csi/csi-lib-utils) from 0.13.0 to 0.16.0. - [Release notes](https://github.com/kubernetes-csi/csi-lib-utils/releases) - [Commits](https://github.com/kubernetes-csi/csi-lib-utils/compare/v0.13.0...v0.16.0) --- updated-dependencies: - dependency-name: github.com/kubernetes-csi/csi-lib-utils dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 8 +- go.sum | 22 +- .../protosanitizer/protosanitizer.go | 9 +- .../api/annotations/field_info.pb.go | 295 ++++++++++++++++++ vendor/modules.txt | 8 +- 5 files changed, 318 insertions(+), 24 deletions(-) create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go diff --git a/go.mod b/go.mod index 6a9e452a0..edc9f0ea5 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.3 github.com/imdario/mergo v0.3.9 // indirect - github.com/kubernetes-csi/csi-lib-utils v0.13.0 + github.com/kubernetes-csi/csi-lib-utils v0.16.0 github.com/onsi/gomega v1.30.0 github.com/pborman/uuid v1.2.1 github.com/pelletier/go-toml v1.9.5 @@ -139,9 +139,9 @@ require ( golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.14.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 24f74efc1..186b0fb4e 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,6 @@ -cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= -cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= -cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go v0.110.9 h1:e7ITSqGFFk4rbz/JFIqZh3G4VEHguhAL4BQcFlWtU68= +cloud.google.com/go/compute v1.23.2 h1:nWEMDhgbBkBJjfpVySqU4jgWdc22PLR0o4vEexZHers= +cloud.google.com/go/compute v1.23.2/go.mod h1:JJ0atRC0J/oWYiiVBmsSsrRnh92DhZPG4hFDcR04Rns= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod 
h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= @@ -189,8 +189,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kubernetes-csi/csi-lib-utils v0.13.0 h1:QrTdZVZbHlaSUBN9ReayBPnnF1N0edFIpUKBwVIBW3w= -github.com/kubernetes-csi/csi-lib-utils v0.13.0/go.mod h1:JS9eDIZmSjx4F9o0bLTVK/qfhIIOifdjEfVXzxWapfE= +github.com/kubernetes-csi/csi-lib-utils v0.16.0 h1:LXCvkhXHtFOkl7LoDqFdho/MuebccZqWxLwhKiRGiBg= +github.com/kubernetes-csi/csi-lib-utils v0.16.0/go.mod h1:fp1Oik+45tP2o4X9SD/SBWXLTQYT9wtLxGasBE3+vBI= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= @@ -403,12 +403,12 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= -google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= -google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= -google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 h1:I6WNifs6pF9tNdSob2W24JtyxIYjzFB9qDlpUC76q+U= +google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405/go.mod h1:3WDQMjmJk36UQhjQ89emUzb1mdaHcPeeAh4SCBKznB4= +google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b h1:CIC2YMXmIhYw6evmhPxBKJ4fmLbOFtXQN/GV3XOZR8k= +google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1:Jyp0Hsi0bmHXG6k9eATXoYtjd6e2UzZ1SCn/wIupY14= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= diff --git a/vendor/github.com/kubernetes-csi/csi-lib-utils/protosanitizer/protosanitizer.go b/vendor/github.com/kubernetes-csi/csi-lib-utils/protosanitizer/protosanitizer.go index af64a7b27..cad31f17f 100644 --- 
a/vendor/github.com/kubernetes-csi/csi-lib-utils/protosanitizer/protosanitizer.go +++ b/vendor/github.com/kubernetes-csi/csi-lib-utils/protosanitizer/protosanitizer.go @@ -26,7 +26,6 @@ import ( "github.com/golang/protobuf/descriptor" "github.com/golang/protobuf/proto" - protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" protobufdescriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" ) @@ -56,7 +55,7 @@ func StripSecretsCSI03(msg interface{}) fmt.Stringer { type stripSecrets struct { msg interface{} - isSecretField func(field *protobuf.FieldDescriptorProto) bool + isSecretField func(field *protobufdescriptor.FieldDescriptorProto) bool } func (s *stripSecrets) String() string { @@ -110,7 +109,7 @@ func (s *stripSecrets) strip(parsed interface{}, msg interface{}) { if _, ok := parsedFields[field.GetName()]; ok { parsedFields[field.GetName()] = "***stripped***" } - } else if field.GetType() == protobuf.FieldDescriptorProto_TYPE_MESSAGE { + } else if field.GetType() == protobufdescriptor.FieldDescriptorProto_TYPE_MESSAGE { // When we get here, // the type name is something like ".csi.v1.CapacityRange" (leading dot!) // and looking up "csi.v1.CapacityRange" @@ -150,7 +149,7 @@ func (s *stripSecrets) strip(parsed interface{}, msg interface{}) { // isCSI1Secret uses the csi.E_CsiSecret extension from CSI 1.0 to // determine whether a field contains secrets. -func isCSI1Secret(field *protobuf.FieldDescriptorProto) bool { +func isCSI1Secret(field *protobufdescriptor.FieldDescriptorProto) bool { ex, err := proto.GetExtension(field.Options, e_CsiSecret) return err == nil && ex != nil && *ex.(*bool) } @@ -172,6 +171,6 @@ var e_CsiSecret = &proto.ExtensionDesc{ // isCSI03Secret relies on the naming convention in CSI <= 0.3 // to determine whether a field contains secrets. -func isCSI03Secret(field *protobuf.FieldDescriptorProto) bool { +func isCSI03Secret(field *protobufdescriptor.FieldDescriptorProto) bool { return strings.HasSuffix(field.GetName(), "_secrets") } diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go new file mode 100644 index 000000000..d02e6bbc8 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go @@ -0,0 +1,295 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.12 +// source: google/api/field_info.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The standard format of a field value. The supported formats are all backed +// by either an RFC defined by the IETF or a Google-defined AIP. +type FieldInfo_Format int32 + +const ( + // Default, unspecified value. + FieldInfo_FORMAT_UNSPECIFIED FieldInfo_Format = 0 + // Universally Unique Identifier, version 4, value as defined by + // https://datatracker.ietf.org/doc/html/rfc4122. The value may be + // normalized to entirely lowercase letters. For example, the value + // `F47AC10B-58CC-0372-8567-0E02B2C3D479` would be normalized to + // `f47ac10b-58cc-0372-8567-0e02b2c3d479`. + FieldInfo_UUID4 FieldInfo_Format = 1 + // Internet Protocol v4 value as defined by [RFC + // 791](https://datatracker.ietf.org/doc/html/rfc791). The value may be + // condensed, with leading zeros in each octet stripped. For example, + // `001.022.233.040` would be condensed to `1.22.233.40`. + FieldInfo_IPV4 FieldInfo_Format = 2 + // Internet Protocol v6 value as defined by [RFC + // 2460](https://datatracker.ietf.org/doc/html/rfc2460). The value may be + // normalized to entirely lowercase letters, and zero-padded partial and + // empty octets. For example, the value `2001:DB8::` would be normalized to + // `2001:0db8:0:0`. + FieldInfo_IPV6 FieldInfo_Format = 3 + // An IP address in either v4 or v6 format as described by the individual + // values defined herein. See the comments on the IPV4 and IPV6 types for + // allowed normalizations of each. + FieldInfo_IPV4_OR_IPV6 FieldInfo_Format = 4 +) + +// Enum value maps for FieldInfo_Format. +var ( + FieldInfo_Format_name = map[int32]string{ + 0: "FORMAT_UNSPECIFIED", + 1: "UUID4", + 2: "IPV4", + 3: "IPV6", + 4: "IPV4_OR_IPV6", + } + FieldInfo_Format_value = map[string]int32{ + "FORMAT_UNSPECIFIED": 0, + "UUID4": 1, + "IPV4": 2, + "IPV6": 3, + "IPV4_OR_IPV6": 4, + } +) + +func (x FieldInfo_Format) Enum() *FieldInfo_Format { + p := new(FieldInfo_Format) + *p = x + return p +} + +func (x FieldInfo_Format) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldInfo_Format) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_field_info_proto_enumTypes[0].Descriptor() +} + +func (FieldInfo_Format) Type() protoreflect.EnumType { + return &file_google_api_field_info_proto_enumTypes[0] +} + +func (x FieldInfo_Format) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FieldInfo_Format.Descriptor instead. +func (FieldInfo_Format) EnumDescriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{0, 0} +} + +// Rich semantic information of an API field beyond basic typing. +type FieldInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The standard format of a field value. This does not explicitly configure + // any API consumer, just documents the API's format for the field it is + // applied to. 
+ Format FieldInfo_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.api.FieldInfo_Format" json:"format,omitempty"` +} + +func (x *FieldInfo) Reset() { + *x = FieldInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_field_info_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldInfo) ProtoMessage() {} + +func (x *FieldInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_api_field_info_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldInfo.ProtoReflect.Descriptor instead. +func (*FieldInfo) Descriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{0} +} + +func (x *FieldInfo) GetFormat() FieldInfo_Format { + if x != nil { + return x.Format + } + return FieldInfo_FORMAT_UNSPECIFIED +} + +var file_google_api_field_info_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*FieldInfo)(nil), + Field: 291403980, + Name: "google.api.field_info", + Tag: "bytes,291403980,opt,name=field_info", + Filename: "google/api/field_info.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. +var ( + // Rich semantic descriptor of an API field beyond the basic typing. + // + // Examples: + // + // string request_id = 1 [(google.api.field_info).format = UUID4]; + // string old_ip_address = 2 [(google.api.field_info).format = IPV4]; + // string new_ip_address = 3 [(google.api.field_info).format = IPV6]; + // string actual_ip_address = 4 [ + // (google.api.field_info).format = IPV4_OR_IPV6 + // ]; + // + // optional google.api.FieldInfo field_info = 291403980; + E_FieldInfo = &file_google_api_field_info_proto_extTypes[0] +) + +var File_google_api_field_info_proto protoreflect.FileDescriptor + +var file_google_api_field_info_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x09, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x34, 0x0a, 0x06, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, + 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, + 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, + 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, + 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, + 0x10, 0x04, 0x3a, 
0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x6c, 0x0a, 0x0e, 0x63, + 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_google_api_field_info_proto_rawDescOnce sync.Once + file_google_api_field_info_proto_rawDescData = file_google_api_field_info_proto_rawDesc +) + +func file_google_api_field_info_proto_rawDescGZIP() []byte { + file_google_api_field_info_proto_rawDescOnce.Do(func() { + file_google_api_field_info_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_info_proto_rawDescData) + }) + return file_google_api_field_info_proto_rawDescData +} + +var file_google_api_field_info_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_api_field_info_proto_goTypes = []interface{}{ + (FieldInfo_Format)(0), // 0: google.api.FieldInfo.Format + (*FieldInfo)(nil), // 1: google.api.FieldInfo + (*descriptorpb.FieldOptions)(nil), // 2: google.protobuf.FieldOptions +} +var file_google_api_field_info_proto_depIdxs = []int32{ + 0, // 0: google.api.FieldInfo.format:type_name -> google.api.FieldInfo.Format + 2, // 1: google.api.field_info:extendee -> google.protobuf.FieldOptions + 1, // 2: google.api.field_info:type_name -> google.api.FieldInfo + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 2, // [2:3] is the sub-list for extension type_name + 1, // [1:2] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_api_field_info_proto_init() } +func file_google_api_field_info_proto_init() { + if File_google_api_field_info_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_field_info_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_field_info_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_field_info_proto_goTypes, + DependencyIndexes: file_google_api_field_info_proto_depIdxs, + EnumInfos: 
file_google_api_field_info_proto_enumTypes, + MessageInfos: file_google_api_field_info_proto_msgTypes, + ExtensionInfos: file_google_api_field_info_proto_extTypes, + }.Build() + File_google_api_field_info_proto = out.File + file_google_api_field_info_proto_rawDesc = nil + file_google_api_field_info_proto_goTypes = nil + file_google_api_field_info_proto_depIdxs = nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 7b12f03e3..bf798c16c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -308,7 +308,7 @@ github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/kubernetes-csi/csi-lib-utils v0.13.0 +# github.com/kubernetes-csi/csi-lib-utils v0.16.0 ## explicit; go 1.18 github.com/kubernetes-csi/csi-lib-utils/protosanitizer # github.com/kylelemons/godebug v1.1.0 @@ -630,16 +630,16 @@ google.golang.org/appengine/internal/log google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d +# google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 ## explicit; go 1.19 google.golang.org/genproto/internal -# google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d +# google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b ## explicit; go 1.19 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/expr/v1alpha1 google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d +# google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status From cb87f32fbf959ba51e376e1f4b9b1a1552712e56 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 13 Dec 2023 17:01:28 +0000 Subject: [PATCH 014/157] chore(deps): bump github/codeql-action from 2 to 3 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 2 to 3. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/v2...v3) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql-analysis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 954f8dd4e..162131cc5 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -48,7 +48,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. 
@@ -63,4 +63,4 @@ jobs: make all - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v3 From 85c2835784bf5e1ed2a364ab4b46f54a068c9f3e Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 14 Dec 2023 13:11:29 +0800 Subject: [PATCH 015/157] Update csi-debug.md --- docs/csi-debug.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/csi-debug.md b/docs/csi-debug.md index d7aaa9ce4..34b25fe6f 100644 --- a/docs/csi-debug.md +++ b/docs/csi-debug.md @@ -42,6 +42,7 @@ kubectl logs csi-blob-node-cvgbs -c blob -n kube-system > csi-blob-node.log > ```console > journalctl -u blobfuse-proxy -l > ``` +> note: if there are no logs for blobfuse-proxy, you can check the status of the blobfuse-proxy service by running the command `systemctl status blobfuse-proxy`. - check blobfuse mount inside driver ```console From 6ec34374830b1f6e53c2c0ce03b684c493362e61 Mon Sep 17 00:00:00 2001 From: weizhichen Date: Thu, 14 Dec 2023 07:54:36 +0000 Subject: [PATCH 016/157] fix: support install blobfuse proxy on other linux distributions --- pkg/blobfuse-proxy/init.sh | 8 +-- pkg/blobfuse-proxy/install-proxy-mariner.sh | 62 ------------------- ...stall-proxy-ubuntu.sh => install-proxy.sh} | 5 +- pkg/blobplugin/Dockerfile | 6 +- 4 files changed, 7 insertions(+), 74 deletions(-) delete mode 100644 pkg/blobfuse-proxy/install-proxy-mariner.sh rename pkg/blobfuse-proxy/{install-proxy-ubuntu.sh => install-proxy.sh} (96%) diff --git a/pkg/blobfuse-proxy/init.sh b/pkg/blobfuse-proxy/init.sh index 4628f5ae4..31fa8d7e5 100755 --- a/pkg/blobfuse-proxy/init.sh +++ b/pkg/blobfuse-proxy/init.sh @@ -30,17 +30,11 @@ echo "Linux distribution: $DISTRIBUTION, Arch: $ARCH" # install blobfuse-proxy and blobfuse/blofuse2 if needed case "${DISTRIBUTION}" in - "ubuntu") - . ./blobfuse-proxy/install-proxy-ubuntu.sh - ;; "rhcos") . ./blobfuse-proxy/install-proxy-rhcos.sh ;; - "mariner") - . ./blobfuse-proxy/install-proxy-mariner.sh - ;; *) - echo "Unsupported distribution: ${DISTRIBUTION}" + . ./blobfuse-proxy/install-proxy.sh ;; esac diff --git a/pkg/blobfuse-proxy/install-proxy-mariner.sh b/pkg/blobfuse-proxy/install-proxy-mariner.sh deleted file mode 100644 index 4f7382708..000000000 --- a/pkg/blobfuse-proxy/install-proxy-mariner.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/bin/sh - -# Copyright 2019 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -xe - -# install blobfuse/blobfuse2 -echo "skip install blobfuse/blobfuse2 for mariner...." - -# install blobfuse-proxy -updateBlobfuseProxy="true" -if [ -f "/host/usr/bin/blobfuse-proxy" ];then - old=$(sha256sum /host/usr/bin/blobfuse-proxy | awk '{print $1}') - new=$(sha256sum /blobfuse-proxy/blobfuse-proxy | awk '{print $1}') - if [ "$old" = "$new" ];then - updateBlobfuseProxy="false" - echo "no need to update blobfuse-proxy" - fi -fi -if [ "$updateBlobfuseProxy" = "true" ];then - echo "copy blobfuse-proxy...." 
- rm -rf /host/var/lib/kubelet/plugins/blob.csi.azure.com/blobfuse-proxy.sock - rm -rf /host/usr/bin/blobfuse-proxy - cp /blobfuse-proxy/blobfuse-proxy /host/usr/bin/blobfuse-proxy - chmod 755 /host/usr/bin/blobfuse-proxy -fi - -updateService="true" -if [ -f "/host/usr/lib/systemd/system/blobfuse-proxy.service" ];then - old=$(sha256sum /host/usr/lib/systemd/system/blobfuse-proxy.service | awk '{print $1}') - new=$(sha256sum /blobfuse-proxy/blobfuse-proxy.service | awk '{print $1}') - if [ "$old" = "$new" ];then - updateService="false" - echo "no need to update blobfuse-proxy.service" - fi -fi -if [ "$updateService" = "true" ];then - echo "copy blobfuse-proxy.service...." - mkdir -p /host/usr/lib/systemd/system - cp /blobfuse-proxy/blobfuse-proxy.service /host/usr/lib/systemd/system/blobfuse-proxy.service -fi - -if [ "${INSTALL_BLOBFUSE_PROXY}" = "true" ];then - if [ "$updateBlobfuseProxy" = "true" ] || [ "$updateService" = "true" ];then - echo "start blobfuse-proxy...." - $HOST_CMD systemctl daemon-reload - $HOST_CMD systemctl enable blobfuse-proxy.service - $HOST_CMD systemctl restart blobfuse-proxy.service - fi -fi diff --git a/pkg/blobfuse-proxy/install-proxy-ubuntu.sh b/pkg/blobfuse-proxy/install-proxy.sh similarity index 96% rename from pkg/blobfuse-proxy/install-proxy-ubuntu.sh rename to pkg/blobfuse-proxy/install-proxy.sh index 911be8356..a642bb3aa 100644 --- a/pkg/blobfuse-proxy/install-proxy-ubuntu.sh +++ b/pkg/blobfuse-proxy/install-proxy.sh @@ -17,7 +17,10 @@ set -xe # install blobfuse/blobfuse2 -if [ "${ARCH}" = "aarch64" ] +if [ "${DISTRIBUTION}" != "ubuntu" ] +then + echo "skip install blobfuse/blobfuse2 for ${DISTRIBUTION}...." +elif [ "${ARCH}" = "aarch64" ] then echo "skip install blobfuse/blobfuse2 for arm64...." elif [ "${INSTALL_BLOBFUSE}" = "true" ] || [ "${INSTALL_BLOBFUSE2}" = "true" ] diff --git a/pkg/blobplugin/Dockerfile b/pkg/blobplugin/Dockerfile index 0983c1802..bf0aec8fa 100644 --- a/pkg/blobplugin/Dockerfile +++ b/pkg/blobplugin/Dockerfile @@ -21,16 +21,14 @@ COPY ${binary} /blobplugin RUN mkdir /blobfuse-proxy/ COPY ./pkg/blobfuse-proxy/init.sh /blobfuse-proxy/ -COPY ./pkg/blobfuse-proxy/install-proxy-ubuntu.sh /blobfuse-proxy/ +COPY ./pkg/blobfuse-proxy/install-proxy.sh /blobfuse-proxy/ COPY ./pkg/blobfuse-proxy/install-proxy-rhcos.sh /blobfuse-proxy/ -COPY ./pkg/blobfuse-proxy/install-proxy-mariner.sh /blobfuse-proxy/ COPY ./pkg/blobfuse-proxy/blobfuse-proxy.service /blobfuse-proxy/ COPY ./_output/${ARCH}/blobfuse-proxy /blobfuse-proxy/ RUN chmod +x /blobfuse-proxy/init.sh && \ - chmod +x /blobfuse-proxy/install-proxy-ubuntu.sh && \ chmod +x /blobfuse-proxy/install-proxy-rhcos.sh && \ - chmod +x /blobfuse-proxy/install-proxy-mariner.sh && \ + chmod +x /blobfuse-proxy/install-proxy.sh && \ chmod +x /blobfuse-proxy/blobfuse-proxy.service && \ chmod +x /blobfuse-proxy/blobfuse-proxy From 309feb0613b0012dd8e46fc3ac88db85666dead0 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Fri, 15 Dec 2023 11:52:51 +0000 Subject: [PATCH 017/157] feat: upgrade to aznfs 2.0.1 version for nfs mount --- pkg/blobplugin/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/blobplugin/Dockerfile b/pkg/blobplugin/Dockerfile index bf0aec8fa..a5a09910f 100644 --- a/pkg/blobplugin/Dockerfile +++ b/pkg/blobplugin/Dockerfile @@ -37,9 +37,9 @@ RUN apt update && apt upgrade -y && apt-mark unhold libcap2 && clean-install ca- # install aznfs RUN if [ "$ARCH" = "amd64" ] ; then \ - wget -O aznfs.tar.gz 
https://github.com/Azure/AZNFS-mount/releases/download/2.0.0/aznfs-2.0.0-1.x86_64.tar.gz; \ + wget -O aznfs.tar.gz https://github.com/Azure/AZNFS-mount/releases/download/2.0.1/aznfs-2.0.1-1.x86_64.tar.gz; \ else \ - wget -O aznfs.tar.gz https://github.com/Azure/AZNFS-mount/releases/download/2.0.0/aznfs-2.0.0-1.arm64.tar.gz;fi + wget -O aznfs.tar.gz https://github.com/Azure/AZNFS-mount/releases/download/2.0.1/aznfs-2.0.1-1.arm64.tar.gz;fi RUN tar xvzf aznfs.tar.gz -C / --keep-directory-symlink && rm aznfs.tar.gz # install azcopy From e0cf6548dd880de38d0b80251983ab49a82d1bb4 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Tue, 19 Dec 2023 08:19:52 +0000 Subject: [PATCH 018/157] fix: CVE-2023-48795 --- go.mod | 2 +- go.sum | 4 +- vendor/golang.org/x/crypto/ssh/channel.go | 28 ++++++++--- vendor/golang.org/x/crypto/ssh/client.go | 2 +- vendor/golang.org/x/crypto/ssh/handshake.go | 56 +++++++++++++++++++-- vendor/golang.org/x/crypto/ssh/server.go | 2 + vendor/golang.org/x/crypto/ssh/transport.go | 32 ++++++++++-- vendor/modules.txt | 2 +- 8 files changed, 106 insertions(+), 22 deletions(-) diff --git a/go.mod b/go.mod index edc9f0ea5..00f94dd79 100644 --- a/go.mod +++ b/go.mod @@ -129,7 +129,7 @@ require ( go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.19.0 // indirect - golang.org/x/crypto v0.16.0 // indirect + golang.org/x/crypto v0.17.0 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/oauth2 v0.11.0 // indirect golang.org/x/sync v0.5.0 // indirect diff --git a/go.sum b/go.sum index 186b0fb4e..148dadd3d 100644 --- a/go.sum +++ b/go.sum @@ -323,8 +323,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go index c0834c00d..cc0bb7ab6 100644 --- a/vendor/golang.org/x/crypto/ssh/channel.go +++ b/vendor/golang.org/x/crypto/ssh/channel.go @@ -187,9 +187,11 @@ type channel struct { pending *buffer extPending *buffer - // windowMu protects myWindow, the flow-control window. - windowMu sync.Mutex - myWindow uint32 + // windowMu protects myWindow, the flow-control window, and myConsumed, + // the number of bytes consumed since we last increased myWindow + windowMu sync.Mutex + myWindow uint32 + myConsumed uint32 // writeMu serializes calls to mux.conn.writePacket() and // protects sentClose and packetPool. 
This mutex must be @@ -332,14 +334,24 @@ func (ch *channel) handleData(packet []byte) error { return nil } -func (c *channel) adjustWindow(n uint32) error { +func (c *channel) adjustWindow(adj uint32) error { c.windowMu.Lock() - // Since myWindow is managed on our side, and can never exceed - // the initial window setting, we don't worry about overflow. - c.myWindow += uint32(n) + // Since myConsumed and myWindow are managed on our side, and can never + // exceed the initial window setting, we don't worry about overflow. + c.myConsumed += adj + var sendAdj uint32 + if (channelWindowSize-c.myWindow > 3*c.maxIncomingPayload) || + (c.myWindow < channelWindowSize/2) { + sendAdj = c.myConsumed + c.myConsumed = 0 + c.myWindow += sendAdj + } c.windowMu.Unlock() + if sendAdj == 0 { + return nil + } return c.sendMessage(windowAdjustMsg{ - AdditionalBytes: uint32(n), + AdditionalBytes: sendAdj, }) } diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go index bdc356cbd..fd8c49749 100644 --- a/vendor/golang.org/x/crypto/ssh/client.go +++ b/vendor/golang.org/x/crypto/ssh/client.go @@ -82,7 +82,7 @@ func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan if err := conn.clientHandshake(addr, &fullConf); err != nil { c.Close() - return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err) + return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %w", err) } conn.mux = newMux(conn.transport) return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go index 49bbba769..56cdc7c21 100644 --- a/vendor/golang.org/x/crypto/ssh/handshake.go +++ b/vendor/golang.org/x/crypto/ssh/handshake.go @@ -35,6 +35,16 @@ type keyingTransport interface { // direction will be effected if a msgNewKeys message is sent // or received. prepareKeyChange(*algorithms, *kexResult) error + + // setStrictMode sets the strict KEX mode, notably triggering + // sequence number resets on sending or receiving msgNewKeys. + // If the sequence number is already > 1 when setStrictMode + // is called, an error is returned. + setStrictMode() error + + // setInitialKEXDone indicates to the transport that the initial key exchange + // was completed + setInitialKEXDone() } // handshakeTransport implements rekeying on top of a keyingTransport @@ -100,6 +110,10 @@ type handshakeTransport struct { // The session ID or nil if first kex did not complete yet. sessionID []byte + + // strictMode indicates if the other side of the handshake indicated + // that we should be following the strict KEX protocol restrictions. + strictMode bool } type pendingKex struct { @@ -209,7 +223,10 @@ func (t *handshakeTransport) readLoop() { close(t.incoming) break } - if p[0] == msgIgnore || p[0] == msgDebug { + // If this is the first kex, and strict KEX mode is enabled, + // we don't ignore any messages, as they may be used to manipulate + // the packet sequence numbers. + if !(t.sessionID == nil && t.strictMode) && (p[0] == msgIgnore || p[0] == msgDebug) { continue } t.incoming <- p @@ -441,6 +458,11 @@ func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) { return successPacket, nil } +const ( + kexStrictClient = "kex-strict-c-v00@openssh.com" + kexStrictServer = "kex-strict-s-v00@openssh.com" +) + // sendKexInit sends a key change message. 
func (t *handshakeTransport) sendKexInit() error { t.mu.Lock() @@ -454,7 +476,6 @@ func (t *handshakeTransport) sendKexInit() error { } msg := &kexInitMsg{ - KexAlgos: t.config.KeyExchanges, CiphersClientServer: t.config.Ciphers, CiphersServerClient: t.config.Ciphers, MACsClientServer: t.config.MACs, @@ -464,6 +485,13 @@ func (t *handshakeTransport) sendKexInit() error { } io.ReadFull(rand.Reader, msg.Cookie[:]) + // We mutate the KexAlgos slice, in order to add the kex-strict extension algorithm, + // and possibly to add the ext-info extension algorithm. Since the slice may be the + // user owned KeyExchanges, we create our own slice in order to avoid using user + // owned memory by mistake. + msg.KexAlgos = make([]string, 0, len(t.config.KeyExchanges)+2) // room for kex-strict and ext-info + msg.KexAlgos = append(msg.KexAlgos, t.config.KeyExchanges...) + isServer := len(t.hostKeys) > 0 if isServer { for _, k := range t.hostKeys { @@ -488,17 +516,24 @@ func (t *handshakeTransport) sendKexInit() error { msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, keyFormat) } } + + if t.sessionID == nil { + msg.KexAlgos = append(msg.KexAlgos, kexStrictServer) + } } else { msg.ServerHostKeyAlgos = t.hostKeyAlgorithms // As a client we opt in to receiving SSH_MSG_EXT_INFO so we know what // algorithms the server supports for public key authentication. See RFC // 8308, Section 2.1. + // + // We also send the strict KEX mode extension algorithm, in order to opt + // into the strict KEX mode. if firstKeyExchange := t.sessionID == nil; firstKeyExchange { - msg.KexAlgos = make([]string, 0, len(t.config.KeyExchanges)+1) - msg.KexAlgos = append(msg.KexAlgos, t.config.KeyExchanges...) msg.KexAlgos = append(msg.KexAlgos, "ext-info-c") + msg.KexAlgos = append(msg.KexAlgos, kexStrictClient) } + } packet := Marshal(msg) @@ -604,6 +639,13 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { return err } + if t.sessionID == nil && ((isClient && contains(serverInit.KexAlgos, kexStrictServer)) || (!isClient && contains(clientInit.KexAlgos, kexStrictClient))) { + t.strictMode = true + if err := t.conn.setStrictMode(); err != nil { + return err + } + } + // We don't send FirstKexFollows, but we handle receiving it. // // RFC 4253 section 7 defines the kex and the agreement method for @@ -679,6 +721,12 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { return unexpectedMessageError(msgNewKeys, packet[0]) } + if firstKeyExchange { + // Indicates to the transport that the first key exchange is completed + // after receiving SSH_MSG_NEWKEYS. 
+ t.conn.setInitialKEXDone() + } + return nil } diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index 7f0c236a9..c2dfe3268 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -213,6 +213,7 @@ func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewCha } else { for _, algo := range fullConf.PublicKeyAuthAlgorithms { if !contains(supportedPubKeyAuthAlgos, algo) { + c.Close() return nil, nil, nil, fmt.Errorf("ssh: unsupported public key authentication algorithm %s", algo) } } @@ -220,6 +221,7 @@ func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewCha // Check if the config contains any unsupported key exchanges for _, kex := range fullConf.KeyExchanges { if _, ok := serverForbiddenKexAlgos[kex]; ok { + c.Close() return nil, nil, nil, fmt.Errorf("ssh: unsupported key exchange %s for server", kex) } } diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go index da015801e..0424d2d37 100644 --- a/vendor/golang.org/x/crypto/ssh/transport.go +++ b/vendor/golang.org/x/crypto/ssh/transport.go @@ -49,6 +49,9 @@ type transport struct { rand io.Reader isClient bool io.Closer + + strictMode bool + initialKEXDone bool } // packetCipher represents a combination of SSH encryption/MAC @@ -74,6 +77,18 @@ type connectionState struct { pendingKeyChange chan packetCipher } +func (t *transport) setStrictMode() error { + if t.reader.seqNum != 1 { + return errors.New("ssh: sequence number != 1 when strict KEX mode requested") + } + t.strictMode = true + return nil +} + +func (t *transport) setInitialKEXDone() { + t.initialKEXDone = true +} + // prepareKeyChange sets up key material for a keychange. The key changes in // both directions are triggered by reading and writing a msgNewKey packet // respectively. @@ -112,11 +127,12 @@ func (t *transport) printPacket(p []byte, write bool) { // Read and decrypt next packet. 
func (t *transport) readPacket() (p []byte, err error) { for { - p, err = t.reader.readPacket(t.bufReader) + p, err = t.reader.readPacket(t.bufReader, t.strictMode) if err != nil { break } - if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) { + // in strict mode we pass through DEBUG and IGNORE packets only during the initial KEX + if len(p) == 0 || (t.strictMode && !t.initialKEXDone) || (p[0] != msgIgnore && p[0] != msgDebug) { break } } @@ -127,7 +143,7 @@ func (t *transport) readPacket() (p []byte, err error) { return p, err } -func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { +func (s *connectionState) readPacket(r *bufio.Reader, strictMode bool) ([]byte, error) { packet, err := s.packetCipher.readCipherPacket(s.seqNum, r) s.seqNum++ if err == nil && len(packet) == 0 { @@ -140,6 +156,9 @@ func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { select { case cipher := <-s.pendingKeyChange: s.packetCipher = cipher + if strictMode { + s.seqNum = 0 + } default: return nil, errors.New("ssh: got bogus newkeys message") } @@ -170,10 +189,10 @@ func (t *transport) writePacket(packet []byte) error { if debugTransport { t.printPacket(packet, true) } - return t.writer.writePacket(t.bufWriter, t.rand, packet) + return t.writer.writePacket(t.bufWriter, t.rand, packet, t.strictMode) } -func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error { +func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte, strictMode bool) error { changeKeys := len(packet) > 0 && packet[0] == msgNewKeys err := s.packetCipher.writeCipherPacket(s.seqNum, w, rand, packet) @@ -188,6 +207,9 @@ func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet [] select { case cipher := <-s.pendingKeyChange: s.packetCipher = cipher + if strictMode { + s.seqNum = 0 + } default: panic("ssh: no key material for msgNewKeys") } diff --git a/vendor/modules.txt b/vendor/modules.txt index bf798c16c..f91688ff4 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -529,7 +529,7 @@ go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.16.0 +# golang.org/x/crypto v0.17.0 ## explicit; go 1.18 golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 From 1e92917c42d73339020c83b2963e57287a87aef2 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Tue, 19 Dec 2023 08:20:11 +0000 Subject: [PATCH 019/157] test: ignore azcopy CVE-2023-48795 scan failure --- .trivyignore | 1 + 1 file changed, 1 insertion(+) create mode 100644 .trivyignore diff --git a/.trivyignore b/.trivyignore new file mode 100644 index 000000000..ac4250b0e --- /dev/null +++ b/.trivyignore @@ -0,0 +1 @@ +CVE-2023-48795 From cc483711eab3baf8edf0ec78b50032fe66d4438c Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Tue, 19 Dec 2023 13:22:08 +0000 Subject: [PATCH 020/157] doc: fix volume cloning doc --- deploy/example/cloning/README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/deploy/example/cloning/README.md b/deploy/example/cloning/README.md index c81e11482..e0795b965 100644 --- a/deploy/example/cloning/README.md +++ b/deploy/example/cloning/README.md @@ -2,6 +2,10 @@ ## Feature Status: Beta - supported from v1.23.2 +- NFSv3 protocol is not supported + +## Prerequisites +- ensure that the virtual network on which the driver controller pod is running is included in the allowed list of the storage account VNet setting. 
## Create a Source PVC From d184ffb486b4241f73422ed2bce64479df345446 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Dec 2023 16:16:42 +0000 Subject: [PATCH 021/157] chore(deps): bump k8s.io/kubernetes from 1.28.4 to 1.28.5 Bumps [k8s.io/kubernetes](https://github.com/kubernetes/kubernetes) from 1.28.4 to 1.28.5. - [Release notes](https://github.com/kubernetes/kubernetes/releases) - [Commits](https://github.com/kubernetes/kubernetes/compare/v1.28.4...v1.28.5) --- updated-dependencies: - dependency-name: k8s.io/kubernetes dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- vendor/k8s.io/kubernetes/pkg/features/kube_features.go | 2 +- vendor/k8s.io/kubernetes/test/utils/image/manifest.go | 2 +- vendor/modules.txt | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 00f94dd79..0ec6189f0 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( k8s.io/client-go v0.28.4 k8s.io/component-base v0.28.4 k8s.io/klog/v2 v2.110.1 - k8s.io/kubernetes v1.28.4 + k8s.io/kubernetes v1.28.5 k8s.io/mount-utils v0.28.4 k8s.io/utils v0.0.0-20231127182322-b307cd553661 sigs.k8s.io/cloud-provider-azure v1.27.1-0.20231208022044-b9ede3fc98e9 diff --git a/go.sum b/go.sum index 148dadd3d..9132c9cf6 100644 --- a/go.sum +++ b/go.sum @@ -463,8 +463,8 @@ k8s.io/kubectl v0.28.4 h1:gWpUXW/T7aFne+rchYeHkyB8eVDl5UZce8G4X//kjUQ= k8s.io/kubectl v0.28.4/go.mod h1:CKOccVx3l+3MmDbkXtIUtibq93nN2hkDR99XDCn7c/c= k8s.io/kubelet v0.28.4 h1:Ypxy1jaFlSXFXbg/yVtFOU2ZxErBVRJfLu8+t4s7Dtw= k8s.io/kubelet v0.28.4/go.mod h1:w1wPI12liY/aeC70nqKYcNNkr6/nbyvdMB7P7wmww2o= -k8s.io/kubernetes v1.28.4 h1:aRNxs5jb8FVTtlnxeA4FSDBVKuFwA8Gw40/U2zReBYA= -k8s.io/kubernetes v1.28.4/go.mod h1:BTzDCKYAlu6LL9ITbfjwgwIrJ30hlTgbv0eXDoA/WoA= +k8s.io/kubernetes v1.28.5 h1:sqgm0Tk6lqfsfcLUxZ0rfNmtugtABSLUhCqhXtfV1C0= +k8s.io/kubernetes v1.28.5/go.mod h1:lRbvAZMgn+BrOtymmJAp85Jsa7GlL01av29axiPJ+E0= k8s.io/mount-utils v0.28.4 h1:5GOZLm2dXi2fr+MKY8hS6kdV5reXrZBiK7848O5MVD0= k8s.io/mount-utils v0.28.4/go.mod h1:ceMAZ+Nzlk8zOwN205YXXGJRGmf1o0/XIwsKnG44p0I= k8s.io/pod-security-admission v0.28.4 h1:b9d6zfKNjkawrO2gF7rBr5XoSZqPfE6UjKLNjgXYrr0= diff --git a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go index 56791b47c..1173090b1 100644 --- a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go +++ b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go @@ -1137,7 +1137,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS ElasticIndexedJob: {Default: true, PreRelease: featuregate.Beta}, - SchedulerQueueingHints: {Default: true, PreRelease: featuregate.Beta}, + SchedulerQueueingHints: {Default: false, PreRelease: featuregate.Beta}, SeccompDefault: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 diff --git a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go index 578a0c0f4..8b3384bb5 100644 --- a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go +++ b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go @@ -241,7 +241,7 @@ func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.29-4"} configs[CudaVectorAdd] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "1.0"} configs[CudaVectorAdd2] 
= Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.3"} - configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.2.7"} + configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.2.8"} configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.9-0"} configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-4"} configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-4"} diff --git a/vendor/modules.txt b/vendor/modules.txt index f91688ff4..00269a6d3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1441,7 +1441,7 @@ k8s.io/kubectl/pkg/util/podutils ## explicit; go 1.20 k8s.io/kubelet/pkg/apis k8s.io/kubelet/pkg/apis/stats/v1alpha1 -# k8s.io/kubernetes v1.28.4 +# k8s.io/kubernetes v1.28.5 ## explicit; go 1.20 k8s.io/kubernetes/pkg/api/legacyscheme k8s.io/kubernetes/pkg/api/service From 2afb58a55a8714ea0641d524fd55b6355eaf6444 Mon Sep 17 00:00:00 2001 From: Fan Shang Xiang Date: Thu, 21 Dec 2023 17:01:42 +0800 Subject: [PATCH 022/157] refactor:remove grpc wrapper --- go.mod | 2 +- pkg/blob/blob.go | 36 +++++++++-- pkg/blob/blob_test.go | 21 +++++- pkg/blobplugin/main.go | 4 +- pkg/csi-common/server.go | 118 ---------------------------------- pkg/csi-common/server_test.go | 67 ------------------- pkg/csi-common/utils.go | 30 ++++++++- pkg/csi-common/utils_test.go | 54 +++++++++++++++- test/e2e/suite_test.go | 3 +- 9 files changed, 136 insertions(+), 199 deletions(-) delete mode 100644 pkg/csi-common/server.go delete mode 100644 pkg/csi-common/server_test.go diff --git a/go.mod b/go.mod index 00f94dd79..3e959649c 100644 --- a/go.mod +++ b/go.mod @@ -132,7 +132,7 @@ require ( golang.org/x/crypto v0.17.0 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/oauth2 v0.11.0 // indirect - golang.org/x/sync v0.5.0 // indirect + golang.org/x/sync v0.5.0 golang.org/x/sys v0.15.0 // indirect golang.org/x/term v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect diff --git a/pkg/blob/blob.go b/pkg/blob/blob.go index 0f6e693e1..b3b3b4faa 100644 --- a/pkg/blob/blob.go +++ b/pkg/blob/blob.go @@ -18,6 +18,7 @@ package blob import ( "context" + "errors" "flag" "fmt" "os" @@ -31,8 +32,9 @@ import ( az "github.com/Azure/go-autorest/autorest/azure" "github.com/container-storage-interface/spec/lib/go/csi" "github.com/pborman/uuid" + "google.golang.org/grpc" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierror "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" @@ -301,17 +303,37 @@ func NewDriver(options *DriverOptions, kubeClient kubernetes.Interface, cloud *p } // Run driver initialization -func (d *Driver) Run(endpoint string, testBool bool) { +func (d *Driver) Run(ctx context.Context, endpoint string) error { versionMeta, err := GetVersionYAML(d.Name) if err != nil { klog.Fatalf("%v", err) } klog.Infof("\nDRIVER INFORMATION:\n-------------------\n%s\n\nStreaming logs below:", versionMeta) - - s := csicommon.NewNonBlockingGRPCServer() + grpcInterceptor := grpc.UnaryInterceptor(csicommon.LogGRPC) + opts := []grpc.ServerOption{ + grpcInterceptor, + } + s := grpc.NewServer(opts...) 
+ csi.RegisterIdentityServer(s, d) + csi.RegisterControllerServer(s, d) + csi.RegisterNodeServer(s, d) + + go func() { + //graceful shutdown + <-ctx.Done() + s.GracefulStop() + }() // Driver d act as IdentityServer, ControllerServer and NodeServer - s.Start(endpoint, d, d, d, testBool) - s.Wait() + listener, err := csicommon.Listen(ctx, endpoint) + if err != nil { + klog.Fatalf("failed to listen to endpoint, error: %v", err) + } + err = s.Serve(listener) + if errors.Is(err, grpc.ErrServerStopped) { + klog.Infof("gRPC server stopped serving") + return nil + } + return err } // GetContainerInfo get container info according to volume id @@ -797,7 +819,7 @@ func setAzureCredentials(ctx context.Context, kubeClient kubernetes.Interface, a Type: "Opaque", } _, err := kubeClient.CoreV1().Secrets(secretNamespace).Create(ctx, secret, metav1.CreateOptions{}) - if errors.IsAlreadyExists(err) { + if apierror.IsAlreadyExists(err) { err = nil } if err != nil { diff --git a/pkg/blob/blob_test.go b/pkg/blob/blob_test.go index 6941bd191..f5a47ec71 100644 --- a/pkg/blob/blob_test.go +++ b/pkg/blob/blob_test.go @@ -26,10 +26,12 @@ import ( "sort" "strings" "testing" + "time" "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + "golang.org/x/sync/errgroup" v1api "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -132,7 +134,15 @@ func TestRun(t *testing.T) { os.Setenv(DefaultAzureCredentialFileEnv, fakeCredFile) d := NewFakeDriver() - d.Run("tcp://127.0.0.1:0", true) + + ctx, cancelFn := context.WithCancel(context.Background()) + var routines errgroup.Group + routines.Go(func() error { return d.Run(ctx, "tcp://127.0.0.1:0") }) + time.Sleep(time.Millisecond * 500) + cancelFn() + time.Sleep(time.Millisecond * 500) + err := routines.Wait() + assert.Nil(t, err) }, }, { @@ -159,7 +169,14 @@ func TestRun(t *testing.T) { d := NewFakeDriver() d.cloud = &azure.Cloud{} d.NodeID = "" - d.Run("tcp://127.0.0.1:0", true) + ctx, cancelFn := context.WithCancel(context.Background()) + var routines errgroup.Group + routines.Go(func() error { return d.Run(ctx, "tcp://127.0.0.1:0") }) + time.Sleep(time.Millisecond * 500) + cancelFn() + time.Sleep(time.Millisecond * 500) + err := routines.Wait() + assert.Nil(t, err) }, }, } diff --git a/pkg/blobplugin/main.go b/pkg/blobplugin/main.go index 68d8d46cd..62d08fab2 100644 --- a/pkg/blobplugin/main.go +++ b/pkg/blobplugin/main.go @@ -88,7 +88,9 @@ func handle() { if driver == nil { klog.Fatalln("Failed to initialize Azure Blob Storage CSI driver") } - driver.Run(*endpoint, false) + if err := driver.Run(context.Background(), *endpoint); err != nil { + klog.Fatalf("Failed to run Azure Blob Storage CSI driver: %v", err) + } } func exportMetrics() { diff --git a/pkg/csi-common/server.go b/pkg/csi-common/server.go deleted file mode 100644 index d046b8982..000000000 --- a/pkg/csi-common/server.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package csicommon - -import ( - "net" - "os" - "sync" - "time" - - "google.golang.org/grpc" - "k8s.io/klog/v2" - - "github.com/container-storage-interface/spec/lib/go/csi" -) - -// Defines Non blocking GRPC server interfaces -type NonBlockingGRPCServer interface { - // Start services at the endpoint - Start(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer, testMode bool) - // Waits for the service to stop - Wait() - // Stops the service gracefully - Stop() - // Stops the service forcefully - ForceStop() -} - -func NewNonBlockingGRPCServer() NonBlockingGRPCServer { - return &nonBlockingGRPCServer{} -} - -// NonBlocking server -type nonBlockingGRPCServer struct { - wg sync.WaitGroup - server *grpc.Server -} - -func (s *nonBlockingGRPCServer) Start(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer, testMode bool) { - s.wg.Add(1) - go s.serve(endpoint, ids, cs, ns, testMode) -} - -func (s *nonBlockingGRPCServer) Wait() { - s.wg.Wait() -} - -func (s *nonBlockingGRPCServer) Stop() { - s.server.GracefulStop() -} - -func (s *nonBlockingGRPCServer) ForceStop() { - s.server.Stop() -} - -func (s *nonBlockingGRPCServer) serve(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer, testMode bool) { - proto, addr, err := ParseEndpoint(endpoint) - if err != nil { - klog.Fatal(err.Error()) - } - - if proto == "unix" { - addr = "/" + addr - if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { - klog.Fatalf("Failed to remove %s, error: %s", addr, err.Error()) - } - } - - listener, err := net.Listen(proto, addr) - if err != nil { - klog.Fatalf("Failed to listen: %v", err) - } - - opts := []grpc.ServerOption{ - grpc.UnaryInterceptor(logGRPC), - } - server := grpc.NewServer(opts...) - s.server = server - - if ids != nil { - csi.RegisterIdentityServer(server, ids) - } - if cs != nil { - csi.RegisterControllerServer(server, cs) - } - if ns != nil { - csi.RegisterNodeServer(server, ns) - } - - // Used to stop the server while running tests - if testMode { - s.wg.Done() - go func() { - // make sure Serve() is called - s.wg.Wait() - time.Sleep(time.Millisecond * 1000) - s.server.GracefulStop() - }() - } - klog.Infof("Listening for connections on address: %#v", listener.Addr()) - if err := server.Serve(listener); err != nil { - klog.Errorf("Listening for connections on address: %#v, error: %v", listener.Addr(), err) - } -} diff --git a/pkg/csi-common/server_test.go b/pkg/csi-common/server_test.go deleted file mode 100644 index 18e07712c..000000000 --- a/pkg/csi-common/server_test.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package csicommon - -import ( - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "google.golang.org/grpc" -) - -func TestNewNonBlockingGRPCServer(t *testing.T) { - s := NewNonBlockingGRPCServer() - assert.NotNil(t, s) -} - -func TestStart(_ *testing.T) { - s := NewNonBlockingGRPCServer() - // sleep a while to avoid race condition in unit test - time.Sleep(time.Millisecond * 500) - s.Start("tcp://127.0.0.1:0", nil, nil, nil, true) - time.Sleep(time.Millisecond * 500) -} - -func TestServe(_ *testing.T) { - s := nonBlockingGRPCServer{} - s.server = grpc.NewServer() - s.wg = sync.WaitGroup{} - //need to add one here as the actual also requires one. - s.wg.Add(1) - s.serve("tcp://127.0.0.1:0", nil, nil, nil, true) -} - -func TestWait(_ *testing.T) { - s := nonBlockingGRPCServer{} - s.server = grpc.NewServer() - s.wg = sync.WaitGroup{} - s.Wait() -} - -func TestStop(_ *testing.T) { - s := nonBlockingGRPCServer{} - s.server = grpc.NewServer() - s.Stop() -} - -func TestForceStop(_ *testing.T) { - s := nonBlockingGRPCServer{} - s.server = grpc.NewServer() - s.ForceStop() -} diff --git a/pkg/csi-common/utils.go b/pkg/csi-common/utils.go index b85531cfe..d9536010e 100644 --- a/pkg/csi-common/utils.go +++ b/pkg/csi-common/utils.go @@ -18,6 +18,9 @@ package csicommon import ( "fmt" + "net" + "os" + "runtime" "strings" "github.com/container-storage-interface/spec/lib/go/csi" @@ -37,6 +40,31 @@ func ParseEndpoint(ep string) (string, string, error) { return "", "", fmt.Errorf("Invalid endpoint: %v", ep) } +func Listen(ctx context.Context, endpoint string) (net.Listener, error) { + proto, addr, err := ParseEndpoint(endpoint) + if err != nil { + klog.Errorf(err.Error()) + return nil, err + } + + if proto == "unix" { + if runtime.GOOS != "windows" { + addr = "/" + addr + } + if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { + klog.Errorf("Failed to remove %s, error: %s", addr, err.Error()) + return nil, err + } + } + listenConfig := net.ListenConfig{} + listener, err := listenConfig.Listen(ctx, proto, addr) + if err != nil { + klog.Errorf("Failed to listen: %v", err) + return nil, err + } + return listener, nil +} + func NewVolumeCapabilityAccessMode(mode csi.VolumeCapability_AccessMode_Mode) *csi.VolumeCapability_AccessMode { return &csi.VolumeCapability_AccessMode{Mode: mode} } @@ -70,7 +98,7 @@ func getLogLevel(method string) int32 { return 2 } -func logGRPC(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { +func LogGRPC(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { level := klog.Level(getLogLevel(info.FullMethod)) klog.V(level).Infof("GRPC call: %s", info.FullMethod) klog.V(level).Infof("GRPC request: %s", protosanitizer.StripSecrets(req)) diff --git a/pkg/csi-common/utils_test.go b/pkg/csi-common/utils_test.go index 6769cf18f..8a75286c5 100644 --- a/pkg/csi-common/utils_test.go +++ b/pkg/csi-common/utils_test.go @@ -22,6 +22,7 @@ import ( "flag" "io" "os" + "runtime" "testing" "github.com/container-storage-interface/spec/lib/go/csi" @@ -123,7 +124,7 @@ func TestLogGRPC(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { // EXECUTE - _, _ = logGRPC(context.Background(), test.req, &info, handler) + _, _ = LogGRPC(context.Background(), test.req, &info, handler) klog.Flush() // ASSERT @@ -234,3 +235,54 @@ func TestMain(m *testing.M) { klog.SetOutput(io.Discard) os.Exit(m.Run()) } + +func TestListen(t 
*testing.T) { + tests := []struct { + name string + endpoint string + filePath string + wantErr bool + skip bool + }{ + { + name: "unix socket", + endpoint: "unix:///tmp/csi.sock", + filePath: "/tmp/csi.sock", + wantErr: false, + skip: runtime.GOOS == "windows", + }, + { + name: "tcp socket", + endpoint: "tcp://127.0.0.1:0", + wantErr: false, + }, + { + name: "invalid endpoint", + endpoint: "invalid://", + wantErr: true, + }, + { + name: "invalid unix socket", + endpoint: "unix://does/not/exist", + wantErr: true, + }, + } + for _, tt := range tests { + if tt.skip { + continue + } + t.Run(tt.name, func(t *testing.T) { + got, err := Listen(context.Background(), tt.endpoint) + if (err != nil) != tt.wantErr { + t.Errorf("Listen() error = %v, wantErr %v", err, tt.wantErr) + return + } + if err == nil { + got.Close() + if tt.filePath != "" { + os.Remove(tt.filePath) + } + } + }) + } +} diff --git a/test/e2e/suite_test.go b/test/e2e/suite_test.go index c4604f0a7..8c6b3c641 100644 --- a/test/e2e/suite_test.go +++ b/test/e2e/suite_test.go @@ -158,7 +158,8 @@ var _ = ginkgo.SynchronizedBeforeSuite(func(ctx ginkgo.SpecContext) []byte { gomega.Expect(err).NotTo(gomega.HaveOccurred()) blobDriver = blob.NewDriver(&driverOptions, kubeClient, cloud) go func() { - blobDriver.Run(fmt.Sprintf("unix:///tmp/csi-%s.sock", uuid.NewUUID().String()), false) + err := blobDriver.Run(context.Background(), fmt.Sprintf("unix:///tmp/csi-%s.sock", uuid.NewUUID().String())) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() }) From beb1c17a669d8e811ef2ddc4719bb8e40f5e52c0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Dec 2023 16:17:51 +0000 Subject: [PATCH 023/157] chore(deps): bump google.golang.org/protobuf from 1.31.0 to 1.32.0 Bumps google.golang.org/protobuf from 1.31.0 to 1.32.0. --- updated-dependencies: - dependency-name: google.golang.org/protobuf dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 +- .../protobuf/encoding/protojson/decode.go | 38 +- .../protobuf/encoding/protojson/doc.go | 2 +- .../protobuf/encoding/protojson/encode.go | 39 +- .../encoding/protojson/well_known_types.go | 55 +- .../protobuf/encoding/prototext/decode.go | 8 +- .../protobuf/encoding/prototext/encode.go | 4 +- .../protobuf/encoding/protowire/wire.go | 28 +- .../protobuf/internal/descfmt/stringer.go | 183 +- .../protobuf/internal/filedesc/desc.go | 47 +- .../protobuf/internal/genid/descriptor_gen.go | 212 +- .../protobuf/internal/impl/codec_gen.go | 113 +- .../protobuf/internal/impl/legacy_message.go | 19 +- .../protobuf/internal/impl/message.go | 17 +- .../protobuf/internal/impl/pointer_reflect.go | 36 + .../protobuf/internal/impl/pointer_unsafe.go | 40 + ...ings_unsafe.go => strings_unsafe_go120.go} | 4 +- .../internal/strs/strings_unsafe_go121.go | 74 + .../protobuf/internal/version/version.go | 2 +- .../protobuf/proto/decode.go | 2 +- .../google.golang.org/protobuf/proto/doc.go | 58 +- .../protobuf/proto/encode.go | 2 +- .../protobuf/proto/extension.go | 2 +- .../google.golang.org/protobuf/proto/merge.go | 2 +- .../google.golang.org/protobuf/proto/proto.go | 18 +- .../protobuf/reflect/protodesc/desc.go | 29 +- .../protobuf/reflect/protodesc/desc_init.go | 24 + .../protobuf/reflect/protodesc/editions.go | 177 ++ .../reflect/protodesc/editions_defaults.binpb | 4 + .../protobuf/reflect/protodesc/proto.go | 18 +- .../protobuf/reflect/protoreflect/proto.go | 83 +- .../reflect/protoreflect/source_gen.go | 62 +- .../protobuf/reflect/protoreflect/type.go | 44 +- .../protobuf/reflect/protoreflect/value.go | 24 +- .../reflect/protoreflect/value_equal.go | 8 +- .../reflect/protoreflect/value_union.go | 44 +- ...{value_unsafe.go => value_unsafe_go120.go} | 4 +- .../protoreflect/value_unsafe_go121.go | 87 + .../reflect/protoregistry/registry.go | 24 +- .../types/descriptorpb/descriptor.pb.go | 2467 ++++++++++++----- .../protobuf/types/dynamicpb/dynamic.go | 41 +- .../protobuf/types/dynamicpb/types.go | 25 +- .../protobuf/types/known/anypb/any.pb.go | 3 +- vendor/modules.txt | 4 +- 45 files changed, 3092 insertions(+), 1091 deletions(-) rename vendor/google.golang.org/protobuf/internal/strs/{strings_unsafe.go => strings_unsafe_go120.go} (96%) create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/editions.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb rename vendor/google.golang.org/protobuf/reflect/protoreflect/{value_unsafe.go => value_unsafe_go120.go} (97%) create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go diff --git a/go.mod b/go.mod index 3549aebdc..a56afdd05 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( github.com/stretchr/testify v1.8.4 golang.org/x/net v0.19.0 google.golang.org/grpc v1.59.0 - google.golang.org/protobuf v1.31.0 + google.golang.org/protobuf v1.32.0 k8s.io/api v0.28.4 k8s.io/apimachinery v0.28.4 k8s.io/client-go v0.28.4 diff --git a/go.sum b/go.sum index 9132c9cf6..a6fcad5ed 100644 --- a/go.sum +++ b/go.sum @@ -413,8 +413,8 @@ google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf 
v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index 5f28148d8..f47902371 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -11,6 +11,7 @@ import ( "strconv" "strings" + "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/json" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" @@ -23,7 +24,7 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) -// Unmarshal reads the given []byte into the given proto.Message. +// Unmarshal reads the given []byte into the given [proto.Message]. // The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) @@ -37,7 +38,7 @@ type UnmarshalOptions struct { // required fields will not return an error. AllowPartial bool - // If DiscardUnknown is set, unknown fields are ignored. + // If DiscardUnknown is set, unknown fields and enum name values are ignored. DiscardUnknown bool // Resolver is used for looking up types when unmarshaling @@ -47,9 +48,13 @@ type UnmarshalOptions struct { protoregistry.MessageTypeResolver protoregistry.ExtensionTypeResolver } + + // RecursionLimit limits how deeply messages may be nested. + // If zero, a default limit is applied. + RecursionLimit int } -// Unmarshal reads the given []byte and populates the given proto.Message +// Unmarshal reads the given []byte and populates the given [proto.Message] // using options in the UnmarshalOptions object. // It will clear the message first before setting the fields. // If it returns an error, the given message may be partially set. @@ -67,6 +72,9 @@ func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { if o.Resolver == nil { o.Resolver = protoregistry.GlobalTypes } + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } dec := decoder{json.NewDecoder(b), o} if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { @@ -114,6 +122,10 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { // unmarshalMessage unmarshals a message into the given protoreflect.Message. 
func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error { + d.opts.RecursionLimit-- + if d.opts.RecursionLimit < 0 { + return errors.New("exceeded max recursion depth") + } if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil { return unmarshal(d, m) } @@ -266,7 +278,9 @@ func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.Field if err != nil { return err } - m.Set(fd, val) + if val.IsValid() { + m.Set(fd, val) + } return nil } @@ -329,7 +343,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect. } case protoreflect.EnumKind: - if v, ok := unmarshalEnum(tok, fd); ok { + if v, ok := unmarshalEnum(tok, fd, d.opts.DiscardUnknown); ok { return v, nil } @@ -474,7 +488,7 @@ func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) { return protoreflect.ValueOfBytes(b), true } -func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.Value, bool) { +func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor, discardUnknown bool) (protoreflect.Value, bool) { switch tok.Kind() { case json.String: // Lookup EnumNumber based on name. @@ -482,6 +496,9 @@ func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflec if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil { return protoreflect.ValueOfEnum(enumVal.Number()), true } + if discardUnknown { + return protoreflect.Value{}, true + } case json.Number: if n, ok := tok.Int(32); ok { @@ -542,7 +559,9 @@ func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDesc if err != nil { return err } - list.Append(val) + if val.IsValid() { + list.Append(val) + } } } @@ -609,8 +628,9 @@ Loop: if err != nil { return err } - - mmap.Set(pkey, pval) + if pval.IsValid() { + mmap.Set(pkey, pval) + } } return nil diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go index 21d5d2cb1..ae71007c1 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go @@ -6,6 +6,6 @@ // format. It follows the guide at // https://protobuf.dev/programming-guides/proto3#json. // -// This package produces a different output than the standard "encoding/json" +// This package produces a different output than the standard [encoding/json] // package, which does not operate correctly on protocol buffer messages. package protojson diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index 66b95870e..3f75098b6 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -31,7 +31,7 @@ func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } -// Marshal writes the given proto.Message in JSON format using default options. +// Marshal writes the given [proto.Message] in JSON format using default options. // Do not depend on the output being stable. It may change over time across // different versions of the program. func Marshal(m proto.Message) ([]byte, error) { @@ -81,6 +81,25 @@ type MarshalOptions struct { // â•šâ•â•â•â•â•â•â•â•§â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â• EmitUnpopulated bool + // EmitDefaultValues specifies whether to emit default-valued primitive fields, + // empty lists, and empty maps. 
The fields affected are as follows: + // â•”â•â•â•â•â•â•â•â•¤â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•— + // â•‘ JSON │ Protobuf field â•‘ + // â• â•â•â•â•â•â•â•â•ªâ•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•£ + // â•‘ false │ non-optional scalar boolean fields â•‘ + // â•‘ 0 │ non-optional scalar numeric fields â•‘ + // â•‘ "" │ non-optional scalar string/byte fields â•‘ + // â•‘ [] │ empty repeated fields â•‘ + // â•‘ {} │ empty map fields â•‘ + // â•šâ•â•â•â•â•â•â•â•§â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â• + // + // Behaves similarly to EmitUnpopulated, but does not emit "null"-value fields, + // i.e. presence-sensing fields that are omitted will remain omitted to preserve + // presence-sensing. + // EmitUnpopulated takes precedence over EmitDefaultValues since the former generates + // a strict superset of the latter. + EmitDefaultValues bool + // Resolver is used for looking up types when expanding google.protobuf.Any // messages. If nil, this defaults to using protoregistry.GlobalTypes. Resolver interface { @@ -102,7 +121,7 @@ func (o MarshalOptions) Format(m proto.Message) string { return string(b) } -// Marshal marshals the given proto.Message in the JSON format using options in +// Marshal marshals the given [proto.Message] in the JSON format using options in // MarshalOptions. Do not depend on the output being stable. It may change over // time across different versions of the program. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { @@ -178,7 +197,11 @@ func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protorefl // unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range // method to additionally iterate over unpopulated fields. -type unpopulatedFieldRanger struct{ protoreflect.Message } +type unpopulatedFieldRanger struct { + protoreflect.Message + + skipNull bool +} func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { fds := m.Descriptor().Fields() @@ -192,6 +215,9 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil if isProto2Scalar || isSingularMessage { + if m.skipNull { + continue + } v = protoreflect.Value{} // use invalid value to emit null } if !f(fd, v) { @@ -217,8 +243,11 @@ func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error { defer e.EndObject() var fields order.FieldRanger = m - if e.opts.EmitUnpopulated { - fields = unpopulatedFieldRanger{m} + switch { + case e.opts.EmitUnpopulated: + fields = unpopulatedFieldRanger{Message: m, skipNull: false} + case e.opts.EmitDefaultValues: + fields = unpopulatedFieldRanger{Message: m, skipNull: true} } if typeURL != "" { fields = typeURLFieldRanger{fields, typeURL} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index 6c37d4174..25329b769 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -176,7 +176,7 @@ func (d decoder) unmarshalAny(m protoreflect.Message) error { // Use another decoder to parse the unread bytes for @type field. 
This // avoids advancing a read from current decoder because the current JSON // object may contain the fields of the embedded type. - dec := decoder{d.Clone(), UnmarshalOptions{}} + dec := decoder{d.Clone(), UnmarshalOptions{RecursionLimit: d.opts.RecursionLimit}} tok, err := findTypeURL(dec) switch err { case errEmptyObject: @@ -308,48 +308,25 @@ Loop: // array) in order to advance the read to the next JSON value. It relies on // the decoder returning an error if the types are not in valid sequence. func (d decoder) skipJSONValue() error { - tok, err := d.Read() - if err != nil { - return err - } - // Only need to continue reading for objects and arrays. - switch tok.Kind() { - case json.ObjectOpen: - for { - tok, err := d.Read() - if err != nil { - return err - } - switch tok.Kind() { - case json.ObjectClose: - return nil - case json.Name: - // Skip object field value. - if err := d.skipJSONValue(); err != nil { - return err - } - } + var open int + for { + tok, err := d.Read() + if err != nil { + return err } - - case json.ArrayOpen: - for { - tok, err := d.Peek() - if err != nil { - return err - } - switch tok.Kind() { - case json.ArrayClose: - d.Read() - return nil - default: - // Skip array item. - if err := d.skipJSONValue(); err != nil { - return err - } + switch tok.Kind() { + case json.ObjectClose, json.ArrayClose: + open-- + case json.ObjectOpen, json.ArrayOpen: + open++ + if open > d.opts.RecursionLimit { + return errors.New("exceeded max recursion depth") } } + if open == 0 { + return nil + } } - return nil } // unmarshalAnyValue unmarshals the given custom-type message from the JSON diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 4921b2d4a..a45f112bc 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -21,7 +21,7 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) -// Unmarshal reads the given []byte into the given proto.Message. +// Unmarshal reads the given []byte into the given [proto.Message]. // The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) @@ -51,7 +51,7 @@ type UnmarshalOptions struct { } } -// Unmarshal reads the given []byte and populates the given proto.Message +// Unmarshal reads the given []byte and populates the given [proto.Message] // using options in the UnmarshalOptions object. // The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { @@ -739,7 +739,9 @@ func (d decoder) skipValue() error { case text.ListClose: return nil case text.MessageOpen: - return d.skipMessageValue() + if err := d.skipMessageValue(); err != nil { + return err + } default: // Skip items. 
This will not validate whether skipped values are // of the same type or not, same behavior as C++ diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 722a7b41d..95967e811 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -33,7 +33,7 @@ func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } -// Marshal writes the given proto.Message in textproto format using default +// Marshal writes the given [proto.Message] in textproto format using default // options. Do not depend on the output being stable. It may change over time // across different versions of the program. func Marshal(m proto.Message) ([]byte, error) { @@ -97,7 +97,7 @@ func (o MarshalOptions) Format(m proto.Message) string { return string(b) } -// Marshal writes the given proto.Message in textproto format using options in +// Marshal writes the given [proto.Message] in textproto format using options in // MarshalOptions object. Do not depend on the output being stable. It may // change over time across different versions of the program. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index f4b4686cf..e942bc983 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -6,7 +6,7 @@ // See https://protobuf.dev/programming-guides/encoding. // // For marshaling and unmarshaling entire protobuf messages, -// use the "google.golang.org/protobuf/proto" package instead. +// use the [google.golang.org/protobuf/proto] package instead. package protowire import ( @@ -87,7 +87,7 @@ func ParseError(n int) error { // ConsumeField parses an entire field record (both tag and value) and returns // the field number, the wire type, and the total length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). // // The total length includes the tag header and the end group marker (if the // field is a group). @@ -104,8 +104,8 @@ func ConsumeField(b []byte) (Number, Type, int) { } // ConsumeFieldValue parses a field value and returns its length. -// This assumes that the field Number and wire Type have already been parsed. -// This returns a negative length upon an error (see ParseError). +// This assumes that the field [Number] and wire [Type] have already been parsed. +// This returns a negative length upon an error (see [ParseError]). // // When parsing a group, the length includes the end group marker and // the end group is verified to match the starting field number. @@ -164,7 +164,7 @@ func AppendTag(b []byte, num Number, typ Type) []byte { } // ConsumeTag parses b as a varint-encoded tag, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeTag(b []byte) (Number, Type, int) { v, n := ConsumeVarint(b) if n < 0 { @@ -263,7 +263,7 @@ func AppendVarint(b []byte, v uint64) []byte { } // ConsumeVarint parses b as a varint-encoded uint64, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). 
func ConsumeVarint(b []byte) (v uint64, n int) { var y uint64 if len(b) <= 0 { @@ -384,7 +384,7 @@ func AppendFixed32(b []byte, v uint32) []byte { } // ConsumeFixed32 parses b as a little-endian uint32, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeFixed32(b []byte) (v uint32, n int) { if len(b) < 4 { return 0, errCodeTruncated @@ -412,7 +412,7 @@ func AppendFixed64(b []byte, v uint64) []byte { } // ConsumeFixed64 parses b as a little-endian uint64, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeFixed64(b []byte) (v uint64, n int) { if len(b) < 8 { return 0, errCodeTruncated @@ -432,7 +432,7 @@ func AppendBytes(b []byte, v []byte) []byte { } // ConsumeBytes parses b as a length-prefixed bytes value, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeBytes(b []byte) (v []byte, n int) { m, n := ConsumeVarint(b) if n < 0 { @@ -456,7 +456,7 @@ func AppendString(b []byte, v string) []byte { } // ConsumeString parses b as a length-prefixed bytes value, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeString(b []byte) (v string, n int) { bb, n := ConsumeBytes(b) return string(bb), n @@ -471,7 +471,7 @@ func AppendGroup(b []byte, num Number, v []byte) []byte { // ConsumeGroup parses b as a group value until the trailing end group marker, // and verifies that the end marker matches the provided num. The value v // does not contain the end marker, while the length does contain the end marker. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeGroup(num Number, b []byte) (v []byte, n int) { n = ConsumeFieldValue(num, StartGroupType, b) if n < 0 { @@ -495,8 +495,8 @@ func SizeGroup(num Number, n int) int { return n + SizeTag(num) } -// DecodeTag decodes the field Number and wire Type from its unified form. -// The Number is -1 if the decoded field number overflows int32. +// DecodeTag decodes the field [Number] and wire [Type] from its unified form. +// The [Number] is -1 if the decoded field number overflows int32. // Other than overflow, this does not check for field number validity. func DecodeTag(x uint64) (Number, Type) { // NOTE: MessageSet allows for larger field numbers than normal. @@ -506,7 +506,7 @@ func DecodeTag(x uint64) (Number, Type) { return Number(x >> 3), Type(x & 7) } -// EncodeTag encodes the field Number and wire Type into its unified form. +// EncodeTag encodes the field [Number] and wire [Type] into its unified form. 
func EncodeTag(num Number, typ Type) uint64 { return uint64(num)<<3 | uint64(typ&7) } diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index db5248e1b..a45625c8d 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -83,7 +83,13 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { case protoreflect.FileImports: for i := 0; i < vs.Len(); i++ { var rs records - rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak") + rv := reflect.ValueOf(vs.Get(i)) + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("IsPublic"), "IsPublic"}, + {rv.MethodByName("IsWeak"), "IsWeak"}, + }...) ss = append(ss, "{"+rs.Join()+"}") } return start + joinStrings(ss, allowMulti) + end @@ -92,34 +98,26 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { for i := 0; i < vs.Len(); i++ { m := reflect.ValueOf(vs).MethodByName("Get") v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface() - ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue)) + ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue, nil)) } return start + joinStrings(ss, allowMulti && isEnumValue) + end } } -// descriptorAccessors is a list of accessors to print for each descriptor. -// -// Do not print all accessors since some contain redundant information, -// while others are pointers that we do not want to follow since the descriptor -// is actually a cyclic graph. -// -// Using a list allows us to print the accessors in a sensible order. 
-var descriptorAccessors = map[reflect.Type][]string{ - reflect.TypeOf((*protoreflect.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, - reflect.TypeOf((*protoreflect.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, - reflect.TypeOf((*protoreflect.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, - reflect.TypeOf((*protoreflect.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt - reflect.TypeOf((*protoreflect.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, - reflect.TypeOf((*protoreflect.EnumValueDescriptor)(nil)).Elem(): {"Number"}, - reflect.TypeOf((*protoreflect.ServiceDescriptor)(nil)).Elem(): {"Methods"}, - reflect.TypeOf((*protoreflect.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, +type methodAndName struct { + method reflect.Value + name string } func FormatDesc(s fmt.State, r rune, t protoreflect.Descriptor) { - io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) + io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')), nil)) } -func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { + +func InternalFormatDescOptForTesting(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string { + return formatDescOpt(t, isRoot, allowMulti, record) +} + +func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string { rv := reflect.ValueOf(t) rt := rv.MethodByName("ProtoType").Type().In(0) @@ -129,26 +127,60 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { } _, isFile := t.(protoreflect.FileDescriptor) - rs := records{allowMulti: allowMulti} + rs := records{ + allowMulti: allowMulti, + record: record, + } if t.IsPlaceholder() { if isFile { - rs.Append(rv, "Path", "Package", "IsPlaceholder") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"}, + }...) } else { - rs.Append(rv, "FullName", "IsPlaceholder") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("FullName"), "FullName"}, + {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"}, + }...) } } else { switch { case isFile: - rs.Append(rv, "Syntax") + rs.Append(rv, methodAndName{rv.MethodByName("Syntax"), "Syntax"}) case isRoot: - rs.Append(rv, "Syntax", "FullName") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Syntax"), "Syntax"}, + {rv.MethodByName("FullName"), "FullName"}, + }...) 
default: - rs.Append(rv, "Name") + rs.Append(rv, methodAndName{rv.MethodByName("Name"), "Name"}) } switch t := t.(type) { case protoreflect.FieldDescriptor: - for _, s := range descriptorAccessors[rt] { - switch s { + accessors := []methodAndName{ + {rv.MethodByName("Number"), "Number"}, + {rv.MethodByName("Cardinality"), "Cardinality"}, + {rv.MethodByName("Kind"), "Kind"}, + {rv.MethodByName("HasJSONName"), "HasJSONName"}, + {rv.MethodByName("JSONName"), "JSONName"}, + {rv.MethodByName("HasPresence"), "HasPresence"}, + {rv.MethodByName("IsExtension"), "IsExtension"}, + {rv.MethodByName("IsPacked"), "IsPacked"}, + {rv.MethodByName("IsWeak"), "IsWeak"}, + {rv.MethodByName("IsList"), "IsList"}, + {rv.MethodByName("IsMap"), "IsMap"}, + {rv.MethodByName("MapKey"), "MapKey"}, + {rv.MethodByName("MapValue"), "MapValue"}, + {rv.MethodByName("HasDefault"), "HasDefault"}, + {rv.MethodByName("Default"), "Default"}, + {rv.MethodByName("ContainingOneof"), "ContainingOneof"}, + {rv.MethodByName("ContainingMessage"), "ContainingMessage"}, + {rv.MethodByName("Message"), "Message"}, + {rv.MethodByName("Enum"), "Enum"}, + } + for _, s := range accessors { + switch s.name { case "MapKey": if k := t.MapKey(); k != nil { rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()}) @@ -157,20 +189,20 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { if v := t.MapValue(); v != nil { switch v.Kind() { case protoreflect.EnumKind: - rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())}) + rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Enum().FullName())}) case protoreflect.MessageKind, protoreflect.GroupKind: - rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())}) + rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Message().FullName())}) default: - rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()}) + rs.AppendRecs("MapValue", [2]string{"MapValue", v.Kind().String()}) } } case "ContainingOneof": if od := t.ContainingOneof(); od != nil { - rs.recs = append(rs.recs, [2]string{"Oneof", string(od.Name())}) + rs.AppendRecs("ContainingOneof", [2]string{"Oneof", string(od.Name())}) } case "ContainingMessage": if t.IsExtension() { - rs.recs = append(rs.recs, [2]string{"Extendee", string(t.ContainingMessage().FullName())}) + rs.AppendRecs("ContainingMessage", [2]string{"Extendee", string(t.ContainingMessage().FullName())}) } case "Message": if !t.IsMap() { @@ -187,13 +219,61 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { ss = append(ss, string(fs.Get(i).Name())) } if len(ss) > 0 { - rs.recs = append(rs.recs, [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) + rs.AppendRecs("Fields", [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) } - default: - rs.Append(rv, descriptorAccessors[rt]...) + + case protoreflect.FileDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("Imports"), "Imports"}, + {rv.MethodByName("Messages"), "Messages"}, + {rv.MethodByName("Enums"), "Enums"}, + {rv.MethodByName("Extensions"), "Extensions"}, + {rv.MethodByName("Services"), "Services"}, + }...) 
+ + case protoreflect.MessageDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("IsMapEntry"), "IsMapEntry"}, + {rv.MethodByName("Fields"), "Fields"}, + {rv.MethodByName("Oneofs"), "Oneofs"}, + {rv.MethodByName("ReservedNames"), "ReservedNames"}, + {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + {rv.MethodByName("RequiredNumbers"), "RequiredNumbers"}, + {rv.MethodByName("ExtensionRanges"), "ExtensionRanges"}, + {rv.MethodByName("Messages"), "Messages"}, + {rv.MethodByName("Enums"), "Enums"}, + {rv.MethodByName("Extensions"), "Extensions"}, + }...) + + case protoreflect.EnumDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Values"), "Values"}, + {rv.MethodByName("ReservedNames"), "ReservedNames"}, + {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + }...) + + case protoreflect.EnumValueDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Number"), "Number"}, + }...) + + case protoreflect.ServiceDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Methods"), "Methods"}, + }...) + + case protoreflect.MethodDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Input"), "Input"}, + {rv.MethodByName("Output"), "Output"}, + {rv.MethodByName("IsStreamingClient"), "IsStreamingClient"}, + {rv.MethodByName("IsStreamingServer"), "IsStreamingServer"}, + }...) } - if rv.MethodByName("GoType").IsValid() { - rs.Append(rv, "GoType") + if m := rv.MethodByName("GoType"); m.IsValid() { + rs.Append(rv, methodAndName{m, "GoType"}) } } return start + rs.Join() + end @@ -202,19 +282,34 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { type records struct { recs [][2]string allowMulti bool + + // record is a function that will be called for every Append() or + // AppendRecs() call, to be used for testing with the + // InternalFormatDescOptForTesting function. 
+ record func(string) } -func (rs *records) Append(v reflect.Value, accessors ...string) { +func (rs *records) AppendRecs(fieldName string, newRecs [2]string) { + if rs.record != nil { + rs.record(fieldName) + } + rs.recs = append(rs.recs, newRecs) +} + +func (rs *records) Append(v reflect.Value, accessors ...methodAndName) { for _, a := range accessors { + if rs.record != nil { + rs.record(a.name) + } var rv reflect.Value - if m := v.MethodByName(a); m.IsValid() { - rv = m.Call(nil)[0] + if a.method.IsValid() { + rv = a.method.Call(nil)[0] } if v.Kind() == reflect.Struct && !rv.IsValid() { - rv = v.FieldByName(a) + rv = v.FieldByName(a.name) } if !rv.IsValid() { - panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a)) + panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a.name)) } if _, ok := rv.Interface().(protoreflect.Value); ok { rv = rv.MethodByName("Interface").Call(nil)[0] @@ -261,7 +356,7 @@ func (rs *records) Append(v reflect.Value, accessors ...string) { default: s = fmt.Sprint(v) } - rs.recs = append(rs.recs, [2]string{a, s}) + rs.recs = append(rs.recs, [2]string{a.name, s}) } } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 7c3689bae..193c68e8f 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -21,11 +21,26 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) +// Edition is an Enum for proto2.Edition +type Edition int32 + +// These values align with the value of Enum in descriptor.proto which allows +// direct conversion between the proto enum and this enum. +const ( + EditionUnknown Edition = 0 + EditionProto2 Edition = 998 + EditionProto3 Edition = 999 + Edition2023 Edition = 1000 + EditionUnsupported Edition = 100000 +) + // The types in this file may have a suffix: // • L0: Contains fields common to all descriptors (except File) and // must be initialized up front. // • L1: Contains fields specific to a descriptor and -// must be initialized up front. +// must be initialized up front. If the associated proto uses Editions, the +// Editions features must always be resolved. If not explicitly set, the +// appropriate default must be resolved and set. // • L2: Contains fields that are lazily initialized when constructing // from the raw file descriptor. When constructing as a literal, the L2 // fields must be initialized up front. 
@@ -44,6 +59,7 @@ type ( } FileL1 struct { Syntax protoreflect.Syntax + Edition Edition // Only used if Syntax == Editions Path string Package protoreflect.FullName @@ -51,12 +67,35 @@ type ( Messages Messages Extensions Extensions Services Services + + EditionFeatures FileEditionFeatures } FileL2 struct { Options func() protoreflect.ProtoMessage Imports FileImports Locations SourceLocations } + + FileEditionFeatures struct { + // IsFieldPresence is true if field_presence is EXPLICIT + // https://protobuf.dev/editions/features/#field_presence + IsFieldPresence bool + // IsOpenEnum is true if enum_type is OPEN + // https://protobuf.dev/editions/features/#enum_type + IsOpenEnum bool + // IsPacked is true if repeated_field_encoding is PACKED + // https://protobuf.dev/editions/features/#repeated_field_encoding + IsPacked bool + // IsUTF8Validated is true if utf_validation is VERIFY + // https://protobuf.dev/editions/features/#utf8_validation + IsUTF8Validated bool + // IsDelimitedEncoded is true if message_encoding is DELIMITED + // https://protobuf.dev/editions/features/#message_encoding + IsDelimitedEncoded bool + // IsJSONCompliant is true if json_format is ALLOW + // https://protobuf.dev/editions/features/#json_format + IsJSONCompliant bool + } ) func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd } @@ -210,6 +249,9 @@ type ( ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor Message protoreflect.MessageDescriptor + + // Edition features. + Presence bool } Oneof struct { @@ -273,6 +315,9 @@ func (fd *Field) HasJSONName() bool { return fd.L1.StringNam func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } func (fd *Field) HasPresence() bool { + if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { + return fd.L1.Presence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil + } return fd.L1.Cardinality != protoreflect.Repeated && (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) } func (fd *Field) HasOptionalKeyword() bool { diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 136f1b215..8f94230ea 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -12,6 +12,12 @@ import ( const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto" +// Full and short names for google.protobuf.Edition. +const ( + Edition_enum_fullname = "google.protobuf.Edition" + Edition_enum_name = "Edition" +) + // Names for google.protobuf.FileDescriptorSet. const ( FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" @@ -81,7 +87,7 @@ const ( FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 - FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 13 + FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 14 ) // Names for google.protobuf.DescriptorProto. 
@@ -184,10 +190,12 @@ const ( const ( ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" ExtensionRangeOptions_Declaration_field_name protoreflect.Name = "declaration" + ExtensionRangeOptions_Features_field_name protoreflect.Name = "features" ExtensionRangeOptions_Verification_field_name protoreflect.Name = "verification" ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" ExtensionRangeOptions_Declaration_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.declaration" + ExtensionRangeOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.features" ExtensionRangeOptions_Verification_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.verification" ) @@ -195,6 +203,7 @@ const ( const ( ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ExtensionRangeOptions_Declaration_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Features_field_number protoreflect.FieldNumber = 50 ExtensionRangeOptions_Verification_field_number protoreflect.FieldNumber = 3 ) @@ -212,29 +221,26 @@ const ( // Field names for google.protobuf.ExtensionRangeOptions.Declaration. const ( - ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" - ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" - ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" - ExtensionRangeOptions_Declaration_IsRepeated_field_name protoreflect.Name = "is_repeated" - ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" - ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" + ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" + ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" + ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" + ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" + ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" - ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" - ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" - ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type" - ExtensionRangeOptions_Declaration_IsRepeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.is_repeated" - ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" - ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" + ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" + ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" + ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = 
"google.protobuf.ExtensionRangeOptions.Declaration.type" + ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" + ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" ) // Field numbers for google.protobuf.ExtensionRangeOptions.Declaration. const ( - ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 - ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 - ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 - ExtensionRangeOptions_Declaration_IsRepeated_field_number protoreflect.FieldNumber = 4 - ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 - ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 + ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 + ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 + ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 + ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.FieldDescriptorProto. @@ -478,6 +484,7 @@ const ( FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace" FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace" FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package" + FileOptions_Features_field_name protoreflect.Name = "features" FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package" @@ -500,6 +507,7 @@ const ( FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace" FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace" FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package" + FileOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.features" FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option" ) @@ -525,6 +533,7 @@ const ( FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41 FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44 FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45 + FileOptions_Features_field_number protoreflect.FieldNumber = 50 FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -547,6 +556,7 @@ const ( MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + MessageOptions_Features_field_name protoreflect.Name = "features" MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" @@ -554,6 +564,7 @@ const ( 
MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts" + MessageOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.features" MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" ) @@ -564,6 +575,7 @@ const ( MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 11 + MessageOptions_Features_field_number protoreflect.FieldNumber = 12 MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -584,8 +596,9 @@ const ( FieldOptions_Weak_field_name protoreflect.Name = "weak" FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" FieldOptions_Retention_field_name protoreflect.Name = "retention" - FieldOptions_Target_field_name protoreflect.Name = "target" FieldOptions_Targets_field_name protoreflect.Name = "targets" + FieldOptions_EditionDefaults_field_name protoreflect.Name = "edition_defaults" + FieldOptions_Features_field_name protoreflect.Name = "features" FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" @@ -597,8 +610,9 @@ const ( FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact" FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention" - FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target" FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets" + FieldOptions_EditionDefaults_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults" + FieldOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.features" FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" ) @@ -613,8 +627,9 @@ const ( FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16 FieldOptions_Retention_field_number protoreflect.FieldNumber = 17 - FieldOptions_Target_field_number protoreflect.FieldNumber = 18 FieldOptions_Targets_field_number protoreflect.FieldNumber = 19 + FieldOptions_EditionDefaults_field_number protoreflect.FieldNumber = 20 + FieldOptions_Features_field_number protoreflect.FieldNumber = 21 FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -642,6 +657,27 @@ const ( FieldOptions_OptionTargetType_enum_name = "OptionTargetType" ) +// Names for google.protobuf.FieldOptions.EditionDefault. 
+const ( + FieldOptions_EditionDefault_message_name protoreflect.Name = "EditionDefault" + FieldOptions_EditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault" +) + +// Field names for google.protobuf.FieldOptions.EditionDefault. +const ( + FieldOptions_EditionDefault_Edition_field_name protoreflect.Name = "edition" + FieldOptions_EditionDefault_Value_field_name protoreflect.Name = "value" + + FieldOptions_EditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.edition" + FieldOptions_EditionDefault_Value_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.value" +) + +// Field numbers for google.protobuf.FieldOptions.EditionDefault. +const ( + FieldOptions_EditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FieldOptions_EditionDefault_Value_field_number protoreflect.FieldNumber = 2 +) + // Names for google.protobuf.OneofOptions. const ( OneofOptions_message_name protoreflect.Name = "OneofOptions" @@ -650,13 +686,16 @@ const ( // Field names for google.protobuf.OneofOptions. const ( + OneofOptions_Features_field_name protoreflect.Name = "features" OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + OneofOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.features" OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option" ) // Field numbers for google.protobuf.OneofOptions. const ( + OneofOptions_Features_field_number protoreflect.FieldNumber = 1 OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -671,11 +710,13 @@ const ( EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + EnumOptions_Features_field_name protoreflect.Name = "features" EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts" + EnumOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.features" EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" ) @@ -684,6 +725,7 @@ const ( EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 6 + EnumOptions_Features_field_number protoreflect.FieldNumber = 7 EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -696,15 +738,21 @@ const ( // Field names for google.protobuf.EnumValueOptions. 
const ( EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumValueOptions_Features_field_name protoreflect.Name = "features" + EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" + EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features" + EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact" EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" ) // Field numbers for google.protobuf.EnumValueOptions. const ( EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 + EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2 + EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3 EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -716,15 +764,18 @@ const ( // Field names for google.protobuf.ServiceOptions. const ( + ServiceOptions_Features_field_name protoreflect.Name = "features" ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated" ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + ServiceOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.features" ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated" ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option" ) // Field numbers for google.protobuf.ServiceOptions. const ( + ServiceOptions_Features_field_number protoreflect.FieldNumber = 34 ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33 ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -739,10 +790,12 @@ const ( const ( MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated" MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level" + MethodOptions_Features_field_name protoreflect.Name = "features" MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated" MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level" + MethodOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.features" MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option" ) @@ -750,6 +803,7 @@ const ( const ( MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33 MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34 + MethodOptions_Features_field_number protoreflect.FieldNumber = 35 MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -816,6 +870,120 @@ const ( UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2 ) +// Names for google.protobuf.FeatureSet. 
+const ( + FeatureSet_message_name protoreflect.Name = "FeatureSet" + FeatureSet_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet" +) + +// Field names for google.protobuf.FeatureSet. +const ( + FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence" + FeatureSet_EnumType_field_name protoreflect.Name = "enum_type" + FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding" + FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" + FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" + FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" + + FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" + FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type" + FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding" + FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" + FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" + FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" +) + +// Field numbers for google.protobuf.FeatureSet. +const ( + FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1 + FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2 + FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3 + FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 + FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 + FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 +) + +// Full and short names for google.protobuf.FeatureSet.FieldPresence. +const ( + FeatureSet_FieldPresence_enum_fullname = "google.protobuf.FeatureSet.FieldPresence" + FeatureSet_FieldPresence_enum_name = "FieldPresence" +) + +// Full and short names for google.protobuf.FeatureSet.EnumType. +const ( + FeatureSet_EnumType_enum_fullname = "google.protobuf.FeatureSet.EnumType" + FeatureSet_EnumType_enum_name = "EnumType" +) + +// Full and short names for google.protobuf.FeatureSet.RepeatedFieldEncoding. +const ( + FeatureSet_RepeatedFieldEncoding_enum_fullname = "google.protobuf.FeatureSet.RepeatedFieldEncoding" + FeatureSet_RepeatedFieldEncoding_enum_name = "RepeatedFieldEncoding" +) + +// Full and short names for google.protobuf.FeatureSet.Utf8Validation. +const ( + FeatureSet_Utf8Validation_enum_fullname = "google.protobuf.FeatureSet.Utf8Validation" + FeatureSet_Utf8Validation_enum_name = "Utf8Validation" +) + +// Full and short names for google.protobuf.FeatureSet.MessageEncoding. +const ( + FeatureSet_MessageEncoding_enum_fullname = "google.protobuf.FeatureSet.MessageEncoding" + FeatureSet_MessageEncoding_enum_name = "MessageEncoding" +) + +// Full and short names for google.protobuf.FeatureSet.JsonFormat. +const ( + FeatureSet_JsonFormat_enum_fullname = "google.protobuf.FeatureSet.JsonFormat" + FeatureSet_JsonFormat_enum_name = "JsonFormat" +) + +// Names for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults" + FeatureSetDefaults_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults" +) + +// Field names for google.protobuf.FeatureSetDefaults. 
+const ( + FeatureSetDefaults_Defaults_field_name protoreflect.Name = "defaults" + FeatureSetDefaults_MinimumEdition_field_name protoreflect.Name = "minimum_edition" + FeatureSetDefaults_MaximumEdition_field_name protoreflect.Name = "maximum_edition" + + FeatureSetDefaults_Defaults_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.defaults" + FeatureSetDefaults_MinimumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.minimum_edition" + FeatureSetDefaults_MaximumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.maximum_edition" +) + +// Field numbers for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_Defaults_field_number protoreflect.FieldNumber = 1 + FeatureSetDefaults_MinimumEdition_field_number protoreflect.FieldNumber = 4 + FeatureSetDefaults_MaximumEdition_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_message_name protoreflect.Name = "FeatureSetEditionDefault" + FeatureSetDefaults_FeatureSetEditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault" +) + +// Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition" + FeatureSetDefaults_FeatureSetEditionDefault_Features_field_name protoreflect.Name = "features" + + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition" + FeatureSetDefaults_FeatureSetEditionDefault_Features_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features" +) + +// Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number protoreflect.FieldNumber = 2 +) + // Names for google.protobuf.SourceCodeInfo. 
const ( SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo" diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go index 1a509b63e..f55dc01e3 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go @@ -162,11 +162,20 @@ func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.BoolSlice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growBoolSlice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -732,11 +741,20 @@ func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1138,11 +1156,20 @@ func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1544,11 +1571,20 @@ func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growUint32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1950,11 +1986,20 @@ func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -2356,11 +2401,20 @@ func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 
0x80 { + count++ + } + } + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -2762,11 +2816,20 @@ func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growUint64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -3145,11 +3208,15 @@ func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -3461,11 +3528,15 @@ func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growUint32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -3777,11 +3848,15 @@ func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growFloat32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -4093,11 +4168,15 @@ func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { @@ -4409,11 +4488,15 @@ func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growUint64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { @@ -4725,11 +4808,15 @@ func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func 
consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growFloat64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 61c483fac..2ab2c6297 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -206,13 +206,18 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName // Obtain a list of oneof wrapper types. var oneofWrappers []reflect.Type - for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { - if fn, ok := t.MethodByName(method); ok { - for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { - for _, v := range vs { - oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) - } + methods := make([]reflect.Method, 0, 2) + if m, ok := t.MethodByName("XXX_OneofFuncs"); ok { + methods = append(methods, m) + } + if m, ok := t.MethodByName("XXX_OneofWrappers"); ok { + methods = append(methods, m) + } + for _, fn := range methods { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + for _, v := range vs { + oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) } } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 4f5fb67a0..629bacdce 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -192,12 +192,17 @@ fieldLoop: // Derive a mapping of oneof wrappers to fields. oneofWrappers := mi.OneofWrappers - for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { - if fn, ok := reflect.PtrTo(t).MethodByName(method); ok { - for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { - oneofWrappers = vs - } + methods := make([]reflect.Method, 0, 2) + if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { + methods = append(methods, m) + } + if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { + methods = append(methods, m) + } + for _, fn := range methods { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + oneofWrappers = vs } } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 4c491bdf4..517e94434 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -159,6 +159,42 @@ func (p pointer) SetPointer(v pointer) { p.v.Elem().Set(v.v) } +func growSlice(p pointer, addCap int) { + // TODO: Once we only support Go 1.20 and newer, use reflect.Grow. 
+ in := p.v.Elem() + out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap) + reflect.Copy(out, in) + p.v.Elem().Set(out) +} + +func (p pointer) growBoolSlice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growInt32Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growUint32Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growInt64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growUint64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growFloat64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growFloat32Slice(addCap int) { + growSlice(p, addCap) +} + func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } func (ms *messageState) pointer() pointer { panic("not supported") } func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index ee0e0573e..4b020e311 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -138,6 +138,46 @@ func (p pointer) SetPointer(v pointer) { *(*unsafe.Pointer)(p.p) = (unsafe.Pointer)(v.p) } +func (p pointer) growBoolSlice(addCap int) { + sp := p.BoolSlice() + s := make([]bool, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growInt32Slice(addCap int) { + sp := p.Int32Slice() + s := make([]int32, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growUint32Slice(addCap int) { + p.growInt32Slice(addCap) +} + +func (p pointer) growFloat32Slice(addCap int) { + p.growInt32Slice(addCap) +} + +func (p pointer) growInt64Slice(addCap int) { + sp := p.Int64Slice() + s := make([]int64, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growUint64Slice(addCap int) { + p.growInt64Slice(addCap) +} + +func (p pointer) growFloat64Slice(addCap int) { + p.growInt64Slice(addCap) +} + // Static check that MessageState does not exceed the size of a pointer. const _ = uint(unsafe.Sizeof(unsafe.Pointer(nil)) - unsafe.Sizeof(MessageState{})) diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go similarity index 96% rename from vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go rename to vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go index 61a84d341..a008acd09 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine +//go:build !purego && !appengine && !go1.21 +// +build !purego,!appengine,!go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go new file mode 100644 index 000000000..60166f2ba --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go @@ -0,0 +1,74 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine && go1.21 +// +build !purego,!appengine,go1.21 + +package strs + +import ( + "unsafe" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +// UnsafeString returns an unsafe string reference of b. +// The caller must treat the input slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user +// unless the input slice is provably immutable. +func UnsafeString(b []byte) string { + return unsafe.String(unsafe.SliceData(b), len(b)) +} + +// UnsafeBytes returns an unsafe bytes slice reference of s. +// The caller must treat returned slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user. +func UnsafeBytes(s string) []byte { + return unsafe.Slice(unsafe.StringData(s), len(s)) +} + +// Builder builds a set of strings with shared lifetime. +// This differs from strings.Builder, which is for building a single string. +type Builder struct { + buf []byte +} + +// AppendFullName is equivalent to protoreflect.FullName.Append, +// but optimized for large batches where each name has a shared lifetime. +func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { + n := len(prefix) + len(".") + len(name) + if len(prefix) == 0 { + n -= len(".") + } + sb.grow(n) + sb.buf = append(sb.buf, prefix...) + sb.buf = append(sb.buf, '.') + sb.buf = append(sb.buf, name...) + return protoreflect.FullName(sb.last(n)) +} + +// MakeString is equivalent to string(b), but optimized for large batches +// with a shared lifetime. +func (sb *Builder) MakeString(b []byte) string { + sb.grow(len(b)) + sb.buf = append(sb.buf, b...) + return sb.last(len(b)) +} + +func (sb *Builder) grow(n int) { + if cap(sb.buf)-len(sb.buf) >= n { + return + } + + // Unlike strings.Builder, we do not need to copy over the contents + // of the old buffer since our builder provides no API for + // retrieving previously created strings. + sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) +} + +func (sb *Builder) last(n int) string { + return UnsafeString(sb.buf[len(sb.buf)-n:]) +} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 0999f29d5..d8f48faff 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,7 +51,7 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 31 + Minor = 32 Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index 48d47946b..e5b03b567 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -69,7 +69,7 @@ func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { // UnmarshalState parses a wire-format message and places the result in m. // // This method permits fine-grained control over the unmarshaler. -// Most users should use Unmarshal instead. +// Most users should use [Unmarshal] instead. 
func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { if o.RecursionLimit == 0 { o.RecursionLimit = protowire.DefaultRecursionLimit diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go index ec71e717f..80ed16a0c 100644 --- a/vendor/google.golang.org/protobuf/proto/doc.go +++ b/vendor/google.golang.org/protobuf/proto/doc.go @@ -18,27 +18,27 @@ // This package contains functions to convert to and from the wire format, // an efficient binary serialization of protocol buffers. // -// • Size reports the size of a message in the wire format. +// - [Size] reports the size of a message in the wire format. // -// • Marshal converts a message to the wire format. -// The MarshalOptions type provides more control over wire marshaling. +// - [Marshal] converts a message to the wire format. +// The [MarshalOptions] type provides more control over wire marshaling. // -// • Unmarshal converts a message from the wire format. -// The UnmarshalOptions type provides more control over wire unmarshaling. +// - [Unmarshal] converts a message from the wire format. +// The [UnmarshalOptions] type provides more control over wire unmarshaling. // // # Basic message operations // -// • Clone makes a deep copy of a message. +// - [Clone] makes a deep copy of a message. // -// • Merge merges the content of a message into another. +// - [Merge] merges the content of a message into another. // -// • Equal compares two messages. For more control over comparisons -// and detailed reporting of differences, see package -// "google.golang.org/protobuf/testing/protocmp". +// - [Equal] compares two messages. For more control over comparisons +// and detailed reporting of differences, see package +// [google.golang.org/protobuf/testing/protocmp]. // -// • Reset clears the content of a message. +// - [Reset] clears the content of a message. // -// • CheckInitialized reports whether all required fields in a message are set. +// - [CheckInitialized] reports whether all required fields in a message are set. // // # Optional scalar constructors // @@ -46,9 +46,9 @@ // as pointers to a value. For example, an optional string field has the // Go type *string. // -// • Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, and String -// take a value and return a pointer to a new instance of it, -// to simplify construction of optional field values. +// - [Bool], [Int32], [Int64], [Uint32], [Uint64], [Float32], [Float64], and [String] +// take a value and return a pointer to a new instance of it, +// to simplify construction of optional field values. // // Generated enum types usually have an Enum method which performs the // same operation. @@ -57,29 +57,29 @@ // // # Extension accessors // -// • HasExtension, GetExtension, SetExtension, and ClearExtension -// access extension field values in a protocol buffer message. +// - [HasExtension], [GetExtension], [SetExtension], and [ClearExtension] +// access extension field values in a protocol buffer message. // // Extension fields are only supported in proto2. // // # Related packages // -// • Package "google.golang.org/protobuf/encoding/protojson" converts messages to -// and from JSON. +// - Package [google.golang.org/protobuf/encoding/protojson] converts messages to +// and from JSON. // -// • Package "google.golang.org/protobuf/encoding/prototext" converts messages to -// and from the text format. 
+// - Package [google.golang.org/protobuf/encoding/prototext] converts messages to +// and from the text format. // -// • Package "google.golang.org/protobuf/reflect/protoreflect" provides a -// reflection interface for protocol buffer data types. +// - Package [google.golang.org/protobuf/reflect/protoreflect] provides a +// reflection interface for protocol buffer data types. // -// • Package "google.golang.org/protobuf/testing/protocmp" provides features -// to compare protocol buffer messages with the "github.com/google/go-cmp/cmp" -// package. +// - Package [google.golang.org/protobuf/testing/protocmp] provides features +// to compare protocol buffer messages with the [github.com/google/go-cmp/cmp] +// package. // -// • Package "google.golang.org/protobuf/types/dynamicpb" provides a dynamic -// message type, suitable for working with messages where the protocol buffer -// type is only known at runtime. +// - Package [google.golang.org/protobuf/types/dynamicpb] provides a dynamic +// message type, suitable for working with messages where the protocol buffer +// type is only known at runtime. // // This module contains additional packages for more specialized use cases. // Consult the individual package documentation for details. diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index bf7f816d0..4fed202f9 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -129,7 +129,7 @@ func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { // MarshalState returns the wire-format encoding of a message. // // This method permits fine-grained control over the marshaler. -// Most users should use Marshal instead. +// Most users should use [Marshal] instead. func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) { return o.marshal(in.Buf, in.Message) } diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index 5f293cda8..17899a3a7 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -26,7 +26,7 @@ func HasExtension(m Message, xt protoreflect.ExtensionType) bool { } // ClearExtension clears an extension field such that subsequent -// HasExtension calls return false. +// [HasExtension] calls return false. // It panics if m is invalid or if xt does not extend m. func ClearExtension(m Message, xt protoreflect.ExtensionType) { m.ProtoReflect().Clear(xt.TypeDescriptor()) diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go index d761ab331..3c6fe5780 100644 --- a/vendor/google.golang.org/protobuf/proto/merge.go +++ b/vendor/google.golang.org/protobuf/proto/merge.go @@ -21,7 +21,7 @@ import ( // The unknown fields of src are appended to the unknown fields of dst. // // It is semantically equivalent to unmarshaling the encoded form of src -// into dst with the UnmarshalOptions.Merge option specified. +// into dst with the [UnmarshalOptions.Merge] option specified. func Merge(dst, src Message) { // TODO: Should nil src be treated as semantically equivalent to a // untyped, read-only, empty message? What about a nil dst? 
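The consume*Slice changes in codec_gen.go above preallocate the destination slice before decoding a packed field: for varint-encoded elements they count the bytes with the high bit clear (every varint ends with such a byte) and grow the slice once, instead of reallocating as elements are appended. A minimal standalone sketch of that counting-and-grow idea follows; countVarints and decodePackedInt32 are illustrative names, not part of the protobuf module's internal API.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// countVarints returns how many varint-encoded values are packed in b.
// The final byte of every varint has its continuation (high) bit unset,
// so counting bytes < 0x80 yields the element count without decoding.
func countVarints(b []byte) int {
	count := 0
	for _, v := range b {
		if v < 0x80 {
			count++
		}
	}
	return count
}

// decodePackedInt32 grows the destination once, then decodes each value.
func decodePackedInt32(dst []int32, b []byte) ([]int32, error) {
	if n := countVarints(b); n > 0 {
		// Single allocation with exactly enough capacity for the new elements.
		dst = append(make([]int32, 0, len(dst)+n), dst...)
	}
	for len(b) > 0 {
		v, n := protowire.ConsumeVarint(b)
		if n < 0 {
			return nil, protowire.ParseError(n)
		}
		dst = append(dst, int32(v))
		b = b[n:]
	}
	return dst, nil
}

func main() {
	var buf []byte
	for _, v := range []int32{1, 150, 3} {
		buf = protowire.AppendVarint(buf, uint64(v))
	}
	out, err := decodePackedInt32(nil, buf)
	fmt.Println(out, err) // [1 150 3] <nil>
}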
diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go index 1f0d183b1..7543ee6b2 100644 --- a/vendor/google.golang.org/protobuf/proto/proto.go +++ b/vendor/google.golang.org/protobuf/proto/proto.go @@ -15,18 +15,20 @@ import ( // protobuf module that accept a Message, except where otherwise specified. // // This is the v2 interface definition for protobuf messages. -// The v1 interface definition is "github.com/golang/protobuf/proto".Message. +// The v1 interface definition is [github.com/golang/protobuf/proto.Message]. // -// To convert a v1 message to a v2 message, -// use "github.com/golang/protobuf/proto".MessageV2. -// To convert a v2 message to a v1 message, -// use "github.com/golang/protobuf/proto".MessageV1. +// - To convert a v1 message to a v2 message, +// use [google.golang.org/protobuf/protoadapt.MessageV2Of]. +// - To convert a v2 message to a v1 message, +// use [google.golang.org/protobuf/protoadapt.MessageV1Of]. type Message = protoreflect.ProtoMessage -// Error matches all errors produced by packages in the protobuf module. +// Error matches all errors produced by packages in the protobuf module +// according to [errors.Is]. // -// That is, errors.Is(err, Error) reports whether an error is produced -// by this module. +// Example usage: +// +// if errors.Is(err, proto.Error) { ... } var Error error func init() { diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index e4dfb1205..baa0cc621 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -3,11 +3,11 @@ // license that can be found in the LICENSE file. // Package protodesc provides functionality for converting -// FileDescriptorProto messages to/from protoreflect.FileDescriptor values. +// FileDescriptorProto messages to/from [protoreflect.FileDescriptor] values. // // The google.protobuf.FileDescriptorProto is a protobuf message that describes // the type information for a .proto file in a form that is easily serializable. -// The protoreflect.FileDescriptor is a more structured representation of +// The [protoreflect.FileDescriptor] is a more structured representation of // the FileDescriptorProto message where references and remote dependencies // can be directly followed. package protodesc @@ -24,11 +24,11 @@ import ( "google.golang.org/protobuf/types/descriptorpb" ) -// Resolver is the resolver used by NewFile to resolve dependencies. +// Resolver is the resolver used by [NewFile] to resolve dependencies. // The enums and messages provided must belong to some parent file, // which is also registered. // -// It is implemented by protoregistry.Files. +// It is implemented by [protoregistry.Files]. type Resolver interface { FindFileByPath(string) (protoreflect.FileDescriptor, error) FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) @@ -61,19 +61,19 @@ type FileOptions struct { AllowUnresolvable bool } -// NewFile creates a new protoreflect.FileDescriptor from the provided -// file descriptor message. See FileOptions.New for more information. +// NewFile creates a new [protoreflect.FileDescriptor] from the provided +// file descriptor message. See [FileOptions.New] for more information. 
func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { return FileOptions{}.New(fd, r) } -// NewFiles creates a new protoregistry.Files from the provided -// FileDescriptorSet message. See FileOptions.NewFiles for more information. +// NewFiles creates a new [protoregistry.Files] from the provided +// FileDescriptorSet message. See [FileOptions.NewFiles] for more information. func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { return FileOptions{}.NewFiles(fd) } -// New creates a new protoreflect.FileDescriptor from the provided +// New creates a new [protoreflect.FileDescriptor] from the provided // file descriptor message. The file must represent a valid proto file according // to protobuf semantics. The returned descriptor is a deep copy of the input. // @@ -93,9 +93,15 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot f.L1.Syntax = protoreflect.Proto2 case "proto3": f.L1.Syntax = protoreflect.Proto3 + case "editions": + f.L1.Syntax = protoreflect.Editions + f.L1.Edition = fromEditionProto(fd.GetEdition()) default: return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) } + if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < SupportedEditionsMinimum || fd.GetEdition() > SupportedEditionsMaximum) { + return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) + } f.L1.Path = fd.GetName() if f.L1.Path == "" { return nil, errors.New("file path must be populated") @@ -108,6 +114,9 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot opts = proto.Clone(opts).(*descriptorpb.FileOptions) f.L2.Options = func() protoreflect.ProtoMessage { return opts } } + if f.L1.Syntax == protoreflect.Editions { + initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures()) + } f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) for _, i := range fd.GetPublicDependency() { @@ -231,7 +240,7 @@ func (is importSet) importPublic(imps protoreflect.FileImports) { } } -// NewFiles creates a new protoregistry.Files from the provided +// NewFiles creates a new [protoregistry.Files] from the provided // FileDescriptorSet message. The descriptor set must include only // valid files according to protobuf semantics. The returned descriptors // are a deep copy of the input. diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index 37efda1af..aff6fd490 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -137,6 +137,30 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc if fd.JsonName != nil { f.L1.StringName.InitJSON(fd.GetJsonName()) } + + if f.Base.L0.ParentFile.Syntax() == protoreflect.Editions { + f.L1.Presence = resolveFeatureHasFieldPresence(f.Base.L0.ParentFile, fd) + // We reuse the existing field because the old option `[packed = + // true]` is mutually exclusive with the editions feature. + if fd.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + f.L1.HasPacked = true + f.L1.IsPacked = resolveFeatureRepeatedFieldEncodingPacked(f.Base.L0.ParentFile, fd) + } + + // We pretend this option is always explicitly set because the only + // use of HasEnforceUTF8 is to determine whether to use EnforceUTF8 + // or to return the appropriate default. 
+ // When using editions we either parse the option or resolve the + // appropriate default here (instead of later when this option is + // requested from the descriptor). + // In proto2/proto3 syntax HasEnforceUTF8 might be false. + f.L1.HasEnforceUTF8 = true + f.L1.EnforceUTF8 = resolveFeatureEnforceUTF8(f.Base.L0.ParentFile, fd) + + if f.L1.Kind == protoreflect.MessageKind && resolveFeatureDelimitedEncoding(f.Base.L0.ParentFile, fd) { + f.L1.Kind = protoreflect.GroupKind + } + } } return fs, nil } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go new file mode 100644 index 000000000..7352926ca --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -0,0 +1,177 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + _ "embed" + "fmt" + "os" + "sync" + + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + SupportedEditionsMinimum = descriptorpb.Edition_EDITION_PROTO2 + SupportedEditionsMaximum = descriptorpb.Edition_EDITION_2023 +) + +//go:embed editions_defaults.binpb +var binaryEditionDefaults []byte +var defaults = &descriptorpb.FeatureSetDefaults{} +var defaultsCacheMu sync.Mutex +var defaultsCache = make(map[filedesc.Edition]*descriptorpb.FeatureSet) + +func init() { + err := proto.Unmarshal(binaryEditionDefaults, defaults) + if err != nil { + fmt.Fprintf(os.Stderr, "unmarshal editions defaults: %v\n", err) + os.Exit(1) + } +} + +func fromEditionProto(epb descriptorpb.Edition) filedesc.Edition { + return filedesc.Edition(epb) +} + +func toEditionProto(ed filedesc.Edition) descriptorpb.Edition { + switch ed { + case filedesc.EditionUnknown: + return descriptorpb.Edition_EDITION_UNKNOWN + case filedesc.EditionProto2: + return descriptorpb.Edition_EDITION_PROTO2 + case filedesc.EditionProto3: + return descriptorpb.Edition_EDITION_PROTO3 + case filedesc.Edition2023: + return descriptorpb.Edition_EDITION_2023 + default: + panic(fmt.Sprintf("unknown value for edition: %v", ed)) + } +} + +func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet { + defaultsCacheMu.Lock() + defer defaultsCacheMu.Unlock() + if def, ok := defaultsCache[ed]; ok { + return def + } + edpb := toEditionProto(ed) + if defaults.GetMinimumEdition() > edpb || defaults.GetMaximumEdition() < edpb { + // This should never happen protodesc.(FileOptions).New would fail when + // initializing the file descriptor. + // This most likely means the embedded defaults were not updated. + fmt.Fprintf(os.Stderr, "internal error: unsupported edition %v (did you forget to update the embedded defaults (i.e. the bootstrap descriptor proto)?)\n", edpb) + os.Exit(1) + } + fs := defaults.GetDefaults()[0].GetFeatures() + // Using a linear search for now. + // Editions are guaranteed to be sorted and thus we could use a binary search. + // Given that there are only a handful of editions (with one more per year) + // there is not much reason to use a binary search. 
+ for _, def := range defaults.GetDefaults() { + if def.GetEdition() <= edpb { + fs = def.GetFeatures() + } else { + break + } + } + defaultsCache[ed] = fs + return fs +} + +func resolveFeatureHasFieldPresence(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { + fs := fieldDesc.GetOptions().GetFeatures() + if fs == nil || fs.FieldPresence == nil { + return fileDesc.L1.EditionFeatures.IsFieldPresence + } + return fs.GetFieldPresence() == descriptorpb.FeatureSet_LEGACY_REQUIRED || + fs.GetFieldPresence() == descriptorpb.FeatureSet_EXPLICIT +} + +func resolveFeatureRepeatedFieldEncodingPacked(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { + fs := fieldDesc.GetOptions().GetFeatures() + if fs == nil || fs.RepeatedFieldEncoding == nil { + return fileDesc.L1.EditionFeatures.IsPacked + } + return fs.GetRepeatedFieldEncoding() == descriptorpb.FeatureSet_PACKED +} + +func resolveFeatureEnforceUTF8(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { + fs := fieldDesc.GetOptions().GetFeatures() + if fs == nil || fs.Utf8Validation == nil { + return fileDesc.L1.EditionFeatures.IsUTF8Validated + } + return fs.GetUtf8Validation() == descriptorpb.FeatureSet_VERIFY +} + +func resolveFeatureDelimitedEncoding(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { + fs := fieldDesc.GetOptions().GetFeatures() + if fs == nil || fs.MessageEncoding == nil { + return fileDesc.L1.EditionFeatures.IsDelimitedEncoded + } + return fs.GetMessageEncoding() == descriptorpb.FeatureSet_DELIMITED +} + +// initFileDescFromFeatureSet initializes editions related fields in fd based +// on fs. If fs is nil it is assumed to be an empty featureset and all fields +// will be initialized with the appropriate default. fd.L1.Edition must be set +// before calling this function. 
+func initFileDescFromFeatureSet(fd *filedesc.File, fs *descriptorpb.FeatureSet) { + dfs := getFeatureSetFor(fd.L1.Edition) + if fs == nil { + fs = &descriptorpb.FeatureSet{} + } + + var fieldPresence descriptorpb.FeatureSet_FieldPresence + if fp := fs.FieldPresence; fp != nil { + fieldPresence = *fp + } else { + fieldPresence = *dfs.FieldPresence + } + fd.L1.EditionFeatures.IsFieldPresence = fieldPresence == descriptorpb.FeatureSet_LEGACY_REQUIRED || + fieldPresence == descriptorpb.FeatureSet_EXPLICIT + + var enumType descriptorpb.FeatureSet_EnumType + if et := fs.EnumType; et != nil { + enumType = *et + } else { + enumType = *dfs.EnumType + } + fd.L1.EditionFeatures.IsOpenEnum = enumType == descriptorpb.FeatureSet_OPEN + + var respeatedFieldEncoding descriptorpb.FeatureSet_RepeatedFieldEncoding + if rfe := fs.RepeatedFieldEncoding; rfe != nil { + respeatedFieldEncoding = *rfe + } else { + respeatedFieldEncoding = *dfs.RepeatedFieldEncoding + } + fd.L1.EditionFeatures.IsPacked = respeatedFieldEncoding == descriptorpb.FeatureSet_PACKED + + var isUTF8Validated descriptorpb.FeatureSet_Utf8Validation + if utf8val := fs.Utf8Validation; utf8val != nil { + isUTF8Validated = *utf8val + } else { + isUTF8Validated = *dfs.Utf8Validation + } + fd.L1.EditionFeatures.IsUTF8Validated = isUTF8Validated == descriptorpb.FeatureSet_VERIFY + + var messageEncoding descriptorpb.FeatureSet_MessageEncoding + if me := fs.MessageEncoding; me != nil { + messageEncoding = *me + } else { + messageEncoding = *dfs.MessageEncoding + } + fd.L1.EditionFeatures.IsDelimitedEncoded = messageEncoding == descriptorpb.FeatureSet_DELIMITED + + var jsonFormat descriptorpb.FeatureSet_JsonFormat + if jf := fs.JsonFormat; jf != nil { + jsonFormat = *jf + } else { + jsonFormat = *dfs.JsonFormat + } + fd.L1.EditionFeatures.IsJSONCompliant = jsonFormat == descriptorpb.FeatureSet_ALLOW +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb b/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb new file mode 100644 index 000000000..1a8610a84 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb @@ -0,0 +1,4 @@ + +  (0æ +  (0ç +  (0è æ(è \ No newline at end of file diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go index a7c5ceffc..9d6e05420 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -16,7 +16,7 @@ import ( "google.golang.org/protobuf/types/descriptorpb" ) -// ToFileDescriptorProto copies a protoreflect.FileDescriptor into a +// ToFileDescriptorProto copies a [protoreflect.FileDescriptor] into a // google.protobuf.FileDescriptorProto message. 
func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto { p := &descriptorpb.FileDescriptorProto{ @@ -70,13 +70,13 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD for i, exts := 0, file.Extensions(); i < exts.Len(); i++ { p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) } - if syntax := file.Syntax(); syntax != protoreflect.Proto2 { + if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() { p.Syntax = proto.String(file.Syntax().String()) } return p } -// ToDescriptorProto copies a protoreflect.MessageDescriptor into a +// ToDescriptorProto copies a [protoreflect.MessageDescriptor] into a // google.protobuf.DescriptorProto message. func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto { p := &descriptorpb.DescriptorProto{ @@ -119,7 +119,7 @@ func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.Des return p } -// ToFieldDescriptorProto copies a protoreflect.FieldDescriptor into a +// ToFieldDescriptorProto copies a [protoreflect.FieldDescriptor] into a // google.protobuf.FieldDescriptorProto message. func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto { p := &descriptorpb.FieldDescriptorProto{ @@ -168,7 +168,7 @@ func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.Fi return p } -// ToOneofDescriptorProto copies a protoreflect.OneofDescriptor into a +// ToOneofDescriptorProto copies a [protoreflect.OneofDescriptor] into a // google.protobuf.OneofDescriptorProto message. func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto { return &descriptorpb.OneofDescriptorProto{ @@ -177,7 +177,7 @@ func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.On } } -// ToEnumDescriptorProto copies a protoreflect.EnumDescriptor into a +// ToEnumDescriptorProto copies a [protoreflect.EnumDescriptor] into a // google.protobuf.EnumDescriptorProto message. func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto { p := &descriptorpb.EnumDescriptorProto{ @@ -200,7 +200,7 @@ func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumD return p } -// ToEnumValueDescriptorProto copies a protoreflect.EnumValueDescriptor into a +// ToEnumValueDescriptorProto copies a [protoreflect.EnumValueDescriptor] into a // google.protobuf.EnumValueDescriptorProto message. func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto { return &descriptorpb.EnumValueDescriptorProto{ @@ -210,7 +210,7 @@ func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descrip } } -// ToServiceDescriptorProto copies a protoreflect.ServiceDescriptor into a +// ToServiceDescriptorProto copies a [protoreflect.ServiceDescriptor] into a // google.protobuf.ServiceDescriptorProto message. func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto { p := &descriptorpb.ServiceDescriptorProto{ @@ -223,7 +223,7 @@ func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descripto return p } -// ToMethodDescriptorProto copies a protoreflect.MethodDescriptor into a +// ToMethodDescriptorProto copies a [protoreflect.MethodDescriptor] into a // google.protobuf.MethodDescriptorProto message. 
func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto { p := &descriptorpb.MethodDescriptorProto{ diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index 55aa14922..ec6572dfd 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -10,46 +10,46 @@ // // # Protocol Buffer Descriptors // -// Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor) +// Protobuf descriptors (e.g., [EnumDescriptor] or [MessageDescriptor]) // are immutable objects that represent protobuf type information. // They are wrappers around the messages declared in descriptor.proto. // Protobuf descriptors alone lack any information regarding Go types. // -// Enums and messages generated by this module implement Enum and ProtoMessage, +// Enums and messages generated by this module implement [Enum] and [ProtoMessage], // where the Descriptor and ProtoReflect.Descriptor accessors respectively // return the protobuf descriptor for the values. // // The protobuf descriptor interfaces are not meant to be implemented by // user code since they might need to be extended in the future to support // additions to the protobuf language. -// The "google.golang.org/protobuf/reflect/protodesc" package converts between +// The [google.golang.org/protobuf/reflect/protodesc] package converts between // google.protobuf.DescriptorProto messages and protobuf descriptors. // // # Go Type Descriptors // -// A type descriptor (e.g., EnumType or MessageType) is a constructor for +// A type descriptor (e.g., [EnumType] or [MessageType]) is a constructor for // a concrete Go type that represents the associated protobuf descriptor. // There is commonly a one-to-one relationship between protobuf descriptors and // Go type descriptors, but it can potentially be a one-to-many relationship. // -// Enums and messages generated by this module implement Enum and ProtoMessage, +// Enums and messages generated by this module implement [Enum] and [ProtoMessage], // where the Type and ProtoReflect.Type accessors respectively // return the protobuf descriptor for the values. // -// The "google.golang.org/protobuf/types/dynamicpb" package can be used to +// The [google.golang.org/protobuf/types/dynamicpb] package can be used to // create Go type descriptors from protobuf descriptors. // // # Value Interfaces // -// The Enum and Message interfaces provide a reflective view over an +// The [Enum] and [Message] interfaces provide a reflective view over an // enum or message instance. For enums, it provides the ability to retrieve // the enum value number for any concrete enum type. For messages, it provides // the ability to access or manipulate fields of the message. // -// To convert a proto.Message to a protoreflect.Message, use the +// To convert a [google.golang.org/protobuf/proto.Message] to a [protoreflect.Message], use the // former's ProtoReflect method. Since the ProtoReflect method is new to the // v2 message interface, it may not be present on older message implementations. -// The "github.com/golang/protobuf/proto".MessageReflect function can be used +// The [github.com/golang/protobuf/proto.MessageReflect] function can be used // to obtain a reflective view on older messages. 
// // # Relationships @@ -71,12 +71,12 @@ // │ │ // └────────────────── Type() ───────┘ // -// • An EnumType describes a concrete Go enum type. +// • An [EnumType] describes a concrete Go enum type. // It has an EnumDescriptor and can construct an Enum instance. // -// • An EnumDescriptor describes an abstract protobuf enum type. +// • An [EnumDescriptor] describes an abstract protobuf enum type. // -// • An Enum is a concrete enum instance. Generated enums implement Enum. +// • An [Enum] is a concrete enum instance. Generated enums implement Enum. // // ┌──────────────── New() ─────────────────┠// │ │ @@ -90,24 +90,26 @@ // │ │ // └─────────────────── Type() ─────────┘ // -// • A MessageType describes a concrete Go message type. -// It has a MessageDescriptor and can construct a Message instance. -// Just as how Go's reflect.Type is a reflective description of a Go type, -// a MessageType is a reflective description of a Go type for a protobuf message. +// • A [MessageType] describes a concrete Go message type. +// It has a [MessageDescriptor] and can construct a [Message] instance. +// Just as how Go's [reflect.Type] is a reflective description of a Go type, +// a [MessageType] is a reflective description of a Go type for a protobuf message. // -// • A MessageDescriptor describes an abstract protobuf message type. -// It has no understanding of Go types. In order to construct a MessageType -// from just a MessageDescriptor, you can consider looking up the message type -// in the global registry using protoregistry.GlobalTypes.FindMessageByName -// or constructing a dynamic MessageType using dynamicpb.NewMessageType. +// • A [MessageDescriptor] describes an abstract protobuf message type. +// It has no understanding of Go types. In order to construct a [MessageType] +// from just a [MessageDescriptor], you can consider looking up the message type +// in the global registry using the FindMessageByName method on +// [google.golang.org/protobuf/reflect/protoregistry.GlobalTypes] +// or constructing a dynamic [MessageType] using +// [google.golang.org/protobuf/types/dynamicpb.NewMessageType]. // -// • A Message is a reflective view over a concrete message instance. -// Generated messages implement ProtoMessage, which can convert to a Message. -// Just as how Go's reflect.Value is a reflective view over a Go value, -// a Message is a reflective view over a concrete protobuf message instance. -// Using Go reflection as an analogy, the ProtoReflect method is similar to -// calling reflect.ValueOf, and the Message.Interface method is similar to -// calling reflect.Value.Interface. +// • A [Message] is a reflective view over a concrete message instance. +// Generated messages implement [ProtoMessage], which can convert to a [Message]. +// Just as how Go's [reflect.Value] is a reflective view over a Go value, +// a [Message] is a reflective view over a concrete protobuf message instance. +// Using Go reflection as an analogy, the [ProtoMessage.ProtoReflect] method is similar to +// calling [reflect.ValueOf], and the [Message.Interface] method is similar to +// calling [reflect.Value.Interface]. // // ┌── TypeDescriptor() ──┠┌───── Descriptor() ─────┠// │ V │ V @@ -119,15 +121,15 @@ // │ │ // └────── implements ────────┘ // -// • An ExtensionType describes a concrete Go implementation of an extension. -// It has an ExtensionTypeDescriptor and can convert to/from -// abstract Values and Go values. +// • An [ExtensionType] describes a concrete Go implementation of an extension. 
+// It has an [ExtensionTypeDescriptor] and can convert to/from +// an abstract [Value] and a Go value. // -// • An ExtensionTypeDescriptor is an ExtensionDescriptor -// which also has an ExtensionType. +// • An [ExtensionTypeDescriptor] is an [ExtensionDescriptor] +// which also has an [ExtensionType]. // -// • An ExtensionDescriptor describes an abstract protobuf extension field and -// may not always be an ExtensionTypeDescriptor. +// • An [ExtensionDescriptor] describes an abstract protobuf extension field and +// may not always be an [ExtensionTypeDescriptor]. package protoreflect import ( @@ -142,7 +144,7 @@ type doNotImplement pragma.DoNotImplement // ProtoMessage is the top-level interface that all proto messages implement. // This is declared in the protoreflect package to avoid a cyclic dependency; -// use the proto.Message type instead, which aliases this type. +// use the [google.golang.org/protobuf/proto.Message] type instead, which aliases this type. type ProtoMessage interface{ ProtoReflect() Message } // Syntax is the language version of the proto file. @@ -151,8 +153,9 @@ type Syntax syntax type syntax int8 // keep exact type opaque as the int type may change const ( - Proto2 Syntax = 2 - Proto3 Syntax = 3 + Proto2 Syntax = 2 + Proto3 Syntax = 3 + Editions Syntax = 4 ) // IsValid reports whether the syntax is valid. @@ -436,7 +439,7 @@ type Names interface { // FullName is a qualified name that uniquely identifies a proto declaration. // A qualified name is the concatenation of the proto package along with the // fully-declared name (i.e., name of parent preceding the name of the child), -// with a '.' delimiter placed between each Name. +// with a '.' delimiter placed between each [Name]. // // This should not have any leading or trailing dots. type FullName string // e.g., "google.protobuf.Field.Kind" @@ -480,7 +483,7 @@ func isLetterDigit(c byte) bool { } // Name returns the short name, which is the last identifier segment. -// A single segment FullName is the Name itself. +// A single segment FullName is the [Name] itself. 
func (n FullName) Name() Name { if i := strings.LastIndexByte(string(n), '.'); i >= 0 { return Name(n[i+1:]) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 717b106f3..0c045db6a 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -35,7 +35,7 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) case 12: b = p.appendSingularField(b, "syntax", nil) - case 13: + case 14: b = p.appendSingularField(b, "edition", nil) } return b @@ -180,6 +180,8 @@ func (p *SourcePath) appendFileOptions(b []byte) []byte { b = p.appendSingularField(b, "php_metadata_namespace", nil) case 45: b = p.appendSingularField(b, "ruby_package", nil) + case 50: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -240,6 +242,8 @@ func (p *SourcePath) appendMessageOptions(b []byte) []byte { b = p.appendSingularField(b, "map_entry", nil) case 11: b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) + case 12: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -285,6 +289,8 @@ func (p *SourcePath) appendEnumOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 6: b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) + case 7: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -330,6 +336,8 @@ func (p *SourcePath) appendServiceOptions(b []byte) []byte { return b } switch (*p)[0] { + case 34: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 33: b = p.appendSingularField(b, "deprecated", nil) case 999: @@ -361,16 +369,39 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { b = p.appendSingularField(b, "debug_redact", nil) case 17: b = p.appendSingularField(b, "retention", nil) - case 18: - b = p.appendSingularField(b, "target", nil) case 19: b = p.appendRepeatedField(b, "targets", nil) + case 20: + b = p.appendRepeatedField(b, "edition_defaults", (*SourcePath).appendFieldOptions_EditionDefault) + case 21: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } return b } +func (p *SourcePath) appendFeatureSet(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "field_presence", nil) + case 2: + b = p.appendSingularField(b, "enum_type", nil) + case 3: + b = p.appendSingularField(b, "repeated_field_encoding", nil) + case 4: + b = p.appendSingularField(b, "utf8_validation", nil) + case 5: + b = p.appendSingularField(b, "message_encoding", nil) + case 6: + b = p.appendSingularField(b, "json_format", nil) + } + return b +} + func (p *SourcePath) appendUninterpretedOption(b []byte) []byte { if len(*p) == 0 { return b @@ -422,6 +453,8 @@ func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte { b = p.appendRepeatedField(b, 
"uninterpreted_option", (*SourcePath).appendUninterpretedOption) case 2: b = p.appendRepeatedField(b, "declaration", (*SourcePath).appendExtensionRangeOptions_Declaration) + case 50: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 3: b = p.appendSingularField(b, "verification", nil) } @@ -433,6 +466,8 @@ func (p *SourcePath) appendOneofOptions(b []byte) []byte { return b } switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -446,6 +481,10 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { switch (*p)[0] { case 1: b = p.appendSingularField(b, "deprecated", nil) + case 2: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 3: + b = p.appendSingularField(b, "debug_redact", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -461,12 +500,27 @@ func (p *SourcePath) appendMethodOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 34: b = p.appendSingularField(b, "idempotency_level", nil) + case 35: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } return b } +func (p *SourcePath) appendFieldOptions_EditionDefault(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 3: + b = p.appendSingularField(b, "edition", nil) + case 2: + b = p.appendSingularField(b, "value", nil) + } + return b +} + func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { if len(*p) == 0 { return b @@ -491,8 +545,6 @@ func (p *SourcePath) appendExtensionRangeOptions_Declaration(b []byte) []byte { b = p.appendSingularField(b, "full_name", nil) case 3: b = p.appendSingularField(b, "type", nil) - case 4: - b = p.appendSingularField(b, "is_repeated", nil) case 5: b = p.appendSingularField(b, "reserved", nil) case 6: diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 3867470d3..60ff62b4c 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -12,7 +12,7 @@ package protoreflect // exactly identical. However, it is possible for the same semantically // identical proto type to be represented by multiple type descriptors. // -// For example, suppose we have t1 and t2 which are both MessageDescriptors. +// For example, suppose we have t1 and t2 which are both an [MessageDescriptor]. // If t1 == t2, then the types are definitely equal and all accessors return // the same information. However, if t1 != t2, then it is still possible that // they still represent the same proto type (e.g., t1.FullName == t2.FullName). @@ -115,7 +115,7 @@ type Descriptor interface { // corresponds with the google.protobuf.FileDescriptorProto message. // // Top-level declarations: -// EnumDescriptor, MessageDescriptor, FieldDescriptor, and/or ServiceDescriptor. +// [EnumDescriptor], [MessageDescriptor], [FieldDescriptor], and/or [ServiceDescriptor]. type FileDescriptor interface { Descriptor // Descriptor.FullName is identical to Package @@ -180,8 +180,8 @@ type FileImport struct { // corresponds with the google.protobuf.DescriptorProto message. 
// // Nested declarations: -// FieldDescriptor, OneofDescriptor, FieldDescriptor, EnumDescriptor, -// and/or MessageDescriptor. +// [FieldDescriptor], [OneofDescriptor], [FieldDescriptor], [EnumDescriptor], +// and/or [MessageDescriptor]. type MessageDescriptor interface { Descriptor @@ -214,7 +214,7 @@ type MessageDescriptor interface { ExtensionRanges() FieldRanges // ExtensionRangeOptions returns the ith extension range options. // - // To avoid a dependency cycle, this method returns a proto.Message value, + // To avoid a dependency cycle, this method returns a proto.Message] value, // which always contains a google.protobuf.ExtensionRangeOptions message. // This method returns a typed nil-pointer if no options are present. // The caller must import the descriptorpb package to use this. @@ -231,9 +231,9 @@ type MessageDescriptor interface { } type isMessageDescriptor interface{ ProtoType(MessageDescriptor) } -// MessageType encapsulates a MessageDescriptor with a concrete Go implementation. +// MessageType encapsulates a [MessageDescriptor] with a concrete Go implementation. // It is recommended that implementations of this interface also implement the -// MessageFieldTypes interface. +// [MessageFieldTypes] interface. type MessageType interface { // New returns a newly allocated empty message. // It may return nil for synthetic messages representing a map entry. @@ -249,19 +249,19 @@ type MessageType interface { Descriptor() MessageDescriptor } -// MessageFieldTypes extends a MessageType by providing type information +// MessageFieldTypes extends a [MessageType] by providing type information // regarding enums and messages referenced by the message fields. type MessageFieldTypes interface { MessageType - // Enum returns the EnumType for the ith field in Descriptor.Fields. + // Enum returns the EnumType for the ith field in MessageDescriptor.Fields. // It returns nil if the ith field is not an enum kind. // It panics if out of bounds. // // Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum() Enum(i int) EnumType - // Message returns the MessageType for the ith field in Descriptor.Fields. + // Message returns the MessageType for the ith field in MessageDescriptor.Fields. // It returns nil if the ith field is not a message or group kind. // It panics if out of bounds. // @@ -286,8 +286,8 @@ type MessageDescriptors interface { // corresponds with the google.protobuf.FieldDescriptorProto message. // // It is used for both normal fields defined within the parent message -// (e.g., MessageDescriptor.Fields) and fields that extend some remote message -// (e.g., FileDescriptor.Extensions or MessageDescriptor.Extensions). +// (e.g., [MessageDescriptor.Fields]) and fields that extend some remote message +// (e.g., [FileDescriptor.Extensions] or [MessageDescriptor.Extensions]). type FieldDescriptor interface { Descriptor @@ -344,7 +344,7 @@ type FieldDescriptor interface { // IsMap reports whether this field represents a map, // where the value type for the associated field is a Map. // It is equivalent to checking whether Cardinality is Repeated, - // that the Kind is MessageKind, and that Message.IsMapEntry reports true. + // that the Kind is MessageKind, and that MessageDescriptor.IsMapEntry reports true. IsMap() bool // MapKey returns the field descriptor for the key in the map entry. @@ -419,7 +419,7 @@ type OneofDescriptor interface { // IsSynthetic reports whether this is a synthetic oneof created to support // proto3 optional semantics. 
If true, Fields contains exactly one field - // with HasOptionalKeyword specified. + // with FieldDescriptor.HasOptionalKeyword specified. IsSynthetic() bool // Fields is a list of fields belonging to this oneof. @@ -442,10 +442,10 @@ type OneofDescriptors interface { doNotImplement } -// ExtensionDescriptor is an alias of FieldDescriptor for documentation. +// ExtensionDescriptor is an alias of [FieldDescriptor] for documentation. type ExtensionDescriptor = FieldDescriptor -// ExtensionTypeDescriptor is an ExtensionDescriptor with an associated ExtensionType. +// ExtensionTypeDescriptor is an [ExtensionDescriptor] with an associated [ExtensionType]. type ExtensionTypeDescriptor interface { ExtensionDescriptor @@ -470,12 +470,12 @@ type ExtensionDescriptors interface { doNotImplement } -// ExtensionType encapsulates an ExtensionDescriptor with a concrete +// ExtensionType encapsulates an [ExtensionDescriptor] with a concrete // Go implementation. The nested field descriptor must be for a extension field. // // While a normal field is a member of the parent message that it is declared -// within (see Descriptor.Parent), an extension field is a member of some other -// target message (see ExtensionDescriptor.Extendee) and may have no +// within (see [Descriptor.Parent]), an extension field is a member of some other +// target message (see [FieldDescriptor.ContainingMessage]) and may have no // relationship with the parent. However, the full name of an extension field is // relative to the parent that it is declared within. // @@ -532,7 +532,7 @@ type ExtensionType interface { // corresponds with the google.protobuf.EnumDescriptorProto message. // // Nested declarations: -// EnumValueDescriptor. +// [EnumValueDescriptor]. type EnumDescriptor interface { Descriptor @@ -548,7 +548,7 @@ type EnumDescriptor interface { } type isEnumDescriptor interface{ ProtoType(EnumDescriptor) } -// EnumType encapsulates an EnumDescriptor with a concrete Go implementation. +// EnumType encapsulates an [EnumDescriptor] with a concrete Go implementation. type EnumType interface { // New returns an instance of this enum type with its value set to n. New(n EnumNumber) Enum @@ -610,7 +610,7 @@ type EnumValueDescriptors interface { // ServiceDescriptor describes a service and // corresponds with the google.protobuf.ServiceDescriptorProto message. // -// Nested declarations: MethodDescriptor. +// Nested declarations: [MethodDescriptor]. type ServiceDescriptor interface { Descriptor diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go index 37601b781..a7b0d06ff 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -27,16 +27,16 @@ type Enum interface { // Message is a reflective interface for a concrete message value, // encapsulating both type and value information for the message. // -// Accessor/mutators for individual fields are keyed by FieldDescriptor. +// Accessor/mutators for individual fields are keyed by [FieldDescriptor]. // For non-extension fields, the descriptor must exactly match the // field known by the parent message. 
-// For extension fields, the descriptor must implement ExtensionTypeDescriptor, -// extend the parent message (i.e., have the same message FullName), and +// For extension fields, the descriptor must implement [ExtensionTypeDescriptor], +// extend the parent message (i.e., have the same message [FullName]), and // be within the parent's extension range. // -// Each field Value can be a scalar or a composite type (Message, List, or Map). -// See Value for the Go types associated with a FieldDescriptor. -// Providing a Value that is invalid or of an incorrect type panics. +// Each field [Value] can be a scalar or a composite type ([Message], [List], or [Map]). +// See [Value] for the Go types associated with a [FieldDescriptor]. +// Providing a [Value] that is invalid or of an incorrect type panics. type Message interface { // Descriptor returns message descriptor, which contains only the protobuf // type information for the message. @@ -152,7 +152,7 @@ type Message interface { // This method may return nil. // // The returned methods type is identical to - // "google.golang.org/protobuf/runtime/protoiface".Methods. + // google.golang.org/protobuf/runtime/protoiface.Methods. // Consult the protoiface package documentation for details. ProtoMethods() *methods } @@ -175,8 +175,8 @@ func (b RawFields) IsValid() bool { } // List is a zero-indexed, ordered list. -// The element Value type is determined by FieldDescriptor.Kind. -// Providing a Value that is invalid or of an incorrect type panics. +// The element [Value] type is determined by [FieldDescriptor.Kind]. +// Providing a [Value] that is invalid or of an incorrect type panics. type List interface { // Len reports the number of entries in the List. // Get, Set, and Truncate panic with out of bound indexes. @@ -226,9 +226,9 @@ type List interface { } // Map is an unordered, associative map. -// The entry MapKey type is determined by FieldDescriptor.MapKey.Kind. -// The entry Value type is determined by FieldDescriptor.MapValue.Kind. -// Providing a MapKey or Value that is invalid or of an incorrect type panics. +// The entry [MapKey] type is determined by [FieldDescriptor.MapKey].Kind. +// The entry [Value] type is determined by [FieldDescriptor.MapValue].Kind. +// Providing a [MapKey] or [Value] that is invalid or of an incorrect type panics. type Map interface { // Len reports the number of elements in the map. Len() int diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go index 591652541..654599d44 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go @@ -24,19 +24,19 @@ import ( // Unlike the == operator, a NaN is equal to another NaN. // // - Enums are equal if they contain the same number. -// Since Value does not contain an enum descriptor, +// Since [Value] does not contain an enum descriptor, // enum values do not consider the type of the enum. // // - Other scalar values are equal if they contain the same value. // -// - Message values are equal if they belong to the same message descriptor, +// - [Message] values are equal if they belong to the same message descriptor, // have the same set of populated known and extension field values, // and the same set of unknown fields values. 
// -// - Lists are equal if they are the same length and +// - [List] values are equal if they are the same length and // each corresponding element is equal. // -// - Maps are equal if they have the same set of keys and +// - [Map] values are equal if they have the same set of keys and // the corresponding value for each key is equal. func (v1 Value) Equal(v2 Value) bool { return equalValue(v1, v2) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 08e5ef73f..160309731 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -11,7 +11,7 @@ import ( // Value is a union where only one Go type may be set at a time. // The Value is used to represent all possible values a field may take. -// The following shows which Go type is used to represent each proto Kind: +// The following shows which Go type is used to represent each proto [Kind]: // // â•”â•â•â•â•â•â•â•â•â•â•â•â•â•¤â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•— // â•‘ Go type │ Protobuf kind â•‘ @@ -31,22 +31,22 @@ import ( // // Multiple protobuf Kinds may be represented by a single Go type if the type // can losslessly represent the information for the proto kind. For example, -// Int64Kind, Sint64Kind, and Sfixed64Kind are all represented by int64, +// [Int64Kind], [Sint64Kind], and [Sfixed64Kind] are all represented by int64, // but use different integer encoding methods. // -// The List or Map types are used if the field cardinality is repeated. -// A field is a List if FieldDescriptor.IsList reports true. -// A field is a Map if FieldDescriptor.IsMap reports true. +// The [List] or [Map] types are used if the field cardinality is repeated. +// A field is a [List] if [FieldDescriptor.IsList] reports true. +// A field is a [Map] if [FieldDescriptor.IsMap] reports true. // // Converting to/from a Value and a concrete Go value panics on type mismatch. -// For example, ValueOf("hello").Int() panics because this attempts to +// For example, [ValueOf]("hello").Int() panics because this attempts to // retrieve an int64 from a string. // -// List, Map, and Message Values are called "composite" values. +// [List], [Map], and [Message] Values are called "composite" values. // // A composite Value may alias (reference) memory at some location, // such that changes to the Value updates the that location. -// A composite value acquired with a Mutable method, such as Message.Mutable, +// A composite value acquired with a Mutable method, such as [Message.Mutable], // always references the source object. // // For example: @@ -65,7 +65,7 @@ import ( // // appending to the List here may or may not modify the message. // list.Append(protoreflect.ValueOfInt32(0)) // -// Some operations, such as Message.Get, may return an "empty, read-only" +// Some operations, such as [Message.Get], may return an "empty, read-only" // composite Value. Modifying an empty, read-only value panics. type Value value @@ -306,7 +306,7 @@ func (v Value) Float() float64 { } } -// String returns v as a string. Since this method implements fmt.Stringer, +// String returns v as a string. Since this method implements [fmt.Stringer], // this returns the formatted string value for any non-string type. 
func (v Value) String() string { switch v.typ { @@ -327,7 +327,7 @@ func (v Value) Bytes() []byte { } } -// Enum returns v as a EnumNumber and panics if the type is not a EnumNumber. +// Enum returns v as a [EnumNumber] and panics if the type is not a [EnumNumber]. func (v Value) Enum() EnumNumber { switch v.typ { case enumType: @@ -337,7 +337,7 @@ func (v Value) Enum() EnumNumber { } } -// Message returns v as a Message and panics if the type is not a Message. +// Message returns v as a [Message] and panics if the type is not a [Message]. func (v Value) Message() Message { switch vi := v.getIface().(type) { case Message: @@ -347,7 +347,7 @@ func (v Value) Message() Message { } } -// List returns v as a List and panics if the type is not a List. +// List returns v as a [List] and panics if the type is not a [List]. func (v Value) List() List { switch vi := v.getIface().(type) { case List: @@ -357,7 +357,7 @@ func (v Value) List() List { } } -// Map returns v as a Map and panics if the type is not a Map. +// Map returns v as a [Map] and panics if the type is not a [Map]. func (v Value) Map() Map { switch vi := v.getIface().(type) { case Map: @@ -367,7 +367,7 @@ func (v Value) Map() Map { } } -// MapKey returns v as a MapKey and panics for invalid MapKey types. +// MapKey returns v as a [MapKey] and panics for invalid [MapKey] types. func (v Value) MapKey() MapKey { switch v.typ { case boolType, int32Type, int64Type, uint32Type, uint64Type, stringType: @@ -378,8 +378,8 @@ func (v Value) MapKey() MapKey { } // MapKey is used to index maps, where the Go type of the MapKey must match -// the specified key Kind (see MessageDescriptor.IsMapEntry). -// The following shows what Go type is used to represent each proto Kind: +// the specified key [Kind] (see [MessageDescriptor.IsMapEntry]). +// The following shows what Go type is used to represent each proto [Kind]: // // â•”â•â•â•â•â•â•â•â•â•â•¤â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•— // â•‘ Go type │ Protobuf kind â•‘ @@ -392,13 +392,13 @@ func (v Value) MapKey() MapKey { // â•‘ string │ StringKind â•‘ // â•šâ•â•â•â•â•â•â•â•â•â•§â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â• // -// A MapKey is constructed and accessed through a Value: +// A MapKey is constructed and accessed through a [Value]: // // k := ValueOf("hash").MapKey() // convert string to MapKey // s := k.String() // convert MapKey to string // -// The MapKey is a strict subset of valid types used in Value; -// converting a Value to a MapKey with an invalid type panics. +// The MapKey is a strict subset of valid types used in [Value]; +// converting a [Value] to a MapKey with an invalid type panics. type MapKey value // IsValid reports whether k is populated with a value. @@ -426,13 +426,13 @@ func (k MapKey) Uint() uint64 { return Value(k).Uint() } -// String returns k as a string. Since this method implements fmt.Stringer, +// String returns k as a string. Since this method implements [fmt.Stringer], // this returns the formatted string value for any non-string type. func (k MapKey) String() string { return Value(k).String() } -// Value returns k as a Value. +// Value returns k as a [Value]. 
func (k MapKey) Value() Value { return Value(k) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go similarity index 97% rename from vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go rename to vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index 702ddf22a..b1fdbe3e8 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine +//go:build !purego && !appengine && !go1.21 +// +build !purego,!appengine,!go1.21 package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go new file mode 100644 index 000000000..435470111 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -0,0 +1,87 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine && go1.21 +// +build !purego,!appengine,go1.21 + +package protoreflect + +import ( + "unsafe" + + "google.golang.org/protobuf/internal/pragma" +) + +type ( + ifaceHeader struct { + _ [0]interface{} // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. + Type unsafe.Pointer + Data unsafe.Pointer + } +) + +var ( + nilType = typeOf(nil) + boolType = typeOf(*new(bool)) + int32Type = typeOf(*new(int32)) + int64Type = typeOf(*new(int64)) + uint32Type = typeOf(*new(uint32)) + uint64Type = typeOf(*new(uint64)) + float32Type = typeOf(*new(float32)) + float64Type = typeOf(*new(float64)) + stringType = typeOf(*new(string)) + bytesType = typeOf(*new([]byte)) + enumType = typeOf(*new(EnumNumber)) +) + +// typeOf returns a pointer to the Go type information. +// The pointer is comparable and equal if and only if the types are identical. +func typeOf(t interface{}) unsafe.Pointer { + return (*ifaceHeader)(unsafe.Pointer(&t)).Type +} + +// value is a union where only one type can be represented at a time. +// The struct is 24B large on 64-bit systems and requires the minimum storage +// necessary to represent each possible type. +// +// The Go GC needs to be able to scan variables containing pointers. +// As such, pointers and non-pointers cannot be intermixed. +type value struct { + pragma.DoNotCompare // 0B + + // typ stores the type of the value as a pointer to the Go type. + typ unsafe.Pointer // 8B + + // ptr stores the data pointer for a String, Bytes, or interface value. + ptr unsafe.Pointer // 8B + + // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or + // Enum value as a raw uint64. + // + // It is also used to store the length of a String or Bytes value; + // the capacity is ignored. 
+ num uint64 // 8B +} + +func valueOfString(v string) Value { + return Value{typ: stringType, ptr: unsafe.Pointer(unsafe.StringData(v)), num: uint64(len(v))} +} +func valueOfBytes(v []byte) Value { + return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))} +} +func valueOfIface(v interface{}) Value { + p := (*ifaceHeader)(unsafe.Pointer(&v)) + return Value{typ: p.Type, ptr: p.Data} +} + +func (v Value) getString() string { + return unsafe.String((*byte)(v.ptr), v.num) +} +func (v Value) getBytes() []byte { + return unsafe.Slice((*byte)(v.ptr), v.num) +} +func (v Value) getIface() (x interface{}) { + *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} + return x +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index aeb559774..6267dc52a 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -5,12 +5,12 @@ // Package protoregistry provides data structures to register and lookup // protobuf descriptor types. // -// The Files registry contains file descriptors and provides the ability +// The [Files] registry contains file descriptors and provides the ability // to iterate over the files or lookup a specific descriptor within the files. -// Files only contains protobuf descriptors and has no understanding of Go +// [Files] only contains protobuf descriptors and has no understanding of Go // type information that may be associated with each descriptor. // -// The Types registry contains descriptor types for which there is a known +// The [Types] registry contains descriptor types for which there is a known // Go type associated with that descriptor. It provides the ability to iterate // over the registered types or lookup a type by name. package protoregistry @@ -218,7 +218,7 @@ func (r *Files) checkGenProtoConflict(path string) { // FindDescriptorByName looks up a descriptor by the full name. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Files) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { if r == nil { return nil, NotFound @@ -310,7 +310,7 @@ func (s *nameSuffix) Pop() (name protoreflect.Name) { // FindFileByPath looks up a file by the path. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. // This returns an error if multiple files have the same path. func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { if r == nil { @@ -431,7 +431,7 @@ func rangeTopLevelDescriptors(fd protoreflect.FileDescriptor, f func(protoreflec // A compliant implementation must deterministically return the same type // if no error is encountered. // -// The Types type implements this interface. +// The [Types] type implements this interface. type MessageTypeResolver interface { // FindMessageByName looks up a message by its full name. // E.g., "google.protobuf.Any" @@ -451,7 +451,7 @@ type MessageTypeResolver interface { // A compliant implementation must deterministically return the same type // if no error is encountered. // -// The Types type implements this interface. +// The [Types] type implements this interface. type ExtensionTypeResolver interface { // FindExtensionByName looks up a extension field by the field's full name. 
// Note that this is the full name of the field as determined by @@ -590,7 +590,7 @@ func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interfac // FindEnumByName looks up an enum by its full name. // E.g., "google.protobuf.Field.Kind". // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumType, error) { if r == nil { return nil, NotFound @@ -611,7 +611,7 @@ func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumTyp // FindMessageByName looks up a message by its full name, // e.g. "google.protobuf.Any". // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { if r == nil { return nil, NotFound @@ -632,7 +632,7 @@ func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.M // FindMessageByURL looks up a message by a URL identifier. // See documentation on google.protobuf.Any.type_url for the URL format. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { // This function is similar to FindMessageByName but // truncates anything before and including '/' in the URL. @@ -662,7 +662,7 @@ func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { // where the extension is declared and is unrelated to the full name of the // message being extended. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { if r == nil { return nil, NotFound @@ -703,7 +703,7 @@ func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.E // FindExtensionByNumber looks up a extension field by the field number // within some parent message, identified by full name. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { if r == nil { return nil, NotFound diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 04c00f737..38daa858d 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -48,6 +48,94 @@ import ( sync "sync" ) +// The full set of known editions. +type Edition int32 + +const ( + // A placeholder for an unknown edition value. + Edition_EDITION_UNKNOWN Edition = 0 + // Legacy syntax "editions". These pre-date editions, but behave much like + // distinct editions. These can't be used to specify the edition of proto + // files, but feature definitions must supply proto2/proto3 defaults for + // backwards compatibility. + Edition_EDITION_PROTO2 Edition = 998 + Edition_EDITION_PROTO3 Edition = 999 + // Editions that have been released. The specific values are arbitrary and + // should not be depended on, but they will always be time-ordered for easy + // comparison. + Edition_EDITION_2023 Edition = 1000 + // Placeholder editions for testing feature resolution. 
These should not be + // used or relyed on outside of tests. + Edition_EDITION_1_TEST_ONLY Edition = 1 + Edition_EDITION_2_TEST_ONLY Edition = 2 + Edition_EDITION_99997_TEST_ONLY Edition = 99997 + Edition_EDITION_99998_TEST_ONLY Edition = 99998 + Edition_EDITION_99999_TEST_ONLY Edition = 99999 +) + +// Enum value maps for Edition. +var ( + Edition_name = map[int32]string{ + 0: "EDITION_UNKNOWN", + 998: "EDITION_PROTO2", + 999: "EDITION_PROTO3", + 1000: "EDITION_2023", + 1: "EDITION_1_TEST_ONLY", + 2: "EDITION_2_TEST_ONLY", + 99997: "EDITION_99997_TEST_ONLY", + 99998: "EDITION_99998_TEST_ONLY", + 99999: "EDITION_99999_TEST_ONLY", + } + Edition_value = map[string]int32{ + "EDITION_UNKNOWN": 0, + "EDITION_PROTO2": 998, + "EDITION_PROTO3": 999, + "EDITION_2023": 1000, + "EDITION_1_TEST_ONLY": 1, + "EDITION_2_TEST_ONLY": 2, + "EDITION_99997_TEST_ONLY": 99997, + "EDITION_99998_TEST_ONLY": 99998, + "EDITION_99999_TEST_ONLY": 99999, + } +) + +func (x Edition) Enum() *Edition { + p := new(Edition) + *p = x + return p +} + +func (x Edition) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Edition) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() +} + +func (Edition) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[0] +} + +func (x Edition) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *Edition) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = Edition(num) + return nil +} + +// Deprecated: Use Edition.Descriptor instead. +func (Edition) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} +} + // The verification state of the extension range. type ExtensionRangeOptions_VerificationState int32 @@ -80,11 +168,11 @@ func (x ExtensionRangeOptions_VerificationState) String() string { } func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() } func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[0] + return &file_google_protobuf_descriptor_proto_enumTypes[1] } func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber { @@ -125,9 +213,10 @@ const ( FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 + // Group type is deprecated and not supported after google.protobuf. However, Proto3 // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. + // treat group fields as unknown fields. In Editions, the group wire format + // can be enabled via the `message_encoding` feature. FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // Length-delimited aggregate. // New in version 2. 
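The new Edition enum (and the FeatureSet enums added further down) are ordinary generated proto2 enums, so callers consume them through the usual helpers. A short sketch assuming only the generated code in this file plus proto.String from the same module (the package main wrapper and the example.proto name are illustrative):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Generated enum helpers: Enum(), String(), Number() and the name/value maps.
	e := descriptorpb.Edition_EDITION_2023
	fmt.Println(e.String(), e.Number())         // EDITION_2023 1000
	fmt.Println(descriptorpb.Edition_name[998]) // EDITION_PROTO2

	// FileDescriptorProto.Edition is now a typed *Edition on field 14 rather
	// than the old opaque *string, so it is populated via the Enum() helper.
	fdp := &descriptorpb.FileDescriptorProto{
		Name:    proto.String("example.proto"),
		Syntax:  proto.String("editions"),
		Edition: descriptorpb.Edition_EDITION_2023.Enum(),
	}
	fmt.Println(fdp.GetEdition())

	// protoreflect gains a matching Syntax constant alongside Proto2 and Proto3.
	_ = protoreflect.Editions
}

Because Edition and the six FeatureSet enums are inserted into descriptor.proto's enum table, the existing file_google_protobuf_descriptor_proto_enumTypes indices move, which is what the mechanical index changes in the surrounding hunks reflect.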
@@ -195,11 +284,11 @@ func (x FieldDescriptorProto_Type) String() string { } func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() } func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[1] + return &file_google_protobuf_descriptor_proto_enumTypes[2] } func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { @@ -226,21 +315,24 @@ type FieldDescriptorProto_Label int32 const ( // 0 is reserved for errors FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 - FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 + // The required label is only allowed in google.protobuf. In proto3 and Editions + // it's explicitly prohibited. In Editions, the `field_presence` feature + // can be used to get this behavior. + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 ) // Enum value maps for FieldDescriptorProto_Label. var ( FieldDescriptorProto_Label_name = map[int32]string{ 1: "LABEL_OPTIONAL", - 2: "LABEL_REQUIRED", 3: "LABEL_REPEATED", + 2: "LABEL_REQUIRED", } FieldDescriptorProto_Label_value = map[string]int32{ "LABEL_OPTIONAL": 1, - "LABEL_REQUIRED": 2, "LABEL_REPEATED": 3, + "LABEL_REQUIRED": 2, } ) @@ -255,11 +347,11 @@ func (x FieldDescriptorProto_Label) String() string { } func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() } func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[2] + return &file_google_protobuf_descriptor_proto_enumTypes[3] } func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { @@ -316,11 +408,11 @@ func (x FileOptions_OptimizeMode) String() string { } func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() } func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[3] + return &file_google_protobuf_descriptor_proto_enumTypes[4] } func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { @@ -382,11 +474,11 @@ func (x FieldOptions_CType) String() string { } func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() } func (FieldOptions_CType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[4] + return &file_google_protobuf_descriptor_proto_enumTypes[5] } func (x FieldOptions_CType) Number() protoreflect.EnumNumber { @@ -444,11 +536,11 @@ func (x FieldOptions_JSType) String() string { } func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() } func (FieldOptions_JSType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] + return 
&file_google_protobuf_descriptor_proto_enumTypes[6] } func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { @@ -506,11 +598,11 @@ func (x FieldOptions_OptionRetention) String() string { } func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() } func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[6] + return &file_google_protobuf_descriptor_proto_enumTypes[7] } func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { @@ -590,11 +682,11 @@ func (x FieldOptions_OptionTargetType) String() string { } func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() } func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[7] + return &file_google_protobuf_descriptor_proto_enumTypes[8] } func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { @@ -652,11 +744,11 @@ func (x MethodOptions_IdempotencyLevel) String() string { } func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() } func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[8] + return &file_google_protobuf_descriptor_proto_enumTypes[9] } func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { @@ -678,6 +770,363 @@ func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} } +type FeatureSet_FieldPresence int32 + +const ( + FeatureSet_FIELD_PRESENCE_UNKNOWN FeatureSet_FieldPresence = 0 + FeatureSet_EXPLICIT FeatureSet_FieldPresence = 1 + FeatureSet_IMPLICIT FeatureSet_FieldPresence = 2 + FeatureSet_LEGACY_REQUIRED FeatureSet_FieldPresence = 3 +) + +// Enum value maps for FeatureSet_FieldPresence. +var ( + FeatureSet_FieldPresence_name = map[int32]string{ + 0: "FIELD_PRESENCE_UNKNOWN", + 1: "EXPLICIT", + 2: "IMPLICIT", + 3: "LEGACY_REQUIRED", + } + FeatureSet_FieldPresence_value = map[string]int32{ + "FIELD_PRESENCE_UNKNOWN": 0, + "EXPLICIT": 1, + "IMPLICIT": 2, + "LEGACY_REQUIRED": 3, + } +) + +func (x FeatureSet_FieldPresence) Enum() *FeatureSet_FieldPresence { + p := new(FeatureSet_FieldPresence) + *p = x + return p +} + +func (x FeatureSet_FieldPresence) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor() +} + +func (FeatureSet_FieldPresence) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[10] +} + +func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_FieldPresence) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_FieldPresence(num) + return nil +} + +// Deprecated: Use FeatureSet_FieldPresence.Descriptor instead. +func (FeatureSet_FieldPresence) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} +} + +type FeatureSet_EnumType int32 + +const ( + FeatureSet_ENUM_TYPE_UNKNOWN FeatureSet_EnumType = 0 + FeatureSet_OPEN FeatureSet_EnumType = 1 + FeatureSet_CLOSED FeatureSet_EnumType = 2 +) + +// Enum value maps for FeatureSet_EnumType. +var ( + FeatureSet_EnumType_name = map[int32]string{ + 0: "ENUM_TYPE_UNKNOWN", + 1: "OPEN", + 2: "CLOSED", + } + FeatureSet_EnumType_value = map[string]int32{ + "ENUM_TYPE_UNKNOWN": 0, + "OPEN": 1, + "CLOSED": 2, + } +) + +func (x FeatureSet_EnumType) Enum() *FeatureSet_EnumType { + p := new(FeatureSet_EnumType) + *p = x + return p +} + +func (x FeatureSet_EnumType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor() +} + +func (FeatureSet_EnumType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[11] +} + +func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_EnumType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_EnumType(num) + return nil +} + +// Deprecated: Use FeatureSet_EnumType.Descriptor instead. +func (FeatureSet_EnumType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 1} +} + +type FeatureSet_RepeatedFieldEncoding int32 + +const ( + FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN FeatureSet_RepeatedFieldEncoding = 0 + FeatureSet_PACKED FeatureSet_RepeatedFieldEncoding = 1 + FeatureSet_EXPANDED FeatureSet_RepeatedFieldEncoding = 2 +) + +// Enum value maps for FeatureSet_RepeatedFieldEncoding. +var ( + FeatureSet_RepeatedFieldEncoding_name = map[int32]string{ + 0: "REPEATED_FIELD_ENCODING_UNKNOWN", + 1: "PACKED", + 2: "EXPANDED", + } + FeatureSet_RepeatedFieldEncoding_value = map[string]int32{ + "REPEATED_FIELD_ENCODING_UNKNOWN": 0, + "PACKED": 1, + "EXPANDED": 2, + } +) + +func (x FeatureSet_RepeatedFieldEncoding) Enum() *FeatureSet_RepeatedFieldEncoding { + p := new(FeatureSet_RepeatedFieldEncoding) + *p = x + return p +} + +func (x FeatureSet_RepeatedFieldEncoding) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor() +} + +func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[12] +} + +func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_RepeatedFieldEncoding) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_RepeatedFieldEncoding(num) + return nil +} + +// Deprecated: Use FeatureSet_RepeatedFieldEncoding.Descriptor instead. +func (FeatureSet_RepeatedFieldEncoding) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 2} +} + +type FeatureSet_Utf8Validation int32 + +const ( + FeatureSet_UTF8_VALIDATION_UNKNOWN FeatureSet_Utf8Validation = 0 + FeatureSet_NONE FeatureSet_Utf8Validation = 1 + FeatureSet_VERIFY FeatureSet_Utf8Validation = 2 +) + +// Enum value maps for FeatureSet_Utf8Validation. +var ( + FeatureSet_Utf8Validation_name = map[int32]string{ + 0: "UTF8_VALIDATION_UNKNOWN", + 1: "NONE", + 2: "VERIFY", + } + FeatureSet_Utf8Validation_value = map[string]int32{ + "UTF8_VALIDATION_UNKNOWN": 0, + "NONE": 1, + "VERIFY": 2, + } +) + +func (x FeatureSet_Utf8Validation) Enum() *FeatureSet_Utf8Validation { + p := new(FeatureSet_Utf8Validation) + *p = x + return p +} + +func (x FeatureSet_Utf8Validation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor() +} + +func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[13] +} + +func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_Utf8Validation) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_Utf8Validation(num) + return nil +} + +// Deprecated: Use FeatureSet_Utf8Validation.Descriptor instead. +func (FeatureSet_Utf8Validation) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 3} +} + +type FeatureSet_MessageEncoding int32 + +const ( + FeatureSet_MESSAGE_ENCODING_UNKNOWN FeatureSet_MessageEncoding = 0 + FeatureSet_LENGTH_PREFIXED FeatureSet_MessageEncoding = 1 + FeatureSet_DELIMITED FeatureSet_MessageEncoding = 2 +) + +// Enum value maps for FeatureSet_MessageEncoding. +var ( + FeatureSet_MessageEncoding_name = map[int32]string{ + 0: "MESSAGE_ENCODING_UNKNOWN", + 1: "LENGTH_PREFIXED", + 2: "DELIMITED", + } + FeatureSet_MessageEncoding_value = map[string]int32{ + "MESSAGE_ENCODING_UNKNOWN": 0, + "LENGTH_PREFIXED": 1, + "DELIMITED": 2, + } +) + +func (x FeatureSet_MessageEncoding) Enum() *FeatureSet_MessageEncoding { + p := new(FeatureSet_MessageEncoding) + *p = x + return p +} + +func (x FeatureSet_MessageEncoding) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor() +} + +func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[14] +} + +func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_MessageEncoding) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_MessageEncoding(num) + return nil +} + +// Deprecated: Use FeatureSet_MessageEncoding.Descriptor instead. +func (FeatureSet_MessageEncoding) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 4} +} + +type FeatureSet_JsonFormat int32 + +const ( + FeatureSet_JSON_FORMAT_UNKNOWN FeatureSet_JsonFormat = 0 + FeatureSet_ALLOW FeatureSet_JsonFormat = 1 + FeatureSet_LEGACY_BEST_EFFORT FeatureSet_JsonFormat = 2 +) + +// Enum value maps for FeatureSet_JsonFormat. +var ( + FeatureSet_JsonFormat_name = map[int32]string{ + 0: "JSON_FORMAT_UNKNOWN", + 1: "ALLOW", + 2: "LEGACY_BEST_EFFORT", + } + FeatureSet_JsonFormat_value = map[string]int32{ + "JSON_FORMAT_UNKNOWN": 0, + "ALLOW": 1, + "LEGACY_BEST_EFFORT": 2, + } +) + +func (x FeatureSet_JsonFormat) Enum() *FeatureSet_JsonFormat { + p := new(FeatureSet_JsonFormat) + *p = x + return p +} + +func (x FeatureSet_JsonFormat) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor() +} + +func (FeatureSet_JsonFormat) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[15] +} + +func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_JsonFormat) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_JsonFormat(num) + return nil +} + +// Deprecated: Use FeatureSet_JsonFormat.Descriptor instead. +func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5} +} + // Represents the identified object's effect on the element in the original // .proto file. type GeneratedCodeInfo_Annotation_Semantic int32 @@ -716,11 +1165,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string { } func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() } func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[9] + return &file_google_protobuf_descriptor_proto_enumTypes[16] } func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { @@ -739,7 +1188,7 @@ func (x *GeneratedCodeInfo_Annotation_Semantic) UnmarshalJSON(b []byte) error { // Deprecated: Use GeneratedCodeInfo_Annotation_Semantic.Descriptor instead. func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0, 0} } // The protocol compiler can output a FileDescriptorSet containing the .proto @@ -822,8 +1271,8 @@ type FileDescriptorProto struct { // // If `edition` is present, this value must be "editions". Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - // The edition of the proto file, which is an opaque string. 
- Edition *string `protobuf:"bytes,13,opt,name=edition" json:"edition,omitempty"` + // The edition of the proto file. + Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` } func (x *FileDescriptorProto) Reset() { @@ -942,11 +1391,11 @@ func (x *FileDescriptorProto) GetSyntax() string { return "" } -func (x *FileDescriptorProto) GetEdition() string { +func (x *FileDescriptorProto) GetEdition() Edition { if x != nil && x.Edition != nil { return *x.Edition } - return "" + return Edition_EDITION_UNKNOWN } // Describes a message type. @@ -1079,13 +1528,14 @@ type ExtensionRangeOptions struct { // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - // go/protobuf-stripping-extension-declarations - // Like Metadata, but we use a repeated field to hold all extension - // declarations. This should avoid the size increases of transforming a large - // extension range into small ranges in generated binaries. + // For external users: DO NOT USE. We are in the process of open sourcing + // extension declaration and executing internal cleanups before it can be + // used externally. Declaration []*ExtensionRangeOptions_Declaration `protobuf:"bytes,2,rep,name=declaration" json:"declaration,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` // The verification state of the range. - // TODO(b/278783756): flip the default to DECLARATION once all empty ranges + // TODO: flip the default to DECLARATION once all empty ranges // are marked as UNVERIFIED. Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` } @@ -1141,6 +1591,13 @@ func (x *ExtensionRangeOptions) GetDeclaration() []*ExtensionRangeOptions_Declar return nil } +func (x *ExtensionRangeOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_VerificationState { if x != nil && x.Verification != nil { return *x.Verification @@ -1772,6 +2229,8 @@ type FileOptions struct { // is empty. When this option is not set, the package name will be used for // determining the ruby package. RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -1963,6 +2422,13 @@ func (x *FileOptions) GetRubyPackage() string { return "" } +func (x *FileOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2039,11 +2505,13 @@ type MessageOptions struct { // This should only be used as a temporary measure against broken builds due // to the change in behavior for JSON field name conflicts. 
// - // TODO(b/261750190) This is legacy behavior we plan to remove once downstream + // TODO This is legacy behavior we plan to remove once downstream // teams have had time to migrate. // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2123,6 +2591,13 @@ func (x *MessageOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { return false } +func (x *MessageOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2147,7 +2622,9 @@ type FieldOptions struct { // a more efficient representation on the wire. Rather than repeatedly // writing the tag and type for each element, the entire array is encoded as // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. + // false will avoid using packed encoding. This option is prohibited in + // Editions, but the `repeated_field_encoding` feature can be used to control + // the behavior. Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` // The jstype option determines the JavaScript type used for values of the // field. The option is permitted only for 64 bit integral and fixed types @@ -2205,11 +2682,12 @@ type FieldOptions struct { Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // Indicate that the field value should not be printed out when using debug // formats, e.g. when the field contains sensitive credentials. - DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` - Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` - // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. - Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"` - Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` + DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` + Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` + EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"` + // Any features defined in the specific edition. 
+ Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2320,17 +2798,23 @@ func (x *FieldOptions) GetRetention() FieldOptions_OptionRetention { return FieldOptions_RETENTION_UNKNOWN } -// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. -func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType { - if x != nil && x.Target != nil { - return *x.Target +func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { + if x != nil { + return x.Targets } - return FieldOptions_TARGET_TYPE_UNKNOWN + return nil } -func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { +func (x *FieldOptions) GetEditionDefaults() []*FieldOptions_EditionDefault { if x != nil { - return x.Targets + return x.EditionDefaults + } + return nil +} + +func (x *FieldOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features } return nil } @@ -2348,6 +2832,8 @@ type OneofOptions struct { unknownFields protoimpl.UnknownFields extensionFields protoimpl.ExtensionFields + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2384,6 +2870,13 @@ func (*OneofOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13} } +func (x *OneofOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2409,11 +2902,13 @@ type EnumOptions struct { // and strips underscored from the fields before comparison in proto3 only. // The new behavior takes `json_name` into account and applies to proto2 as // well. - // TODO(b/261750190) Remove this legacy behavior once downstream teams have + // TODO Remove this legacy behavior once downstream teams have // had time to migrate. // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2477,6 +2972,13 @@ func (x *EnumOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { return false } +func (x *EnumOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2495,13 +2997,20 @@ type EnumValueOptions struct { // for the enum value, or it will be completely ignored; in the very least, // this is a formalization for deprecating enum values. Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` + // Indicate that fields annotated with this enum value should not be printed + // out when using debug formats, e.g. when the field contains sensitive + // credentials. + DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } // Default values for EnumValueOptions fields. const ( - Default_EnumValueOptions_Deprecated = bool(false) + Default_EnumValueOptions_Deprecated = bool(false) + Default_EnumValueOptions_DebugRedact = bool(false) ) func (x *EnumValueOptions) Reset() { @@ -2543,6 +3052,20 @@ func (x *EnumValueOptions) GetDeprecated() bool { return Default_EnumValueOptions_Deprecated } +func (x *EnumValueOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + +func (x *EnumValueOptions) GetDebugRedact() bool { + if x != nil && x.DebugRedact != nil { + return *x.DebugRedact + } + return Default_EnumValueOptions_DebugRedact +} + func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2556,6 +3079,8 @@ type ServiceOptions struct { unknownFields protoimpl.UnknownFields extensionFields protoimpl.ExtensionFields + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"` // Is this service deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the service, or it will be completely ignored; in the very least, @@ -2602,6 +3127,13 @@ func (*ServiceOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16} } +func (x *ServiceOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *ServiceOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -2628,6 +3160,8 @@ type MethodOptions struct { // this is a formalization for deprecating methods. Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // Any features defined in the specific edition. 
+ Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2684,6 +3218,13 @@ func (x *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { return Default_MethodOptions_IdempotencyLevel } +func (x *MethodOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2763,35 +3304,200 @@ func (x *UninterpretedOption) GetPositiveIntValue() uint64 { if x != nil && x.PositiveIntValue != nil { return *x.PositiveIntValue } - return 0 + return 0 +} + +func (x *UninterpretedOption) GetNegativeIntValue() int64 { + if x != nil && x.NegativeIntValue != nil { + return *x.NegativeIntValue + } + return 0 +} + +func (x *UninterpretedOption) GetDoubleValue() float64 { + if x != nil && x.DoubleValue != nil { + return *x.DoubleValue + } + return 0 +} + +func (x *UninterpretedOption) GetStringValue() []byte { + if x != nil { + return x.StringValue + } + return nil +} + +func (x *UninterpretedOption) GetAggregateValue() string { + if x != nil && x.AggregateValue != nil { + return *x.AggregateValue + } + return "" +} + +// TODO Enums in C++ gencode (and potentially other languages) are +// not well scoped. This means that each of the feature enums below can clash +// with each other. The short names we've chosen maximize call-site +// readability, but leave us very open to this scenario. A future feature will +// be designed and implemented to handle this, hopefully before we ever hit a +// conflict here. 
+type FeatureSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` + EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` + RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` + Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` + MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` + JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` +} + +func (x *FeatureSet) Reset() { + *x = FeatureSet{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSet) ProtoMessage() {} + +func (x *FeatureSet) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSet.ProtoReflect.Descriptor instead. +func (*FeatureSet) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} +} + +func (x *FeatureSet) GetFieldPresence() FeatureSet_FieldPresence { + if x != nil && x.FieldPresence != nil { + return *x.FieldPresence + } + return FeatureSet_FIELD_PRESENCE_UNKNOWN +} + +func (x *FeatureSet) GetEnumType() FeatureSet_EnumType { + if x != nil && x.EnumType != nil { + return *x.EnumType + } + return FeatureSet_ENUM_TYPE_UNKNOWN +} + +func (x *FeatureSet) GetRepeatedFieldEncoding() FeatureSet_RepeatedFieldEncoding { + if x != nil && x.RepeatedFieldEncoding != nil { + return *x.RepeatedFieldEncoding + } + return FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN +} + +func (x *FeatureSet) GetUtf8Validation() FeatureSet_Utf8Validation { + if x != nil && x.Utf8Validation != nil { + return *x.Utf8Validation + } + return FeatureSet_UTF8_VALIDATION_UNKNOWN +} + +func (x *FeatureSet) GetMessageEncoding() FeatureSet_MessageEncoding { + if x != nil && x.MessageEncoding != nil { + return *x.MessageEncoding + } + return FeatureSet_MESSAGE_ENCODING_UNKNOWN +} + +func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat { + if x != nil && x.JsonFormat != nil { + return *x.JsonFormat + } + return FeatureSet_JSON_FORMAT_UNKNOWN +} + +// A compiled specification for the defaults of a set of features. These +// messages are generated from FeatureSet extensions and can be used to seed +// feature resolution. 
The resolution with this object becomes a simple search +// for the closest matching edition, followed by proto merges. +type FeatureSetDefaults struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"` + // The minimum supported edition (inclusive) when this was constructed. + // Editions before this will not have defaults. + MinimumEdition *Edition `protobuf:"varint,4,opt,name=minimum_edition,json=minimumEdition,enum=google.protobuf.Edition" json:"minimum_edition,omitempty"` + // The maximum known edition (inclusive) when this was constructed. Editions + // after this will not have reliable defaults. + MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"` +} + +func (x *FeatureSetDefaults) Reset() { + *x = FeatureSetDefaults{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (x *UninterpretedOption) GetNegativeIntValue() int64 { - if x != nil && x.NegativeIntValue != nil { - return *x.NegativeIntValue - } - return 0 +func (x *FeatureSetDefaults) String() string { + return protoimpl.X.MessageStringOf(x) } -func (x *UninterpretedOption) GetDoubleValue() float64 { - if x != nil && x.DoubleValue != nil { - return *x.DoubleValue +func (*FeatureSetDefaults) ProtoMessage() {} + +func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -func (x *UninterpretedOption) GetStringValue() []byte { +// Deprecated: Use FeatureSetDefaults.ProtoReflect.Descriptor instead. 
+func (*FeatureSetDefaults) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} +} + +func (x *FeatureSetDefaults) GetDefaults() []*FeatureSetDefaults_FeatureSetEditionDefault { if x != nil { - return x.StringValue + return x.Defaults } return nil } -func (x *UninterpretedOption) GetAggregateValue() string { - if x != nil && x.AggregateValue != nil { - return *x.AggregateValue +func (x *FeatureSetDefaults) GetMinimumEdition() Edition { + if x != nil && x.MinimumEdition != nil { + return *x.MinimumEdition } - return "" + return Edition_EDITION_UNKNOWN +} + +func (x *FeatureSetDefaults) GetMaximumEdition() Edition { + if x != nil && x.MaximumEdition != nil { + return *x.MaximumEdition + } + return Edition_EDITION_UNKNOWN } // Encapsulates information about the original source file from which a @@ -2855,7 +3561,7 @@ type SourceCodeInfo struct { func (x *SourceCodeInfo) Reset() { *x = SourceCodeInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2868,7 +3574,7 @@ func (x *SourceCodeInfo) String() string { func (*SourceCodeInfo) ProtoMessage() {} func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2881,7 +3587,7 @@ func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceCodeInfo.ProtoReflect.Descriptor instead. func (*SourceCodeInfo) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21} } func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { @@ -2907,7 +3613,7 @@ type GeneratedCodeInfo struct { func (x *GeneratedCodeInfo) Reset() { *x = GeneratedCodeInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2920,7 +3626,7 @@ func (x *GeneratedCodeInfo) String() string { func (*GeneratedCodeInfo) ProtoMessage() {} func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2933,7 +3639,7 @@ func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use GeneratedCodeInfo.ProtoReflect.Descriptor instead. 
func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22} } func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { @@ -2956,7 +3662,7 @@ type DescriptorProto_ExtensionRange struct { func (x *DescriptorProto_ExtensionRange) Reset() { *x = DescriptorProto_ExtensionRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2969,7 +3675,7 @@ func (x *DescriptorProto_ExtensionRange) String() string { func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3021,7 +3727,7 @@ type DescriptorProto_ReservedRange struct { func (x *DescriptorProto_ReservedRange) Reset() { *x = DescriptorProto_ReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3034,7 +3740,7 @@ func (x *DescriptorProto_ReservedRange) String() string { func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3078,10 +3784,6 @@ type ExtensionRangeOptions_Declaration struct { // Metadata.type, Declaration.type must have a leading dot for messages // and enums. Type *string `protobuf:"bytes,3,opt,name=type" json:"type,omitempty"` - // Deprecated. Please use "repeated". - // - // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. - IsRepeated *bool `protobuf:"varint,4,opt,name=is_repeated,json=isRepeated" json:"is_repeated,omitempty"` // If true, indicates that the number is reserved in the extension range, // and any extension field with the number will fail to compile. Set this // when a declared extension field is deleted. 
@@ -3094,7 +3796,7 @@ type ExtensionRangeOptions_Declaration struct { func (x *ExtensionRangeOptions_Declaration) Reset() { *x = ExtensionRangeOptions_Declaration{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3107,7 +3809,7 @@ func (x *ExtensionRangeOptions_Declaration) String() string { func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3144,14 +3846,6 @@ func (x *ExtensionRangeOptions_Declaration) GetType() string { return "" } -// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. -func (x *ExtensionRangeOptions_Declaration) GetIsRepeated() bool { - if x != nil && x.IsRepeated != nil { - return *x.IsRepeated - } - return false -} - func (x *ExtensionRangeOptions_Declaration) GetReserved() bool { if x != nil && x.Reserved != nil { return *x.Reserved @@ -3184,7 +3878,7 @@ type EnumDescriptorProto_EnumReservedRange struct { func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3197,7 +3891,7 @@ func (x *EnumDescriptorProto_EnumReservedRange) String() string { func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3227,6 +3921,61 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { return 0 } +type FieldOptions_EditionDefault struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value. +} + +func (x *FieldOptions_EditionDefault) Reset() { + *x = FieldOptions_EditionDefault{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOptions_EditionDefault) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions_EditionDefault) ProtoMessage() {} + +func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions_EditionDefault.ProtoReflect.Descriptor instead. 
+func (*FieldOptions_EditionDefault) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0} +} + +func (x *FieldOptions_EditionDefault) GetEdition() Edition { + if x != nil && x.Edition != nil { + return *x.Edition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_EditionDefault) GetValue() string { + if x != nil && x.Value != nil { + return *x.Value + } + return "" +} + // The name of the uninterpreted option. Each string represents a segment in // a dot-separated name. is_extension is true iff a segment represents an // extension (denoted with parentheses in options specs in .proto files). @@ -3244,7 +3993,7 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3257,7 +4006,7 @@ func (x *UninterpretedOption_NamePart) String() string { func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3287,6 +4036,65 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool { return false } +// A map from every known edition with a unique set of defaults to its +// defaults. Not all editions may be contained here. For a given edition, +// the defaults at the closest matching edition ordered at or before it should +// be used. This field must be in strict ascending order by edition. +type FeatureSetDefaults_FeatureSetEditionDefault struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { + *x = FeatureSetDefaults_FeatureSetEditionDefault{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSetDefaults_FeatureSetEditionDefault.ProtoReflect.Descriptor instead. 
+func (*FeatureSetDefaults_FeatureSetEditionDefault) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetEdition() Edition { + if x != nil && x.Edition != nil { + return *x.Edition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + type SourceCodeInfo_Location struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3388,7 +4196,7 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3401,7 +4209,7 @@ func (x *SourceCodeInfo_Location) String() string { func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3414,7 +4222,7 @@ func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceCodeInfo_Location.ProtoReflect.Descriptor instead. func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21, 0} } func (x *SourceCodeInfo_Location) GetPath() []int32 { @@ -3475,7 +4283,7 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3488,7 +4296,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string { func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3501,7 +4309,7 @@ func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { // Deprecated: Use GeneratedCodeInfo_Annotation.ProtoReflect.Descriptor instead. 
func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0} } func (x *GeneratedCodeInfo_Annotation) GetPath() []int32 { @@ -3550,7 +4358,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x22, 0xfe, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x6c, 0x65, 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, @@ -3588,527 +4396,687 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, - 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 
0x6d, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, + 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, + 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, + 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, - 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, - 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, - 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, + 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, + 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6e, 
0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, + 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, - 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, - 0xad, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, - 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x68, - 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 
0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x0a, - 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xb3, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, - 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x23, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, - 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x22, 0x34, - 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, - 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, - 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 
0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, - 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, - 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, - 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, - 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, - 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, - 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, - 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, - 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, - 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, - 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, - 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, - 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, - 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, - 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, - 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, - 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, - 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, - 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, - 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, - 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, - 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, - 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, - 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, - 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 
0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, - 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, - 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, - 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, - 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, - 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, + 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, + 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, - 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, - 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xc7, 0x04, 0x0a, 0x15, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 
0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, + 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, + 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x12, 0x68, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76, + 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94, 0x01, 0x0a, 0x0b, + 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41, + 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, + 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, + 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 
0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, + 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, + 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, + 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, + 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, + 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, + 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, + 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, + 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, + 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 
0x09, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, + 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, + 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, + 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, + 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, + 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, + 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, + 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, + 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, + 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, + 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x72, 
0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, + 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, - 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, - 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, + 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 
0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0xca, + 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, + 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, + 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, + 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, + 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, + 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, + 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, + 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 
0x72, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, + 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, + 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, + 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, + 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, + 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, + 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, + 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 
0x0a, + 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, + 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46, - 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, - 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, - 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, - 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, - 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, - 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, - 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, - 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, - 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, - 0x6d, 0x69, 0x7a, 
0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, - 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, - 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, - 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, - 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, - 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, - 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, - 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, - 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, - 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, - 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 
0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, - 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, - 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, - 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, - 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, - 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, - 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb, - 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, - 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, - 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, - 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, - 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 
0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, - 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x85, 0x09, 0x0a, - 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, - 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, - 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, - 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, - 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, - 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, - 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, - 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, - 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 
0x6f, - 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, - 0x12, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, - 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, - 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, - 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, - 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, - 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, - 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, - 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, - 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, - 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, - 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, - 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, - 0x10, 0x03, 0x12, 
0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, - 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, - 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, - 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, - 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, - 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x98, 0x02, 0x0a, 0x0b, 0x45, 0x6e, - 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, - 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, - 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, + 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, + 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, + 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, + 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, + 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03, 0x0a, 0x0e, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, + 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, + 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, + 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, + 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 
0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, + 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, - 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, - 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, + 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, + 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, + 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 
0x64, 0x4c, 0x65, + 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, + 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, - 0x80, 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, - 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, - 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, - 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, - 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, - 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, - 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, - 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, - 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, - 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, - 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, + 0x80, 0x80, 0x80, 
0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, + 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, + 0x10, 0x0a, 0x22, 0xad, 0x0a, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, + 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, + 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, + 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, + 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, + 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, + 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, + 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, + 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, + 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, + 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, + 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, + 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, + 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x2f, 0x0a, + 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, + 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, + 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, + 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, + 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, + 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, + 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, + 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, + 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, + 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, + 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, + 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 
0x41, 0x4e, 0x47, 0x45, 0x10, + 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, + 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, + 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, + 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, + 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, + 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, + 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, + 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, + 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, + 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, + 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, + 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, + 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, + 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 
0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, + 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, + 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, + 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, + 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x13, 
0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, + 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, + 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, + 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, + 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, + 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, + 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, + 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, + 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, + 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, + 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, + 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, + 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, + 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, + 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 
0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, + 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, + 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, + 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, + 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, + 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, + 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfc, 0x09, 0x0a, 0x0a, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x8b, 0x01, 0x0a, 0x0e, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x39, 0x88, + 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, + 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, + 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, + 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x50, + 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x66, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, + 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, 0x12, 0x06, + 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4f, 0x50, + 0x45, 0x4e, 0x18, 0xe7, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x92, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 
0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, + 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x42, 0x27, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, + 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, + 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0x52, 0x15, 0x72, + 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x12, 0x78, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, + 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, 0x07, + 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0x52, 0x0e, + 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78, + 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, + 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x20, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, + 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, + 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7c, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, + 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, - 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, - 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, - 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, - 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 
0x52, - 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, - 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, - 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, - 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, - 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, - 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, - 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, - 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, - 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 
0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, - 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, - 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, - 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, - 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, - 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, - 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, - 0x02, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, - 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x33, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, + 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, + 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, + 0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, + 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, + 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, 0x4c, 0x44, + 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, + 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x02, 0x12, + 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, + 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10, + 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 
0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, 0x56, 0x0a, + 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, + 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, + 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x50, + 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, + 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, 0x38, 0x5f, + 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, + 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x01, 0x12, 0x0a, + 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, + 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, + 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, + 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, + 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, + 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, + 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, + 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, + 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9, + 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, + 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xfe, 0x02, 0x0a, 0x12, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, + 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, + 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, + 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 
0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x87, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, + 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, + 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, + 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, + 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, + 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, + 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, + 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, + 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, + 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, + 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 
0x74, 0x68, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, + 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, + 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, + 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, + 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, + 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, + 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xea, 0x01, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, + 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, + 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, + 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, + 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, + 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, + 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, + 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, + 0x10, 0x9f, 0x8d, 0x06, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, + 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, + 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 
0x6f, 0x6e, } var ( @@ -4123,103 +5091,136 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 10) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 28) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 32) var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ - (ExtensionRangeOptions_VerificationState)(0), // 0: google.protobuf.ExtensionRangeOptions.VerificationState - (FieldDescriptorProto_Type)(0), // 1: google.protobuf.FieldDescriptorProto.Type - (FieldDescriptorProto_Label)(0), // 2: google.protobuf.FieldDescriptorProto.Label - (FileOptions_OptimizeMode)(0), // 3: google.protobuf.FileOptions.OptimizeMode - (FieldOptions_CType)(0), // 4: google.protobuf.FieldOptions.CType - (FieldOptions_JSType)(0), // 5: google.protobuf.FieldOptions.JSType - (FieldOptions_OptionRetention)(0), // 6: google.protobuf.FieldOptions.OptionRetention - (FieldOptions_OptionTargetType)(0), // 7: google.protobuf.FieldOptions.OptionTargetType - (MethodOptions_IdempotencyLevel)(0), // 8: google.protobuf.MethodOptions.IdempotencyLevel - (GeneratedCodeInfo_Annotation_Semantic)(0), // 9: google.protobuf.GeneratedCodeInfo.Annotation.Semantic - (*FileDescriptorSet)(nil), // 10: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 11: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 12: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 13: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 14: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 15: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 16: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 17: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 18: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 19: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 20: google.protobuf.FileOptions - (*MessageOptions)(nil), // 21: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 22: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 23: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 24: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 25: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 26: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 27: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 28: google.protobuf.UninterpretedOption - (*SourceCodeInfo)(nil), // 29: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 30: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 31: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 32: google.protobuf.DescriptorProto.ReservedRange - (*ExtensionRangeOptions_Declaration)(nil), // 33: google.protobuf.ExtensionRangeOptions.Declaration - (*EnumDescriptorProto_EnumReservedRange)(nil), // 34: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*UninterpretedOption_NamePart)(nil), // 35: google.protobuf.UninterpretedOption.NamePart - (*SourceCodeInfo_Location)(nil), // 36: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 37: 
google.protobuf.GeneratedCodeInfo.Annotation + (Edition)(0), // 0: google.protobuf.Edition + (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState + (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 3: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 4: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 5: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 6: google.protobuf.FieldOptions.JSType + (FieldOptions_OptionRetention)(0), // 7: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 8: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 9: google.protobuf.MethodOptions.IdempotencyLevel + (FeatureSet_FieldPresence)(0), // 10: google.protobuf.FeatureSet.FieldPresence + (FeatureSet_EnumType)(0), // 11: google.protobuf.FeatureSet.EnumType + (FeatureSet_RepeatedFieldEncoding)(0), // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding + (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation + (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding + (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat + (GeneratedCodeInfo_Annotation_Semantic)(0), // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 17: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 18: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 19: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 20: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 21: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 22: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 23: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 24: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 25: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 26: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 27: google.protobuf.FileOptions + (*MessageOptions)(nil), // 28: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 29: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 30: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 31: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 32: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 33: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 34: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 35: google.protobuf.UninterpretedOption + (*FeatureSet)(nil), // 36: google.protobuf.FeatureSet + (*FeatureSetDefaults)(nil), // 37: google.protobuf.FeatureSetDefaults + (*SourceCodeInfo)(nil), // 38: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 39: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 40: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 41: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault + 
(*UninterpretedOption_NamePart)(nil), // 45: google.protobuf.UninterpretedOption.NamePart + (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 46: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + (*SourceCodeInfo_Location)(nil), // 47: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 48: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 11, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 12, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 16, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 18, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 14, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 20, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 29, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo - 14, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 14, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 12, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 16, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 31, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 15, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 21, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 32, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 28, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 33, // 16: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration - 0, // 17: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState - 2, // 18: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label - 1, // 19: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 22, // 20: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 23, // 21: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 17, // 22: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 24, // 23: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 34, // 24: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 25, // 25: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 19, // 26: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 26, // 27: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 27, // 28: 
google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions - 3, // 29: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 28, // 30: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 31: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 4, // 32: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType - 5, // 33: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 6, // 34: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention - 7, // 35: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType - 7, // 36: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType - 28, // 37: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 38: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 39: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 40: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 41: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 8, // 42: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 28, // 43: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 35, // 44: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 36, // 45: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 37, // 46: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 13, // 47: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 9, // 48: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 49, // [49:49] is the sub-list for method output_type - 49, // [49:49] is the sub-list for method input_type - 49, // [49:49] is the sub-list for extension type_name - 49, // [49:49] is the sub-list for extension extendee - 0, // [0:49] is the sub-list for field type_name + 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition + 21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 21, // 9: 
google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet + 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState + 3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType + 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault + 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 42: 
google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 43: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 44: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 45: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 46: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 47: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 48: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 49: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 50: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 9, // 51: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 36, // 52: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 53: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 45, // 54: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 10, // 55: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 11, // 56: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 12, // 57: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 13, // 58: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation + 14, // 59: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 15, // 60: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 46, // 61: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 62: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 63: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 47, // 64: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 48, // 65: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 20, // 66: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 67: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 68: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 36, // 69: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features:type_name -> google.protobuf.FeatureSet + 16, // 70: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 71, // [71:71] is the sub-list for method output_type + 71, // [71:71] is the sub-list for method input_type + 71, // [71:71] is the sub-list for extension type_name + 71, // [71:71] is the sub-list for extension extendee + 0, // [0:71] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -4475,19 +5476,21 @@ func 
file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceCodeInfo); i { + switch v := v.(*FeatureSet); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields + case 3: + return &v.extensionFields default: return nil } } file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GeneratedCodeInfo); i { + switch v := v.(*FeatureSetDefaults); i { case 0: return &v.state case 1: @@ -4499,7 +5502,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescriptorProto_ExtensionRange); i { + switch v := v.(*SourceCodeInfo); i { case 0: return &v.state case 1: @@ -4511,7 +5514,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescriptorProto_ReservedRange); i { + switch v := v.(*GeneratedCodeInfo); i { case 0: return &v.state case 1: @@ -4523,7 +5526,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionRangeOptions_Declaration); i { + switch v := v.(*DescriptorProto_ExtensionRange); i { case 0: return &v.state case 1: @@ -4535,7 +5538,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { + switch v := v.(*DescriptorProto_ReservedRange); i { case 0: return &v.state case 1: @@ -4547,7 +5550,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UninterpretedOption_NamePart); i { + switch v := v.(*ExtensionRangeOptions_Declaration); i { case 0: return &v.state case 1: @@ -4559,7 +5562,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceCodeInfo_Location); i { + switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { case 0: return &v.state case 1: @@ -4571,6 +5574,54 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldOptions_EditionDefault); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UninterpretedOption_NamePart); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*SourceCodeInfo_Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GeneratedCodeInfo_Annotation); i { case 0: return &v.state @@ -4588,8 +5639,8 @@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, - NumEnums: 10, - NumMessages: 28, + NumEnums: 17, + NumMessages: 32, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go index f77ef0de1..54ae1ab87 100644 --- a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go +++ b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go @@ -49,12 +49,13 @@ type extensionType struct { // A Message is a dynamically constructed protocol buffer message. // -// Message implements the proto.Message interface, and may be used with all -// standard proto package functions such as Marshal, Unmarshal, and so forth. +// Message implements the [google.golang.org/protobuf/proto.Message] interface, +// and may be used with all standard proto package functions +// such as Marshal, Unmarshal, and so forth. // -// Message also implements the protoreflect.Message interface. See the protoreflect -// package documentation for that interface for how to get and set fields and -// otherwise interact with the contents of a Message. +// Message also implements the [protoreflect.Message] interface. +// See the [protoreflect] package documentation for that interface for how to +// get and set fields and otherwise interact with the contents of a Message. // // Reflection API functions which construct messages, such as NewField, // return new dynamic messages of the appropriate type. Functions which take @@ -87,7 +88,7 @@ func NewMessage(desc protoreflect.MessageDescriptor) *Message { // ProtoMessage implements the legacy message interface. func (m *Message) ProtoMessage() {} -// ProtoReflect implements the protoreflect.ProtoMessage interface. +// ProtoReflect implements the [protoreflect.ProtoMessage] interface. func (m *Message) ProtoReflect() protoreflect.Message { return m } @@ -115,25 +116,25 @@ func (m *Message) Type() protoreflect.MessageType { } // New returns a newly allocated empty message with the same descriptor. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. func (m *Message) New() protoreflect.Message { return m.Type().New() } // Interface returns the message. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. func (m *Message) Interface() protoreflect.ProtoMessage { return m } -// ProtoMethods is an internal detail of the protoreflect.Message interface. +// ProtoMethods is an internal detail of the [protoreflect.Message] interface. // Users should never call this directly. func (m *Message) ProtoMethods() *protoiface.Methods { return nil } // Range visits every populated field in undefined order. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. 
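// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of the vendored patch above.
// The doc comment on Message in this hunk says a dynamic message works with
// the standard proto helpers (Marshal, Unmarshal, and so on). A minimal
// example of that usage, assuming a protoreflect.MessageDescriptor `desc`
// obtained elsewhere (for example from protoregistry.GlobalFiles); the field
// name "name" is hypothetical.
//
// Assumed imports:
//   "google.golang.org/protobuf/proto"
//   "google.golang.org/protobuf/reflect/protoreflect"
//   "google.golang.org/protobuf/types/dynamicpb"
func exampleDynamicMessage(desc protoreflect.MessageDescriptor) ([]byte, error) {
	// NewMessage returns a value that implements both proto.Message and
	// protoreflect.Message, so it can be passed to proto.Marshal directly.
	msg := dynamicpb.NewMessage(desc)
	if fd := desc.Fields().ByName("name"); fd != nil {
		msg.Set(fd, protoreflect.ValueOfString("example"))
	}
	return proto.Marshal(msg)
}
// ---------------------------------------------------------------------------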
func (m *Message) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { for num, v := range m.known { fd := m.ext[num] @@ -150,7 +151,7 @@ func (m *Message) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) } // Has reports whether a field is populated. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. func (m *Message) Has(fd protoreflect.FieldDescriptor) bool { m.checkField(fd) if fd.IsExtension() && m.ext[fd.Number()] != fd { @@ -164,7 +165,7 @@ func (m *Message) Has(fd protoreflect.FieldDescriptor) bool { } // Clear clears a field. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. func (m *Message) Clear(fd protoreflect.FieldDescriptor) { m.checkField(fd) num := fd.Number() @@ -173,7 +174,7 @@ func (m *Message) Clear(fd protoreflect.FieldDescriptor) { } // Get returns the value of a field. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. func (m *Message) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { m.checkField(fd) num := fd.Number() @@ -212,7 +213,7 @@ func (m *Message) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { } // Mutable returns a mutable reference to a repeated, map, or message field. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. func (m *Message) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { m.checkField(fd) if !fd.IsMap() && !fd.IsList() && fd.Message() == nil { @@ -241,7 +242,7 @@ func (m *Message) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { } // Set stores a value in a field. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. func (m *Message) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { m.checkField(fd) if m.known == nil { @@ -284,7 +285,7 @@ func (m *Message) clearOtherOneofFields(fd protoreflect.FieldDescriptor) { } // NewField returns a new value for assignable to the field of a given descriptor. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. func (m *Message) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { m.checkField(fd) switch { @@ -305,7 +306,7 @@ func (m *Message) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { } // WhichOneof reports which field in a oneof is populated, returning nil if none are populated. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. func (m *Message) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { for i := 0; i < od.Fields().Len(); i++ { fd := od.Fields().Get(i) @@ -317,13 +318,13 @@ func (m *Message) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.Field } // GetUnknown returns the raw unknown fields. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. func (m *Message) GetUnknown() protoreflect.RawFields { return m.unknown } // SetUnknown sets the raw unknown fields. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. func (m *Message) SetUnknown(r protoreflect.RawFields) { if m.known == nil { panic(errors.New("%v: modification of read-only message", m.typ.desc.FullName())) @@ -332,7 +333,7 @@ func (m *Message) SetUnknown(r protoreflect.RawFields) { } // IsValid reports whether the message is valid. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. 
func (m *Message) IsValid() bool { return m.known != nil } diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/types.go b/vendor/google.golang.org/protobuf/types/dynamicpb/types.go index 5a8010f18..c432817bb 100644 --- a/vendor/google.golang.org/protobuf/types/dynamicpb/types.go +++ b/vendor/google.golang.org/protobuf/types/dynamicpb/types.go @@ -23,13 +23,20 @@ type extField struct { // A Types is a collection of dynamically constructed descriptors. // Its methods are safe for concurrent use. // -// Types implements protoregistry.MessageTypeResolver and protoregistry.ExtensionTypeResolver. -// A Types may be used as a proto.UnmarshalOptions.Resolver. +// Types implements [protoregistry.MessageTypeResolver] and [protoregistry.ExtensionTypeResolver]. +// A Types may be used as a [google.golang.org/protobuf/proto.UnmarshalOptions.Resolver]. type Types struct { + // atomicExtFiles is used with sync/atomic and hence must be the first word + // of the struct to guarantee 64-bit alignment. + // + // TODO(stapelberg): once we only support Go 1.19 and newer, switch this + // field to be of type atomic.Uint64 to guarantee alignment on + // stack-allocated values, too. + atomicExtFiles uint64 + extMu sync.Mutex + files *protoregistry.Files - extMu sync.Mutex - atomicExtFiles uint64 extensionsByMessage map[extField]protoreflect.ExtensionDescriptor } @@ -45,7 +52,7 @@ func NewTypes(f *protoregistry.Files) *Types { // FindEnumByName looks up an enum by its full name; // e.g., "google.protobuf.Field.Kind". // -// This returns (nil, protoregistry.NotFound) if not found. +// This returns (nil, [protoregistry.NotFound]) if not found. func (t *Types) FindEnumByName(name protoreflect.FullName) (protoreflect.EnumType, error) { d, err := t.files.FindDescriptorByName(name) if err != nil { @@ -63,7 +70,7 @@ func (t *Types) FindEnumByName(name protoreflect.FullName) (protoreflect.EnumTyp // where the extension is declared and is unrelated to the full name of the // message being extended. // -// This returns (nil, protoregistry.NotFound) if not found. +// This returns (nil, [protoregistry.NotFound]) if not found. func (t *Types) FindExtensionByName(name protoreflect.FullName) (protoreflect.ExtensionType, error) { d, err := t.files.FindDescriptorByName(name) if err != nil { @@ -79,7 +86,7 @@ func (t *Types) FindExtensionByName(name protoreflect.FullName) (protoreflect.Ex // FindExtensionByNumber looks up an extension field by the field number // within some parent message, identified by full name. // -// This returns (nil, protoregistry.NotFound) if not found. +// This returns (nil, [protoregistry.NotFound]) if not found. func (t *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { // Construct the extension number map lazily, since not every user will need it. // Update the map if new files are added to the registry. @@ -96,7 +103,7 @@ func (t *Types) FindExtensionByNumber(message protoreflect.FullName, field proto // FindMessageByName looks up a message by its full name; // e.g. "google.protobuf.Any". // -// This returns (nil, protoregistry.NotFound) if not found. +// This returns (nil, [protoregistry.NotFound]) if not found. 
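// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of the vendored patch above.
// The doc comment on Types in this file says a Types collection may be used
// as a proto.UnmarshalOptions.Resolver. A minimal example of that wiring,
// assuming a populated *protoregistry.Files value `files` and a destination
// message `dst`; both are placeholders supplied by the caller.
//
// Assumed imports:
//   "google.golang.org/protobuf/proto"
//   "google.golang.org/protobuf/reflect/protoregistry"
//   "google.golang.org/protobuf/types/dynamicpb"
func exampleUnmarshalWithDynamicTypes(files *protoregistry.Files, data []byte, dst proto.Message) error {
	// NewTypes satisfies both protoregistry.MessageTypeResolver and
	// protoregistry.ExtensionTypeResolver, so it can resolve extension
	// fields while unmarshalling.
	types := dynamicpb.NewTypes(files)
	return proto.UnmarshalOptions{Resolver: types}.Unmarshal(data, dst)
}
// ---------------------------------------------------------------------------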
func (t *Types) FindMessageByName(name protoreflect.FullName) (protoreflect.MessageType, error) { d, err := t.files.FindDescriptorByName(name) if err != nil { @@ -112,7 +119,7 @@ func (t *Types) FindMessageByName(name protoreflect.FullName) (protoreflect.Mess // FindMessageByURL looks up a message by a URL identifier. // See documentation on google.protobuf.Any.type_url for the URL format. // -// This returns (nil, protoregistry.NotFound) if not found. +// This returns (nil, [protoregistry.NotFound]) if not found. func (t *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { // This function is similar to FindMessageByName but // truncates anything before and including '/' in the URL. diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 580b232f4..9de51be54 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -237,7 +237,8 @@ type Any struct { // // Note: this functionality is not currently available in the official // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. + // type.googleapis.com. As of May 2023, there are no widely used type server + // implementations and no plans to implement one. // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. diff --git a/vendor/modules.txt b/vendor/modules.txt index 00269a6d3..bee5b5a1c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -697,8 +697,8 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.31.0 -## explicit; go 1.11 +# google.golang.org/protobuf v1.32.0 +## explicit; go 1.17 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire From fbcd7a6f4c2cbf8685579f48bbceece29e586403 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 26 Dec 2023 11:50:15 +0800 Subject: [PATCH 024/157] Create capz-template-prow-workload-identity.yaml --- .../capz-template-prow-workload-identity.yaml | 255 ++++++++++++++++++ 1 file changed, 255 insertions(+) create mode 100644 test/manifest/capz-template-prow-workload-identity.yaml diff --git a/test/manifest/capz-template-prow-workload-identity.yaml b/test/manifest/capz-template-prow-workload-identity.yaml new file mode 100644 index 000000000..cbb61315b --- /dev/null +++ b/test/manifest/capz-template-prow-workload-identity.yaml @@ -0,0 +1,255 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cni: calico + name: ${CLUSTER_NAME} + namespace: default +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: ${CLUSTER_NAME}-control-plane + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureCluster + name: ${CLUSTER_NAME} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureCluster +metadata: + name: ${CLUSTER_NAME} + namespace: default +spec: + additionalTags: + buildProvenance: ${BUILD_PROVENANCE} + creationTimestamp: ${TIMESTAMP} + jobName: ${JOB_NAME} + identityRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterIdentity + name: ${CLUSTER_IDENTITY_NAME} + location: ${AZURE_LOCATION} + 
networkSpec: + subnets: + - name: control-plane-subnet + role: control-plane + - name: node-subnet + role: node + vnet: + name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} + resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} + subscriptionID: ${AZURE_SUBSCRIPTION_ID} +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + extraArgs: + cloud-provider: external + feature-gates: MixedProtocolLBService=true + timeoutForControlPlane: 20m + controllerManager: + extraArgs: + allocate-node-cidrs: "false" + cloud-provider: external + cluster-name: ${CLUSTER_NAME} + v: "4" + etcd: + local: + dataDir: /var/lib/etcddisk/etcd + extraArgs: + quota-backend-bytes: "8589934592" + diskSetup: + filesystems: + - device: /dev/disk/azure/scsi1/lun0 + extraOpts: + - -E + - lazy_itable_init=1,lazy_journal_init=1 + filesystem: ext4 + label: etcd_disk + - device: ephemeral0.1 + filesystem: ext4 + label: ephemeral0 + replaceFS: ntfs + partitions: + - device: /dev/disk/azure/scsi1/lun0 + layout: true + overwrite: false + tableType: gpt + files: + - contentFrom: + secret: + key: control-plane-azure.json + name: ${CLUSTER_NAME}-control-plane-azure-json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + azure-container-registry-config: /etc/kubernetes/azure.json + cloud-provider: external + name: '{{ ds.meta_data["local_hostname"] }}' + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + azure-container-registry-config: /etc/kubernetes/azure.json + cloud-provider: external + name: '{{ ds.meta_data["local_hostname"] }}' + mounts: + - - LABEL=etcd_disk + - /var/lib/etcddisk + postKubeadmCommands: [] + preKubeadmCommands: [] + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-control-plane + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + template: + spec: + dataDisks: + - diskSizeGB: 256 + lun: 0 + nameSuffix: etcddisk + identity: UserAssigned + osDisk: + diskSizeGB: 128 + osType: Linux + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + userAssignedIdentities: + - providerID: /subscriptions/${AZURE_SUBSCRIPTION_ID}/resourceGroups/${CI_RG:=capz-ci}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/${USER_IDENTITY:=cloud-provider-user-identity} + vmSize: ${AZURE_CONTROL_PLANE_MACHINE_TYPE} +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: ${CLUSTER_NAME}-md-0 + namespace: default +spec: + clusterName: ${CLUSTER_NAME} + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: null + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: ${CLUSTER_NAME}-md-0 + clusterName: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-md-0 + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-md-0 + namespace: default +spec: + template: + spec: + identity: UserAssigned + osDisk: + 
diskSizeGB: 128 + osType: Linux + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + userAssignedIdentities: + - providerID: /subscriptions/${AZURE_SUBSCRIPTION_ID}/resourceGroups/${CI_RG:=capz-ci}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/${USER_IDENTITY:=cloud-provider-user-identity} + vmSize: ${AZURE_NODE_MACHINE_TYPE} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_NAME}-md-0 + namespace: default +spec: + template: + spec: + files: + - contentFrom: + secret: + key: worker-node-azure.json + name: ${CLUSTER_NAME}-md-0-azure-json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + azure-container-registry-config: /etc/kubernetes/azure.json + cloud-provider: external + name: '{{ ds.meta_data["local_hostname"] }}' + preKubeadmCommands: [] +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureClusterIdentity +metadata: + labels: + clusterctl.cluster.x-k8s.io/move-hierarchy: "true" + name: ${CLUSTER_IDENTITY_NAME} + namespace: default +spec: + allowedNamespaces: {} + clientID: ${AZURE_CLIENT_ID} + clientSecret: + name: ${AZURE_CLUSTER_IDENTITY_SECRET_NAME} + namespace: ${AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE} + tenantID: ${AZURE_TENANT_ID} + type: WorkloadIdentity +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: calico + namespace: default +spec: + chartName: tigera-operator + clusterSelector: + matchLabels: + cni: calico + namespace: tigera-operator + releaseName: projectcalico + repoURL: https://docs.tigera.io/calico/charts + valuesTemplate: |- + installation: + cni: + type: Calico + calicoNetwork: + bgp: Disabled + mtu: 1350 + ipPools: + ipPools:{{range $i, $cidr := .Cluster.spec.clusterNetwork.pods.cidrBlocks }} + - cidr: {{ $cidr }} + encapsulation: VXLAN{{end}} + registry: mcr.microsoft.com/oss + # Image and registry configuration for the tigera/operator pod. + tigeraOperator: + image: tigera/operator + registry: mcr.microsoft.com/oss + calicoctl: + image: mcr.microsoft.com/oss/calico/ctl + version: ${CALICO_VERSION} From 8518ea6c6d0c06916a3c21fb5b047adf027295b5 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Tue, 26 Dec 2023 11:18:12 +0000 Subject: [PATCH 025/157] feat: upgrade azcopy to v10.22.1 for volume clone feature --- .trivyignore | 1 - pkg/blobplugin/Dockerfile | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) delete mode 100644 .trivyignore diff --git a/.trivyignore b/.trivyignore deleted file mode 100644 index ac4250b0e..000000000 --- a/.trivyignore +++ /dev/null @@ -1 +0,0 @@ -CVE-2023-48795 diff --git a/pkg/blobplugin/Dockerfile b/pkg/blobplugin/Dockerfile index a5a09910f..2d639afbc 100644 --- a/pkg/blobplugin/Dockerfile +++ b/pkg/blobplugin/Dockerfile @@ -43,9 +43,9 @@ else \ RUN tar xvzf aznfs.tar.gz -C / --keep-directory-symlink && rm aznfs.tar.gz # install azcopy -ARG azcopyURL=https://azcopyvnext.azureedge.net/releases/release-10.21.2-20231106/azcopy_linux_amd64_10.21.2.tar.gz +ARG azcopyURL=https://azcopyvnext.azureedge.net/releases/release-10.22.1-20231220/azcopy_linux_amd64_10.22.1.tar.gz RUN if [ "$ARCH" == "arm64" ] ; then \ - azcopyURL=https://azcopyvnext.azureedge.net/releases/release-10.21.2-20231106/azcopy_linux_arm64_10.21.2.tar.gz; fi + azcopyURL=https://azcopyvnext.azureedge.net/releases/release-10.22.1-20231220/azcopy_linux_arm64_10.22.1.tar.gz; fi RUN wget -O azcopy.tar.gz ${azcopyURL} && \ tar xvzf azcopy.tar.gz -C . 
&& rm azcopy.tar.gz && \ mv ./azcopy_linux_$ARCH_*/azcopy /usr/local/bin/azcopy && \ From 56dc8232ece20ab96ef3e0d864c17cc91db25e44 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Wed, 27 Dec 2023 03:56:35 +0000 Subject: [PATCH 026/157] test: always use latest azcopy version in sanity test fix test failure --- test/sanity/run-test.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test/sanity/run-test.sh b/test/sanity/run-test.sh index d82fb6b56..e20a76fef 100755 --- a/test/sanity/run-test.sh +++ b/test/sanity/run-test.sh @@ -34,12 +34,12 @@ fi azcopyPath="/usr/local/bin/azcopy" if [ ! -f "$azcopyPath" ]; then - azcopyVersion=azcopy_linux_amd64_10.18.1 echo 'Downloading azcopy...' - wget -c https://azcopyvnext.azureedge.net/release20230420/$azcopyVersion.tar.gz - tar -zxvf $azcopyVersion.tar.gz - mv ./$azcopyVersion/azcopy /usr/local/bin/azcopy - rm -rf ./$azcopyVersion* + azcopyTarFile="azcopy.tar.gz" + wget -O $azcopyTarFile https://aka.ms/downloadazcopy-v10-linux + tar -zxvf $azcopyTarFile + mv ./azcopy*/azcopy /usr/local/bin/azcopy + rm -rf ./$azcopyTarFile chmod +x /usr/local/bin/azcopy fi From ec7152dc83e93323bb5768f39b3e0c6b4ad144a1 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Wed, 27 Dec 2023 13:15:33 +0000 Subject: [PATCH 027/157] fix: reduce mount lock to avoid volumeID collision issue --- pkg/blob/nodeserver.go | 10 ++++++---- pkg/blob/nodeserver_test.go | 8 ++++---- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/pkg/blob/nodeserver.go b/pkg/blob/nodeserver.go index b78f466d0..d76faf4b9 100644 --- a/pkg/blob/nodeserver.go +++ b/pkg/blob/nodeserver.go @@ -230,10 +230,11 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe return nil, status.Error(codes.InvalidArgument, "Volume capability not provided") } - if acquired := d.volumeLocks.TryAcquire(volumeID); !acquired { + lockKey := fmt.Sprintf("%s-%s", volumeID, targetPath) + if acquired := d.volumeLocks.TryAcquire(lockKey); !acquired { return nil, status.Errorf(codes.Aborted, volumeOperationAlreadyExistsFmt, volumeID) } - defer d.volumeLocks.Release(volumeID) + defer d.volumeLocks.Release(lockKey) mountFlags := req.GetVolumeCapability().GetMount().GetMountFlags() attrib := req.GetVolumeContext() @@ -445,10 +446,11 @@ func (d *Driver) NodeUnstageVolume(_ context.Context, req *csi.NodeUnstageVolume return nil, status.Error(codes.InvalidArgument, "Staging target not provided") } - if acquired := d.volumeLocks.TryAcquire(volumeID); !acquired { + lockKey := fmt.Sprintf("%s-%s", volumeID, stagingTargetPath) + if acquired := d.volumeLocks.TryAcquire(lockKey); !acquired { return nil, status.Errorf(codes.Aborted, volumeOperationAlreadyExistsFmt, volumeID) } - defer d.volumeLocks.Release(volumeID) + defer d.volumeLocks.Release(lockKey) mc := metrics.NewMetricContext(blobCSIDriverName, "node_unstage_volume", d.cloud.ResourceGroup, "", d.Name) isOperationSucceeded := false diff --git a/pkg/blob/nodeserver_test.go b/pkg/blob/nodeserver_test.go index 8ca86d155..6a4cec9b1 100644 --- a/pkg/blob/nodeserver_test.go +++ b/pkg/blob/nodeserver_test.go @@ -435,8 +435,8 @@ func TestNodeStageVolume(t *testing.T) { VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap}, } d := NewFakeDriver() - d.volumeLocks.TryAcquire("unit-test") - defer d.volumeLocks.Release("unit-test") + d.volumeLocks.TryAcquire(fmt.Sprintf("%s-%s", "unit-test", "unit-test")) + defer d.volumeLocks.Release(fmt.Sprintf("%s-%s", "unit-test", "unit-test")) _, err := d.NodeStageVolume(context.TODO(), 
req) expectedErr := status.Error(codes.Aborted, fmt.Sprintf(volumeOperationAlreadyExistsFmt, "unit-test")) if !reflect.DeepEqual(err, expectedErr) { @@ -606,8 +606,8 @@ func TestNodeUnstageVolume(t *testing.T) { VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap}, } d := NewFakeDriver() - d.volumeLocks.TryAcquire("unit-test") - defer d.volumeLocks.Release("unit-test") + d.volumeLocks.TryAcquire(fmt.Sprintf("%s-%s", "unit-test", "unit-test")) + defer d.volumeLocks.Release(fmt.Sprintf("%s-%s", "unit-test", "unit-test")) _, err := d.NodeStageVolume(context.TODO(), req) expectedErr := status.Error(codes.Aborted, fmt.Sprintf(volumeOperationAlreadyExistsFmt, "unit-test")) if !reflect.DeepEqual(err, expectedErr) { From 7e0f0e96244e22f99cbf80d82829e46f8274edc9 Mon Sep 17 00:00:00 2001 From: umagnus Date: Fri, 8 Dec 2023 03:11:11 +0000 Subject: [PATCH 028/157] use cluster identity for azcopy --- deploy/example/cloning/README.md | 2 +- hack/update-mock.sh | 0 pkg/blob/controllerserver.go | 105 ++++++++-- pkg/blob/controllerserver_test.go | 319 +++++++++++++++++++++++++++++- pkg/util/util.go | 25 ++- pkg/util/util_mock.go | 8 +- pkg/util/util_test.go | 6 +- 7 files changed, 429 insertions(+), 36 deletions(-) mode change 100644 => 100755 hack/update-mock.sh diff --git a/deploy/example/cloning/README.md b/deploy/example/cloning/README.md index c81e11482..797026748 100644 --- a/deploy/example/cloning/README.md +++ b/deploy/example/cloning/README.md @@ -21,7 +21,7 @@ outfile ## Create a PVC from an existing PVC > Make sure application is not writing data to source blob container ```console -kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/deploy/example/cloning/pvc-blob-cloning.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/deploy/example/cloning/pvc-blob-csi-cloning.yaml ``` ### Check the Creation Status diff --git a/hack/update-mock.sh b/hack/update-mock.sh old mode 100644 new mode 100755 diff --git a/pkg/blob/controllerserver.go b/pkg/blob/controllerserver.go index e10044c0d..b1c6c13ae 100644 --- a/pkg/blob/controllerserver.go +++ b/pkg/blob/controllerserver.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "net/url" + "os" "os/exec" "strconv" "strings" @@ -50,6 +51,15 @@ import ( const ( privateEndpoint = "privateendpoint" + azcopyAutoLoginType = "AZCOPY_AUTO_LOGIN_TYPE" + azcopySPAApplicationID = "AZCOPY_SPA_APPLICATION_ID" + azcopySPAClientSecret = "AZCOPY_SPA_CLIENT_SECRET" + azcopyTenantID = "AZCOPY_TENANT_ID" + azcopyMSIClientID = "AZCOPY_MSI_CLIENT_ID" + MSI = "MSI" + SPN = "SPN" + authorizationPermissionMismatch = "AuthorizationPermissionMismatch" + waitForCopyInterval = 5 * time.Second waitForCopyTimeout = 3 * time.Minute ) @@ -73,7 +83,7 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) if acquired := d.volumeLocks.TryAcquire(volName); !acquired { // logging the job status if it's volume cloning if req.GetVolumeContentSource() != nil { - jobState, percent, err := d.azcopy.GetAzcopyJob(volName) + jobState, percent, err := d.azcopy.GetAzcopyJob(volName, []string{}) klog.V(2).Infof("azcopy job status: %s, copy percent: %s%%, error: %v", jobState, percent, err) } return nil, status.Errorf(codes.Aborted, volumeOperationAlreadyExistsFmt, volName) @@ -412,12 +422,11 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) } if req.GetVolumeContentSource() != nil { - if accountKey == "" { - if _, accountKey, err = d.GetStorageAccesskey(ctx, 
accountOptions, secrets, secretName, secretNamespace); err != nil { - return nil, status.Errorf(codes.Internal, "failed to GetStorageAccesskey on account(%s) rg(%s), error: %v", accountOptions.Name, accountOptions.ResourceGroup, err) - } + var accountSASToken string + if accountSASToken, err = d.getSASToken(ctx, accountName, accountKey, storageEndpointSuffix, accountOptions, secrets, secretName, secretNamespace); err != nil { + return nil, status.Errorf(codes.Internal, "failed to getSASToken on account(%s) rg(%s), error: %v", accountOptions.Name, accountOptions.ResourceGroup, err) } - if err := d.copyVolume(ctx, req, accountKey, validContainerName, storageEndpointSuffix); err != nil { + if err := d.copyVolume(ctx, req, accountSASToken, validContainerName, storageEndpointSuffix); err != nil { return nil, err } } else { @@ -712,7 +721,7 @@ func (d *Driver) DeleteBlobContainer(ctx context.Context, subsID, resourceGroupN } // CopyBlobContainer copies a blob container in the same storage account -func (d *Driver) copyBlobContainer(_ context.Context, req *csi.CreateVolumeRequest, accountKey, dstContainerName, storageEndpointSuffix string) error { +func (d *Driver) copyBlobContainer(_ context.Context, req *csi.CreateVolumeRequest, accountSasToken, dstContainerName, storageEndpointSuffix string) error { var sourceVolumeID string if req.GetVolumeContentSource() != nil && req.GetVolumeContentSource().GetVolume() != nil { sourceVolumeID = req.GetVolumeContentSource().GetVolume().GetVolumeId() @@ -726,10 +735,11 @@ func (d *Driver) copyBlobContainer(_ context.Context, req *csi.CreateVolumeReque return fmt.Errorf("srcContainerName(%s) or dstContainerName(%s) is empty", srcContainerName, dstContainerName) } - klog.V(2).Infof("generate sas token for account(%s)", accountName) - accountSasToken, genErr := generateSASToken(accountName, accountKey, storageEndpointSuffix, d.sasTokenExpirationMinutes) - if genErr != nil { - return genErr + var authAzcopyEnv []string + if accountSasToken == "" { + if authAzcopyEnv, err = d.authorizeAzcopyWithIdentity(); err != nil { + return err + } } timeAfter := time.After(waitForCopyTimeout) @@ -737,7 +747,7 @@ func (d *Driver) copyBlobContainer(_ context.Context, req *csi.CreateVolumeReque srcPath := fmt.Sprintf("https://%s.blob.%s/%s%s", accountName, storageEndpointSuffix, srcContainerName, accountSasToken) dstPath := fmt.Sprintf("https://%s.blob.%s/%s%s", accountName, storageEndpointSuffix, dstContainerName, accountSasToken) - jobState, percent, err := d.azcopy.GetAzcopyJob(dstContainerName) + jobState, percent, err := d.azcopy.GetAzcopyJob(dstContainerName, authAzcopyEnv) klog.V(2).Infof("azcopy job status: %s, copy percent: %s%%, error: %v", jobState, percent, err) if jobState == util.AzcopyJobError || jobState == util.AzcopyJobCompleted { return err @@ -746,14 +756,18 @@ func (d *Driver) copyBlobContainer(_ context.Context, req *csi.CreateVolumeReque for { select { case <-timeTick: - jobState, percent, err := d.azcopy.GetAzcopyJob(dstContainerName) + jobState, percent, err := d.azcopy.GetAzcopyJob(dstContainerName, authAzcopyEnv) klog.V(2).Infof("azcopy job status: %s, copy percent: %s%%, error: %v", jobState, percent, err) switch jobState { case util.AzcopyJobError, util.AzcopyJobCompleted: return err case util.AzcopyJobNotFound: klog.V(2).Infof("copy blob container %s to %s", srcContainerName, dstContainerName) - out, copyErr := exec.Command("azcopy", "copy", srcPath, dstPath, "--recursive", "--check-length=false").CombinedOutput() + cmd := exec.Command("azcopy", 
"copy", srcPath, dstPath, "--recursive", "--check-length=false") + if len(authAzcopyEnv) > 0 { + cmd.Env = append(os.Environ(), authAzcopyEnv...) + } + out, copyErr := cmd.CombinedOutput() if copyErr != nil { klog.Warningf("CopyBlobContainer(%s, %s, %s) failed with error(%v): %v", resourceGroupName, accountName, dstPath, copyErr, string(out)) } else { @@ -768,18 +782,77 @@ func (d *Driver) copyBlobContainer(_ context.Context, req *csi.CreateVolumeReque } // copyVolume copies a volume form volume or snapshot, snapshot is not supported now -func (d *Driver) copyVolume(ctx context.Context, req *csi.CreateVolumeRequest, accountKey, dstContainerName, storageEndpointSuffix string) error { +func (d *Driver) copyVolume(ctx context.Context, req *csi.CreateVolumeRequest, accountSASToken, dstContainerName, storageEndpointSuffix string) error { vs := req.VolumeContentSource switch vs.Type.(type) { case *csi.VolumeContentSource_Snapshot: return status.Errorf(codes.InvalidArgument, "copy volume from volumeSnapshot is not supported") case *csi.VolumeContentSource_Volume: - return d.copyBlobContainer(ctx, req, accountKey, dstContainerName, storageEndpointSuffix) + return d.copyBlobContainer(ctx, req, accountSASToken, dstContainerName, storageEndpointSuffix) default: return status.Errorf(codes.InvalidArgument, "%v is not a proper volume source", vs) } } +func (d *Driver) authorizeAzcopyWithIdentity() ([]string, error) { + azureAuthConfig := d.cloud.Config.AzureAuthConfig + var authAzcopyEnv []string + if azureAuthConfig.UseManagedIdentityExtension { + authAzcopyEnv = append(authAzcopyEnv, fmt.Sprintf("%s=%s", azcopyAutoLoginType, MSI)) + if len(azureAuthConfig.UserAssignedIdentityID) > 0 { + klog.V(2).Infof("use user assigned managed identity to authorize azcopy") + authAzcopyEnv = append(authAzcopyEnv, fmt.Sprintf("%s=%s", azcopyMSIClientID, azureAuthConfig.UserAssignedIdentityID)) + } else { + klog.V(2).Infof("use system-assigned managed identity to authorize azcopy") + } + return authAzcopyEnv, nil + } + if len(azureAuthConfig.AADClientSecret) > 0 { + klog.V(2).Infof("use service principal to authorize azcopy") + authAzcopyEnv = append(authAzcopyEnv, fmt.Sprintf("%s=%s", azcopyAutoLoginType, SPN)) + if azureAuthConfig.AADClientID == "" || azureAuthConfig.TenantID == "" { + return []string{}, fmt.Errorf("AADClientID and TenantID must be set when use service principal") + } + authAzcopyEnv = append(authAzcopyEnv, fmt.Sprintf("%s=%s", azcopySPAApplicationID, azureAuthConfig.AADClientID)) + authAzcopyEnv = append(authAzcopyEnv, fmt.Sprintf("%s=%s", azcopySPAClientSecret, azureAuthConfig.AADClientSecret)) + authAzcopyEnv = append(authAzcopyEnv, fmt.Sprintf("%s=%s", azcopyTenantID, azureAuthConfig.TenantID)) + klog.V(2).Infof(fmt.Sprintf("set AZCOPY_SPA_APPLICATION_ID=%s, AZCOPY_TENANT_ID=%s successfully", azureAuthConfig.AADClientID, azureAuthConfig.TenantID)) + + return authAzcopyEnv, nil + } + return []string{}, fmt.Errorf("service principle or managed identity are both not set") +} + +// getSASToken will only generate sas token for azcopy in following conditions: +// 1. secrets is not empty +// 2. driver is not using managed identity and service principal +// 3. 
azcopy returns AuthorizationPermissionMismatch error when using service principal or managed identity +func (d *Driver) getSASToken(ctx context.Context, accountName, accountKey, storageEndpointSuffix string, accountOptions *azure.AccountOptions, secrets map[string]string, secretName, secretNamespace string) (string, error) { + authAzcopyEnv, _ := d.authorizeAzcopyWithIdentity() + useSasTokenFallBack := false + if len(authAzcopyEnv) > 0 { + out, testErr := d.azcopy.TestListJobs(accountName, storageEndpointSuffix, authAzcopyEnv) + if testErr != nil { + return "", fmt.Errorf("azcopy list command failed with error(%v): %v", testErr, out) + } + if strings.Contains(out, authorizationPermissionMismatch) { + klog.Warningf("azcopy list failed with AuthorizationPermissionMismatch error, should assign \"Storage Blob Data Contributor\" role to controller identity, fall back to use sas token, original output: %v", out) + useSasTokenFallBack = true + } + } + if len(secrets) > 0 || len(authAzcopyEnv) == 0 || useSasTokenFallBack { + var err error + if accountKey == "" { + if _, accountKey, err = d.GetStorageAccesskey(ctx, accountOptions, secrets, secretName, secretNamespace); err != nil { + return "", err + } + } + klog.V(2).Infof("generate sas token for account(%s)", accountName) + return generateSASToken(accountName, accountKey, storageEndpointSuffix, d.sasTokenExpirationMinutes) + } + return "", nil +} + // isValidVolumeCapabilities validates the given VolumeCapability array is valid func isValidVolumeCapabilities(volCaps []*csi.VolumeCapability) error { if len(volCaps) == 0 { diff --git a/pkg/blob/controllerserver_test.go b/pkg/blob/controllerserver_test.go index 7446c2012..25eb5d707 100644 --- a/pkg/blob/controllerserver_test.go +++ b/pkg/blob/controllerserver_test.go @@ -33,9 +33,11 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/utils/pointer" "sigs.k8s.io/blob-csi-driver/pkg/util" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/blobclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/mockstorageaccountclient" azure "sigs.k8s.io/cloud-provider-azure/pkg/provider" + "sigs.k8s.io/cloud-provider-azure/pkg/provider/config" "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) @@ -756,6 +758,7 @@ func TestCreateVolume(t *testing.T) { Value: &fakeValue, }) d.cloud.StorageAccountClient = NewMockSAClient(context.Background(), gomock.NewController(t), "subID", "unit-test", "unit-test", &keyList) + d.cloud.Config.AzureAuthConfig.UseManagedIdentityExtension = true errorType := NULL d.cloud.BlobClient = &mockBlobClient{errorType: &errorType} @@ -789,6 +792,15 @@ func TestCreateVolume(t *testing.T) { controllerServiceCapability, } + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + m := util.NewMockEXEC(ctrl) + listStr := "no error" + m.EXPECT().RunCommand(gomock.Any(), gomock.Any()).Return(listStr, nil) + + d.azcopy.ExecCmd = m + expectedErr := status.Errorf(codes.InvalidArgument, "copy volume from volumeSnapshot is not supported") _, err := d.CreateVolume(context.Background(), req) if !reflect.DeepEqual(err, expectedErr) { @@ -812,6 +824,7 @@ func TestCreateVolume(t *testing.T) { Value: &fakeValue, }) d.cloud.StorageAccountClient = NewMockSAClient(context.Background(), gomock.NewController(t), "subID", "unit-test", "unit-test", &keyList) + d.cloud.Config.AzureAuthConfig.UseManagedIdentityExtension = true errorType := NULL d.cloud.BlobClient = &mockBlobClient{errorType: &errorType} @@ -845,6 +858,15 @@ func 
TestCreateVolume(t *testing.T) { controllerServiceCapability, } + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + m := util.NewMockEXEC(ctrl) + listStr := "no error" + m.EXPECT().RunCommand(gomock.Any(), gomock.Any()).Return(listStr, nil) + + d.azcopy.ExecCmd = m + expectedErr := status.Errorf(codes.NotFound, "error parsing volume id: \"unit-test\", should at least contain two #") _, err := d.CreateVolume(context.Background(), req) if !reflect.DeepEqual(err, expectedErr) { @@ -1598,6 +1620,38 @@ func TestCopyVolume(t *testing.T) { } }, }, + { + name: "AADClientSecret shouldn't be nil or useManagedIdentityExtension must be set to true when accountSASToken is empty", + testFunc: func(t *testing.T) { + d := NewFakeDriver() + mp := map[string]string{} + + volumeSource := &csi.VolumeContentSource_VolumeSource{ + VolumeId: "vol_1#f5713de20cde511e8ba4900#fileshare#", + } + volumeContentSourceVolumeSource := &csi.VolumeContentSource_Volume{ + Volume: volumeSource, + } + volumecontensource := csi.VolumeContentSource{ + Type: volumeContentSourceVolumeSource, + } + + req := &csi.CreateVolumeRequest{ + Name: "unit-test", + VolumeCapabilities: stdVolumeCapabilities, + Parameters: mp, + VolumeContentSource: &volumecontensource, + } + + ctx := context.Background() + + expectedErr := fmt.Errorf("service principle or managed identity are both not set") + err := d.copyVolume(ctx, req, "", "dstContainer", "core.windows.net") + if !reflect.DeepEqual(err, expectedErr) { + t.Errorf("Unexpected error: %v", err) + } + }, + }, { name: "azcopy job is already completed", testFunc: func(t *testing.T) { @@ -1626,7 +1680,7 @@ func TestCopyVolume(t *testing.T) { m := util.NewMockEXEC(ctrl) listStr := "JobId: ed1c3833-eaff-fe42-71d7-513fb065a9d9\nStart Time: Monday, 07-Aug-23 03:29:54 UTC\nStatus: Completed\nCommand: copy https://{accountName}.file.core.windows.net/{srcFileshare}{SAStoken} https://{accountName}.file.core.windows.net/{dstFileshare}{SAStoken} --recursive --check-length=false" - m.EXPECT().RunCommand(gomock.Eq("azcopy jobs list | grep dstContainer -B 3")).Return(listStr, nil) + m.EXPECT().RunCommand(gomock.Eq("azcopy jobs list | grep dstContainer -B 3"), gomock.Any()).Return(listStr, nil) // if test.enableShow { // m.EXPECT().RunCommand(gomock.Not("azcopy jobs list | grep dstContainer -B 3")).Return(test.showStr, test.showErr) // } @@ -1636,7 +1690,7 @@ func TestCopyVolume(t *testing.T) { ctx := context.Background() var expectedErr error - err := d.copyVolume(ctx, req, "", "dstContainer", "core.windows.net") + err := d.copyVolume(ctx, req, "sastoken", "dstContainer", "core.windows.net") if !reflect.DeepEqual(err, expectedErr) { t.Errorf("Unexpected error: %v", err) } @@ -1671,9 +1725,9 @@ func TestCopyVolume(t *testing.T) { m := util.NewMockEXEC(ctrl) listStr1 := "JobId: ed1c3833-eaff-fe42-71d7-513fb065a9d9\nStart Time: Monday, 07-Aug-23 03:29:54 UTC\nStatus: InProgress\nCommand: copy https://{accountName}.file.core.windows.net/{srcFileshare}{SAStoken} https://{accountName}.file.core.windows.net/{dstFileshare}{SAStoken} --recursive --check-length=false" listStr2 := "JobId: ed1c3833-eaff-fe42-71d7-513fb065a9d9\nStart Time: Monday, 07-Aug-23 03:29:54 UTC\nStatus: Completed\nCommand: copy https://{accountName}.file.core.windows.net/{srcFileshare}{SAStoken} https://{accountName}.file.core.windows.net/{dstFileshare}{SAStoken} --recursive --check-length=false" - o1 := m.EXPECT().RunCommand(gomock.Eq("azcopy jobs list | grep dstContainer -B 3")).Return(listStr1, nil).Times(1) - 
m.EXPECT().RunCommand(gomock.Not("azcopy jobs list | grep dstBlobContainer -B 3")).Return("Percent Complete (approx): 50.0", nil) - o2 := m.EXPECT().RunCommand(gomock.Eq("azcopy jobs list | grep dstContainer -B 3")).Return(listStr2, nil) + o1 := m.EXPECT().RunCommand(gomock.Eq("azcopy jobs list | grep dstContainer -B 3"), gomock.Any()).Return(listStr1, nil).Times(1) + m.EXPECT().RunCommand(gomock.Not("azcopy jobs list | grep dstBlobContainer -B 3"), gomock.Any()).Return("Percent Complete (approx): 50.0", nil) + o2 := m.EXPECT().RunCommand(gomock.Eq("azcopy jobs list | grep dstContainer -B 3"), gomock.Any()).Return(listStr2, nil) gomock.InOrder(o1, o2) d.azcopy.ExecCmd = m @@ -1681,7 +1735,7 @@ func TestCopyVolume(t *testing.T) { ctx := context.Background() var expectedErr error - err := d.copyVolume(ctx, req, "", "dstContainer", "core.windows.net") + err := d.copyVolume(ctx, req, "sastoken", "dstContainer", "core.windows.net") if !reflect.DeepEqual(err, expectedErr) { t.Errorf("Unexpected error: %v", err) } @@ -1780,3 +1834,256 @@ func Test_generateSASToken(t *testing.T) { }) } } + +func Test_authorizeAzcopyWithIdentity(t *testing.T) { + testCases := []struct { + name string + testFunc func(t *testing.T) + }{ + { + name: "use service principal to authorize azcopy", + testFunc: func(t *testing.T) { + d := NewFakeDriver() + d.cloud = &azure.Cloud{ + Config: azure.Config{ + AzureAuthConfig: config.AzureAuthConfig{ + ARMClientConfig: azclient.ARMClientConfig{ + TenantID: "TenantID", + }, + AzureAuthConfig: azclient.AzureAuthConfig{ + AADClientID: "AADClientID", + AADClientSecret: "AADClientSecret", + }, + }, + }, + } + expectedAuthAzcopyEnv := []string{ + fmt.Sprintf(azcopyAutoLoginType + "=SPN"), + fmt.Sprintf(azcopySPAApplicationID + "=AADClientID"), + fmt.Sprintf(azcopySPAClientSecret + "=AADClientSecret"), + fmt.Sprintf(azcopyTenantID + "=TenantID"), + } + var expectedErr error + authAzcopyEnv, err := d.authorizeAzcopyWithIdentity() + if !reflect.DeepEqual(authAzcopyEnv, expectedAuthAzcopyEnv) || !reflect.DeepEqual(err, expectedErr) { + t.Errorf("Unexpected authAzcopyEnv: %v, Unexpected error: %v", authAzcopyEnv, err) + } + }, + }, + { + name: "use service principal to authorize azcopy but client id is empty", + testFunc: func(t *testing.T) { + d := NewFakeDriver() + d.cloud = &azure.Cloud{ + Config: azure.Config{ + AzureAuthConfig: config.AzureAuthConfig{ + ARMClientConfig: azclient.ARMClientConfig{ + TenantID: "TenantID", + }, + AzureAuthConfig: azclient.AzureAuthConfig{ + AADClientSecret: "AADClientSecret", + }, + }, + }, + } + expectedAuthAzcopyEnv := []string{} + expectedErr := fmt.Errorf("AADClientID and TenantID must be set when use service principal") + authAzcopyEnv, err := d.authorizeAzcopyWithIdentity() + if !reflect.DeepEqual(authAzcopyEnv, expectedAuthAzcopyEnv) || !reflect.DeepEqual(err, expectedErr) { + t.Errorf("Unexpected authAzcopyEnv: %v, Unexpected error: %v", authAzcopyEnv, err) + } + }, + }, + { + name: "use user assigned managed identity to authorize azcopy", + testFunc: func(t *testing.T) { + d := NewFakeDriver() + d.cloud = &azure.Cloud{ + Config: azure.Config{ + AzureAuthConfig: config.AzureAuthConfig{ + AzureAuthConfig: azclient.AzureAuthConfig{ + UseManagedIdentityExtension: true, + UserAssignedIdentityID: "UserAssignedIdentityID", + }, + }, + }, + } + expectedAuthAzcopyEnv := []string{ + fmt.Sprintf(azcopyAutoLoginType + "=MSI"), + fmt.Sprintf(azcopyMSIClientID + "=UserAssignedIdentityID"), + } + var expectedErr error + authAzcopyEnv, err := 
d.authorizeAzcopyWithIdentity() + if !reflect.DeepEqual(authAzcopyEnv, expectedAuthAzcopyEnv) || !reflect.DeepEqual(err, expectedErr) { + t.Errorf("Unexpected authAzcopyEnv: %v, Unexpected error: %v", authAzcopyEnv, err) + } + }, + }, + { + name: "use system assigned managed identity to authorize azcopy", + testFunc: func(t *testing.T) { + d := NewFakeDriver() + d.cloud = &azure.Cloud{ + Config: azure.Config{ + AzureAuthConfig: config.AzureAuthConfig{ + AzureAuthConfig: azclient.AzureAuthConfig{ + UseManagedIdentityExtension: true, + }, + }, + }, + } + expectedAuthAzcopyEnv := []string{ + fmt.Sprintf(azcopyAutoLoginType + "=MSI"), + } + var expectedErr error + authAzcopyEnv, err := d.authorizeAzcopyWithIdentity() + if !reflect.DeepEqual(authAzcopyEnv, expectedAuthAzcopyEnv) || !reflect.DeepEqual(err, expectedErr) { + t.Errorf("Unexpected authAzcopyEnv: %v, Unexpected error: %v", authAzcopyEnv, err) + } + }, + }, + { + name: "AADClientSecret be nil and useManagedIdentityExtension is false", + testFunc: func(t *testing.T) { + d := NewFakeDriver() + d.cloud = &azure.Cloud{ + Config: azure.Config{ + AzureAuthConfig: config.AzureAuthConfig{}, + }, + } + expectedAuthAzcopyEnv := []string{} + expectedErr := fmt.Errorf("service principle or managed identity are both not set") + authAzcopyEnv, err := d.authorizeAzcopyWithIdentity() + if !reflect.DeepEqual(authAzcopyEnv, expectedAuthAzcopyEnv) || !reflect.DeepEqual(err, expectedErr) { + t.Errorf("Unexpected authAzcopyEnv: %v, Unexpected error: %v", authAzcopyEnv, err) + } + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, tc.testFunc) + } +} + +func Test_getSASToken(t *testing.T) { + testCases := []struct { + name string + testFunc func(t *testing.T) + }{ + { + name: "failed to get accountKey in secrets", + testFunc: func(t *testing.T) { + d := NewFakeDriver() + d.cloud = &azure.Cloud{ + Config: azure.Config{}, + } + secrets := map[string]string{ + defaultSecretAccountName: "accountName", + } + + ctx := context.Background() + expectedAccountSASToken := "" + expectedErr := fmt.Errorf("could not find accountkey or azurestorageaccountkey field in secrets") + accountSASToken, err := d.getSASToken(ctx, "accountName", "", "core.windows.net", &azure.AccountOptions{}, secrets, "secretsName", "secretsNamespace") + if !reflect.DeepEqual(err, expectedErr) || !reflect.DeepEqual(accountSASToken, expectedAccountSASToken) { + t.Errorf("Unexpected accountSASToken: %s, Unexpected error: %v", accountSASToken, err) + } + }, + }, + { + name: "failed to test azcopy list command", + testFunc: func(t *testing.T) { + d := NewFakeDriver() + d.cloud = &azure.Cloud{ + Config: azure.Config{ + AzureAuthConfig: config.AzureAuthConfig{ + AzureAuthConfig: azclient.AzureAuthConfig{ + UseManagedIdentityExtension: true, + }, + }, + }, + } + secrets := map[string]string{ + defaultSecretAccountName: "accountName", + } + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + m := util.NewMockEXEC(ctrl) + listStr := "error" + m.EXPECT().RunCommand(gomock.Any(), gomock.Any()).Return(listStr, fmt.Errorf("error")) + + d.azcopy.ExecCmd = m + + ctx := context.Background() + expectedAccountSASToken := "" + expectedErr := fmt.Errorf("azcopy list command failed with error(%v): %v", fmt.Errorf("error"), "error") + accountSASToken, err := d.getSASToken(ctx, "accountName", "", "core.windows.net", &azure.AccountOptions{}, secrets, "secretsName", "secretsNamespace") + if !reflect.DeepEqual(err, expectedErr) || !reflect.DeepEqual(accountSASToken, expectedAccountSASToken) { + 
t.Errorf("Unexpected accountSASToken: %s, Unexpected error: %v", accountSASToken, err) + } + }, + }, + { + name: "fall back to generate SAS token failed for illegal account key", + testFunc: func(t *testing.T) { + d := NewFakeDriver() + d.cloud = &azure.Cloud{ + Config: azure.Config{ + AzureAuthConfig: config.AzureAuthConfig{ + AzureAuthConfig: azclient.AzureAuthConfig{ + UseManagedIdentityExtension: true, + }, + }, + }, + } + secrets := map[string]string{ + defaultSecretAccountName: "accountName", + defaultSecretAccountKey: "fakeValue", + } + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + m := util.NewMockEXEC(ctrl) + listStr := "RESPONSE 403: 403 This request is not authorized to perform this operation using this permission.\nERROR CODE: AuthorizationPermissionMismatch" + m.EXPECT().RunCommand(gomock.Any(), gomock.Any()).Return(listStr, nil) + + d.azcopy.ExecCmd = m + + ctx := context.Background() + expectedAccountSASToken := "" + expectedErr := status.Errorf(codes.Internal, fmt.Sprintf("failed to generate sas token in creating new shared key credential, accountName: %s, err: %s", "accountName", "decode account key: illegal base64 data at input byte 8")) + accountSASToken, err := d.getSASToken(ctx, "accountName", "", "core.windows.net", &azure.AccountOptions{}, secrets, "secretsName", "secretsNamespace") + if !reflect.DeepEqual(err, expectedErr) || !reflect.DeepEqual(accountSASToken, expectedAccountSASToken) { + t.Errorf("Unexpected accountSASToken: %s, Unexpected error: %v", accountSASToken, err) + } + }, + }, + { + name: "generate SAS token failed for illegal account key", + testFunc: func(t *testing.T) { + d := NewFakeDriver() + d.cloud = &azure.Cloud{ + Config: azure.Config{}, + } + secrets := map[string]string{ + defaultSecretAccountName: "accountName", + defaultSecretAccountKey: "fakeValue", + } + + ctx := context.Background() + expectedAccountSASToken := "" + expectedErr := status.Errorf(codes.Internal, fmt.Sprintf("failed to generate sas token in creating new shared key credential, accountName: %s, err: %s", "accountName", "decode account key: illegal base64 data at input byte 8")) + accountSASToken, err := d.getSASToken(ctx, "accountName", "", "core.windows.net", &azure.AccountOptions{}, secrets, "secretsName", "secretsNamespace") + if !reflect.DeepEqual(err, expectedErr) || !reflect.DeepEqual(accountSASToken, expectedAccountSASToken) { + t.Errorf("Unexpected accountSASToken: %s, Unexpected error: %v", accountSASToken, err) + } + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, tc.testFunc) + } +} diff --git a/pkg/util/util.go b/pkg/util/util.go index 691984027..c010db385 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -205,14 +205,18 @@ func TrimDuplicatedSpace(s string) string { } type EXEC interface { - RunCommand(string) (string, error) + RunCommand(string, []string) (string, error) } type ExecCommand struct { } -func (ec *ExecCommand) RunCommand(cmd string) (string, error) { - out, err := exec.Command("sh", "-c", cmd).CombinedOutput() +func (ec *ExecCommand) RunCommand(cmdStr string, authEnv []string) (string, error) { + cmd := exec.Command("sh", "-c", cmdStr) + if len(authEnv) > 0 { + cmd.Env = append(os.Environ(), authEnv...) 
+ } + out, err := cmd.CombinedOutput() return string(out), err } @@ -221,7 +225,7 @@ type Azcopy struct { } // GetAzcopyJob get the azcopy job status if job existed -func (ac *Azcopy) GetAzcopyJob(dstBlobContainer string) (AzcopyJobState, string, error) { +func (ac *Azcopy) GetAzcopyJob(dstBlobContainer string, authAzcopyEnv []string) (AzcopyJobState, string, error) { cmdStr := fmt.Sprintf("azcopy jobs list | grep %s -B 3", dstBlobContainer) // cmd output example: // JobId: ed1c3833-eaff-fe42-71d7-513fb065a9d9 @@ -236,7 +240,7 @@ func (ac *Azcopy) GetAzcopyJob(dstBlobContainer string) (AzcopyJobState, string, if ac.ExecCmd == nil { ac.ExecCmd = &ExecCommand{} } - out, err := ac.ExecCmd.RunCommand(cmdStr) + out, err := ac.ExecCmd.RunCommand(cmdStr, authAzcopyEnv) // if grep command returns nothing, the exec will return exit status 1 error, so filter this error if err != nil && err.Error() != "exit status 1" { klog.Warningf("failed to get azcopy job with error: %v, jobState: %v", err, AzcopyJobError) @@ -256,7 +260,7 @@ func (ac *Azcopy) GetAzcopyJob(dstBlobContainer string) (AzcopyJobState, string, cmdPercentStr := fmt.Sprintf("azcopy jobs show %s | grep Percent", jobid) // cmd out example: // Percent Complete (approx): 100.0 - summary, err := ac.ExecCmd.RunCommand(cmdPercentStr) + summary, err := ac.ExecCmd.RunCommand(cmdPercentStr, authAzcopyEnv) if err != nil { klog.Warningf("failed to get azcopy job with error: %v, jobState: %v", err, AzcopyJobError) return AzcopyJobError, "", fmt.Errorf("couldn't show jobs summary in azcopy %v", err) @@ -269,6 +273,15 @@ func (ac *Azcopy) GetAzcopyJob(dstBlobContainer string) (AzcopyJobState, string, return jobState, percent, nil } +// TestListJobs test azcopy jobs list command with authAzcopyEnv +func (ac *Azcopy) TestListJobs(accountName, storageEndpointSuffix string, authAzcopyEnv []string) (string, error) { + cmdStr := fmt.Sprintf("azcopy list %s", fmt.Sprintf("https://%s.blob.%s", accountName, storageEndpointSuffix)) + if ac.ExecCmd == nil { + ac.ExecCmd = &ExecCommand{} + } + return ac.ExecCmd.RunCommand(cmdStr, authAzcopyEnv) +} + // parseAzcopyJobList parse command azcopy jobs list, get jobid and state from joblist func parseAzcopyJobList(joblist string) (string, AzcopyJobState, error) { jobid := "" diff --git a/pkg/util/util_mock.go b/pkg/util/util_mock.go index f381ec968..6764d3fcf 100644 --- a/pkg/util/util_mock.go +++ b/pkg/util/util_mock.go @@ -51,16 +51,16 @@ func (m *MockEXEC) EXPECT() *MockEXECMockRecorder { } // RunCommand mocks base method. -func (m *MockEXEC) RunCommand(arg0 string) (string, error) { +func (m *MockEXEC) RunCommand(arg0 string, arg1 []string) (string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RunCommand", arg0) + ret := m.ctrl.Call(m, "RunCommand", arg0, arg1) ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } // RunCommand indicates an expected call of RunCommand. 
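// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of the patch above. The new
// authEnv parameter threaded through RunCommand and GetAzcopyJob carries the
// azcopy auto-login variables that authorizeAzcopyWithIdentity builds in
// pkg/blob/controllerserver.go, so azcopy can authorize with the controller
// identity instead of an account SAS token. A minimal sketch of that pattern
// for a user-assigned managed identity; the client ID, storage account, and
// container names below are placeholders.
func exampleAzcopyWithManagedIdentity(msiClientID string) (string, error) {
	authEnv := []string{
		"AZCOPY_AUTO_LOGIN_TYPE=MSI",
		"AZCOPY_MSI_CLIENT_ID=" + msiClientID,
	}
	// ExecCommand.RunCommand appends authEnv to os.Environ() before running
	// the shell command, so azcopy reads the login type from its environment.
	e := &ExecCommand{}
	return e.RunCommand("azcopy list https://account.blob.core.windows.net/container", authEnv)
}
// ---------------------------------------------------------------------------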
-func (mr *MockEXECMockRecorder) RunCommand(arg0 interface{}) *gomock.Call { +func (mr *MockEXECMockRecorder) RunCommand(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunCommand", reflect.TypeOf((*MockEXEC)(nil).RunCommand), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunCommand", reflect.TypeOf((*MockEXEC)(nil).RunCommand), arg0, arg1) } diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go index 06ba4a400..4c3832b25 100644 --- a/pkg/util/util_test.go +++ b/pkg/util/util_test.go @@ -419,14 +419,14 @@ func TestGetAzcopyJob(t *testing.T) { defer ctrl.Finish() m := NewMockEXEC(ctrl) - m.EXPECT().RunCommand(gomock.Eq("azcopy jobs list | grep dstBlobContainer -B 3")).Return(test.listStr, test.listErr) + m.EXPECT().RunCommand(gomock.Eq("azcopy jobs list | grep dstBlobContainer -B 3"), []string{}).Return(test.listStr, test.listErr) if test.enableShow { - m.EXPECT().RunCommand(gomock.Not("azcopy jobs list | grep dstBlobContainer -B 3")).Return(test.showStr, test.showErr) + m.EXPECT().RunCommand(gomock.Not("azcopy jobs list | grep dstBlobContainer -B 3"), []string{}).Return(test.showStr, test.showErr) } azcopyFunc := &Azcopy{} azcopyFunc.ExecCmd = m - jobState, percent, err := azcopyFunc.GetAzcopyJob(dstBlobContainer) + jobState, percent, err := azcopyFunc.GetAzcopyJob(dstBlobContainer, []string{}) if jobState != test.expectedJobState || percent != test.expectedPercent || !reflect.DeepEqual(err, test.expectedErr) { t.Errorf("test[%s]: unexpected jobState: %v, percent: %v, err: %v, expected jobState: %v, percent: %v, err: %v", test.desc, jobState, percent, err, test.expectedJobState, test.expectedPercent, test.expectedErr) } From 44ac3d69f4f47816b9364b5214477c8c6262f684 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Fri, 29 Dec 2023 13:32:54 +0000 Subject: [PATCH 029/157] fix: mountPermission settting issue due to case insensitive parameter --- pkg/blob/blob.go | 14 +++++++++++++ pkg/blob/blob_test.go | 46 ++++++++++++++++++++++++++++++++++++++++++ pkg/blob/nodeserver.go | 2 +- 3 files changed, 61 insertions(+), 1 deletion(-) diff --git a/pkg/blob/blob.go b/pkg/blob/blob.go index b3b3b4faa..2ee325223 100644 --- a/pkg/blob/blob.go +++ b/pkg/blob/blob.go @@ -994,6 +994,20 @@ func setKeyValueInMap(m map[string]string, key, value string) { m[key] = value } +// getValueInMap get value from map by key +// key in the map is case insensitive +func getValueInMap(m map[string]string, key string) string { + if m == nil { + return "" + } + for k, v := range m { + if strings.EqualFold(k, key) { + return v + } + } + return "" +} + // replaceWithMap replace key with value for str func replaceWithMap(str string, m map[string]string) string { for k, v := range m { diff --git a/pkg/blob/blob_test.go b/pkg/blob/blob_test.go index f5a47ec71..f2e191981 100644 --- a/pkg/blob/blob_test.go +++ b/pkg/blob/blob_test.go @@ -1578,6 +1578,52 @@ func TestSetKeyValueInMap(t *testing.T) { } } +func TestGetValueInMap(t *testing.T) { + tests := []struct { + desc string + m map[string]string + key string + expected string + }{ + { + desc: "nil map", + key: "key", + expected: "", + }, + { + desc: "empty map", + m: map[string]string{}, + key: "key", + expected: "", + }, + { + desc: "non-empty map", + m: map[string]string{"k": "v"}, + key: "key", + expected: "", + }, + { + desc: "same key already exists", + m: map[string]string{"subDir": "value2"}, + key: "subDir", + expected: "value2", + }, + { + desc: "case insensitive key already 
exists", + m: map[string]string{"subDir": "value2"}, + key: "subdir", + expected: "value2", + }, + } + + for _, test := range tests { + result := getValueInMap(test.m, test.key) + if result != test.expected { + t.Errorf("test[%s]: unexpected output: %v, expected result: %v", test.desc, result, test.expected) + } + } +} + func TestReplaceWithMap(t *testing.T) { tests := []struct { desc string diff --git a/pkg/blob/nodeserver.go b/pkg/blob/nodeserver.go index d76faf4b9..891da8c23 100644 --- a/pkg/blob/nodeserver.go +++ b/pkg/blob/nodeserver.go @@ -98,7 +98,7 @@ func (d *Driver) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolu return &csi.NodePublishVolumeResponse{}, err } - if perm := context[mountPermissionsField]; perm != "" { + if perm := getValueInMap(context, mountPermissionsField); perm != "" { var err error if mountPermissions, err = strconv.ParseUint(perm, 8, 32); err != nil { return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("invalid mountPermissions %s", perm)) From 0dca1bec1e8d50bb13ca854e118539a9a1280054 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Sat, 30 Dec 2023 07:15:30 +0000 Subject: [PATCH 030/157] feat: support volume mount group --- pkg/blob/blob.go | 7 +++++++ pkg/blob/nodeserver.go | 19 +++++++++++++++++-- pkg/blob/nodeserver_test.go | 26 ++++++++++++++++++++++++++ 3 files changed, 50 insertions(+), 2 deletions(-) diff --git a/pkg/blob/blob.go b/pkg/blob/blob.go index 2ee325223..bf1ed3915 100644 --- a/pkg/blob/blob.go +++ b/pkg/blob/blob.go @@ -168,6 +168,7 @@ type DriverOptions struct { EnableAznfsMount bool VolStatsCacheExpireInMinutes int SasTokenExpirationMinutes int + EnableVolumeMountGroup bool } func (option *DriverOptions) AddFlags() { @@ -185,6 +186,7 @@ func (option *DriverOptions) AddFlags() { flag.BoolVar(&option.EnableAznfsMount, "enable-aznfs-mount", false, "replace nfs mount with aznfs mount") flag.IntVar(&option.VolStatsCacheExpireInMinutes, "vol-stats-cache-expire-in-minutes", 10, "The cache expire time in minutes for volume stats cache") flag.IntVar(&option.SasTokenExpirationMinutes, "sas-token-expiration-minutes", 1440, "sas token expiration minutes during volume cloning") + flag.BoolVar(&option.EnableVolumeMountGroup, "enable-volume-mount-group", true, "indicates whether enabling VOLUME_MOUNT_GROUP") } // Driver implements all interfaces of CSI drivers @@ -204,6 +206,7 @@ type Driver struct { blobfuseProxyConnTimout int mountPermissions uint64 enableAznfsMount bool + enableVolumeMountGroup bool mounter *mount.SafeFormatAndMount volLockMap *util.LockMap // A map storing all volumes with ongoing operations so that additional operations @@ -239,6 +242,7 @@ func NewDriver(options *DriverOptions, kubeClient kubernetes.Interface, cloud *p blobfuseProxyConnTimout: options.BlobfuseProxyConnTimout, enableBlobMockMount: options.EnableBlobMockMount, enableGetVolumeStats: options.EnableGetVolumeStats, + enableVolumeMountGroup: options.EnableVolumeMountGroup, appendMountErrorHelpLink: options.AppendMountErrorHelpLink, mountPermissions: options.MountPermissions, enableAznfsMount: options.EnableAznfsMount, @@ -297,6 +301,9 @@ func NewDriver(options *DriverOptions, kubeClient kubernetes.Interface, cloud *p if d.enableGetVolumeStats { nodeCap = append(nodeCap, csi.NodeServiceCapability_RPC_GET_VOLUME_STATS) } + if d.enableVolumeMountGroup { + nodeCap = append(nodeCap, csi.NodeServiceCapability_RPC_VOLUME_MOUNT_GROUP) + } d.AddNodeServiceCapabilities(nodeCap) return &d diff --git a/pkg/blob/nodeserver.go b/pkg/blob/nodeserver.go index 
891da8c23..10a1a7c6f 100644 --- a/pkg/blob/nodeserver.go +++ b/pkg/blob/nodeserver.go @@ -237,6 +237,7 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe defer d.volumeLocks.Release(lockKey) mountFlags := req.GetVolumeCapability().GetMount().GetMountFlags() + volumeMountGroup := req.GetVolumeCapability().GetMount().GetVolumeMountGroup() attrib := req.GetVolumeContext() secrets := req.GetSecrets() @@ -361,6 +362,11 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe if isHnsEnabled { mountOptions = util.JoinMountOptions(mountOptions, []string{"--use-adls=true"}) } + + if !checkGidPresentInMountFlags(mountFlags) && volumeMountGroup != "" { + mountOptions = append(mountOptions, fmt.Sprintf("gid=%s", volumeMountGroup)) + } + tmpPath := fmt.Sprintf("%s/%s", "/mnt", volumeID) if d.appendTimeStampInCacheDir { tmpPath += fmt.Sprintf("#%d", time.Now().Unix()) @@ -372,8 +378,8 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe args = args + " " + opt } - klog.V(2).Infof("target %v\nprotocol %v\n\nvolumeId %v\ncontext %v\nmountflags %v\nmountOptions %v\nargs %v\nserverAddress %v", - targetPath, protocol, volumeID, attrib, mountFlags, mountOptions, args, serverAddress) + klog.V(2).Infof("target %v\nprotocol %v\n\nvolumeId %v\ncontext %v\nmountflags %v mountOptions %v volumeMountGroup %s\nargs %v\nserverAddress %v", + targetPath, protocol, volumeID, attrib, mountFlags, mountOptions, volumeMountGroup, args, serverAddress) authEnv = append(authEnv, "AZURE_STORAGE_ACCOUNT="+accountName, "AZURE_STORAGE_BLOB_ENDPOINT="+serverAddress) if d.enableBlobMockMount { @@ -653,3 +659,12 @@ func waitForMount(path string, intervel, timeout time.Duration) error { } } } + +func checkGidPresentInMountFlags(mountFlags []string) bool { + for _, mountFlag := range mountFlags { + if strings.HasPrefix(mountFlag, "gid") { + return true + } + } + return false +} diff --git a/pkg/blob/nodeserver_test.go b/pkg/blob/nodeserver_test.go index 6a4cec9b1..16481240b 100644 --- a/pkg/blob/nodeserver_test.go +++ b/pkg/blob/nodeserver_test.go @@ -812,3 +812,29 @@ func Test_waitForMount(t *testing.T) { }) } } + +func TestCheckGidPresentInMountFlags(t *testing.T) { + tests := []struct { + desc string + MountFlags []string + result bool + }{ + { + desc: "[Success] Gid present in mount flags", + MountFlags: []string{"gid=3000"}, + result: true, + }, + { + desc: "[Success] Gid not present in mount flags", + MountFlags: []string{}, + result: false, + }, + } + + for _, test := range tests { + gIDPresent := checkGidPresentInMountFlags(test.MountFlags) + if gIDPresent != test.result { + t.Errorf("[%s]: Expected result : %t, Actual result: %t", test.desc, test.result, gIDPresent) + } + } +} From dc4be68cab686a8502274c61a3174eff20b3f1be Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Sun, 31 Dec 2023 03:16:17 +0000 Subject: [PATCH 031/157] feat: add FSGroupChangePolicy param for NFS mount --- docs/driver-parameters.md | 1 + pkg/blob/blob.go | 26 +++++++++++-- pkg/blob/blob_test.go | 39 +++++++++++++++++++ pkg/blob/controllerserver.go | 8 +++- pkg/blob/controllerserver_test.go | 23 +++++++++++ pkg/blob/nodeserver.go | 15 +++++++ pkg/blob/nodeserver_test.go | 19 +++++++++ pkg/util/util.go | 46 ++++++++++++++++++++++ pkg/util/util_test.go | 56 +++++++++++++++++++++++++++ test/e2e/dynamic_provisioning_test.go | 11 ++++-- 10 files changed, 237 insertions(+), 7 deletions(-) diff --git a/docs/driver-parameters.md b/docs/driver-parameters.md index 
f96040b87..0232e487c 100644 --- a/docs/driver-parameters.md +++ b/docs/driver-parameters.md @@ -97,6 +97,7 @@ nodeStageSecretRef.name | secret name that stores(check below examples):
`azu nodeStageSecretRef.namespace | secret namespace | k8s namespace | Yes | --- | **Following parameters are only for NFS protocol** | --- | --- | volumeAttributes.mountPermissions | mounted folder permissions | `0777` | No | +fsGroupChangePolicy | indicates how volume's ownership will be changed by the driver, pod `securityContext.fsGroupChangePolicy` is ignored | `OnRootMismatch`(by default), `Always`, `None` | No | `OnRootMismatch` --- | **Following parameters are only for feature: blobfuse [Managed Identity and Service Principal Name auth](https://github.com/Azure/azure-storage-fuse/tree/blobfuse-1.4.5#environment-variables)** | --- | --- | volumeAttributes.AzureStorageAuthType | Authentication Type | `Key`, `SAS`, `MSI`, `SPN` | No | `Key` volumeAttributes.AzureStorageIdentityClientID | Identity Client ID | | No | diff --git a/pkg/blob/blob.go b/pkg/blob/blob.go index bf1ed3915..dd64fe697 100644 --- a/pkg/blob/blob.go +++ b/pkg/blob/blob.go @@ -114,6 +114,7 @@ const ( accessTierField = "accesstier" networkEndpointTypeField = "networkendpointtype" mountPermissionsField = "mountpermissions" + fsGroupChangePolicyField = "fsgroupchangepolicy" useDataPlaneAPIField = "usedataplaneapi" // See https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata#container-names @@ -145,11 +146,14 @@ const ( VolumeID = "volumeid" defaultStorageEndPointSuffix = "core.windows.net" + + FSGroupChangeNone = "None" ) var ( - supportedProtocolList = []string{Fuse, Fuse2, NFS} - retriableErrors = []string{accountNotProvisioned, tooManyRequests, statusCodeNotFound, containerBeingDeletedDataplaneAPIError, containerBeingDeletedManagementAPIError, clientThrottled} + supportedProtocolList = []string{Fuse, Fuse2, NFS} + retriableErrors = []string{accountNotProvisioned, tooManyRequests, statusCodeNotFound, containerBeingDeletedDataplaneAPIError, containerBeingDeletedManagementAPIError, clientThrottled} + supportedFSGroupChangePolicyList = []string{FSGroupChangeNone, string(v1.FSGroupChangeAlways), string(v1.FSGroupChangeOnRootMismatch)} ) // DriverOptions defines driver parameters specified in driver deployment @@ -169,6 +173,7 @@ type DriverOptions struct { VolStatsCacheExpireInMinutes int SasTokenExpirationMinutes int EnableVolumeMountGroup bool + FSGroupChangePolicy string } func (option *DriverOptions) AddFlags() { @@ -187,6 +192,7 @@ func (option *DriverOptions) AddFlags() { flag.IntVar(&option.VolStatsCacheExpireInMinutes, "vol-stats-cache-expire-in-minutes", 10, "The cache expire time in minutes for volume stats cache") flag.IntVar(&option.SasTokenExpirationMinutes, "sas-token-expiration-minutes", 1440, "sas token expiration minutes during volume cloning") flag.BoolVar(&option.EnableVolumeMountGroup, "enable-volume-mount-group", true, "indicates whether enabling VOLUME_MOUNT_GROUP") + flag.StringVar(&option.FSGroupChangePolicy, "fsgroup-change-policy", "", "indicates how the volume's ownership will be changed by the driver, OnRootMismatch is the default value") } // Driver implements all interfaces of CSI drivers @@ -207,6 +213,7 @@ type Driver struct { mountPermissions uint64 enableAznfsMount bool enableVolumeMountGroup bool + fsGroupChangePolicy string mounter *mount.SafeFormatAndMount volLockMap *util.LockMap // A map storing all volumes with ongoing operations so that additional operations @@ -231,7 +238,6 @@ type Driver struct { // NewDriver Creates a NewCSIDriver object. 
Assumes vendor version is equal to driver version & // does not support optional driver plugin info manifest field. Refer to CSI spec for more details. func NewDriver(options *DriverOptions, kubeClient kubernetes.Interface, cloud *provider.Cloud) *Driver { - var err error d := Driver{ volLockMap: util.NewLockMap(), subnetLockMap: util.NewLockMap(), @@ -247,6 +253,7 @@ func NewDriver(options *DriverOptions, kubeClient kubernetes.Interface, cloud *p mountPermissions: options.MountPermissions, enableAznfsMount: options.EnableAznfsMount, sasTokenExpirationMinutes: options.SasTokenExpirationMinutes, + fsGroupChangePolicy: options.FSGroupChangePolicy, azcopy: &util.Azcopy{}, KubeClient: kubeClient, cloud: cloud, @@ -255,6 +262,7 @@ func NewDriver(options *DriverOptions, kubeClient kubernetes.Interface, cloud *p d.Version = driverVersion d.NodeID = options.NodeID + var err error getter := func(key string) (interface{}, error) { return nil, nil } if d.accountSearchCache, err = azcache.NewTimedCache(time.Minute, getter, false); err != nil { klog.Fatalf("%v", err) @@ -1024,3 +1032,15 @@ func replaceWithMap(str string, m map[string]string) string { } return str } + +func isSupportedFSGroupChangePolicy(policy string) bool { + if policy == "" { + return true + } + for _, v := range supportedFSGroupChangePolicyList { + if policy == v { + return true + } + } + return false +} diff --git a/pkg/blob/blob_test.go b/pkg/blob/blob_test.go index f2e191981..e1a7eaca3 100644 --- a/pkg/blob/blob_test.go +++ b/pkg/blob/blob_test.go @@ -1783,3 +1783,42 @@ func TestDriverOptions_AddFlags(t *testing.T) { } }) } + +func TestIsSupportedFSGroupChangePolicy(t *testing.T) { + tests := []struct { + policy string + expectedResult bool + }{ + { + policy: "", + expectedResult: true, + }, + { + policy: "None", + expectedResult: true, + }, + { + policy: "Always", + expectedResult: true, + }, + { + policy: "OnRootMismatch", + expectedResult: true, + }, + { + policy: "onRootMismatch", + expectedResult: false, + }, + { + policy: "invalid", + expectedResult: false, + }, + } + + for _, test := range tests { + result := isSupportedFSGroupChangePolicy(test.policy) + if result != test.expectedResult { + t.Errorf("isSupportedFSGroupChangePolicy(%s) returned with %v, not equal to %v", test.policy, result, test.expectedResult) + } + } +} diff --git a/pkg/blob/controllerserver.go b/pkg/blob/controllerserver.go index b1c6c13ae..c0474c5f0 100644 --- a/pkg/blob/controllerserver.go +++ b/pkg/blob/controllerserver.go @@ -99,7 +99,7 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) } var storageAccountType, subsID, resourceGroup, location, account, containerName, containerNamePrefix, protocol, customTags, secretName, secretNamespace, pvcNamespace string var isHnsEnabled, requireInfraEncryption, enableBlobVersioning, createPrivateEndpoint, enableNfsV3 *bool - var vnetResourceGroup, vnetName, subnetName, accessTier, networkEndpointType, storageEndpointSuffix string + var vnetResourceGroup, vnetName, subnetName, accessTier, networkEndpointType, storageEndpointSuffix, fsGroupChangePolicy string var matchTags, useDataPlaneAPI, getLatestAccountKey bool var softDeleteBlobs, softDeleteContainers int32 var vnetResourceIDs []string @@ -212,6 +212,8 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) } case useDataPlaneAPIField: useDataPlaneAPI = strings.EqualFold(v, trueValue) + case fsGroupChangePolicyField: + fsGroupChangePolicy = v default: return nil, 
status.Errorf(codes.InvalidArgument, fmt.Sprintf("invalid parameter %q in storage class", k)) } @@ -223,6 +225,10 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) } } + if !isSupportedFSGroupChangePolicy(fsGroupChangePolicy) { + return nil, status.Errorf(codes.InvalidArgument, "fsGroupChangePolicy(%s) is not supported, supported fsGroupChangePolicy list: %v", fsGroupChangePolicy, supportedFSGroupChangePolicyList) + } + if matchTags && account != "" { return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("matchTags must set as false when storageAccount(%s) is provided", account)) } diff --git a/pkg/blob/controllerserver_test.go b/pkg/blob/controllerserver_test.go index 25eb5d707..aa8eaa6f6 100644 --- a/pkg/blob/controllerserver_test.go +++ b/pkg/blob/controllerserver_test.go @@ -238,6 +238,29 @@ func TestCreateVolume(t *testing.T) { } }, }, + { + name: "Invalid fsGroupChangePolicy", + testFunc: func(t *testing.T) { + d := NewFakeDriver() + d.cloud = &azure.Cloud{} + mp := map[string]string{ + fsGroupChangePolicyField: "test_fsGroupChangePolicy", + } + req := &csi.CreateVolumeRequest{ + Name: "unit-test", + VolumeCapabilities: stdVolumeCapabilities, + Parameters: mp, + } + d.Cap = []*csi.ControllerServiceCapability{ + controllerServiceCapability, + } + _, err := d.CreateVolume(context.Background(), req) + expectedErr := status.Errorf(codes.InvalidArgument, "fsGroupChangePolicy(test_fsGroupChangePolicy) is not supported, supported fsGroupChangePolicy list: [None Always OnRootMismatch]") + if !reflect.DeepEqual(err, expectedErr) { + t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) + } + }, + }, { name: "invalid getLatestAccountKey value", testFunc: func(t *testing.T) { diff --git a/pkg/blob/nodeserver.go b/pkg/blob/nodeserver.go index 10a1a7c6f..a84dc99eb 100644 --- a/pkg/blob/nodeserver.go +++ b/pkg/blob/nodeserver.go @@ -252,6 +252,8 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe containerNameReplaceMap := map[string]string{} + fsGroupChangePolicy := d.fsGroupChangePolicy + mountPermissions := d.mountPermissions performChmodOp := (mountPermissions > 0) for k, v := range attrib { @@ -287,9 +289,15 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe mountPermissions = perm } } + case fsGroupChangePolicyField: + fsGroupChangePolicy = v } } + if !isSupportedFSGroupChangePolicy(fsGroupChangePolicy) { + return nil, status.Errorf(codes.InvalidArgument, "fsGroupChangePolicy(%s) is not supported, supported fsGroupChangePolicy list: %v", fsGroupChangePolicy, supportedFSGroupChangePolicyList) + } + mnt, err := d.ensureMountPoint(targetPath, fs.FileMode(mountPermissions)) if err != nil { return nil, status.Errorf(codes.Internal, "Could not mount target %q: %v", targetPath, err) @@ -349,6 +357,13 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe klog.V(2).Infof("skip chmod on targetPath(%s) since mountPermissions is set as 0", targetPath) } + if volumeMountGroup != "" && fsGroupChangePolicy != FSGroupChangeNone { + klog.V(2).Infof("set gid of volume(%s) as %s using fsGroupChangePolicy(%s)", volumeID, volumeMountGroup, fsGroupChangePolicy) + if err := volumehelper.SetVolumeOwnership(targetPath, volumeMountGroup, fsGroupChangePolicy); err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("SetVolumeOwnership with volume(%s) on %s failed with %v", volumeID, targetPath, err)) + } + } + isOperationSucceeded = true 
klog.V(2).Infof("volume(%s) mount %s on %s succeeded", volumeID, source, targetPath) return &csi.NodeStageVolumeResponse{}, nil diff --git a/pkg/blob/nodeserver_test.go b/pkg/blob/nodeserver_test.go index 16481240b..6c7c64d29 100644 --- a/pkg/blob/nodeserver_test.go +++ b/pkg/blob/nodeserver_test.go @@ -463,6 +463,25 @@ func TestNodeStageVolume(t *testing.T) { } }, }, + { + name: "[Error] Invalid fsGroupChangePolicy", + testFunc: func(t *testing.T) { + req := &csi.NodeStageVolumeRequest{ + VolumeId: "unit-test", + StagingTargetPath: "unit-test", + VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap}, + VolumeContext: map[string]string{ + fsGroupChangePolicyField: "test_fsGroupChangePolicy", + }, + } + d := NewFakeDriver() + _, err := d.NodeStageVolume(context.TODO(), req) + expectedErr := status.Error(codes.InvalidArgument, "fsGroupChangePolicy(test_fsGroupChangePolicy) is not supported, supported fsGroupChangePolicy list: [None Always OnRootMismatch]") + if !reflect.DeepEqual(err, expectedErr) { + t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) + } + }, + }, { name: "[Error] Could not mount to target", testFunc: func(t *testing.T) { diff --git a/pkg/util/util.go b/pkg/util/util.go index c010db385..2cf64a99b 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -21,15 +21,18 @@ import ( "os" "os/exec" "regexp" + "strconv" "strings" "sync" "github.com/go-ini/ini" "github.com/pkg/errors" + v1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" + "k8s.io/kubernetes/pkg/volume" ) const ( @@ -341,3 +344,46 @@ func GetKubeClient(kubeconfig string, kubeAPIQPS float64, kubeAPIBurst int, user kubeCfg.UserAgent = userAgent return kubernetes.NewForConfig(kubeCfg) } + +type VolumeMounter struct { + path string + attributes volume.Attributes +} + +func (l *VolumeMounter) GetPath() string { + return l.path +} + +func (l *VolumeMounter) GetAttributes() volume.Attributes { + return l.attributes +} + +func (l *VolumeMounter) CanMount() error { + return nil +} + +func (l *VolumeMounter) SetUp(_ volume.MounterArgs) error { + return nil +} + +func (l *VolumeMounter) SetUpAt(_ string, _ volume.MounterArgs) error { + return nil +} + +func (l *VolumeMounter) GetMetrics() (*volume.Metrics, error) { + return nil, nil +} + +// SetVolumeOwnership would set gid for path recursively +func SetVolumeOwnership(path, gid, policy string) error { + id, err := strconv.Atoi(gid) + if err != nil { + return fmt.Errorf("convert %s to int failed with %v", gid, err) + } + gidInt64 := int64(id) + fsGroupChangePolicy := v1.FSGroupChangeOnRootMismatch + if policy != "" { + fsGroupChangePolicy = v1.PodFSGroupChangePolicy(policy) + } + return volume.SetVolumeOwnership(&VolumeMounter{path: path}, path, &gidInt64, &fsGroupChangePolicy, nil) +} diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go index 4c3832b25..edbf6d0d3 100644 --- a/pkg/util/util_test.go +++ b/pkg/util/util_test.go @@ -600,3 +600,59 @@ users: } } } + +func TestSetVolumeOwnership(t *testing.T) { + tmpVDir, err := os.MkdirTemp(os.TempDir(), "SetVolumeOwnership") + if err != nil { + t.Fatalf("can't make a temp dir: %v", err) + } + //deferred clean up + defer os.RemoveAll(tmpVDir) + + tests := []struct { + path string + gid string + fsGroupChangePolicy string + expectedError error + }{ + { + path: "path", + gid: "", + expectedError: fmt.Errorf("convert %s to int failed with %v", "", `strconv.Atoi: parsing "": invalid syntax`), + }, + { + path: "path", + gid: 
"alpha", + expectedError: fmt.Errorf("convert %s to int failed with %v", "alpha", `strconv.Atoi: parsing "alpha": invalid syntax`), + }, + { + path: "not-exists", + gid: "1000", + expectedError: fmt.Errorf("lstat not-exists: no such file or directory"), + }, + { + path: tmpVDir, + gid: "1000", + expectedError: nil, + }, + { + path: tmpVDir, + gid: "1000", + fsGroupChangePolicy: "Always", + expectedError: nil, + }, + { + path: tmpVDir, + gid: "1000", + fsGroupChangePolicy: "OnRootMismatch", + expectedError: nil, + }, + } + + for _, test := range tests { + err := SetVolumeOwnership(test.path, test.gid, test.fsGroupChangePolicy) + if err != nil && (err.Error() != test.expectedError.Error()) { + t.Errorf("unexpected error: %v, expected error: %v", err, test.expectedError) + } + } +} diff --git a/test/e2e/dynamic_provisioning_test.go b/test/e2e/dynamic_provisioning_test.go index 0f2ee5697..679edabd7 100644 --- a/test/e2e/dynamic_provisioning_test.go +++ b/test/e2e/dynamic_provisioning_test.go @@ -65,6 +65,8 @@ var _ = ginkgo.Describe("[blob-csi-e2e] Dynamic Provisioning", func() { "-o allow_other", "--file-cache-timeout-in-seconds=120", "--cancel-list-on-mount-seconds=0", + "-o uid=0", + "-o gid=0", }, VolumeMount: testsuites.VolumeMountDetails{ NameGenerate: "test-volume-", @@ -101,6 +103,8 @@ var _ = ginkgo.Describe("[blob-csi-e2e] Dynamic Provisioning", func() { "-o allow_other", "--file-cache-timeout-in-seconds=120", "--cancel-list-on-mount-seconds=0", + "-o uid=0", + "-o gid=0", }, VolumeMount: testsuites.VolumeMountDetails{ NameGenerate: "test-volume-", @@ -516,9 +520,10 @@ var _ = ginkgo.Describe("[blob-csi-e2e] Dynamic Provisioning", func() { CSIDriver: testDriver, Pods: pods, StorageClassParameters: map[string]string{ - "skuName": "Premium_LRS", - "protocol": "nfs", - "mountPermissions": "0755", + "skuName": "Premium_LRS", + "protocol": "nfs", + "mountPermissions": "0755", + "fsGroupChangePolicy": "Always", }, } test.Run(ctx, cs, ns) From f3d6399436d906dfad949944537495f5972aa28d Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Tue, 2 Jan 2024 12:56:35 +0000 Subject: [PATCH 032/157] fix: add cache in sastoken fallback fix --- pkg/blob/blob.go | 5 +++++ pkg/blob/blob_test.go | 1 + pkg/blob/controllerserver.go | 27 +++++++++++++++++++-------- pkg/blob/controllerserver_test.go | 8 ++++---- 4 files changed, 29 insertions(+), 12 deletions(-) diff --git a/pkg/blob/blob.go b/pkg/blob/blob.go index 2ee325223..9f3fabf8b 100644 --- a/pkg/blob/blob.go +++ b/pkg/blob/blob.go @@ -219,6 +219,8 @@ type Driver struct { accountSearchCache azcache.Resource // a timed cache storing volume stats volStatsCache azcache.Resource + // a timed cache storing account which should use sastoken for azcopy based volume cloning + azcopySasTokenCache azcache.Resource // sas expiry time for azcopy in volume clone sasTokenExpirationMinutes int // azcopy for provide exec mock for ut @@ -258,6 +260,9 @@ func NewDriver(options *DriverOptions, kubeClient kubernetes.Interface, cloud *p if d.dataPlaneAPIVolCache, err = azcache.NewTimedCache(10*time.Minute, getter, false); err != nil { klog.Fatalf("%v", err) } + if d.azcopySasTokenCache, err = azcache.NewTimedCache(15*time.Minute, getter, false); err != nil { + klog.Fatalf("%v", err) + } if options.VolStatsCacheExpireInMinutes <= 0 { options.VolStatsCacheExpireInMinutes = 10 // default expire in 10 minutes diff --git a/pkg/blob/blob_test.go b/pkg/blob/blob_test.go index f2e191981..907e8c7a9 100644 --- a/pkg/blob/blob_test.go +++ b/pkg/blob/blob_test.go @@ -92,6 +92,7 @@ func 
TestNewDriver(t *testing.T) { fakedriver.Version = driverVersion fakedriver.accountSearchCache = driver.accountSearchCache fakedriver.dataPlaneAPIVolCache = driver.dataPlaneAPIVolCache + fakedriver.azcopySasTokenCache = driver.azcopySasTokenCache fakedriver.volStatsCache = driver.volStatsCache fakedriver.cloud = driver.cloud assert.Equal(t, driver, fakedriver) diff --git a/pkg/blob/controllerserver.go b/pkg/blob/controllerserver.go index b1c6c13ae..f020f59ae 100644 --- a/pkg/blob/controllerserver.go +++ b/pkg/blob/controllerserver.go @@ -829,18 +829,29 @@ func (d *Driver) authorizeAzcopyWithIdentity() ([]string, error) { // 3. azcopy returns AuthorizationPermissionMismatch error when using service principal or managed identity func (d *Driver) getSASToken(ctx context.Context, accountName, accountKey, storageEndpointSuffix string, accountOptions *azure.AccountOptions, secrets map[string]string, secretName, secretNamespace string) (string, error) { authAzcopyEnv, _ := d.authorizeAzcopyWithIdentity() - useSasTokenFallBack := false + useSasToken := false if len(authAzcopyEnv) > 0 { - out, testErr := d.azcopy.TestListJobs(accountName, storageEndpointSuffix, authAzcopyEnv) - if testErr != nil { - return "", fmt.Errorf("azcopy list command failed with error(%v): %v", testErr, out) + // search in cache first + cache, err := d.azcopySasTokenCache.Get(accountName, azcache.CacheReadTypeDefault) + if err != nil { + return "", fmt.Errorf("get(%s) from azcopySasTokenCache failed with error: %v", accountName, err) } - if strings.Contains(out, authorizationPermissionMismatch) { - klog.Warningf("azcopy list failed with AuthorizationPermissionMismatch error, should assign \"Storage Blob Data Contributor\" role to controller identity, fall back to use sas token, original output: %v", out) - useSasTokenFallBack = true + if cache != nil { + klog.V(2).Infof("use sas token for account(%s) since this account is found in azcopySasTokenCache", accountName) + useSasToken = true + } else { + out, testErr := d.azcopy.TestListJobs(accountName, storageEndpointSuffix, authAzcopyEnv) + if testErr != nil { + return "", fmt.Errorf("azcopy list command failed with error(%v): %v", testErr, out) + } + if strings.Contains(out, authorizationPermissionMismatch) { + klog.Warningf("azcopy list failed with AuthorizationPermissionMismatch error, should assign \"Storage Blob Data Contributor\" role to controller identity, fall back to use sas token, original output: %v", out) + d.azcopySasTokenCache.Set(accountName, "") + useSasToken = true + } } } - if len(secrets) > 0 || len(authAzcopyEnv) == 0 || useSasTokenFallBack { + if len(secrets) > 0 || len(authAzcopyEnv) == 0 || useSasToken { var err error if accountKey == "" { if _, accountKey, err = d.GetStorageAccesskey(ctx, accountOptions, secrets, secretName, secretNamespace); err != nil { diff --git a/pkg/blob/controllerserver_test.go b/pkg/blob/controllerserver_test.go index 25eb5d707..48435bd45 100644 --- a/pkg/blob/controllerserver_test.go +++ b/pkg/blob/controllerserver_test.go @@ -1748,7 +1748,7 @@ func TestCopyVolume(t *testing.T) { } } -func Test_parseDays(t *testing.T) { +func TestParseDays(t *testing.T) { type args struct { dayStr string } @@ -1797,7 +1797,7 @@ func Test_parseDays(t *testing.T) { } } -func Test_generateSASToken(t *testing.T) { +func TestGenerateSASToken(t *testing.T) { storageEndpointSuffix := "core.windows.net" tests := []struct { name string @@ -1835,7 +1835,7 @@ func Test_generateSASToken(t *testing.T) { } } -func Test_authorizeAzcopyWithIdentity(t 
*testing.T) { +func TestAuthorizeAzcopyWithIdentity(t *testing.T) { testCases := []struct { name string testFunc func(t *testing.T) @@ -1966,7 +1966,7 @@ func Test_authorizeAzcopyWithIdentity(t *testing.T) { } } -func Test_getSASToken(t *testing.T) { +func TestGetSASToken(t *testing.T) { testCases := []struct { name string testFunc func(t *testing.T) From 941507634c45e496577278d9174fc2718bcb6b33 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Sun, 31 Dec 2023 05:34:24 +0000 Subject: [PATCH 033/157] fix: gid mount option setting fix --- pkg/blob/nodeserver.go | 5 +++-- pkg/blob/nodeserver_test.go | 5 +++++ test/external-e2e/run.sh | 2 +- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/pkg/blob/nodeserver.go b/pkg/blob/nodeserver.go index a84dc99eb..f7044c102 100644 --- a/pkg/blob/nodeserver.go +++ b/pkg/blob/nodeserver.go @@ -379,7 +379,8 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe } if !checkGidPresentInMountFlags(mountFlags) && volumeMountGroup != "" { - mountOptions = append(mountOptions, fmt.Sprintf("gid=%s", volumeMountGroup)) + klog.V(2).Infof("append volumeMountGroup %s", volumeMountGroup) + mountOptions = append(mountOptions, fmt.Sprintf("-o gid=%s", volumeMountGroup)) } tmpPath := fmt.Sprintf("%s/%s", "/mnt", volumeID) @@ -677,7 +678,7 @@ func waitForMount(path string, intervel, timeout time.Duration) error { func checkGidPresentInMountFlags(mountFlags []string) bool { for _, mountFlag := range mountFlags { - if strings.HasPrefix(mountFlag, "gid") { + if strings.Contains(mountFlag, "gid=") { return true } } diff --git a/pkg/blob/nodeserver_test.go b/pkg/blob/nodeserver_test.go index 6c7c64d29..91e4909d2 100644 --- a/pkg/blob/nodeserver_test.go +++ b/pkg/blob/nodeserver_test.go @@ -843,6 +843,11 @@ func TestCheckGidPresentInMountFlags(t *testing.T) { MountFlags: []string{"gid=3000"}, result: true, }, + { + desc: "[Success] Gid present in mount flags", + MountFlags: []string{"-o gid=3000"}, + result: true, + }, { desc: "[Success] Gid not present in mount flags", MountFlags: []string{}, diff --git a/test/external-e2e/run.sh b/test/external-e2e/run.sh index 8e952f469..31d132a4a 100755 --- a/test/external-e2e/run.sh +++ b/test/external-e2e/run.sh @@ -75,7 +75,7 @@ if [ ! -z ${EXTERNAL_E2E_TEST_NFS} ]; then echo "begin to run NFSv3 tests ...." 
cp deploy/example/storageclass-blob-nfs.yaml /tmp/csi/storageclass.yaml ginkgo -p -v --fail-fast --flake-attempts 2 -focus="External.Storage.*$DRIVER.csi.azure.com" \ - -skip='\[Disruptive\]|should concurrently access the single volume from pods on different node|pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents|should provision storage with any volume data source|should mount multiple PV pointing to the same storage on the same node|should access to two volumes with the same volume mode and retain data across pod recreation on different node' kubernetes/test/bin/e2e.test -- \ + -skip='\[Disruptive\]|should concurrently access the single volume from pods on different node|pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents|should provision storage with any volume data source|should mount multiple PV pointing to the same storage on the same node|should access to two volumes with the same volume mode and retain data across pod recreation on different node|pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents' kubernetes/test/bin/e2e.test -- \ -storage.testdriver=$PROJECT_ROOT/test/external-e2e/testdriver-nfs.yaml \ --kubeconfig=$KUBECONFIG fi From 3e4a7842a367c96052318318e51290ae2e2c81f5 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Tue, 2 Jan 2024 11:09:24 +0000 Subject: [PATCH 034/157] test: enable volumeMountGroup external tests --- test/external-e2e/testdriver-blobfuse.yaml | 1 + test/external-e2e/testdriver-nfs.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/test/external-e2e/testdriver-blobfuse.yaml b/test/external-e2e/testdriver-blobfuse.yaml index 65da74865..4208b118f 100644 --- a/test/external-e2e/testdriver-blobfuse.yaml +++ b/test/external-e2e/testdriver-blobfuse.yaml @@ -20,3 +20,4 @@ DriverInfo: pvcDataSource: true multiplePVsSameID: true readWriteOncePod: true + volumeMountGroup: true diff --git a/test/external-e2e/testdriver-nfs.yaml b/test/external-e2e/testdriver-nfs.yaml index 18c208aa0..6cbbc10bd 100644 --- a/test/external-e2e/testdriver-nfs.yaml +++ b/test/external-e2e/testdriver-nfs.yaml @@ -20,3 +20,4 @@ DriverInfo: pvcDataSource: true multiplePVsSameID: true readWriteOncePod: true + volumeMountGroup: false From f1f99e93d542c264c5f7af40b3254691207caef7 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Wed, 3 Jan 2024 03:04:39 +0000 Subject: [PATCH 035/157] feat: upgrade to aznfs 2.0.3 version for nfs mount fix --- pkg/blobplugin/Dockerfile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/blobplugin/Dockerfile b/pkg/blobplugin/Dockerfile index 2d639afbc..d7c89157f 100644 --- a/pkg/blobplugin/Dockerfile +++ b/pkg/blobplugin/Dockerfile @@ -36,10 +36,11 @@ RUN chmod +x /blobfuse-proxy/init.sh && \ RUN apt update && apt upgrade -y && apt-mark unhold libcap2 && clean-install ca-certificates uuid-dev util-linux mount udev wget e2fsprogs nfs-common netbase procps conntrack iptables bind9-host iproute2 bash netcat-traditional sysvinit-utils kmod # install aznfs +ARG aznfsVer=2.0.3 RUN if [ "$ARCH" = "amd64" ] ; then \ - wget -O aznfs.tar.gz https://github.com/Azure/AZNFS-mount/releases/download/2.0.1/aznfs-2.0.1-1.x86_64.tar.gz; \ + wget -O aznfs.tar.gz 
https://github.com/Azure/AZNFS-mount/releases/download/${aznfsVer}/aznfs-${aznfsVer}-1.x86_64.tar.gz; \ else \ - wget -O aznfs.tar.gz https://github.com/Azure/AZNFS-mount/releases/download/2.0.1/aznfs-2.0.1-1.arm64.tar.gz;fi + wget -O aznfs.tar.gz https://github.com/Azure/AZNFS-mount/releases/download/${aznfsVer}/aznfs-${aznfsVer}-1.arm64.tar.gz;fi RUN tar xvzf aznfs.tar.gz -C / --keep-directory-symlink && rm aznfs.tar.gz # install azcopy From a3a2f1582ea2468b682bd057832681803db03840 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Wed, 3 Jan 2024 12:08:04 +0000 Subject: [PATCH 036/157] fix: only generate sas token when secret is used in volume cloning --- pkg/blob/controllerserver.go | 46 +++++++++++++++++++------------ pkg/blob/controllerserver_test.go | 30 ++++---------------- 2 files changed, 33 insertions(+), 43 deletions(-) diff --git a/pkg/blob/controllerserver.go b/pkg/blob/controllerserver.go index 18c7ba94e..b05eb987e 100644 --- a/pkg/blob/controllerserver.go +++ b/pkg/blob/controllerserver.go @@ -800,6 +800,7 @@ func (d *Driver) copyVolume(ctx context.Context, req *csi.CreateVolumeRequest, a } } +// authorizeAzcopyWithIdentity returns auth env for azcopy using cluster identity func (d *Driver) authorizeAzcopyWithIdentity() ([]string, error) { azureAuthConfig := d.cloud.Config.AzureAuthConfig var authAzcopyEnv []string @@ -834,30 +835,39 @@ func (d *Driver) authorizeAzcopyWithIdentity() ([]string, error) { // 2. driver is not using managed identity and service principal // 3. azcopy returns AuthorizationPermissionMismatch error when using service principal or managed identity func (d *Driver) getSASToken(ctx context.Context, accountName, accountKey, storageEndpointSuffix string, accountOptions *azure.AccountOptions, secrets map[string]string, secretName, secretNamespace string) (string, error) { - authAzcopyEnv, _ := d.authorizeAzcopyWithIdentity() + var authAzcopyEnv []string useSasToken := false - if len(authAzcopyEnv) > 0 { - // search in cache first - cache, err := d.azcopySasTokenCache.Get(accountName, azcache.CacheReadTypeDefault) + if len(secrets) == 0 && len(secretName) == 0 { + var err error + authAzcopyEnv, err = d.authorizeAzcopyWithIdentity() if err != nil { - return "", fmt.Errorf("get(%s) from azcopySasTokenCache failed with error: %v", accountName, err) - } - if cache != nil { - klog.V(2).Infof("use sas token for account(%s) since this account is found in azcopySasTokenCache", accountName) - useSasToken = true + klog.Warningf("failed to authorize azcopy with identity, error: %v", err) } else { - out, testErr := d.azcopy.TestListJobs(accountName, storageEndpointSuffix, authAzcopyEnv) - if testErr != nil { - return "", fmt.Errorf("azcopy list command failed with error(%v): %v", testErr, out) - } - if strings.Contains(out, authorizationPermissionMismatch) { - klog.Warningf("azcopy list failed with AuthorizationPermissionMismatch error, should assign \"Storage Blob Data Contributor\" role to controller identity, fall back to use sas token, original output: %v", out) - d.azcopySasTokenCache.Set(accountName, "") - useSasToken = true + if len(authAzcopyEnv) > 0 { + // search in cache first + cache, err := d.azcopySasTokenCache.Get(accountName, azcache.CacheReadTypeDefault) + if err != nil { + return "", fmt.Errorf("get(%s) from azcopySasTokenCache failed with error: %v", accountName, err) + } + if cache != nil { + klog.V(2).Infof("use sas token for account(%s) since this account is found in azcopySasTokenCache", accountName) + useSasToken = true + } else { + out, testErr 
:= d.azcopy.TestListJobs(accountName, storageEndpointSuffix, authAzcopyEnv) + if testErr != nil { + return "", fmt.Errorf("azcopy list command failed with error(%v): %v", testErr, out) + } + if strings.Contains(out, authorizationPermissionMismatch) { + klog.Warningf("azcopy list failed with AuthorizationPermissionMismatch error, should assign \"Storage Blob Data Contributor\" role to controller identity, fall back to use sas token, original output: %v", out) + d.azcopySasTokenCache.Set(accountName, "") + useSasToken = true + } + } } } } - if len(secrets) > 0 || len(authAzcopyEnv) == 0 || useSasToken { + + if len(secrets) > 0 || len(secretName) > 0 || len(authAzcopyEnv) == 0 || useSasToken { var err error if accountKey == "" { if _, accountKey, err = d.GetStorageAccesskey(ctx, accountOptions, secrets, secretName, secretNamespace); err != nil { diff --git a/pkg/blob/controllerserver_test.go b/pkg/blob/controllerserver_test.go index a32b5083b..861afc467 100644 --- a/pkg/blob/controllerserver_test.go +++ b/pkg/blob/controllerserver_test.go @@ -2015,7 +2015,7 @@ func TestGetSASToken(t *testing.T) { }, }, { - name: "failed to test azcopy list command", + name: "generate sas token using account key", testFunc: func(t *testing.T) { d := NewFakeDriver() d.cloud = &azure.Cloud{ @@ -2029,21 +2029,10 @@ func TestGetSASToken(t *testing.T) { } secrets := map[string]string{ defaultSecretAccountName: "accountName", + defaultSecretAccountKey: "YWNjb3VudGtleQo=", } - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - m := util.NewMockEXEC(ctrl) - listStr := "error" - m.EXPECT().RunCommand(gomock.Any(), gomock.Any()).Return(listStr, fmt.Errorf("error")) - - d.azcopy.ExecCmd = m - - ctx := context.Background() - expectedAccountSASToken := "" - expectedErr := fmt.Errorf("azcopy list command failed with error(%v): %v", fmt.Errorf("error"), "error") - accountSASToken, err := d.getSASToken(ctx, "accountName", "", "core.windows.net", &azure.AccountOptions{}, secrets, "secretsName", "secretsNamespace") - if !reflect.DeepEqual(err, expectedErr) || !reflect.DeepEqual(accountSASToken, expectedAccountSASToken) { + accountSASToken, err := d.getSASToken(context.Background(), "accountName", "", "core.windows.net", &azure.AccountOptions{}, secrets, "secretsName", "secretsNamespace") + if !reflect.DeepEqual(err, nil) || !strings.Contains(accountSASToken, "?se=") { t.Errorf("Unexpected accountSASToken: %s, Unexpected error: %v", accountSASToken, err) } }, @@ -2065,19 +2054,10 @@ func TestGetSASToken(t *testing.T) { defaultSecretAccountName: "accountName", defaultSecretAccountKey: "fakeValue", } - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - m := util.NewMockEXEC(ctrl) - listStr := "RESPONSE 403: 403 This request is not authorized to perform this operation using this permission.\nERROR CODE: AuthorizationPermissionMismatch" - m.EXPECT().RunCommand(gomock.Any(), gomock.Any()).Return(listStr, nil) - - d.azcopy.ExecCmd = m - ctx := context.Background() expectedAccountSASToken := "" expectedErr := status.Errorf(codes.Internal, fmt.Sprintf("failed to generate sas token in creating new shared key credential, accountName: %s, err: %s", "accountName", "decode account key: illegal base64 data at input byte 8")) - accountSASToken, err := d.getSASToken(ctx, "accountName", "", "core.windows.net", &azure.AccountOptions{}, secrets, "secretsName", "secretsNamespace") + accountSASToken, err := d.getSASToken(context.Background(), "accountName", "", "core.windows.net", &azure.AccountOptions{}, secrets, "secretsName", 
"secretsNamespace") if !reflect.DeepEqual(err, expectedErr) || !reflect.DeepEqual(accountSASToken, expectedAccountSASToken) { t.Errorf("Unexpected accountSASToken: %s, Unexpected error: %v", accountSASToken, err) } From c8b644ffba1ee8bf27e9f6c160239197005515a4 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Thu, 4 Jan 2024 03:39:16 +0000 Subject: [PATCH 037/157] fix: only authorize azcopy once test: fix goling --- pkg/blob/controllerserver.go | 36 +++++++---------- pkg/blob/controllerserver_test.go | 66 ++++++------------------------- 2 files changed, 26 insertions(+), 76 deletions(-) diff --git a/pkg/blob/controllerserver.go b/pkg/blob/controllerserver.go index b05eb987e..4c7b7ed80 100644 --- a/pkg/blob/controllerserver.go +++ b/pkg/blob/controllerserver.go @@ -428,11 +428,11 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) } if req.GetVolumeContentSource() != nil { - var accountSASToken string - if accountSASToken, err = d.getSASToken(ctx, accountName, accountKey, storageEndpointSuffix, accountOptions, secrets, secretName, secretNamespace); err != nil { - return nil, status.Errorf(codes.Internal, "failed to getSASToken on account(%s) rg(%s), error: %v", accountOptions.Name, accountOptions.ResourceGroup, err) + accountSASToken, authAzcopyEnv, err := d.getAzcopyAuth(ctx, accountName, accountKey, storageEndpointSuffix, accountOptions, secrets, secretName, secretNamespace) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to getAzcopyAuth on account(%s) rg(%s), error: %v", accountOptions.Name, accountOptions.ResourceGroup, err) } - if err := d.copyVolume(ctx, req, accountSASToken, validContainerName, storageEndpointSuffix); err != nil { + if err := d.copyVolume(req, accountSASToken, authAzcopyEnv, validContainerName, storageEndpointSuffix); err != nil { return nil, err } } else { @@ -727,7 +727,7 @@ func (d *Driver) DeleteBlobContainer(ctx context.Context, subsID, resourceGroupN } // CopyBlobContainer copies a blob container in the same storage account -func (d *Driver) copyBlobContainer(_ context.Context, req *csi.CreateVolumeRequest, accountSasToken, dstContainerName, storageEndpointSuffix string) error { +func (d *Driver) copyBlobContainer(req *csi.CreateVolumeRequest, accountSasToken string, authAzcopyEnv []string, dstContainerName, storageEndpointSuffix string) error { var sourceVolumeID string if req.GetVolumeContentSource() != nil && req.GetVolumeContentSource().GetVolume() != nil { sourceVolumeID = req.GetVolumeContentSource().GetVolume().GetVolumeId() @@ -741,13 +741,6 @@ func (d *Driver) copyBlobContainer(_ context.Context, req *csi.CreateVolumeReque return fmt.Errorf("srcContainerName(%s) or dstContainerName(%s) is empty", srcContainerName, dstContainerName) } - var authAzcopyEnv []string - if accountSasToken == "" { - if authAzcopyEnv, err = d.authorizeAzcopyWithIdentity(); err != nil { - return err - } - } - timeAfter := time.After(waitForCopyTimeout) timeTick := time.Tick(waitForCopyInterval) srcPath := fmt.Sprintf("https://%s.blob.%s/%s%s", accountName, storageEndpointSuffix, srcContainerName, accountSasToken) @@ -788,13 +781,13 @@ func (d *Driver) copyBlobContainer(_ context.Context, req *csi.CreateVolumeReque } // copyVolume copies a volume form volume or snapshot, snapshot is not supported now -func (d *Driver) copyVolume(ctx context.Context, req *csi.CreateVolumeRequest, accountSASToken, dstContainerName, storageEndpointSuffix string) error { +func (d *Driver) copyVolume(req *csi.CreateVolumeRequest, accountSASToken 
string, authAzcopyEnv []string, dstContainerName, storageEndpointSuffix string) error { vs := req.VolumeContentSource switch vs.Type.(type) { case *csi.VolumeContentSource_Snapshot: return status.Errorf(codes.InvalidArgument, "copy volume from volumeSnapshot is not supported") case *csi.VolumeContentSource_Volume: - return d.copyBlobContainer(ctx, req, accountSASToken, dstContainerName, storageEndpointSuffix) + return d.copyBlobContainer(req, accountSASToken, authAzcopyEnv, dstContainerName, storageEndpointSuffix) default: return status.Errorf(codes.InvalidArgument, "%v is not a proper volume source", vs) } @@ -830,11 +823,11 @@ func (d *Driver) authorizeAzcopyWithIdentity() ([]string, error) { return []string{}, fmt.Errorf("service principle or managed identity are both not set") } -// getSASToken will only generate sas token for azcopy in following conditions: +// getAzcopyAuth will only generate sas token for azcopy in following conditions: // 1. secrets is not empty // 2. driver is not using managed identity and service principal // 3. azcopy returns AuthorizationPermissionMismatch error when using service principal or managed identity -func (d *Driver) getSASToken(ctx context.Context, accountName, accountKey, storageEndpointSuffix string, accountOptions *azure.AccountOptions, secrets map[string]string, secretName, secretNamespace string) (string, error) { +func (d *Driver) getAzcopyAuth(ctx context.Context, accountName, accountKey, storageEndpointSuffix string, accountOptions *azure.AccountOptions, secrets map[string]string, secretName, secretNamespace string) (string, []string, error) { var authAzcopyEnv []string useSasToken := false if len(secrets) == 0 && len(secretName) == 0 { @@ -847,7 +840,7 @@ func (d *Driver) getSASToken(ctx context.Context, accountName, accountKey, stora // search in cache first cache, err := d.azcopySasTokenCache.Get(accountName, azcache.CacheReadTypeDefault) if err != nil { - return "", fmt.Errorf("get(%s) from azcopySasTokenCache failed with error: %v", accountName, err) + return "", nil, fmt.Errorf("get(%s) from azcopySasTokenCache failed with error: %v", accountName, err) } if cache != nil { klog.V(2).Infof("use sas token for account(%s) since this account is found in azcopySasTokenCache", accountName) @@ -855,7 +848,7 @@ func (d *Driver) getSASToken(ctx context.Context, accountName, accountKey, stora } else { out, testErr := d.azcopy.TestListJobs(accountName, storageEndpointSuffix, authAzcopyEnv) if testErr != nil { - return "", fmt.Errorf("azcopy list command failed with error(%v): %v", testErr, out) + return "", nil, fmt.Errorf("azcopy list command failed with error(%v): %v", testErr, out) } if strings.Contains(out, authorizationPermissionMismatch) { klog.Warningf("azcopy list failed with AuthorizationPermissionMismatch error, should assign \"Storage Blob Data Contributor\" role to controller identity, fall back to use sas token, original output: %v", out) @@ -871,13 +864,14 @@ func (d *Driver) getSASToken(ctx context.Context, accountName, accountKey, stora var err error if accountKey == "" { if _, accountKey, err = d.GetStorageAccesskey(ctx, accountOptions, secrets, secretName, secretNamespace); err != nil { - return "", err + return "", nil, err } } klog.V(2).Infof("generate sas token for account(%s)", accountName) - return generateSASToken(accountName, accountKey, storageEndpointSuffix, d.sasTokenExpirationMinutes) + sasToken, err := generateSASToken(accountName, accountKey, storageEndpointSuffix, d.sasTokenExpirationMinutes) + return sasToken, 
nil, err } - return "", nil + return "", authAzcopyEnv, nil } // isValidVolumeCapabilities validates the given VolumeCapability array is valid diff --git a/pkg/blob/controllerserver_test.go b/pkg/blob/controllerserver_test.go index 861afc467..02eea3d91 100644 --- a/pkg/blob/controllerserver_test.go +++ b/pkg/blob/controllerserver_test.go @@ -1538,10 +1538,8 @@ func TestCopyVolume(t *testing.T) { VolumeContentSource: &volumecontensource, } - ctx := context.Background() - expectedErr := status.Errorf(codes.InvalidArgument, "copy volume from volumeSnapshot is not supported") - err := d.copyVolume(ctx, req, "", "", "core.windows.net") + err := d.copyVolume(req, "", nil, "", "core.windows.net") if !reflect.DeepEqual(err, expectedErr) { t.Errorf("Unexpected error: %v", err) } @@ -1570,10 +1568,8 @@ func TestCopyVolume(t *testing.T) { VolumeContentSource: &volumecontensource, } - ctx := context.Background() - expectedErr := status.Errorf(codes.NotFound, "error parsing volume id: \"unit-test\", should at least contain two #") - err := d.copyVolume(ctx, req, "", "dstContainer", "core.windows.net") + err := d.copyVolume(req, "", nil, "dstContainer", "core.windows.net") if !reflect.DeepEqual(err, expectedErr) { t.Errorf("Unexpected error: %v", err) } @@ -1602,10 +1598,8 @@ func TestCopyVolume(t *testing.T) { VolumeContentSource: &volumecontensource, } - ctx := context.Background() - expectedErr := fmt.Errorf("srcContainerName() or dstContainerName(dstContainer) is empty") - err := d.copyVolume(ctx, req, "", "dstContainer", "core.windows.net") + err := d.copyVolume(req, "", nil, "dstContainer", "core.windows.net") if !reflect.DeepEqual(err, expectedErr) { t.Errorf("Unexpected error: %v", err) } @@ -1634,42 +1628,8 @@ func TestCopyVolume(t *testing.T) { VolumeContentSource: &volumecontensource, } - ctx := context.Background() - expectedErr := fmt.Errorf("srcContainerName(fileshare) or dstContainerName() is empty") - err := d.copyVolume(ctx, req, "", "", "core.windows.net") - if !reflect.DeepEqual(err, expectedErr) { - t.Errorf("Unexpected error: %v", err) - } - }, - }, - { - name: "AADClientSecret shouldn't be nil or useManagedIdentityExtension must be set to true when accountSASToken is empty", - testFunc: func(t *testing.T) { - d := NewFakeDriver() - mp := map[string]string{} - - volumeSource := &csi.VolumeContentSource_VolumeSource{ - VolumeId: "vol_1#f5713de20cde511e8ba4900#fileshare#", - } - volumeContentSourceVolumeSource := &csi.VolumeContentSource_Volume{ - Volume: volumeSource, - } - volumecontensource := csi.VolumeContentSource{ - Type: volumeContentSourceVolumeSource, - } - - req := &csi.CreateVolumeRequest{ - Name: "unit-test", - VolumeCapabilities: stdVolumeCapabilities, - Parameters: mp, - VolumeContentSource: &volumecontensource, - } - - ctx := context.Background() - - expectedErr := fmt.Errorf("service principle or managed identity are both not set") - err := d.copyVolume(ctx, req, "", "dstContainer", "core.windows.net") + err := d.copyVolume(req, "", nil, "", "core.windows.net") if !reflect.DeepEqual(err, expectedErr) { t.Errorf("Unexpected error: %v", err) } @@ -1710,10 +1670,8 @@ func TestCopyVolume(t *testing.T) { d.azcopy.ExecCmd = m - ctx := context.Background() - var expectedErr error - err := d.copyVolume(ctx, req, "sastoken", "dstContainer", "core.windows.net") + err := d.copyVolume(req, "sastoken", nil, "dstContainer", "core.windows.net") if !reflect.DeepEqual(err, expectedErr) { t.Errorf("Unexpected error: %v", err) } @@ -1755,10 +1713,8 @@ func TestCopyVolume(t *testing.T) { 
d.azcopy.ExecCmd = m - ctx := context.Background() - var expectedErr error - err := d.copyVolume(ctx, req, "sastoken", "dstContainer", "core.windows.net") + err := d.copyVolume(req, "sastoken", nil, "dstContainer", "core.windows.net") if !reflect.DeepEqual(err, expectedErr) { t.Errorf("Unexpected error: %v", err) } @@ -1989,7 +1945,7 @@ func TestAuthorizeAzcopyWithIdentity(t *testing.T) { } } -func TestGetSASToken(t *testing.T) { +func TestGetAzcopyAuth(t *testing.T) { testCases := []struct { name string testFunc func(t *testing.T) @@ -2008,7 +1964,7 @@ func TestGetSASToken(t *testing.T) { ctx := context.Background() expectedAccountSASToken := "" expectedErr := fmt.Errorf("could not find accountkey or azurestorageaccountkey field in secrets") - accountSASToken, err := d.getSASToken(ctx, "accountName", "", "core.windows.net", &azure.AccountOptions{}, secrets, "secretsName", "secretsNamespace") + accountSASToken, _, err := d.getAzcopyAuth(ctx, "accountName", "", "core.windows.net", &azure.AccountOptions{}, secrets, "secretsName", "secretsNamespace") if !reflect.DeepEqual(err, expectedErr) || !reflect.DeepEqual(accountSASToken, expectedAccountSASToken) { t.Errorf("Unexpected accountSASToken: %s, Unexpected error: %v", accountSASToken, err) } @@ -2031,7 +1987,7 @@ func TestGetSASToken(t *testing.T) { defaultSecretAccountName: "accountName", defaultSecretAccountKey: "YWNjb3VudGtleQo=", } - accountSASToken, err := d.getSASToken(context.Background(), "accountName", "", "core.windows.net", &azure.AccountOptions{}, secrets, "secretsName", "secretsNamespace") + accountSASToken, _, err := d.getAzcopyAuth(context.Background(), "accountName", "", "core.windows.net", &azure.AccountOptions{}, secrets, "secretsName", "secretsNamespace") if !reflect.DeepEqual(err, nil) || !strings.Contains(accountSASToken, "?se=") { t.Errorf("Unexpected accountSASToken: %s, Unexpected error: %v", accountSASToken, err) } @@ -2057,7 +2013,7 @@ func TestGetSASToken(t *testing.T) { expectedAccountSASToken := "" expectedErr := status.Errorf(codes.Internal, fmt.Sprintf("failed to generate sas token in creating new shared key credential, accountName: %s, err: %s", "accountName", "decode account key: illegal base64 data at input byte 8")) - accountSASToken, err := d.getSASToken(context.Background(), "accountName", "", "core.windows.net", &azure.AccountOptions{}, secrets, "secretsName", "secretsNamespace") + accountSASToken, _, err := d.getAzcopyAuth(context.Background(), "accountName", "", "core.windows.net", &azure.AccountOptions{}, secrets, "secretsName", "secretsNamespace") if !reflect.DeepEqual(err, expectedErr) || !reflect.DeepEqual(accountSASToken, expectedAccountSASToken) { t.Errorf("Unexpected accountSASToken: %s, Unexpected error: %v", accountSASToken, err) } @@ -2078,7 +2034,7 @@ func TestGetSASToken(t *testing.T) { ctx := context.Background() expectedAccountSASToken := "" expectedErr := status.Errorf(codes.Internal, fmt.Sprintf("failed to generate sas token in creating new shared key credential, accountName: %s, err: %s", "accountName", "decode account key: illegal base64 data at input byte 8")) - accountSASToken, err := d.getSASToken(ctx, "accountName", "", "core.windows.net", &azure.AccountOptions{}, secrets, "secretsName", "secretsNamespace") + accountSASToken, _, err := d.getAzcopyAuth(ctx, "accountName", "", "core.windows.net", &azure.AccountOptions{}, secrets, "secretsName", "secretsNamespace") if !reflect.DeepEqual(err, expectedErr) || !reflect.DeepEqual(accountSASToken, expectedAccountSASToken) { 
t.Errorf("Unexpected accountSASToken: %s, Unexpected error: %v", accountSASToken, err) } From 237ef66f160430f43b3c605c3517cf3211c473de Mon Sep 17 00:00:00 2001 From: weizhi Date: Fri, 5 Jan 2024 19:51:37 +0800 Subject: [PATCH 038/157] feat: support workload identity setting in static PV mount (#1204) * feat: support workload identity setting in static PV mount * fix --- charts/latest/blob-csi-driver-v0.0.0.tgz | Bin 5933 -> 5969 bytes .../templates/csi-blob-driver.yaml | 2 + ...=> workload-identity-deploy-csi-driver.md} | 0 docs/workload-identity-static-pv-mount.md | 178 ++++++++++++++++++ go.mod | 2 +- go.sum | 4 +- pkg/blob/blob.go | 27 +++ pkg/blob/nodeserver.go | 18 ++ vendor/modules.txt | 2 +- .../pkg/provider/azure.go | 20 +- .../pkg/provider/azure_controller_common.go | 17 +- .../pkg/provider/azure_loadbalancer.go | 2 + .../azure_loadbalancer_healthprobe.go | 39 ++++ .../pkg/provider/azure_storageaccount.go | 60 ++++++ 14 files changed, 340 insertions(+), 31 deletions(-) rename docs/{workload-identity.md => workload-identity-deploy-csi-driver.md} (100%) create mode 100644 docs/workload-identity-static-pv-mount.md diff --git a/charts/latest/blob-csi-driver-v0.0.0.tgz b/charts/latest/blob-csi-driver-v0.0.0.tgz index 414366591599fb669e0be26d04db7daa972b6eee..217a742e8016d55e189495e94ef47f7735c1beb1 100644 GIT binary patch delta 5946 zcmV-A7scqUF3~QKOn)BylvXo-;aEzli6d(yC%yZ3Iv$8zNjQ@L3xJAM*Wdj&07&s! zqGZL+^@KNx$;IvhTr3v*08Lyrv1~yshm!^H=H(o7*;r!F{cdlq)oQiQPEPc{tyU}h zZ>xQF{9XIx^yKu-QM>)-?7LR`r2VG#9ct~5h4K~%iTQV}Cx6q*4(`9CVV`^kE(oI? zv}o6`@2~!Ju>>J&4hWkQU+QNUe*_%RTbE7HSTc@hfG)>Ek zHCnZo9`d0jT6X;}up5BbuL4-V{@bc$*Z;{;`>_6>qY)3!pi@I=f(4AgXC36(yx|d> zGr^{^VKYy?b${7JK_SG!XM#w^mmSo+`%yG)L7I^iX$XmDE9Ig7_5;@)GMCuP4(d+_ zOb$5+pz5V9$dI!IQThRR0RbjN^DdYGryxNn5{jPX$DLx5i{r*w<7g8}mn?vS5JS!; zP>o{L#aCSt^o|g`XPj3bQ-{LH^ig zhPmmGAk}XCv}ty0w#x!1+$Dy=#3lBndQz*sdWA+X1rF2(BpG595aY;W9FPE;gB0rV z1W+KrL0{%TH5o!@NQFXy*l^Q9zy4Nxg)C(DG;bMxyyXX28ju0@pjH#W z7sQ4O+hzfkYJP1FSV9L$9sqiUeo;eF|9_1_ScDC*KwpUKsxg}`4w^EKmMmzXF-SEX zQ|t;rWU3ybFPQ3wfdKV4N9+r2D0+-a&RiFGM}K{3%dExKLrY-v3Mpn0o1$^ZLdb78 z=MjCz_4J2An?R?Y#_mJQ^2Iz+H#oN&}eA$g1Hj^+Ka>6)~%S#svjUT8`)|CF3iYX)lz90)g90Uuv4M@xxmsO?T zX4D2>#tT5tPE#!cKO}AW9Kh4IT(jmU`5EXhB9=meXW4EbByh{-L=qdjiuxGrKtXGH zgL{c)ic>Y8%=a0W0aY(0Lp-2pwdfEQh<`H58Ix$TL;|E#OB$<1uhCr^Y>L8UiQpc4 zz6+?M{?34O(1XH#AwCHu>g`smjUH=xpvPJ*u_RW)R zv)4G~(89+ybddTWmYKgAQnJRr^gLh9?yAp9{O3%_0mv_m->vk7oD;E{ADacPqkk5Z zLj<0<;3y>!GmabvYF;IBzz6CeM$yM{Kk8mXN@6*&#gK8?K}SEG9iJ3$eRIIBoX22F z!E^9kVq?)k`mcaq(~nrmL&qqj=;5W|XXs)I@hg|jl+3#wbgFR~Nfw4&(SEP^@`x1C zUy6B8?@gzz)<+UPQyk_a$@Z#5{oUQavDcZq>6EDD9I7~_{l}nGE92oK$z0%}_SIM{bO*~0s_UJCGZz|Z4drPGs)rPruX-zN_1--Z zQbaFcIt5#HP<_D0_8gqRg?c6Y;o|U>Fn)C^))UCo^wyeJOX~59DJ1>2Pk&uP;WKjs z4?e*1!nQ$(Ux=Ld9iUQ+L$t^}eE*5!1tzZ2P94-PQaPv9RL*fuX#iDN-@0rv4Fn81 zyI-z2our;>goFx-T{nVXfyTfqmqxwaIBA^Li@F^Z^gD_>9yQvHBhyiU{D|+beW34% z3xmK*^z>OYy|py?TE^M6migC%s|<_2|%?KupwoOe*u)22K2m43e=9*iaS{65tn zU6Rnnp0dA&z&#>_v4&c2-n_}{-e=W@rH7suR=`gsg8!gX@dZnJ?y%X{9nnroxKJ0F z^>m?F|ENzXM(%1;z%t-k7N+7oXMrD`>PCQ_UpSF)O>Iyr;NAFU>3_ot*L#pgPmCp2 zXC)(xHDmX7QU-p*7NnuoSLjMGHncM0E@q&ZsVg3X=*y6)=^$hrSL*Q(lqrQG{_z@hdv8Cze~tQsch|4cFBhXhfAIb_>WxO% zBV~puTRK!PnhEtF{(rmX5*pmA(I{2wg6yE?0`sOzCe83kO|NOmSqC{VA(+-`%BXW0 zc2-JLt7!sM_PZgOSLj!!ZE)=VhWb-&L|Rcv)szV|c`cY`bYl2dpLWMe2YLI>xeIatL!GE@+8j|RDmHdUc^srvq zk#%T0wXFYFg6C`BU2|}K(;GMBy?io_P4S}~?D@k3 zYK%a`HCDDQdVhRWEE!~$CBj|typDZ8W3VesIV9-uv5qWDZ{z>#daHc>-_F7Hfs2Op zpBM_-u>OyZ&rUMy|LpV-|9zhJ@X-7xS`e?Jt%E6XK`wnb_tdo5bLgOdHXk2rH4WCg 
z_c3*}(Lh5tu(mUF%7BJ@c(9NIQ$hjNwehFm>d1;;;eWuDs9~}U^jd6uHW5b!dA$J6 zi38|QWua0VogM4{63Qg!MO3PY=NSKRha;Kehi%X{q}EtE2jDeV4&7&tuA7Gm1@=F&P0E zhqS>~tbYSL=-~lHYdiIA8mO?7J{?orbj)hca+Zzryo_Hr!KZpjsO<($*=R~Qvzg~J zs!t9HUwaLo(_@b!9ue=;=*wTMawduquJRugwp^D#(eDA1sabt| ztf_-fL(H3642y;k%4Z|VPPmeiK??I4N%n8-xqoXQR)kYu)+~EJaNSjqZpwJ<-503% zJNt1QE%C8q4zs~FMK~9L)15e`Gg#XXtzvX)dP_!=9@tCk+;6Fs@Bak>lO5AS*6;sE zt)sKk^!|U;ZnX~f|Fg6^LY)rkYQx?GDr?DvFzoxHxoFp-_-E|dQEM$yCb4+XaC|=~XQtbq zC`mOlAR~b4p?0I)N?5kaCQ#K785)W1@q!H@8gClIkE1cqasDeOxni?55_XT&lCgl7E5p z3M5lO$P&uygf2N@8T)XPpcSNyBtBN|ZPK`nK6g4@W5v1vTN(C4&eRz+evXhjk9N=~ zpptmdH7Cfm<4c4^V#29d$rdAyZQYnBD-pX*W+H~4d5*=9gFfLk>*;0HNGCuv;!M1x z(hApU2h|gkYx+l7n&|N{_ii2T&VMFoiD&5Xv2G^76;P3%{n1SC0|;S06G<^p(_Xh0 z=STJ2Lv6oe$KfEv`S(9ubi1S8c>Iq7Ko_tB+=4JoY%@d!S7ZSaC8;cs=k23bOBB3^ zd&#k7a^kFLk3Uaujw@gt$HzXge)q-sX=@$CB;Z1xw_8O(VX)7dDdYb9XMaW+AATNz z?PB7YuwRY=po{DmFoGY>aOxd05x5R&hAE^C!R))N3=I>hqnpGdPBTqQ#zIFPbtE$< zAto4ZBIr>*a@-8}iZ76sLwMqK94to#c1Frt3;{FjCM=NlQIg?k(xUjgWzvW%D2r6Y ze!se64eI=|O#|Q~EmV%jC zEB|RcyNLf90avYjb?hMiDQ-7Pzs~ioAoKcUvR6^hW6aB&hS^cDcf((8yR#N$A{VAX z6z@Mk9iI^@&%giSa@@b|4Z6eY{@|u)M+~#JR*OZKXf;}$VzP-GuYY37#*4Iyx`i0b z)96TAw1I1fq-AmsW%;qayk>28hn*v%vG^GDWWhePME72D8$CISE}^$0nWcDe4ie_Gbg}N9LW+nPfI| zJLu+exCWB_HZlz$t4Q}h-MPMGX9Wb2A*@4oBDj`gbw?ycSE~%^)q@ zUp8=9g~9Fl+lIZgP`$st9d|$6UR}Sxz3P4LT~(!;PZTVf2KYVJ5lZKwx=vgfOKjh1 zC0IHYhQZ$8bANwyJ^0ug+-R{MUfld#avlO{C(3o2nnj`LXu^&E2u3$g&}Qp#6?uEM zGgG#6W}tanAN_;hDilQ^(i=v zKG!j_pYvi-#DaDRqSGQ08UkSs^7=3)zrWcKb} zE5uvq=87P9cdI>-R<-j*RmWMO+wD1=X@9rom^v<4MuAy`1_CVLobhv=bpH33^;bo6 z|E(X^V^&L+n_;xT$z8~wK3V80l!yF!EmZMyCd^>cX6NRO!?^#ugzI?4@|v4WuNBNr zJ4dl9-haqrUN4u2Ft^zq_Qp>6F0c{VhAm-J%rO(#DUbLRiEOrWVK6tr!AmDi{DoSD z{GZUY-u+6LP5J+)XJ?uGpH{p5=8*sM9L=(C-8@rhmzTwp)#p{oNhVQL3=dLO5^;=)_sFM!d{Xa(w^Zzh-jE-T4^gk^f`%L`H zAb;FRZ3Mc)dMr7XFbm7Xl}$|O%oJZoW>B_}JeZ*0il9EKYOXgs1q%V>)Sir?0 zTkyrQ1q%oE5(#}F3yh})rQmLcB7nHqJe?vq>~~Y{r1g1(2|$O6XjhMNz@qcTHVK4^ z#a&Yeb2(>b{@Yk zeJmx3<)G(1nL1mr*3&SX*SmS}?dCV?@$5CTQO(Tdn(W5#eiSRZoW%0gVn5_?lz;nJ za~{LYJ&aMandf2ZBA5;(RcS>%!w4W|BJd~|? 
z@CX}X5s}-^z0tUTJ=lp@=6aw~Vt?6@9qnUiM;pg>w3ophZ9BSkVcs0gB6FaO(71PV z`|;vmx7WkoK*vmP2cJIfG`8FgOrMJvi&WW*?3)yGQ!f#1Ez3+v^QaiLDps3XL4s_e1Ff8RJmn* z{E*oD-<{a27iQI*HHBw!epER)r;+cPLtA_qbOm`UxQ%VUQn!&nUJ{DB>?na>gZlQ; z7{||ZVBvXY_0e=N+{bc${{0W#QU7yqbUW(3za3v+ez+Z8-29x!ke0DEnTK#6dl8~m zXm1+K8!j2&??56f9&_Nl7=Kkpz3-UYcv{8s-6<&`vEvsEw-{GK_)H`?96EY@Z2E36 zBUCg?LnuW|e3hhvxO3`@tVfXq-Bkw`EX6}L!8247n0;brQgMJXiqe9Ol69F&I4vs!j-Z!u=A zQY}Be!#9U8d&%#*sU-Ry=V;(}u(Ytg%GvrtMr)1ft z0t36%Bbf;)twh?gP=94BbAPuIl|!M(p-?1;s$^T-j5WPG9x72P@wZDQO4@3uF0NAo zu2UkXnV;Ls1L)4ix)wgzT0QAs8Y<%OP%HB_YGrnfu{=kO&APK>;W>>nAJ4G)@y55r z(P`9L%N4Sq`)I@c>y`SX5qYV|Slrp}={^=t{E|38Jz}bFb4FT{3BU%n4k`&Sn&Xm{`T{_gg2G@s1Q$NNqcRR&pYu zU#~d$Ps;r#WfA(+1lC`E%h^hlQp}gS*sAYidtZakySe2)d}#?(L0olhIZYCDV{CO3 zrc8`=O?-7ziGNQ5$2!m_xvSGh$ql7jYYd;=S=@<(61(Tg6LlN^!00Y)m3rdWGxnr< zx;55Y<>^S}HmrWsDlHzRX5OKo?_~=5wl~W*NMWV@p}IKc_77{VWww^JGGnhwvFTr{ z3bMTZhflC=?d1~i4fTI#$7jdc`@i3`5BGmROUnqb`hVyRb1|j9xIZlZGe7e)VlJc# zgfuh`;9_7FSGzdpat-W2AF}YzeaVJ2>q`%q1AuPJgNUVYTpOlZwG?<4$jI{ zO>I*Vt$%dwW)bBO!gjF`6~|d|Ot1hdcbK_wG_O(JFC({kShgT)v)b!`o`le5j5~xT zmqZZ_T^qNTNvm0wRm-o|w}d)`&c4Q?&0KzLO+r$;m;45k|9~ySRcnSaPF3&JVsl`d zE5fUkBr0tay6p2R?Xru_)`UqC5kCZe)J+N(ZNU!?SFttKZ*cX~dh6#lGLiI9gf{aeX| z>_LLJo0tEAJ$5HSsUp)44~N@>3R>0k|5Gjj*>L`EA0HoO@BcYDIXj&HpQCL!|3B+S zpnvNSzNIbTg%p2s9=;WopS(EXS(JT&FdWqU{#&{J4+Q`l0eFfhc$5Bb<$nM1q;>HB zK2OU9Fw%xzyc1+H7Yh~1+7sFa8*=-Lqqb0j+ulu7?OvkPMbBv%~NIKTCTt{T~~Y2hD%b c{D*dEhjwU(_RQ`70ssL2|7YSXOaQ0=0E4l&rT_o{ delta 5893 zcmV+g7y9VYF0C$*On;vIkXAE(;aEzli6d()C%yZ3Iv$8zNjQ@P3xJAM*Wdj&07&r( zlqgxTb3NfrVsf#&02hnJK0p(fPAr=f%VA^zta&lVOf;6*bHCeLYqeUf)8k|HZ>!bH z|J!Px9(~t7J~`^NJMEJiyR5(xF|{Ev_Udh5~&8Vkzs4A8}RfVA+`eC$CNo0DcNMG8XT*-ClHzk|?qN7NA=oqwaHX|cF_hJ~LR?FzGe4V?V3 z%?vZ$Aw{a)_-WJZ)@+xCPP9vm!ih`l3;Cp0d-V!k!xR|!HXuO}^#M_aJjwuZuo(z0 zA5Q><931pz4!$Bo{25Xr$|VX0(=M6|5%6v^#dL$vrbBJs6qw)frcM1RnS~6jfI5~< zeSwJ&tbZx-SqhmIFtWe`;wfD=YwGzZbltJ$ri*_4t@a98NbhOhGWvMS53tlAL+nAV z#(^z}4d=E^Ltn`GwHaUmT_jit=oR`!4n_X|HHu&n*1#NnA+9UOth+d9N*P+xuz|)P zwg04$}er{wYYj{ag1Ie$t%w^ z_>JZ~rcarg{zzz3=#&`lfT+eM*v?OiM87f=lxSQ?PHkynNYvQ(slf6tB@@_j)IY9^ z#2ipS9bOBlbDltT(ZgdAjf*GXuETJt0CZ<$S?D6RAAByYDfKZWQ;3IbK^A}*a2hfj5T8>f zs!G32eH#LiEC4k-O}Pkyh_n?j08iO+&03J=XQ;jiUvdGSWxF94z$}{)L2T?w>SM4& z39aM}?gg4jPUU>kAfQZyzI-VtVxf;#i+>Jbj(qAfN(GuM5eFgUlE!k;D|8nco1`#V zBDlw1-~#H(zjGj6^dNDc^G{rgdb`zXqsLki=&@EyEs2$|8J32WOR1hN)O;nYx{I(> zV3vBR_Zp`hng`g1E|MR_((_kCQr75~z8A>ZUG-Us|D19$1o4HkyOo|0bHZ2iV}H}o zb>xC_2*(o_9H|5n#*sro&Z|HU1mHV}`sm|$5O=R3DY1;$d_Y|Ohz{T2FG{zok3p{o^A{P05aGjg#H$t#!6q|CcrbfR!+N#>fIX}_0z zc|`E|FUh>8_NJ3o>m!MtGWb0No_~h2gHTF=SLoJ>dl|-CPOSYzVu^)RPNVZg8u^X< zYfT-VPKjL3k&2Vre+*K!QWiZD)CCr4Uxmd&cd+cDy4ooib)k{fP@a;YdPI>1vbV%m z@81(Hc>DsUQ?NxB)rWL!&%p^@s8_-tEsj76<5#C*J%LP3ZLLMMBp<(+LVr?k2fj;u z_)OiVCshpE)D(9%6G{Bcw-@0@% z4LOV$y0!@HdE{%G-aojkmo4R!j{W?jVKY*HvbejSU~S>VNe&?p2G->c^5T3WxC@)>h~Ms!B}7~ z82AdLM^d`jllIpLm`6C*)==xsn>R(>`>fiq^w9Id3izo+@E`t^f5F0@J9PGSN3@d? 
zHtHh1p3WufAJr*E%Ux{>ScFW;!j!*fGz{WX-8Eq67e)kJ`8G%u@PBT6vkYK`>pcjq zC&mKHvyzs@nznnp83VszbJ9@iD{>`h8(JE1=QEJZ)FqE${P#GVPRV^2)vv~5YYwg_ z->Uxa>A3gda-}Kib@U3YbP&>xEBW{bpGt+o|M432`foqIe~kvicUP~`FXz|8!SMZS z)W5#Ix|U{`w522UqJNl>50bxYF7d;AIU1=-T@YQ=TwvC8$)p)Qsi`$B80{hlCItJn znl$QMik+3xRBD=lF9yAc%q#RORW>+we?x<*GNRAVdvdW$!%H7v)Cr1Nm~%4o!9gDO z@k|J1xS3=pUZQZ0AC&wF;|ZwD_#*4@)XMDsZxOqdVlB(5pxB$9o2}$peN-o z!lj1w(vGY{+o|RKzXH5K`R5xB9JliR-;>jm-f`e*a; zu~t)H{rdp>jxrkj$PKLQ43#pV;2s_<c)c+N4wF~87*1Tx$B&$*ASz1IYt}t09 z9BW5!qh@AYfh5ENMtm_v^}liJZ@iuzfc}4UR78I&|C!WM_5W8#_1pR`gQ1>BrM;#V z8!uxr27k~FX^qXS1H0(q0mW-O^KBZ)*h!s^eOq_TYtM3)we!4;UpK|4d`W!U4V|*l zlyGKKFQC3UIi!5;MYsSnVbST`fu#HYamvHGhbFO`yh1PRgi8< zS>oO2$o!pyB#xH)*a?UEV4Ead2*9aM4Er-!+YhZ`baHx2Mw1=bOY7Wksg>{lhJeYA zX(8+P|4yrOdXnA$JFPcw4)*`Ev^(NEUDQ*Cy$8Oir4zz%5b)-rU5n$NiDyTywOE-X z;(y^s+k=LR{Lp14d<{jrh4x*k2&sHOL zYo+^WPhSk$i2X)Q<(RJv7Oui38h@mnOMghNqV%x_&uwJM`!z#IZ2(!+sT6{wPb&MM zYuGw--Tp*rs<{Da0hAAwv(504VjpjceC+j2(_T-eFl***QZ~~jn;&7Jcb0g`r^|%2 z%@UacN&02;XNGy@l6{@RmSD&@##bLcYl2bUMar97CTMAOkOaI_g0w!a898<{bAMAV z)p=s+Kzap|nIL2d<#j@rp0J#KxJl3qDI=+mRd|~=ZsX6LP1jhl&cT+3{fJR{22GwL zB+sK=bRGJFc+k@)$hG53g+&s=nOMmdBTj7HgeNN%yG>^zMxS|(`G|o!;Wg{oW!1;8EB4+EeJ*a2oilqR+rqJk^ZfJ90v4aHfz z(`s?Ud$<=2TRJDsiud@l?B=)vR&jhB5bO7VpPjVUK}CYp`8RJRSBcFaZ7{{{^(*M>Cvxhjaw4i<(giX@gVyE-yo)RO;v^@kr84vy!n; zkw+EDEJ%nBhU*A=oR6F|&AsFcgyj&HdYuHzk-^T$Sc?&0j@^_6((a@gjwdaSzgs$u zxPr3CMC|vgE7qVcF54{dj(?hBUlgn8iL6E_6G#oIzQ$_B9`OQz&R4Ml%EX!3u*q_p zu4;SOXZGYGsT@8jerX4ZL+FC6ZRif2sb$`udQ#oE{%G!&x zin>J@^waoAYTCe+L(NEjbC{+6D@f?dI@alYPf?F(wLfc!KXUH^ zMg_I0+eJ4Qqg~j@%73nras*P5vN54fw<#uW$lzwqfX}Jx=)A_H&4mcwgXrcdY1LdZ z0cvcfcf==xV7CV@UXH=0zQeodsFi_ExTv47;@&;X~hcIE6D-o>d_ zsVvR>K+;OmY}AH6x$icc<)%cmwfT_OHc65hs@L;^7c=nl&CO_hJG#F5*T2p5i&`v& zH;2@?zii;J3WMA8w+(w)p?ZIHJMMkBy}WvVd)fcozpP5Nm?&5>4e)!SBc#qlb(Od> z7TCU1O0aY)G=GEr;pf5i)$n6~c%#IAbbj-5$$1EbohsL9Di*nPY{f;L-^ ztH|54otg5Ta|1OZO4jmzzY6E>8e>;-#<|-R<-j*RmYjp?e-kbw7*+(>^m-4T7g-_4>?%C7-eTF>HP06 z>#vIE{#!q+C#;q%H_fQQ$!+9Moh(!p%0qFz8ddz9ay^)=*|~Y+(C+^pVJcp+yka`j zYX!5@&VON6#T$7n>gDnf<~F;--qRGW$>;yHTb)Dx&vP`(vN9<@T<7dJ?tav{i|v}u5V{x-bUokt zWT334FPGx8L+%b10^9Rz_&p>HoTTV6pw7Ub(tj&IUC>r2V@oj>g})jZ2+gA2`Yy>7 z?4|9(N9w?;o^B-Pg$sR;ihR%y=HLOt?q5U+RapN%b>K-CLakr_N3GL0`TO7AoE#t4 z|Fg6x{||#l{W0v2{->m4pNW4tggdE?P*qru1;YYnQJJ{3iHSec#n-VJlrLn@^O1cY zbAJ@2q@)q5JqH8n`%Q3%*#kpmAU?kMD$(95`! 
z*5?tX03DylyLy}h7N0M+Ng$Lg?wUGS$T`z1XZLKnB-b-0Es>oUp6c5Jqyf|m8S!V) zWg^6%4Q4)FJ?`Iw9ja?OGiSUsgEzoDo_`OtSXx*0?*j(W6%vLpN^~`f(ANMITXOTj zkcxqCgo$eOjqfCd1z{q@u9Z~RCsLAH4r<=hsk4T)o`u=G-t~iTH@{JhXRn!!a%L{p zWH(0lqge6fBvv37`yq#;+{aq*80PL_jGIk85BpBR9ox)KbyI$!2A`*WqNVZ`Nq_Hx zxG<9krJT#Y)HT}cza5+pZ{J;C4R89xp4mlO_o+{O0Sw!MEDA}A8q;7nzB#|VynTCl z_4eJTu`v*woqHwInG1ayxl4Hr9$`%^CUX0^e?1;t4R<1zyB?^NSbk)keGIL$acrHv z46d{7=+=dK3p7pUKpCNN|K|4N`G3D|uSWf$ikaRHKYiS3Y=s+`N@+@ZPa@@2qthFV z&);75Z$FKC=QsV{+ns3Tu7}DI+Z;-ewv!d|b%6v4(#(V8`&%xx@wymgjHOuw%-{lH zZ^Dv*tjn~|<<2Jfo+GJp%lP;qvG>0_u~#k3syS;C&*H38IXI_~@0vqvzJCn5g1i}S zW81IPZ6r{X1XGtCCGcxd-(DJ%_;~>=I?t>=nhJ&oSj^A9|Dkt1_}ss~z3#ui9ba91 zxE-C}{9MG4m9aIMhj5>G5#naFH#O!BmyGXsATbt?I&fx2l~eCK<~E*Gv3z$*3P|nv zhT)oVrG!sMf}^3M$H!*ihJQ2S^JZxX#l*x{Nh(M>XTHdK6lu_1b)aD>9;ykRp_)MN zbNi|z$R1tOJ+tSEwMg`7B8RKMbG0)^-BNmuIA`Dg@Zr+9l9yCtO{RtA|svY%_s@-RhC_gp^hyZCR+YmASuLiOQi+aS7VC4XG~t4wJ5A3aa)T#SSwUjpj9~2W8|o56k96ZdQ98(323_lro3- z=_O&Jp=#syGHEr-vTDWE`j+?(@n>IS(dI5cwk8p&-AjJM$$!8W(W=!$8E2|@YKb|p z&3_f)RZ0@2wuxN!MV0py{S}MFnt3n#cwbf3Mi=V*H>lYPDq_!C_DK|)`seNPgmxT- zS+71#p+38IlJfoBx@kWG{ufY6xrwN(koKyNzTM<<8#|8ukr=l^Hj z2vi-yx3mSkkm66z!?&XH(-$W^i?TNe!$HmOzm@C%NC2=FfTwtZH|hUY;rAbpTL=H| z^Rz+$BWvg-J3%gU(Wpq)p3pYfklSAzwS^Mg_HLqT_Y!3;|5?{fqqm5YnUJ+yY);}> zfRC$itl>72PNRF+^!vF7)%w?I+w1@BPXZhD|H+%2{_mU|AAbM;S=x)~|HPm?X#Rud bKeR(Tv_m_zXKw!&00960bk( Note: +> - Available kubernetes version >= v1.20 + +## prerequisite + + +### 1. Create a cluster with oidc-issuer enabled and get the credential + +Following the [documentation](https://learn.microsoft.com/en-us/azure/aks/use-oidc-issuer#create-an-aks-cluster-with-oidc-issuer) to create an AKS cluster with the `--enable-oidc-issuer` parameter and get the AKS credentials. And export following environment variables: +``` +export RESOURCE_GROUP= +export CLUSTER_NAME= +export REGION= +``` + + +### 2. Create a new storage account and fileshare + +Following the [documentation](https://learn.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-cli) to create a new storage account and container or use your own. And export following environment variables: +``` +export STORAGE_RESOURCE_GROUP= +export ACCOUNT= +export CONTAINER= +``` + +### 3. Create managed identity and role assignment +``` +export UAMI= +az identity create --name $UAMI --resource-group $RESOURCE_GROUP + +export USER_ASSIGNED_CLIENT_ID="$(az identity show -g $RESOURCE_GROUP --name $UAMI --query 'clientId' -o tsv)" +export IDENTITY_TENANT=$(az aks show --name $CLUSTER_NAME --resource-group $RESOURCE_GROUP --query identity.tenantId -o tsv) +export ACCOUNT_SCOPE=$(az storage account show --name $ACCOUNT --query id -o tsv) + +# please retry if you meet `Cannot find user or service principal in graph database` error, it may take a while for the identity to propagate +az role assignment create --role "Storage Account Contributor" --assignee $USER_ASSIGNED_CLIENT_ID --scope $ACCOUNT_SCOPE +``` + +### 4. 
Create service account on AKS +``` +export SERVICE_ACCOUNT_NAME= +export SERVICE_ACCOUNT_NAMESPACE= + +cat < +export AKS_OIDC_ISSUER="$(az aks show --resource-group $RESOURCE_GROUP --name $CLUSTER_NAME --query "oidcIssuerProfile.issuerUrl" -o tsv)" + +az identity federated-credential create --name $FEDERATED_IDENTITY_NAME \ +--identity-name $UAMI \ +--resource-group $RESOURCE_GROUP \ +--issuer $AKS_OIDC_ISSUER \ +--subject system:serviceaccount:${SERVICE_ACCOUNT_NAMESPACE}:${SERVICE_ACCOUNT_NAME} +``` + +## option#1: static provision with PV +``` +cat <> /mnt/blob/outfile; sleep 1; done + volumeMounts: + - name: persistent-storage + mountPath: /mnt/blob + readOnly: false + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: nginx + volumeClaimTemplates: + - metadata: + name: persistent-storage + spec: + storageClassName: blob-fuse + accessModes: ["ReadWriteMany"] + resources: + requests: + storage: 10Gi +EOF +``` + +## option#2: Pod with ephemeral inline volume +``` +cat <> /mnt/blobfuse/outfile; sleep 1; done + volumeMounts: + - name: persistent-storage + mountPath: "/mnt/blobfuse" + readOnly: false + volumes: + - name: persistent-storage + csi: + driver: blob.csi.azure.com + volumeAttributes: + storageaccount: $ACCOUNT # required + containerName: $CONTAINER # required + clientID: $USER_ASSIGNED_CLIENT_ID # required + resourcegroup: $STORAGE_RESOURCE_GROUP # optional, specified when the storage account is not under AKS node resource group(which is prefixed with "MC_") + # tenantID: $IDENTITY_TENANT # optional, only specified when workload identity and AKS cluster are in different tenant + # subscriptionid: $SUBSCRIPTION # optional, only specified when workload identity and AKS cluster are in different subscription +EOF +``` \ No newline at end of file diff --git a/go.mod b/go.mod index a56afdd05..aee1261cd 100644 --- a/go.mod +++ b/go.mod @@ -31,7 +31,7 @@ require ( k8s.io/kubernetes v1.28.5 k8s.io/mount-utils v0.28.4 k8s.io/utils v0.0.0-20231127182322-b307cd553661 - sigs.k8s.io/cloud-provider-azure v1.27.1-0.20231208022044-b9ede3fc98e9 + sigs.k8s.io/cloud-provider-azure v1.27.1-0.20231213062409-f1ce7de3fdcb sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20231208022044-b9ede3fc98e9 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index a6fcad5ed..a1f1b1e8a 100644 --- a/go.sum +++ b/go.sum @@ -473,8 +473,8 @@ k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6R k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0= -sigs.k8s.io/cloud-provider-azure v1.27.1-0.20231208022044-b9ede3fc98e9 h1:UybRilKUwfcg3CZh51++O/e6ppBRdT/UY0TjGJfWkPw= -sigs.k8s.io/cloud-provider-azure v1.27.1-0.20231208022044-b9ede3fc98e9/go.mod h1:BsbV0DptIzi3NdbPXIxruq9TRI4RSp49eV4CFXBssy4= +sigs.k8s.io/cloud-provider-azure v1.27.1-0.20231213062409-f1ce7de3fdcb h1:YApm24ngCVkpQTUxu0/wYV/oiccfqWEPZnX1BbpadKY= +sigs.k8s.io/cloud-provider-azure v1.27.1-0.20231213062409-f1ce7de3fdcb/go.mod h1:UkVMiNELbKLa07K/ubQ+vg8AK3XFyd2FMr5vCIYk0Pg= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20231205023417-1ba5a224ab0e h1:U001A7jnOOi8eiYceYeCLK2S3rTX4K2atR8uNDw+SL8= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20231205023417-1ba5a224ab0e/go.mod 
h1:dckGAqm0wUQNqqvCEeWhfXKL7DB/r9zchDq9xdcF/Qk= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20231208022044-b9ede3fc98e9 h1:0XsdZlKjVI0UZYhvg3VbXCPFRRQS5VL1idrTKgzJjnc= diff --git a/pkg/blob/blob.go b/pkg/blob/blob.go index b499232f1..a6c722a68 100644 --- a/pkg/blob/blob.go +++ b/pkg/blob/blob.go @@ -96,6 +96,9 @@ const ( requireInfraEncryptionField = "requireinfraencryption" ephemeralField = "csi.storage.k8s.io/ephemeral" podNamespaceField = "csi.storage.k8s.io/pod.namespace" + serviceAccountTokenField = "csi.storage.k8s.io/serviceAccount.tokens" + clientIDField = "clientID" + tenantIDField = "tenantID" mountOptionsField = "mountoptions" falseValue = "false" trueValue = "true" @@ -446,6 +449,9 @@ func (d *Driver) GetAuthEnv(ctx context.Context, volumeID, protocol string, attr authEnv []string getAccountKeyFromSecret bool getLatestAccountKey bool + clientID string + tenantID string + serviceAccountToken string ) for k, v := range attrib { @@ -495,6 +501,12 @@ func (d *Driver) GetAuthEnv(ctx context.Context, volumeID, protocol string, attr if getLatestAccountKey, err = strconv.ParseBool(v); err != nil { return rgName, accountName, accountKey, containerName, authEnv, fmt.Errorf("invalid %s: %s in volume context", getLatestAccountKeyField, v) } + case strings.ToLower(clientIDField): + clientID = v + case strings.ToLower(tenantIDField): + tenantID = v + case strings.ToLower(serviceAccountTokenField): + serviceAccountToken = v } } klog.V(2).Infof("volumeID(%s) authEnv: %s", volumeID, authEnv) @@ -516,6 +528,21 @@ func (d *Driver) GetAuthEnv(ctx context.Context, volumeID, protocol string, attr rgName = d.cloud.ResourceGroup } + if tenantID == "" { + tenantID = d.cloud.TenantID + } + + // if client id is specified, we only use service account token to get account key + if clientID != "" { + klog.V(2).Infof("clientID(%s) is specified, use service account token to get account key", clientID) + if subsID == "" { + subsID = d.cloud.SubscriptionID + } + accountKey, err := d.cloud.GetStorageAccesskeyFromServiceAccountToken(ctx, subsID, accountName, rgName, clientID, tenantID, serviceAccountToken) + authEnv = append(authEnv, "AZURE_STORAGE_ACCESS_KEY="+accountKey) + return rgName, accountName, accountKey, containerName, authEnv, err + } + // 1. If keyVaultURL is not nil, preferentially use the key stored in key vault. // 2. Then if secrets map is not nil, use the key stored in the secrets map. // 3. Finally if both keyVaultURL and secrets map are nil, get the key from Azure. 
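The clientID/tenantID handling above relies on kubelet handing the driver a bound service account token in the volume context under `csi.storage.k8s.io/serviceAccount.tokens`, which only happens when the CSIDriver object requests tokens via `tokenRequests`. The value is a JSON map keyed by token audience. A minimal standalone sketch of decoding such a value is shown below; the helper name `tokenForAudience` is illustrative only and is not part of the driver:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// tokenForAudience decodes the JSON document that kubelet places under the
// "csi.storage.k8s.io/serviceAccount.tokens" volume-context key and returns
// the token minted for the requested audience.
func tokenForAudience(raw, audience string) (string, error) {
	var tokens map[string]struct {
		Token               string    `json:"token"`
		ExpirationTimestamp time.Time `json:"expirationTimestamp"`
	}
	if err := json.Unmarshal([]byte(raw), &tokens); err != nil {
		return "", fmt.Errorf("unmarshal service account tokens: %w", err)
	}
	entry, ok := tokens[audience]
	if !ok || entry.Token == "" {
		return "", fmt.Errorf("no token found for audience %q", audience)
	}
	return entry.Token, nil
}

func main() {
	raw := `{"api://AzureADTokenExchange":{"token":"<jwt>","expirationTimestamp":"2024-01-02T15:04:05Z"}}`
	token, err := tokenForAudience(raw, "api://AzureADTokenExchange")
	fmt.Println(token, err)
}
```

The vendored `parseServiceAccountToken` helper added later in this patch applies the same decoding with the fixed `api://AzureADTokenExchange` audience.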
diff --git a/pkg/blob/nodeserver.go b/pkg/blob/nodeserver.go index f7044c102..37c1f8860 100644 --- a/pkg/blob/nodeserver.go +++ b/pkg/blob/nodeserver.go @@ -81,6 +81,19 @@ func (d *Driver) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolu mountPermissions := d.mountPermissions context := req.GetVolumeContext() if context != nil { + // token request + if context[serviceAccountTokenField] != "" && getValueInMap(context, clientIDField) != "" { + klog.V(2).Infof("NodePublishVolume: volume(%s) mount on %s with service account token, clientID: %s", volumeID, target, getValueInMap(context, clientIDField)) + _, err := d.NodeStageVolume(ctx, &csi.NodeStageVolumeRequest{ + StagingTargetPath: target, + VolumeContext: context, + VolumeCapability: volCap, + VolumeId: volumeID, + }) + return &csi.NodePublishVolumeResponse{}, err + } + + // ephemeral volume if strings.EqualFold(context[ephemeralField], trueValue) { setKeyValueInMap(context, secretNamespaceField, context[podNamespaceField]) if !d.allowInlineVolumeKeyAccessWithIdentity { @@ -241,6 +254,11 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe attrib := req.GetVolumeContext() secrets := req.GetSecrets() + if getValueInMap(attrib, clientIDField) != "" && attrib[serviceAccountTokenField] == "" { + klog.V(2).Infof("Skip NodeStageVolume for volume(%s) since clientID %s is provided but service account token is empty", volumeID, getValueInMap(attrib, clientIDField)) + return &csi.NodeStageVolumeResponse{}, nil + } + mc := metrics.NewMetricContext(blobCSIDriverName, "node_stage_volume", d.cloud.ResourceGroup, "", d.Name) isOperationSucceeded := false defer func() { diff --git a/vendor/modules.txt b/vendor/modules.txt index bee5b5a1c..67c66faaf 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1541,7 +1541,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client -# sigs.k8s.io/cloud-provider-azure v1.27.1-0.20231208022044-b9ede3fc98e9 +# sigs.k8s.io/cloud-provider-azure v1.27.1-0.20231213062409-f1ce7de3fdcb ## explicit; go 1.21 sigs.k8s.io/cloud-provider-azure/pkg/azureclients sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go index 5d1ed7351..8009029eb 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go @@ -41,7 +41,6 @@ import ( corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/flowcontrol" cloudprovider "k8s.io/cloud-provider" cloudproviderapi "k8s.io/cloud-provider/api" cloudnodeutil "k8s.io/cloud-provider/node/helpers" @@ -49,6 +48,7 @@ import ( "k8s.io/klog/v2" "sigs.k8s.io/yaml" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader" azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/blobclient" @@ -279,7 +279,7 @@ type Config struct { // ClusterServiceLoadBalancerHealthProbeMode determines the health probe mode for cluster service load balancer. // Supported values are `shared` and `servicenodeport`. 
- // `unshared`: the health probe will be created against each port of each service by watching the backend application (default). + // `servicenodeport`: the health probe will be created against each port of each service by watching the backend application (default). // `shared`: all cluster services shares one HTTP probe targeting the kube-proxy on the node (/healthz:10256). ClusterServiceLoadBalancerHealthProbeMode string `json:"clusterServiceLoadBalancerHealthProbeMode,omitempty" yaml:"clusterServiceLoadBalancerHealthProbeMode,omitempty"` // ClusterServiceSharedLoadBalancerHealthProbePort defines the target port of the shared health probe. Default to 10256. @@ -382,6 +382,8 @@ type Cloud struct { PrivateLinkServiceClient privatelinkserviceclient.Interface containerServiceClient containerserviceclient.Interface deploymentClient deploymentclient.Interface + ComputeClientFactory azclient.ClientFactory + NetworkClientFactory azclient.ClientFactory ResourceRequestBackoff wait.Backoff Metadata *InstanceMetadataService @@ -601,13 +603,11 @@ func (az *Cloud) InitializeCloudFromConfig(ctx context.Context, config *Config, return fmt.Errorf("clusterServiceLoadBalancerHealthProbeMode %s is not supported, supported values are %v", config.ClusterServiceLoadBalancerHealthProbeMode, supportedClusterServiceLoadBalancerHealthProbeModes.UnsortedList()) } } - if strings.EqualFold(config.ClusterServiceLoadBalancerHealthProbeMode, consts.ClusterServiceLoadBalancerHealthProbeModeShared) { - if config.ClusterServiceSharedLoadBalancerHealthProbePort == 0 { - config.ClusterServiceSharedLoadBalancerHealthProbePort = consts.ClusterServiceLoadBalancerHealthProbeDefaultPort - } - if config.ClusterServiceSharedLoadBalancerHealthProbePath == "" { - config.ClusterServiceSharedLoadBalancerHealthProbePath = consts.ClusterServiceLoadBalancerHealthProbeDefaultPath - } + if config.ClusterServiceSharedLoadBalancerHealthProbePort == 0 { + config.ClusterServiceSharedLoadBalancerHealthProbePort = consts.ClusterServiceLoadBalancerHealthProbeDefaultPort + } + if config.ClusterServiceSharedLoadBalancerHealthProbePath == "" { + config.ClusterServiceSharedLoadBalancerHealthProbePath = consts.ClusterServiceLoadBalancerHealthProbeDefaultPath } env, err := ratelimitconfig.ParseAzureEnvironment(config.Cloud, config.ResourceManagerEndpoint, config.IdentitySystem) @@ -719,7 +719,6 @@ func (az *Cloud) InitializeCloudFromConfig(ctx context.Context, config *Config, common := &controllerCommon{ cloud: az, lockMap: newLockMap(), - diskOpRateLimiter: flowcontrol.NewTokenBucketRateLimiter(qps, bucket), AttachDetachInitialDelayInMs: defaultAttachDetachInitialDelayInMs, } @@ -1168,7 +1167,6 @@ func InitDiskControllers(az *Cloud) error { common := &controllerCommon{ cloud: az, lockMap: newLockMap(), - diskOpRateLimiter: flowcontrol.NewTokenBucketRateLimiter(qps, bucket), AttachDetachInitialDelayInMs: defaultAttachDetachInitialDelayInMs, } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go index 1248a57b1..48c9491ba 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go @@ -32,7 +32,6 @@ import ( "k8s.io/apimachinery/pkg/types" kwait "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/util/flowcontrol" cloudprovider "k8s.io/cloud-provider" volerr "k8s.io/cloud-provider/volume/errors" "k8s.io/klog/v2" 
@@ -102,8 +101,6 @@ type controllerCommon struct { // > attachDiskMap sync.Map detachDiskMap sync.Map - // attach/detach disk rate limiter - diskOpRateLimiter flowcontrol.RateLimiter // DisableUpdateCache whether disable update cache in disk attach/detach DisableUpdateCache bool // DisableDiskLunCheck whether disable disk lun check after disk attach/detach @@ -167,10 +164,9 @@ func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName, crt azcache.Azu } // AttachDisk attaches a disk to vm -// parameter async indicates whether allow multiple batch disk attach on one node in parallel // occupiedLuns is used to avoid conflict with other disk attach in k8s VolumeAttachments // return (lun, error) -func (c *controllerCommon) AttachDisk(ctx context.Context, async bool, diskName, diskURI string, nodeName types.NodeName, +func (c *controllerCommon) AttachDisk(ctx context.Context, diskName, diskURI string, nodeName types.NodeName, cachingMode compute.CachingTypes, disk *compute.Disk, occupiedLuns []int) (int32, error) { diskEncryptionSetID := "" writeAcceleratorEnabled := false @@ -300,17 +296,6 @@ func (c *controllerCommon) AttachDisk(ctx context.Context, async bool, diskName, return -1, err } // err will be handled by waitForUpdateResult below - - if async && c.diskOpRateLimiter.TryAccept() { - // unlock and wait for attach disk complete - unlock = true - c.lockMap.UnlockEntry(node) - } else { - if async { - klog.Warningf("azureDisk - switch to batch operation due to rate limited, QPS: %f", c.diskOpRateLimiter.QPS()) - } - } - if err = c.waitForUpdateResult(ctx, vmset, nodeName, future, err); err != nil { return -1, err } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go index ad2e808f4..f7817d418 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go @@ -2152,6 +2152,8 @@ func (az *Cloud) reconcileMultipleStandardLoadBalancerConfigurationStatus(wantLb } func (az *Cloud) reconcileLBProbes(lb *network.LoadBalancer, service *v1.Service, serviceName string, wantLb bool, expectedProbes []network.Probe) bool { + expectedProbes, _ = az.keepSharedProbe(service, *lb, expectedProbes, wantLb) + // remove unwanted probes dirtyProbes := false var updatedProbes []network.Probe diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_healthprobe.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_healthprobe.go index 9cf7bce13..b754e0667 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_healthprobe.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_healthprobe.go @@ -291,3 +291,42 @@ func findProbe(probes []network.Probe, probe network.Probe) bool { } return false } + +// keepSharedProbe ensures the shared probe will not be removed if there are more than 1 service referencing it. 
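+// If a load balancing rule owned by another service still references the shared probe,
+// the probe is appended back to expectedProbes so that reconciliation does not delete it.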
+func (az *Cloud) keepSharedProbe( + service *v1.Service, + lb network.LoadBalancer, + expectedProbes []network.Probe, + wantLB bool, +) ([]network.Probe, error) { + var shouldConsiderRemoveSharedProbe bool + if !wantLB { + shouldConsiderRemoveSharedProbe = true + } + + if lb.LoadBalancerPropertiesFormat != nil && lb.Probes != nil { + for _, probe := range *lb.Probes { + if strings.EqualFold(pointer.StringDeref(probe.Name, ""), consts.SharedProbeName) { + if !az.useSharedLoadBalancerHealthProbeMode() { + shouldConsiderRemoveSharedProbe = true + } + if probe.ProbePropertiesFormat != nil && probe.LoadBalancingRules != nil { + for _, rule := range *probe.LoadBalancingRules { + ruleName, err := getLastSegment(*rule.ID, "/") + if err != nil { + klog.Errorf("failed to parse load balancing rule name %s attached to health probe %s", *rule.ID, *probe.ID) + return []network.Probe{}, err + } + if !az.serviceOwnsRule(service, ruleName) && shouldConsiderRemoveSharedProbe { + klog.V(4).Infof("there are load balancing rule %s of another service referencing the health probe %s, so the health probe should not be removed", *rule.ID, *probe.ID) + sharedProbe := az.buildClusterServiceSharedProbe() + expectedProbes = append(expectedProbes, *sharedProbe) + return expectedProbes, nil + } + } + } + } + } + } + return expectedProbes, nil +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go index d7602be72..52d296292 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go @@ -19,11 +19,13 @@ package provider import ( "context" "crypto/rand" + "encoding/json" "fmt" "math/big" "strings" "time" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" "github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns" "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage" @@ -31,6 +33,7 @@ import ( "k8s.io/klog/v2" "k8s.io/utils/pointer" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient" "sigs.k8s.io/cloud-provider-azure/pkg/cache" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" @@ -41,6 +44,7 @@ import ( const SkipMatchingTag = "skip-matching" const LocationGlobal = "global" const privateDNSZoneNameFmt = "privatelink.%s.%s" +const DefaultTokenAudience = "api://AzureADTokenExchange" //nolint:gosec // G101 ignore this! type StorageType string @@ -129,6 +133,62 @@ func (az *Cloud) getStorageAccounts(ctx context.Context, accountOptions *Account return accounts, nil } +// serviceAccountToken represents the service account token sent from NodePublishVolume Request. +// ref: https://kubernetes-csi.github.io/docs/token-requests.html +type serviceAccountToken struct { + APIAzureADTokenExchange struct { + Token string `json:"token"` + ExpirationTimestamp time.Time `json:"expirationTimestamp"` + } `json:"api://AzureADTokenExchange"` +} + +// parseServiceAccountToken parses the bound service account token from the token passed from NodePublishVolume Request. 
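+// The kubelet-provided value is a JSON map keyed by audience, for example:
+// {"api://AzureADTokenExchange":{"token":"<jwt>","expirationTimestamp":"2024-01-02T15:04:05Z"}};
+// only the entry for DefaultTokenAudience is used here.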
+func parseServiceAccountToken(tokenStr string) (string, error) { + if len(tokenStr) == 0 { + return "", fmt.Errorf("service account token is empty") + } + token := serviceAccountToken{} + if err := json.Unmarshal([]byte(tokenStr), &token); err != nil { + return "", fmt.Errorf("failed to unmarshal service account tokens, error: %w", err) + } + if token.APIAzureADTokenExchange.Token == "" { + return "", fmt.Errorf("token for audience %s not found", DefaultTokenAudience) + } + return token.APIAzureADTokenExchange.Token, nil +} + +func (az *Cloud) GetStorageAccesskeyFromServiceAccountToken(ctx context.Context, subsID, accountName, rgName, clientID, tenantID, serviceAccountToken string) (string, error) { + cred, err := azidentity.NewClientAssertionCredential(tenantID, clientID, func(context.Context) (string, error) { + return parseServiceAccountToken(serviceAccountToken) + }, &azidentity.ClientAssertionCredentialOptions{}) + if err != nil { + return "", fmt.Errorf("failed to create client assertion credential, error: %w", err) + } + + client, err := accountclient.New(subsID, cred, nil) + if err != nil { + return "", fmt.Errorf("failed to create storage account client, error: %w", err) + } + + keys, err := client.ListKeys(ctx, rgName, accountName) + if err != nil { + return "", fmt.Errorf("failed to list keys, error: %w", err) + } + + for _, k := range keys { + if k != nil && k.Value != nil && *k.Value != "" { + v := *k.Value + if ind := strings.LastIndex(v, " "); ind >= 0 { + v = v[(ind + 1):] + } + // get first key + return v, nil + } + } + + return "", fmt.Errorf("failed to list keys, found no key") +} + // GetStorageAccesskey gets the storage account access key // getLatestAccountKey: get the latest account key per CreationTime if true, otherwise get the first account key func (az *Cloud) GetStorageAccesskey(ctx context.Context, subsID, account, resourceGroup string, getLatestAccountKey bool) (string, error) { From 468d115d834f7bf1b138d701b5e8d868a550e43e Mon Sep 17 00:00:00 2001 From: weizhi Date: Fri, 5 Jan 2024 20:35:42 +0800 Subject: [PATCH 039/157] Update workload-identity-static-pv-mount.md --- docs/workload-identity-static-pv-mount.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/workload-identity-static-pv-mount.md b/docs/workload-identity-static-pv-mount.md index fbb7426a8..ceffb8004 100644 --- a/docs/workload-identity-static-pv-mount.md +++ b/docs/workload-identity-static-pv-mount.md @@ -16,7 +16,7 @@ export REGION= ``` -### 2. Create a new storage account and fileshare +### 2. Create a new storage account and container Following the [documentation](https://learn.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-cli) to create a new storage account and container or use your own. 
And export following environment variables: ``` @@ -175,4 +175,4 @@ spec: # tenantID: $IDENTITY_TENANT # optional, only specified when workload identity and AKS cluster are in different tenant # subscriptionid: $SUBSCRIPTION # optional, only specified when workload identity and AKS cluster are in different subscription EOF -``` \ No newline at end of file +``` From 30155d22e5c6dc88958c6d8461780a66ceadd01a Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 5 Jan 2024 21:11:12 +0800 Subject: [PATCH 040/157] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 7393e9455..c2c89e77a 100644 --- a/README.md +++ b/README.md @@ -63,7 +63,7 @@ This option does not depend on cloud provider config file, supports cross subscr - [NFSv3](./deploy/example/nfs) - [fsGroupPolicy](./deploy/example/fsgroup) - [Volume cloning](./deploy/example/cloning) - - [Workload identity](./docs/workload-identity.md) + - [Workload identity](./docs/workload-identity-static-pv-mount.md) ### Troubleshooting - [CSI driver troubleshooting guide](./docs/csi-debug.md) From 6dc3983de313b6c880ae30f04f7d9ef9fe59ef5d Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 5 Jan 2024 21:12:33 +0800 Subject: [PATCH 041/157] Update workload-identity-static-pv-mount.md --- docs/workload-identity-static-pv-mount.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/workload-identity-static-pv-mount.md b/docs/workload-identity-static-pv-mount.md index ceffb8004..18ca5d745 100644 --- a/docs/workload-identity-static-pv-mount.md +++ b/docs/workload-identity-static-pv-mount.md @@ -1,9 +1,8 @@ # Example of static PV mount with workload identity -> Note: -> - Available kubernetes version >= v1.20 + - supported from v1.23.3 -## prerequisite +## prerequisites ### 1. Create a cluster with oidc-issuer enabled and get the credential From d28d57bb36e2ea56525c3f31e7ba62b8ae9c8620 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Mon, 8 Jan 2024 10:21:18 +0800 Subject: [PATCH 042/157] Update workload-identity-static-pv-mount.md --- docs/workload-identity-static-pv-mount.md | 47 +---------------------- 1 file changed, 2 insertions(+), 45 deletions(-) diff --git a/docs/workload-identity-static-pv-mount.md b/docs/workload-identity-static-pv-mount.md index 18ca5d745..69b9a82e9 100644 --- a/docs/workload-identity-static-pv-mount.md +++ b/docs/workload-identity-static-pv-mount.md @@ -1,10 +1,6 @@ -# Example of static PV mount with workload identity - +# workload identity support on static provisioning - supported from v1.23.3 - -## prerequisites - - +## Prerequisites ### 1. Create a cluster with oidc-issuer enabled and get the credential Following the [documentation](https://learn.microsoft.com/en-us/azure/aks/use-oidc-issuer#create-an-aks-cluster-with-oidc-issuer) to create an AKS cluster with the `--enable-oidc-issuer` parameter and get the AKS credentials. And export following environment variables: @@ -14,9 +10,7 @@ export CLUSTER_NAME= export REGION= ``` - ### 2. Create a new storage account and container - Following the [documentation](https://learn.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-cli) to create a new storage account and container or use your own. 
And export following environment variables: ``` export STORAGE_RESOURCE_GROUP= @@ -63,7 +57,6 @@ az identity federated-credential create --name $FEDERATED_IDENTITY_NAME \ --subject system:serviceaccount:${SERVICE_ACCOUNT_NAMESPACE}:${SERVICE_ACCOUNT_NAME} ``` -## option#1: static provision with PV ``` cat <> /mnt/blobfuse/outfile; sleep 1; done - volumeMounts: - - name: persistent-storage - mountPath: "/mnt/blobfuse" - readOnly: false - volumes: - - name: persistent-storage - csi: - driver: blob.csi.azure.com - volumeAttributes: - storageaccount: $ACCOUNT # required - containerName: $CONTAINER # required - clientID: $USER_ASSIGNED_CLIENT_ID # required - resourcegroup: $STORAGE_RESOURCE_GROUP # optional, specified when the storage account is not under AKS node resource group(which is prefixed with "MC_") - # tenantID: $IDENTITY_TENANT # optional, only specified when workload identity and AKS cluster are in different tenant - # subscriptionid: $SUBSCRIPTION # optional, only specified when workload identity and AKS cluster are in different subscription -EOF -``` From bfd8358a4528830a56e1c1fccce18f961e6c094a Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Mon, 8 Jan 2024 10:51:29 +0800 Subject: [PATCH 043/157] Update workload-identity-static-pv-mount.md --- docs/workload-identity-static-pv-mount.md | 45 +++++++++++++++++++++-- 1 file changed, 42 insertions(+), 3 deletions(-) diff --git a/docs/workload-identity-static-pv-mount.md b/docs/workload-identity-static-pv-mount.md index 69b9a82e9..e94b74a5b 100644 --- a/docs/workload-identity-static-pv-mount.md +++ b/docs/workload-identity-static-pv-mount.md @@ -1,5 +1,8 @@ # workload identity support on static provisioning - supported from v1.23.3 + +This feature is specifically designed for blobfuse mount and is not available for NFS mount as NFS mount does not require credentials. There is a standalone blobfuse mount for every pod, it may cause performance issues when multiple pods are present on a single node. + ## Prerequisites ### 1. Create a cluster with oidc-issuer enabled and get the credential @@ -10,12 +13,12 @@ export CLUSTER_NAME= export REGION= ``` -### 2. Create a new storage account and container +### 2. Bring your own storage account and storage container Following the [documentation](https://learn.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-cli) to create a new storage account and container or use your own. And export following environment variables: ``` export STORAGE_RESOURCE_GROUP= export ACCOUNT= -export CONTAINER= +export CONTAINER= ``` ### 3. 
Create managed identity and role assignment @@ -56,7 +59,7 @@ az identity federated-credential create --name $FEDERATED_IDENTITY_NAME \ --issuer $AKS_OIDC_ISSUER \ --subject system:serviceaccount:${SERVICE_ACCOUNT_NAMESPACE}:${SERVICE_ACCOUNT_NAME} ``` - +## option#1: static provision with PV ``` cat <> /mnt/blobfuse/outfile; sleep 1; done + volumeMounts: + - name: persistent-storage + mountPath: "/mnt/blobfuse" + readOnly: false + volumes: + - name: persistent-storage + csi: + driver: blob.csi.azure.com + volumeAttributes: + storageaccount: $ACCOUNT # required + containerName: $CONTAINER # required + clientID: $USER_ASSIGNED_CLIENT_ID # required + resourcegroup: $STORAGE_RESOURCE_GROUP # optional, specified when the storage account is not under AKS node resource group(which is prefixed with "MC_") + # tenantID: $IDENTITY_TENANT # optional, only specified when workload identity and AKS cluster are in different tenant + # subscriptionid: $SUBSCRIPTION # optional, only specified when workload identity and AKS cluster are in different subscription +EOF +``` From 7f026745f2cbc49ce6cdb97b14669f2cc391d700 Mon Sep 17 00:00:00 2001 From: Fan Shang Xiang Date: Tue, 9 Jan 2024 18:00:37 +0800 Subject: [PATCH 044/157] replace gomock with go.uber.org/mock and bump k8s.io to 1.29 --- go.mod | 127 +- go.sum | 172 +- pkg/blob/azure_test.go | 2 +- pkg/blob/blob_test.go | 2 +- pkg/blob/controllerserver_test.go | 2 +- pkg/blob/nodeserver_test.go | 2 +- pkg/util/util_mock.go | 2 +- pkg/util/util_test.go | 2 +- test/e2e/suite_test.go | 35 +- test/e2e/testsuites/testsuites.go | 2 +- test/utils/azure/azure_helper.go | 5 +- .../azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 10 + .../sdk/azcore/internal/shared/constants.go | 4 +- .../sdk/azcore/internal/shared/shared.go | 62 +- .../compute/armcompute/v5/CHANGELOG.md | 59 + .../compute/armcompute/v5/README.md | 4 +- .../compute/armcompute/v5/assets.json | 2 +- .../compute/armcompute/v5/autorest.md | 8 +- .../armcompute/v5/availabilitysets_client.go | 28 +- .../v5/capacityreservationgroups_client.go | 32 +- .../v5/capacityreservations_client.go | 26 +- .../v5/communitygalleries_client.go | 4 +- .../v5/communitygalleryimages_client.go | 8 +- .../communitygalleryimageversions_client.go | 8 +- .../compute/armcompute/v5/constants.go | 106 +- .../v5/dedicatedhostgroups_client.go | 24 +- .../armcompute/v5/dedicatedhosts_client.go | 123 +- .../compute/armcompute/v5/galleries_client.go | 30 +- .../v5/galleryapplications_client.go | 26 +- .../v5/galleryapplicationversions_client.go | 26 +- .../armcompute/v5/galleryimages_client.go | 26 +- .../v5/galleryimageversions_client.go | 26 +- .../v5/gallerysharingprofile_client.go | 6 +- .../compute/armcompute/v5/images_client.go | 30 +- .../armcompute/v5/loganalytics_client.go | 12 +- .../compute/armcompute/v5/models.go | 296 +- .../compute/armcompute/v5/models_serde.go | 619 +++ .../armcompute/v5/operations_client.go | 4 +- .../compute/armcompute/v5/options.go | 80 +- .../v5/proximityplacementgroups_client.go | 24 +- .../compute/armcompute/v5/response_types.go | 27 + .../v5/restorepointcollections_client.go | 26 +- .../armcompute/v5/restorepoints_client.go | 16 +- .../armcompute/v5/sharedgalleries_client.go | 8 +- .../v5/sharedgalleryimages_client.go | 8 +- .../v5/sharedgalleryimageversions_client.go | 8 +- .../armcompute/v5/sshpublickeys_client.go | 34 +- .../compute/armcompute/v5/usage_client.go | 4 +- .../virtualmachineextensionimages_client.go | 12 +- .../v5/virtualmachineextensions_client.go | 26 +- 
.../v5/virtualmachineimages_client.go | 24 +- .../v5/virtualmachineimagesedgezone_client.go | 20 +- .../v5/virtualmachineruncommands_client.go | 34 +- .../armcompute/v5/virtualmachines_client.go | 225 +- ...virtualmachinescalesetextensions_client.go | 26 +- ...almachinescalesetrollingupgrades_client.go | 22 +- .../v5/virtualmachinescalesets_client.go | 222 +- ...rtualmachinescalesetvmextensions_client.go | 26 +- ...tualmachinescalesetvmruncommands_client.go | 26 +- .../v5/virtualmachinescalesetvms_client.go | 259 +- .../v5/virtualmachinesizes_client.go | 4 +- .../mgmt/2018-09-01/privatedns/CHANGELOG.md | 2 - .../mgmt/2018-09-01/privatedns/_meta.json | 11 - .../mgmt/2018-09-01/privatedns/client.go | 43 - .../mgmt/2018-09-01/privatedns/enums.go | 72 - .../mgmt/2018-09-01/privatedns/models.go | 1350 ----- .../2018-09-01/privatedns/privatezones.go | 614 -- .../mgmt/2018-09-01/privatedns/recordsets.go | 640 --- .../mgmt/2018-09-01/privatedns/version.go | 19 - .../privatedns/virtualnetworklinks.go | 509 -- .../distribution/reference/.gitattributes | 1 + .../distribution/reference/.gitignore | 2 + .../distribution/reference/.golangci.yml | 18 + .../distribution/reference/CODE-OF-CONDUCT.md | 5 + .../distribution/reference/CONTRIBUTING.md | 114 + .../distribution/reference/GOVERNANCE.md | 144 + .../reference}/LICENSE | 0 .../distribution/reference/MAINTAINERS | 26 + .../distribution/reference/Makefile | 25 + .../distribution/reference/README.md | 30 + .../distribution/reference/SECURITY.md | 7 + .../reference/distribution-logo.svg | 1 + .../distribution/reference/helpers.go | 2 +- .../distribution/reference/normalize.go | 131 +- .../distribution/reference/reference.go | 27 +- .../distribution/reference/regexp.go | 163 + .../github.com/distribution/reference/sort.go | 75 + .../docker/distribution/digestset/set.go | 247 - .../docker/distribution/reference/regexp.go | 143 - .../emicklei/go-restful/v3/CHANGES.md | 8 +- .../emicklei/go-restful/v3/README.md | 9 +- .../emicklei/go-restful/v3/route.go | 17 +- .../emicklei/go-restful/v3/route_builder.go | 59 +- vendor/github.com/go-logr/logr/README.md | 73 +- vendor/github.com/go-logr/logr/context.go | 33 + .../github.com/go-logr/logr/context_noslog.go | 49 + .../github.com/go-logr/logr/context_slog.go | 83 + vendor/github.com/go-logr/logr/funcr/funcr.go | 185 +- .../github.com/go-logr/logr/funcr/slogsink.go | 105 + vendor/github.com/go-logr/logr/logr.go | 43 - .../go-logr/logr/{slogr => }/sloghandler.go | 98 +- vendor/github.com/go-logr/logr/slogr.go | 100 + vendor/github.com/go-logr/logr/slogr/slogr.go | 77 +- .../go-logr/logr/{slogr => }/slogsink.go | 24 +- vendor/github.com/golang/mock/CONTRIBUTORS | 37 - .../github.com/google/cel-go/cel/BUILD.bazel | 8 +- vendor/github.com/google/cel-go/cel/decls.go | 1051 +--- vendor/github.com/google/cel-go/cel/env.go | 368 +- vendor/github.com/google/cel-go/cel/io.go | 46 +- .../github.com/google/cel-go/cel/library.go | 260 +- vendor/github.com/google/cel-go/cel/macro.go | 11 +- .../github.com/google/cel-go/cel/options.go | 176 +- .../github.com/google/cel-go/cel/program.go | 69 +- .../github.com/google/cel-go/cel/validator.go | 388 ++ .../google/cel-go/checker/BUILD.bazel | 6 + .../google/cel-go/checker/checker.go | 416 +- .../github.com/google/cel-go/checker/cost.go | 108 +- .../google/cel-go/checker/decls/BUILD.bazel | 1 - .../github.com/google/cel-go/checker/env.go | 264 +- .../google/cel-go/checker/errors.go | 94 +- .../google/cel-go/checker/format.go | 216 + .../google/cel-go/checker/mapping.go | 14 +- 
.../google/cel-go/checker/options.go | 13 +- .../google/cel-go/checker/printer.go | 3 + .../cel-go/checker/{decls => }/scopes.go | 38 +- .../google/cel-go/checker/standard.go | 481 +- .../github.com/google/cel-go/checker/types.go | 402 +- .../google/cel-go/common/ast/BUILD.bazel | 52 + .../google/cel-go/common/ast/ast.go | 226 + .../google/cel-go/common/ast/expr.go | 709 +++ .../google/cel-go/common/decls/BUILD.bazel | 39 + .../google/cel-go/common/decls/decls.go | 844 +++ .../github.com/google/cel-go/common/error.go | 8 +- .../github.com/google/cel-go/common/errors.go | 16 +- .../cel-go/common/functions/BUILD.bazel | 17 + .../cel-go/common/functions/functions.go | 61 + .../github.com/google/cel-go/common/source.go | 3 - .../google/cel-go/common/stdlib/BUILD.bazel | 25 + .../google/cel-go/common/stdlib/standard.go | 661 +++ .../google/cel-go/common/types/BUILD.bazel | 7 +- .../google/cel-go/common/types/bool.go | 6 - .../google/cel-go/common/types/bytes.go | 7 - .../google/cel-go/common/types/double.go | 10 - .../google/cel-go/common/types/duration.go | 68 +- .../google/cel-go/common/types/err.go | 7 +- .../google/cel-go/common/types/int.go | 11 - .../google/cel-go/common/types/iterator.go | 2 +- .../google/cel-go/common/types/list.go | 90 +- .../google/cel-go/common/types/map.go | 87 +- .../google/cel-go/common/types/null.go | 2 - .../google/cel-go/common/types/object.go | 18 +- .../google/cel-go/common/types/optional.go | 2 +- .../google/cel-go/common/types/pb/type.go | 23 +- .../google/cel-go/common/types/provider.go | 244 +- .../cel-go/common/types/ref/provider.go | 37 +- .../google/cel-go/common/types/string.go | 44 +- .../google/cel-go/common/types/timestamp.go | 10 - .../google/cel-go/common/types/type.go | 102 - .../google/cel-go/common/types/types.go | 806 +++ .../google/cel-go/common/types/uint.go | 10 - .../google/cel-go/common/types/unknown.go | 290 +- .../google/cel-go/common/types/util.go | 2 +- .../github.com/google/cel-go/ext/BUILD.bazel | 5 +- vendor/github.com/google/cel-go/ext/README.md | 17 + .../github.com/google/cel-go/ext/bindings.go | 8 +- .../github.com/google/cel-go/ext/encoders.go | 5 - vendor/github.com/google/cel-go/ext/guards.go | 1 + vendor/github.com/google/cel-go/ext/lists.go | 94 + vendor/github.com/google/cel-go/ext/math.go | 33 +- vendor/github.com/google/cel-go/ext/native.go | 92 +- vendor/github.com/google/cel-go/ext/protos.go | 12 +- vendor/github.com/google/cel-go/ext/sets.go | 61 +- .../github.com/google/cel-go/ext/strings.go | 38 +- .../google/cel-go/interpreter/BUILD.bazel | 8 +- .../google/cel-go/interpreter/activation.go | 2 +- .../cel-go/interpreter/attribute_patterns.go | 42 +- .../google/cel-go/interpreter/attributes.go | 52 +- .../google/cel-go/interpreter/decorators.go | 10 +- .../google/cel-go/interpreter/dispatcher.go | 2 +- .../google/cel-go/interpreter/evalstate.go | 6 +- .../google/cel-go/interpreter/formatting.go | 2 +- .../cel-go/interpreter/functions/BUILD.bazel | 9 +- .../cel-go/interpreter/functions/functions.go | 33 +- .../cel-go/interpreter/functions/standard.go | 270 - .../cel-go/interpreter/interpretable.go | 311 +- .../google/cel-go/interpreter/interpreter.go | 28 +- .../google/cel-go/interpreter/planner.go | 65 +- .../google/cel-go/interpreter/prune.go | 45 +- .../google/cel-go/interpreter/runtimecost.go | 63 +- .../github.com/google/cel-go/parser/errors.go | 17 +- .../github.com/google/cel-go/parser/helper.go | 113 +- .../github.com/google/cel-go/parser/macro.go | 17 +- .../google/cel-go/parser/options.go | 13 + 
.../github.com/google/cel-go/parser/parser.go | 41 +- vendor/github.com/google/uuid/CHANGELOG.md | 7 + vendor/github.com/google/uuid/time.go | 21 +- vendor/github.com/google/uuid/uuid.go | 53 + vendor/github.com/google/uuid/version6.go | 56 + vendor/github.com/google/uuid/version7.go | 75 + .../github.com/gorilla/websocket/.gitignore | 25 + vendor/github.com/gorilla/websocket/AUTHORS | 9 + vendor/github.com/gorilla/websocket/LICENSE | 22 + vendor/github.com/gorilla/websocket/README.md | 39 + vendor/github.com/gorilla/websocket/client.go | 422 ++ .../gorilla/websocket/compression.go | 148 + vendor/github.com/gorilla/websocket/conn.go | 1230 ++++ vendor/github.com/gorilla/websocket/doc.go | 227 + vendor/github.com/gorilla/websocket/join.go | 42 + vendor/github.com/gorilla/websocket/json.go | 60 + vendor/github.com/gorilla/websocket/mask.go | 55 + .../github.com/gorilla/websocket/mask_safe.go | 16 + .../github.com/gorilla/websocket/prepared.go | 102 + vendor/github.com/gorilla/websocket/proxy.go | 77 + vendor/github.com/gorilla/websocket/server.go | 365 ++ .../gorilla/websocket/tls_handshake.go | 21 + .../gorilla/websocket/tls_handshake_116.go | 21 + vendor/github.com/gorilla/websocket/util.go | 283 + .../gorilla/websocket/x_net_proxy.go | 473 ++ vendor/github.com/mxk/go-flowrate/LICENSE | 29 + .../mxk/go-flowrate/flowrate/flowrate.go | 267 + .../github.com/mxk/go-flowrate/flowrate/io.go | 133 + .../mxk/go-flowrate/flowrate/util.go | 67 + .../opencontainers/selinux/go-selinux/doc.go | 1 - .../selinux/go-selinux/label/label.go | 22 +- .../selinux/go-selinux/label/label_linux.go | 46 - .../selinux/go-selinux/label/label_stub.go | 1 + .../selinux/go-selinux/rchcon.go | 22 - .../selinux/go-selinux/rchcon_go115.go | 21 - .../selinux/go-selinux/selinux.go | 30 +- .../selinux/go-selinux/selinux_linux.go | 257 +- .../selinux/go-selinux/selinux_stub.go | 45 +- .../selinux/pkg/pwalk/README.md | 48 - .../opencontainers/selinux/pkg/pwalk/pwalk.go | 115 - .../selinux/pkg/pwalkdir/pwalkdir.go | 2 +- .../etcd/api/v3/etcdserverpb/rpc.pb.go | 516 +- .../etcd/api/v3/etcdserverpb/rpc.proto | 2 +- .../etcd/api/v3/v3rpc/rpctypes/error.go | 3 + .../go.etcd.io/etcd/api/v3/version/version.go | 2 +- .../etcd/client/pkg/v3/transport/sockopt.go | 4 +- .../etcd/client/pkg/v3/transport/tls.go | 5 +- vendor/go.etcd.io/etcd/client/v3/client.go | 5 +- .../client/v3/internal/endpoint/endpoint.go | 18 +- vendor/go.etcd.io/etcd/client/v3/lease.go | 4 +- .../golang => go.uber.org}/mock/AUTHORS | 0 .../golang => go.uber.org}/mock/LICENSE | 0 .../mock/gomock/call.go | 127 +- .../mock/gomock/callset.go | 59 +- .../mock/gomock/controller.go | 138 +- vendor/go.uber.org/mock/gomock/doc.go | 60 + .../mock/gomock/matchers.go | 172 +- vendor/golang.org/x/sync/errgroup/errgroup.go | 3 + .../admissionregistration/v1alpha1/types.go | 2 +- .../admissionregistration/v1beta1/types.go | 2 +- vendor/k8s.io/api/batch/v1/generated.proto | 26 +- vendor/k8s.io/api/batch/v1/types.go | 46 +- .../batch/v1/types_swagger_doc_generated.go | 14 +- vendor/k8s.io/api/core/v1/generated.pb.go | 4261 +++++++++----- vendor/k8s.io/api/core/v1/generated.proto | 181 +- vendor/k8s.io/api/core/v1/types.go | 216 +- .../core/v1/types_swagger_doc_generated.go | 91 +- .../api/core/v1/zz_generated.deepcopy.go | 143 + .../k8s.io/api/discovery/v1/generated.proto | 4 +- vendor/k8s.io/api/discovery/v1/types.go | 4 +- .../v1/types_swagger_doc_generated.go | 4 +- .../api/discovery/v1beta1/generated.proto | 2 +- vendor/k8s.io/api/discovery/v1beta1/types.go | 2 +- 
.../v1beta1/types_swagger_doc_generated.go | 2 +- .../api/flowcontrol/{v1alpha1 => v1}/doc.go | 7 +- .../{v1alpha1 => v1}/generated.pb.go | 329 +- .../{v1alpha1 => v1}/generated.proto | 41 +- .../flowcontrol/{v1alpha1 => v1}/register.go | 6 +- .../api/flowcontrol/{v1alpha1 => v1}/types.go | 107 +- .../types_swagger_doc_generated.go | 4 +- .../{v1alpha1 => v1}/zz_generated.deepcopy.go | 7 +- .../zz_generated.prerelease-lifecycle.go | 122 - .../api/flowcontrol/v1beta1/generated.proto | 2 +- .../k8s.io/api/flowcontrol/v1beta1/types.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../api/flowcontrol/v1beta2/generated.proto | 2 +- .../k8s.io/api/flowcontrol/v1beta2/types.go | 2 +- .../v1beta2/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/flowcontrol/v1beta3/types.go | 18 + .../zz_generated.prerelease-lifecycle.go | 28 + .../api/networking/v1alpha1/generated.pb.go | 966 ++-- .../api/networking/v1alpha1/generated.proto | 115 +- .../api/networking/v1alpha1/register.go | 4 +- .../k8s.io/api/networking/v1alpha1/types.go | 147 +- .../v1alpha1/types_swagger_doc_generated.go | 72 +- .../v1alpha1/zz_generated.deepcopy.go | 110 +- .../zz_generated.prerelease-lifecycle.go | 36 +- vendor/k8s.io/api/policy/v1/doc.go | 2 +- vendor/k8s.io/api/policy/v1beta1/doc.go | 2 +- .../k8s.io/api/policy/v1beta1/generated.pb.go | 4950 ++--------------- .../k8s.io/api/policy/v1beta1/generated.proto | 277 - vendor/k8s.io/api/policy/v1beta1/register.go | 2 - vendor/k8s.io/api/policy/v1beta1/types.go | 371 -- .../v1beta1/types_swagger_doc_generated.go | 160 - .../policy/v1beta1/zz_generated.deepcopy.go | 367 -- .../zz_generated.prerelease-lifecycle.go | 36 - .../api/resource/v1alpha2/generated.proto | 4 +- vendor/k8s.io/api/resource/v1alpha2/types.go | 4 +- vendor/k8s.io/api/storage/v1/generated.proto | 2 +- vendor/k8s.io/api/storage/v1/types.go | 2 +- .../storage/v1/types_swagger_doc_generated.go | 2 +- .../api/storage/v1alpha1/generated.pb.go | 729 ++- .../api/storage/v1alpha1/generated.proto | 40 + .../k8s.io/api/storage/v1alpha1/register.go | 2 + vendor/k8s.io/api/storage/v1alpha1/types.go | 52 + .../v1alpha1/types_swagger_doc_generated.go | 21 + .../storage/v1alpha1/zz_generated.deepcopy.go | 66 + .../zz_generated.prerelease-lifecycle.go | 36 + .../api/storage/v1beta1/generated.proto | 2 +- vendor/k8s.io/api/storage/v1beta1/types.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../apis/apiextensions/types_jsonschema.go | 31 + .../pkg/apis/apiextensions/v1/generated.pb.go | 429 +- .../pkg/apis/apiextensions/v1/generated.proto | 30 + .../apis/apiextensions/v1/types_jsonschema.go | 31 + .../v1/zz_generated.conversion.go | 2 + .../apiextensions/v1/zz_generated.deepcopy.go | 5 + .../apiextensions/zz_generated.deepcopy.go | 5 + .../apimachinery/pkg/api/meta/conditions.go | 37 +- .../apimachinery/pkg/api/resource/amount.go | 38 + .../apimachinery/pkg/api/resource/quantity.go | 10 + .../pkg/apis/meta/v1/unstructured/helpers.go | 2 +- .../k8s.io/apimachinery/pkg/runtime/helper.go | 23 + .../runtime/serializer/streaming/streaming.go | 20 - .../pkg/util/cache/lruexpirecache.go | 13 + .../pkg/util/httpstream/httpstream.go | 21 + .../pkg/util/httpstream/spdy/roundtripper.go | 55 +- .../pkg/util/httpstream/wsstream/conn.go | 112 +- .../pkg/util/httpstream/wsstream/doc.go | 52 +- .../apimachinery/pkg/util/intstr/intstr.go | 6 +- .../managedfields/internal/structuredmerge.go | 9 +- .../managedfields/internal/typeconverter.go | 14 +- .../apimachinery/pkg/util/proxy/dial.go | 122 + 
.../apimachinery/pkg/util/proxy}/doc.go | 6 +- .../apimachinery/pkg/util/proxy/transport.go | 272 + .../pkg/util/proxy/upgradeaware.go | 556 ++ .../pkg/util/remotecommand/constants.go | 14 + .../pkg/util/strategicpatch/meta.go | 89 + .../pkg/util/validation/field/errors.go | 4 +- .../apimachinery/pkg/util/version/version.go | 42 + .../k8s.io/apimachinery/pkg/util/wait/loop.go | 38 +- .../k8s.io/apiserver/pkg/admission/config.go | 5 +- .../pkg/admission/plugin/cel/compile.go | 2 + .../pkg/admission/plugin/cel/composition.go | 52 +- .../validatingadmissionpolicy/typechecking.go | 20 +- .../plugin/webhook/config/kubeconfig.go | 3 +- .../k8s.io/apiserver/pkg/admission/plugins.go | 3 +- .../apiserver/pkg/apis/apiserver/register.go | 2 + .../apiserver/pkg/apis/apiserver/types.go | 185 + .../pkg/apis/apiserver/v1alpha1/defaults.go | 36 + .../pkg/apis/apiserver/v1alpha1/register.go | 4 +- .../pkg/apis/apiserver/v1alpha1/types.go | 376 ++ .../v1alpha1/zz_generated.conversion.go | 496 ++ .../v1alpha1/zz_generated.deepcopy.go | 305 + .../v1alpha1/zz_generated.defaults.go | 10 + .../apis/apiserver/validation/validation.go | 630 +++ .../apis/apiserver/zz_generated.deepcopy.go | 305 + .../k8s.io/apiserver/pkg/apis/audit/types.go | 16 +- .../pkg/apis/audit/v1/generated.proto | 16 +- .../apiserver/pkg/apis/audit/v1/types.go | 16 +- .../pkg/apis/flowcontrol/bootstrap/default.go | 36 +- .../pkg/authentication/cel/compile.go | 154 + .../pkg/authentication/cel/interface.go | 147 + .../pkg/authentication/cel/mapper.go | 97 + .../pkg/authentication/request/x509/x509.go | 27 + .../pkg/authentication/serviceaccount/util.go | 45 +- .../authorizerfactory/delegating.go | 1 + .../pkg/authorization/cel/compile.go | 214 + .../pkg/authorization/cel/interface.go | 41 + .../pkg/authorization/cel/matcher.go | 66 + .../apiserver/pkg/cel/common/adaptor.go | 25 + .../apiserver/pkg/cel/common/equality.go | 334 ++ .../apiserver/pkg/cel/common/schemas.go | 19 +- .../k8s.io/apiserver/pkg/cel/common/values.go | 26 +- .../apiserver/pkg/cel/environment/base.go | 47 +- vendor/k8s.io/apiserver/pkg/cel/lazy/lazy.go | 2 +- .../k8s.io/apiserver/pkg/cel/library/authz.go | 6 +- .../k8s.io/apiserver/pkg/cel/library/cost.go | 15 +- .../k8s.io/apiserver/pkg/cel/library/lists.go | 4 + .../apiserver/pkg/cel/library/quantity.go | 5 + .../k8s.io/apiserver/pkg/cel/library/regex.go | 4 + .../k8s.io/apiserver/pkg/cel/library/test.go | 4 + .../k8s.io/apiserver/pkg/cel/library/urls.go | 4 + .../apiserver/pkg/cel/openapi/adaptor.go | 82 + .../apiserver/pkg/cel/openapi/extensions.go | 45 + .../pkg/cel/openapi/resolver/combined.go | 45 + .../pkg/cel/openapi/resolver/definitions.go | 27 +- .../pkg/cel/openapi/resolver/discovery.go | 16 +- .../pkg/cel/openapi/resolver/refs.go | 32 +- .../endpoints/discovery/aggregated/handler.go | 2 +- .../pkg/endpoints/filters/impersonation.go | 20 +- .../apiserver/pkg/endpoints/filters/traces.go | 11 +- .../apiserver/pkg/endpoints/handlers/get.go | 2 +- .../pkg/endpoints/handlers/helpers.go | 90 + .../pkg/endpoints/handlers/metrics/metrics.go | 16 +- .../pkg/endpoints/handlers/response.go | 252 +- .../pkg/endpoints/handlers/trace_util.go | 5 + .../apiserver/pkg/endpoints/handlers/watch.go | 150 +- .../apiserver/pkg/endpoints/installer.go | 18 +- .../pkg/endpoints/metrics/metrics.go | 73 + .../apiserver/pkg/features/kube_features.go | 61 +- .../generic/registry/storage_factory.go | 2 +- .../pkg/registry/generic/storage_decorator.go | 6 +- vendor/k8s.io/apiserver/pkg/server/config.go | 93 +- .../dynamic_cafile_content.go | 
4 +- .../dynamic_serving_content.go | 6 +- .../pkg/server/egressselector/config.go | 4 +- .../server/egressselector/egress_selector.go | 4 +- .../server/filters/priority-and-fairness.go | 72 +- .../apiserver/pkg/server/genericapiserver.go | 10 +- .../apiserver/pkg/server/httplog/httplog.go | 1 - .../pkg/server/options/api_enablement.go | 4 +- .../server/options/encryptionconfig/config.go | 109 +- .../encryptionconfig/controller/controller.go | 179 +- .../encryptionconfig/metrics/metrics.go | 53 +- .../apiserver/pkg/server/options/etcd.go | 25 +- .../apiserver/pkg/server/options/feature.go | 23 +- .../pkg/server/options/recommended.go | 31 +- .../pkg/server/options/server_run_options.go | 7 +- .../apiserver/pkg/server/options/serving.go | 34 +- .../apiserver/pkg/server/routes/metrics.go | 2 + .../apiserver/pkg/server/routes/openapi.go | 5 +- .../pkg/server/storage/storage_factory.go | 20 +- .../pkg/storage/cacher/cache_watcher.go | 11 +- .../apiserver/pkg/storage/cacher/cacher.go | 68 +- vendor/k8s.io/apiserver/pkg/storage/errors.go | 11 +- .../apiserver/pkg/storage/etcd3/event.go | 11 + .../pkg/storage/etcd3/metrics/metrics.go | 19 +- .../apiserver/pkg/storage/etcd3/store.go | 227 +- .../apiserver/pkg/storage/etcd3/watcher.go | 232 +- .../apiserver/pkg/storage/interfaces.go | 13 + .../pkg/storage/storagebackend/config.go | 6 - .../storage/storagebackend/factory/etcd3.go | 4 +- .../storage/storagebackend/factory/factory.go | 4 +- vendor/k8s.io/apiserver/pkg/storage/util.go | 80 + .../value/encrypt/envelope/kmsv2/cache.go | 14 +- .../value/encrypt/envelope/kmsv2/envelope.go | 81 +- .../value/encrypt/envelope/kmsv2/v2/api.pb.go | 9 + .../value/encrypt/envelope/kmsv2/v2/api.proto | 9 + .../value/encrypt/envelope/metrics/metrics.go | 76 +- .../apiserver/pkg/util/apihelpers/helpers.go | 2 +- .../pkg/util/flowcontrol/apf_controller.go | 58 +- .../pkg/util/flowcontrol/apf_filter.go | 13 +- .../util/flowcontrol/fairqueuing/interface.go | 7 +- .../fairqueuing/promise/promise.go | 15 +- .../fairqueuing/queueset/queueset.go | 83 +- .../pkg/util/flowcontrol/format/formatting.go | 14 +- .../pkg/util/flowcontrol/metrics/metrics.go | 16 + .../request/list_work_estimator.go | 8 +- .../apiserver/pkg/util/flowcontrol/rule.go | 2 +- .../pkg/util/webhook/authentication.go | 4 +- .../plugin/pkg/authorizer/webhook/webhook.go | 54 +- .../core/v1/clustertrustbundleprojection.go | 79 + .../core/v1/lifecyclehandler.go | 9 + .../core/v1/loadbalanceringress.go | 13 + .../core/v1/modifyvolumestatus.go | 52 + .../core/v1/persistentvolumeclaimspec.go | 27 +- .../core/v1/persistentvolumeclaimstatus.go | 30 +- .../core/v1/persistentvolumespec.go | 9 + .../core/v1/podaffinityterm.go | 22 + .../v1/sleepaction.go} | 22 +- .../core/v1/volumeprojection.go | 9 + .../core/v1/volumeresourcerequirements.go | 52 + .../exemptprioritylevelconfiguration.go | 2 +- .../flowdistinguishermethod.go | 8 +- .../{v1alpha1 => v1}/flowschema.go | 16 +- .../{v1alpha1 => v1}/flowschemacondition.go | 22 +- .../{v1alpha1 => v1}/flowschemaspec.go | 2 +- .../{v1alpha1 => v1}/flowschemastatus.go | 2 +- .../{v1alpha1 => v1}/groupsubject.go | 2 +- .../limitedprioritylevelconfiguration.go | 12 +- .../{v1alpha1 => v1}/limitresponse.go | 8 +- .../{v1alpha1 => v1}/nonresourcepolicyrule.go | 2 +- .../policyruleswithsubjects.go | 2 +- .../prioritylevelconfiguration.go | 16 +- .../prioritylevelconfigurationcondition.go | 22 +- .../prioritylevelconfigurationreference.go | 2 +- .../prioritylevelconfigurationspec.go | 8 +- .../prioritylevelconfigurationstatus.go | 
2 +- .../{v1alpha1 => v1}/queuingconfiguration.go | 2 +- .../{v1alpha1 => v1}/resourcepolicyrule.go | 2 +- .../{v1alpha1 => v1}/serviceaccountsubject.go | 2 +- .../flowcontrol/{v1alpha1 => v1}/subject.go | 8 +- .../{v1alpha1 => v1}/usersubject.go | 2 +- .../applyconfigurations/internal/internal.go | 589 +- .../meta/v1/unstructured.go | 2 +- .../networking/v1alpha1/clustercidrspec.go | 70 - .../networking/v1alpha1/parentreference.go | 21 +- .../{clustercidr.go => servicecidr.go} | 87 +- .../networking/v1alpha1/servicecidrspec.go | 41 + .../networking/v1alpha1/servicecidrstatus.go | 48 + .../policy/v1beta1/allowedflexvolume.go | 39 - .../policy/v1beta1/allowedhostpath.go | 48 - .../policy/v1beta1/fsgroupstrategyoptions.go | 57 - .../policy/v1beta1/hostportrange.go | 48 - .../policy/v1beta1/idrange.go | 48 - .../policy/v1beta1/podsecuritypolicyspec.go | 285 - .../v1beta1/runasgroupstrategyoptions.go | 57 - .../v1beta1/runasuserstrategyoptions.go | 57 - .../v1beta1/runtimeclassstrategyoptions.go | 50 - .../policy/v1beta1/selinuxstrategyoptions.go | 53 - .../supplementalgroupsstrategyoptions.go | 57 - .../v1alpha1/volumeattributesclass.go} | 107 +- .../client-go/discovery/discovery_client.go | 28 +- vendor/k8s.io/client-go/informers/factory.go | 10 + .../informers/flowcontrol/interface.go | 12 +- .../{v1alpha1 => v1}/flowschema.go | 26 +- .../flowcontrol/{v1alpha1 => v1}/interface.go | 2 +- .../prioritylevelconfiguration.go | 26 +- vendor/k8s.io/client-go/informers/generic.go | 20 +- .../networking/v1alpha1/interface.go | 14 +- .../{clustercidr.go => servicecidr.go} | 38 +- .../informers/policy/v1beta1/interface.go | 7 - .../policy/v1beta1/podsecuritypolicy.go | 89 - .../informers/storage/v1alpha1/interface.go | 7 + .../storage/v1alpha1/volumeattributesclass.go | 89 + .../k8s.io/client-go/kubernetes/clientset.go | 16 +- .../kubernetes/fake/clientset_generated.go | 10 +- .../client-go/kubernetes/fake/register.go | 4 +- .../client-go/kubernetes/scheme/register.go | 4 +- .../typed/flowcontrol/{v1alpha1 => v1}/doc.go | 2 +- .../flowcontrol/{v1alpha1 => v1}/fake/doc.go | 0 .../fake/fake_flowcontrol_client.go | 10 +- .../{v1alpha1 => v1}/fake/fake_flowschema.go | 72 +- .../fake/fake_prioritylevelconfiguration.go | 72 +- .../{v1alpha1 => v1}/flowcontrol_client.go | 38 +- .../{v1alpha1 => v1}/flowschema.go | 70 +- .../{v1alpha1 => v1}/generated_expansion.go | 2 +- .../prioritylevelconfiguration.go | 70 +- .../typed/networking/v1alpha1/clustercidr.go | 197 - .../v1alpha1/fake/fake_clustercidr.go | 145 - .../v1alpha1/fake/fake_networking_client.go | 8 +- .../v1alpha1/fake/fake_servicecidr.go | 178 + .../v1alpha1/generated_expansion.go | 4 +- .../networking/v1alpha1/networking_client.go | 10 +- .../typed/networking/v1alpha1/servicecidr.go | 243 + .../v1beta1/fake/fake_podsecuritypolicy.go | 145 - .../policy/v1beta1/fake/fake_policy_client.go | 4 - .../policy/v1beta1/generated_expansion.go | 2 - .../typed/policy/v1beta1/podsecuritypolicy.go | 197 - .../typed/policy/v1beta1/policy_client.go | 5 - .../v1alpha1/fake/fake_storage_client.go | 4 + .../fake/fake_volumeattributesclass.go | 145 + .../storage/v1alpha1/generated_expansion.go | 2 + .../typed/storage/v1alpha1/storage_client.go | 5 + .../storage/v1alpha1/volumeattributesclass.go | 197 + .../{v1alpha1 => v1}/expansion_generated.go | 2 +- .../{v1alpha1 => v1}/flowschema.go | 18 +- .../prioritylevelconfiguration.go | 18 +- .../v1alpha1/expansion_generated.go | 8 +- .../{clustercidr.go => servicecidr.go} | 38 +- .../policy/v1beta1/expansion_generated.go | 4 
- .../policy/v1beta1/podsecuritypolicy.go | 68 - .../storage/v1alpha1/expansion_generated.go | 4 + .../storage/v1alpha1/volumeattributesclass.go | 68 + .../k8s.io/client-go/restmapper/shortcut.go | 34 +- .../k8s.io/client-go/tools/cache/reflector.go | 22 +- .../reflector_data_consistency_detector.go | 119 + .../client-go/tools/cache/shared_informer.go | 2 - .../tools/clientcmd/merged_client_builder.go | 4 +- .../tools/events/event_broadcaster.go | 117 +- .../client-go/tools/events/event_recorder.go | 27 +- vendor/k8s.io/client-go/tools/events/fake.go | 7 + .../client-go/tools/events/interfaces.go | 45 +- .../tools/internal/events/interfaces.go | 59 + vendor/k8s.io/client-go/tools/record/event.go | 184 +- vendor/k8s.io/client-go/tools/record/fake.go | 7 + .../client-go/tools/remotecommand/fallback.go | 57 + .../tools/remotecommand/remotecommand.go | 124 - .../client-go/tools/remotecommand/spdy.go | 171 + .../client-go/tools/remotecommand/v5.go | 35 + .../tools/remotecommand/websocket.go | 502 ++ .../k8s.io/client-go/transport/spdy/spdy.go | 12 +- .../k8s.io/client-go/transport/transport.go | 55 + .../transport/websocket/roundtripper.go | 163 + .../k8s.io/client-go/util/workqueue/queue.go | 55 +- vendor/k8s.io/cloud-provider/cloud.go | 8 +- .../k8s.io/cloud-provider/options/options.go | 2 +- .../k8s.io/cloud-provider/options/webhook.go | 2 +- .../cloud-provider/service/helpers/helper.go | 3 + .../k8s.io/component-base/metrics/buckets.go | 10 + .../k8s.io/component-base/metrics/metric.go | 2 +- .../k8s.io/component-base/metrics/options.go | 13 +- vendor/k8s.io/component-base/metrics/opts.go | 24 + .../metrics/prometheus/slis/metrics.go | 4 +- .../k8s.io/component-base/metrics/registry.go | 13 +- .../k8s.io/component-base/tracing/tracing.go | 6 + vendor/k8s.io/component-base/tracing/utils.go | 11 +- .../node/util/sysctl/namespace.go | 104 + .../node/util/sysctl/sysctl.go | 131 + .../pkg/features/kube_features.go | 3 +- vendor/k8s.io/kms/apis/v2/api.pb.go | 14 +- vendor/k8s.io/kms/apis/v2/api.proto | 14 +- vendor/k8s.io/kms/pkg/service/grpc_service.go | 10 - .../kube-openapi/pkg/builder3/openapi.go | 14 +- .../k8s.io/kube-openapi/pkg/cached/cache.go | 268 +- .../k8s.io/kube-openapi/pkg/common/common.go | 38 - .../kube-openapi/pkg/handler/handler.go | 55 +- .../kube-openapi/pkg/handler3/handler.go | 77 +- .../k8s.io/kube-openapi/pkg/internal/flags.go | 1 + .../kube-openapi/pkg/openapiconv/convert.go | 322 -- .../k8s.io/kube-openapi/pkg/spec3/encoding.go | 21 + .../k8s.io/kube-openapi/pkg/spec3/example.go | 14 + .../pkg/spec3/external_documentation.go | 13 + vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go | 27 + .../k8s.io/kube-openapi/pkg/spec3/header.go | 31 + .../kube-openapi/pkg/spec3/media_type.go | 20 + .../kube-openapi/pkg/spec3/operation.go | 27 + .../kube-openapi/pkg/spec3/parameter.go | 31 + vendor/k8s.io/kube-openapi/pkg/spec3/path.go | 47 +- .../kube-openapi/pkg/spec3/request_body.go | 21 + .../k8s.io/kube-openapi/pkg/spec3/response.go | 52 + .../kube-openapi/pkg/spec3/security_scheme.go | 17 + .../k8s.io/kube-openapi/pkg/spec3/server.go | 26 + vendor/k8s.io/kube-openapi/pkg/spec3/spec.go | 25 + .../kube-openapi/pkg/validation/spec/fuzz.go | 502 -- .../kubelet/pkg/apis/stats/v1alpha1/types.go | 5 + .../k8s.io/kubernetes/pkg/api/service/util.go | 7 + .../kubernetes/pkg/api/v1/service/util.go | 99 + .../k8s.io/kubernetes/pkg/apis/batch/types.go | 24 +- .../pkg/apis/core/helper/helpers.go | 188 +- .../pkg/apis/core/helper/qos/qos.go | 14 +- .../k8s.io/kubernetes/pkg/apis/core/types.go | 177 
+- .../kubernetes/pkg/apis/core/v1/defaults.go | 20 +- .../apis/core/v1/zz_generated.conversion.go | 154 +- .../pkg/apis/core/validation/names.go | 132 + .../pkg/apis/core/validation/validation.go | 965 ++-- .../pkg/apis/core/zz_generated.deepcopy.go | 143 + .../pkg/apis/networking/register.go | 4 +- .../kubernetes/pkg/apis/networking/types.go | 119 +- .../apis/networking/zz_generated.deepcopy.go | 186 +- .../kubernetes/pkg/features/kube_features.go | 359 +- .../pkg/util/filesystem/defaultfs.go | 173 + .../pkg/util/filesystem/filesystem.go | 52 + .../pkg/util/filesystem/util_unix.go | 37 + .../pkg/util/filesystem/util_windows.go | 87 + .../kubernetes/pkg/util/filesystem/watcher.go | 89 + .../kubernetes/pkg/util/parsers/parsers.go | 2 +- .../k8s.io/kubernetes/pkg/volume/plugins.go | 9 +- .../volume/util/hostutil/hostutil_windows.go | 25 +- .../kubernetes/pkg/volume/util/selinux.go | 4 - .../pkg/volume/util/volumeattributesclass.go | 72 + .../test/e2e/framework/.import-restrictions | 47 +- .../kubernetes/test/e2e/framework/OWNERS | 3 +- .../kubernetes/test/e2e/framework/README.md | 2 +- .../kubernetes/test/e2e/framework/bugs.go | 108 + .../test/e2e/framework/events/events.go | 2 +- .../kubernetes/test/e2e/framework/expect.go | 33 +- .../test/e2e/framework/ginkgowrapper.go | 491 +- .../e2e/framework/internal/junit/junit.go | 4 + .../e2e/framework/metrics/metrics_grabber.go | 31 +- .../test/e2e/framework/node/helper.go | 7 +- .../kubernetes/test/e2e/framework/node/ssh.go | 2 +- .../test/e2e/framework/node/wait.go | 6 +- .../kubernetes/test/e2e/framework/pod/get.go | 4 +- .../kubernetes/test/e2e/framework/pv/pv.go | 4 +- .../kubernetes/test/e2e/framework/ssh/ssh.go | 7 +- .../test/e2e/framework/test_context.go | 102 +- .../kubernetes/test/e2e/framework/timeouts.go | 4 + .../kubernetes/test/e2e/framework/util.go | 6 +- .../test/e2e/framework/volume/fixtures.go | 6 +- .../test/e2e/storage/utils/framework.go | 6 +- .../test/e2e/storage/utils/utils.go | 15 +- .../kubectl/httpd-deployment1.yaml.in | 2 + .../sample-device-plugin.yaml | 5 + .../scheduling/nvidia-driver-installer.yaml | 4 +- .../kubernetes/test/utils/image/manifest.go | 6 +- .../k8s.io/kubernetes/test/utils/runners.go | 2 +- .../k8s.io/mount-utils/mount_helper_common.go | 4 +- .../k8s.io/mount-utils/mount_helper_unix.go | 2 +- .../policy/check_runAsNonRoot.go | 5 + .../policy/check_runAsUser.go | 5 + .../policy/check_sysctls.go | 39 +- .../pod-security-admission/policy/helpers.go | 25 +- vendor/modules.txt | 216 +- .../konnectivity-client/pkg/client/client.go | 56 +- .../konnectivity-client/pkg/client/conn.go | 62 +- .../pkg/azclient/Makefile | 9 +- .../pkg/azclient/accountclient/interface.go | 2 +- .../accountclient/zz_generated_client.go | 14 +- .../pkg/azclient/arm_conf.go | 1 + .../cloud-provider-azure/pkg/azclient/auth.go | 53 +- .../zz_generated_client.go | 18 +- .../azclient/blobcontainerclient/interface.go | 2 +- .../zz_generated_client.go | 14 +- .../zz_generated_client.go | 9 +- .../deploymentclient/zz_generated_client.go | 18 +- .../pkg/azclient/diskclient/interface.go | 2 +- .../diskclient/zz_generated_client.go | 32 +- .../pkg/azclient/factory.go | 6 + .../pkg/azclient/factory_gen.go | 999 ++-- .../pkg/azclient/fileshareclient/interface.go | 2 +- .../fileshareclient/zz_generated_client.go | 14 +- .../interfaceclient/zz_generated_client.go | 32 +- .../ipgroupclient/zz_generated_client.go | 32 +- .../loadbalancerclient/zz_generated_client.go | 32 +- .../zz_generated_client.go | 32 +- 
.../pkg/azclient/mock_azclient/interface.go | 581 ++ .../zz_generated_client.go | 20 +- .../zz_generated_client.go | 32 +- .../privatezoneclient/zz_generated_client.go | 20 +- .../providerclient/zz_generated_client.go | 9 +- .../zz_generated_client.go | 32 +- .../zz_generated_client.go | 32 +- .../registryclient/zz_generated_client.go | 26 +- .../zz_generated_client.go | 9 +- .../routetableclient/zz_generated_client.go | 32 +- .../secretclient/zz_generated_client.go | 18 +- .../zz_generated_client.go | 32 +- .../snapshotclient/zz_generated_client.go | 28 +- .../zz_generated_client.go | 18 +- .../subnetclient/zz_generated_client.go | 32 +- .../pkg/azclient/utils/const.go | 2 +- .../pkg/azclient/utils/consts.go | 22 + .../vaultclient/zz_generated_client.go | 18 +- .../azclient/virtualmachineclient/custom.go | 21 + .../virtualmachineclient/interface.go | 6 + .../zz_generated_client.go | 28 +- .../zz_generated_client.go | 28 +- .../virtualmachinescalesetvmclient/custom.go | 31 +- .../interface.go | 5 + .../zz_generated_client.go | 22 +- .../zz_generated_client.go | 32 +- .../virtualnetworklinkclient/interface.go | 30 + .../zz_generated_client.go | 92 + .../diskclient/mockdiskclient/interface.go | 17 +- .../mockinterfaceclient/interface.go | 15 +- .../mockloadbalancerclient/interface.go | 23 +- .../azure_privatednsclient.go | 230 - .../privatednsclient/interface.go | 42 - .../mockprivatelinkserviceclient/interface.go | 17 +- .../mockpublicipclient/interface.go | 19 +- .../routeclient/mockrouteclient/interface.go | 11 +- .../mockroutetableclient/interface.go | 11 +- .../mocksecuritygroupclient/interface.go | 15 +- .../mocksnapshotclient/interface.go | 15 +- .../mockstorageaccountclient/interface.go | 19 +- .../mocksubnetclient/interface.go | 15 +- .../azure_virtualnetworklinksclient.go | 227 - .../virtualnetworklinksclient/interface.go | 43 - .../vmclient/mockvmclient/interface.go | 25 +- .../vmssclient/mockvmssclient/interface.go | 33 +- .../mockvmssvmclient/interface.go | 19 +- .../cloud-provider-azure/pkg/consts/consts.go | 2 + .../pkg/provider/azure.go | 74 +- .../pkg/provider/azure_controller_common.go | 69 +- .../pkg/provider/azure_controller_standard.go | 22 +- .../pkg/provider/azure_controller_vmss.go | 22 +- .../pkg/provider/azure_controller_vmssflex.go | 22 +- .../pkg/provider/azure_fakes.go | 4 +- .../pkg/provider/azure_loadbalancer.go | 603 +- .../azure_mock_loadbalancer_backendpool.go | 2 +- .../pkg/provider/azure_mock_vmsets.go | 2 +- .../pkg/provider/azure_standard.go | 13 +- .../pkg/provider/azure_storageaccount.go | 38 +- .../pkg/provider/azure_utils.go | 29 +- .../provider/loadbalancer/accesscontrol.go | 336 +- .../provider/loadbalancer/configuration.go | 134 + .../pkg/provider/loadbalancer/fnutil/map.go | 25 + .../pkg/provider/loadbalancer/fnutil/slice.go | 44 + .../pkg/provider/loadbalancer/iputil/addr.go | 49 + .../provider/loadbalancer/iputil/family.go | 55 + .../{netip.go => iputil/prefix.go} | 23 +- .../securitygroup/securitygroup.go | 461 ++ .../v4/fieldpath/pathelementmap.go | 45 +- .../v4/merge/conflict.go | 2 +- .../structured-merge-diff/v4/merge/update.go | 72 +- .../v4/schema/elements.go | 3 +- .../v4/schema/schemaschema.go | 3 +- .../structured-merge-diff/v4/typed/compare.go | 460 ++ .../structured-merge-diff/v4/typed/helpers.go | 21 +- .../structured-merge-diff/v4/typed/merge.go | 61 +- .../structured-merge-diff/v4/typed/parser.go | 12 +- .../structured-merge-diff/v4/typed/remove.go | 4 +- .../v4/typed/tofieldset.go | 24 +- 
.../structured-merge-diff/v4/typed/typed.go | 187 +- .../structured-merge-diff/v4/typed/union.go | 276 - .../v4/typed/validate.go | 14 +- .../v4/value/mapreflect.go | 2 +- .../v4/value/mapunstructured.go | 8 +- .../v4/value/reflectcache.go | 4 +- 784 files changed, 41560 insertions(+), 26086 deletions(-) delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/CHANGELOG.md delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/_meta.json delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/client.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/enums.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/models.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/privatezones.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/recordsets.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/version.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/virtualnetworklinks.go create mode 100644 vendor/github.com/distribution/reference/.gitattributes create mode 100644 vendor/github.com/distribution/reference/.gitignore create mode 100644 vendor/github.com/distribution/reference/.golangci.yml create mode 100644 vendor/github.com/distribution/reference/CODE-OF-CONDUCT.md create mode 100644 vendor/github.com/distribution/reference/CONTRIBUTING.md create mode 100644 vendor/github.com/distribution/reference/GOVERNANCE.md rename vendor/github.com/{docker/distribution => distribution/reference}/LICENSE (100%) create mode 100644 vendor/github.com/distribution/reference/MAINTAINERS create mode 100644 vendor/github.com/distribution/reference/Makefile create mode 100644 vendor/github.com/distribution/reference/README.md create mode 100644 vendor/github.com/distribution/reference/SECURITY.md create mode 100644 vendor/github.com/distribution/reference/distribution-logo.svg rename vendor/github.com/{docker => }/distribution/reference/helpers.go (94%) rename vendor/github.com/{docker => }/distribution/reference/normalize.go (52%) rename vendor/github.com/{docker => }/distribution/reference/reference.go (93%) create mode 100644 vendor/github.com/distribution/reference/regexp.go create mode 100644 vendor/github.com/distribution/reference/sort.go delete mode 100644 vendor/github.com/docker/distribution/digestset/set.go delete mode 100644 vendor/github.com/docker/distribution/reference/regexp.go create mode 100644 vendor/github.com/go-logr/logr/context.go create mode 100644 vendor/github.com/go-logr/logr/context_noslog.go create mode 100644 vendor/github.com/go-logr/logr/context_slog.go create mode 100644 vendor/github.com/go-logr/logr/funcr/slogsink.go rename vendor/github.com/go-logr/logr/{slogr => }/sloghandler.go (63%) create mode 100644 vendor/github.com/go-logr/logr/slogr.go rename vendor/github.com/go-logr/logr/{slogr => }/slogsink.go (82%) delete mode 100644 vendor/github.com/golang/mock/CONTRIBUTORS create mode 100644 vendor/github.com/google/cel-go/cel/validator.go create mode 100644 vendor/github.com/google/cel-go/checker/format.go rename vendor/github.com/google/cel-go/checker/{decls => }/scopes.go (81%) 
create mode 100644 vendor/github.com/google/cel-go/common/ast/BUILD.bazel create mode 100644 vendor/github.com/google/cel-go/common/ast/ast.go create mode 100644 vendor/github.com/google/cel-go/common/ast/expr.go create mode 100644 vendor/github.com/google/cel-go/common/decls/BUILD.bazel create mode 100644 vendor/github.com/google/cel-go/common/decls/decls.go create mode 100644 vendor/github.com/google/cel-go/common/functions/BUILD.bazel create mode 100644 vendor/github.com/google/cel-go/common/functions/functions.go create mode 100644 vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel create mode 100644 vendor/github.com/google/cel-go/common/stdlib/standard.go delete mode 100644 vendor/github.com/google/cel-go/common/types/type.go create mode 100644 vendor/github.com/google/cel-go/common/types/types.go create mode 100644 vendor/github.com/google/cel-go/ext/lists.go delete mode 100644 vendor/github.com/google/cel-go/interpreter/functions/standard.go create mode 100644 vendor/github.com/google/uuid/version6.go create mode 100644 vendor/github.com/google/uuid/version7.go create mode 100644 vendor/github.com/gorilla/websocket/.gitignore create mode 100644 vendor/github.com/gorilla/websocket/AUTHORS create mode 100644 vendor/github.com/gorilla/websocket/LICENSE create mode 100644 vendor/github.com/gorilla/websocket/README.md create mode 100644 vendor/github.com/gorilla/websocket/client.go create mode 100644 vendor/github.com/gorilla/websocket/compression.go create mode 100644 vendor/github.com/gorilla/websocket/conn.go create mode 100644 vendor/github.com/gorilla/websocket/doc.go create mode 100644 vendor/github.com/gorilla/websocket/join.go create mode 100644 vendor/github.com/gorilla/websocket/json.go create mode 100644 vendor/github.com/gorilla/websocket/mask.go create mode 100644 vendor/github.com/gorilla/websocket/mask_safe.go create mode 100644 vendor/github.com/gorilla/websocket/prepared.go create mode 100644 vendor/github.com/gorilla/websocket/proxy.go create mode 100644 vendor/github.com/gorilla/websocket/server.go create mode 100644 vendor/github.com/gorilla/websocket/tls_handshake.go create mode 100644 vendor/github.com/gorilla/websocket/tls_handshake_116.go create mode 100644 vendor/github.com/gorilla/websocket/util.go create mode 100644 vendor/github.com/gorilla/websocket/x_net_proxy.go create mode 100644 vendor/github.com/mxk/go-flowrate/LICENSE create mode 100644 vendor/github.com/mxk/go-flowrate/flowrate/flowrate.go create mode 100644 vendor/github.com/mxk/go-flowrate/flowrate/io.go create mode 100644 vendor/github.com/mxk/go-flowrate/flowrate/util.go delete mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go delete mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go delete mode 100644 vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md delete mode 100644 vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go rename vendor/{github.com/golang => go.uber.org}/mock/AUTHORS (100%) rename vendor/{github.com/golang => go.uber.org}/mock/LICENSE (100%) rename vendor/{github.com/golang => go.uber.org}/mock/gomock/call.go (78%) rename vendor/{github.com/golang => go.uber.org}/mock/gomock/callset.go (70%) rename vendor/{github.com/golang => go.uber.org}/mock/gomock/controller.go (67%) create mode 100644 vendor/go.uber.org/mock/gomock/doc.go rename vendor/{github.com/golang => go.uber.org}/mock/gomock/matchers.go (63%) rename vendor/k8s.io/api/flowcontrol/{v1alpha1 => v1}/doc.go (73%) rename 
vendor/k8s.io/api/flowcontrol/{v1alpha1 => v1}/generated.pb.go (91%) rename vendor/k8s.io/api/flowcontrol/{v1alpha1 => v1}/generated.proto (94%) rename vendor/k8s.io/api/flowcontrol/{v1alpha1 => v1}/register.go (95%) rename vendor/k8s.io/api/flowcontrol/{v1alpha1 => v1}/types.go (88%) rename vendor/k8s.io/api/flowcontrol/{v1alpha1 => v1}/types_swagger_doc_generated.go (95%) rename vendor/k8s.io/api/flowcontrol/{v1alpha1 => v1}/zz_generated.deepcopy.go (99%) delete mode 100644 vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/proxy/dial.go rename vendor/{sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient => k8s.io/apimachinery/pkg/util/proxy}/doc.go (68%) create mode 100644 vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/defaults.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/validation/validation.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/cel/compile.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/cel/interface.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/cel/mapper.go create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/cel/compile.go create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/cel/interface.go create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/cel/matcher.go create mode 100644 vendor/k8s.io/apiserver/pkg/cel/common/equality.go create mode 100644 vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/combined.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go rename vendor/k8s.io/client-go/applyconfigurations/{policy/v1beta1/allowedcsidriver.go => core/v1/sleepaction.go} (51%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/exemptprioritylevelconfiguration.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/flowdistinguishermethod.go (87%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/flowschema.go (94%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/flowschemacondition.go (81%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/flowschemaspec.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/flowschemastatus.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/groupsubject.go (98%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/limitedprioritylevelconfiguration.go (90%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/limitresponse.go (88%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/nonresourcepolicyrule.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/policyruleswithsubjects.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/prioritylevelconfiguration.go (94%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => 
v1}/prioritylevelconfigurationcondition.go (81%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/prioritylevelconfigurationreference.go (98%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/prioritylevelconfigurationspec.go (92%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/prioritylevelconfigurationstatus.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/queuingconfiguration.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/resourcepolicyrule.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/serviceaccountsubject.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/subject.go (92%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/usersubject.go (98%) delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidrspec.go rename vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/{clustercidr.go => servicecidr.go} (68%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedflexvolume.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedhostpath.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/fsgroupstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/hostportrange.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/idrange.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicyspec.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasgroupstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasuserstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runtimeclassstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/selinuxstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/supplementalgroupsstrategyoptions.go rename vendor/k8s.io/client-go/applyconfigurations/{policy/v1beta1/podsecuritypolicy.go => storage/v1alpha1/volumeattributesclass.go} (59%) rename vendor/k8s.io/client-go/informers/flowcontrol/{v1alpha1 => v1}/flowschema.go (78%) rename vendor/k8s.io/client-go/informers/flowcontrol/{v1alpha1 => v1}/interface.go (99%) rename vendor/k8s.io/client-go/informers/flowcontrol/{v1alpha1 => v1}/prioritylevelconfiguration.go (76%) rename vendor/k8s.io/client-go/informers/networking/v1alpha1/{clustercidr.go => servicecidr.go} (69%) delete mode 100644 vendor/k8s.io/client-go/informers/policy/v1beta1/podsecuritypolicy.go create mode 100644 vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattributesclass.go rename vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/{v1alpha1 => v1}/doc.go (97%) rename vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/{v1alpha1 => v1}/fake/doc.go (100%) rename vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/{v1alpha1 => v1}/fake/fake_flowcontrol_client.go (72%) rename 
vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/{v1alpha1 => v1}/fake/fake_flowschema.go (67%) rename vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/{v1alpha1 => v1}/fake/fake_prioritylevelconfiguration.go (64%) rename vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/{v1alpha1 => v1}/flowcontrol_client.go (64%) rename vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/{v1alpha1 => v1}/flowschema.go (69%) rename vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/{v1alpha1 => v1}/generated_expansion.go (97%) rename vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/{v1alpha1 => v1}/prioritylevelconfiguration.go (69%) delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/clustercidr.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_clustercidr.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go rename vendor/k8s.io/client-go/listers/flowcontrol/{v1alpha1 => v1}/expansion_generated.go (98%) rename vendor/k8s.io/client-go/listers/flowcontrol/{v1alpha1 => v1}/flowschema.go (79%) rename vendor/k8s.io/client-go/listers/flowcontrol/{v1alpha1 => v1}/prioritylevelconfiguration.go (79%) rename vendor/k8s.io/client-go/listers/networking/v1alpha1/{clustercidr.go => servicecidr.go} (54%) delete mode 100644 vendor/k8s.io/client-go/listers/policy/v1beta1/podsecuritypolicy.go create mode 100644 vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go create mode 100644 vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go create mode 100644 vendor/k8s.io/client-go/tools/internal/events/interfaces.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/fallback.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/spdy.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v5.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/websocket.go create mode 100644 vendor/k8s.io/client-go/transport/websocket/roundtripper.go create mode 100644 vendor/k8s.io/component-helpers/node/util/sysctl/namespace.go create mode 100644 vendor/k8s.io/component-helpers/node/util/sysctl/sysctl.go delete mode 100644 vendor/k8s.io/kube-openapi/pkg/openapiconv/convert.go delete mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go create mode 100644 vendor/k8s.io/kubernetes/pkg/api/v1/service/util.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/validation/names.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/filesystem/defaultfs.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/filesystem/filesystem.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/filesystem/util_unix.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/filesystem/util_windows.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/filesystem/watcher.go create mode 100644 vendor/k8s.io/kubernetes/pkg/volume/util/volumeattributesclass.go create mode 100644 
vendor/k8s.io/kubernetes/test/e2e/framework/bugs.go create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient/interface.go create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/consts.go create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient/interface.go create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient/zz_generated_client.go delete mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednsclient/azure_privatednsclient.go delete mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednsclient/interface.go delete mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/azure_virtualnetworklinksclient.go delete mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/interface.go create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/configuration.go create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/fnutil/map.go create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/fnutil/slice.go create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil/addr.go create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil/family.go rename vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/{netip.go => iputil/prefix.go} (56%) create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/securitygroup/securitygroup.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go diff --git a/go.mod b/go.mod index aee1261cd..2386ee967 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,6 @@ require ( github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect github.com/container-storage-interface/spec v1.8.0 github.com/gofrs/uuid v4.4.0+incompatible // indirect - github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.3 github.com/imdario/mergo v0.3.9 // indirect github.com/kubernetes-csi/csi-lib-utils v0.16.0 @@ -20,26 +19,27 @@ require ( github.com/pborman/uuid v1.2.1 github.com/pelletier/go-toml v1.9.5 github.com/stretchr/testify v1.8.4 + go.uber.org/mock v0.4.0 golang.org/x/net v0.19.0 google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.32.0 - k8s.io/api v0.28.4 - k8s.io/apimachinery v0.28.4 - k8s.io/client-go v0.28.4 - k8s.io/component-base v0.28.4 + k8s.io/api v0.29.0 + k8s.io/apimachinery v0.29.0 + k8s.io/client-go v0.29.0 + k8s.io/component-base v0.29.0 k8s.io/klog/v2 v2.110.1 - k8s.io/kubernetes v1.28.5 - k8s.io/mount-utils v0.28.4 + k8s.io/kubernetes v1.29.0 + k8s.io/mount-utils v0.29.0 k8s.io/utils v0.0.0-20231127182322-b307cd553661 - sigs.k8s.io/cloud-provider-azure v1.27.1-0.20231213062409-f1ce7de3fdcb + sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240108181414-3a9c6084f8a5 sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20231208022044-b9ede3fc98e9 sigs.k8s.io/yaml v1.4.0 ) require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 - github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.3.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 + github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect + 
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.4.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.6.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0 @@ -65,13 +65,12 @@ require ( github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/docker/distribution v2.8.2+incompatible // indirect - github.com/emicklei/go-restful/v3 v3.10.2 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v5.7.0+incompatible // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-ini/ini v1.67.0 - github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect @@ -81,12 +80,12 @@ require ( github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang-jwt/jwt/v5 v5.0.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/google/cel-go v0.16.1 // indirect + github.com/google/cel-go v0.17.7 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20230602010524-ada837c32108 // indirect - github.com/google/uuid v1.4.0 // indirect + github.com/google/uuid v1.5.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -102,7 +101,7 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/onsi/ginkgo/v2 v2.13.2 github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/selinux v1.10.0 // indirect + github.com/opencontainers/selinux v1.11.0 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pkg/errors v0.9.1 github.com/pmezard/go-difflib v1.0.0 // indirect @@ -114,9 +113,9 @@ require ( github.com/spf13/cobra v1.8.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect - go.etcd.io/etcd/api/v3 v3.5.9 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect - go.etcd.io/etcd/client/v3 v3.5.9 // indirect + go.etcd.io/etcd/api/v3 v3.5.10 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.10 // indirect + go.etcd.io/etcd/client/v3 v3.5.10 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 // indirect go.opentelemetry.io/otel v1.20.0 // indirect @@ -132,7 +131,7 @@ require ( golang.org/x/crypto v0.17.0 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/oauth2 v0.11.0 // indirect - golang.org/x/sync v0.5.0 + golang.org/x/sync v0.6.0 golang.org/x/sys v0.15.0 // indirect golang.org/x/term v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect @@ -147,49 +146,55 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect 
k8s.io/apiextensions-apiserver v0.0.0 // indirect - k8s.io/apiserver v0.28.4 - k8s.io/cloud-provider v0.28.4 // indirect - k8s.io/component-helpers v0.28.4 // indirect - k8s.io/controller-manager v0.28.4 // indirect - k8s.io/kms v0.28.4 // indirect - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect + k8s.io/apiserver v0.29.0 + k8s.io/cloud-provider v0.29.0 // indirect + k8s.io/component-helpers v0.29.0 // indirect + k8s.io/controller-manager v0.29.0 // indirect + k8s.io/kms v0.29.0 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect k8s.io/kubectl v0.0.0 // indirect - k8s.io/kubelet v0.28.4 // indirect - k8s.io/pod-security-admission v0.28.4 - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect - sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20231205023417-1ba5a224ab0e + k8s.io/kubelet v0.29.0 // indirect + k8s.io/pod-security-admission v0.29.0 + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect + sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240105075710-c4d4895a970b sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect +) + +require ( + github.com/distribution/reference v0.5.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect ) replace ( - k8s.io/api => k8s.io/api v0.28.4 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.4 - k8s.io/apimachinery => k8s.io/apimachinery v0.28.4 - k8s.io/apiserver => k8s.io/apiserver v0.28.4 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.28.4 - k8s.io/client-go => k8s.io/client-go v0.28.4 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.28.4 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.28.4 - k8s.io/code-generator => k8s.io/code-generator v0.28.4 - k8s.io/component-base => k8s.io/component-base v0.28.4 - k8s.io/component-helpers => k8s.io/component-helpers v0.28.4 - k8s.io/controller-manager => k8s.io/controller-manager v0.28.4 - k8s.io/cri-api => k8s.io/cri-api v0.28.4 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.28.4 - k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.28.4 - k8s.io/endpointslice => k8s.io/endpointslice v0.28.4 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.28.4 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.28.4 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.28.4 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.28.4 - k8s.io/kubectl => k8s.io/kubectl v0.28.4 - k8s.io/kubelet => k8s.io/kubelet v0.28.4 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.28.4 - k8s.io/metrics => k8s.io/metrics v0.28.4 - k8s.io/mount-utils => k8s.io/mount-utils v0.28.4 - k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.28.4 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.28.4 - k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.28.4 - k8s.io/sample-controller => k8s.io/sample-controller v0.28.4 + k8s.io/api => k8s.io/api v0.29.0 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.29.0 + k8s.io/apimachinery => k8s.io/apimachinery v0.29.0 + k8s.io/apiserver => k8s.io/apiserver v0.29.0 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.29.0 + k8s.io/client-go => k8s.io/client-go v0.29.0 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.29.0 + k8s.io/cluster-bootstrap => 
k8s.io/cluster-bootstrap v0.29.0 + k8s.io/code-generator => k8s.io/code-generator v0.29.0 + k8s.io/component-base => k8s.io/component-base v0.29.0 + k8s.io/component-helpers => k8s.io/component-helpers v0.29.0 + k8s.io/controller-manager => k8s.io/controller-manager v0.29.0 + k8s.io/cri-api => k8s.io/cri-api v0.29.0 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.29.0 + k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.29.0 + k8s.io/endpointslice => k8s.io/endpointslice v0.29.0 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.29.0 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.29.0 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.29.0 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.29.0 + k8s.io/kubectl => k8s.io/kubectl v0.29.0 + k8s.io/kubelet => k8s.io/kubelet v0.29.0 + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.29.0 + k8s.io/metrics => k8s.io/metrics v0.29.0 + k8s.io/mount-utils => k8s.io/mount-utils v0.29.0 + k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.29.0 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.29.0 + k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.29.0 + k8s.io/sample-controller => k8s.io/sample-controller v0.29.0 ) diff --git a/go.sum b/go.sum index a1f1b1e8a..3eacd79f9 100644 --- a/go.sum +++ b/go.sum @@ -5,14 +5,14 @@ cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGB cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 h1:d81/ng9rET2YqdVkVwkb6EXeRrLJIwyGnJcAlAWKwhs= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.3.0 h1:qgs/VAMSR+9qFhwTw4OwF2NbVuw+2m83pVZJjqkKQMw= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.3.0/go.mod h1:uYt4CfhkJA9o0FN7jfE5minm/i4nUE4MjGUJkzB6Zs8= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.4.0 h1:QfV5XZt6iNa2aWMAt96CZEbfJ7kgG/qYIpq465Shr5E= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.4.0/go.mod h1:uYt4CfhkJA9o0FN7jfE5minm/i4nUE4MjGUJkzB6Zs8= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0 h1:DWlwvVV5r/Wy1561nZ3wrpI1/vDIBRY/Wd1HWaRBZWA= 
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0/go.mod h1:E7ltexgRDmeJ0fJWv0D/HLwY2xbDdN+uv+X2uZtOx3w= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.6.0 h1:AAIdAyPkFff6XTct2lQCxOWN/+LnA41S7kIkzKaMbyE= @@ -86,14 +86,14 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= -github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= -github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= @@ -105,8 +105,9 @@ github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyT github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= @@ -134,8 +135,6 @@ github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= -github.com/golang/mock v1.6.0/go.mod 
h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= @@ -143,8 +142,8 @@ github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.16.1 h1:3hZfSNiAU3KOiNtxuFXVp5WFy4hf/Ly3Sa4/7F8SXNo= -github.com/google/cel-go v0.16.1/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ= +github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -157,10 +156,11 @@ github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/pprof v0.0.0-20230602010524-ada837c32108 h1:y+JfwMOPwQwIrnh3TUPwwtOAhONoppkHiSa4sQBoK2k= github.com/google/pprof v0.0.0-20230602010524-ada837c32108/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= +github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= @@ -208,14 +208,16 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo/v2 v2.13.2 h1:Bi2gGVkfn6gQcjNjZJVO8Gf0FHzMPf2phUei9tejVMs= github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM= github.com/onsi/gomega v1.30.0 
h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK99DRLDhyU= -github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= +github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= @@ -270,24 +272,23 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= -go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= -go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs= -go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k= -go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE= -go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4= -go.etcd.io/etcd/client/v2 v2.305.9 h1:YZ2OLi0OvR0H75AcgSUajjd5uqKDKocQUqROTG11jIo= -go.etcd.io/etcd/client/v2 v2.305.9/go.mod h1:0NBdNx9wbxtEQLwAQtrDHwx58m02vXpDcgSYI2seohQ= -go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E= -go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA= -go.etcd.io/etcd/pkg/v3 v3.5.9 h1:6R2jg/aWd/zB9+9JxmijDKStGJAPFsX3e6BeJkMi6eQ= -go.etcd.io/etcd/pkg/v3 v3.5.9/go.mod h1:BZl0SAShQFk0IpLWR78T/+pyt8AruMHhTNNX73hkNVY= -go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI= -go.etcd.io/etcd/raft/v3 v3.5.9/go.mod h1:WnFkqzFdZua4LVlVXQEGhmooLeyS7mqzS4Pf4BCVqXg= -go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0= -go.etcd.io/etcd/server/v3 v3.5.9/go.mod h1:GgI1fQClQCFIzuVjlvdbMxNbnISt90gdfYyqiAIt65g= +go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= +go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k= +go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI= +go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0= +go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U= +go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4= +go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA= 
+go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao= +go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc= +go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM= +go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs= +go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA= +go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc= +go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg= +go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 h1:1eHu3/pUSWaOgltNK3WJFaywKsTIr/PwvHyDmi0lQA0= @@ -312,6 +313,8 @@ go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0 go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= +go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= @@ -330,7 +333,6 @@ golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqR golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= @@ -341,7 +343,6 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= @@ -353,18 +354,14 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -393,7 +390,6 @@ golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= @@ -433,55 +429,55 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY= -k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0= -k8s.io/apiextensions-apiserver v0.28.4 h1:AZpKY/7wQ8n+ZYDtNHbAJBb+N4AXXJvyZx6ww6yAJvU= -k8s.io/apiextensions-apiserver v0.28.4/go.mod h1:pgQIZ1U8eJSMQcENew/0ShUTlePcSGFq6dxSxf2mwPM= -k8s.io/apimachinery v0.28.4 h1:zOSJe1mc+GxuMnFzD4Z/U1wst50X28ZNsn5bhgIIao8= -k8s.io/apimachinery v0.28.4/go.mod h1:wI37ncBvfAoswfq626yPTe6Bz1c22L7uaJ8dho83mgg= -k8s.io/apiserver v0.28.4 
h1:BJXlaQbAU/RXYX2lRz+E1oPe3G3TKlozMMCZWu5GMgg= -k8s.io/apiserver v0.28.4/go.mod h1:Idq71oXugKZoVGUUL2wgBCTHbUR+FYTWa4rq9j4n23w= -k8s.io/client-go v0.28.4 h1:Np5ocjlZcTrkyRJ3+T3PkXDpe4UpatQxj85+xjaD2wY= -k8s.io/client-go v0.28.4/go.mod h1:0VDZFpgoZfelyP5Wqu0/r/TRYcLYuJ2U1KEeoaPa1N4= -k8s.io/cloud-provider v0.28.4 h1:7obmeuJJ5CYTO9HANDqemf/d2v95U+F0t8aeH4jNOsQ= -k8s.io/cloud-provider v0.28.4/go.mod h1:xbhmGZ7wRHgXFP3SNsvdmFRO87KJIvirDYQA5ydMgGA= -k8s.io/component-base v0.28.4 h1:c/iQLWPdUgI90O+T9TeECg8o7N3YJTiuz2sKxILYcYo= -k8s.io/component-base v0.28.4/go.mod h1:m9hR0uvqXDybiGL2nf/3Lf0MerAfQXzkfWhUY58JUbU= -k8s.io/component-helpers v0.28.4 h1:+X9VXT5+jUsRdC26JyMZ8Fjfln7mSjgumafocE509C4= -k8s.io/component-helpers v0.28.4/go.mod h1:8LzMalOQ0K10tkBJWBWq8h0HTI9HDPx4WT3QvTFn9Ro= -k8s.io/controller-manager v0.28.4 h1:8uJmo1pD6fWYk4mC/JfZQU6zPvuCgEHf3pd5G39ldDU= -k8s.io/controller-manager v0.28.4/go.mod h1:pnO+UK2mcWNu1MxucqI8xHPD/8UBm04IUmp2u/3vbnM= -k8s.io/csi-translation-lib v0.28.4 h1:4TrU2zefZGU5HQCyPZvcPxkS6IowqZ/jBs2Qi/dPUpc= -k8s.io/csi-translation-lib v0.28.4/go.mod h1:oxwDdx0hyVqViINOUF7TGrVt51eqsOkQ0BTI+A9QcQs= +k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= +k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA= +k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3Q4lV0= +k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc= +k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o= +k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= +k8s.io/apiserver v0.29.0 h1:Y1xEMjJkP+BIi0GSEv1BBrf1jLU9UPfAnnGGbbDdp7o= +k8s.io/apiserver v0.29.0/go.mod h1:31n78PsRKPmfpee7/l9NYEv67u6hOL6AfcE761HapDM= +k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= +k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= +k8s.io/cloud-provider v0.29.0 h1:Qgk/jHsSKGRk/ltTlN6e7eaNuuamLROOzVBd0RPp94M= +k8s.io/cloud-provider v0.29.0/go.mod h1:gBCt7YYKFV4oUcJ/0xF9lS/9il4MxKunJ+ZKvh39WGo= +k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s= +k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M= +k8s.io/component-helpers v0.29.0 h1:Y8W70NGeitKxWwhsPo/vEQbQx5VqJV+3xfLpP3V1VxU= +k8s.io/component-helpers v0.29.0/go.mod h1:j2coxVfmzTOXWSE6sta0MTgNSr572Dcx68F6DD+8fWc= +k8s.io/controller-manager v0.29.0 h1:kEv9sKLnjDkoSqeouWp2lZ8P33an5wrDJpOMqoyD7pc= +k8s.io/controller-manager v0.29.0/go.mod h1:UKtadWkULF5bfX7vu3hHppzY/hz88C03t70GItg/x08= +k8s.io/csi-translation-lib v0.29.0 h1:we4X1yUlDikvm5Rv0dwMuPHNw6KwjwsQiAuOPWXha8M= +k8s.io/csi-translation-lib v0.29.0/go.mod h1:Cp6t3CNBSm1dXS17V8IImUjkqfIB6KCj8Fs8wf6uyTA= k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= -k8s.io/kms v0.28.4 h1:PMgY/3CQTWP9eIKmNQiTgjLIZ0ns6O+voagzD2/4mSg= -k8s.io/kms v0.28.4/go.mod h1:HL4/lR/bhjAJPbqycKtfhWiKh1Sp21cpHOL8P4oo87w= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= -k8s.io/kubectl v0.28.4 h1:gWpUXW/T7aFne+rchYeHkyB8eVDl5UZce8G4X//kjUQ= -k8s.io/kubectl v0.28.4/go.mod h1:CKOccVx3l+3MmDbkXtIUtibq93nN2hkDR99XDCn7c/c= -k8s.io/kubelet v0.28.4 h1:Ypxy1jaFlSXFXbg/yVtFOU2ZxErBVRJfLu8+t4s7Dtw= -k8s.io/kubelet v0.28.4/go.mod 
h1:w1wPI12liY/aeC70nqKYcNNkr6/nbyvdMB7P7wmww2o= -k8s.io/kubernetes v1.28.5 h1:sqgm0Tk6lqfsfcLUxZ0rfNmtugtABSLUhCqhXtfV1C0= -k8s.io/kubernetes v1.28.5/go.mod h1:lRbvAZMgn+BrOtymmJAp85Jsa7GlL01av29axiPJ+E0= -k8s.io/mount-utils v0.28.4 h1:5GOZLm2dXi2fr+MKY8hS6kdV5reXrZBiK7848O5MVD0= -k8s.io/mount-utils v0.28.4/go.mod h1:ceMAZ+Nzlk8zOwN205YXXGJRGmf1o0/XIwsKnG44p0I= -k8s.io/pod-security-admission v0.28.4 h1:b9d6zfKNjkawrO2gF7rBr5XoSZqPfE6UjKLNjgXYrr0= -k8s.io/pod-security-admission v0.28.4/go.mod h1:MVYrZx0Q6ewsZ05Ml2+Ox03HQMAVjO60oombQNmJ44E= +k8s.io/kms v0.29.0 h1:KJ1zaZt74CgvgV3NR7tnURJ/mJOKC5X3nwon/WdwgxI= +k8s.io/kms v0.29.0/go.mod h1:mB0f9HLxRXeXUfHfn1A7rpwOlzXI1gIWu86z6buNoYA= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/kubectl v0.29.0 h1:Oqi48gXjikDhrBF67AYuZRTcJV4lg2l42GmvsP7FmYI= +k8s.io/kubectl v0.29.0/go.mod h1:0jMjGWIcMIQzmUaMgAzhSELv5WtHo2a8pq67DtviAJs= +k8s.io/kubelet v0.29.0 h1:SX5hlznTBcGIrS1scaf8r8p6m3e475KMifwt9i12iOk= +k8s.io/kubelet v0.29.0/go.mod h1:kvKS2+Bz2tgDOG1S1q0TH2z1DasNuVF+8p6Aw7xvKkI= +k8s.io/kubernetes v1.29.0 h1:DOLN7g8+nnAYBi8JHoW0+/MCrZKDPIqAxzLCXDXd0cg= +k8s.io/kubernetes v1.29.0/go.mod h1:9kztbUQf9stVDcIYXx+BX3nuGCsAQDsuClkGMpPs3pA= +k8s.io/mount-utils v0.29.0 h1:KcUE0bFHONQC10V3SuLWQ6+l8nmJggw9lKLpDftIshI= +k8s.io/mount-utils v0.29.0/go.mod h1:N3lDK/G1B8R/IkAt4NhHyqB07OqEr7P763z3TNge94U= +k8s.io/pod-security-admission v0.29.0 h1:tY/ldtkbBCulMYVSWg6ZDLlgDYDWy6rLj8e/AgmwSj4= +k8s.io/pod-security-admission v0.29.0/go.mod h1:bGIeKCzU0Q0Nl185NHmqcMCiOjTcqTrBfAQaeupwq0E= k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI= k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0= -sigs.k8s.io/cloud-provider-azure v1.27.1-0.20231213062409-f1ce7de3fdcb h1:YApm24ngCVkpQTUxu0/wYV/oiccfqWEPZnX1BbpadKY= -sigs.k8s.io/cloud-provider-azure v1.27.1-0.20231213062409-f1ce7de3fdcb/go.mod h1:UkVMiNELbKLa07K/ubQ+vg8AK3XFyd2FMr5vCIYk0Pg= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20231205023417-1ba5a224ab0e h1:U001A7jnOOi8eiYceYeCLK2S3rTX4K2atR8uNDw+SL8= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20231205023417-1ba5a224ab0e/go.mod h1:dckGAqm0wUQNqqvCEeWhfXKL7DB/r9zchDq9xdcF/Qk= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= +sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240108181414-3a9c6084f8a5 h1:omR8VXOIPc20bbG2zxImx9WHLPnTXK2uo9q3LNPanMg= +sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240108181414-3a9c6084f8a5/go.mod h1:0WCrYlWxqk3/AptztkqPk1r9Gr3IULSHat7LipAA1sI= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240105075710-c4d4895a970b h1:onCsa2FoC9HGIgW+eQYJI8/IZnefwCcU9rF7ZKtD7f0= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240105075710-c4d4895a970b/go.mod h1:seH99Elt7KgWOOonCmzRcB1yLouqK7B7+l8RoSbqaYE= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20231208022044-b9ede3fc98e9 h1:0XsdZlKjVI0UZYhvg3VbXCPFRRQS5VL1idrTKgzJjnc= 
sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20231208022044-b9ede3fc98e9/go.mod h1:dDc0Ixf5VI01TkTj83ENW1hH5jImGJsdKhQgFRyQsyA= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/pkg/blob/azure_test.go b/pkg/blob/azure_test.go index 4ba69d1a7..2ed0d3654 100644 --- a/pkg/blob/azure_test.go +++ b/pkg/blob/azure_test.go @@ -26,7 +26,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" "github.com/Azure/azure-sdk-for-go/storage" - "github.com/golang/mock/gomock" + "go.uber.org/mock/gomock" "github.com/Azure/go-autorest/autorest/azure" "github.com/stretchr/testify/assert" diff --git a/pkg/blob/blob_test.go b/pkg/blob/blob_test.go index 1e4b21b4d..86cc92459 100644 --- a/pkg/blob/blob_test.go +++ b/pkg/blob/blob_test.go @@ -29,8 +29,8 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage" - "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" "golang.org/x/sync/errgroup" v1api "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/pkg/blob/controllerserver_test.go b/pkg/blob/controllerserver_test.go index 02eea3d91..abf82d459 100644 --- a/pkg/blob/controllerserver_test.go +++ b/pkg/blob/controllerserver_test.go @@ -26,8 +26,8 @@ import ( "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage" "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "k8s.io/apimachinery/pkg/util/wait" diff --git a/pkg/blob/nodeserver_test.go b/pkg/blob/nodeserver_test.go index 91e4909d2..e22c1bc28 100644 --- a/pkg/blob/nodeserver_test.go +++ b/pkg/blob/nodeserver_test.go @@ -36,8 +36,8 @@ import ( "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage" "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" mount "k8s.io/mount-utils" utilexec "k8s.io/utils/exec" diff --git a/pkg/util/util_mock.go b/pkg/util/util_mock.go index 6764d3fcf..3420fd845 100644 --- a/pkg/util/util_mock.go +++ b/pkg/util/util_mock.go @@ -24,7 +24,7 @@ package util import ( reflect "reflect" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockEXEC is a mock of EXEC interface. 
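
The test-file hunks above (pkg/blob/*_test.go and pkg/util/util_mock.go) replace the archived `github.com/golang/mock/gomock` import with its maintained fork `go.uber.org/mock/gomock`, which go.sum now pins at v0.4.0. The fork keeps the gomock API, so only the import path changes and the generated mocks keep working. A minimal sketch of the unchanged usage under that assumption (the test name below is illustrative, not part of this patch):

```go
package util_test

import (
	"testing"

	"go.uber.org/mock/gomock" // was: github.com/golang/mock/gomock
)

// Minimal sketch: after the import swap, controller setup is identical.
// gomock.NewController keeps the same signature, and with go.uber.org/mock
// it also registers a cleanup on *testing.T, so an explicit Finish call is
// optional (but still valid, as in the existing tests).
func TestControllerLifecycle(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// Generated mocks such as NewMockEXEC(ctrl) from pkg/util/util_mock.go
	// are constructed from this controller exactly as before.
	_ = ctrl
}
```
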
diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go index edbf6d0d3..0e97a7b5f 100644 --- a/pkg/util/util_test.go +++ b/pkg/util/util_test.go @@ -23,8 +23,8 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" ) func TestRoundUpBytes(t *testing.T) { diff --git a/test/e2e/suite_test.go b/test/e2e/suite_test.go index 8c6b3c641..3584fc746 100644 --- a/test/e2e/suite_test.go +++ b/test/e2e/suite_test.go @@ -63,8 +63,17 @@ type testCmd struct { } func TestMain(m *testing.M) { - handleFlags() + flag.StringVar(&projectRoot, "project-root", "", "path to the blob csi driver project root, used for script execution") + flag.Parse() + if projectRoot == "" { + klog.Fatal("project-root must be set") + } + config.CopyFlags(config.Flags, flag.CommandLine) + framework.RegisterCommonFlags(flag.CommandLine) + framework.RegisterClusterFlags(flag.CommandLine) + flag.Parse() + framework.AfterReadingAllFlags(&framework.TestContext) os.Exit(m.Run()) } @@ -229,27 +238,5 @@ func checkAccountCreationLeak(ctx context.Context) { ginkgo.By(fmt.Sprintf("GetAccountNumByResourceGroup(%s) returns %d accounts", creds.ResourceGroup, accountNum)) accountLimitInTest := 20 - framework.ExpectEqual(accountNum >= accountLimitInTest, false, fmt.Sprintf("current account num %d should not exceed %d", accountNum, accountLimitInTest)) -} - -// handleFlags sets up all flags and parses the command line. -func handleFlags() { - registerFlags() - handleFramworkFlags() -} - -func registerFlags() { - flag.StringVar(&projectRoot, "project-root", "", "path to the blob csi driver project root, used for script execution") - flag.Parse() - if projectRoot == "" { - klog.Fatal("project-root must be set") - } -} - -func handleFramworkFlags() { - config.CopyFlags(config.Flags, flag.CommandLine) - framework.RegisterCommonFlags(flag.CommandLine) - framework.RegisterClusterFlags(flag.CommandLine) - flag.Parse() - framework.AfterReadingAllFlags(&framework.TestContext) + gomega.Expect(accountNum >= accountLimitInTest).To(gomega.BeFalse()) } diff --git a/test/e2e/testsuites/testsuites.go b/test/e2e/testsuites/testsuites.go index 7a4656aa5..ed94145df 100644 --- a/test/e2e/testsuites/testsuites.go +++ b/test/e2e/testsuites/testsuites.go @@ -234,7 +234,7 @@ func generatePVC(namespace, storageClassName, claimSize string, volumeMode v1.Pe AccessModes: []v1.PersistentVolumeAccessMode{ v1.ReadWriteMany, }, - Resources: v1.ResourceRequirements{ + Resources: v1.VolumeResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceName(v1.ResourceStorage): resource.MustParse(claimSize), }, diff --git a/test/utils/azure/azure_helper.go b/test/utils/azure/azure_helper.go index a368e56a2..c5e5d7be4 100644 --- a/test/utils/azure/azure_helper.go +++ b/test/utils/azure/azure_helper.go @@ -47,10 +47,7 @@ func GetClient(cloud, subscriptionID, clientID, tenantID, clientSecret string) ( if err != nil { return nil, err } - cred, err := credProvider.GetAzIdentity() - if err != nil { - return nil, err - } + cred := credProvider.GetAzIdentity() factory, err := azclient.NewClientFactory(&azclient.ClientFactoryConfig{ SubscriptionID: subscriptionID, }, armConfig, cred) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index aa30abf37..5c8411cb5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,15 @@ # 
Release History +## 1.9.1 (2023-12-11) + +### Bugs Fixed + +* The `retry-after-ms` and `x-ms-retry-after-ms` headers weren't being checked during retries. + +### Other Changes + +* Update dependencies. + ## 1.9.0 (2023-11-06) ### Breaking Changes diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 272f06155..bb93daee6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -22,11 +22,13 @@ const ( HeaderLocation = "Location" HeaderOperationLocation = "Operation-Location" HeaderRetryAfter = "Retry-After" + HeaderRetryAfterMS = "Retry-After-Ms" HeaderUserAgent = "User-Agent" HeaderWWWAuthenticate = "WWW-Authenticate" HeaderXMSClientRequestID = "x-ms-client-request-id" HeaderXMSRequestID = "x-ms-request-id" HeaderXMSErrorCode = "x-ms-error-code" + HeaderXMSRetryAfterMS = "x-ms-retry-after-ms" ) const BearerTokenPrefix = "Bearer " @@ -38,5 +40,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.9.0" + Version = "v1.9.1" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go index 16bc105f4..d3da2c5fd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go @@ -44,22 +44,64 @@ func Delay(ctx context.Context, delay time.Duration) error { } } -// RetryAfter returns non-zero if the response contains a Retry-After header value. +// RetryAfter returns non-zero if the response contains one of the headers with a "retry after" value. +// Headers are checked in the following order: retry-after-ms, x-ms-retry-after-ms, retry-after func RetryAfter(resp *http.Response) time.Duration { if resp == nil { return 0 } - ra := resp.Header.Get(HeaderRetryAfter) - if ra == "" { - return 0 + + type retryData struct { + header string + units time.Duration + + // custom is used when the regular algorithm failed and is optional. + // the returned duration is used verbatim (units is not applied). 
+ custom func(string) time.Duration } - // retry-after values are expressed in either number of - // seconds or an HTTP-date indicating when to try again - if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 { - return time.Duration(retryAfter) * time.Second - } else if t, err := time.Parse(time.RFC1123, ra); err == nil { - return time.Until(t) + + nop := func(string) time.Duration { return 0 } + + // the headers are listed in order of preference + retries := []retryData{ + { + header: HeaderRetryAfterMS, + units: time.Millisecond, + custom: nop, + }, + { + header: HeaderXMSRetryAfterMS, + units: time.Millisecond, + custom: nop, + }, + { + header: HeaderRetryAfter, + units: time.Second, + + // retry-after values are expressed in either number of + // seconds or an HTTP-date indicating when to try again + custom: func(ra string) time.Duration { + t, err := time.Parse(time.RFC1123, ra) + if err != nil { + return 0 + } + return time.Until(t) + }, + }, } + + for _, retry := range retries { + v := resp.Header.Get(retry.header) + if v == "" { + continue + } + if retryAfter, _ := strconv.Atoi(v); retryAfter > 0 { + return time.Duration(retryAfter) * retry.units + } else if d := retry.custom(v); d > 0 { + return d + } + } + return 0 } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md index 77577d574..b19244d46 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md @@ -1,5 +1,64 @@ # Release History +## 5.4.0 (2023-12-22) +### Features Added + +- New value `ConfidentialVMEncryptionTypeNonPersistedTPM` added to enum type `ConfidentialVMEncryptionType` +- New value `ReplicationStatusTypesUefiSettings` added to enum type `ReplicationStatusTypes` +- New value `SecurityEncryptionTypesNonPersistedTPM` added to enum type `SecurityEncryptionTypes` +- New enum type `Mode` with values `ModeAudit`, `ModeEnforce` +- New enum type `SSHEncryptionTypes` with values `SSHEncryptionTypesEd25519`, `SSHEncryptionTypesRSA` +- New enum type `UefiKeyType` with values `UefiKeyTypeSHA256`, `UefiKeyTypeX509` +- New enum type `UefiSignatureTemplateName` with values `UefiSignatureTemplateNameMicrosoftUefiCertificateAuthorityTemplate`, `UefiSignatureTemplateNameMicrosoftWindowsTemplate`, `UefiSignatureTemplateNameNoSignatureTemplate` +- New function `*DedicatedHostsClient.BeginRedeploy(context.Context, string, string, string, *DedicatedHostsClientBeginRedeployOptions) (*runtime.Poller[DedicatedHostsClientRedeployResponse], error)` +- New function `*VirtualMachineScaleSetVMsClient.BeginApproveRollingUpgrade(context.Context, string, string, string, *VirtualMachineScaleSetVMsClientBeginApproveRollingUpgradeOptions) (*runtime.Poller[VirtualMachineScaleSetVMsClientApproveRollingUpgradeResponse], error)` +- New function `*VirtualMachineScaleSetVMsClient.BeginAttachDetachDataDisks(context.Context, string, string, string, AttachDetachDataDisksRequest, *VirtualMachineScaleSetVMsClientBeginAttachDetachDataDisksOptions) (*runtime.Poller[VirtualMachineScaleSetVMsClientAttachDetachDataDisksResponse], error)` +- New function `*VirtualMachineScaleSetsClient.BeginApproveRollingUpgrade(context.Context, string, string, *VirtualMachineScaleSetsClientBeginApproveRollingUpgradeOptions) 
(*runtime.Poller[VirtualMachineScaleSetsClientApproveRollingUpgradeResponse], error)` +- New function `*VirtualMachinesClient.BeginAttachDetachDataDisks(context.Context, string, string, AttachDetachDataDisksRequest, *VirtualMachinesClientBeginAttachDetachDataDisksOptions) (*runtime.Poller[VirtualMachinesClientAttachDetachDataDisksResponse], error)` +- New struct `AttachDetachDataDisksRequest` +- New struct `CommunityGalleryMetadata` +- New struct `CommunityGalleryProperties` +- New struct `DataDisksToAttach` +- New struct `DataDisksToDetach` +- New struct `EncryptionIdentity` +- New struct `GalleryImageVersionUefiSettings` +- New struct `ImageVersionSecurityProfile` +- New struct `ProxyAgentSettings` +- New struct `ResiliencyPolicy` +- New struct `ResilientVMCreationPolicy` +- New struct `ResilientVMDeletionPolicy` +- New struct `ResourceSharingProfile` +- New struct `SSHGenerateKeyPairInputParameters` +- New struct `SharedGalleryProperties` +- New struct `UefiKey` +- New struct `UefiKeySignatures` +- New field `OSRollingUpgradeDeferral` in struct `AutomaticOSUpgradePolicy` +- New field `SharedSubscriptionIDs` in struct `CapacityReservationGroupInstanceView` +- New field `SharingProfile` in struct `CapacityReservationGroupProperties` +- New field `Properties` in struct `CommunityGallery` +- New field `ArtifactTags`, `Disclaimer` in struct `CommunityGalleryImageProperties` +- New field `ArtifactTags`, `Disclaimer` in struct `CommunityGalleryImageVersionProperties` +- New field `SecurityProfile` in struct `GalleryImageVersionProperties` +- New field `DiskControllerType` in struct `RestorePointSourceVMStorageProfile` +- New field `Parameters` in struct `SSHPublicKeysClientGenerateKeyPairOptions` +- New field `EncryptionIdentity`, `ProxyAgentSettings` in struct `SecurityProfile` +- New field `Properties` in struct `SharedGallery` +- New field `ArtifactTags` in struct `SharedGalleryImageProperties` +- New field `ArtifactTags` in struct `SharedGalleryImageVersionProperties` +- New field `Etag`, `ManagedBy` in struct `VirtualMachine` +- New field `IsVMInStandbyPool` in struct `VirtualMachineInstanceView` +- New field `Etag` in struct `VirtualMachineScaleSet` +- New field `ResiliencyPolicy` in struct `VirtualMachineScaleSetProperties` +- New field `ResiliencyPolicy` in struct `VirtualMachineScaleSetUpdateProperties` +- New field `Etag` in struct `VirtualMachineScaleSetVM` +- New field `TimeCreated` in struct `VirtualMachineScaleSetVMProfile` +- New field `IfMatch`, `IfNoneMatch` in struct `VirtualMachineScaleSetVMsClientBeginUpdateOptions` +- New field `IfMatch`, `IfNoneMatch` in struct `VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions` +- New field `IfMatch`, `IfNoneMatch` in struct `VirtualMachineScaleSetsClientBeginUpdateOptions` +- New field `IfMatch`, `IfNoneMatch` in struct `VirtualMachinesClientBeginCreateOrUpdateOptions` +- New field `IfMatch`, `IfNoneMatch` in struct `VirtualMachinesClientBeginUpdateOptions` + + ## 5.3.0 (2023-11-24) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/README.md index 87ce9c30e..7c8d1e3e1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/README.md @@ -1,6 +1,6 @@ # Azure Compute Module for Go 
-[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5) The `armcompute` module provides operations for working with Azure Compute. @@ -20,7 +20,7 @@ This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for ve Install the Azure Compute module: ```sh -go get github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 +go get github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 ``` ## Authorization diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json index 109819f62..1feae4c5c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/resourcemanager/compute/armcompute", - "Tag": "go/resourcemanager/compute/armcompute_367222fc88" + "Tag": "go/resourcemanager/compute/armcompute_323718962d" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md index f40cc3252..9d88c8a6d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md @@ -5,9 +5,9 @@ ``` yaml azure-arm: true require: -- https://github.com/Azure/azure-rest-api-specs/blob/c78b5d8bd3aff2d82a5f034d9164b1a9ac030e09/specification/compute/resource-manager/readme.md -- https://github.com/Azure/azure-rest-api-specs/blob/c78b5d8bd3aff2d82a5f034d9164b1a9ac030e09/specification/compute/resource-manager/readme.go.md +- https://github.com/Azure/azure-rest-api-specs/blob/60679ee3db06e93eb73faa0587fed93ed843d6dc/specification/compute/resource-manager/readme.md +- https://github.com/Azure/azure-rest-api-specs/blob/60679ee3db06e93eb73faa0587fed93ed843d6dc/specification/compute/resource-manager/readme.go.md license-header: MICROSOFT_MIT_NO_VERSION -module-version: 5.3.0 -tag: package-2023-07-01 +module-version: 5.4.0 +tag: package-2023-09-01 ``` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/availabilitysets_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/availabilitysets_client.go index ce901d88a..3b568f54a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/availabilitysets_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/availabilitysets_client.go @@ -47,7 +47,7 @@ func NewAvailabilitySetsClient(subscriptionID string, credential azcore.TokenCre // CreateOrUpdate - Create or update an availability set. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - availabilitySetName - The name of the availability set. // - parameters - Parameters supplied to the Create Availability Set operation. @@ -95,7 +95,7 @@ func (client *AvailabilitySetsClient) createOrUpdateCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -116,7 +116,7 @@ func (client *AvailabilitySetsClient) createOrUpdateHandleResponse(resp *http.Re // Delete - Delete an availability set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - availabilitySetName - The name of the availability set. // - options - AvailabilitySetsClientDeleteOptions contains the optional parameters for the AvailabilitySetsClient.Delete method. @@ -161,7 +161,7 @@ func (client *AvailabilitySetsClient) deleteCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -170,7 +170,7 @@ func (client *AvailabilitySetsClient) deleteCreateRequest(ctx context.Context, r // Get - Retrieves information about an availability set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - availabilitySetName - The name of the availability set. // - options - AvailabilitySetsClientGetOptions contains the optional parameters for the AvailabilitySetsClient.Get method. @@ -216,7 +216,7 @@ func (client *AvailabilitySetsClient) getCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -233,7 +233,7 @@ func (client *AvailabilitySetsClient) getHandleResponse(resp *http.Response) (Av // NewListPager - Lists all availability sets in a resource group. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - options - AvailabilitySetsClientListOptions contains the optional parameters for the AvailabilitySetsClient.NewListPager // method. @@ -276,7 +276,7 @@ func (client *AvailabilitySetsClient) listCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -294,7 +294,7 @@ func (client *AvailabilitySetsClient) listHandleResponse(resp *http.Response) (A // NewListAvailableSizesPager - Lists all available virtual machine sizes that can be used to create a new virtual machine // in an existing availability set. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - availabilitySetName - The name of the availability set. // - options - AvailabilitySetsClientListAvailableSizesOptions contains the optional parameters for the AvailabilitySetsClient.NewListAvailableSizesPager @@ -343,7 +343,7 @@ func (client *AvailabilitySetsClient) listAvailableSizesCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -360,7 +360,7 @@ func (client *AvailabilitySetsClient) listAvailableSizesHandleResponse(resp *htt // NewListBySubscriptionPager - Lists all availability sets in a subscription. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - options - AvailabilitySetsClientListBySubscriptionOptions contains the optional parameters for the AvailabilitySetsClient.NewListBySubscriptionPager // method. func (client *AvailabilitySetsClient) NewListBySubscriptionPager(options *AvailabilitySetsClientListBySubscriptionOptions) *runtime.Pager[AvailabilitySetsClientListBySubscriptionResponse] { @@ -398,7 +398,7 @@ func (client *AvailabilitySetsClient) listBySubscriptionCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } @@ -419,7 +419,7 @@ func (client *AvailabilitySetsClient) listBySubscriptionHandleResponse(resp *htt // Update - Update an availability set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - availabilitySetName - The name of the availability set. // - parameters - Parameters supplied to the Update Availability Set operation. @@ -466,7 +466,7 @@ func (client *AvailabilitySetsClient) updateCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservationgroups_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservationgroups_client.go index 246dbd2b5..96e32e966 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservationgroups_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservationgroups_client.go @@ -45,11 +45,11 @@ func NewCapacityReservationGroupsClient(subscriptionID string, credential azcore } // CreateOrUpdate - The operation to create or update a capacity reservation group. When updating a capacity reservation group, -// only tags may be modified. Please refer to https://aka.ms/CapacityReservation for more -// details. +// only tags and sharing profile may be modified. Please refer to +// https://aka.ms/CapacityReservation for more details. 
// If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. // - parameters - Parameters supplied to the Create capacity reservation Group. @@ -97,7 +97,7 @@ func (client *CapacityReservationGroupsClient) createOrUpdateCreateRequest(ctx c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -120,7 +120,7 @@ func (client *CapacityReservationGroupsClient) createOrUpdateHandleResponse(resp // the reservation group have also been deleted. Please refer to https://aka.ms/CapacityReservation for more details. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. // - options - CapacityReservationGroupsClientDeleteOptions contains the optional parameters for the CapacityReservationGroupsClient.Delete @@ -166,7 +166,7 @@ func (client *CapacityReservationGroupsClient) deleteCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -175,7 +175,7 @@ func (client *CapacityReservationGroupsClient) deleteCreateRequest(ctx context.C // Get - The operation that retrieves information about a capacity reservation group. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. // - options - CapacityReservationGroupsClientGetOptions contains the optional parameters for the CapacityReservationGroupsClient.Get @@ -225,7 +225,7 @@ func (client *CapacityReservationGroupsClient) getCreateRequest(ctx context.Cont if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -243,7 +243,7 @@ func (client *CapacityReservationGroupsClient) getHandleResponse(resp *http.Resp // NewListByResourceGroupPager - Lists all of the capacity reservation groups in the specified resource group. Use the nextLink // property in the response to get the next page of capacity reservation groups. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - options - CapacityReservationGroupsClientListByResourceGroupOptions contains the optional parameters for the CapacityReservationGroupsClient.NewListByResourceGroupPager // method. 
@@ -286,7 +286,7 @@ func (client *CapacityReservationGroupsClient) listByResourceGroupCreateRequest( return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } @@ -307,7 +307,7 @@ func (client *CapacityReservationGroupsClient) listByResourceGroupHandleResponse // NewListBySubscriptionPager - Lists all of the capacity reservation groups in the subscription. Use the nextLink property // in the response to get the next page of capacity reservation groups. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - options - CapacityReservationGroupsClientListBySubscriptionOptions contains the optional parameters for the CapacityReservationGroupsClient.NewListBySubscriptionPager // method. func (client *CapacityReservationGroupsClient) NewListBySubscriptionPager(options *CapacityReservationGroupsClientListBySubscriptionOptions) *runtime.Pager[CapacityReservationGroupsClientListBySubscriptionResponse] { @@ -345,7 +345,7 @@ func (client *CapacityReservationGroupsClient) listBySubscriptionCreateRequest(c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } @@ -363,11 +363,11 @@ func (client *CapacityReservationGroupsClient) listBySubscriptionHandleResponse( return result, nil } -// Update - The operation to update a capacity reservation group. When updating a capacity reservation group, only tags may -// be modified. +// Update - The operation to update a capacity reservation group. When updating a capacity reservation group, only tags and +// sharing profile may be modified. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. // - parameters - Parameters supplied to the Update capacity reservation Group operation. @@ -415,7 +415,7 @@ func (client *CapacityReservationGroupsClient) updateCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservations_client.go index 3b76610c7..951b0ac91 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservations_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservations_client.go @@ -49,7 +49,7 @@ func NewCapacityReservationsClient(subscriptionID string, credential azcore.Toke // details. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. 
// - capacityReservationGroupName - The name of the capacity reservation group. // - capacityReservationName - The name of the capacity reservation. @@ -78,7 +78,7 @@ func (client *CapacityReservationsClient) BeginCreateOrUpdate(ctx context.Contex // details. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *CapacityReservationsClient) createOrUpdate(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters CapacityReservation, options *CapacityReservationsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "CapacityReservationsClient.BeginCreateOrUpdate" @@ -124,7 +124,7 @@ func (client *CapacityReservationsClient) createOrUpdateCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -138,7 +138,7 @@ func (client *CapacityReservationsClient) createOrUpdateCreateRequest(ctx contex // https://aka.ms/CapacityReservation for more details. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. // - capacityReservationName - The name of the capacity reservation. @@ -166,7 +166,7 @@ func (client *CapacityReservationsClient) BeginDelete(ctx context.Context, resou // https://aka.ms/CapacityReservation for more details. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *CapacityReservationsClient) deleteOperation(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, options *CapacityReservationsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "CapacityReservationsClient.BeginDelete" @@ -212,7 +212,7 @@ func (client *CapacityReservationsClient) deleteCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -221,7 +221,7 @@ func (client *CapacityReservationsClient) deleteCreateRequest(ctx context.Contex // Get - The operation that retrieves information about the capacity reservation. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. // - capacityReservationName - The name of the capacity reservation. 
@@ -276,7 +276,7 @@ func (client *CapacityReservationsClient) getCreateRequest(ctx context.Context, if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -294,7 +294,7 @@ func (client *CapacityReservationsClient) getHandleResponse(resp *http.Response) // NewListByCapacityReservationGroupPager - Lists all of the capacity reservations in the specified capacity reservation group. // Use the nextLink property in the response to get the next page of capacity reservations. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. // - options - CapacityReservationsClientListByCapacityReservationGroupOptions contains the optional parameters for the CapacityReservationsClient.NewListByCapacityReservationGroupPager @@ -342,7 +342,7 @@ func (client *CapacityReservationsClient) listByCapacityReservationGroupCreateRe return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -360,7 +360,7 @@ func (client *CapacityReservationsClient) listByCapacityReservationGroupHandleRe // BeginUpdate - The operation to update a capacity reservation. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. // - capacityReservationName - The name of the capacity reservation. @@ -387,7 +387,7 @@ func (client *CapacityReservationsClient) BeginUpdate(ctx context.Context, resou // Update - The operation to update a capacity reservation. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *CapacityReservationsClient) update(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters CapacityReservationUpdate, options *CapacityReservationsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "CapacityReservationsClient.BeginUpdate" @@ -433,7 +433,7 @@ func (client *CapacityReservationsClient) updateCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleries_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleries_client.go index 3cda3a9d0..2bfc798f3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleries_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleries_client.go @@ -47,7 +47,7 @@ func NewCommunityGalleriesClient(subscriptionID string, credential azcore.TokenC // Get - Get a community gallery by gallery public name. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - location - Resource location. // - publicGalleryName - The public name of the community gallery. // - options - CommunityGalleriesClientGetOptions contains the optional parameters for the CommunityGalleriesClient.Get method. @@ -93,7 +93,7 @@ func (client *CommunityGalleriesClient) getCreateRequest(ctx context.Context, lo return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimages_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimages_client.go index 8d1c8c534..25b118f07 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimages_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimages_client.go @@ -47,7 +47,7 @@ func NewCommunityGalleryImagesClient(subscriptionID string, credential azcore.To // Get - Get a community gallery image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - location - Resource location. // - publicGalleryName - The public name of the community gallery. // - galleryImageName - The name of the community gallery image definition. 
@@ -99,7 +99,7 @@ func (client *CommunityGalleryImagesClient) getCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -116,7 +116,7 @@ func (client *CommunityGalleryImagesClient) getHandleResponse(resp *http.Respons // NewListPager - List community gallery images inside a gallery. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - location - Resource location. // - publicGalleryName - The public name of the community gallery. // - options - CommunityGalleryImagesClientListOptions contains the optional parameters for the CommunityGalleryImagesClient.NewListPager @@ -164,7 +164,7 @@ func (client *CommunityGalleryImagesClient) listCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimageversions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimageversions_client.go index f1c46f849..5e382b36d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimageversions_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimageversions_client.go @@ -47,7 +47,7 @@ func NewCommunityGalleryImageVersionsClient(subscriptionID string, credential az // Get - Get a community gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - location - Resource location. // - publicGalleryName - The public name of the community gallery. // - galleryImageName - The name of the community gallery image definition. @@ -106,7 +106,7 @@ func (client *CommunityGalleryImageVersionsClient) getCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -123,7 +123,7 @@ func (client *CommunityGalleryImageVersionsClient) getHandleResponse(resp *http. // NewListPager - List community gallery image versions inside an image. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - location - Resource location. // - publicGalleryName - The public name of the community gallery. // - galleryImageName - The name of the community gallery image definition. 
@@ -176,7 +176,7 @@ func (client *CommunityGalleryImageVersionsClient) listCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go index 4554f9355..d635d4f51 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go @@ -10,7 +10,7 @@ package armcompute const ( moduleName = "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute" - moduleVersion = "v5.3.0" + moduleVersion = "v5.4.0" ) type AccessLevel string @@ -214,6 +214,7 @@ const ( ConfidentialVMEncryptionTypeEncryptedVMGuestStateOnlyWithPmk ConfidentialVMEncryptionType = "EncryptedVMGuestStateOnlyWithPmk" ConfidentialVMEncryptionTypeEncryptedWithCmk ConfidentialVMEncryptionType = "EncryptedWithCmk" ConfidentialVMEncryptionTypeEncryptedWithPmk ConfidentialVMEncryptionType = "EncryptedWithPmk" + ConfidentialVMEncryptionTypeNonPersistedTPM ConfidentialVMEncryptionType = "NonPersistedTPM" ) // PossibleConfidentialVMEncryptionTypeValues returns the possible values for the ConfidentialVMEncryptionType const type. @@ -222,6 +223,7 @@ func PossibleConfidentialVMEncryptionTypeValues() []ConfidentialVMEncryptionType ConfidentialVMEncryptionTypeEncryptedVMGuestStateOnlyWithPmk, ConfidentialVMEncryptionTypeEncryptedWithCmk, ConfidentialVMEncryptionTypeEncryptedWithPmk, + ConfidentialVMEncryptionTypeNonPersistedTPM, } } @@ -899,11 +901,8 @@ func PossibleGalleryProvisioningStateValues() []GalleryProvisioningState { } } -// GallerySharingPermissionTypes - This property allows you to specify the permission of sharing gallery. -// Possible values are: -// Private -// Groups -// Community +// GallerySharingPermissionTypes - This property allows you to specify the permission of sharing gallery. Possible values +// are: Private, Groups, Community. type GallerySharingPermissionTypes string const ( @@ -1155,6 +1154,24 @@ func PossibleMaintenanceOperationResultCodeTypesValues() []MaintenanceOperationR } } +// Mode - Specifies the mode that ProxyAgent will execute on if the feature is enabled. ProxyAgent will start to audit or +// monitor but not enforce access control over requests to host endpoints in Audit mode, +// while in Enforce mode it will enforce access control. The default value is Enforce mode. +type Mode string + +const ( + ModeAudit Mode = "Audit" + ModeEnforce Mode = "Enforce" +) + +// PossibleModeValues returns the possible values for the Mode const type. +func PossibleModeValues() []Mode { + return []Mode{ + ModeAudit, + ModeEnforce, + } +} + // NetworkAPIVersion - specifies the Microsoft.Network API version used when creating networking resources in the Network // Interface Configurations type NetworkAPIVersion string @@ -1265,9 +1282,7 @@ func PossibleOperatingSystemTypeValues() []OperatingSystemType { } // OperatingSystemTypes - This property allows you to specify the supported type of the OS that application is built for. -// Possible values are: -// Windows -// Linux +// Possible values are: Windows, Linux. 
type OperatingSystemTypes string const ( @@ -1631,12 +1646,14 @@ type ReplicationStatusTypes string const ( ReplicationStatusTypesReplicationStatus ReplicationStatusTypes = "ReplicationStatus" + ReplicationStatusTypesUefiSettings ReplicationStatusTypes = "UefiSettings" ) // PossibleReplicationStatusTypesValues returns the possible values for the ReplicationStatusTypes const type. func PossibleReplicationStatusTypesValues() []ReplicationStatusTypes { return []ReplicationStatusTypes{ ReplicationStatusTypesReplicationStatus, + ReplicationStatusTypesUefiSettings, } } @@ -1798,13 +1815,32 @@ func PossibleRollingUpgradeStatusCodeValues() []RollingUpgradeStatusCode { } } +// SSHEncryptionTypes - The encryption type of the SSH keys to be generated. See SshEncryptionTypes for possible set of values. +// If not provided, will default to RSA +type SSHEncryptionTypes string + +const ( + SSHEncryptionTypesEd25519 SSHEncryptionTypes = "Ed25519" + SSHEncryptionTypesRSA SSHEncryptionTypes = "RSA" +) + +// PossibleSSHEncryptionTypesValues returns the possible values for the SSHEncryptionTypes const type. +func PossibleSSHEncryptionTypesValues() []SSHEncryptionTypes { + return []SSHEncryptionTypes{ + SSHEncryptionTypesEd25519, + SSHEncryptionTypesRSA, + } +} + // SecurityEncryptionTypes - Specifies the EncryptionType of the managed disk. It is set to DiskWithVMGuestState for encryption -// of the managed disk along with VMGuestState blob, and VMGuestStateOnly for encryption of just the -// VMGuestState blob. Note: It can be set for only Confidential VMs. +// of the managed disk along with VMGuestState blob, VMGuestStateOnly for encryption of just the +// VMGuestState blob, and NonPersistedTPM for not persisting firmware state in the VMGuestState blob.. Note: It can be set +// for only Confidential VMs. type SecurityEncryptionTypes string const ( SecurityEncryptionTypesDiskWithVMGuestState SecurityEncryptionTypes = "DiskWithVMGuestState" + SecurityEncryptionTypesNonPersistedTPM SecurityEncryptionTypes = "NonPersistedTPM" SecurityEncryptionTypesVMGuestStateOnly SecurityEncryptionTypes = "VMGuestStateOnly" ) @@ -1812,6 +1848,7 @@ const ( func PossibleSecurityEncryptionTypesValues() []SecurityEncryptionTypes { return []SecurityEncryptionTypes{ SecurityEncryptionTypesDiskWithVMGuestState, + SecurityEncryptionTypesNonPersistedTPM, SecurityEncryptionTypesVMGuestStateOnly, } } @@ -1895,10 +1932,8 @@ func PossibleSharedToValuesValues() []SharedToValues { } } -// SharingProfileGroupTypes - This property allows you to specify the type of sharing group. -// Possible values are: -// Subscriptions -// AADTenants +// SharingProfileGroupTypes - This property allows you to specify the type of sharing group. Possible values are: Subscriptions, +// AADTenants. type SharingProfileGroupTypes string const ( @@ -1934,11 +1969,8 @@ func PossibleSharingStateValues() []SharingState { } } -// SharingUpdateOperationTypes - This property allows you to specify the operation type of gallery sharing update. -// Possible values are: -// Add -// Remove -// Reset +// SharingUpdateOperationTypes - This property allows you to specify the operation type of gallery sharing update. Possible +// values are: Add, Remove, Reset. type SharingUpdateOperationTypes string const ( @@ -2047,6 +2079,40 @@ func PossibleStorageAccountTypesValues() []StorageAccountTypes { } } +// UefiKeyType - The type of key signature. 
+type UefiKeyType string + +const ( + UefiKeyTypeSHA256 UefiKeyType = "sha256" + UefiKeyTypeX509 UefiKeyType = "x509" +) + +// PossibleUefiKeyTypeValues returns the possible values for the UefiKeyType const type. +func PossibleUefiKeyTypeValues() []UefiKeyType { + return []UefiKeyType{ + UefiKeyTypeSHA256, + UefiKeyTypeX509, + } +} + +// UefiSignatureTemplateName - The name of the signature template that contains default UEFI keys. +type UefiSignatureTemplateName string + +const ( + UefiSignatureTemplateNameMicrosoftUefiCertificateAuthorityTemplate UefiSignatureTemplateName = "MicrosoftUefiCertificateAuthorityTemplate" + UefiSignatureTemplateNameMicrosoftWindowsTemplate UefiSignatureTemplateName = "MicrosoftWindowsTemplate" + UefiSignatureTemplateNameNoSignatureTemplate UefiSignatureTemplateName = "NoSignatureTemplate" +) + +// PossibleUefiSignatureTemplateNameValues returns the possible values for the UefiSignatureTemplateName const type. +func PossibleUefiSignatureTemplateNameValues() []UefiSignatureTemplateName { + return []UefiSignatureTemplateName{ + UefiSignatureTemplateNameMicrosoftUefiCertificateAuthorityTemplate, + UefiSignatureTemplateNameMicrosoftWindowsTemplate, + UefiSignatureTemplateNameNoSignatureTemplate, + } +} + // UpgradeMode - Specifies the mode of an upgrade to virtual machines in the scale set. // Possible values are: // Manual - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhostgroups_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhostgroups_client.go index 4a1eea66b..99434bfab 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhostgroups_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhostgroups_client.go @@ -48,7 +48,7 @@ func NewDedicatedHostGroupsClient(subscriptionID string, credential azcore.Token // see Dedicated Host Documentation [https://go.microsoft.com/fwlink/?linkid=2082596] // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - parameters - Parameters supplied to the Create Dedicated Host Group. @@ -96,7 +96,7 @@ func (client *DedicatedHostGroupsClient) createOrUpdateCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -117,7 +117,7 @@ func (client *DedicatedHostGroupsClient) createOrUpdateHandleResponse(resp *http // Delete - Delete a dedicated host group. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. 
// - options - DedicatedHostGroupsClientDeleteOptions contains the optional parameters for the DedicatedHostGroupsClient.Delete @@ -163,7 +163,7 @@ func (client *DedicatedHostGroupsClient) deleteCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -172,7 +172,7 @@ func (client *DedicatedHostGroupsClient) deleteCreateRequest(ctx context.Context // Get - Retrieves information about a dedicated host group. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - options - DedicatedHostGroupsClientGetOptions contains the optional parameters for the DedicatedHostGroupsClient.Get method. @@ -221,7 +221,7 @@ func (client *DedicatedHostGroupsClient) getCreateRequest(ctx context.Context, r if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -239,7 +239,7 @@ func (client *DedicatedHostGroupsClient) getHandleResponse(resp *http.Response) // NewListByResourceGroupPager - Lists all of the dedicated host groups in the specified resource group. Use the nextLink // property in the response to get the next page of dedicated host groups. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - options - DedicatedHostGroupsClientListByResourceGroupOptions contains the optional parameters for the DedicatedHostGroupsClient.NewListByResourceGroupPager // method. @@ -282,7 +282,7 @@ func (client *DedicatedHostGroupsClient) listByResourceGroupCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -300,7 +300,7 @@ func (client *DedicatedHostGroupsClient) listByResourceGroupHandleResponse(resp // NewListBySubscriptionPager - Lists all of the dedicated host groups in the subscription. Use the nextLink property in the // response to get the next page of dedicated host groups. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - options - DedicatedHostGroupsClientListBySubscriptionOptions contains the optional parameters for the DedicatedHostGroupsClient.NewListBySubscriptionPager // method. 
func (client *DedicatedHostGroupsClient) NewListBySubscriptionPager(options *DedicatedHostGroupsClientListBySubscriptionOptions) *runtime.Pager[DedicatedHostGroupsClientListBySubscriptionResponse] { @@ -338,7 +338,7 @@ func (client *DedicatedHostGroupsClient) listBySubscriptionCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -356,7 +356,7 @@ func (client *DedicatedHostGroupsClient) listBySubscriptionHandleResponse(resp * // Update - Update an dedicated host group. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - parameters - Parameters supplied to the Update Dedicated Host Group operation. @@ -404,7 +404,7 @@ func (client *DedicatedHostGroupsClient) updateCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhosts_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhosts_client.go index d91e4db2a..b50a3ec65 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhosts_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhosts_client.go @@ -47,7 +47,7 @@ func NewDedicatedHostsClient(subscriptionID string, credential azcore.TokenCrede // BeginCreateOrUpdate - Create or update a dedicated host . // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - hostName - The name of the dedicated host . @@ -74,7 +74,7 @@ func (client *DedicatedHostsClient) BeginCreateOrUpdate(ctx context.Context, res // CreateOrUpdate - Create or update a dedicated host . // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *DedicatedHostsClient) createOrUpdate(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, parameters DedicatedHost, options *DedicatedHostsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "DedicatedHostsClient.BeginCreateOrUpdate" @@ -120,7 +120,7 @@ func (client *DedicatedHostsClient) createOrUpdateCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -132,7 +132,7 @@ func (client *DedicatedHostsClient) createOrUpdateCreateRequest(ctx context.Cont // BeginDelete - Delete a dedicated host. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - hostName - The name of the dedicated host. @@ -158,7 +158,7 @@ func (client *DedicatedHostsClient) BeginDelete(ctx context.Context, resourceGro // Delete - Delete a dedicated host. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *DedicatedHostsClient) deleteOperation(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *DedicatedHostsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "DedicatedHostsClient.BeginDelete" @@ -204,7 +204,7 @@ func (client *DedicatedHostsClient) deleteCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -213,7 +213,7 @@ func (client *DedicatedHostsClient) deleteCreateRequest(ctx context.Context, res // Get - Retrieves information about a dedicated host. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - hostName - The name of the dedicated host. @@ -267,7 +267,7 @@ func (client *DedicatedHostsClient) getCreateRequest(ctx context.Context, resour if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -285,7 +285,7 @@ func (client *DedicatedHostsClient) getHandleResponse(resp *http.Response) (Dedi // NewListAvailableSizesPager - Lists all available dedicated host sizes to which the specified dedicated host can be resized. // NOTE: The dedicated host sizes provided can be used to only scale up the existing dedicated host. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. 
// - hostGroupName - The name of the dedicated host group. // - hostName - The name of the dedicated host. @@ -339,7 +339,7 @@ func (client *DedicatedHostsClient) listAvailableSizesCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -357,7 +357,7 @@ func (client *DedicatedHostsClient) listAvailableSizesHandleResponse(resp *http. // NewListByHostGroupPager - Lists all of the dedicated hosts in the specified dedicated host group. Use the nextLink property // in the response to get the next page of dedicated hosts. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - options - DedicatedHostsClientListByHostGroupOptions contains the optional parameters for the DedicatedHostsClient.NewListByHostGroupPager @@ -405,7 +405,7 @@ func (client *DedicatedHostsClient) listByHostGroupCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -420,13 +420,100 @@ func (client *DedicatedHostsClient) listByHostGroupHandleResponse(resp *http.Res return result, nil } +// BeginRedeploy - Redeploy the dedicated host. The operation will complete successfully once the dedicated host has migrated +// to a new node and is running. To determine the health of VMs deployed on the dedicated host +// after the redeploy check the Resource Health Center in the Azure Portal. Please refer to https://docs.microsoft.com/azure/service-health/resource-health-overview +// for more details. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-09-01 +// - resourceGroupName - The name of the resource group. +// - hostGroupName - The name of the dedicated host group. +// - hostName - The name of the dedicated host. +// - options - DedicatedHostsClientBeginRedeployOptions contains the optional parameters for the DedicatedHostsClient.BeginRedeploy +// method. +func (client *DedicatedHostsClient) BeginRedeploy(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *DedicatedHostsClientBeginRedeployOptions) (*runtime.Poller[DedicatedHostsClientRedeployResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.redeploy(ctx, resourceGroupName, hostGroupName, hostName, options) + if err != nil { + return nil, err + } + poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[DedicatedHostsClientRedeployResponse]{ + Tracer: client.internal.Tracer(), + }) + return poller, err + } else { + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[DedicatedHostsClientRedeployResponse]{ + Tracer: client.internal.Tracer(), + }) + } +} + +// Redeploy - Redeploy the dedicated host. The operation will complete successfully once the dedicated host has migrated to +// a new node and is running. To determine the health of VMs deployed on the dedicated host +// after the redeploy check the Resource Health Center in the Azure Portal. 
Please refer to https://docs.microsoft.com/azure/service-health/resource-health-overview +// for more details. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-09-01 +func (client *DedicatedHostsClient) redeploy(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *DedicatedHostsClientBeginRedeployOptions) (*http.Response, error) { + var err error + const operationName = "DedicatedHostsClient.BeginRedeploy" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.redeployCreateRequest(ctx, resourceGroupName, hostGroupName, hostName, options) + if err != nil { + return nil, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return nil, err + } + return httpResp, nil +} + +// redeployCreateRequest creates the Redeploy request. +func (client *DedicatedHostsClient) redeployCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *DedicatedHostsClientBeginRedeployOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}/redeploy" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if hostGroupName == "" { + return nil, errors.New("parameter hostGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{hostGroupName}", url.PathEscape(hostGroupName)) + if hostName == "" { + return nil, errors.New("parameter hostName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{hostName}", url.PathEscape(hostName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + // BeginRestart - Restart the dedicated host. The operation will complete successfully once the dedicated host has restarted // and is running. To determine the health of VMs deployed on the dedicated host after the // restart check the Resource Health Center in the Azure Portal. Please refer to https://docs.microsoft.com/azure/service-health/resource-health-overview // for more details. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - hostName - The name of the dedicated host. @@ -455,7 +542,7 @@ func (client *DedicatedHostsClient) BeginRestart(ctx context.Context, resourceGr // for more details. 
// If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *DedicatedHostsClient) restart(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *DedicatedHostsClientBeginRestartOptions) (*http.Response, error) { var err error const operationName = "DedicatedHostsClient.BeginRestart" @@ -501,7 +588,7 @@ func (client *DedicatedHostsClient) restartCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -510,7 +597,7 @@ func (client *DedicatedHostsClient) restartCreateRequest(ctx context.Context, re // BeginUpdate - Update a dedicated host . // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - hostName - The name of the dedicated host . @@ -537,7 +624,7 @@ func (client *DedicatedHostsClient) BeginUpdate(ctx context.Context, resourceGro // Update - Update a dedicated host . // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *DedicatedHostsClient) update(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, parameters DedicatedHostUpdate, options *DedicatedHostsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "DedicatedHostsClient.BeginUpdate" @@ -583,7 +670,7 @@ func (client *DedicatedHostsClient) updateCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleries_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleries_client.go index 5bd114c06..8fac494c7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleries_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleries_client.go @@ -47,7 +47,7 @@ func NewGalleriesClient(subscriptionID string, credential azcore.TokenCredential // BeginCreateOrUpdate - Create or update a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery. The allowed characters are alphabets and numbers with dots and periods // allowed in the middle. The maximum length is 80 characters. @@ -74,7 +74,7 @@ func (client *GalleriesClient) BeginCreateOrUpdate(ctx context.Context, resource // CreateOrUpdate - Create or update a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 func (client *GalleriesClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, gallery Gallery, options *GalleriesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleriesClient.BeginCreateOrUpdate" @@ -116,7 +116,7 @@ func (client *GalleriesClient) createOrUpdateCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, gallery); err != nil { @@ -128,7 +128,7 @@ func (client *GalleriesClient) createOrUpdateCreateRequest(ctx context.Context, // BeginDelete - Delete a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery to be deleted. // - options - GalleriesClientBeginDeleteOptions contains the optional parameters for the GalleriesClient.BeginDelete method. @@ -152,7 +152,7 @@ func (client *GalleriesClient) BeginDelete(ctx context.Context, resourceGroupNam // Delete - Delete a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 func (client *GalleriesClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, options *GalleriesClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "GalleriesClient.BeginDelete" @@ -194,7 +194,7 @@ func (client *GalleriesClient) deleteCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -203,7 +203,7 @@ func (client *GalleriesClient) deleteCreateRequest(ctx context.Context, resource // Get - Retrieves information about a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery. // - options - GalleriesClientGetOptions contains the optional parameters for the GalleriesClient.Get method. @@ -249,7 +249,7 @@ func (client *GalleriesClient) getCreateRequest(ctx context.Context, resourceGro return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") if options != nil && options.Select != nil { reqQP.Set("$select", string(*options.Select)) } @@ -272,7 +272,7 @@ func (client *GalleriesClient) getHandleResponse(resp *http.Response) (Galleries // NewListPager - List galleries under a subscription. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - options - GalleriesClientListOptions contains the optional parameters for the GalleriesClient.NewListPager method. 
func (client *GalleriesClient) NewListPager(options *GalleriesClientListOptions) *runtime.Pager[GalleriesClientListResponse] { return runtime.NewPager(runtime.PagingHandler[GalleriesClientListResponse]{ @@ -309,7 +309,7 @@ func (client *GalleriesClient) listCreateRequest(ctx context.Context, options *G return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -326,7 +326,7 @@ func (client *GalleriesClient) listHandleResponse(resp *http.Response) (Gallerie // NewListByResourceGroupPager - List galleries under a resource group. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - options - GalleriesClientListByResourceGroupOptions contains the optional parameters for the GalleriesClient.NewListByResourceGroupPager // method. @@ -369,7 +369,7 @@ func (client *GalleriesClient) listByResourceGroupCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -387,7 +387,7 @@ func (client *GalleriesClient) listByResourceGroupHandleResponse(resp *http.Resp // BeginUpdate - Update a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery. The allowed characters are alphabets and numbers with dots and periods // allowed in the middle. The maximum length is 80 characters. @@ -413,7 +413,7 @@ func (client *GalleriesClient) BeginUpdate(ctx context.Context, resourceGroupNam // Update - Update a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 func (client *GalleriesClient) update(ctx context.Context, resourceGroupName string, galleryName string, gallery GalleryUpdate, options *GalleriesClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleriesClient.BeginUpdate" @@ -455,7 +455,7 @@ func (client *GalleriesClient) updateCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, gallery); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplications_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplications_client.go index 897c18a2f..1ebec623b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplications_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplications_client.go @@ -47,7 +47,7 @@ func NewGalleryApplicationsClient(subscriptionID string, credential azcore.Token // BeginCreateOrUpdate - Create or update a gallery Application Definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition is to be created. // - galleryApplicationName - The name of the gallery Application Definition to be created or updated. The allowed characters @@ -76,7 +76,7 @@ func (client *GalleryApplicationsClient) BeginCreateOrUpdate(ctx context.Context // CreateOrUpdate - Create or update a gallery Application Definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 func (client *GalleryApplicationsClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication GalleryApplication, options *GalleryApplicationsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryApplicationsClient.BeginCreateOrUpdate" @@ -122,7 +122,7 @@ func (client *GalleryApplicationsClient) createOrUpdateCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryApplication); err != nil { @@ -134,7 +134,7 @@ func (client *GalleryApplicationsClient) createOrUpdateCreateRequest(ctx context // BeginDelete - Delete a gallery Application. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition is to be deleted. // - galleryApplicationName - The name of the gallery Application Definition to be deleted. 
@@ -160,7 +160,7 @@ func (client *GalleryApplicationsClient) BeginDelete(ctx context.Context, resour // Delete - Delete a gallery Application. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 func (client *GalleryApplicationsClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, options *GalleryApplicationsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "GalleryApplicationsClient.BeginDelete" @@ -206,7 +206,7 @@ func (client *GalleryApplicationsClient) deleteCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -215,7 +215,7 @@ func (client *GalleryApplicationsClient) deleteCreateRequest(ctx context.Context // Get - Retrieves information about a gallery Application Definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery from which the Application Definitions are to be retrieved. // - galleryApplicationName - The name of the gallery Application Definition to be retrieved. @@ -266,7 +266,7 @@ func (client *GalleryApplicationsClient) getCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -283,7 +283,7 @@ func (client *GalleryApplicationsClient) getHandleResponse(resp *http.Response) // NewListByGalleryPager - List gallery Application Definitions in a gallery. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery from which Application Definitions are to be listed. // - options - GalleryApplicationsClientListByGalleryOptions contains the optional parameters for the GalleryApplicationsClient.NewListByGalleryPager @@ -331,7 +331,7 @@ func (client *GalleryApplicationsClient) listByGalleryCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -349,7 +349,7 @@ func (client *GalleryApplicationsClient) listByGalleryHandleResponse(resp *http. // BeginUpdate - Update a gallery Application Definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition is to be updated. // - galleryApplicationName - The name of the gallery Application Definition to be updated. 
The allowed characters are alphabets @@ -378,7 +378,7 @@ func (client *GalleryApplicationsClient) BeginUpdate(ctx context.Context, resour // Update - Update a gallery Application Definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 func (client *GalleryApplicationsClient) update(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication GalleryApplicationUpdate, options *GalleryApplicationsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryApplicationsClient.BeginUpdate" @@ -424,7 +424,7 @@ func (client *GalleryApplicationsClient) updateCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryApplication); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplicationversions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplicationversions_client.go index 3148aa31d..3d885cb93 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplicationversions_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplicationversions_client.go @@ -47,7 +47,7 @@ func NewGalleryApplicationVersionsClient(subscriptionID string, credential azcor // BeginCreateOrUpdate - Create or update a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition resides. // - galleryApplicationName - The name of the gallery Application Definition in which the Application Version is to be created. @@ -77,7 +77,7 @@ func (client *GalleryApplicationVersionsClient) BeginCreateOrUpdate(ctx context. // CreateOrUpdate - Create or update a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 func (client *GalleryApplicationVersionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion GalleryApplicationVersion, options *GalleryApplicationVersionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryApplicationVersionsClient.BeginCreateOrUpdate" @@ -127,7 +127,7 @@ func (client *GalleryApplicationVersionsClient) createOrUpdateCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryApplicationVersion); err != nil { @@ -139,7 +139,7 @@ func (client *GalleryApplicationVersionsClient) createOrUpdateCreateRequest(ctx // BeginDelete - Delete a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition resides. // - galleryApplicationName - The name of the gallery Application Definition in which the Application Version resides. @@ -166,7 +166,7 @@ func (client *GalleryApplicationVersionsClient) BeginDelete(ctx context.Context, // Delete - Delete a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 func (client *GalleryApplicationVersionsClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, options *GalleryApplicationVersionsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "GalleryApplicationVersionsClient.BeginDelete" @@ -216,7 +216,7 @@ func (client *GalleryApplicationVersionsClient) deleteCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -225,7 +225,7 @@ func (client *GalleryApplicationVersionsClient) deleteCreateRequest(ctx context. // Get - Retrieves information about a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition resides. // - galleryApplicationName - The name of the gallery Application Definition in which the Application Version resides. 
@@ -285,7 +285,7 @@ func (client *GalleryApplicationVersionsClient) getCreateRequest(ctx context.Con if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -302,7 +302,7 @@ func (client *GalleryApplicationVersionsClient) getHandleResponse(resp *http.Res // NewListByGalleryApplicationPager - List gallery Application Versions in a gallery Application Definition. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition resides. // - galleryApplicationName - The name of the Shared Application Gallery Application Definition from which the Application Versions @@ -356,7 +356,7 @@ func (client *GalleryApplicationVersionsClient) listByGalleryApplicationCreateRe return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -374,7 +374,7 @@ func (client *GalleryApplicationVersionsClient) listByGalleryApplicationHandleRe // BeginUpdate - Update a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition resides. // - galleryApplicationName - The name of the gallery Application Definition in which the Application Version is to be updated. @@ -404,7 +404,7 @@ func (client *GalleryApplicationVersionsClient) BeginUpdate(ctx context.Context, // Update - Update a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 func (client *GalleryApplicationVersionsClient) update(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion GalleryApplicationVersionUpdate, options *GalleryApplicationVersionsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryApplicationVersionsClient.BeginUpdate" @@ -454,7 +454,7 @@ func (client *GalleryApplicationVersionsClient) updateCreateRequest(ctx context. 
return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryApplicationVersion); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimages_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimages_client.go index 84fc57a2b..6a684b3ed 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimages_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimages_client.go @@ -47,7 +47,7 @@ func NewGalleryImagesClient(subscriptionID string, credential azcore.TokenCreden // BeginCreateOrUpdate - Create or update a gallery image definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition is to be created. // - galleryImageName - The name of the gallery image definition to be created or updated. The allowed characters are alphabets @@ -76,7 +76,7 @@ func (client *GalleryImagesClient) BeginCreateOrUpdate(ctx context.Context, reso // CreateOrUpdate - Create or update a gallery image definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 func (client *GalleryImagesClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImage, options *GalleryImagesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryImagesClient.BeginCreateOrUpdate" @@ -122,7 +122,7 @@ func (client *GalleryImagesClient) createOrUpdateCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryImage); err != nil { @@ -134,7 +134,7 @@ func (client *GalleryImagesClient) createOrUpdateCreateRequest(ctx context.Conte // BeginDelete - Delete a gallery image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition is to be deleted. // - galleryImageName - The name of the gallery image definition to be deleted. @@ -160,7 +160,7 @@ func (client *GalleryImagesClient) BeginDelete(ctx context.Context, resourceGrou // Delete - Delete a gallery image. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 func (client *GalleryImagesClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, options *GalleryImagesClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "GalleryImagesClient.BeginDelete" @@ -206,7 +206,7 @@ func (client *GalleryImagesClient) deleteCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -215,7 +215,7 @@ func (client *GalleryImagesClient) deleteCreateRequest(ctx context.Context, reso // Get - Retrieves information about a gallery image definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery from which the Image Definitions are to be retrieved. // - galleryImageName - The name of the gallery image definition to be retrieved. @@ -266,7 +266,7 @@ func (client *GalleryImagesClient) getCreateRequest(ctx context.Context, resourc return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -283,7 +283,7 @@ func (client *GalleryImagesClient) getHandleResponse(resp *http.Response) (Galle // NewListByGalleryPager - List gallery image definitions in a gallery. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery from which Image Definitions are to be listed. // - options - GalleryImagesClientListByGalleryOptions contains the optional parameters for the GalleryImagesClient.NewListByGalleryPager @@ -331,7 +331,7 @@ func (client *GalleryImagesClient) listByGalleryCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -349,7 +349,7 @@ func (client *GalleryImagesClient) listByGalleryHandleResponse(resp *http.Respon // BeginUpdate - Update a gallery image definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition is to be updated. // - galleryImageName - The name of the gallery image definition to be updated. The allowed characters are alphabets and numbers @@ -377,7 +377,7 @@ func (client *GalleryImagesClient) BeginUpdate(ctx context.Context, resourceGrou // Update - Update a gallery image definition. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 func (client *GalleryImagesClient) update(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImageUpdate, options *GalleryImagesClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryImagesClient.BeginUpdate" @@ -423,7 +423,7 @@ func (client *GalleryImagesClient) updateCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryImage); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimageversions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimageversions_client.go index 7bbe1f00f..05f3cf352 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimageversions_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimageversions_client.go @@ -47,7 +47,7 @@ func NewGalleryImageVersionsClient(subscriptionID string, credential azcore.Toke // BeginCreateOrUpdate - Create or update a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition resides. // - galleryImageName - The name of the gallery image definition in which the Image Version is to be created. @@ -77,7 +77,7 @@ func (client *GalleryImageVersionsClient) BeginCreateOrUpdate(ctx context.Contex // CreateOrUpdate - Create or update a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 func (client *GalleryImageVersionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersion, options *GalleryImageVersionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryImageVersionsClient.BeginCreateOrUpdate" @@ -127,7 +127,7 @@ func (client *GalleryImageVersionsClient) createOrUpdateCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryImageVersion); err != nil { @@ -139,7 +139,7 @@ func (client *GalleryImageVersionsClient) createOrUpdateCreateRequest(ctx contex // BeginDelete - Delete a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition resides. 
// - galleryImageName - The name of the gallery image definition in which the Image Version resides. @@ -166,7 +166,7 @@ func (client *GalleryImageVersionsClient) BeginDelete(ctx context.Context, resou // Delete - Delete a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 func (client *GalleryImageVersionsClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, options *GalleryImageVersionsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "GalleryImageVersionsClient.BeginDelete" @@ -216,7 +216,7 @@ func (client *GalleryImageVersionsClient) deleteCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -225,7 +225,7 @@ func (client *GalleryImageVersionsClient) deleteCreateRequest(ctx context.Contex // Get - Retrieves information about a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition resides. // - galleryImageName - The name of the gallery image definition in which the Image Version resides. @@ -285,7 +285,7 @@ func (client *GalleryImageVersionsClient) getCreateRequest(ctx context.Context, if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -302,7 +302,7 @@ func (client *GalleryImageVersionsClient) getHandleResponse(resp *http.Response) // NewListByGalleryImagePager - List gallery image versions in a gallery image definition. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition resides. // - galleryImageName - The name of the Shared Image Gallery Image Definition from which the Image Versions are to be listed. @@ -355,7 +355,7 @@ func (client *GalleryImageVersionsClient) listByGalleryImageCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -373,7 +373,7 @@ func (client *GalleryImageVersionsClient) listByGalleryImageHandleResponse(resp // BeginUpdate - Update a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition resides. // - galleryImageName - The name of the gallery image definition in which the Image Version is to be updated. 
@@ -403,7 +403,7 @@ func (client *GalleryImageVersionsClient) BeginUpdate(ctx context.Context, resou // Update - Update a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 func (client *GalleryImageVersionsClient) update(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersionUpdate, options *GalleryImageVersionsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryImageVersionsClient.BeginUpdate" @@ -453,7 +453,7 @@ func (client *GalleryImageVersionsClient) updateCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryImageVersion); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/gallerysharingprofile_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/gallerysharingprofile_client.go index e53a8f08a..28d024e97 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/gallerysharingprofile_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/gallerysharingprofile_client.go @@ -47,7 +47,7 @@ func NewGallerySharingProfileClient(subscriptionID string, credential azcore.Tok // BeginUpdate - Update sharing profile of a gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery. // - sharingUpdate - Parameters supplied to the update gallery sharing profile. @@ -73,7 +73,7 @@ func (client *GallerySharingProfileClient) BeginUpdate(ctx context.Context, reso // Update - Update sharing profile of a gallery. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 func (client *GallerySharingProfileClient) update(ctx context.Context, resourceGroupName string, galleryName string, sharingUpdate SharingUpdate, options *GallerySharingProfileClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "GallerySharingProfileClient.BeginUpdate" @@ -115,7 +115,7 @@ func (client *GallerySharingProfileClient) updateCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, sharingUpdate); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/images_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/images_client.go index e97d6fa30..5c5ab8a93 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/images_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/images_client.go @@ -47,7 +47,7 @@ func NewImagesClient(subscriptionID string, credential azcore.TokenCredential, o // BeginCreateOrUpdate - Create or update an image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - imageName - The name of the image. // - parameters - Parameters supplied to the Create Image operation. @@ -73,7 +73,7 @@ func (client *ImagesClient) BeginCreateOrUpdate(ctx context.Context, resourceGro // CreateOrUpdate - Create or update an image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *ImagesClient) createOrUpdate(ctx context.Context, resourceGroupName string, imageName string, parameters Image, options *ImagesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "ImagesClient.BeginCreateOrUpdate" @@ -115,7 +115,7 @@ func (client *ImagesClient) createOrUpdateCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -127,7 +127,7 @@ func (client *ImagesClient) createOrUpdateCreateRequest(ctx context.Context, res // BeginDelete - Deletes an Image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - imageName - The name of the image. // - options - ImagesClientBeginDeleteOptions contains the optional parameters for the ImagesClient.BeginDelete method. @@ -151,7 +151,7 @@ func (client *ImagesClient) BeginDelete(ctx context.Context, resourceGroupName s // Delete - Deletes an Image. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *ImagesClient) deleteOperation(ctx context.Context, resourceGroupName string, imageName string, options *ImagesClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "ImagesClient.BeginDelete" @@ -193,7 +193,7 @@ func (client *ImagesClient) deleteCreateRequest(ctx context.Context, resourceGro return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -202,7 +202,7 @@ func (client *ImagesClient) deleteCreateRequest(ctx context.Context, resourceGro // Get - Gets an image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - imageName - The name of the image. // - options - ImagesClientGetOptions contains the optional parameters for the ImagesClient.Get method. @@ -251,7 +251,7 @@ func (client *ImagesClient) getCreateRequest(ctx context.Context, resourceGroupN if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -269,7 +269,7 @@ func (client *ImagesClient) getHandleResponse(resp *http.Response) (ImagesClient // NewListPager - Gets the list of Images in the subscription. Use nextLink property in the response to get the next page // of Images. Do this till nextLink is null to fetch all the Images. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - options - ImagesClientListOptions contains the optional parameters for the ImagesClient.NewListPager method. func (client *ImagesClient) NewListPager(options *ImagesClientListOptions) *runtime.Pager[ImagesClientListResponse] { return runtime.NewPager(runtime.PagingHandler[ImagesClientListResponse]{ @@ -306,7 +306,7 @@ func (client *ImagesClient) listCreateRequest(ctx context.Context, options *Imag return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -324,7 +324,7 @@ func (client *ImagesClient) listHandleResponse(resp *http.Response) (ImagesClien // NewListByResourceGroupPager - Gets the list of images under a resource group. Use nextLink property in the response to // get the next page of Images. Do this till nextLink is null to fetch all the Images. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - options - ImagesClientListByResourceGroupOptions contains the optional parameters for the ImagesClient.NewListByResourceGroupPager // method. 
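For context on what these mechanical api-version bumps mean for callers, here is a minimal sketch of driving the regenerated ImagesClient pager. It is illustrative only: azidentity is assumed for credentials and "<subscription-id>" is a placeholder, not a value from this patch.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	// DefaultAzureCredential resolves environment, workload-identity, or managed-identity credentials.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// "<subscription-id>" is a placeholder; the client constructor matches the signature in this vendored file.
	client, err := armcompute.NewImagesClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	// NewListPager issues GET requests carrying the bumped api-version (2023-09-01)
	// and follows nextLink until the listing is exhausted.
	pager := client.NewListPager(nil)
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		for _, image := range page.Value {
			fmt.Println(*image.Name)
		}
	}
}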
@@ -367,7 +367,7 @@ func (client *ImagesClient) listByResourceGroupCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -385,7 +385,7 @@ func (client *ImagesClient) listByResourceGroupHandleResponse(resp *http.Respons // BeginUpdate - Update an image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - imageName - The name of the image. // - parameters - Parameters supplied to the Update Image operation. @@ -410,7 +410,7 @@ func (client *ImagesClient) BeginUpdate(ctx context.Context, resourceGroupName s // Update - Update an image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *ImagesClient) update(ctx context.Context, resourceGroupName string, imageName string, parameters ImageUpdate, options *ImagesClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "ImagesClient.BeginUpdate" @@ -452,7 +452,7 @@ func (client *ImagesClient) updateCreateRequest(ctx context.Context, resourceGro return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/loganalytics_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/loganalytics_client.go index 6dfabd2bf..a48766ffd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/loganalytics_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/loganalytics_client.go @@ -48,7 +48,7 @@ func NewLogAnalyticsClient(subscriptionID string, credential azcore.TokenCredent // to show throttling activities. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - location - The location upon which virtual-machine-sizes is queried. // - parameters - Parameters supplied to the LogAnalytics getRequestRateByInterval Api. // - options - LogAnalyticsClientBeginExportRequestRateByIntervalOptions contains the optional parameters for the LogAnalyticsClient.BeginExportRequestRateByInterval @@ -75,7 +75,7 @@ func (client *LogAnalyticsClient) BeginExportRequestRateByInterval(ctx context.C // show throttling activities. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *LogAnalyticsClient) exportRequestRateByInterval(ctx context.Context, location string, parameters RequestRateByIntervalInput, options *LogAnalyticsClientBeginExportRequestRateByIntervalOptions) (*http.Response, error) { var err error const operationName = "LogAnalyticsClient.BeginExportRequestRateByInterval" @@ -113,7 +113,7 @@ func (client *LogAnalyticsClient) exportRequestRateByIntervalCreateRequest(ctx c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -126,7 +126,7 @@ func (client *LogAnalyticsClient) exportRequestRateByIntervalCreateRequest(ctx c // window. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - location - The location upon which virtual-machine-sizes is queried. // - parameters - Parameters supplied to the LogAnalytics getThrottledRequests Api. // - options - LogAnalyticsClientBeginExportThrottledRequestsOptions contains the optional parameters for the LogAnalyticsClient.BeginExportThrottledRequests @@ -152,7 +152,7 @@ func (client *LogAnalyticsClient) BeginExportThrottledRequests(ctx context.Conte // ExportThrottledRequests - Export logs that show total throttled Api requests for this subscription in the given time window. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *LogAnalyticsClient) exportThrottledRequests(ctx context.Context, location string, parameters ThrottledRequestsInput, options *LogAnalyticsClientBeginExportThrottledRequestsOptions) (*http.Response, error) { var err error const operationName = "LogAnalyticsClient.BeginExportThrottledRequests" @@ -190,7 +190,7 @@ func (client *LogAnalyticsClient) exportThrottledRequestsCreateRequest(ctx conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go index 8275c7791..5b5a3197b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go @@ -101,6 +101,15 @@ type ApplicationProfile struct { GalleryApplications []*VMGalleryApplication } +// AttachDetachDataDisksRequest - Specifies the input for attaching and detaching a list of managed data disks. +type AttachDetachDataDisksRequest struct { + // The list of managed data disks to be attached. + DataDisksToAttach []*DataDisksToAttach + + // The list of managed data disks to be detached. + DataDisksToDetach []*DataDisksToDetach +} + // AutomaticOSUpgradePolicy - The configuration parameters used for performing automatic OS upgrade. type AutomaticOSUpgradePolicy struct { // Whether OS image rollback feature should be disabled. 
Default value is false. @@ -113,6 +122,11 @@ type AutomaticOSUpgradePolicy struct { // is automatically set to false and cannot be set to true. EnableAutomaticOSUpgrade *bool + // Indicates whether Auto OS Upgrade should undergo deferral. Deferred OS upgrades will send advanced notifications on a per-VM + // basis that an OS upgrade from rolling upgrades is incoming, via the IMDS + // tag 'Platform.PendingOSUpgrade'. The upgrade then defers until the upgrade is approved via an ApproveRollingUpgrade call. + OSRollingUpgradeDeferral *bool + // Indicates whether rolling upgrade policy should be used during Auto OS Upgrade. Default value is false. Auto OS Upgrade // will fallback to the default policy if no policy is defined on the VMSS. UseRollingUpgradePolicy *bool @@ -353,6 +367,10 @@ type CapacityReservationGroup struct { type CapacityReservationGroupInstanceView struct { // READ-ONLY; List of instance view of the capacity reservations under the capacity reservation group. CapacityReservations []*CapacityReservationInstanceViewWithName + + // READ-ONLY; List of the subscriptions that the capacity reservation group is shared with. Note: Minimum api-version: 2023-09-01. + // Please refer to https://aka.ms/computereservationsharing for more details. + SharedSubscriptionIDs []*SubResourceReadOnly } // CapacityReservationGroupListResult - The List capacity reservation group with resource group response. @@ -367,6 +385,13 @@ type CapacityReservationGroupListResult struct { // CapacityReservationGroupProperties - capacity reservation group Properties. type CapacityReservationGroupProperties struct { + // Specifies the settings to enable sharing across subscriptions for the capacity reservation group resource. Pls. keep in + // mind the capacity reservation group resource generally can be shared across + // subscriptions belonging to a single azure AAD tenant or cross AAD tenant if there is a trust relationship established between + // the AAD tenants. Note: Minimum api-version: 2023-09-01. Please refer to + // https://aka.ms/computereservationsharing for more details. + SharingProfile *ResourceSharingProfile + // READ-ONLY; A list of all capacity reservation resource ids that belong to capacity reservation group. CapacityReservations []*SubResourceReadOnly @@ -769,6 +794,9 @@ type CommunityGallery struct { // The identifier information of community gallery. Identifier *CommunityGalleryIdentifier + // Describes the properties of a community gallery. + Properties *CommunityGalleryProperties + // READ-ONLY; Resource location Location *string @@ -820,7 +848,7 @@ type CommunityGalleryImageList struct { // REQUIRED; A list of community gallery images. Value []*CommunityGalleryImage - // The uri to fetch the next page of community gallery images. Call ListNext() with this to fetch the next page of community + // The URI to fetch the next page of community gallery images. Call ListNext() with this to fetch the next page of community // gallery images. NextLink *string } @@ -835,23 +863,26 @@ type CommunityGalleryImageProperties struct { OSState *OperatingSystemStateTypes // REQUIRED; This property allows you to specify the type of the OS that is included in the disk when creating a VM from a - // managed image. - // Possible values are: - // Windows - // Linux + // managed image. Possible values are: Windows, Linux. OSType *OperatingSystemTypes // The architecture of the image. Applicable to OS disks only. Architecture *Architecture + // The artifact tags of a community gallery resource. 
+ ArtifactTags map[string]*string + // Describes the disallowed disk types. Disallowed *Disallowed + // The disclaimer for a community gallery resource. + Disclaimer *string + // The end of life date of the gallery image definition. This property can be used for decommissioning purposes. This property // is updatable. EndOfLifeDate *time.Time - // End-user license agreement for the current community gallery image. + // The end-user license agreement for the current community gallery image. Eula *string // A list of gallery image features. @@ -860,7 +891,7 @@ type CommunityGalleryImageProperties struct { // The hypervisor generation of the Virtual Machine. Applicable to OS disks only. HyperVGeneration *HyperVGeneration - // Privacy statement uri for the current community gallery image. + // Privacy statement URI for the current community gallery image. PrivacyStatementURI *string // Describes the gallery image definition purchase plan. This is used by marketplace images. @@ -893,13 +924,19 @@ type CommunityGalleryImageVersionList struct { // REQUIRED; A list of community gallery image versions. Value []*CommunityGalleryImageVersion - // The uri to fetch the next page of community gallery image versions. Call ListNext() with this to fetch the next page of + // The URI to fetch the next page of community gallery image versions. Call ListNext() with this to fetch the next page of // community gallery image versions. NextLink *string } // CommunityGalleryImageVersionProperties - Describes the properties of a gallery image version. type CommunityGalleryImageVersionProperties struct { + // The artifact tags of a community gallery resource. + ArtifactTags map[string]*string + + // The disclaimer for a community gallery resource. + Disclaimer *string + // The end of life date of the gallery image version Definition. This property can be used for decommissioning purposes. This // property is updatable. EndOfLifeDate *time.Time @@ -936,6 +973,36 @@ type CommunityGalleryInfo struct { PublicNames []*string } +// CommunityGalleryMetadata - The metadata of community gallery. +type CommunityGalleryMetadata struct { + // REQUIRED; A list of public names the gallery has. + PublicNames []*string + + // REQUIRED; The publisher email id of this community gallery. + PublisherContact *string + + // The end-user license agreement for this community gallery. + Eula *string + + // The link for the privacy statement of this community gallery from the gallery publisher. + PrivacyStatementURI *string + + // The publisher URI of this community gallery. + PublisherURI *string +} + +// CommunityGalleryProperties - Describes the properties of a community gallery. +type CommunityGalleryProperties struct { + // The artifact tags of a community gallery resource. + ArtifactTags map[string]*string + + // The metadata of community gallery. + CommunityMetadata *CommunityGalleryMetadata + + // The disclaimer for a community gallery resource. + Disclaimer *string +} + // CopyCompletionError - Indicates the error details if the background copy of a resource created via the CopyStart operation // fails. type CopyCompletionError struct { @@ -1078,6 +1145,26 @@ type DataDiskImageEncryption struct { DiskEncryptionSetID *string } +// DataDisksToAttach - Describes the data disk to be attached. +type DataDisksToAttach struct { + // REQUIRED; ID of the managed data disk. + DiskID *string + + // The logical unit number of the data disk. 
This value is used to identify data disks within the VM and therefore must be + // unique for each data disk attached to a VM. If not specified, lun would be auto + // assigned. + Lun *int32 +} + +// DataDisksToDetach - Describes the data disk to be detached. +type DataDisksToDetach struct { + // REQUIRED; ID of the managed data disk. + DiskID *string + + // Supported options available for Detach of a disk from a VM. Refer to DetachOption object reference for more details. + DetachOption *DiskDetachOptionTypes +} + // DedicatedHost - Specifies information about the Dedicated host. type DedicatedHost struct { // REQUIRED; Resource location @@ -1891,6 +1978,12 @@ type Encryption struct { Type *EncryptionType } +// EncryptionIdentity - Specifies the Managed Identity used by ADE to get access token for keyvault operations. +type EncryptionIdentity struct { + // Specifies ARM Resource ID of one of the user identities associated with the VM. + UserAssignedIdentityResourceID *string +} + // EncryptionImages - Optional. Allows users to provide customer managed keys for encrypting the OS and data disks in the // gallery artifact. type EncryptionImages struct { @@ -2085,10 +2178,8 @@ type GalleryApplicationList struct { // GalleryApplicationProperties - Describes the properties of a gallery Application Definition. type GalleryApplicationProperties struct { - // REQUIRED; This property allows you to specify the supported type of the OS that application is built for. - // Possible values are: - // Windows - // Linux + // REQUIRED; This property allows you to specify the supported type of the OS that application is built for. Possible values + // are: Windows, Linux. SupportedOSType *OperatingSystemTypes // A list of custom actions that can be performed with all of the Gallery Application Versions within this Gallery Application. @@ -2418,10 +2509,7 @@ type GalleryImageProperties struct { OSState *OperatingSystemStateTypes // REQUIRED; This property allows you to specify the type of the OS that is included in the disk when creating a VM from a - // managed image. - // Possible values are: - // Windows - // Linux + // managed image. Possible values are: Windows, Linux. OSType *OperatingSystemTypes // The architecture of the image. Applicable to OS disks only. @@ -2522,6 +2610,9 @@ type GalleryImageVersionProperties struct { // This is the safety profile of the Gallery Image Version. SafetyProfile *GalleryImageVersionSafetyProfile + // The security profile of a gallery image version + SecurityProfile *ImageVersionSecurityProfile + // READ-ONLY; The provisioning state, which only appears in the response. ProvisioningState *GalleryProvisioningState @@ -2582,6 +2673,15 @@ type GalleryImageVersionStorageProfile struct { Source *GalleryArtifactVersionFullSource } +// GalleryImageVersionUefiSettings - Contains UEFI settings for the image version. +type GalleryImageVersionUefiSettings struct { + // Additional UEFI key signatures that will be added to the image in addition to the signature templates + AdditionalSignatures *UefiKeySignatures + + // The name of the template(s) that contains default UEFI key signatures that will be added to the image. + SignatureTemplateNames []*UefiSignatureTemplateName +} + // GalleryImageVersionUpdate - Specifies information about the gallery image version that you want to update. type GalleryImageVersionUpdate struct { // Describes the properties of a gallery image version. 
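To make the shape of the newly added UEFI image-version settings concrete, a small sketch that builds a GalleryImageVersionUefiSettings value and round-trips it through the generated marshaller. The certificate string is a placeholder, and the enum-typed fields (key type, signature template names) are left unset since their constants are defined elsewhere in the package; this is a sketch, not part of the vendored change.

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	// Assemble the new image-version security settings: one additional UEFI db key.
	settings := armcompute.GalleryImageVersionUefiSettings{
		AdditionalSignatures: &armcompute.UefiKeySignatures{
			Db: []*armcompute.UefiKey{
				{Value: []*string{to.Ptr("<base64-encoded-certificate>")}}, // placeholder value
			},
		},
	}
	// json.Marshal dispatches to the generated MarshalJSON, which emits the
	// camelCase wire names (additionalSignatures, db, value) and skips nil fields.
	body, err := json.Marshal(settings)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
}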
@@ -2959,6 +3059,12 @@ type ImageUpdate struct { Tags map[string]*string } +// ImageVersionSecurityProfile - The security profile of a gallery image version +type ImageVersionSecurityProfile struct { + // Contains UEFI settings for the image version. + UefiSettings *GalleryImageVersionUefiSettings +} + // InnerError - Inner error details. type InnerError struct { // The internal error message or exception dump. @@ -3722,7 +3828,7 @@ type PatchSettings struct { PatchMode *WindowsVMGuestPatchMode } -// PirCommunityGalleryResource - Base information about the community gallery resource in pir. +// PirCommunityGalleryResource - Base information about the community gallery resource in azure compute gallery. type PirCommunityGalleryResource struct { // The identifier information of community gallery. Identifier *CommunityGalleryIdentifier @@ -3961,6 +4067,21 @@ type ProximityPlacementGroupUpdate struct { Tags map[string]*string } +// ProxyAgentSettings - Specifies ProxyAgent settings while creating the virtual machine. Minimum api-version: 2023-09-01. +type ProxyAgentSettings struct { + // Specifies whether ProxyAgent feature should be enabled on the virtual machine or virtual machine scale set. + Enabled *bool + + // Increase the value of this property allows user to reset the key used for securing communication channel between guest + // and host. + KeyIncarnationID *int32 + + // Specifies the mode that ProxyAgent will execute on if the feature is enabled. ProxyAgent will start to audit or monitor + // but not enforce access control over requests to host endpoints in Audit mode, + // while in Enforce mode it will enforce access control. The default value is Enforce mode. + Mode *Mode +} + // ProxyOnlyResource - The ProxyOnly Resource model definition. type ProxyOnlyResource struct { // READ-ONLY; Resource Id @@ -4093,6 +4214,27 @@ type RequestRateByIntervalInput struct { GroupByUserAgent *bool } +// ResiliencyPolicy - Describes an resiliency policy - resilientVMCreationPolicy and/or resilientVMDeletionPolicy. +type ResiliencyPolicy struct { + // The configuration parameters used while performing resilient VM creation. + ResilientVMCreationPolicy *ResilientVMCreationPolicy + + // The configuration parameters used while performing resilient VM deletion. + ResilientVMDeletionPolicy *ResilientVMDeletionPolicy +} + +// ResilientVMCreationPolicy - The configuration parameters used while performing resilient VM creation. +type ResilientVMCreationPolicy struct { + // Specifies whether resilient VM creation should be enabled on the virtual machine scale set. The default value is false. + Enabled *bool +} + +// ResilientVMDeletionPolicy - The configuration parameters used while performing resilient VM deletion. +type ResilientVMDeletionPolicy struct { + // Specifies whether resilient VM deletion should be enabled on the virtual machine scale set. The default value is false. + Enabled *bool +} + // Resource - The Resource model definition. type Resource struct { // REQUIRED; Resource location @@ -4280,6 +4422,13 @@ type ResourceSKUsResult struct { NextLink *string } +type ResourceSharingProfile struct { + // Specifies an array of subscription resource IDs that capacity reservation group is shared with. Note: Minimum api-version: + // 2023-09-01. Please refer to https://aka.ms/computereservationsharing for more + // details. + SubscriptionIDs []*SubResource +} + // ResourceURIList - The List resources which are encrypted with the disk encryption set. 
type ResourceURIList struct { // REQUIRED; A list of IDs or Owner IDs of resources which are encrypted with the disk encryption set. @@ -4527,6 +4676,9 @@ type RestorePointSourceVMStorageProfile struct { // Gets the OS disk of the VM captured at the time of the restore point creation. OSDisk *RestorePointSourceVMOSDisk + + // READ-ONLY; Gets the disk controller type of the VM captured at the time of the restore point creation. + DiskControllerType *DiskControllerTypes } // RetrieveBootDiagnosticsDataResult - The SAS URIs of the console screenshot and serial log blobs. @@ -4858,6 +5010,13 @@ type SSHConfiguration struct { PublicKeys []*SSHPublicKey } +// SSHGenerateKeyPairInputParameters - Parameters for GenerateSshKeyPair. +type SSHGenerateKeyPairInputParameters struct { + // The encryption type of the SSH keys to be generated. See SshEncryptionTypes for possible set of values. If not provided, + // will default to RSA + EncryptionType *SSHEncryptionTypes +} + // SSHPublicKey - Contains information about SSH certificate public key and the path on the Linux VM where the public key // is placed. type SSHPublicKey struct { @@ -4981,6 +5140,12 @@ type SecurityProfile struct { // property is set to true for the resource. EncryptionAtHost *bool + // Specifies the Managed Identity used by ADE to get access token for keyvault operations. + EncryptionIdentity *EncryptionIdentity + + // Specifies ProxyAgent settings while creating the virtual machine. Minimum api-version: 2023-09-01. + ProxyAgentSettings *ProxyAgentSettings + // Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. The // default behavior is: UefiSettings will not be enabled unless this property is // set. @@ -5009,6 +5174,9 @@ type SharedGallery struct { // The identifier information of shared gallery. Identifier *SharedGalleryIdentifier + // Specifies the properties of a shared gallery + Properties *SharedGalleryProperties + // READ-ONLY; Resource location Location *string @@ -5080,15 +5248,15 @@ type SharedGalleryImageProperties struct { OSState *OperatingSystemStateTypes // REQUIRED; This property allows you to specify the type of the OS that is included in the disk when creating a VM from a - // managed image. - // Possible values are: - // Windows - // Linux + // managed image. Possible values are: Windows, Linux. OSType *OperatingSystemTypes // The architecture of the image. Applicable to OS disks only. Architecture *Architecture + // The artifact tags of a shared gallery resource. + ArtifactTags map[string]*string + // Describes the disallowed disk types. Disallowed *Disallowed @@ -5142,6 +5310,9 @@ type SharedGalleryImageVersionList struct { // SharedGalleryImageVersionProperties - Describes the properties of a gallery image version. type SharedGalleryImageVersionProperties struct { + // The artifact tags of a shared gallery resource. + ArtifactTags map[string]*string + // The end of life date of the gallery image version Definition. This property can be used for decommissioning purposes. This // property is updatable. EndOfLifeDate *time.Time @@ -5184,16 +5355,18 @@ type SharedGalleryOSDiskImage struct { DiskSizeGB *int32 } +// SharedGalleryProperties - Specifies the properties of a shared gallery +type SharedGalleryProperties struct { + // READ-ONLY; The artifact tags of a shared gallery resource. 
+ ArtifactTags map[string]*string +} + // SharingProfile - Profile for gallery sharing to subscription or tenant type SharingProfile struct { // Information of community gallery if current gallery is shared to community. CommunityGalleryInfo *CommunityGalleryInfo - // This property allows you to specify the permission of sharing gallery. - // Possible values are: - // Private - // Groups - // Community + // This property allows you to specify the permission of sharing gallery. Possible values are: Private, Groups, Community. Permissions *GallerySharingPermissionTypes // READ-ONLY; A list of sharing profile groups. @@ -5205,10 +5378,7 @@ type SharingProfileGroup struct { // A list of subscription/tenant ids the gallery is aimed to be shared to. IDs []*string - // This property allows you to specify the type of sharing group. - // Possible values are: - // Subscriptions - // AADTenants + // This property allows you to specify the type of sharing group. Possible values are: Subscriptions, AADTenants. Type *SharingProfileGroupTypes } @@ -5223,11 +5393,8 @@ type SharingStatus struct { // SharingUpdate - Specifies information about the gallery sharing profile update. type SharingUpdate struct { - // REQUIRED; This property allows you to specify the operation type of gallery sharing update. - // Possible values are: - // Add - // Remove - // Reset + // REQUIRED; This property allows you to specify the operation type of gallery sharing update. Possible values are: Add, Remove, + // Reset. OperationType *SharingUpdateOperationTypes // A list of sharing profile groups. @@ -5566,6 +5733,30 @@ type ThrottledRequestsInput struct { GroupByUserAgent *bool } +// UefiKey - A UEFI key signature. +type UefiKey struct { + // The type of key signature. + Type *UefiKeyType + + // The value of the key signature. + Value []*string +} + +// UefiKeySignatures - Additional UEFI key signatures that will be added to the image in addition to the signature templates +type UefiKeySignatures struct { + // The database of UEFI keys for this image version. + Db []*UefiKey + + // The database of revoked UEFI keys for this image version. + Dbx []*UefiKey + + // The Key Encryption Keys of this image version. + Kek []*UefiKey + + // The Platform Key of this image version. + Pk *UefiKey +} + // UefiSettings - Specifies the security settings like secure boot and vTPM used while creating the virtual machine. Minimum // api-version: 2020-12-01. type UefiSettings struct { @@ -5752,8 +5943,9 @@ type VMDiskSecurityProfile struct { DiskEncryptionSet *DiskEncryptionSetParameters // Specifies the EncryptionType of the managed disk. It is set to DiskWithVMGuestState for encryption of the managed disk - // along with VMGuestState blob, and VMGuestStateOnly for encryption of just the - // VMGuestState blob. Note: It can be set for only Confidential VMs. + // along with VMGuestState blob, VMGuestStateOnly for encryption of just the + // VMGuestState blob, and NonPersistedTPM for not persisting firmware state in the VMGuestState blob.. Note: It can be set + // for only Confidential VMs. SecurityEncryptionType *SecurityEncryptionTypes } @@ -5879,9 +6071,17 @@ type VirtualMachine struct { // The virtual machine zones. Zones []*string + // READ-ONLY; Etag is property returned in Create/Update/Get response of the VM, so that customer can supply it in the header + // to ensure optimistic updates. 
+ Etag *string + // READ-ONLY; Resource Id ID *string + // READ-ONLY; ManagedBy is set to Virtual Machine Scale Set(VMSS) flex ARM resourceID, if the VM is part of the VMSS. This + // property is used by platform for internal resource group delete optimization. + ManagedBy *string + // READ-ONLY; Resource name Name *string @@ -6377,6 +6577,9 @@ type VirtualMachineInstanceView struct { // placement enabled. Minimum api-version: 2020-06-01. AssignedHost *string + // READ-ONLY; [Preview Feature] Specifies whether the VM is currently in or out of the Standby Pool. + IsVMInStandbyPool *bool + // READ-ONLY; The health status for the VM. VMHealth *VirtualMachineHealthStatus } @@ -6857,6 +7060,10 @@ type VirtualMachineScaleSet struct { // The virtual machine scale set zones. NOTE: Availability zones can only be set when you create the scale set Zones []*string + // READ-ONLY; Etag is property returned in Create/Update/Get response of the VMSS, so that customer can supply it in the header + // to ensure optimistic updates + Etag *string + // READ-ONLY; Resource Id ID *string @@ -7376,6 +7583,9 @@ type VirtualMachineScaleSetProperties struct { // api-version: 2018-04-01. ProximityPlacementGroup *SubResource + // Policy for Resiliency + ResiliencyPolicy *ResiliencyPolicy + // Specifies the policies applied when scaling in Virtual Machines in the Virtual Machine Scale Set. ScaleInPolicy *ScaleInPolicy @@ -7716,6 +7926,9 @@ type VirtualMachineScaleSetUpdateProperties struct { // Minimum api-version: 2018-04-01. ProximityPlacementGroup *SubResource + // Policy for Resiliency + ResiliencyPolicy *ResiliencyPolicy + // Specifies the policies applied when scaling in Virtual Machines in the Virtual Machine Scale Set. ScaleInPolicy *ScaleInPolicy @@ -7831,6 +8044,10 @@ type VirtualMachineScaleSetVM struct { // Resource tags Tags map[string]*string + // READ-ONLY; Etag is property returned in Update/Get response of the VMSS VM, so that customer can supply it in the header + // to ensure optimistic updates. + Etag *string + // READ-ONLY; Resource Id ID *string @@ -8055,6 +8272,11 @@ type VirtualMachineScaleSetVMProfile struct { // UserData for the virtual machines in the scale set, which must be base-64 encoded. Customer should not pass any secrets // in here. Minimum api-version: 2021-03-01. UserData *string + + // READ-ONLY; Specifies the time in which this VM profile for the Virtual Machine Scale Set was created. Minimum API version + // for this property is 2023-09-01. This value will be added to VMSS Flex VM tags when + // creating/updating the VMSS VM Profile with minimum api-version 2023-09-01. + TimeCreated *time.Time } // VirtualMachineScaleSetVMProperties - Describes the properties of a virtual machine scale set virtual machine. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go index 08c5d8c40..3c63478d0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go @@ -279,11 +279,43 @@ func (a *ApplicationProfile) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type AttachDetachDataDisksRequest. 
+func (a AttachDetachDataDisksRequest) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "dataDisksToAttach", a.DataDisksToAttach) + populate(objectMap, "dataDisksToDetach", a.DataDisksToDetach) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AttachDetachDataDisksRequest. +func (a *AttachDetachDataDisksRequest) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "dataDisksToAttach": + err = unpopulate(val, "DataDisksToAttach", &a.DataDisksToAttach) + delete(rawMsg, key) + case "dataDisksToDetach": + err = unpopulate(val, "DataDisksToDetach", &a.DataDisksToDetach) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type AutomaticOSUpgradePolicy. func (a AutomaticOSUpgradePolicy) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "disableAutomaticRollback", a.DisableAutomaticRollback) populate(objectMap, "enableAutomaticOSUpgrade", a.EnableAutomaticOSUpgrade) + populate(objectMap, "osRollingUpgradeDeferral", a.OSRollingUpgradeDeferral) populate(objectMap, "useRollingUpgradePolicy", a.UseRollingUpgradePolicy) return json.Marshal(objectMap) } @@ -303,6 +335,9 @@ func (a *AutomaticOSUpgradePolicy) UnmarshalJSON(data []byte) error { case "enableAutomaticOSUpgrade": err = unpopulate(val, "EnableAutomaticOSUpgrade", &a.EnableAutomaticOSUpgrade) delete(rawMsg, key) + case "osRollingUpgradeDeferral": + err = unpopulate(val, "OSRollingUpgradeDeferral", &a.OSRollingUpgradeDeferral) + delete(rawMsg, key) case "useRollingUpgradePolicy": err = unpopulate(val, "UseRollingUpgradePolicy", &a.UseRollingUpgradePolicy) delete(rawMsg, key) @@ -794,6 +829,7 @@ func (c *CapacityReservationGroup) UnmarshalJSON(data []byte) error { func (c CapacityReservationGroupInstanceView) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "capacityReservations", c.CapacityReservations) + populate(objectMap, "sharedSubscriptionIds", c.SharedSubscriptionIDs) return json.Marshal(objectMap) } @@ -809,6 +845,9 @@ func (c *CapacityReservationGroupInstanceView) UnmarshalJSON(data []byte) error case "capacityReservations": err = unpopulate(val, "CapacityReservations", &c.CapacityReservations) delete(rawMsg, key) + case "sharedSubscriptionIds": + err = unpopulate(val, "SharedSubscriptionIDs", &c.SharedSubscriptionIDs) + delete(rawMsg, key) } if err != nil { return fmt.Errorf("unmarshalling type %T: %v", c, err) @@ -853,6 +892,7 @@ func (c CapacityReservationGroupProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "capacityReservations", c.CapacityReservations) populate(objectMap, "instanceView", c.InstanceView) + populate(objectMap, "sharingProfile", c.SharingProfile) populate(objectMap, "virtualMachinesAssociated", c.VirtualMachinesAssociated) return json.Marshal(objectMap) } @@ -872,6 +912,9 @@ func (c *CapacityReservationGroupProperties) UnmarshalJSON(data []byte) error { case "instanceView": err = unpopulate(val, "InstanceView", &c.InstanceView) delete(rawMsg, key) + case "sharingProfile": + err = unpopulate(val, "SharingProfile", &c.SharingProfile) + delete(rawMsg, key) case 
"virtualMachinesAssociated": err = unpopulate(val, "VirtualMachinesAssociated", &c.VirtualMachinesAssociated) delete(rawMsg, key) @@ -1823,6 +1866,7 @@ func (c CommunityGallery) MarshalJSON() ([]byte, error) { populate(objectMap, "identifier", c.Identifier) populate(objectMap, "location", c.Location) populate(objectMap, "name", c.Name) + populate(objectMap, "properties", c.Properties) populate(objectMap, "type", c.Type) return json.Marshal(objectMap) } @@ -1845,6 +1889,9 @@ func (c *CommunityGallery) UnmarshalJSON(data []byte) error { case "name": err = unpopulate(val, "Name", &c.Name) delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &c.Properties) + delete(rawMsg, key) case "type": err = unpopulate(val, "Type", &c.Type) delete(rawMsg, key) @@ -1996,7 +2043,9 @@ func (c *CommunityGalleryImageList) UnmarshalJSON(data []byte) error { func (c CommunityGalleryImageProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "architecture", c.Architecture) + populate(objectMap, "artifactTags", c.ArtifactTags) populate(objectMap, "disallowed", c.Disallowed) + populate(objectMap, "disclaimer", c.Disclaimer) populateDateTimeRFC3339(objectMap, "endOfLifeDate", c.EndOfLifeDate) populate(objectMap, "eula", c.Eula) populate(objectMap, "features", c.Features) @@ -2022,9 +2071,15 @@ func (c *CommunityGalleryImageProperties) UnmarshalJSON(data []byte) error { case "architecture": err = unpopulate(val, "Architecture", &c.Architecture) delete(rawMsg, key) + case "artifactTags": + err = unpopulate(val, "ArtifactTags", &c.ArtifactTags) + delete(rawMsg, key) case "disallowed": err = unpopulate(val, "Disallowed", &c.Disallowed) delete(rawMsg, key) + case "disclaimer": + err = unpopulate(val, "Disclaimer", &c.Disclaimer) + delete(rawMsg, key) case "endOfLifeDate": err = unpopulateDateTimeRFC3339(val, "EndOfLifeDate", &c.EndOfLifeDate) delete(rawMsg, key) @@ -2140,6 +2195,8 @@ func (c *CommunityGalleryImageVersionList) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type CommunityGalleryImageVersionProperties. func (c CommunityGalleryImageVersionProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "artifactTags", c.ArtifactTags) + populate(objectMap, "disclaimer", c.Disclaimer) populateDateTimeRFC3339(objectMap, "endOfLifeDate", c.EndOfLifeDate) populate(objectMap, "excludeFromLatest", c.ExcludeFromLatest) populateDateTimeRFC3339(objectMap, "publishedDate", c.PublishedDate) @@ -2156,6 +2213,12 @@ func (c *CommunityGalleryImageVersionProperties) UnmarshalJSON(data []byte) erro for key, val := range rawMsg { var err error switch key { + case "artifactTags": + err = unpopulate(val, "ArtifactTags", &c.ArtifactTags) + delete(rawMsg, key) + case "disclaimer": + err = unpopulate(val, "Disclaimer", &c.Disclaimer) + delete(rawMsg, key) case "endOfLifeDate": err = unpopulateDateTimeRFC3339(val, "EndOfLifeDate", &c.EndOfLifeDate) delete(rawMsg, key) @@ -2223,6 +2286,84 @@ func (c *CommunityGalleryInfo) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type CommunityGalleryMetadata. 
+func (c CommunityGalleryMetadata) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "eula", c.Eula) + populate(objectMap, "privacyStatementUri", c.PrivacyStatementURI) + populate(objectMap, "publicNames", c.PublicNames) + populate(objectMap, "publisherContact", c.PublisherContact) + populate(objectMap, "publisherUri", c.PublisherURI) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CommunityGalleryMetadata. +func (c *CommunityGalleryMetadata) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "eula": + err = unpopulate(val, "Eula", &c.Eula) + delete(rawMsg, key) + case "privacyStatementUri": + err = unpopulate(val, "PrivacyStatementURI", &c.PrivacyStatementURI) + delete(rawMsg, key) + case "publicNames": + err = unpopulate(val, "PublicNames", &c.PublicNames) + delete(rawMsg, key) + case "publisherContact": + err = unpopulate(val, "PublisherContact", &c.PublisherContact) + delete(rawMsg, key) + case "publisherUri": + err = unpopulate(val, "PublisherURI", &c.PublisherURI) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CommunityGalleryProperties. +func (c CommunityGalleryProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "artifactTags", c.ArtifactTags) + populate(objectMap, "communityMetadata", c.CommunityMetadata) + populate(objectMap, "disclaimer", c.Disclaimer) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CommunityGalleryProperties. +func (c *CommunityGalleryProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "artifactTags": + err = unpopulate(val, "ArtifactTags", &c.ArtifactTags) + delete(rawMsg, key) + case "communityMetadata": + err = unpopulate(val, "CommunityMetadata", &c.CommunityMetadata) + delete(rawMsg, key) + case "disclaimer": + err = unpopulate(val, "Disclaimer", &c.Disclaimer) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type CopyCompletionError. func (c CopyCompletionError) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -2462,6 +2603,68 @@ func (d *DataDiskImageEncryption) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type DataDisksToAttach. +func (d DataDisksToAttach) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "diskId", d.DiskID) + populate(objectMap, "lun", d.Lun) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DataDisksToAttach. 
+func (d *DataDisksToAttach) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "diskId": + err = unpopulate(val, "DiskID", &d.DiskID) + delete(rawMsg, key) + case "lun": + err = unpopulate(val, "Lun", &d.Lun) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DataDisksToDetach. +func (d DataDisksToDetach) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "detachOption", d.DetachOption) + populate(objectMap, "diskId", d.DiskID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DataDisksToDetach. +func (d *DataDisksToDetach) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "detachOption": + err = unpopulate(val, "DetachOption", &d.DetachOption) + delete(rawMsg, key) + case "diskId": + err = unpopulate(val, "DiskID", &d.DiskID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type DedicatedHost. func (d DedicatedHost) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -4324,6 +4527,33 @@ func (e *Encryption) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type EncryptionIdentity. +func (e EncryptionIdentity) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "userAssignedIdentityResourceId", e.UserAssignedIdentityResourceID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type EncryptionIdentity. +func (e *EncryptionIdentity) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "userAssignedIdentityResourceId": + err = unpopulate(val, "UserAssignedIdentityResourceID", &e.UserAssignedIdentityResourceID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type EncryptionImages. 
func (e EncryptionImages) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -5833,6 +6063,7 @@ func (g GalleryImageVersionProperties) MarshalJSON() ([]byte, error) { populate(objectMap, "publishingProfile", g.PublishingProfile) populate(objectMap, "replicationStatus", g.ReplicationStatus) populate(objectMap, "safetyProfile", g.SafetyProfile) + populate(objectMap, "securityProfile", g.SecurityProfile) populate(objectMap, "storageProfile", g.StorageProfile) return json.Marshal(objectMap) } @@ -5858,6 +6089,9 @@ func (g *GalleryImageVersionProperties) UnmarshalJSON(data []byte) error { case "safetyProfile": err = unpopulate(val, "SafetyProfile", &g.SafetyProfile) delete(rawMsg, key) + case "securityProfile": + err = unpopulate(val, "SecurityProfile", &g.SecurityProfile) + delete(rawMsg, key) case "storageProfile": err = unpopulate(val, "StorageProfile", &g.StorageProfile) delete(rawMsg, key) @@ -5994,6 +6228,37 @@ func (g *GalleryImageVersionStorageProfile) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type GalleryImageVersionUefiSettings. +func (g GalleryImageVersionUefiSettings) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "additionalSignatures", g.AdditionalSignatures) + populate(objectMap, "signatureTemplateNames", g.SignatureTemplateNames) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type GalleryImageVersionUefiSettings. +func (g *GalleryImageVersionUefiSettings) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "additionalSignatures": + err = unpopulate(val, "AdditionalSignatures", &g.AdditionalSignatures) + delete(rawMsg, key) + case "signatureTemplateNames": + err = unpopulate(val, "SignatureTemplateNames", &g.SignatureTemplateNames) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type GalleryImageVersionUpdate. func (g GalleryImageVersionUpdate) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -6822,6 +7087,33 @@ func (i *ImageUpdate) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type ImageVersionSecurityProfile. +func (i ImageVersionSecurityProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "uefiSettings", i.UefiSettings) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ImageVersionSecurityProfile. +func (i *ImageVersionSecurityProfile) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "uefiSettings": + err = unpopulate(val, "UefiSettings", &i.UefiSettings) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type InnerError. 
func (i InnerError) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -9320,6 +9612,41 @@ func (p *ProximityPlacementGroupUpdate) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type ProxyAgentSettings. +func (p ProxyAgentSettings) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "enabled", p.Enabled) + populate(objectMap, "keyIncarnationId", p.KeyIncarnationID) + populate(objectMap, "mode", p.Mode) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ProxyAgentSettings. +func (p *ProxyAgentSettings) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "enabled": + err = unpopulate(val, "Enabled", &p.Enabled) + delete(rawMsg, key) + case "keyIncarnationId": + err = unpopulate(val, "KeyIncarnationID", &p.KeyIncarnationID) + delete(rawMsg, key) + case "mode": + err = unpopulate(val, "Mode", &p.Mode) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type ProxyOnlyResource. func (p ProxyOnlyResource) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -9682,6 +10009,91 @@ func (r *RequestRateByIntervalInput) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type ResiliencyPolicy. +func (r ResiliencyPolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "resilientVMCreationPolicy", r.ResilientVMCreationPolicy) + populate(objectMap, "resilientVMDeletionPolicy", r.ResilientVMDeletionPolicy) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ResiliencyPolicy. +func (r *ResiliencyPolicy) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "resilientVMCreationPolicy": + err = unpopulate(val, "ResilientVMCreationPolicy", &r.ResilientVMCreationPolicy) + delete(rawMsg, key) + case "resilientVMDeletionPolicy": + err = unpopulate(val, "ResilientVMDeletionPolicy", &r.ResilientVMDeletionPolicy) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ResilientVMCreationPolicy. +func (r ResilientVMCreationPolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "enabled", r.Enabled) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ResilientVMCreationPolicy. 
+func (r *ResilientVMCreationPolicy) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "enabled": + err = unpopulate(val, "Enabled", &r.Enabled) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ResilientVMDeletionPolicy. +func (r ResilientVMDeletionPolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "enabled", r.Enabled) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ResilientVMDeletionPolicy. +func (r *ResilientVMDeletionPolicy) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "enabled": + err = unpopulate(val, "Enabled", &r.Enabled) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type Resource. func (r Resource) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -10154,6 +10566,33 @@ func (r *ResourceSKUsResult) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type ResourceSharingProfile. +func (r ResourceSharingProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "subscriptionIds", r.SubscriptionIDs) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ResourceSharingProfile. +func (r *ResourceSharingProfile) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "subscriptionIds": + err = unpopulate(val, "SubscriptionIDs", &r.SubscriptionIDs) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type ResourceURIList. 
func (r ResourceURIList) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -10732,6 +11171,7 @@ func (r *RestorePointSourceVMOSDisk) UnmarshalJSON(data []byte) error { func (r RestorePointSourceVMStorageProfile) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "dataDisks", r.DataDisks) + populate(objectMap, "diskControllerType", r.DiskControllerType) populate(objectMap, "osDisk", r.OSDisk) return json.Marshal(objectMap) } @@ -10748,6 +11188,9 @@ func (r *RestorePointSourceVMStorageProfile) UnmarshalJSON(data []byte) error { case "dataDisks": err = unpopulate(val, "DataDisks", &r.DataDisks) delete(rawMsg, key) + case "diskControllerType": + err = unpopulate(val, "DiskControllerType", &r.DiskControllerType) + delete(rawMsg, key) case "osDisk": err = unpopulate(val, "OSDisk", &r.OSDisk) delete(rawMsg, key) @@ -11600,6 +12043,33 @@ func (s *SSHConfiguration) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type SSHGenerateKeyPairInputParameters. +func (s SSHGenerateKeyPairInputParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "encryptionType", s.EncryptionType) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SSHGenerateKeyPairInputParameters. +func (s *SSHGenerateKeyPairInputParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "encryptionType": + err = unpopulate(val, "EncryptionType", &s.EncryptionType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type SSHPublicKey. 
func (s SSHPublicKey) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -11899,6 +12369,8 @@ func (s *SecurityPostureReference) UnmarshalJSON(data []byte) error { func (s SecurityProfile) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "encryptionAtHost", s.EncryptionAtHost) + populate(objectMap, "encryptionIdentity", s.EncryptionIdentity) + populate(objectMap, "proxyAgentSettings", s.ProxyAgentSettings) populate(objectMap, "securityType", s.SecurityType) populate(objectMap, "uefiSettings", s.UefiSettings) return json.Marshal(objectMap) @@ -11916,6 +12388,12 @@ func (s *SecurityProfile) UnmarshalJSON(data []byte) error { case "encryptionAtHost": err = unpopulate(val, "EncryptionAtHost", &s.EncryptionAtHost) delete(rawMsg, key) + case "encryptionIdentity": + err = unpopulate(val, "EncryptionIdentity", &s.EncryptionIdentity) + delete(rawMsg, key) + case "proxyAgentSettings": + err = unpopulate(val, "ProxyAgentSettings", &s.ProxyAgentSettings) + delete(rawMsg, key) case "securityType": err = unpopulate(val, "SecurityType", &s.SecurityType) delete(rawMsg, key) @@ -11990,6 +12468,7 @@ func (s SharedGallery) MarshalJSON() ([]byte, error) { populate(objectMap, "identifier", s.Identifier) populate(objectMap, "location", s.Location) populate(objectMap, "name", s.Name) + populate(objectMap, "properties", s.Properties) return json.Marshal(objectMap) } @@ -12011,6 +12490,9 @@ func (s *SharedGallery) UnmarshalJSON(data []byte) error { case "name": err = unpopulate(val, "Name", &s.Name) delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &s.Properties) + delete(rawMsg, key) } if err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -12186,6 +12668,7 @@ func (s *SharedGalleryImageList) UnmarshalJSON(data []byte) error { func (s SharedGalleryImageProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "architecture", s.Architecture) + populate(objectMap, "artifactTags", s.ArtifactTags) populate(objectMap, "disallowed", s.Disallowed) populateDateTimeRFC3339(objectMap, "endOfLifeDate", s.EndOfLifeDate) populate(objectMap, "eula", s.Eula) @@ -12212,6 +12695,9 @@ func (s *SharedGalleryImageProperties) UnmarshalJSON(data []byte) error { case "architecture": err = unpopulate(val, "Architecture", &s.Architecture) delete(rawMsg, key) + case "artifactTags": + err = unpopulate(val, "ArtifactTags", &s.ArtifactTags) + delete(rawMsg, key) case "disallowed": err = unpopulate(val, "Disallowed", &s.Disallowed) delete(rawMsg, key) @@ -12326,6 +12812,7 @@ func (s *SharedGalleryImageVersionList) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type SharedGalleryImageVersionProperties. 
func (s SharedGalleryImageVersionProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "artifactTags", s.ArtifactTags) populateDateTimeRFC3339(objectMap, "endOfLifeDate", s.EndOfLifeDate) populate(objectMap, "excludeFromLatest", s.ExcludeFromLatest) populateDateTimeRFC3339(objectMap, "publishedDate", s.PublishedDate) @@ -12342,6 +12829,9 @@ func (s *SharedGalleryImageVersionProperties) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "artifactTags": + err = unpopulate(val, "ArtifactTags", &s.ArtifactTags) + delete(rawMsg, key) case "endOfLifeDate": err = unpopulateDateTimeRFC3339(val, "EndOfLifeDate", &s.EndOfLifeDate) delete(rawMsg, key) @@ -12455,6 +12945,33 @@ func (s *SharedGalleryOSDiskImage) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type SharedGalleryProperties. +func (s SharedGalleryProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "artifactTags", s.ArtifactTags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SharedGalleryProperties. +func (s *SharedGalleryProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "artifactTags": + err = unpopulate(val, "ArtifactTags", &s.ArtifactTags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type SharingProfile. func (s SharingProfile) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -13352,6 +13869,76 @@ func (t *ThrottledRequestsInput) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type UefiKey. +func (u UefiKey) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "type", u.Type) + populate(objectMap, "value", u.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type UefiKey. +func (u *UefiKey) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "type": + err = unpopulate(val, "Type", &u.Type) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &u.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type UefiKeySignatures. +func (u UefiKeySignatures) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "db", u.Db) + populate(objectMap, "dbx", u.Dbx) + populate(objectMap, "kek", u.Kek) + populate(objectMap, "pk", u.Pk) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type UefiKeySignatures. 
+func (u *UefiKeySignatures) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "db": + err = unpopulate(val, "Db", &u.Db) + delete(rawMsg, key) + case "dbx": + err = unpopulate(val, "Dbx", &u.Dbx) + delete(rawMsg, key) + case "kek": + err = unpopulate(val, "Kek", &u.Kek) + delete(rawMsg, key) + case "pk": + err = unpopulate(val, "Pk", &u.Pk) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type UefiSettings. func (u UefiSettings) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -14120,10 +14707,12 @@ func (v *VirtualHardDisk) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type VirtualMachine. func (v VirtualMachine) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "etag", v.Etag) populate(objectMap, "extendedLocation", v.ExtendedLocation) populate(objectMap, "id", v.ID) populate(objectMap, "identity", v.Identity) populate(objectMap, "location", v.Location) + populate(objectMap, "managedBy", v.ManagedBy) populate(objectMap, "name", v.Name) populate(objectMap, "plan", v.Plan) populate(objectMap, "properties", v.Properties) @@ -14143,6 +14732,9 @@ func (v *VirtualMachine) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "etag": + err = unpopulate(val, "Etag", &v.Etag) + delete(rawMsg, key) case "extendedLocation": err = unpopulate(val, "ExtendedLocation", &v.ExtendedLocation) delete(rawMsg, key) @@ -14155,6 +14747,9 @@ func (v *VirtualMachine) UnmarshalJSON(data []byte) error { case "location": err = unpopulate(val, "Location", &v.Location) delete(rawMsg, key) + case "managedBy": + err = unpopulate(val, "ManagedBy", &v.ManagedBy) + delete(rawMsg, key) case "name": err = unpopulate(val, "Name", &v.Name) delete(rawMsg, key) @@ -15159,6 +15754,7 @@ func (v VirtualMachineInstanceView) MarshalJSON() ([]byte, error) { populate(objectMap, "disks", v.Disks) populate(objectMap, "extensions", v.Extensions) populate(objectMap, "hyperVGeneration", v.HyperVGeneration) + populate(objectMap, "isVMInStandbyPool", v.IsVMInStandbyPool) populate(objectMap, "maintenanceRedeployStatus", v.MaintenanceRedeployStatus) populate(objectMap, "osName", v.OSName) populate(objectMap, "osVersion", v.OSVersion) @@ -15199,6 +15795,9 @@ func (v *VirtualMachineInstanceView) UnmarshalJSON(data []byte) error { case "hyperVGeneration": err = unpopulate(val, "HyperVGeneration", &v.HyperVGeneration) delete(rawMsg, key) + case "isVMInStandbyPool": + err = unpopulate(val, "IsVMInStandbyPool", &v.IsVMInStandbyPool) + delete(rawMsg, key) case "maintenanceRedeployStatus": err = unpopulate(val, "MaintenanceRedeployStatus", &v.MaintenanceRedeployStatus) delete(rawMsg, key) @@ -16078,6 +16677,7 @@ func (v *VirtualMachineRunCommandsListResult) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSet. 
func (v VirtualMachineScaleSet) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "etag", v.Etag) populate(objectMap, "extendedLocation", v.ExtendedLocation) populate(objectMap, "id", v.ID) populate(objectMap, "identity", v.Identity) @@ -16101,6 +16701,9 @@ func (v *VirtualMachineScaleSet) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "etag": + err = unpopulate(val, "Etag", &v.Etag) + delete(rawMsg, key) case "extendedLocation": err = unpopulate(val, "ExtendedLocation", &v.ExtendedLocation) delete(rawMsg, key) @@ -17124,6 +17727,7 @@ func (v VirtualMachineScaleSetProperties) MarshalJSON() ([]byte, error) { populate(objectMap, "priorityMixPolicy", v.PriorityMixPolicy) populate(objectMap, "provisioningState", v.ProvisioningState) populate(objectMap, "proximityPlacementGroup", v.ProximityPlacementGroup) + populate(objectMap, "resiliencyPolicy", v.ResiliencyPolicy) populate(objectMap, "scaleInPolicy", v.ScaleInPolicy) populate(objectMap, "singlePlacementGroup", v.SinglePlacementGroup) populate(objectMap, "spotRestorePolicy", v.SpotRestorePolicy) @@ -17177,6 +17781,9 @@ func (v *VirtualMachineScaleSetProperties) UnmarshalJSON(data []byte) error { case "proximityPlacementGroup": err = unpopulate(val, "ProximityPlacementGroup", &v.ProximityPlacementGroup) delete(rawMsg, key) + case "resiliencyPolicy": + err = unpopulate(val, "ResiliencyPolicy", &v.ResiliencyPolicy) + delete(rawMsg, key) case "scaleInPolicy": err = unpopulate(val, "ScaleInPolicy", &v.ScaleInPolicy) delete(rawMsg, key) @@ -17835,6 +18442,7 @@ func (v VirtualMachineScaleSetUpdateProperties) MarshalJSON() ([]byte, error) { populate(objectMap, "overprovision", v.Overprovision) populate(objectMap, "priorityMixPolicy", v.PriorityMixPolicy) populate(objectMap, "proximityPlacementGroup", v.ProximityPlacementGroup) + populate(objectMap, "resiliencyPolicy", v.ResiliencyPolicy) populate(objectMap, "scaleInPolicy", v.ScaleInPolicy) populate(objectMap, "singlePlacementGroup", v.SinglePlacementGroup) populate(objectMap, "spotRestorePolicy", v.SpotRestorePolicy) @@ -17870,6 +18478,9 @@ func (v *VirtualMachineScaleSetUpdateProperties) UnmarshalJSON(data []byte) erro case "proximityPlacementGroup": err = unpopulate(val, "ProximityPlacementGroup", &v.ProximityPlacementGroup) delete(rawMsg, key) + case "resiliencyPolicy": + err = unpopulate(val, "ResiliencyPolicy", &v.ResiliencyPolicy) + delete(rawMsg, key) case "scaleInPolicy": err = unpopulate(val, "ScaleInPolicy", &v.ScaleInPolicy) delete(rawMsg, key) @@ -18072,6 +18683,7 @@ func (v *VirtualMachineScaleSetUpdateVMProfile) UnmarshalJSON(data []byte) error // MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetVM. 
func (v VirtualMachineScaleSetVM) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "etag", v.Etag) populate(objectMap, "id", v.ID) populate(objectMap, "identity", v.Identity) populate(objectMap, "instanceId", v.InstanceID) @@ -18096,6 +18708,9 @@ func (v *VirtualMachineScaleSetVM) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "etag": + err = unpopulate(val, "Etag", &v.Etag) + delete(rawMsg, key) case "id": err = unpopulate(val, "ID", &v.ID) delete(rawMsg, key) @@ -18498,6 +19113,7 @@ func (v VirtualMachineScaleSetVMProfile) MarshalJSON() ([]byte, error) { populate(objectMap, "securityProfile", v.SecurityProfile) populate(objectMap, "serviceArtifactReference", v.ServiceArtifactReference) populate(objectMap, "storageProfile", v.StorageProfile) + populateDateTimeRFC3339(objectMap, "timeCreated", v.TimeCreated) populate(objectMap, "userData", v.UserData) return json.Marshal(objectMap) } @@ -18559,6 +19175,9 @@ func (v *VirtualMachineScaleSetVMProfile) UnmarshalJSON(data []byte) error { case "storageProfile": err = unpopulate(val, "StorageProfile", &v.StorageProfile) delete(rawMsg, key) + case "timeCreated": + err = unpopulateDateTimeRFC3339(val, "TimeCreated", &v.TimeCreated) + delete(rawMsg, key) case "userData": err = unpopulate(val, "UserData", &v.UserData) delete(rawMsg, key) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/operations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/operations_client.go index 545c016fc..f7fded74a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/operations_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/operations_client.go @@ -39,7 +39,7 @@ func NewOperationsClient(credential azcore.TokenCredential, options *arm.ClientO // NewListPager - Gets a list of compute operations. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - options - OperationsClientListOptions contains the optional parameters for the OperationsClient.NewListPager method. func (client *OperationsClient) NewListPager(options *OperationsClientListOptions) *runtime.Pager[OperationsClientListResponse] { return runtime.NewPager(runtime.PagingHandler[OperationsClientListResponse]{ @@ -73,7 +73,7 @@ func (client *OperationsClient) listCreateRequest(ctx context.Context, options * return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/options.go index 39d7b0f15..0c363663a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/options.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/options.go @@ -400,6 +400,12 @@ type DedicatedHostsClientBeginDeleteOptions struct { ResumeToken string } +// DedicatedHostsClientBeginRedeployOptions contains the optional parameters for the DedicatedHostsClient.BeginRedeploy method. +type DedicatedHostsClientBeginRedeployOptions struct { + // Resumes the LRO from the provided token. 
+ ResumeToken string +} + // DedicatedHostsClientBeginRestartOptions contains the optional parameters for the DedicatedHostsClient.BeginRestart method. type DedicatedHostsClientBeginRestartOptions struct { // Resumes the LRO from the provided token. @@ -956,7 +962,8 @@ type SSHPublicKeysClientDeleteOptions struct { // SSHPublicKeysClientGenerateKeyPairOptions contains the optional parameters for the SSHPublicKeysClient.GenerateKeyPair // method. type SSHPublicKeysClientGenerateKeyPairOptions struct { - // placeholder for future optional parameters + // Parameters supplied to generate the SSH public key. + Parameters *SSHGenerateKeyPairInputParameters } // SSHPublicKeysClientGetOptions contains the optional parameters for the SSHPublicKeysClient.Get method. @@ -1375,6 +1382,20 @@ type VirtualMachineScaleSetVMRunCommandsClientListOptions struct { Expand *string } +// VirtualMachineScaleSetVMsClientBeginApproveRollingUpgradeOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginApproveRollingUpgrade +// method. +type VirtualMachineScaleSetVMsClientBeginApproveRollingUpgradeOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetVMsClientBeginAttachDetachDataDisksOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginAttachDetachDataDisks +// method. +type VirtualMachineScaleSetVMsClientBeginAttachDetachDataDisksOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + // VirtualMachineScaleSetVMsClientBeginDeallocateOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginDeallocate // method. type VirtualMachineScaleSetVMsClientBeginDeallocateOptions struct { @@ -1458,6 +1479,14 @@ type VirtualMachineScaleSetVMsClientBeginStartOptions struct { // VirtualMachineScaleSetVMsClientBeginUpdateOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginUpdate // method. type VirtualMachineScaleSetVMsClientBeginUpdateOptions struct { + // The ETag of the transformation. Omit this value to always overwrite the current resource. Specify the last-seen ETag value + // to prevent accidentally overwriting concurrent changes. + IfMatch *string + + // Set to '*' to allow a new record set to be created, but to prevent updating an existing record set. Other values will result + // in error from server as they are not supported. + IfNoneMatch *string + // Resumes the LRO from the provided token. ResumeToken string } @@ -1505,9 +1534,27 @@ type VirtualMachineScaleSetVMsClientSimulateEvictionOptions struct { // placeholder for future optional parameters } +// VirtualMachineScaleSetsClientBeginApproveRollingUpgradeOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginApproveRollingUpgrade +// method. +type VirtualMachineScaleSetsClientBeginApproveRollingUpgradeOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string + + // A list of virtual machine instance IDs from the VM scale set. + VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs +} + // VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginCreateOrUpdate // method. type VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions struct { + // The ETag of the transformation. Omit this value to always overwrite the current resource. Specify the last-seen ETag value + // to prevent accidentally overwriting concurrent changes. 
+ IfMatch *string + + // Set to '*' to allow a new record set to be created, but to prevent updating an existing record set. Other values will result + // in error from server as they are not supported. + IfNoneMatch *string + // Resumes the LRO from the provided token. ResumeToken string } @@ -1644,6 +1691,14 @@ type VirtualMachineScaleSetsClientBeginUpdateInstancesOptions struct { // VirtualMachineScaleSetsClientBeginUpdateOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginUpdate // method. type VirtualMachineScaleSetsClientBeginUpdateOptions struct { + // The ETag of the transformation. Omit this value to always overwrite the current resource. Specify the last-seen ETag value + // to prevent accidentally overwriting concurrent changes. + IfMatch *string + + // Set to '*' to allow a new record set to be created, but to prevent updating an existing record set. Other values will result + // in error from server as they are not supported. + IfNoneMatch *string + // Resumes the LRO from the provided token. ResumeToken string } @@ -1719,6 +1774,13 @@ type VirtualMachinesClientBeginAssessPatchesOptions struct { ResumeToken string } +// VirtualMachinesClientBeginAttachDetachDataDisksOptions contains the optional parameters for the VirtualMachinesClient.BeginAttachDetachDataDisks +// method. +type VirtualMachinesClientBeginAttachDetachDataDisksOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + // VirtualMachinesClientBeginCaptureOptions contains the optional parameters for the VirtualMachinesClient.BeginCapture method. type VirtualMachinesClientBeginCaptureOptions struct { // Resumes the LRO from the provided token. @@ -1735,6 +1797,14 @@ type VirtualMachinesClientBeginConvertToManagedDisksOptions struct { // VirtualMachinesClientBeginCreateOrUpdateOptions contains the optional parameters for the VirtualMachinesClient.BeginCreateOrUpdate // method. type VirtualMachinesClientBeginCreateOrUpdateOptions struct { + // The ETag of the transformation. Omit this value to always overwrite the current resource. Specify the last-seen ETag value + // to prevent accidentally overwriting concurrent changes. + IfMatch *string + + // Set to '*' to allow a new record set to be created, but to prevent updating an existing record set. Other values will result + // in error from server as they are not supported. + IfNoneMatch *string + // Resumes the LRO from the provided token. ResumeToken string } @@ -1826,6 +1896,14 @@ type VirtualMachinesClientBeginStartOptions struct { // VirtualMachinesClientBeginUpdateOptions contains the optional parameters for the VirtualMachinesClient.BeginUpdate method. type VirtualMachinesClientBeginUpdateOptions struct { + // The ETag of the transformation. Omit this value to always overwrite the current resource. Specify the last-seen ETag value + // to prevent accidentally overwriting concurrent changes. + IfMatch *string + + // Set to '*' to allow a new record set to be created, but to prevent updating an existing record set. Other values will result + // in error from server as they are not supported. + IfNoneMatch *string + // Resumes the LRO from the provided token. 
ResumeToken string } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/proximityplacementgroups_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/proximityplacementgroups_client.go index 7dbfd8f5b..947d83736 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/proximityplacementgroups_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/proximityplacementgroups_client.go @@ -47,7 +47,7 @@ func NewProximityPlacementGroupsClient(subscriptionID string, credential azcore. // CreateOrUpdate - Create or update a proximity placement group. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - proximityPlacementGroupName - The name of the proximity placement group. // - parameters - Parameters supplied to the Create Proximity Placement Group operation. @@ -95,7 +95,7 @@ func (client *ProximityPlacementGroupsClient) createOrUpdateCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -116,7 +116,7 @@ func (client *ProximityPlacementGroupsClient) createOrUpdateHandleResponse(resp // Delete - Delete a proximity placement group. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - proximityPlacementGroupName - The name of the proximity placement group. // - options - ProximityPlacementGroupsClientDeleteOptions contains the optional parameters for the ProximityPlacementGroupsClient.Delete @@ -162,7 +162,7 @@ func (client *ProximityPlacementGroupsClient) deleteCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -171,7 +171,7 @@ func (client *ProximityPlacementGroupsClient) deleteCreateRequest(ctx context.Co // Get - Retrieves information about a proximity placement group . // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - proximityPlacementGroupName - The name of the proximity placement group. 
// - options - ProximityPlacementGroupsClientGetOptions contains the optional parameters for the ProximityPlacementGroupsClient.Get @@ -221,7 +221,7 @@ func (client *ProximityPlacementGroupsClient) getCreateRequest(ctx context.Conte if options != nil && options.IncludeColocationStatus != nil { reqQP.Set("includeColocationStatus", *options.IncludeColocationStatus) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -238,7 +238,7 @@ func (client *ProximityPlacementGroupsClient) getHandleResponse(resp *http.Respo // NewListByResourceGroupPager - Lists all proximity placement groups in a resource group. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - options - ProximityPlacementGroupsClientListByResourceGroupOptions contains the optional parameters for the ProximityPlacementGroupsClient.NewListByResourceGroupPager // method. @@ -281,7 +281,7 @@ func (client *ProximityPlacementGroupsClient) listByResourceGroupCreateRequest(c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -298,7 +298,7 @@ func (client *ProximityPlacementGroupsClient) listByResourceGroupHandleResponse( // NewListBySubscriptionPager - Lists all proximity placement groups in a subscription. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - options - ProximityPlacementGroupsClientListBySubscriptionOptions contains the optional parameters for the ProximityPlacementGroupsClient.NewListBySubscriptionPager // method. func (client *ProximityPlacementGroupsClient) NewListBySubscriptionPager(options *ProximityPlacementGroupsClientListBySubscriptionOptions) *runtime.Pager[ProximityPlacementGroupsClientListBySubscriptionResponse] { @@ -336,7 +336,7 @@ func (client *ProximityPlacementGroupsClient) listBySubscriptionCreateRequest(ct return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -354,7 +354,7 @@ func (client *ProximityPlacementGroupsClient) listBySubscriptionHandleResponse(r // Update - Update a proximity placement group. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - proximityPlacementGroupName - The name of the proximity placement group. // - parameters - Parameters supplied to the Update Proximity Placement Group operation. 
@@ -402,7 +402,7 @@ func (client *ProximityPlacementGroupsClient) updateCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/response_types.go index 360dfc34b..db4fc2bd3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/response_types.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/response_types.go @@ -404,6 +404,11 @@ type DedicatedHostsClientListByHostGroupResponse struct { DedicatedHostListResult } +// DedicatedHostsClientRedeployResponse contains the response from method DedicatedHostsClient.BeginRedeploy. +type DedicatedHostsClientRedeployResponse struct { + // placeholder for future response values +} + // DedicatedHostsClientRestartResponse contains the response from method DedicatedHostsClient.BeginRestart. type DedicatedHostsClientRestartResponse struct { // placeholder for future response values @@ -1286,6 +1291,17 @@ type VirtualMachineScaleSetVMRunCommandsClientUpdateResponse struct { VirtualMachineRunCommand } +// VirtualMachineScaleSetVMsClientApproveRollingUpgradeResponse contains the response from method VirtualMachineScaleSetVMsClient.BeginApproveRollingUpgrade. +type VirtualMachineScaleSetVMsClientApproveRollingUpgradeResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetVMsClientAttachDetachDataDisksResponse contains the response from method VirtualMachineScaleSetVMsClient.BeginAttachDetachDataDisks. +type VirtualMachineScaleSetVMsClientAttachDetachDataDisksResponse struct { + // Specifies the storage settings for the virtual machine disks. + StorageProfile +} + // VirtualMachineScaleSetVMsClientDeallocateResponse contains the response from method VirtualMachineScaleSetVMsClient.BeginDeallocate. type VirtualMachineScaleSetVMsClientDeallocateResponse struct { // placeholder for future response values @@ -1371,6 +1387,11 @@ type VirtualMachineScaleSetVMsClientUpdateResponse struct { VirtualMachineScaleSetVM } +// VirtualMachineScaleSetsClientApproveRollingUpgradeResponse contains the response from method VirtualMachineScaleSetsClient.BeginApproveRollingUpgrade. +type VirtualMachineScaleSetsClientApproveRollingUpgradeResponse struct { + // placeholder for future response values +} + // VirtualMachineScaleSetsClientConvertToSinglePlacementGroupResponse contains the response from method VirtualMachineScaleSetsClient.ConvertToSinglePlacementGroup. type VirtualMachineScaleSetsClientConvertToSinglePlacementGroupResponse struct { // placeholder for future response values @@ -1514,6 +1535,12 @@ type VirtualMachinesClientAssessPatchesResponse struct { VirtualMachineAssessPatchesResult } +// VirtualMachinesClientAttachDetachDataDisksResponse contains the response from method VirtualMachinesClient.BeginAttachDetachDataDisks. +type VirtualMachinesClientAttachDetachDataDisksResponse struct { + // Specifies the storage settings for the virtual machine disks. + StorageProfile +} + // VirtualMachinesClientCaptureResponse contains the response from method VirtualMachinesClient.BeginCapture. 
type VirtualMachinesClientCaptureResponse struct { // Output of virtual machine capture operation. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepointcollections_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepointcollections_client.go index 343e99867..b07477e6d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepointcollections_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepointcollections_client.go @@ -48,7 +48,7 @@ func NewRestorePointCollectionsClient(subscriptionID string, credential azcore.T // for more details. When updating a restore point collection, only tags may be modified. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection. // - parameters - Parameters supplied to the Create or Update restore point collection operation. @@ -96,7 +96,7 @@ func (client *RestorePointCollectionsClient) createOrUpdateCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -118,7 +118,7 @@ func (client *RestorePointCollectionsClient) createOrUpdateHandleResponse(resp * // points. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the Restore Point Collection. // - options - RestorePointCollectionsClientBeginDeleteOptions contains the optional parameters for the RestorePointCollectionsClient.BeginDelete @@ -144,7 +144,7 @@ func (client *RestorePointCollectionsClient) BeginDelete(ctx context.Context, re // points. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *RestorePointCollectionsClient) deleteOperation(ctx context.Context, resourceGroupName string, restorePointCollectionName string, options *RestorePointCollectionsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "RestorePointCollectionsClient.BeginDelete" @@ -186,7 +186,7 @@ func (client *RestorePointCollectionsClient) deleteCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -195,7 +195,7 @@ func (client *RestorePointCollectionsClient) deleteCreateRequest(ctx context.Con // Get - The operation to get the restore point collection. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection. 
// - options - RestorePointCollectionsClientGetOptions contains the optional parameters for the RestorePointCollectionsClient.Get @@ -245,7 +245,7 @@ func (client *RestorePointCollectionsClient) getCreateRequest(ctx context.Contex if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -262,7 +262,7 @@ func (client *RestorePointCollectionsClient) getHandleResponse(resp *http.Respon // NewListPager - Gets the list of restore point collections in a resource group. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - options - RestorePointCollectionsClientListOptions contains the optional parameters for the RestorePointCollectionsClient.NewListPager // method. @@ -305,7 +305,7 @@ func (client *RestorePointCollectionsClient) listCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -324,7 +324,7 @@ func (client *RestorePointCollectionsClient) listHandleResponse(resp *http.Respo // to get the next page of restore point collections. Do this till nextLink is not null to fetch all // the restore point collections. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - options - RestorePointCollectionsClientListAllOptions contains the optional parameters for the RestorePointCollectionsClient.NewListAllPager // method. func (client *RestorePointCollectionsClient) NewListAllPager(options *RestorePointCollectionsClientListAllOptions) *runtime.Pager[RestorePointCollectionsClientListAllResponse] { @@ -362,7 +362,7 @@ func (client *RestorePointCollectionsClient) listAllCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -380,7 +380,7 @@ func (client *RestorePointCollectionsClient) listAllHandleResponse(resp *http.Re // Update - The operation to update the restore point collection. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection. // - parameters - Parameters supplied to the Update restore point collection operation. 
@@ -428,7 +428,7 @@ func (client *RestorePointCollectionsClient) updateCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepoints_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepoints_client.go index a04794bbb..7b6364f92 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepoints_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepoints_client.go @@ -47,7 +47,7 @@ func NewRestorePointsClient(subscriptionID string, credential azcore.TokenCreden // BeginCreate - The operation to create the restore point. Updating properties of an existing restore point is not allowed // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection. // - restorePointName - The name of the restore point. @@ -74,7 +74,7 @@ func (client *RestorePointsClient) BeginCreate(ctx context.Context, resourceGrou // Create - The operation to create the restore point. Updating properties of an existing restore point is not allowed // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *RestorePointsClient) create(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string, parameters RestorePoint, options *RestorePointsClientBeginCreateOptions) (*http.Response, error) { var err error const operationName = "RestorePointsClient.BeginCreate" @@ -120,7 +120,7 @@ func (client *RestorePointsClient) createCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -132,7 +132,7 @@ func (client *RestorePointsClient) createCreateRequest(ctx context.Context, reso // BeginDelete - The operation to delete the restore point. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the Restore Point Collection. // - restorePointName - The name of the restore point. @@ -158,7 +158,7 @@ func (client *RestorePointsClient) BeginDelete(ctx context.Context, resourceGrou // Delete - The operation to delete the restore point. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *RestorePointsClient) deleteOperation(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string, options *RestorePointsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "RestorePointsClient.BeginDelete" @@ -204,7 +204,7 @@ func (client *RestorePointsClient) deleteCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -213,7 +213,7 @@ func (client *RestorePointsClient) deleteCreateRequest(ctx context.Context, reso // Get - The operation to get the restore point. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection. // - restorePointName - The name of the restore point. @@ -267,7 +267,7 @@ func (client *RestorePointsClient) getCreateRequest(ctx context.Context, resourc if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleries_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleries_client.go index b1239fadd..4bf2d0960 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleries_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleries_client.go @@ -47,7 +47,7 @@ func NewSharedGalleriesClient(subscriptionID string, credential azcore.TokenCred // Get - Get a shared gallery by subscription id or tenant id. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - location - Resource location. // - galleryUniqueName - The unique name of the Shared Gallery. // - options - SharedGalleriesClientGetOptions contains the optional parameters for the SharedGalleriesClient.Get method. @@ -93,7 +93,7 @@ func (client *SharedGalleriesClient) getCreateRequest(ctx context.Context, locat return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -110,7 +110,7 @@ func (client *SharedGalleriesClient) getHandleResponse(resp *http.Response) (Sha // NewListPager - List shared galleries by subscription id or tenant id. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - location - Resource location. // - options - SharedGalleriesClientListOptions contains the optional parameters for the SharedGalleriesClient.NewListPager // method. 
@@ -153,7 +153,7 @@ func (client *SharedGalleriesClient) listCreateRequest(ctx context.Context, loca return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") if options != nil && options.SharedTo != nil { reqQP.Set("sharedTo", string(*options.SharedTo)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimages_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimages_client.go index 461801f26..ad549b140 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimages_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimages_client.go @@ -47,7 +47,7 @@ func NewSharedGalleryImagesClient(subscriptionID string, credential azcore.Token // Get - Get a shared gallery image by subscription id or tenant id. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - location - Resource location. // - galleryUniqueName - The unique name of the Shared Gallery. // - galleryImageName - The name of the Shared Gallery Image Definition from which the Image Versions are to be listed. @@ -98,7 +98,7 @@ func (client *SharedGalleryImagesClient) getCreateRequest(ctx context.Context, l return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -115,7 +115,7 @@ func (client *SharedGalleryImagesClient) getHandleResponse(resp *http.Response) // NewListPager - List shared gallery images by subscription id or tenant id. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - location - Resource location. // - galleryUniqueName - The unique name of the Shared Gallery. // - options - SharedGalleryImagesClientListOptions contains the optional parameters for the SharedGalleryImagesClient.NewListPager @@ -163,7 +163,7 @@ func (client *SharedGalleryImagesClient) listCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") if options != nil && options.SharedTo != nil { reqQP.Set("sharedTo", string(*options.SharedTo)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimageversions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimageversions_client.go index 2f0c5b048..5c64dc844 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimageversions_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimageversions_client.go @@ -47,7 +47,7 @@ func NewSharedGalleryImageVersionsClient(subscriptionID string, credential azcor // Get - Get a shared gallery image version by subscription id or tenant id. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - location - Resource location. // - galleryUniqueName - The unique name of the Shared Gallery. 
// - galleryImageName - The name of the Shared Gallery Image Definition from which the Image Versions are to be listed. @@ -106,7 +106,7 @@ func (client *SharedGalleryImageVersionsClient) getCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -123,7 +123,7 @@ func (client *SharedGalleryImageVersionsClient) getHandleResponse(resp *http.Res // NewListPager - List shared gallery image versions by subscription id or tenant id. // -// Generated from API version 2022-03-03 +// Generated from API version 2022-08-03 // - location - Resource location. // - galleryUniqueName - The unique name of the Shared Gallery. // - galleryImageName - The name of the Shared Gallery Image Definition from which the Image Versions are to be listed. @@ -176,7 +176,7 @@ func (client *SharedGalleryImageVersionsClient) listCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-03-03") + reqQP.Set("api-version", "2022-08-03") if options != nil && options.SharedTo != nil { reqQP.Set("sharedTo", string(*options.SharedTo)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sshpublickeys_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sshpublickeys_client.go index 5d08d5596..a577d55d2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sshpublickeys_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sshpublickeys_client.go @@ -47,7 +47,7 @@ func NewSSHPublicKeysClient(subscriptionID string, credential azcore.TokenCreden // Create - Creates a new SSH public key resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - sshPublicKeyName - The name of the SSH public key. // - parameters - Parameters supplied to create the SSH public key. @@ -94,7 +94,7 @@ func (client *SSHPublicKeysClient) createCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -115,7 +115,7 @@ func (client *SSHPublicKeysClient) createHandleResponse(resp *http.Response) (SS // Delete - Delete an SSH public key. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - sshPublicKeyName - The name of the SSH public key. // - options - SSHPublicKeysClientDeleteOptions contains the optional parameters for the SSHPublicKeysClient.Delete method. 
@@ -160,7 +160,7 @@ func (client *SSHPublicKeysClient) deleteCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -171,7 +171,7 @@ func (client *SSHPublicKeysClient) deleteCreateRequest(ctx context.Context, reso // SSH public key resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - sshPublicKeyName - The name of the SSH public key. // - options - SSHPublicKeysClientGenerateKeyPairOptions contains the optional parameters for the SSHPublicKeysClient.GenerateKeyPair @@ -218,9 +218,15 @@ func (client *SSHPublicKeysClient) generateKeyPairCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} + if options != nil && options.Parameters != nil { + if err := runtime.MarshalAsJSON(req, *options.Parameters); err != nil { + return nil, err + } + return req, nil + } return req, nil } @@ -236,7 +242,7 @@ func (client *SSHPublicKeysClient) generateKeyPairHandleResponse(resp *http.Resp // Get - Retrieves information about an SSH public key. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - sshPublicKeyName - The name of the SSH public key. // - options - SSHPublicKeysClientGetOptions contains the optional parameters for the SSHPublicKeysClient.Get method. @@ -282,7 +288,7 @@ func (client *SSHPublicKeysClient) getCreateRequest(ctx context.Context, resourc return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -300,7 +306,7 @@ func (client *SSHPublicKeysClient) getHandleResponse(resp *http.Response) (SSHPu // NewListByResourceGroupPager - Lists all of the SSH public keys in the specified resource group. Use the nextLink property // in the response to get the next page of SSH public keys. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - options - SSHPublicKeysClientListByResourceGroupOptions contains the optional parameters for the SSHPublicKeysClient.NewListByResourceGroupPager // method. @@ -343,7 +349,7 @@ func (client *SSHPublicKeysClient) listByResourceGroupCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -361,7 +367,7 @@ func (client *SSHPublicKeysClient) listByResourceGroupHandleResponse(resp *http. // NewListBySubscriptionPager - Lists all of the SSH public keys in the subscription. Use the nextLink property in the response // to get the next page of SSH public keys. 
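The generateKeyPairCreateRequest hunk above now marshals an optional request body when options.Parameters is set. A hedged sketch of the resulting call follows; the SSHGenerateKeyPairInputParameters type, its EncryptionType field, the Ed25519 constant, and the response fields are assumptions based on the 2023-09-01 API surface, and the resource group and key names are placeholders:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
    "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
    cred, err := azidentity.NewDefaultAzureCredential(nil)
    if err != nil {
        log.Fatal(err)
    }
    client, err := armcompute.NewSSHPublicKeysClient("<subscription-id>", cred, nil)
    if err != nil {
        log.Fatal(err)
    }
    // Parameters is optional; when set, the hunk above sends it as the request
    // body. The input type and Ed25519 constant are assumed from 2023-09-01.
    resp, err := client.GenerateKeyPair(context.Background(), "<resource-group>", "<ssh-key-name>",
        &armcompute.SSHPublicKeysClientGenerateKeyPairOptions{
            Parameters: &armcompute.SSHGenerateKeyPairInputParameters{
                EncryptionType: to.Ptr(armcompute.SSHEncryptionTypesEd25519),
            },
        })
    if err != nil {
        log.Fatal(err)
    }
    // PublicKey (and PrivateKey, returned once) are assumed result fields.
    fmt.Println(*resp.PublicKey)
}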
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - options - SSHPublicKeysClientListBySubscriptionOptions contains the optional parameters for the SSHPublicKeysClient.NewListBySubscriptionPager // method. func (client *SSHPublicKeysClient) NewListBySubscriptionPager(options *SSHPublicKeysClientListBySubscriptionOptions) *runtime.Pager[SSHPublicKeysClientListBySubscriptionResponse] { @@ -399,7 +405,7 @@ func (client *SSHPublicKeysClient) listBySubscriptionCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -417,7 +423,7 @@ func (client *SSHPublicKeysClient) listBySubscriptionHandleResponse(resp *http.R // Update - Updates a new SSH public key resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - sshPublicKeyName - The name of the SSH public key. // - parameters - Parameters supplied to update the SSH public key. @@ -464,7 +470,7 @@ func (client *SSHPublicKeysClient) updateCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/usage_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/usage_client.go index 2ea544447..afb6852b0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/usage_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/usage_client.go @@ -47,7 +47,7 @@ func NewUsageClient(subscriptionID string, credential azcore.TokenCredential, op // NewListPager - Gets, for the specified location, the current compute resource usage information as well as the limits for // compute resources under the subscription. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - location - The location for which resource usage is queried. // - options - UsageClientListOptions contains the optional parameters for the UsageClient.NewListPager method. 
func (client *UsageClient) NewListPager(location string, options *UsageClientListOptions) *runtime.Pager[UsageClientListResponse] { @@ -89,7 +89,7 @@ func (client *UsageClient) listCreateRequest(ctx context.Context, location strin return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensionimages_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensionimages_client.go index 0e0b62612..a8e90c4b1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensionimages_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensionimages_client.go @@ -48,7 +48,7 @@ func NewVirtualMachineExtensionImagesClient(subscriptionID string, credential az // Get - Gets a virtual machine extension image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - location - The name of a supported Azure region. // - options - VirtualMachineExtensionImagesClientGetOptions contains the optional parameters for the VirtualMachineExtensionImagesClient.Get // method. @@ -102,7 +102,7 @@ func (client *VirtualMachineExtensionImagesClient) getCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -120,7 +120,7 @@ func (client *VirtualMachineExtensionImagesClient) getHandleResponse(resp *http. // ListTypes - Gets a list of virtual machine extension image types. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - location - The name of a supported Azure region. // - options - VirtualMachineExtensionImagesClientListTypesOptions contains the optional parameters for the VirtualMachineExtensionImagesClient.ListTypes // method. @@ -166,7 +166,7 @@ func (client *VirtualMachineExtensionImagesClient) listTypesCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -184,7 +184,7 @@ func (client *VirtualMachineExtensionImagesClient) listTypesHandleResponse(resp // ListVersions - Gets a list of virtual machine extension image versions. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - location - The name of a supported Azure region. // - options - VirtualMachineExtensionImagesClientListVersionsOptions contains the optional parameters for the VirtualMachineExtensionImagesClient.ListVersions // method. 
@@ -243,7 +243,7 @@ func (client *VirtualMachineExtensionImagesClient) listVersionsCreateRequest(ctx if options != nil && options.Orderby != nil { reqQP.Set("$orderby", *options.Orderby) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensions_client.go index f1cba272c..9d658bb4e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensions_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensions_client.go @@ -47,7 +47,7 @@ func NewVirtualMachineExtensionsClient(subscriptionID string, credential azcore. // BeginCreateOrUpdate - The operation to create or update the extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine where the extension should be created or updated. // - vmExtensionName - The name of the virtual machine extension. @@ -74,7 +74,7 @@ func (client *VirtualMachineExtensionsClient) BeginCreateOrUpdate(ctx context.Co // CreateOrUpdate - The operation to create or update the extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineExtensionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtension, options *VirtualMachineExtensionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineExtensionsClient.BeginCreateOrUpdate" @@ -120,7 +120,7 @@ func (client *VirtualMachineExtensionsClient) createOrUpdateCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, extensionParameters); err != nil { @@ -132,7 +132,7 @@ func (client *VirtualMachineExtensionsClient) createOrUpdateCreateRequest(ctx co // BeginDelete - The operation to delete the extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine where the extension should be deleted. // - vmExtensionName - The name of the virtual machine extension. @@ -158,7 +158,7 @@ func (client *VirtualMachineExtensionsClient) BeginDelete(ctx context.Context, r // Delete - The operation to delete the extension. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineExtensionsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmName string, vmExtensionName string, options *VirtualMachineExtensionsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineExtensionsClient.BeginDelete" @@ -204,7 +204,7 @@ func (client *VirtualMachineExtensionsClient) deleteCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -213,7 +213,7 @@ func (client *VirtualMachineExtensionsClient) deleteCreateRequest(ctx context.Co // Get - The operation to get the extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine containing the extension. // - vmExtensionName - The name of the virtual machine extension. @@ -268,7 +268,7 @@ func (client *VirtualMachineExtensionsClient) getCreateRequest(ctx context.Conte if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -286,7 +286,7 @@ func (client *VirtualMachineExtensionsClient) getHandleResponse(resp *http.Respo // List - The operation to get all extensions of a Virtual Machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine containing the extension. // - options - VirtualMachineExtensionsClientListOptions contains the optional parameters for the VirtualMachineExtensionsClient.List @@ -336,7 +336,7 @@ func (client *VirtualMachineExtensionsClient) listCreateRequest(ctx context.Cont if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -354,7 +354,7 @@ func (client *VirtualMachineExtensionsClient) listHandleResponse(resp *http.Resp // BeginUpdate - The operation to update the extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine where the extension should be updated. // - vmExtensionName - The name of the virtual machine extension. @@ -381,7 +381,7 @@ func (client *VirtualMachineExtensionsClient) BeginUpdate(ctx context.Context, r // Update - The operation to update the extension. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineExtensionsClient) update(ctx context.Context, resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtensionUpdate, options *VirtualMachineExtensionsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineExtensionsClient.BeginUpdate" @@ -427,7 +427,7 @@ func (client *VirtualMachineExtensionsClient) updateCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, extensionParameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimages_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimages_client.go index 42f941766..1c5ab27e5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimages_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimages_client.go @@ -48,7 +48,7 @@ func NewVirtualMachineImagesClient(subscriptionID string, credential azcore.Toke // Get - Gets a virtual machine image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - location - The name of a supported Azure region. // - publisherName - A valid image publisher. // - offer - A valid image publisher offer. @@ -110,7 +110,7 @@ func (client *VirtualMachineImagesClient) getCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -128,7 +128,7 @@ func (client *VirtualMachineImagesClient) getHandleResponse(resp *http.Response) // List - Gets a list of all virtual machine image versions for the specified location, publisher, offer, and SKU. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - location - The name of a supported Azure region. // - publisherName - A valid image publisher. // - offer - A valid image publisher offer. @@ -194,7 +194,7 @@ func (client *VirtualMachineImagesClient) listCreateRequest(ctx context.Context, if options != nil && options.Orderby != nil { reqQP.Set("$orderby", *options.Orderby) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -212,7 +212,7 @@ func (client *VirtualMachineImagesClient) listHandleResponse(resp *http.Response // ListByEdgeZone - Gets a list of all virtual machine image versions for the specified edge zone // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - location - The name of a supported Azure region. // - edgeZone - The name of the edge zone. 
// - options - VirtualMachineImagesClientListByEdgeZoneOptions contains the optional parameters for the VirtualMachineImagesClient.ListByEdgeZone @@ -259,7 +259,7 @@ func (client *VirtualMachineImagesClient) listByEdgeZoneCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -277,7 +277,7 @@ func (client *VirtualMachineImagesClient) listByEdgeZoneHandleResponse(resp *htt // ListOffers - Gets a list of virtual machine image offers for the specified location and publisher. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - location - The name of a supported Azure region. // - publisherName - A valid image publisher. // - options - VirtualMachineImagesClientListOffersOptions contains the optional parameters for the VirtualMachineImagesClient.ListOffers @@ -324,7 +324,7 @@ func (client *VirtualMachineImagesClient) listOffersCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -342,7 +342,7 @@ func (client *VirtualMachineImagesClient) listOffersHandleResponse(resp *http.Re // ListPublishers - Gets a list of virtual machine image publishers for the specified Azure location. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - location - The name of a supported Azure region. // - options - VirtualMachineImagesClientListPublishersOptions contains the optional parameters for the VirtualMachineImagesClient.ListPublishers // method. @@ -384,7 +384,7 @@ func (client *VirtualMachineImagesClient) listPublishersCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -402,7 +402,7 @@ func (client *VirtualMachineImagesClient) listPublishersHandleResponse(resp *htt // ListSKUs - Gets a list of virtual machine image SKUs for the specified location, publisher, and offer. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - location - The name of a supported Azure region. // - publisherName - A valid image publisher. // - offer - A valid image publisher offer. 
@@ -454,7 +454,7 @@ func (client *VirtualMachineImagesClient) listSKUsCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimagesedgezone_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimagesedgezone_client.go index f946cd4d8..4151b10d3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimagesedgezone_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimagesedgezone_client.go @@ -48,7 +48,7 @@ func NewVirtualMachineImagesEdgeZoneClient(subscriptionID string, credential azc // Get - Gets a virtual machine image in an edge zone. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - location - The name of a supported Azure region. // - edgeZone - The name of the edge zone. // - publisherName - A valid image publisher. @@ -115,7 +115,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) getCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -133,7 +133,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) getHandleResponse(resp *http.R // List - Gets a list of all virtual machine image versions for the specified location, edge zone, publisher, offer, and SKU. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - location - The name of a supported Azure region. // - edgeZone - The name of the edge zone. // - publisherName - A valid image publisher. @@ -204,7 +204,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) listCreateRequest(ctx context. if options != nil && options.Orderby != nil { reqQP.Set("$orderby", *options.Orderby) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -222,7 +222,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) listHandleResponse(resp *http. // ListOffers - Gets a list of virtual machine image offers for the specified location, edge zone and publisher. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - location - The name of a supported Azure region. // - edgeZone - The name of the edge zone. // - publisherName - A valid image publisher. 
@@ -274,7 +274,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) listOffersCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -292,7 +292,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) listOffersHandleResponse(resp // ListPublishers - Gets a list of virtual machine image publishers for the specified Azure location and edge zone. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - location - The name of a supported Azure region. // - edgeZone - The name of the edge zone. // - options - VirtualMachineImagesEdgeZoneClientListPublishersOptions contains the optional parameters for the VirtualMachineImagesEdgeZoneClient.ListPublishers @@ -339,7 +339,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) listPublishersCreateRequest(ct return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -357,7 +357,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) listPublishersHandleResponse(r // ListSKUs - Gets a list of virtual machine image SKUs for the specified location, edge zone, publisher, and offer. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - location - The name of a supported Azure region. // - edgeZone - The name of the edge zone. // - publisherName - A valid image publisher. @@ -414,7 +414,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) listSKUsCreateRequest(ctx cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineruncommands_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineruncommands_client.go index 6b40675ef..348c59ade 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineruncommands_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineruncommands_client.go @@ -47,7 +47,7 @@ func NewVirtualMachineRunCommandsClient(subscriptionID string, credential azcore // BeginCreateOrUpdate - The operation to create or update the run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine where the run command should be created or updated. // - runCommandName - The name of the virtual machine run command. @@ -74,7 +74,7 @@ func (client *VirtualMachineRunCommandsClient) BeginCreateOrUpdate(ctx context.C // CreateOrUpdate - The operation to create or update the run command. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineRunCommandsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmName string, runCommandName string, runCommand VirtualMachineRunCommand, options *VirtualMachineRunCommandsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineRunCommandsClient.BeginCreateOrUpdate" @@ -120,7 +120,7 @@ func (client *VirtualMachineRunCommandsClient) createOrUpdateCreateRequest(ctx c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} if err := runtime.MarshalAsJSON(req, runCommand); err != nil { @@ -132,7 +132,7 @@ func (client *VirtualMachineRunCommandsClient) createOrUpdateCreateRequest(ctx c // BeginDelete - The operation to delete the run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine where the run command should be deleted. // - runCommandName - The name of the virtual machine run command. @@ -158,7 +158,7 @@ func (client *VirtualMachineRunCommandsClient) BeginDelete(ctx context.Context, // Delete - The operation to delete the run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineRunCommandsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmName string, runCommandName string, options *VirtualMachineRunCommandsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineRunCommandsClient.BeginDelete" @@ -204,7 +204,7 @@ func (client *VirtualMachineRunCommandsClient) deleteCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} return req, nil @@ -213,7 +213,7 @@ func (client *VirtualMachineRunCommandsClient) deleteCreateRequest(ctx context.C // Get - Gets specific run command for a subscription in a location. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - location - The location upon which run commands is queried. // - commandID - The command id. // - options - VirtualMachineRunCommandsClientGetOptions contains the optional parameters for the VirtualMachineRunCommandsClient.Get @@ -260,7 +260,7 @@ func (client *VirtualMachineRunCommandsClient) getCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} return req, nil @@ -278,7 +278,7 @@ func (client *VirtualMachineRunCommandsClient) getHandleResponse(resp *http.Resp // GetByVirtualMachine - The operation to get the run command. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine containing the run command. // - runCommandName - The name of the virtual machine run command. @@ -333,7 +333,7 @@ func (client *VirtualMachineRunCommandsClient) getByVirtualMachineCreateRequest( if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} return req, nil @@ -350,7 +350,7 @@ func (client *VirtualMachineRunCommandsClient) getByVirtualMachineHandleResponse // NewListPager - Lists all available run commands for a subscription in a location. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - location - The location upon which run commands is queried. // - options - VirtualMachineRunCommandsClientListOptions contains the optional parameters for the VirtualMachineRunCommandsClient.NewListPager // method. @@ -393,7 +393,7 @@ func (client *VirtualMachineRunCommandsClient) listCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} return req, nil @@ -410,7 +410,7 @@ func (client *VirtualMachineRunCommandsClient) listHandleResponse(resp *http.Res // NewListByVirtualMachinePager - The operation to get all run commands of a Virtual Machine. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine containing the run command. // - options - VirtualMachineRunCommandsClientListByVirtualMachineOptions contains the optional parameters for the VirtualMachineRunCommandsClient.NewListByVirtualMachinePager @@ -461,7 +461,7 @@ func (client *VirtualMachineRunCommandsClient) listByVirtualMachineCreateRequest if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} return req, nil @@ -479,7 +479,7 @@ func (client *VirtualMachineRunCommandsClient) listByVirtualMachineHandleRespons // BeginUpdate - The operation to update the run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine where the run command should be updated. // - runCommandName - The name of the virtual machine run command. @@ -506,7 +506,7 @@ func (client *VirtualMachineRunCommandsClient) BeginUpdate(ctx context.Context, // Update - The operation to update the run command. // If the operation fails it returns an *azcore.ResponseError type. 
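For reference, the location-scoped run command operations above (Get and NewListPager) can be exercised like this. A minimal sketch; the subscription ID and location are placeholders, and the ID field on the returned run command documents is an assumption:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
    cred, err := azidentity.NewDefaultAzureCredential(nil)
    if err != nil {
        log.Fatal(err)
    }
    client, err := armcompute.NewVirtualMachineRunCommandsClient("<subscription-id>", cred, nil)
    if err != nil {
        log.Fatal(err)
    }
    // Lists the run commands available in a location, using the api-version
    // set by the generated listCreateRequest shown above.
    pager := client.NewListPager("eastus", nil)
    for pager.More() {
        page, err := pager.NextPage(context.Background())
        if err != nil {
            log.Fatal(err)
        }
        for _, cmd := range page.Value {
            // ID is assumed to be populated on each run command document.
            fmt.Println(*cmd.ID)
        }
    }
}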
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineRunCommandsClient) update(ctx context.Context, resourceGroupName string, vmName string, runCommandName string, runCommand VirtualMachineRunCommandUpdate, options *VirtualMachineRunCommandsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineRunCommandsClient.BeginUpdate" @@ -552,7 +552,7 @@ func (client *VirtualMachineRunCommandsClient) updateCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} if err := runtime.MarshalAsJSON(req, runCommand); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachines_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachines_client.go index e02d32dc6..b665b49f5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachines_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachines_client.go @@ -48,7 +48,7 @@ func NewVirtualMachinesClient(subscriptionID string, credential azcore.TokenCred // BeginAssessPatches - Assess patches on the VM. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientBeginAssessPatchesOptions contains the optional parameters for the VirtualMachinesClient.BeginAssessPatches @@ -74,7 +74,7 @@ func (client *VirtualMachinesClient) BeginAssessPatches(ctx context.Context, res // AssessPatches - Assess patches on the VM. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachinesClient) assessPatches(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginAssessPatchesOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginAssessPatches" @@ -116,17 +116,98 @@ func (client *VirtualMachinesClient) assessPatchesCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil } +// BeginAttachDetachDataDisks - Attach and detach data disks to/from the virtual machine. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-09-01 +// - resourceGroupName - The name of the resource group. +// - vmName - The name of the virtual machine. +// - parameters - Parameters supplied to the attach and detach data disks operation on the virtual machine. +// - options - VirtualMachinesClientBeginAttachDetachDataDisksOptions contains the optional parameters for the VirtualMachinesClient.BeginAttachDetachDataDisks +// method. 
+func (client *VirtualMachinesClient) BeginAttachDetachDataDisks(ctx context.Context, resourceGroupName string, vmName string, parameters AttachDetachDataDisksRequest, options *VirtualMachinesClientBeginAttachDetachDataDisksOptions) (*runtime.Poller[VirtualMachinesClientAttachDetachDataDisksResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.attachDetachDataDisks(ctx, resourceGroupName, vmName, parameters, options) + if err != nil { + return nil, err + } + poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualMachinesClientAttachDetachDataDisksResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), + }) + return poller, err + } else { + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualMachinesClientAttachDetachDataDisksResponse]{ + Tracer: client.internal.Tracer(), + }) + } +} + +// AttachDetachDataDisks - Attach and detach data disks to/from the virtual machine. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-09-01 +func (client *VirtualMachinesClient) attachDetachDataDisks(ctx context.Context, resourceGroupName string, vmName string, parameters AttachDetachDataDisksRequest, options *VirtualMachinesClientBeginAttachDetachDataDisksOptions) (*http.Response, error) { + var err error + const operationName = "VirtualMachinesClient.BeginAttachDetachDataDisks" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.attachDetachDataDisksCreateRequest(ctx, resourceGroupName, vmName, parameters, options) + if err != nil { + return nil, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return nil, err + } + return httpResp, nil +} + +// attachDetachDataDisksCreateRequest creates the AttachDetachDataDisks request. 
+func (client *VirtualMachinesClient) attachDetachDataDisksCreateRequest(ctx context.Context, resourceGroupName string, vmName string, parameters AttachDetachDataDisksRequest, options *VirtualMachinesClientBeginAttachDetachDataDisksOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/attachDetachDataDisks" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + // BeginCapture - Captures the VM by copying virtual hard disks of the VM and outputs a template that can be used to create // similar VMs. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - parameters - Parameters supplied to the Capture Virtual Machine operation. @@ -154,7 +235,7 @@ func (client *VirtualMachinesClient) BeginCapture(ctx context.Context, resourceG // VMs. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachinesClient) capture(ctx context.Context, resourceGroupName string, vmName string, parameters VirtualMachineCaptureParameters, options *VirtualMachinesClientBeginCaptureOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginCapture" @@ -196,7 +277,7 @@ func (client *VirtualMachinesClient) captureCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -209,7 +290,7 @@ func (client *VirtualMachinesClient) captureCreateRequest(ctx context.Context, r // before invoking this operation. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. 
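The new BeginAttachDetachDataDisks operation added above follows the usual long-running-operation pattern: begin the operation, then poll it to a terminal state. A hedged sketch; the AttachDetachDataDisksRequest field names (DataDisksToAttach, DiskID) are assumptions based on the 2023-09-01 API, and all resource names are placeholders:

package main

import (
    "context"
    "log"

    "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
    "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
    cred, err := azidentity.NewDefaultAzureCredential(nil)
    if err != nil {
        log.Fatal(err)
    }
    client, err := armcompute.NewVirtualMachinesClient("<subscription-id>", cred, nil)
    if err != nil {
        log.Fatal(err)
    }
    // Attach one existing managed disk by resource ID; field names assumed.
    req := armcompute.AttachDetachDataDisksRequest{
        DataDisksToAttach: []*armcompute.DataDisksToAttach{
            {DiskID: to.Ptr("/subscriptions/<subscription-id>/resourceGroups/<rg>/providers/Microsoft.Compute/disks/<disk-name>")},
        },
    }
    poller, err := client.BeginAttachDetachDataDisks(context.Background(), "<resource-group>", "<vm-name>", req, nil)
    if err != nil {
        log.Fatal(err)
    }
    // PollUntilDone blocks until the long-running operation completes or fails.
    if _, err := poller.PollUntilDone(context.Background(), nil); err != nil {
        log.Fatal(err)
    }
}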
// - options - VirtualMachinesClientBeginConvertToManagedDisksOptions contains the optional parameters for the VirtualMachinesClient.BeginConvertToManagedDisks @@ -235,7 +316,7 @@ func (client *VirtualMachinesClient) BeginConvertToManagedDisks(ctx context.Cont // before invoking this operation. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachinesClient) convertToManagedDisks(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginConvertToManagedDisksOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginConvertToManagedDisks" @@ -277,7 +358,7 @@ func (client *VirtualMachinesClient) convertToManagedDisksCreateRequest(ctx cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -287,7 +368,7 @@ func (client *VirtualMachinesClient) convertToManagedDisksCreateRequest(ctx cont // during virtual machine creation. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - parameters - Parameters supplied to the Create Virtual Machine operation. @@ -314,7 +395,7 @@ func (client *VirtualMachinesClient) BeginCreateOrUpdate(ctx context.Context, re // virtual machine creation. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachinesClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmName string, parameters VirtualMachine, options *VirtualMachinesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginCreateOrUpdate" @@ -356,8 +437,14 @@ func (client *VirtualMachinesClient) createOrUpdateCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{*options.IfMatch} + } + if options != nil && options.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{*options.IfNoneMatch} + } req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { return nil, err @@ -369,7 +456,7 @@ func (client *VirtualMachinesClient) createOrUpdateCreateRequest(ctx context.Con // resources that this virtual machine uses. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientBeginDeallocateOptions contains the optional parameters for the VirtualMachinesClient.BeginDeallocate @@ -395,7 +482,7 @@ func (client *VirtualMachinesClient) BeginDeallocate(ctx context.Context, resour // that this virtual machine uses. // If the operation fails it returns an *azcore.ResponseError type. 
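The createOrUpdateCreateRequest hunk above now forwards If-Match and If-None-Match from the options, enabling conditional create or update. A minimal sketch of passing If-None-Match: "*" so the service rejects the call when the VM already exists; the VirtualMachine payload is intentionally incomplete (a real create needs hardware, storage, and OS profiles) and all names are placeholders:

package main

import (
    "context"
    "log"

    "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
    "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
    cred, err := azidentity.NewDefaultAzureCredential(nil)
    if err != nil {
        log.Fatal(err)
    }
    client, err := armcompute.NewVirtualMachinesClient("<subscription-id>", cred, nil)
    if err != nil {
        log.Fatal(err)
    }
    // Only the conditional-request headers added by the hunk above are shown;
    // the VM definition below is a stub, not a complete create payload.
    vm := armcompute.VirtualMachine{Location: to.Ptr("eastus")}
    poller, err := client.BeginCreateOrUpdate(context.Background(), "<resource-group>", "<vm-name>", vm,
        &armcompute.VirtualMachinesClientBeginCreateOrUpdateOptions{
            // If-None-Match: "*" asks the service to fail if the VM already exists.
            IfNoneMatch: to.Ptr("*"),
        })
    if err != nil {
        log.Fatal(err)
    }
    if _, err := poller.PollUntilDone(context.Background(), nil); err != nil {
        log.Fatal(err)
    }
}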
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachinesClient) deallocate(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginDeallocateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginDeallocate" @@ -440,7 +527,7 @@ func (client *VirtualMachinesClient) deallocateCreateRequest(ctx context.Context if options != nil && options.Hibernate != nil { reqQP.Set("hibernate", strconv.FormatBool(*options.Hibernate)) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -449,7 +536,7 @@ func (client *VirtualMachinesClient) deallocateCreateRequest(ctx context.Context // BeginDelete - The operation to delete a virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientBeginDeleteOptions contains the optional parameters for the VirtualMachinesClient.BeginDelete @@ -474,7 +561,7 @@ func (client *VirtualMachinesClient) BeginDelete(ctx context.Context, resourceGr // Delete - The operation to delete a virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachinesClient) deleteOperation(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginDelete" @@ -519,7 +606,7 @@ func (client *VirtualMachinesClient) deleteCreateRequest(ctx context.Context, re if options != nil && options.ForceDeletion != nil { reqQP.Set("forceDeletion", strconv.FormatBool(*options.ForceDeletion)) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -532,7 +619,7 @@ func (client *VirtualMachinesClient) deleteCreateRequest(ctx context.Context, re // [https://docs.microsoft.com/azure/virtual-machines/linux/capture-image]. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientGeneralizeOptions contains the optional parameters for the VirtualMachinesClient.Generalize @@ -578,7 +665,7 @@ func (client *VirtualMachinesClient) generalizeCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -587,7 +674,7 @@ func (client *VirtualMachinesClient) generalizeCreateRequest(ctx context.Context // Get - Retrieves information about the model view or the instance view of a virtual machine. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientGetOptions contains the optional parameters for the VirtualMachinesClient.Get method. @@ -636,7 +723,7 @@ func (client *VirtualMachinesClient) getCreateRequest(ctx context.Context, resou if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -654,7 +741,7 @@ func (client *VirtualMachinesClient) getHandleResponse(resp *http.Response) (Vir // BeginInstallPatches - Installs patches on the VM. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - installPatchesInput - Input for InstallPatches as directly received by the API @@ -681,7 +768,7 @@ func (client *VirtualMachinesClient) BeginInstallPatches(ctx context.Context, re // InstallPatches - Installs patches on the VM. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachinesClient) installPatches(ctx context.Context, resourceGroupName string, vmName string, installPatchesInput VirtualMachineInstallPatchesParameters, options *VirtualMachinesClientBeginInstallPatchesOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginInstallPatches" @@ -723,7 +810,7 @@ func (client *VirtualMachinesClient) installPatchesCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, installPatchesInput); err != nil { @@ -735,7 +822,7 @@ func (client *VirtualMachinesClient) installPatchesCreateRequest(ctx context.Con // InstanceView - Retrieves information about the run-time state of a virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientInstanceViewOptions contains the optional parameters for the VirtualMachinesClient.InstanceView @@ -782,7 +869,7 @@ func (client *VirtualMachinesClient) instanceViewCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -800,7 +887,7 @@ func (client *VirtualMachinesClient) instanceViewHandleResponse(resp *http.Respo // NewListPager - Lists all of the virtual machines in the specified resource group. Use the nextLink property in the response // to get the next page of virtual machines. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - options - VirtualMachinesClientListOptions contains the optional parameters for the VirtualMachinesClient.NewListPager // method. @@ -849,7 +936,7 @@ func (client *VirtualMachinesClient) listCreateRequest(ctx context.Context, reso if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -867,7 +954,7 @@ func (client *VirtualMachinesClient) listHandleResponse(resp *http.Response) (Vi // NewListAllPager - Lists all of the virtual machines in the specified subscription. Use the nextLink property in the response // to get the next page of virtual machines. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - options - VirtualMachinesClientListAllOptions contains the optional parameters for the VirtualMachinesClient.NewListAllPager // method. func (client *VirtualMachinesClient) NewListAllPager(options *VirtualMachinesClientListAllOptions) *runtime.Pager[VirtualMachinesClientListAllResponse] { @@ -905,7 +992,7 @@ func (client *VirtualMachinesClient) listAllCreateRequest(ctx context.Context, o return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") if options != nil && options.StatusOnly != nil { reqQP.Set("statusOnly", *options.StatusOnly) } @@ -931,7 +1018,7 @@ func (client *VirtualMachinesClient) listAllHandleResponse(resp *http.Response) // NewListAvailableSizesPager - Lists all available virtual machine sizes to which the specified virtual machine can be resized. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientListAvailableSizesOptions contains the optional parameters for the VirtualMachinesClient.NewListAvailableSizesPager @@ -980,7 +1067,7 @@ func (client *VirtualMachinesClient) listAvailableSizesCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -997,7 +1084,7 @@ func (client *VirtualMachinesClient) listAvailableSizesHandleResponse(resp *http // NewListByLocationPager - Gets all the virtual machines under the specified subscription for the specified location. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - location - The location for which virtual machines under the subscription are queried. // - options - VirtualMachinesClientListByLocationOptions contains the optional parameters for the VirtualMachinesClient.NewListByLocationPager // method. 
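The hunks in this vendored client only move the generated api-version constant from 2023-07-01 to 2023-09-01; callers never set that query parameter themselves, so the list pagers documented above keep the same call pattern. As a point of reference, here is a minimal consumption sketch for one of the pagers touched here, assuming an armcompute v5 client authenticated with azidentity; the subscription ID and location values are placeholders, not values taken from this patch.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	// Placeholder values; substitute a real subscription ID and Azure region.
	subscriptionID := "<subscription-id>"
	location := "eastus"

	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}

	client, err := armcompute.NewVirtualMachinesClient(subscriptionID, cred, nil)
	if err != nil {
		log.Fatalf("failed to create VirtualMachinesClient: %v", err)
	}

	// NewListByLocationPager pages through all VMs in the location; the
	// api-version query parameter is pinned inside the generated
	// listByLocationCreateRequest shown in the next hunk.
	pager := client.NewListByLocationPager(location, nil)
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		if err != nil {
			log.Fatalf("failed to advance page: %v", err)
		}
		for _, vm := range page.Value {
			fmt.Println(*vm.Name)
		}
	}
}

Because the version string lives only in the generated createRequest helpers, a consumer written like the sketch above recompiles against the regenerated client without source changes.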
@@ -1040,7 +1127,7 @@ func (client *VirtualMachinesClient) listByLocationCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1058,7 +1145,7 @@ func (client *VirtualMachinesClient) listByLocationHandleResponse(resp *http.Res // BeginPerformMaintenance - The operation to perform maintenance on a virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientBeginPerformMaintenanceOptions contains the optional parameters for the VirtualMachinesClient.BeginPerformMaintenance @@ -1083,7 +1170,7 @@ func (client *VirtualMachinesClient) BeginPerformMaintenance(ctx context.Context // PerformMaintenance - The operation to perform maintenance on a virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachinesClient) performMaintenance(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginPerformMaintenanceOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginPerformMaintenance" @@ -1125,7 +1212,7 @@ func (client *VirtualMachinesClient) performMaintenanceCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1135,7 +1222,7 @@ func (client *VirtualMachinesClient) performMaintenanceCreateRequest(ctx context // provisioned resources. You are still charged for this virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientBeginPowerOffOptions contains the optional parameters for the VirtualMachinesClient.BeginPowerOff @@ -1161,7 +1248,7 @@ func (client *VirtualMachinesClient) BeginPowerOff(ctx context.Context, resource // resources. You are still charged for this virtual machine. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachinesClient) powerOff(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginPowerOffOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginPowerOff" @@ -1206,7 +1293,7 @@ func (client *VirtualMachinesClient) powerOffCreateRequest(ctx context.Context, if options != nil && options.SkipShutdown != nil { reqQP.Set("skipShutdown", strconv.FormatBool(*options.SkipShutdown)) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1215,7 +1302,7 @@ func (client *VirtualMachinesClient) powerOffCreateRequest(ctx context.Context, // BeginReapply - The operation to reapply a virtual machine's state. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientBeginReapplyOptions contains the optional parameters for the VirtualMachinesClient.BeginReapply @@ -1240,7 +1327,7 @@ func (client *VirtualMachinesClient) BeginReapply(ctx context.Context, resourceG // Reapply - The operation to reapply a virtual machine's state. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachinesClient) reapply(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginReapplyOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginReapply" @@ -1282,7 +1369,7 @@ func (client *VirtualMachinesClient) reapplyCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1291,7 +1378,7 @@ func (client *VirtualMachinesClient) reapplyCreateRequest(ctx context.Context, r // BeginRedeploy - Shuts down the virtual machine, moves it to a new node, and powers it back on. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientBeginRedeployOptions contains the optional parameters for the VirtualMachinesClient.BeginRedeploy @@ -1316,7 +1403,7 @@ func (client *VirtualMachinesClient) BeginRedeploy(ctx context.Context, resource // Redeploy - Shuts down the virtual machine, moves it to a new node, and powers it back on. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachinesClient) redeploy(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginRedeployOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginRedeploy" @@ -1358,7 +1445,7 @@ func (client *VirtualMachinesClient) redeployCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1371,7 +1458,7 @@ func (client *VirtualMachinesClient) redeployCreateRequest(ctx context.Context, // will be deleted after reimage. The deleteOption of the OS disk should be updated accordingly before performing the reimage. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientBeginReimageOptions contains the optional parameters for the VirtualMachinesClient.BeginReimage @@ -1400,7 +1487,7 @@ func (client *VirtualMachinesClient) BeginReimage(ctx context.Context, resourceG // will be deleted after reimage. The deleteOption of the OS disk should be updated accordingly before performing the reimage. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachinesClient) reimage(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginReimageOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginReimage" @@ -1442,7 +1529,7 @@ func (client *VirtualMachinesClient) reimageCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.Parameters != nil { @@ -1457,7 +1544,7 @@ func (client *VirtualMachinesClient) reimageCreateRequest(ctx context.Context, r // BeginRestart - The operation to restart a virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientBeginRestartOptions contains the optional parameters for the VirtualMachinesClient.BeginRestart @@ -1482,7 +1569,7 @@ func (client *VirtualMachinesClient) BeginRestart(ctx context.Context, resourceG // Restart - The operation to restart a virtual machine. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachinesClient) restart(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginRestartOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginRestart" @@ -1524,7 +1611,7 @@ func (client *VirtualMachinesClient) restartCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1533,7 +1620,7 @@ func (client *VirtualMachinesClient) restartCreateRequest(ctx context.Context, r // RetrieveBootDiagnosticsData - The operation to retrieve SAS URIs for a virtual machine's boot diagnostic logs. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientRetrieveBootDiagnosticsDataOptions contains the optional parameters for the VirtualMachinesClient.RetrieveBootDiagnosticsData @@ -1583,7 +1670,7 @@ func (client *VirtualMachinesClient) retrieveBootDiagnosticsDataCreateRequest(ct if options != nil && options.SasURIExpirationTimeInMinutes != nil { reqQP.Set("sasUriExpirationTimeInMinutes", strconv.FormatInt(int64(*options.SasURIExpirationTimeInMinutes), 10)) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1601,7 +1688,7 @@ func (client *VirtualMachinesClient) retrieveBootDiagnosticsDataHandleResponse(r // BeginRunCommand - Run command on the VM. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - parameters - Parameters supplied to the Run command operation. @@ -1628,7 +1715,7 @@ func (client *VirtualMachinesClient) BeginRunCommand(ctx context.Context, resour // RunCommand - Run command on the VM. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachinesClient) runCommand(ctx context.Context, resourceGroupName string, vmName string, parameters RunCommandInput, options *VirtualMachinesClientBeginRunCommandOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginRunCommand" @@ -1670,7 +1757,7 @@ func (client *VirtualMachinesClient) runCommandCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -1682,7 +1769,7 @@ func (client *VirtualMachinesClient) runCommandCreateRequest(ctx context.Context // SimulateEviction - The operation to simulate the eviction of spot virtual machine. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientSimulateEvictionOptions contains the optional parameters for the VirtualMachinesClient.SimulateEviction @@ -1728,7 +1815,7 @@ func (client *VirtualMachinesClient) simulateEvictionCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1737,7 +1824,7 @@ func (client *VirtualMachinesClient) simulateEvictionCreateRequest(ctx context.C // BeginStart - The operation to start a virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientBeginStartOptions contains the optional parameters for the VirtualMachinesClient.BeginStart @@ -1762,7 +1849,7 @@ func (client *VirtualMachinesClient) BeginStart(ctx context.Context, resourceGro // Start - The operation to start a virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachinesClient) start(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginStartOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginStart" @@ -1804,7 +1891,7 @@ func (client *VirtualMachinesClient) startCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1813,7 +1900,7 @@ func (client *VirtualMachinesClient) startCreateRequest(ctx context.Context, res // BeginUpdate - The operation to update a virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - parameters - Parameters supplied to the Update Virtual Machine operation. @@ -1839,7 +1926,7 @@ func (client *VirtualMachinesClient) BeginUpdate(ctx context.Context, resourceGr // Update - The operation to update a virtual machine. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachinesClient) update(ctx context.Context, resourceGroupName string, vmName string, parameters VirtualMachineUpdate, options *VirtualMachinesClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginUpdate" @@ -1881,8 +1968,14 @@ func (client *VirtualMachinesClient) updateCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{*options.IfMatch} + } + if options != nil && options.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{*options.IfNoneMatch} + } req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { return nil, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetextensions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetextensions_client.go index 8e5820d44..585b42657 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetextensions_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetextensions_client.go @@ -47,7 +47,7 @@ func NewVirtualMachineScaleSetExtensionsClient(subscriptionID string, credential // BeginCreateOrUpdate - The operation to create or update an extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set where the extension should be create or updated. // - vmssExtensionName - The name of the VM scale set extension. @@ -74,7 +74,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) BeginCreateOrUpdate(ctx co // CreateOrUpdate - The operation to create or update an extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetExtensionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmssExtensionName string, extensionParameters VirtualMachineScaleSetExtension, options *VirtualMachineScaleSetExtensionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetExtensionsClient.BeginCreateOrUpdate" @@ -120,7 +120,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) createOrUpdateCreateReques return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, extensionParameters); err != nil { @@ -132,7 +132,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) createOrUpdateCreateReques // BeginDelete - The operation to delete the extension. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set where the extension should be deleted. // - vmssExtensionName - The name of the VM scale set extension. @@ -158,7 +158,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) BeginDelete(ctx context.Co // Delete - The operation to delete the extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetExtensionsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmssExtensionName string, options *VirtualMachineScaleSetExtensionsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetExtensionsClient.BeginDelete" @@ -204,7 +204,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) deleteCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -213,7 +213,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) deleteCreateRequest(ctx co // Get - The operation to get the extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set containing the extension. // - vmssExtensionName - The name of the VM scale set extension. @@ -268,7 +268,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) getCreateRequest(ctx conte if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -285,7 +285,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) getHandleResponse(resp *ht // NewListPager - Gets a list of all extensions in a VM scale set. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set containing the extension. // - options - VirtualMachineScaleSetExtensionsClientListOptions contains the optional parameters for the VirtualMachineScaleSetExtensionsClient.NewListPager @@ -333,7 +333,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) listCreateRequest(ctx cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -351,7 +351,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) listHandleResponse(resp *h // BeginUpdate - The operation to update an extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set where the extension should be updated. 
// - vmssExtensionName - The name of the VM scale set extension. @@ -378,7 +378,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) BeginUpdate(ctx context.Co // Update - The operation to update an extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetExtensionsClient) update(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmssExtensionName string, extensionParameters VirtualMachineScaleSetExtensionUpdate, options *VirtualMachineScaleSetExtensionsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetExtensionsClient.BeginUpdate" @@ -424,7 +424,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) updateCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, extensionParameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetrollingupgrades_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetrollingupgrades_client.go index 7856e8e69..9ef8a1902 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetrollingupgrades_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetrollingupgrades_client.go @@ -47,7 +47,7 @@ func NewVirtualMachineScaleSetRollingUpgradesClient(subscriptionID string, crede // BeginCancel - Cancels the current virtual machine scale set rolling upgrade. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetRollingUpgradesClientBeginCancelOptions contains the optional parameters for the VirtualMachineScaleSetRollingUpgradesClient.BeginCancel @@ -72,7 +72,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) BeginCancel(ctx conte // Cancel - Cancels the current virtual machine scale set rolling upgrade. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetRollingUpgradesClient) cancel(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetRollingUpgradesClientBeginCancelOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetRollingUpgradesClient.BeginCancel" @@ -114,7 +114,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) cancelCreateRequest(c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -123,7 +123,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) cancelCreateRequest(c // GetLatest - Gets the status of the latest virtual machine scale set rolling upgrade. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetRollingUpgradesClientGetLatestOptions contains the optional parameters for the VirtualMachineScaleSetRollingUpgradesClient.GetLatest @@ -170,7 +170,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) getLatestCreateReques return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -190,7 +190,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) getLatestHandleRespon // are not affected. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetRollingUpgradesClientBeginStartExtensionUpgradeOptions contains the optional parameters @@ -217,7 +217,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) BeginStartExtensionUp // are not affected. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetRollingUpgradesClient) startExtensionUpgrade(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetRollingUpgradesClientBeginStartExtensionUpgradeOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetRollingUpgradesClient.BeginStartExtensionUpgrade" @@ -259,7 +259,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) startExtensionUpgrade return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -270,7 +270,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) startExtensionUpgrade // affected. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetRollingUpgradesClientBeginStartOSUpgradeOptions contains the optional parameters for the @@ -297,7 +297,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) BeginStartOSUpgrade(c // affected. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetRollingUpgradesClient) startOSUpgrade(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetRollingUpgradesClientBeginStartOSUpgradeOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetRollingUpgradesClient.BeginStartOSUpgrade" @@ -339,7 +339,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) startOSUpgradeCreateR return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesets_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesets_client.go index dc9276c62..eb6506c15 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesets_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesets_client.go @@ -45,10 +45,94 @@ func NewVirtualMachineScaleSetsClient(subscriptionID string, credential azcore.T return client, nil } +// BeginApproveRollingUpgrade - Approve upgrade on deferred rolling upgrades for OS disks in the virtual machines in a VM +// scale set. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-09-01 +// - resourceGroupName - The name of the resource group. +// - vmScaleSetName - The name of the VM scale set. +// - options - VirtualMachineScaleSetsClientBeginApproveRollingUpgradeOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginApproveRollingUpgrade +// method. 
+func (client *VirtualMachineScaleSetsClient) BeginApproveRollingUpgrade(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginApproveRollingUpgradeOptions) (*runtime.Poller[VirtualMachineScaleSetsClientApproveRollingUpgradeResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.approveRollingUpgrade(ctx, resourceGroupName, vmScaleSetName, options) + if err != nil { + return nil, err + } + poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualMachineScaleSetsClientApproveRollingUpgradeResponse]{ + Tracer: client.internal.Tracer(), + }) + return poller, err + } else { + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualMachineScaleSetsClientApproveRollingUpgradeResponse]{ + Tracer: client.internal.Tracer(), + }) + } +} + +// ApproveRollingUpgrade - Approve upgrade on deferred rolling upgrades for OS disks in the virtual machines in a VM scale +// set. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-09-01 +func (client *VirtualMachineScaleSetsClient) approveRollingUpgrade(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginApproveRollingUpgradeOptions) (*http.Response, error) { + var err error + const operationName = "VirtualMachineScaleSetsClient.BeginApproveRollingUpgrade" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.approveRollingUpgradeCreateRequest(ctx, resourceGroupName, vmScaleSetName, options) + if err != nil { + return nil, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return nil, err + } + return httpResp, nil +} + +// approveRollingUpgradeCreateRequest creates the ApproveRollingUpgrade request. 
+func (client *VirtualMachineScaleSetsClient) approveRollingUpgradeCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginApproveRollingUpgradeOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/approveRollingUpgrade" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if options != nil && options.VMInstanceIDs != nil { + if err := runtime.MarshalAsJSON(req, *options.VMInstanceIDs); err != nil { + return nil, err + } + return req, nil + } + return req, nil +} + // ConvertToSinglePlacementGroup - Converts SinglePlacementGroup property to false for a existing virtual machine scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the virtual machine scale set to create or update. // - parameters - The input object for ConvertToSinglePlacementGroup API. @@ -95,7 +179,7 @@ func (client *VirtualMachineScaleSetsClient) convertToSinglePlacementGroupCreate return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -107,7 +191,7 @@ func (client *VirtualMachineScaleSetsClient) convertToSinglePlacementGroupCreate // BeginCreateOrUpdate - Create or update a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set to create or update. // - parameters - The scale set object. @@ -133,7 +217,7 @@ func (client *VirtualMachineScaleSetsClient) BeginCreateOrUpdate(ctx context.Con // CreateOrUpdate - Create or update a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters VirtualMachineScaleSet, options *VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginCreateOrUpdate" @@ -175,8 +259,14 @@ func (client *VirtualMachineScaleSetsClient) createOrUpdateCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{*options.IfMatch} + } + if options != nil && options.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{*options.IfNoneMatch} + } req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { return nil, err @@ -189,7 +279,7 @@ func (client *VirtualMachineScaleSetsClient) createOrUpdateCreateRequest(ctx con // scale set deallocates. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginDeallocateOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginDeallocate @@ -216,7 +306,7 @@ func (client *VirtualMachineScaleSetsClient) BeginDeallocate(ctx context.Context // scale set deallocates. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetsClient) deallocate(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginDeallocateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginDeallocate" @@ -261,7 +351,7 @@ func (client *VirtualMachineScaleSetsClient) deallocateCreateRequest(ctx context if options != nil && options.Hibernate != nil { reqQP.Set("hibernate", strconv.FormatBool(*options.Hibernate)) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMInstanceIDs != nil { @@ -276,7 +366,7 @@ func (client *VirtualMachineScaleSetsClient) deallocateCreateRequest(ctx context // BeginDelete - Deletes a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginDeleteOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginDelete @@ -301,7 +391,7 @@ func (client *VirtualMachineScaleSetsClient) BeginDelete(ctx context.Context, re // Delete - Deletes a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginDelete" @@ -346,7 +436,7 @@ func (client *VirtualMachineScaleSetsClient) deleteCreateRequest(ctx context.Con if options != nil && options.ForceDeletion != nil { reqQP.Set("forceDeletion", strconv.FormatBool(*options.ForceDeletion)) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -355,7 +445,7 @@ func (client *VirtualMachineScaleSetsClient) deleteCreateRequest(ctx context.Con // BeginDeleteInstances - Deletes virtual machines in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - vmInstanceIDs - A list of virtual machine instance IDs from the VM scale set. @@ -381,7 +471,7 @@ func (client *VirtualMachineScaleSetsClient) BeginDeleteInstances(ctx context.Co // DeleteInstances - Deletes virtual machines in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetsClient) deleteInstances(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, options *VirtualMachineScaleSetsClientBeginDeleteInstancesOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginDeleteInstances" @@ -426,7 +516,7 @@ func (client *VirtualMachineScaleSetsClient) deleteInstancesCreateRequest(ctx co if options != nil && options.ForceDeletion != nil { reqQP.Set("forceDeletion", strconv.FormatBool(*options.ForceDeletion)) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, vmInstanceIDs); err != nil { @@ -439,7 +529,7 @@ func (client *VirtualMachineScaleSetsClient) deleteInstancesCreateRequest(ctx co // service fabric virtual machine scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. 
// - platformUpdateDomain - The platform update domain for which a manual recovery walk is requested @@ -487,7 +577,7 @@ func (client *VirtualMachineScaleSetsClient) forceRecoveryServiceFabricPlatformU return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") reqQP.Set("platformUpdateDomain", strconv.FormatInt(int64(platformUpdateDomain), 10)) if options != nil && options.Zone != nil { reqQP.Set("zone", *options.Zone) @@ -512,7 +602,7 @@ func (client *VirtualMachineScaleSetsClient) forceRecoveryServiceFabricPlatformU // Get - Display information about a virtual machine scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientGetOptions contains the optional parameters for the VirtualMachineScaleSetsClient.Get @@ -559,7 +649,7 @@ func (client *VirtualMachineScaleSetsClient) getCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } @@ -580,7 +670,7 @@ func (client *VirtualMachineScaleSetsClient) getHandleResponse(resp *http.Respon // GetInstanceView - Gets the status of a VM scale set instance. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientGetInstanceViewOptions contains the optional parameters for the VirtualMachineScaleSetsClient.GetInstanceView @@ -627,7 +717,7 @@ func (client *VirtualMachineScaleSetsClient) getInstanceViewCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -644,7 +734,7 @@ func (client *VirtualMachineScaleSetsClient) getInstanceViewHandleResponse(resp // NewGetOSUpgradeHistoryPager - Gets list of OS upgrades on a VM scale set instance. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientGetOSUpgradeHistoryOptions contains the optional parameters for the VirtualMachineScaleSetsClient.NewGetOSUpgradeHistoryPager @@ -692,7 +782,7 @@ func (client *VirtualMachineScaleSetsClient) getOSUpgradeHistoryCreateRequest(ct return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -709,7 +799,7 @@ func (client *VirtualMachineScaleSetsClient) getOSUpgradeHistoryHandleResponse(r // NewListPager - Gets a list of all VM scale sets under a resource group. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. 
// - options - VirtualMachineScaleSetsClientListOptions contains the optional parameters for the VirtualMachineScaleSetsClient.NewListPager // method. @@ -752,7 +842,7 @@ func (client *VirtualMachineScaleSetsClient) listCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -771,7 +861,7 @@ func (client *VirtualMachineScaleSetsClient) listHandleResponse(resp *http.Respo // nextLink property in the response to get the next page of VM Scale Sets. Do this till nextLink is // null to fetch all the VM Scale Sets. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - options - VirtualMachineScaleSetsClientListAllOptions contains the optional parameters for the VirtualMachineScaleSetsClient.NewListAllPager // method. func (client *VirtualMachineScaleSetsClient) NewListAllPager(options *VirtualMachineScaleSetsClientListAllOptions) *runtime.Pager[VirtualMachineScaleSetsClientListAllResponse] { @@ -809,7 +899,7 @@ func (client *VirtualMachineScaleSetsClient) listAllCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -826,7 +916,7 @@ func (client *VirtualMachineScaleSetsClient) listAllHandleResponse(resp *http.Re // NewListByLocationPager - Gets all the VM scale sets under the specified subscription for the specified location. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - location - The location for which VM scale sets under the subscription are queried. // - options - VirtualMachineScaleSetsClientListByLocationOptions contains the optional parameters for the VirtualMachineScaleSetsClient.NewListByLocationPager // method. @@ -869,7 +959,7 @@ func (client *VirtualMachineScaleSetsClient) listByLocationCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -887,7 +977,7 @@ func (client *VirtualMachineScaleSetsClient) listByLocationHandleResponse(resp * // NewListSKUsPager - Gets a list of SKUs available for your VM scale set, including the minimum and maximum VM instances // allowed for each SKU. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. 
// - options - VirtualMachineScaleSetsClientListSKUsOptions contains the optional parameters for the VirtualMachineScaleSetsClient.NewListSKUsPager @@ -935,7 +1025,7 @@ func (client *VirtualMachineScaleSetsClient) listSKUsCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -955,7 +1045,7 @@ func (client *VirtualMachineScaleSetsClient) listSKUsHandleResponse(resp *http.R // details: https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-maintenance-notifications // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginPerformMaintenanceOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginPerformMaintenance @@ -982,7 +1072,7 @@ func (client *VirtualMachineScaleSetsClient) BeginPerformMaintenance(ctx context // details: https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-maintenance-notifications // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetsClient) performMaintenance(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginPerformMaintenanceOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginPerformMaintenance" @@ -1024,7 +1114,7 @@ func (client *VirtualMachineScaleSetsClient) performMaintenanceCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMInstanceIDs != nil { @@ -1041,7 +1131,7 @@ func (client *VirtualMachineScaleSetsClient) performMaintenanceCreateRequest(ctx // avoid charges. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginPowerOffOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginPowerOff @@ -1068,7 +1158,7 @@ func (client *VirtualMachineScaleSetsClient) BeginPowerOff(ctx context.Context, // avoid charges. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetsClient) powerOff(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginPowerOffOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginPowerOff" @@ -1113,7 +1203,7 @@ func (client *VirtualMachineScaleSetsClient) powerOffCreateRequest(ctx context.C if options != nil && options.SkipShutdown != nil { reqQP.Set("skipShutdown", strconv.FormatBool(*options.SkipShutdown)) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMInstanceIDs != nil { @@ -1128,7 +1218,7 @@ func (client *VirtualMachineScaleSetsClient) powerOffCreateRequest(ctx context.C // BeginReapply - Reapplies the Virtual Machine Scale Set Virtual Machine Profile to the Virtual Machine Instances // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginReapplyOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginReapply @@ -1154,7 +1244,7 @@ func (client *VirtualMachineScaleSetsClient) BeginReapply(ctx context.Context, r // Reapply - Reapplies the Virtual Machine Scale Set Virtual Machine Profile to the Virtual Machine Instances // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetsClient) reapply(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginReapplyOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginReapply" @@ -1196,7 +1286,7 @@ func (client *VirtualMachineScaleSetsClient) reapplyCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1206,7 +1296,7 @@ func (client *VirtualMachineScaleSetsClient) reapplyCreateRequest(ctx context.Co // them back on. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginRedeployOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginRedeploy @@ -1232,7 +1322,7 @@ func (client *VirtualMachineScaleSetsClient) BeginRedeploy(ctx context.Context, // back on. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetsClient) redeploy(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginRedeployOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginRedeploy" @@ -1274,7 +1364,7 @@ func (client *VirtualMachineScaleSetsClient) redeployCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMInstanceIDs != nil { @@ -1291,7 +1381,7 @@ func (client *VirtualMachineScaleSetsClient) redeployCreateRequest(ctx context.C // reset to initial state. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginReimageOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginReimage @@ -1318,7 +1408,7 @@ func (client *VirtualMachineScaleSetsClient) BeginReimage(ctx context.Context, r // reset to initial state. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetsClient) reimage(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginReimageOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginReimage" @@ -1360,7 +1450,7 @@ func (client *VirtualMachineScaleSetsClient) reimageCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMScaleSetReimageInput != nil { @@ -1376,7 +1466,7 @@ func (client *VirtualMachineScaleSetsClient) reimageCreateRequest(ctx context.Co // is only supported for managed disks. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginReimageAllOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginReimageAll @@ -1402,7 +1492,7 @@ func (client *VirtualMachineScaleSetsClient) BeginReimageAll(ctx context.Context // is only supported for managed disks. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetsClient) reimageAll(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginReimageAllOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginReimageAll" @@ -1444,7 +1534,7 @@ func (client *VirtualMachineScaleSetsClient) reimageAllCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMInstanceIDs != nil { @@ -1459,7 +1549,7 @@ func (client *VirtualMachineScaleSetsClient) reimageAllCreateRequest(ctx context // BeginRestart - Restarts one or more virtual machines in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginRestartOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginRestart @@ -1484,7 +1574,7 @@ func (client *VirtualMachineScaleSetsClient) BeginRestart(ctx context.Context, r // Restart - Restarts one or more virtual machines in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetsClient) restart(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginRestartOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginRestart" @@ -1526,7 +1616,7 @@ func (client *VirtualMachineScaleSetsClient) restartCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMInstanceIDs != nil { @@ -1541,7 +1631,7 @@ func (client *VirtualMachineScaleSetsClient) restartCreateRequest(ctx context.Co // BeginSetOrchestrationServiceState - Changes ServiceState property for a given service // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the virtual machine scale set to create or update. // - parameters - The input object for SetOrchestrationServiceState API. @@ -1567,7 +1657,7 @@ func (client *VirtualMachineScaleSetsClient) BeginSetOrchestrationServiceState(c // SetOrchestrationServiceState - Changes ServiceState property for a given service // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetsClient) setOrchestrationServiceState(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters OrchestrationServiceStateInput, options *VirtualMachineScaleSetsClientBeginSetOrchestrationServiceStateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginSetOrchestrationServiceState" @@ -1609,7 +1699,7 @@ func (client *VirtualMachineScaleSetsClient) setOrchestrationServiceStateCreateR return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -1621,7 +1711,7 @@ func (client *VirtualMachineScaleSetsClient) setOrchestrationServiceStateCreateR // BeginStart - Starts one or more virtual machines in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginStartOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginStart @@ -1646,7 +1736,7 @@ func (client *VirtualMachineScaleSetsClient) BeginStart(ctx context.Context, res // Start - Starts one or more virtual machines in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetsClient) start(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginStartOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginStart" @@ -1688,7 +1778,7 @@ func (client *VirtualMachineScaleSetsClient) startCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMInstanceIDs != nil { @@ -1703,7 +1793,7 @@ func (client *VirtualMachineScaleSetsClient) startCreateRequest(ctx context.Cont // BeginUpdate - Update a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set to create or update. // - parameters - The scale set object. @@ -1729,7 +1819,7 @@ func (client *VirtualMachineScaleSetsClient) BeginUpdate(ctx context.Context, re // Update - Update a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetsClient) update(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters VirtualMachineScaleSetUpdate, options *VirtualMachineScaleSetsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginUpdate" @@ -1771,8 +1861,14 @@ func (client *VirtualMachineScaleSetsClient) updateCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{*options.IfMatch} + } + if options != nil && options.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{*options.IfNoneMatch} + } req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { return nil, err @@ -1783,7 +1879,7 @@ func (client *VirtualMachineScaleSetsClient) updateCreateRequest(ctx context.Con // BeginUpdateInstances - Upgrades one or more virtual machines to the latest SKU set in the VM scale set model. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - vmInstanceIDs - A list of virtual machine instance IDs from the VM scale set. @@ -1809,7 +1905,7 @@ func (client *VirtualMachineScaleSetsClient) BeginUpdateInstances(ctx context.Co // UpdateInstances - Upgrades one or more virtual machines to the latest SKU set in the VM scale set model. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetsClient) updateInstances(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, options *VirtualMachineScaleSetsClientBeginUpdateInstancesOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginUpdateInstances" @@ -1851,7 +1947,7 @@ func (client *VirtualMachineScaleSetsClient) updateInstancesCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, vmInstanceIDs); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmextensions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmextensions_client.go index 8d657b688..ff2347623 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmextensions_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmextensions_client.go @@ -47,7 +47,7 @@ func NewVirtualMachineScaleSetVMExtensionsClient(subscriptionID string, credenti // BeginCreateOrUpdate - The operation to create or update the VMSS VM extension. 
// If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -75,7 +75,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) BeginCreateOrUpdate(ctx // CreateOrUpdate - The operation to create or update the VMSS VM extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetVMExtensionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, vmExtensionName string, extensionParameters VirtualMachineScaleSetVMExtension, options *VirtualMachineScaleSetVMExtensionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMExtensionsClient.BeginCreateOrUpdate" @@ -125,7 +125,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) createOrUpdateCreateRequ return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, extensionParameters); err != nil { @@ -137,7 +137,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) createOrUpdateCreateRequ // BeginDelete - The operation to delete the VMSS VM extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -164,7 +164,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) BeginDelete(ctx context. // Delete - The operation to delete the VMSS VM extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetVMExtensionsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, vmExtensionName string, options *VirtualMachineScaleSetVMExtensionsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMExtensionsClient.BeginDelete" @@ -214,7 +214,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) deleteCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -223,7 +223,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) deleteCreateRequest(ctx // Get - The operation to get the VMSS VM extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. 
@@ -283,7 +283,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) getCreateRequest(ctx con if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -301,7 +301,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) getHandleResponse(resp * // List - The operation to get all extensions of an instance in Virtual Machine Scaleset. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -356,7 +356,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) listCreateRequest(ctx co if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -374,7 +374,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) listHandleResponse(resp // BeginUpdate - The operation to update the VMSS VM extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -402,7 +402,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) BeginUpdate(ctx context. // Update - The operation to update the VMSS VM extension. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetVMExtensionsClient) update(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, vmExtensionName string, extensionParameters VirtualMachineScaleSetVMExtensionUpdate, options *VirtualMachineScaleSetVMExtensionsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMExtensionsClient.BeginUpdate" @@ -452,7 +452,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) updateCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, extensionParameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmruncommands_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmruncommands_client.go index beb09f99b..9736973fc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmruncommands_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmruncommands_client.go @@ -47,7 +47,7 @@ func NewVirtualMachineScaleSetVMRunCommandsClient(subscriptionID string, credent // BeginCreateOrUpdate - The operation to create or update the VMSS VM run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -75,7 +75,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) BeginCreateOrUpdate(ctx // CreateOrUpdate - The operation to create or update the VMSS VM run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetVMRunCommandsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, runCommand VirtualMachineRunCommand, options *VirtualMachineScaleSetVMRunCommandsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMRunCommandsClient.BeginCreateOrUpdate" @@ -125,7 +125,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) createOrUpdateCreateReq return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} if err := runtime.MarshalAsJSON(req, runCommand); err != nil { @@ -137,7 +137,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) createOrUpdateCreateReq // BeginDelete - The operation to delete the VMSS VM run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. 
// - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -164,7 +164,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) BeginDelete(ctx context // Delete - The operation to delete the VMSS VM run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetVMRunCommandsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, options *VirtualMachineScaleSetVMRunCommandsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMRunCommandsClient.BeginDelete" @@ -214,7 +214,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) deleteCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} return req, nil @@ -223,7 +223,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) deleteCreateRequest(ctx // Get - The operation to get the VMSS VM run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -283,7 +283,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) getCreateRequest(ctx co if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} return req, nil @@ -300,7 +300,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) getHandleResponse(resp // NewListPager - The operation to get all run commands of an instance in Virtual Machine Scaleset. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -356,7 +356,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) listCreateRequest(ctx c if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} return req, nil @@ -374,7 +374,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) listHandleResponse(resp // BeginUpdate - The operation to update the VMSS VM run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -402,7 +402,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) BeginUpdate(ctx context // Update - The operation to update the VMSS VM run command. 
// If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetVMRunCommandsClient) update(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, runCommand VirtualMachineRunCommandUpdate, options *VirtualMachineScaleSetVMRunCommandsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMRunCommandsClient.BeginUpdate" @@ -452,7 +452,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) updateCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} if err := runtime.MarshalAsJSON(req, runCommand); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvms_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvms_client.go index e27cc0a64..e1da5fc11 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvms_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvms_client.go @@ -45,12 +45,179 @@ func NewVirtualMachineScaleSetVMsClient(subscriptionID string, credential azcore return client, nil } +// BeginApproveRollingUpgrade - Approve upgrade on deferred rolling upgrade for OS disk on a VM scale set instance. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-09-01 +// - resourceGroupName - The name of the resource group. +// - vmScaleSetName - The name of the VM scale set. +// - instanceID - The instance ID of the virtual machine. +// - options - VirtualMachineScaleSetVMsClientBeginApproveRollingUpgradeOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginApproveRollingUpgrade +// method. +func (client *VirtualMachineScaleSetVMsClient) BeginApproveRollingUpgrade(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginApproveRollingUpgradeOptions) (*runtime.Poller[VirtualMachineScaleSetVMsClientApproveRollingUpgradeResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.approveRollingUpgrade(ctx, resourceGroupName, vmScaleSetName, instanceID, options) + if err != nil { + return nil, err + } + poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualMachineScaleSetVMsClientApproveRollingUpgradeResponse]{ + Tracer: client.internal.Tracer(), + }) + return poller, err + } else { + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualMachineScaleSetVMsClientApproveRollingUpgradeResponse]{ + Tracer: client.internal.Tracer(), + }) + } +} + +// ApproveRollingUpgrade - Approve upgrade on deferred rolling upgrade for OS disk on a VM scale set instance. +// If the operation fails it returns an *azcore.ResponseError type. 
+//
+// Generated from API version 2023-09-01
+func (client *VirtualMachineScaleSetVMsClient) approveRollingUpgrade(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginApproveRollingUpgradeOptions) (*http.Response, error) {
+	var err error
+	const operationName = "VirtualMachineScaleSetVMsClient.BeginApproveRollingUpgrade"
+	ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName)
+	ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil)
+	defer func() { endSpan(err) }()
+	req, err := client.approveRollingUpgradeCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, options)
+	if err != nil {
+		return nil, err
+	}
+	httpResp, err := client.internal.Pipeline().Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if !runtime.HasStatusCode(httpResp, http.StatusAccepted) {
+		err = runtime.NewResponseError(httpResp)
+		return nil, err
+	}
+	return httpResp, nil
+}
+
+// approveRollingUpgradeCreateRequest creates the ApproveRollingUpgrade request.
+func (client *VirtualMachineScaleSetVMsClient) approveRollingUpgradeCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginApproveRollingUpgradeOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/approveRollingUpgrade"
+	if resourceGroupName == "" {
+		return nil, errors.New("parameter resourceGroupName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+	if vmScaleSetName == "" {
+		return nil, errors.New("parameter vmScaleSetName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName))
+	if instanceID == "" {
+		return nil, errors.New("parameter instanceID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID))
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2023-09-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	return req, nil
+}
+
+// BeginAttachDetachDataDisks - Attach and detach data disks to/from a virtual machine in a VM scale set.
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2023-09-01
+// - resourceGroupName - The name of the resource group.
+// - vmScaleSetName - The name of the VM scale set.
+// - instanceID - The instance ID of the virtual machine.
+// - parameters - Parameters supplied to the attach and detach data disks operation on a Virtual Machine Scale Sets VM.
+// - options - VirtualMachineScaleSetVMsClientBeginAttachDetachDataDisksOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginAttachDetachDataDisks
+// method.
+func (client *VirtualMachineScaleSetVMsClient) BeginAttachDetachDataDisks(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, parameters AttachDetachDataDisksRequest, options *VirtualMachineScaleSetVMsClientBeginAttachDetachDataDisksOptions) (*runtime.Poller[VirtualMachineScaleSetVMsClientAttachDetachDataDisksResponse], error) {
+	if options == nil || options.ResumeToken == "" {
+		resp, err := client.attachDetachDataDisks(ctx, resourceGroupName, vmScaleSetName, instanceID, parameters, options)
+		if err != nil {
+			return nil, err
+		}
+		poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[VirtualMachineScaleSetVMsClientAttachDetachDataDisksResponse]{
+			FinalStateVia: runtime.FinalStateViaLocation,
+			Tracer:        client.internal.Tracer(),
+		})
+		return poller, err
+	} else {
+		return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[VirtualMachineScaleSetVMsClientAttachDetachDataDisksResponse]{
+			Tracer: client.internal.Tracer(),
+		})
+	}
+}
+
+// AttachDetachDataDisks - Attach and detach data disks to/from a virtual machine in a VM scale set.
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2023-09-01
+func (client *VirtualMachineScaleSetVMsClient) attachDetachDataDisks(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, parameters AttachDetachDataDisksRequest, options *VirtualMachineScaleSetVMsClientBeginAttachDetachDataDisksOptions) (*http.Response, error) {
+	var err error
+	const operationName = "VirtualMachineScaleSetVMsClient.BeginAttachDetachDataDisks"
+	ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName)
+	ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil)
+	defer func() { endSpan(err) }()
+	req, err := client.attachDetachDataDisksCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, parameters, options)
+	if err != nil {
+		return nil, err
+	}
+	httpResp, err := client.internal.Pipeline().Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusAccepted) {
+		err = runtime.NewResponseError(httpResp)
+		return nil, err
+	}
+	return httpResp, nil
+}
+
+// attachDetachDataDisksCreateRequest creates the AttachDetachDataDisks request.
+func (client *VirtualMachineScaleSetVMsClient) attachDetachDataDisksCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, parameters AttachDetachDataDisksRequest, options *VirtualMachineScaleSetVMsClientBeginAttachDetachDataDisksOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/attachDetachDataDisks" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if instanceID == "" { + return nil, errors.New("parameter instanceID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + // BeginDeallocate - Deallocates a specific virtual machine in a VM scale set. Shuts down the virtual machine and releases // the compute resources it uses. You are not billed for the compute resources of this virtual // machine once it is deallocated. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -78,7 +245,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginDeallocate(ctx context.Conte // machine once it is deallocated. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetVMsClient) deallocate(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginDeallocateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginDeallocate" @@ -124,7 +291,7 @@ func (client *VirtualMachineScaleSetVMsClient) deallocateCreateRequest(ctx conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -133,7 +300,7 @@ func (client *VirtualMachineScaleSetVMsClient) deallocateCreateRequest(ctx conte // BeginDelete - Deletes a virtual machine from a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -159,7 +326,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginDelete(ctx context.Context, // Delete - Deletes a virtual machine from a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetVMsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginDelete" @@ -208,7 +375,7 @@ func (client *VirtualMachineScaleSetVMsClient) deleteCreateRequest(ctx context.C if options != nil && options.ForceDeletion != nil { reqQP.Set("forceDeletion", strconv.FormatBool(*options.ForceDeletion)) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -217,7 +384,7 @@ func (client *VirtualMachineScaleSetVMsClient) deleteCreateRequest(ctx context.C // Get - Gets a virtual machine from a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -272,7 +439,7 @@ func (client *VirtualMachineScaleSetVMsClient) getCreateRequest(ctx context.Cont if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -290,7 +457,7 @@ func (client *VirtualMachineScaleSetVMsClient) getHandleResponse(resp *http.Resp // GetInstanceView - Gets the status of a virtual machine from a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -342,7 +509,7 @@ func (client *VirtualMachineScaleSetVMsClient) getInstanceViewCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -359,7 +526,7 @@ func (client *VirtualMachineScaleSetVMsClient) getInstanceViewHandleResponse(res // NewListPager - Gets a list of all virtual machines in a VM scale sets. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - virtualMachineScaleSetName - The name of the VM scale set. 
// - options - VirtualMachineScaleSetVMsClientListOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.NewListPager @@ -416,7 +583,7 @@ func (client *VirtualMachineScaleSetVMsClient) listCreateRequest(ctx context.Con if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -434,7 +601,7 @@ func (client *VirtualMachineScaleSetVMsClient) listHandleResponse(resp *http.Res // BeginPerformMaintenance - Performs maintenance on a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -460,7 +627,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginPerformMaintenance(ctx conte // PerformMaintenance - Performs maintenance on a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetVMsClient) performMaintenance(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginPerformMaintenanceOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginPerformMaintenance" @@ -506,7 +673,7 @@ func (client *VirtualMachineScaleSetVMsClient) performMaintenanceCreateRequest(c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -517,7 +684,7 @@ func (client *VirtualMachineScaleSetVMsClient) performMaintenanceCreateRequest(c // charges. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -545,7 +712,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginPowerOff(ctx context.Context // charges. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetVMsClient) powerOff(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginPowerOffOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginPowerOff" @@ -594,7 +761,7 @@ func (client *VirtualMachineScaleSetVMsClient) powerOffCreateRequest(ctx context if options != nil && options.SkipShutdown != nil { reqQP.Set("skipShutdown", strconv.FormatBool(*options.SkipShutdown)) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -604,7 +771,7 @@ func (client *VirtualMachineScaleSetVMsClient) powerOffCreateRequest(ctx context // back on. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -631,7 +798,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginRedeploy(ctx context.Context // on. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetVMsClient) redeploy(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginRedeployOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginRedeploy" @@ -677,7 +844,7 @@ func (client *VirtualMachineScaleSetVMsClient) redeployCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -686,7 +853,7 @@ func (client *VirtualMachineScaleSetVMsClient) redeployCreateRequest(ctx context // BeginReimage - Reimages (upgrade the operating system) a specific virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -712,7 +879,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginReimage(ctx context.Context, // Reimage - Reimages (upgrade the operating system) a specific virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetVMsClient) reimage(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginReimageOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginReimage" @@ -758,7 +925,7 @@ func (client *VirtualMachineScaleSetVMsClient) reimageCreateRequest(ctx context. 
return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMScaleSetVMReimageInput != nil { @@ -774,7 +941,7 @@ func (client *VirtualMachineScaleSetVMsClient) reimageCreateRequest(ctx context. // is only supported for managed disks. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -801,7 +968,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginReimageAll(ctx context.Conte // is only supported for managed disks. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetVMsClient) reimageAll(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginReimageAllOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginReimageAll" @@ -847,7 +1014,7 @@ func (client *VirtualMachineScaleSetVMsClient) reimageAllCreateRequest(ctx conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -856,7 +1023,7 @@ func (client *VirtualMachineScaleSetVMsClient) reimageAllCreateRequest(ctx conte // BeginRestart - Restarts a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -882,7 +1049,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginRestart(ctx context.Context, // Restart - Restarts a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetVMsClient) restart(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginRestartOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginRestart" @@ -928,7 +1095,7 @@ func (client *VirtualMachineScaleSetVMsClient) restartCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -938,7 +1105,7 @@ func (client *VirtualMachineScaleSetVMsClient) restartCreateRequest(ctx context. // scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. 
// - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -993,7 +1160,7 @@ func (client *VirtualMachineScaleSetVMsClient) retrieveBootDiagnosticsDataCreate if options != nil && options.SasURIExpirationTimeInMinutes != nil { reqQP.Set("sasUriExpirationTimeInMinutes", strconv.FormatInt(int64(*options.SasURIExpirationTimeInMinutes), 10)) } - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1011,7 +1178,7 @@ func (client *VirtualMachineScaleSetVMsClient) retrieveBootDiagnosticsDataHandle // BeginRunCommand - Run command on a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -1039,7 +1206,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginRunCommand(ctx context.Conte // RunCommand - Run command on a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetVMsClient) runCommand(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, parameters RunCommandInput, options *VirtualMachineScaleSetVMsClientBeginRunCommandOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginRunCommand" @@ -1085,7 +1252,7 @@ func (client *VirtualMachineScaleSetVMsClient) runCommandCreateRequest(ctx conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -1097,7 +1264,7 @@ func (client *VirtualMachineScaleSetVMsClient) runCommandCreateRequest(ctx conte // SimulateEviction - The operation to simulate the eviction of spot virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -1148,7 +1315,7 @@ func (client *VirtualMachineScaleSetVMsClient) simulateEvictionCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1157,7 +1324,7 @@ func (client *VirtualMachineScaleSetVMsClient) simulateEvictionCreateRequest(ctx // BeginStart - Starts a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. 
@@ -1183,7 +1350,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginStart(ctx context.Context, r // Start - Starts a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetVMsClient) start(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginStartOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginStart" @@ -1229,7 +1396,7 @@ func (client *VirtualMachineScaleSetVMsClient) startCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1238,7 +1405,7 @@ func (client *VirtualMachineScaleSetVMsClient) startCreateRequest(ctx context.Co // BeginUpdate - Updates a virtual machine of a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set where the extension should be create or updated. // - instanceID - The instance ID of the virtual machine. @@ -1265,7 +1432,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginUpdate(ctx context.Context, // Update - Updates a virtual machine of a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 func (client *VirtualMachineScaleSetVMsClient) update(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, parameters VirtualMachineScaleSetVM, options *VirtualMachineScaleSetVMsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginUpdate" @@ -1311,8 +1478,14 @@ func (client *VirtualMachineScaleSetVMsClient) updateCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{*options.IfMatch} + } + if options != nil && options.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{*options.IfNoneMatch} + } req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { return nil, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinesizes_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinesizes_client.go index 066ecc7b4..02aadc8c2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinesizes_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinesizes_client.go @@ -46,7 +46,7 @@ func NewVirtualMachineSizesClient(subscriptionID string, credential azcore.Token // NewListPager - This API is deprecated. 
Use Resources Skus [https://docs.microsoft.com/rest/api/compute/resourceskus/list] // -// Generated from API version 2023-07-01 +// Generated from API version 2023-09-01 // - location - The location upon which virtual-machine-sizes is queried. // - options - VirtualMachineSizesClientListOptions contains the optional parameters for the VirtualMachineSizesClient.NewListPager // method. @@ -90,7 +90,7 @@ func (client *VirtualMachineSizesClient) listCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-07-01") + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/CHANGELOG.md deleted file mode 100644 index 52911e4cc..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/CHANGELOG.md +++ /dev/null @@ -1,2 +0,0 @@ -# Change History - diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/_meta.json deleted file mode 100644 index 10e0d5369..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/_meta.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", - "readme": "/_/azure-rest-api-specs/specification/privatedns/resource-manager/readme.md", - "tag": "package-2018-09", - "use": "@microsoft.azure/autorest.go@2.1.187", - "repository_url": "https://github.com/Azure/azure-rest-api-specs.git", - "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-2018-09 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/privatedns/resource-manager/readme.md", - "additional_properties": { - "additional_options": "--go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION" - } -} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/client.go deleted file mode 100644 index b01b46377..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/client.go +++ /dev/null @@ -1,43 +0,0 @@ -// Deprecated: Please note, this package has been deprecated. A replacement package is available [github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns). We strongly encourage you to upgrade to continue receiving updates. See [Migration Guide](https://aka.ms/azsdk/golang/t2/migration) for guidance on upgrading. Refer to our [deprecation policy](https://azure.github.io/azure-sdk/policies_support.html) for more details. -// -// Package privatedns implements the Azure ARM Privatedns service API version 2018-09-01. -// -// The Private DNS Management Client. -package privatedns - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "github.com/Azure/go-autorest/autorest" -) - -const ( - // DefaultBaseURI is the default URI used for the service Privatedns - DefaultBaseURI = "https://management.azure.com" -) - -// BaseClient is the base client for Privatedns. -type BaseClient struct { - autorest.Client - BaseURI string - SubscriptionID string -} - -// New creates an instance of the BaseClient client. -func New(subscriptionID string) BaseClient { - return NewWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewWithBaseURI creates an instance of the BaseClient client using a custom endpoint. Use this when interacting with -// an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { - return BaseClient{ - Client: autorest.NewClientWithUserAgent(UserAgent()), - BaseURI: baseURI, - SubscriptionID: subscriptionID, - } -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/enums.go deleted file mode 100644 index a4cbf2a65..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/enums.go +++ /dev/null @@ -1,72 +0,0 @@ -package privatedns - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// ProvisioningState enumerates the values for provisioning state. -type ProvisioningState string - -const ( - // Canceled ... - Canceled ProvisioningState = "Canceled" - // Creating ... - Creating ProvisioningState = "Creating" - // Deleting ... - Deleting ProvisioningState = "Deleting" - // Failed ... - Failed ProvisioningState = "Failed" - // Succeeded ... - Succeeded ProvisioningState = "Succeeded" - // Updating ... - Updating ProvisioningState = "Updating" -) - -// PossibleProvisioningStateValues returns an array of possible values for the ProvisioningState const type. -func PossibleProvisioningStateValues() []ProvisioningState { - return []ProvisioningState{Canceled, Creating, Deleting, Failed, Succeeded, Updating} -} - -// RecordType enumerates the values for record type. -type RecordType string - -const ( - // A ... - A RecordType = "A" - // AAAA ... - AAAA RecordType = "AAAA" - // CNAME ... - CNAME RecordType = "CNAME" - // MX ... - MX RecordType = "MX" - // PTR ... - PTR RecordType = "PTR" - // SOA ... - SOA RecordType = "SOA" - // SRV ... - SRV RecordType = "SRV" - // TXT ... - TXT RecordType = "TXT" -) - -// PossibleRecordTypeValues returns an array of possible values for the RecordType const type. -func PossibleRecordTypeValues() []RecordType { - return []RecordType{A, AAAA, CNAME, MX, PTR, SOA, SRV, TXT} -} - -// VirtualNetworkLinkState enumerates the values for virtual network link state. -type VirtualNetworkLinkState string - -const ( - // Completed ... - Completed VirtualNetworkLinkState = "Completed" - // InProgress ... 
- InProgress VirtualNetworkLinkState = "InProgress" -) - -// PossibleVirtualNetworkLinkStateValues returns an array of possible values for the VirtualNetworkLinkState const type. -func PossibleVirtualNetworkLinkStateValues() []VirtualNetworkLinkState { - return []VirtualNetworkLinkState{Completed, InProgress} -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/models.go deleted file mode 100644 index 7c23f9f65..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/models.go +++ /dev/null @@ -1,1350 +0,0 @@ -package privatedns - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "encoding/json" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/to" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// The package's fully qualified name. -const fqdn = "github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns" - -// AaaaRecord an AAAA record. -type AaaaRecord struct { - // Ipv6Address - The IPv6 address of this AAAA record. - Ipv6Address *string `json:"ipv6Address,omitempty"` -} - -// ARecord an A record. -type ARecord struct { - // Ipv4Address - The IPv4 address of this A record. - Ipv4Address *string `json:"ipv4Address,omitempty"` -} - -// CloudError an error response from the service. -type CloudError struct { - // Error - Cloud error body. - Error *CloudErrorBody `json:"error,omitempty"` -} - -// CloudErrorBody an error response from the service. -type CloudErrorBody struct { - // Code - An identifier for the error. Codes are invariant and are intended to be consumed programmatically. - Code *string `json:"code,omitempty"` - // Message - A message describing the error, intended to be suitable for display in a user interface. - Message *string `json:"message,omitempty"` - // Target - The target of the particular error. For example, the name of the property in error. - Target *string `json:"target,omitempty"` - // Details - A list of additional details about the error. - Details *[]CloudErrorBody `json:"details,omitempty"` -} - -// CnameRecord a CNAME record. -type CnameRecord struct { - // Cname - The canonical name for this CNAME record. - Cname *string `json:"cname,omitempty"` -} - -// MxRecord an MX record. -type MxRecord struct { - // Preference - The preference value for this MX record. - Preference *int32 `json:"preference,omitempty"` - // Exchange - The domain name of the mail host for this MX record. - Exchange *string `json:"exchange,omitempty"` -} - -// PrivateZone describes a Private DNS zone. -type PrivateZone struct { - autorest.Response `json:"-"` - // Etag - The ETag of the zone. - Etag *string `json:"etag,omitempty"` - // PrivateZoneProperties - Properties of the Private DNS zone. - *PrivateZoneProperties `json:"properties,omitempty"` - // Tags - Resource tags. - Tags map[string]*string `json:"tags"` - // Location - The Azure Region where the resource lives - Location *string `json:"location,omitempty"` - // ID - READ-ONLY; Fully qualified resource Id for the resource. 
Example - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateDnsZoneName}'. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. Example - 'Microsoft.Network/privateDnsZones'. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for PrivateZone. -func (pz PrivateZone) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if pz.Etag != nil { - objectMap["etag"] = pz.Etag - } - if pz.PrivateZoneProperties != nil { - objectMap["properties"] = pz.PrivateZoneProperties - } - if pz.Tags != nil { - objectMap["tags"] = pz.Tags - } - if pz.Location != nil { - objectMap["location"] = pz.Location - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for PrivateZone struct. -func (pz *PrivateZone) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "etag": - if v != nil { - var etag string - err = json.Unmarshal(*v, &etag) - if err != nil { - return err - } - pz.Etag = &etag - } - case "properties": - if v != nil { - var privateZoneProperties PrivateZoneProperties - err = json.Unmarshal(*v, &privateZoneProperties) - if err != nil { - return err - } - pz.PrivateZoneProperties = &privateZoneProperties - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - pz.Tags = tags - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - pz.Location = &location - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - pz.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - pz.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - pz.Type = &typeVar - } - } - } - - return nil -} - -// PrivateZoneListResult the response to a Private DNS zone list operation. -type PrivateZoneListResult struct { - autorest.Response `json:"-"` - // Value - Information about the Private DNS zones. - Value *[]PrivateZone `json:"value,omitempty"` - // NextLink - READ-ONLY; The continuation token for the next page of results. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for PrivateZoneListResult. -func (pzlr PrivateZoneListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if pzlr.Value != nil { - objectMap["value"] = pzlr.Value - } - return json.Marshal(objectMap) -} - -// PrivateZoneListResultIterator provides access to a complete listing of PrivateZone values. -type PrivateZoneListResultIterator struct { - i int - page PrivateZoneListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. 
-func (iter *PrivateZoneListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/PrivateZoneListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *PrivateZoneListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter PrivateZoneListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter PrivateZoneListResultIterator) Response() PrivateZoneListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter PrivateZoneListResultIterator) Value() PrivateZone { - if !iter.page.NotDone() { - return PrivateZone{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the PrivateZoneListResultIterator type. -func NewPrivateZoneListResultIterator(page PrivateZoneListResultPage) PrivateZoneListResultIterator { - return PrivateZoneListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (pzlr PrivateZoneListResult) IsEmpty() bool { - return pzlr.Value == nil || len(*pzlr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (pzlr PrivateZoneListResult) hasNextLink() bool { - return pzlr.NextLink != nil && len(*pzlr.NextLink) != 0 -} - -// privateZoneListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (pzlr PrivateZoneListResult) privateZoneListResultPreparer(ctx context.Context) (*http.Request, error) { - if !pzlr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(pzlr.NextLink))) -} - -// PrivateZoneListResultPage contains a page of PrivateZone values. -type PrivateZoneListResultPage struct { - fn func(context.Context, PrivateZoneListResult) (PrivateZoneListResult, error) - pzlr PrivateZoneListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. 
-func (page *PrivateZoneListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/PrivateZoneListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.pzlr) - if err != nil { - return err - } - page.pzlr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *PrivateZoneListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page PrivateZoneListResultPage) NotDone() bool { - return !page.pzlr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page PrivateZoneListResultPage) Response() PrivateZoneListResult { - return page.pzlr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page PrivateZoneListResultPage) Values() []PrivateZone { - if page.pzlr.IsEmpty() { - return nil - } - return *page.pzlr.Value -} - -// Creates a new instance of the PrivateZoneListResultPage type. -func NewPrivateZoneListResultPage(cur PrivateZoneListResult, getNextPage func(context.Context, PrivateZoneListResult) (PrivateZoneListResult, error)) PrivateZoneListResultPage { - return PrivateZoneListResultPage{ - fn: getNextPage, - pzlr: cur, - } -} - -// PrivateZoneProperties represents the properties of the Private DNS zone. -type PrivateZoneProperties struct { - // MaxNumberOfRecordSets - READ-ONLY; The maximum number of record sets that can be created in this Private DNS zone. This is a read-only property and any attempt to set this value will be ignored. - MaxNumberOfRecordSets *int64 `json:"maxNumberOfRecordSets,omitempty"` - // NumberOfRecordSets - READ-ONLY; The current number of record sets in this Private DNS zone. This is a read-only property and any attempt to set this value will be ignored. - NumberOfRecordSets *int64 `json:"numberOfRecordSets,omitempty"` - // MaxNumberOfVirtualNetworkLinks - READ-ONLY; The maximum number of virtual networks that can be linked to this Private DNS zone. This is a read-only property and any attempt to set this value will be ignored. - MaxNumberOfVirtualNetworkLinks *int64 `json:"maxNumberOfVirtualNetworkLinks,omitempty"` - // NumberOfVirtualNetworkLinks - READ-ONLY; The current number of virtual networks that are linked to this Private DNS zone. This is a read-only property and any attempt to set this value will be ignored. - NumberOfVirtualNetworkLinks *int64 `json:"numberOfVirtualNetworkLinks,omitempty"` - // MaxNumberOfVirtualNetworkLinksWithRegistration - READ-ONLY; The maximum number of virtual networks that can be linked to this Private DNS zone with registration enabled. This is a read-only property and any attempt to set this value will be ignored. - MaxNumberOfVirtualNetworkLinksWithRegistration *int64 `json:"maxNumberOfVirtualNetworkLinksWithRegistration,omitempty"` - // NumberOfVirtualNetworkLinksWithRegistration - READ-ONLY; The current number of virtual networks that are linked to this Private DNS zone with registration enabled. 
This is a read-only property and any attempt to set this value will be ignored. - NumberOfVirtualNetworkLinksWithRegistration *int64 `json:"numberOfVirtualNetworkLinksWithRegistration,omitempty"` - // ProvisioningState - READ-ONLY; The provisioning state of the resource. This is a read-only property and any attempt to set this value will be ignored. Possible values include: 'Creating', 'Updating', 'Deleting', 'Succeeded', 'Failed', 'Canceled' - ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` -} - -// MarshalJSON is the custom marshaler for PrivateZoneProperties. -func (pzp PrivateZoneProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// PrivateZonesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type PrivateZonesCreateOrUpdateFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(PrivateZonesClient) (PrivateZone, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *PrivateZonesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for PrivateZonesCreateOrUpdateFuture.Result. -func (future *PrivateZonesCreateOrUpdateFuture) result(client PrivateZonesClient) (pz PrivateZone, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - pz.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("privatedns.PrivateZonesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if pz.Response.Response, err = future.GetResult(sender); err == nil && pz.Response.Response.StatusCode != http.StatusNoContent { - pz, err = client.CreateOrUpdateResponder(pz.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesCreateOrUpdateFuture", "Result", pz.Response.Response, "Failure responding to request") - } - } - return -} - -// PrivateZonesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type PrivateZonesDeleteFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(PrivateZonesClient) (autorest.Response, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *PrivateZonesDeleteFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for PrivateZonesDeleteFuture.Result. 
-func (future *PrivateZonesDeleteFuture) result(client PrivateZonesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("privatedns.PrivateZonesDeleteFuture") - return - } - ar.Response = future.Response() - return -} - -// PrivateZonesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type PrivateZonesUpdateFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(PrivateZonesClient) (PrivateZone, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *PrivateZonesUpdateFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for PrivateZonesUpdateFuture.Result. -func (future *PrivateZonesUpdateFuture) result(client PrivateZonesClient) (pz PrivateZone, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - pz.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("privatedns.PrivateZonesUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if pz.Response.Response, err = future.GetResult(sender); err == nil && pz.Response.Response.StatusCode != http.StatusNoContent { - pz, err = client.UpdateResponder(pz.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesUpdateFuture", "Result", pz.Response.Response, "Failure responding to request") - } - } - return -} - -// ProxyResource the resource model definition for an ARM proxy resource. -type ProxyResource struct { - // ID - READ-ONLY; Fully qualified resource Id for the resource. Example - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateDnsZoneName}'. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. Example - 'Microsoft.Network/privateDnsZones'. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for ProxyResource. -func (pr ProxyResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// PtrRecord a PTR record. -type PtrRecord struct { - // Ptrdname - The PTR target domain name for this PTR record. - Ptrdname *string `json:"ptrdname,omitempty"` -} - -// RecordSet describes a DNS record set (a collection of DNS records with the same name and type) in a -// Private DNS zone. -type RecordSet struct { - autorest.Response `json:"-"` - // Etag - The ETag of the record set. 
- Etag *string `json:"etag,omitempty"` - // RecordSetProperties - The properties of the record set. - *RecordSetProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Fully qualified resource Id for the resource. Example - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateDnsZoneName}'. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. Example - 'Microsoft.Network/privateDnsZones'. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for RecordSet. -func (rs RecordSet) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if rs.Etag != nil { - objectMap["etag"] = rs.Etag - } - if rs.RecordSetProperties != nil { - objectMap["properties"] = rs.RecordSetProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for RecordSet struct. -func (rs *RecordSet) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "etag": - if v != nil { - var etag string - err = json.Unmarshal(*v, &etag) - if err != nil { - return err - } - rs.Etag = &etag - } - case "properties": - if v != nil { - var recordSetProperties RecordSetProperties - err = json.Unmarshal(*v, &recordSetProperties) - if err != nil { - return err - } - rs.RecordSetProperties = &recordSetProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - rs.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - rs.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - rs.Type = &typeVar - } - } - } - - return nil -} - -// RecordSetListResult the response to a record set list operation. -type RecordSetListResult struct { - autorest.Response `json:"-"` - // Value - Information about the record sets in the response. - Value *[]RecordSet `json:"value,omitempty"` - // NextLink - READ-ONLY; The continuation token for the next page of results. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for RecordSetListResult. -func (rslr RecordSetListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if rslr.Value != nil { - objectMap["value"] = rslr.Value - } - return json.Marshal(objectMap) -} - -// RecordSetListResultIterator provides access to a complete listing of RecordSet values. -type RecordSetListResultIterator struct { - i int - page RecordSetListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. 
-func (iter *RecordSetListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *RecordSetListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter RecordSetListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter RecordSetListResultIterator) Response() RecordSetListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter RecordSetListResultIterator) Value() RecordSet { - if !iter.page.NotDone() { - return RecordSet{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the RecordSetListResultIterator type. -func NewRecordSetListResultIterator(page RecordSetListResultPage) RecordSetListResultIterator { - return RecordSetListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (rslr RecordSetListResult) IsEmpty() bool { - return rslr.Value == nil || len(*rslr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (rslr RecordSetListResult) hasNextLink() bool { - return rslr.NextLink != nil && len(*rslr.NextLink) != 0 -} - -// recordSetListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (rslr RecordSetListResult) recordSetListResultPreparer(ctx context.Context) (*http.Request, error) { - if !rslr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(rslr.NextLink))) -} - -// RecordSetListResultPage contains a page of RecordSet values. -type RecordSetListResultPage struct { - fn func(context.Context, RecordSetListResult) (RecordSetListResult, error) - rslr RecordSetListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *RecordSetListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.rslr) - if err != nil { - return err - } - page.rslr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. 
If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *RecordSetListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page RecordSetListResultPage) NotDone() bool { - return !page.rslr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page RecordSetListResultPage) Response() RecordSetListResult { - return page.rslr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page RecordSetListResultPage) Values() []RecordSet { - if page.rslr.IsEmpty() { - return nil - } - return *page.rslr.Value -} - -// Creates a new instance of the RecordSetListResultPage type. -func NewRecordSetListResultPage(cur RecordSetListResult, getNextPage func(context.Context, RecordSetListResult) (RecordSetListResult, error)) RecordSetListResultPage { - return RecordSetListResultPage{ - fn: getNextPage, - rslr: cur, - } -} - -// RecordSetProperties represents the properties of the records in the record set. -type RecordSetProperties struct { - // Metadata - The metadata attached to the record set. - Metadata map[string]*string `json:"metadata"` - // TTL - The TTL (time-to-live) of the records in the record set. - TTL *int64 `json:"ttl,omitempty"` - // Fqdn - READ-ONLY; Fully qualified domain name of the record set. - Fqdn *string `json:"fqdn,omitempty"` - // IsAutoRegistered - READ-ONLY; Is the record set auto-registered in the Private DNS zone through a virtual network link? - IsAutoRegistered *bool `json:"isAutoRegistered,omitempty"` - // ARecords - The list of A records in the record set. - ARecords *[]ARecord `json:"aRecords,omitempty"` - // AaaaRecords - The list of AAAA records in the record set. - AaaaRecords *[]AaaaRecord `json:"aaaaRecords,omitempty"` - // CnameRecord - The CNAME record in the record set. - CnameRecord *CnameRecord `json:"cnameRecord,omitempty"` - // MxRecords - The list of MX records in the record set. - MxRecords *[]MxRecord `json:"mxRecords,omitempty"` - // PtrRecords - The list of PTR records in the record set. - PtrRecords *[]PtrRecord `json:"ptrRecords,omitempty"` - // SoaRecord - The SOA record in the record set. - SoaRecord *SoaRecord `json:"soaRecord,omitempty"` - // SrvRecords - The list of SRV records in the record set. - SrvRecords *[]SrvRecord `json:"srvRecords,omitempty"` - // TxtRecords - The list of TXT records in the record set. - TxtRecords *[]TxtRecord `json:"txtRecords,omitempty"` -} - -// MarshalJSON is the custom marshaler for RecordSetProperties. 
-func (rsp RecordSetProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if rsp.Metadata != nil { - objectMap["metadata"] = rsp.Metadata - } - if rsp.TTL != nil { - objectMap["ttl"] = rsp.TTL - } - if rsp.ARecords != nil { - objectMap["aRecords"] = rsp.ARecords - } - if rsp.AaaaRecords != nil { - objectMap["aaaaRecords"] = rsp.AaaaRecords - } - if rsp.CnameRecord != nil { - objectMap["cnameRecord"] = rsp.CnameRecord - } - if rsp.MxRecords != nil { - objectMap["mxRecords"] = rsp.MxRecords - } - if rsp.PtrRecords != nil { - objectMap["ptrRecords"] = rsp.PtrRecords - } - if rsp.SoaRecord != nil { - objectMap["soaRecord"] = rsp.SoaRecord - } - if rsp.SrvRecords != nil { - objectMap["srvRecords"] = rsp.SrvRecords - } - if rsp.TxtRecords != nil { - objectMap["txtRecords"] = rsp.TxtRecords - } - return json.Marshal(objectMap) -} - -// Resource the core properties of ARM resources -type Resource struct { - // ID - READ-ONLY; Fully qualified resource Id for the resource. Example - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateDnsZoneName}'. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. Example - 'Microsoft.Network/privateDnsZones'. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for Resource. -func (r Resource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SoaRecord an SOA record. -type SoaRecord struct { - // Host - The domain name of the authoritative name server for this SOA record. - Host *string `json:"host,omitempty"` - // Email - The email contact for this SOA record. - Email *string `json:"email,omitempty"` - // SerialNumber - The serial number for this SOA record. - SerialNumber *int64 `json:"serialNumber,omitempty"` - // RefreshTime - The refresh value for this SOA record. - RefreshTime *int64 `json:"refreshTime,omitempty"` - // RetryTime - The retry time for this SOA record. - RetryTime *int64 `json:"retryTime,omitempty"` - // ExpireTime - The expire time for this SOA record. - ExpireTime *int64 `json:"expireTime,omitempty"` - // MinimumTTL - The minimum value for this SOA record. By convention this is used to determine the negative caching duration. - MinimumTTL *int64 `json:"minimumTtl,omitempty"` -} - -// SrvRecord an SRV record. -type SrvRecord struct { - // Priority - The priority value for this SRV record. - Priority *int32 `json:"priority,omitempty"` - // Weight - The weight value for this SRV record. - Weight *int32 `json:"weight,omitempty"` - // Port - The port value for this SRV record. - Port *int32 `json:"port,omitempty"` - // Target - The target domain name for this SRV record. - Target *string `json:"target,omitempty"` -} - -// SubResource reference to another subresource. -type SubResource struct { - // ID - Resource ID. - ID *string `json:"id,omitempty"` -} - -// TrackedResource the resource model definition for a ARM tracked top level resource -type TrackedResource struct { - // Tags - Resource tags. - Tags map[string]*string `json:"tags"` - // Location - The Azure Region where the resource lives - Location *string `json:"location,omitempty"` - // ID - READ-ONLY; Fully qualified resource Id for the resource. 
Example - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateDnsZoneName}'. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. Example - 'Microsoft.Network/privateDnsZones'. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for TrackedResource. -func (tr TrackedResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if tr.Tags != nil { - objectMap["tags"] = tr.Tags - } - if tr.Location != nil { - objectMap["location"] = tr.Location - } - return json.Marshal(objectMap) -} - -// TxtRecord a TXT record. -type TxtRecord struct { - // Value - The text value of this TXT record. - Value *[]string `json:"value,omitempty"` -} - -// VirtualNetworkLink describes a link to virtual network for a Private DNS zone. -type VirtualNetworkLink struct { - autorest.Response `json:"-"` - // Etag - The ETag of the virtual network link. - Etag *string `json:"etag,omitempty"` - // VirtualNetworkLinkProperties - Properties of the virtual network link to the Private DNS zone. - *VirtualNetworkLinkProperties `json:"properties,omitempty"` - // Tags - Resource tags. - Tags map[string]*string `json:"tags"` - // Location - The Azure Region where the resource lives - Location *string `json:"location,omitempty"` - // ID - READ-ONLY; Fully qualified resource Id for the resource. Example - '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateDnsZoneName}'. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. Example - 'Microsoft.Network/privateDnsZones'. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for VirtualNetworkLink. -func (vnl VirtualNetworkLink) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if vnl.Etag != nil { - objectMap["etag"] = vnl.Etag - } - if vnl.VirtualNetworkLinkProperties != nil { - objectMap["properties"] = vnl.VirtualNetworkLinkProperties - } - if vnl.Tags != nil { - objectMap["tags"] = vnl.Tags - } - if vnl.Location != nil { - objectMap["location"] = vnl.Location - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for VirtualNetworkLink struct. 
-func (vnl *VirtualNetworkLink) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "etag": - if v != nil { - var etag string - err = json.Unmarshal(*v, &etag) - if err != nil { - return err - } - vnl.Etag = &etag - } - case "properties": - if v != nil { - var virtualNetworkLinkProperties VirtualNetworkLinkProperties - err = json.Unmarshal(*v, &virtualNetworkLinkProperties) - if err != nil { - return err - } - vnl.VirtualNetworkLinkProperties = &virtualNetworkLinkProperties - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - vnl.Tags = tags - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - vnl.Location = &location - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - vnl.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - vnl.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - vnl.Type = &typeVar - } - } - } - - return nil -} - -// VirtualNetworkLinkListResult the response to a list virtual network link to Private DNS zone operation. -type VirtualNetworkLinkListResult struct { - autorest.Response `json:"-"` - // Value - Information about the virtual network links to the Private DNS zones. - Value *[]VirtualNetworkLink `json:"value,omitempty"` - // NextLink - READ-ONLY; The continuation token for the next page of results. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for VirtualNetworkLinkListResult. -func (vnllr VirtualNetworkLinkListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if vnllr.Value != nil { - objectMap["value"] = vnllr.Value - } - return json.Marshal(objectMap) -} - -// VirtualNetworkLinkListResultIterator provides access to a complete listing of VirtualNetworkLink values. -type VirtualNetworkLinkListResultIterator struct { - i int - page VirtualNetworkLinkListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *VirtualNetworkLinkListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkLinkListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *VirtualNetworkLinkListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. 
-func (iter VirtualNetworkLinkListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter VirtualNetworkLinkListResultIterator) Response() VirtualNetworkLinkListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter VirtualNetworkLinkListResultIterator) Value() VirtualNetworkLink { - if !iter.page.NotDone() { - return VirtualNetworkLink{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the VirtualNetworkLinkListResultIterator type. -func NewVirtualNetworkLinkListResultIterator(page VirtualNetworkLinkListResultPage) VirtualNetworkLinkListResultIterator { - return VirtualNetworkLinkListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (vnllr VirtualNetworkLinkListResult) IsEmpty() bool { - return vnllr.Value == nil || len(*vnllr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (vnllr VirtualNetworkLinkListResult) hasNextLink() bool { - return vnllr.NextLink != nil && len(*vnllr.NextLink) != 0 -} - -// virtualNetworkLinkListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (vnllr VirtualNetworkLinkListResult) virtualNetworkLinkListResultPreparer(ctx context.Context) (*http.Request, error) { - if !vnllr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(vnllr.NextLink))) -} - -// VirtualNetworkLinkListResultPage contains a page of VirtualNetworkLink values. -type VirtualNetworkLinkListResultPage struct { - fn func(context.Context, VirtualNetworkLinkListResult) (VirtualNetworkLinkListResult, error) - vnllr VirtualNetworkLinkListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *VirtualNetworkLinkListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkLinkListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.vnllr) - if err != nil { - return err - } - page.vnllr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *VirtualNetworkLinkListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page VirtualNetworkLinkListResultPage) NotDone() bool { - return !page.vnllr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page VirtualNetworkLinkListResultPage) Response() VirtualNetworkLinkListResult { - return page.vnllr -} - -// Values returns the slice of values for the current page or nil if there are no values. 
-func (page VirtualNetworkLinkListResultPage) Values() []VirtualNetworkLink { - if page.vnllr.IsEmpty() { - return nil - } - return *page.vnllr.Value -} - -// Creates a new instance of the VirtualNetworkLinkListResultPage type. -func NewVirtualNetworkLinkListResultPage(cur VirtualNetworkLinkListResult, getNextPage func(context.Context, VirtualNetworkLinkListResult) (VirtualNetworkLinkListResult, error)) VirtualNetworkLinkListResultPage { - return VirtualNetworkLinkListResultPage{ - fn: getNextPage, - vnllr: cur, - } -} - -// VirtualNetworkLinkProperties represents the properties of the Private DNS zone. -type VirtualNetworkLinkProperties struct { - // VirtualNetwork - The reference of the virtual network. - VirtualNetwork *SubResource `json:"virtualNetwork,omitempty"` - // RegistrationEnabled - Is auto-registration of virtual machine records in the virtual network in the Private DNS zone enabled? - RegistrationEnabled *bool `json:"registrationEnabled,omitempty"` - // VirtualNetworkLinkState - READ-ONLY; The status of the virtual network link to the Private DNS zone. Possible values are 'InProgress' and 'Done'. This is a read-only property and any attempt to set this value will be ignored. Possible values include: 'InProgress', 'Completed' - VirtualNetworkLinkState VirtualNetworkLinkState `json:"virtualNetworkLinkState,omitempty"` - // ProvisioningState - READ-ONLY; The provisioning state of the resource. This is a read-only property and any attempt to set this value will be ignored. Possible values include: 'Creating', 'Updating', 'Deleting', 'Succeeded', 'Failed', 'Canceled' - ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` -} - -// MarshalJSON is the custom marshaler for VirtualNetworkLinkProperties. -func (vnlp VirtualNetworkLinkProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if vnlp.VirtualNetwork != nil { - objectMap["virtualNetwork"] = vnlp.VirtualNetwork - } - if vnlp.RegistrationEnabled != nil { - objectMap["registrationEnabled"] = vnlp.RegistrationEnabled - } - return json.Marshal(objectMap) -} - -// VirtualNetworkLinksCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type VirtualNetworkLinksCreateOrUpdateFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(VirtualNetworkLinksClient) (VirtualNetworkLink, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *VirtualNetworkLinksCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for VirtualNetworkLinksCreateOrUpdateFuture.Result. 
-func (future *VirtualNetworkLinksCreateOrUpdateFuture) result(client VirtualNetworkLinksClient) (vnl VirtualNetworkLink, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - vnl.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("privatedns.VirtualNetworkLinksCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if vnl.Response.Response, err = future.GetResult(sender); err == nil && vnl.Response.Response.StatusCode != http.StatusNoContent { - vnl, err = client.CreateOrUpdateResponder(vnl.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksCreateOrUpdateFuture", "Result", vnl.Response.Response, "Failure responding to request") - } - } - return -} - -// VirtualNetworkLinksDeleteFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type VirtualNetworkLinksDeleteFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(VirtualNetworkLinksClient) (autorest.Response, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *VirtualNetworkLinksDeleteFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for VirtualNetworkLinksDeleteFuture.Result. -func (future *VirtualNetworkLinksDeleteFuture) result(client VirtualNetworkLinksClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("privatedns.VirtualNetworkLinksDeleteFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualNetworkLinksUpdateFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type VirtualNetworkLinksUpdateFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(VirtualNetworkLinksClient) (VirtualNetworkLink, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *VirtualNetworkLinksUpdateFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for VirtualNetworkLinksUpdateFuture.Result. 
-func (future *VirtualNetworkLinksUpdateFuture) result(client VirtualNetworkLinksClient) (vnl VirtualNetworkLink, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - vnl.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("privatedns.VirtualNetworkLinksUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if vnl.Response.Response, err = future.GetResult(sender); err == nil && vnl.Response.Response.StatusCode != http.StatusNoContent { - vnl, err = client.UpdateResponder(vnl.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksUpdateFuture", "Result", vnl.Response.Response, "Failure responding to request") - } - } - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/privatezones.go b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/privatezones.go deleted file mode 100644 index 841905875..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/privatezones.go +++ /dev/null @@ -1,614 +0,0 @@ -package privatedns - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// PrivateZonesClient is the the Private DNS Management Client. -type PrivateZonesClient struct { - BaseClient -} - -// NewPrivateZonesClient creates an instance of the PrivateZonesClient client. -func NewPrivateZonesClient(subscriptionID string) PrivateZonesClient { - return NewPrivateZonesClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewPrivateZonesClientWithBaseURI creates an instance of the PrivateZonesClient client using a custom endpoint. Use -// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewPrivateZonesClientWithBaseURI(baseURI string, subscriptionID string) PrivateZonesClient { - return PrivateZonesClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate creates or updates a Private DNS zone. Does not modify Links to virtual networks or DNS records -// within the zone. -// Parameters: -// resourceGroupName - the name of the resource group. -// privateZoneName - the name of the Private DNS zone (without a terminating dot). -// parameters - parameters supplied to the CreateOrUpdate operation. -// ifMatch - the ETag of the Private DNS zone. Omit this value to always overwrite the current zone. Specify -// the last-seen ETag value to prevent accidentally overwriting any concurrent changes. -// ifNoneMatch - set to '*' to allow a new Private DNS zone to be created, but to prevent updating an existing -// zone. Other values will be ignored. 
-func (client PrivateZonesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, privateZoneName string, parameters PrivateZone, ifMatch string, ifNoneMatch string) (result PrivateZonesCreateOrUpdateFuture, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/PrivateZonesClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, privateZoneName, parameters, ifMatch, ifNoneMatch) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - result, err = client.CreateOrUpdateSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "CreateOrUpdate", result.Response(), "Failure sending request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client PrivateZonesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, privateZoneName string, parameters PrivateZone, ifMatch string, ifNoneMatch string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "privateZoneName": autorest.Encode("path", privateZoneName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-09-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - if len(ifMatch) > 0 { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithHeader("If-Match", autorest.String(ifMatch))) - } - if len(ifNoneMatch) > 0 { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client PrivateZonesClient) CreateOrUpdateSender(req *http.Request) (future PrivateZonesCreateOrUpdateFuture, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. 
-func (client PrivateZonesClient) CreateOrUpdateResponder(resp *http.Response) (result PrivateZone, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes a Private DNS zone. WARNING: All DNS records in the zone will also be deleted. This operation cannot -// be undone. Private DNS zone cannot be deleted unless all virtual network links to it are removed. -// Parameters: -// resourceGroupName - the name of the resource group. -// privateZoneName - the name of the Private DNS zone (without a terminating dot). -// ifMatch - the ETag of the Private DNS zone. Omit this value to always delete the current zone. Specify the -// last-seen ETag value to prevent accidentally deleting any concurrent changes. -func (client PrivateZonesClient) Delete(ctx context.Context, resourceGroupName string, privateZoneName string, ifMatch string) (result PrivateZonesDeleteFuture, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/PrivateZonesClient.Delete") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeletePreparer(ctx, resourceGroupName, privateZoneName, ifMatch) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "Delete", nil, "Failure preparing request") - return - } - - result, err = client.DeleteSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "Delete", result.Response(), "Failure sending request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client PrivateZonesClient) DeletePreparer(ctx context.Context, resourceGroupName string, privateZoneName string, ifMatch string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "privateZoneName": autorest.Encode("path", privateZoneName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-09-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - if len(ifMatch) > 0 { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithHeader("If-Match", autorest.String(ifMatch))) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. 
-func (client PrivateZonesClient) DeleteSender(req *http.Request) (future PrivateZonesDeleteFuture, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client PrivateZonesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets a Private DNS zone. Retrieves the zone properties, but not the virtual networks links or the record sets -// within the zone. -// Parameters: -// resourceGroupName - the name of the resource group. -// privateZoneName - the name of the Private DNS zone (without a terminating dot). -func (client PrivateZonesClient) Get(ctx context.Context, resourceGroupName string, privateZoneName string) (result PrivateZone, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/PrivateZonesClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, resourceGroupName, privateZoneName) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. -func (client PrivateZonesClient) GetPreparer(ctx context.Context, resourceGroupName string, privateZoneName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "privateZoneName": autorest.Encode("path", privateZoneName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-09-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client PrivateZonesClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. 
-func (client PrivateZonesClient) GetResponder(resp *http.Response) (result PrivateZone, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List lists the Private DNS zones in all resource groups in a subscription. -// Parameters: -// top - the maximum number of Private DNS zones to return. If not specified, returns up to 100 zones. -func (client PrivateZonesClient) List(ctx context.Context, top *int32) (result PrivateZoneListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/PrivateZonesClient.List") - defer func() { - sc := -1 - if result.pzlr.Response.Response != nil { - sc = result.pzlr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.fn = client.listNextResults - req, err := client.ListPreparer(ctx, top) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.pzlr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "List", resp, "Failure sending request") - return - } - - result.pzlr, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "List", resp, "Failure responding to request") - return - } - if result.pzlr.hasNextLink() && result.pzlr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListPreparer prepares the List request. -func (client PrivateZonesClient) ListPreparer(ctx context.Context, top *int32) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-09-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if top != nil { - queryParameters["$top"] = autorest.Encode("query", *top) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/privateDnsZones", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client PrivateZonesClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client PrivateZonesClient) ListResponder(resp *http.Response) (result PrivateZoneListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listNextResults retrieves the next set of results, if any. 
-func (client PrivateZonesClient) listNextResults(ctx context.Context, lastResults PrivateZoneListResult) (result PrivateZoneListResult, err error) { - req, err := lastResults.privateZoneListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "listNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "listNextResults", resp, "Failure sending next results request") - } - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "listNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListComplete enumerates all values, automatically crossing page boundaries as required. -func (client PrivateZonesClient) ListComplete(ctx context.Context, top *int32) (result PrivateZoneListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/PrivateZonesClient.List") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.List(ctx, top) - return -} - -// ListByResourceGroup lists the Private DNS zones within a resource group. -// Parameters: -// resourceGroupName - the name of the resource group. -// top - the maximum number of record sets to return. If not specified, returns up to 100 record sets. -func (client PrivateZonesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string, top *int32) (result PrivateZoneListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/PrivateZonesClient.ListByResourceGroup") - defer func() { - sc := -1 - if result.pzlr.Response.Response != nil { - sc = result.pzlr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.fn = client.listByResourceGroupNextResults - req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName, top) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "ListByResourceGroup", nil, "Failure preparing request") - return - } - - resp, err := client.ListByResourceGroupSender(req) - if err != nil { - result.pzlr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "ListByResourceGroup", resp, "Failure sending request") - return - } - - result.pzlr, err = client.ListByResourceGroupResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "ListByResourceGroup", resp, "Failure responding to request") - return - } - if result.pzlr.hasNextLink() && result.pzlr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
-func (client PrivateZonesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string, top *int32) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-09-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if top != nil { - queryParameters["$top"] = autorest.Encode("query", *top) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the -// http.Response Body if it receives an error. -func (client PrivateZonesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always -// closes the http.Response Body. -func (client PrivateZonesClient) ListByResourceGroupResponder(resp *http.Response) (result PrivateZoneListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listByResourceGroupNextResults retrieves the next set of results, if any. -func (client PrivateZonesClient) listByResourceGroupNextResults(ctx context.Context, lastResults PrivateZoneListResult) (result PrivateZoneListResult, err error) { - req, err := lastResults.privateZoneListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListByResourceGroupSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") - } - result, err = client.ListByResourceGroupResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. -func (client PrivateZonesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string, top *int32) (result PrivateZoneListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/PrivateZonesClient.ListByResourceGroup") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListByResourceGroup(ctx, resourceGroupName, top) - return -} - -// Update updates a Private DNS zone. Does not modify virtual network links or DNS records within the zone. 
-// Parameters: -// resourceGroupName - the name of the resource group. -// privateZoneName - the name of the Private DNS zone (without a terminating dot). -// parameters - parameters supplied to the Update operation. -// ifMatch - the ETag of the Private DNS zone. Omit this value to always overwrite the current zone. Specify -// the last-seen ETag value to prevent accidentally overwriting any concurrent changes. -func (client PrivateZonesClient) Update(ctx context.Context, resourceGroupName string, privateZoneName string, parameters PrivateZone, ifMatch string) (result PrivateZonesUpdateFuture, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/PrivateZonesClient.Update") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdatePreparer(ctx, resourceGroupName, privateZoneName, parameters, ifMatch) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "Update", nil, "Failure preparing request") - return - } - - result, err = client.UpdateSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.PrivateZonesClient", "Update", result.Response(), "Failure sending request") - return - } - - return -} - -// UpdatePreparer prepares the Update request. -func (client PrivateZonesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, privateZoneName string, parameters PrivateZone, ifMatch string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "privateZoneName": autorest.Encode("path", privateZoneName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-09-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - if len(ifMatch) > 0 { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithHeader("If-Match", autorest.String(ifMatch))) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. -func (client PrivateZonesClient) UpdateSender(req *http.Request) (future PrivateZonesUpdateFuture, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. 
-func (client PrivateZonesClient) UpdateResponder(resp *http.Response) (result PrivateZone, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/recordsets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/recordsets.go deleted file mode 100644 index 2c863316f..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/recordsets.go +++ /dev/null @@ -1,640 +0,0 @@ -package privatedns - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// RecordSetsClient is the the Private DNS Management Client. -type RecordSetsClient struct { - BaseClient -} - -// NewRecordSetsClient creates an instance of the RecordSetsClient client. -func NewRecordSetsClient(subscriptionID string) RecordSetsClient { - return NewRecordSetsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewRecordSetsClientWithBaseURI creates an instance of the RecordSetsClient client using a custom endpoint. Use this -// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewRecordSetsClientWithBaseURI(baseURI string, subscriptionID string) RecordSetsClient { - return RecordSetsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate creates or updates a record set within a Private DNS zone. -// Parameters: -// resourceGroupName - the name of the resource group. -// privateZoneName - the name of the Private DNS zone (without a terminating dot). -// recordType - the type of DNS record in this record set. Record sets of type SOA can be updated but not -// created (they are created when the Private DNS zone is created). -// relativeRecordSetName - the name of the record set, relative to the name of the zone. -// parameters - parameters supplied to the CreateOrUpdate operation. -// ifMatch - the ETag of the record set. Omit this value to always overwrite the current record set. Specify -// the last-seen ETag value to prevent accidentally overwriting any concurrent changes. -// ifNoneMatch - set to '*' to allow a new record set to be created, but to prevent updating an existing record -// set. Other values will be ignored. 
-func (client RecordSetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string, parameters RecordSet, ifMatch string, ifNoneMatch string) (result RecordSet, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, privateZoneName, recordType, relativeRecordSetName, parameters, ifMatch, ifNoneMatch) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "CreateOrUpdate", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "CreateOrUpdate", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client RecordSetsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string, parameters RecordSet, ifMatch string, ifNoneMatch string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "privateZoneName": autorest.Encode("path", privateZoneName), - "recordType": autorest.Encode("path", recordType), - "relativeRecordSetName": relativeRecordSetName, - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-09-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}/{relativeRecordSetName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - if len(ifMatch) > 0 { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithHeader("If-Match", autorest.String(ifMatch))) - } - if len(ifNoneMatch) > 0 { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client RecordSetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. 
-func (client RecordSetsClient) CreateOrUpdateResponder(resp *http.Response) (result RecordSet, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes a record set from a Private DNS zone. This operation cannot be undone. -// Parameters: -// resourceGroupName - the name of the resource group. -// privateZoneName - the name of the Private DNS zone (without a terminating dot). -// recordType - the type of DNS record in this record set. Record sets of type SOA cannot be deleted (they are -// deleted when the Private DNS zone is deleted). -// relativeRecordSetName - the name of the record set, relative to the name of the zone. -// ifMatch - the ETag of the record set. Omit this value to always delete the current record set. Specify the -// last-seen ETag value to prevent accidentally deleting any concurrent changes. -func (client RecordSetsClient) Delete(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string, ifMatch string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeletePreparer(ctx, resourceGroupName, privateZoneName, recordType, relativeRecordSetName, ifMatch) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client RecordSetsClient) DeletePreparer(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string, ifMatch string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "privateZoneName": autorest.Encode("path", privateZoneName), - "recordType": autorest.Encode("path", recordType), - "relativeRecordSetName": relativeRecordSetName, - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-09-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}/{relativeRecordSetName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - if len(ifMatch) > 0 { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithHeader("If-Match", autorest.String(ifMatch))) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. 
The method will close the -// http.Response Body if it receives an error. -func (client RecordSetsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client RecordSetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets a record set. -// Parameters: -// resourceGroupName - the name of the resource group. -// privateZoneName - the name of the Private DNS zone (without a terminating dot). -// recordType - the type of DNS record in this record set. -// relativeRecordSetName - the name of the record set, relative to the name of the zone. -func (client RecordSetsClient) Get(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string) (result RecordSet, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, resourceGroupName, privateZoneName, recordType, relativeRecordSetName) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. -func (client RecordSetsClient) GetPreparer(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "privateZoneName": autorest.Encode("path", privateZoneName), - "recordType": autorest.Encode("path", recordType), - "relativeRecordSetName": relativeRecordSetName, - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-09-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}/{relativeRecordSetName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. 
-func (client RecordSetsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client RecordSetsClient) GetResponder(resp *http.Response) (result RecordSet, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List lists all record sets in a Private DNS zone. -// Parameters: -// resourceGroupName - the name of the resource group. -// privateZoneName - the name of the Private DNS zone (without a terminating dot). -// top - the maximum number of record sets to return. If not specified, returns up to 100 record sets. -// recordsetnamesuffix - the suffix label of the record set name to be used to filter the record set -// enumeration. If this parameter is specified, the returned enumeration will only contain records that end -// with ".". -func (client RecordSetsClient) List(ctx context.Context, resourceGroupName string, privateZoneName string, top *int32, recordsetnamesuffix string) (result RecordSetListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.List") - defer func() { - sc := -1 - if result.rslr.Response.Response != nil { - sc = result.rslr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.fn = client.listNextResults - req, err := client.ListPreparer(ctx, resourceGroupName, privateZoneName, top, recordsetnamesuffix) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.rslr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "List", resp, "Failure sending request") - return - } - - result.rslr, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "List", resp, "Failure responding to request") - return - } - if result.rslr.hasNextLink() && result.rslr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListPreparer prepares the List request. 
-func (client RecordSetsClient) ListPreparer(ctx context.Context, resourceGroupName string, privateZoneName string, top *int32, recordsetnamesuffix string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "privateZoneName": autorest.Encode("path", privateZoneName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-09-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if top != nil { - queryParameters["$top"] = autorest.Encode("query", *top) - } - if len(recordsetnamesuffix) > 0 { - queryParameters["$recordsetnamesuffix"] = autorest.Encode("query", recordsetnamesuffix) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/ALL", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client RecordSetsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client RecordSetsClient) ListResponder(resp *http.Response) (result RecordSetListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listNextResults retrieves the next set of results, if any. -func (client RecordSetsClient) listNextResults(ctx context.Context, lastResults RecordSetListResult) (result RecordSetListResult, err error) { - req, err := lastResults.recordSetListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "listNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "listNextResults", resp, "Failure sending next results request") - } - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "listNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListComplete enumerates all values, automatically crossing page boundaries as required. 
-func (client RecordSetsClient) ListComplete(ctx context.Context, resourceGroupName string, privateZoneName string, top *int32, recordsetnamesuffix string) (result RecordSetListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.List") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.List(ctx, resourceGroupName, privateZoneName, top, recordsetnamesuffix) - return -} - -// ListByType lists the record sets of a specified type in a Private DNS zone. -// Parameters: -// resourceGroupName - the name of the resource group. -// privateZoneName - the name of the Private DNS zone (without a terminating dot). -// recordType - the type of record sets to enumerate. -// top - the maximum number of record sets to return. If not specified, returns up to 100 record sets. -// recordsetnamesuffix - the suffix label of the record set name to be used to filter the record set -// enumeration. If this parameter is specified, the returned enumeration will only contain records that end -// with ".". -func (client RecordSetsClient) ListByType(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, top *int32, recordsetnamesuffix string) (result RecordSetListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.ListByType") - defer func() { - sc := -1 - if result.rslr.Response.Response != nil { - sc = result.rslr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.fn = client.listByTypeNextResults - req, err := client.ListByTypePreparer(ctx, resourceGroupName, privateZoneName, recordType, top, recordsetnamesuffix) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "ListByType", nil, "Failure preparing request") - return - } - - resp, err := client.ListByTypeSender(req) - if err != nil { - result.rslr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "ListByType", resp, "Failure sending request") - return - } - - result.rslr, err = client.ListByTypeResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "ListByType", resp, "Failure responding to request") - return - } - if result.rslr.hasNextLink() && result.rslr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListByTypePreparer prepares the ListByType request. 
-func (client RecordSetsClient) ListByTypePreparer(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, top *int32, recordsetnamesuffix string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "privateZoneName": autorest.Encode("path", privateZoneName), - "recordType": autorest.Encode("path", recordType), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-09-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if top != nil { - queryParameters["$top"] = autorest.Encode("query", *top) - } - if len(recordsetnamesuffix) > 0 { - queryParameters["$recordsetnamesuffix"] = autorest.Encode("query", recordsetnamesuffix) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByTypeSender sends the ListByType request. The method will close the -// http.Response Body if it receives an error. -func (client RecordSetsClient) ListByTypeSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByTypeResponder handles the response to the ListByType request. The method always -// closes the http.Response Body. -func (client RecordSetsClient) ListByTypeResponder(resp *http.Response) (result RecordSetListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listByTypeNextResults retrieves the next set of results, if any. -func (client RecordSetsClient) listByTypeNextResults(ctx context.Context, lastResults RecordSetListResult) (result RecordSetListResult, err error) { - req, err := lastResults.recordSetListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "listByTypeNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListByTypeSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "listByTypeNextResults", resp, "Failure sending next results request") - } - result, err = client.ListByTypeResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "listByTypeNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListByTypeComplete enumerates all values, automatically crossing page boundaries as required. 
-func (client RecordSetsClient) ListByTypeComplete(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, top *int32, recordsetnamesuffix string) (result RecordSetListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.ListByType") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListByType(ctx, resourceGroupName, privateZoneName, recordType, top, recordsetnamesuffix) - return -} - -// Update updates a record set within a Private DNS zone. -// Parameters: -// resourceGroupName - the name of the resource group. -// privateZoneName - the name of the Private DNS zone (without a terminating dot). -// recordType - the type of DNS record in this record set. -// relativeRecordSetName - the name of the record set, relative to the name of the zone. -// parameters - parameters supplied to the Update operation. -// ifMatch - the ETag of the record set. Omit this value to always overwrite the current record set. Specify -// the last-seen ETag value to prevent accidentally overwriting concurrent changes. -func (client RecordSetsClient) Update(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string, parameters RecordSet, ifMatch string) (result RecordSet, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.Update") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdatePreparer(ctx, resourceGroupName, privateZoneName, recordType, relativeRecordSetName, parameters, ifMatch) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "Update", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "Update", resp, "Failure sending request") - return - } - - result, err = client.UpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.RecordSetsClient", "Update", resp, "Failure responding to request") - return - } - - return -} - -// UpdatePreparer prepares the Update request. 
-func (client RecordSetsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string, parameters RecordSet, ifMatch string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "privateZoneName": autorest.Encode("path", privateZoneName), - "recordType": autorest.Encode("path", recordType), - "relativeRecordSetName": relativeRecordSetName, - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-09-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}/{relativeRecordSetName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - if len(ifMatch) > 0 { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithHeader("If-Match", autorest.String(ifMatch))) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. -func (client RecordSetsClient) UpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. -func (client RecordSetsClient) UpdateResponder(resp *http.Response) (result RecordSet, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/version.go deleted file mode 100644 index 32bd8ba13..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/version.go +++ /dev/null @@ -1,19 +0,0 @@ -package privatedns - -import "github.com/Azure/azure-sdk-for-go/version" - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// UserAgent returns the UserAgent string to use when sending http.Requests. -func UserAgent() string { - return "Azure-SDK-For-Go/" + Version() + " privatedns/2018-09-01" -} - -// Version returns the semantic version (see http://semver.org) of the client. 
-func Version() string { - return version.Number -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/virtualnetworklinks.go b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/virtualnetworklinks.go deleted file mode 100644 index 002a39d96..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/virtualnetworklinks.go +++ /dev/null @@ -1,509 +0,0 @@ -package privatedns - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// VirtualNetworkLinksClient is the the Private DNS Management Client. -type VirtualNetworkLinksClient struct { - BaseClient -} - -// NewVirtualNetworkLinksClient creates an instance of the VirtualNetworkLinksClient client. -func NewVirtualNetworkLinksClient(subscriptionID string) VirtualNetworkLinksClient { - return NewVirtualNetworkLinksClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewVirtualNetworkLinksClientWithBaseURI creates an instance of the VirtualNetworkLinksClient client using a custom -// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure -// stack). -func NewVirtualNetworkLinksClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworkLinksClient { - return VirtualNetworkLinksClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate creates or updates a virtual network link to the specified Private DNS zone. -// Parameters: -// resourceGroupName - the name of the resource group. -// privateZoneName - the name of the Private DNS zone (without a terminating dot). -// virtualNetworkLinkName - the name of the virtual network link. -// parameters - parameters supplied to the CreateOrUpdate operation. -// ifMatch - the ETag of the virtual network link to the Private DNS zone. Omit this value to always overwrite -// the current virtual network link. Specify the last-seen ETag value to prevent accidentally overwriting any -// concurrent changes. -// ifNoneMatch - set to '*' to allow a new virtual network link to the Private DNS zone to be created, but to -// prevent updating an existing link. Other values will be ignored. 
-func (client VirtualNetworkLinksClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string, parameters VirtualNetworkLink, ifMatch string, ifNoneMatch string) (result VirtualNetworkLinksCreateOrUpdateFuture, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkLinksClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, privateZoneName, virtualNetworkLinkName, parameters, ifMatch, ifNoneMatch) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - result, err = client.CreateOrUpdateSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "CreateOrUpdate", result.Response(), "Failure sending request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client VirtualNetworkLinksClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string, parameters VirtualNetworkLink, ifMatch string, ifNoneMatch string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "privateZoneName": autorest.Encode("path", privateZoneName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "virtualNetworkLinkName": autorest.Encode("path", virtualNetworkLinkName), - } - - const APIVersion = "2018-09-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/virtualNetworkLinks/{virtualNetworkLinkName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - if len(ifMatch) > 0 { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithHeader("If-Match", autorest.String(ifMatch))) - } - if len(ifNoneMatch) > 0 { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client VirtualNetworkLinksClient) CreateOrUpdateSender(req *http.Request) (future VirtualNetworkLinksCreateOrUpdateFuture, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. 
-func (client VirtualNetworkLinksClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualNetworkLink, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes a virtual network link to the specified Private DNS zone. WARNING: In case of a registration virtual -// network, all auto-registered DNS records in the zone for the virtual network will also be deleted. This operation -// cannot be undone. -// Parameters: -// resourceGroupName - the name of the resource group. -// privateZoneName - the name of the Private DNS zone (without a terminating dot). -// virtualNetworkLinkName - the name of the virtual network link. -// ifMatch - the ETag of the virtual network link to the Private DNS zone. Omit this value to always delete the -// current zone. Specify the last-seen ETag value to prevent accidentally deleting any concurrent changes. -func (client VirtualNetworkLinksClient) Delete(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string, ifMatch string) (result VirtualNetworkLinksDeleteFuture, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkLinksClient.Delete") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeletePreparer(ctx, resourceGroupName, privateZoneName, virtualNetworkLinkName, ifMatch) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "Delete", nil, "Failure preparing request") - return - } - - result, err = client.DeleteSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "Delete", result.Response(), "Failure sending request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client VirtualNetworkLinksClient) DeletePreparer(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string, ifMatch string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "privateZoneName": autorest.Encode("path", privateZoneName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "virtualNetworkLinkName": autorest.Encode("path", virtualNetworkLinkName), - } - - const APIVersion = "2018-09-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/virtualNetworkLinks/{virtualNetworkLinkName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - if len(ifMatch) > 0 { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithHeader("If-Match", autorest.String(ifMatch))) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. 
-func (client VirtualNetworkLinksClient) DeleteSender(req *http.Request) (future VirtualNetworkLinksDeleteFuture, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client VirtualNetworkLinksClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets a virtual network link to the specified Private DNS zone. -// Parameters: -// resourceGroupName - the name of the resource group. -// privateZoneName - the name of the Private DNS zone (without a terminating dot). -// virtualNetworkLinkName - the name of the virtual network link. -func (client VirtualNetworkLinksClient) Get(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string) (result VirtualNetworkLink, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkLinksClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, resourceGroupName, privateZoneName, virtualNetworkLinkName) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. -func (client VirtualNetworkLinksClient) GetPreparer(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "privateZoneName": autorest.Encode("path", privateZoneName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "virtualNetworkLinkName": autorest.Encode("path", virtualNetworkLinkName), - } - - const APIVersion = "2018-09-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/virtualNetworkLinks/{virtualNetworkLinkName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. 
-func (client VirtualNetworkLinksClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client VirtualNetworkLinksClient) GetResponder(resp *http.Response) (result VirtualNetworkLink, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List lists the virtual network links to the specified Private DNS zone. -// Parameters: -// resourceGroupName - the name of the resource group. -// privateZoneName - the name of the Private DNS zone (without a terminating dot). -// top - the maximum number of virtual network links to return. If not specified, returns up to 100 virtual -// network links. -func (client VirtualNetworkLinksClient) List(ctx context.Context, resourceGroupName string, privateZoneName string, top *int32) (result VirtualNetworkLinkListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkLinksClient.List") - defer func() { - sc := -1 - if result.vnllr.Response.Response != nil { - sc = result.vnllr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.fn = client.listNextResults - req, err := client.ListPreparer(ctx, resourceGroupName, privateZoneName, top) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.vnllr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "List", resp, "Failure sending request") - return - } - - result.vnllr, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "List", resp, "Failure responding to request") - return - } - if result.vnllr.hasNextLink() && result.vnllr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListPreparer prepares the List request. -func (client VirtualNetworkLinksClient) ListPreparer(ctx context.Context, resourceGroupName string, privateZoneName string, top *int32) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "privateZoneName": autorest.Encode("path", privateZoneName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-09-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if top != nil { - queryParameters["$top"] = autorest.Encode("query", *top) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/virtualNetworkLinks", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. 
-func (client VirtualNetworkLinksClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client VirtualNetworkLinksClient) ListResponder(resp *http.Response) (result VirtualNetworkLinkListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listNextResults retrieves the next set of results, if any. -func (client VirtualNetworkLinksClient) listNextResults(ctx context.Context, lastResults VirtualNetworkLinkListResult) (result VirtualNetworkLinkListResult, err error) { - req, err := lastResults.virtualNetworkLinkListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "listNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "listNextResults", resp, "Failure sending next results request") - } - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "listNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListComplete enumerates all values, automatically crossing page boundaries as required. -func (client VirtualNetworkLinksClient) ListComplete(ctx context.Context, resourceGroupName string, privateZoneName string, top *int32) (result VirtualNetworkLinkListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkLinksClient.List") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.List(ctx, resourceGroupName, privateZoneName, top) - return -} - -// Update updates a virtual network link to the specified Private DNS zone. -// Parameters: -// resourceGroupName - the name of the resource group. -// privateZoneName - the name of the Private DNS zone (without a terminating dot). -// virtualNetworkLinkName - the name of the virtual network link. -// parameters - parameters supplied to the Update operation. -// ifMatch - the ETag of the virtual network link to the Private DNS zone. Omit this value to always overwrite -// the current virtual network link. Specify the last-seen ETag value to prevent accidentally overwriting any -// concurrent changes. 
-func (client VirtualNetworkLinksClient) Update(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string, parameters VirtualNetworkLink, ifMatch string) (result VirtualNetworkLinksUpdateFuture, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkLinksClient.Update") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdatePreparer(ctx, resourceGroupName, privateZoneName, virtualNetworkLinkName, parameters, ifMatch) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "Update", nil, "Failure preparing request") - return - } - - result, err = client.UpdateSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "privatedns.VirtualNetworkLinksClient", "Update", result.Response(), "Failure sending request") - return - } - - return -} - -// UpdatePreparer prepares the Update request. -func (client VirtualNetworkLinksClient) UpdatePreparer(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string, parameters VirtualNetworkLink, ifMatch string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "privateZoneName": autorest.Encode("path", privateZoneName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "virtualNetworkLinkName": autorest.Encode("path", virtualNetworkLinkName), - } - - const APIVersion = "2018-09-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/virtualNetworkLinks/{virtualNetworkLinkName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - if len(ifMatch) > 0 { - preparer = autorest.DecoratePreparer(preparer, - autorest.WithHeader("If-Match", autorest.String(ifMatch))) - } - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. -func (client VirtualNetworkLinksClient) UpdateSender(req *http.Request) (future VirtualNetworkLinksUpdateFuture, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. 
-func (client VirtualNetworkLinksClient) UpdateResponder(resp *http.Response) (result VirtualNetworkLink, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/distribution/reference/.gitattributes b/vendor/github.com/distribution/reference/.gitattributes new file mode 100644 index 000000000..d207b1802 --- /dev/null +++ b/vendor/github.com/distribution/reference/.gitattributes @@ -0,0 +1 @@ +*.go text eol=lf diff --git a/vendor/github.com/distribution/reference/.gitignore b/vendor/github.com/distribution/reference/.gitignore new file mode 100644 index 000000000..dc07e6b04 --- /dev/null +++ b/vendor/github.com/distribution/reference/.gitignore @@ -0,0 +1,2 @@ +# Cover profiles +*.out diff --git a/vendor/github.com/distribution/reference/.golangci.yml b/vendor/github.com/distribution/reference/.golangci.yml new file mode 100644 index 000000000..793f0bb7e --- /dev/null +++ b/vendor/github.com/distribution/reference/.golangci.yml @@ -0,0 +1,18 @@ +linters: + enable: + - bodyclose + - dupword # Checks for duplicate words in the source code + - gofmt + - goimports + - ineffassign + - misspell + - revive + - staticcheck + - unconvert + - unused + - vet + disable: + - errcheck + +run: + deadline: 2m diff --git a/vendor/github.com/distribution/reference/CODE-OF-CONDUCT.md b/vendor/github.com/distribution/reference/CODE-OF-CONDUCT.md new file mode 100644 index 000000000..48f6704c6 --- /dev/null +++ b/vendor/github.com/distribution/reference/CODE-OF-CONDUCT.md @@ -0,0 +1,5 @@ +# Code of Conduct + +We follow the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). + +Please contact the [CNCF Code of Conduct Committee](mailto:conduct@cncf.io) in order to report violations of the Code of Conduct. diff --git a/vendor/github.com/distribution/reference/CONTRIBUTING.md b/vendor/github.com/distribution/reference/CONTRIBUTING.md new file mode 100644 index 000000000..ab2194665 --- /dev/null +++ b/vendor/github.com/distribution/reference/CONTRIBUTING.md @@ -0,0 +1,114 @@ +# Contributing to the reference library + +## Community help + +If you need help, please ask in the [#distribution](https://cloud-native.slack.com/archives/C01GVR8SY4R) channel on CNCF community slack. +[Click here for an invite to the CNCF community slack](https://slack.cncf.io/) + +## Reporting security issues + +The maintainers take security seriously. If you discover a security +issue, please bring it to their attention right away! + +Please **DO NOT** file a public issue, instead send your report privately to +[cncf-distribution-security@lists.cncf.io](mailto:cncf-distribution-security@lists.cncf.io). + +## Reporting an issue properly + +By following these simple rules you will get better and faster feedback on your issue. 
+ + - search the bugtracker for an already reported issue + +### If you found an issue that describes your problem: + + - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments + - please refrain from adding "same thing here" or "+1" comments + - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button + - comment if you have some new, technical and relevant information to add to the case + - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. + +### If you have not found an existing issue that describes your problem: + + 1. create a new issue, with a succinct title that describes your issue: + - bad title: "It doesn't work with my docker" + - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" + 2. copy the output of (or similar for other container tools): + - `docker version` + - `docker info` + - `docker exec registry --version` + 3. copy the command line you used to launch your Registry + 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) + 5. reproduce your problem and get your docker daemon logs showing the error + 6. if relevant, copy your registry logs that show the error + 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used) + 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry + +## Contributing Code + +Contributions should be made via pull requests. Pull requests will be reviewed +by one or more maintainers or reviewers and merged when acceptable. + +You should follow the basic GitHub workflow: + + 1. Use your own [fork](https://help.github.com/en/articles/about-forks) + 2. Create your [change](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#successful-changes) + 3. Test your code + 4. [Commit](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#commit-messages) your work, always [sign your commits](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#commit-messages) + 5. Push your change to your fork and create a [Pull Request](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork) + +Refer to [containerd's contribution guide](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#successful-changes) +for tips on creating a successful contribution. + +## Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. 
+ +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/distribution/reference/GOVERNANCE.md b/vendor/github.com/distribution/reference/GOVERNANCE.md new file mode 100644 index 000000000..200045b05 --- /dev/null +++ b/vendor/github.com/distribution/reference/GOVERNANCE.md @@ -0,0 +1,144 @@ +# distribution/reference Project Governance + +Distribution [Code of Conduct](./CODE-OF-CONDUCT.md) can be found here. + +For specific guidance on practical contribution steps please +see our [CONTRIBUTING.md](./CONTRIBUTING.md) guide. + +## Maintainership + +There are different types of maintainers, with different responsibilities, but +all maintainers have 3 things in common: + +1) They share responsibility in the project's success. +2) They have made a long-term, recurring time investment to improve the project. +3) They spend that time doing whatever needs to be done, not necessarily what +is the most interesting or fun. + +Maintainers are often under-appreciated, because their work is harder to appreciate. +It's easy to appreciate a really cool and technically advanced feature. It's harder +to appreciate the absence of bugs, the slow but steady improvement in stability, +or the reliability of a release process. But those things distinguish a good +project from a great one. + +## Reviewers + +A reviewer is a core role within the project. +They share in reviewing issues and pull requests and their LGTM counts towards the +required LGTM count to merge a code change into the project. + +Reviewers are part of the organization but do not have write access. +Becoming a reviewer is a core aspect in the journey to becoming a maintainer. + +## Adding maintainers + +Maintainers are first and foremost contributors that have shown they are +committed to the long term success of a project. Contributors wanting to become +maintainers are expected to be deeply involved in contributing code, pull +request review, and triage of issues in the project for more than three months. 
+ +Just contributing does not make you a maintainer, it is about building trust +with the current maintainers of the project and being a person that they can +depend on and trust to make decisions in the best interest of the project. + +Periodically, the existing maintainers curate a list of contributors that have +shown regular activity on the project over the prior months. From this list, +maintainer candidates are selected and proposed in a pull request or a +maintainers communication channel. + +After a candidate has been announced to the maintainers, the existing +maintainers are given five business days to discuss the candidate, raise +objections and cast their vote. Votes may take place on the communication +channel or via pull request comment. Candidates must be approved by at least 66% +of the current maintainers by adding their vote on the mailing list. The +reviewer role has the same process but only requires 33% of current maintainers. +Only maintainers of the repository that the candidate is proposed for are +allowed to vote. + +If a candidate is approved, a maintainer will contact the candidate to invite +the candidate to open a pull request that adds the contributor to the +MAINTAINERS file. The voting process may take place inside a pull request if a +maintainer has already discussed the candidacy with the candidate and a +maintainer is willing to be a sponsor by opening the pull request. The candidate +becomes a maintainer once the pull request is merged. + +## Stepping down policy + +Life priorities, interests, and passions can change. If you're a maintainer but +feel you must remove yourself from the list, inform other maintainers that you +intend to step down, and if possible, help find someone to pick up your work. +At the very least, ensure your work can be continued where you left off. + +After you've informed other maintainers, create a pull request to remove +yourself from the MAINTAINERS file. + +## Removal of inactive maintainers + +Similar to the procedure for adding new maintainers, existing maintainers can +be removed from the list if they do not show significant activity on the +project. Periodically, the maintainers review the list of maintainers and their +activity over the last three months. + +If a maintainer has shown insufficient activity over this period, a neutral +person will contact the maintainer to ask if they want to continue being +a maintainer. If the maintainer decides to step down as a maintainer, they +open a pull request to be removed from the MAINTAINERS file. + +If the maintainer wants to remain a maintainer, but is unable to perform the +required duties they can be removed with a vote of at least 66% of the current +maintainers. In this case, maintainers should first propose the change to +maintainers via the maintainers communication channel, then open a pull request +for voting. The voting period is five business days. The voting pull request +should not come as a surpise to any maintainer and any discussion related to +performance must not be discussed on the pull request. + +## How are decisions made? + +Docker distribution is an open-source project with an open design philosophy. +This means that the repository is the source of truth for EVERY aspect of the +project, including its philosophy, design, road map, and APIs. *If it's part of +the project, it's in the repo. If it's in the repo, it's part of the project.* + +As a result, all decisions can be expressed as changes to the repository. 
An +implementation change is a change to the source code. An API change is a change +to the API specification. A philosophy change is a change to the philosophy +manifesto, and so on. + +All decisions affecting distribution, big and small, follow the same 3 steps: + +* Step 1: Open a pull request. Anyone can do this. + +* Step 2: Discuss the pull request. Anyone can do this. + +* Step 3: Merge or refuse the pull request. Who does this depends on the nature +of the pull request and which areas of the project it affects. + +## Helping contributors with the DCO + +The [DCO or `Sign your work`](./CONTRIBUTING.md#sign-your-work) +requirement is not intended as a roadblock or speed bump. + +Some contributors are not as familiar with `git`, or have used a web +based editor, and thus asking them to `git commit --amend -s` is not the best +way forward. + +In this case, maintainers can update the commits based on clause (c) of the DCO. +The most trivial way for a contributor to allow the maintainer to do this, is to +add a DCO signature in a pull requests's comment, or a maintainer can simply +note that the change is sufficiently trivial that it does not substantially +change the existing contribution - i.e., a spelling change. + +When you add someone's DCO, please also add your own to keep a log. + +## I'm a maintainer. Should I make pull requests too? + +Yes. Nobody should ever push to master directly. All changes should be +made through a pull request. + +## Conflict Resolution + +If you have a technical dispute that you feel has reached an impasse with a +subset of the community, any contributor may open an issue, specifically +calling for a resolution vote of the current core maintainers to resolve the +dispute. The same voting quorums required (2/3) for adding and removing +maintainers will apply to conflict resolution. 
diff --git a/vendor/github.com/docker/distribution/LICENSE b/vendor/github.com/distribution/reference/LICENSE similarity index 100% rename from vendor/github.com/docker/distribution/LICENSE rename to vendor/github.com/distribution/reference/LICENSE diff --git a/vendor/github.com/distribution/reference/MAINTAINERS b/vendor/github.com/distribution/reference/MAINTAINERS new file mode 100644 index 000000000..9e0a60c8b --- /dev/null +++ b/vendor/github.com/distribution/reference/MAINTAINERS @@ -0,0 +1,26 @@ +# Distribution project maintainers & reviewers +# +# See GOVERNANCE.md for maintainer versus reviewer roles +# +# MAINTAINERS (cncf-distribution-maintainers@lists.cncf.io) +# GitHub ID, Name, Email address +"chrispat","Chris Patterson","chrispat@github.com" +"clarkbw","Bryan Clark","clarkbw@github.com" +"corhere","Cory Snider","csnider@mirantis.com" +"deleteriousEffect","Hayley Swimelar","hswimelar@gitlab.com" +"heww","He Weiwei","hweiwei@vmware.com" +"joaodrp","João Pereira","jpereira@gitlab.com" +"justincormack","Justin Cormack","justin.cormack@docker.com" +"squizzi","Kyle Squizzato","ksquizzato@mirantis.com" +"milosgajdos","Milos Gajdos","milosthegajdos@gmail.com" +"sargun","Sargun Dhillon","sargun@sargun.me" +"wy65701436","Wang Yan","wangyan@vmware.com" +"stevelasker","Steve Lasker","steve.lasker@microsoft.com" +# +# REVIEWERS +# GitHub ID, Name, Email address +"dmcgowan","Derek McGowan","derek@mcgstyle.net" +"stevvooe","Stephen Day","stevvooe@gmail.com" +"thajeztah","Sebastiaan van Stijn","github@gone.nl" +"DavidSpek", "David van der Spek", "vanderspek.david@gmail.com" +"Jamstah", "James Hewitt", "james.hewitt@gmail.com" diff --git a/vendor/github.com/distribution/reference/Makefile b/vendor/github.com/distribution/reference/Makefile new file mode 100644 index 000000000..c78576b75 --- /dev/null +++ b/vendor/github.com/distribution/reference/Makefile @@ -0,0 +1,25 @@ +# Project packages. +PACKAGES=$(shell go list ./...) + +# Flags passed to `go test` +BUILDFLAGS ?= +TESTFLAGS ?= + +.PHONY: all build test coverage +.DEFAULT: all + +all: build + +build: ## no binaries to build, so just check compilation suceeds + go build ${BUILDFLAGS} ./... + +test: ## run tests + go test ${TESTFLAGS} ./... + +coverage: ## generate coverprofiles from the unit tests + rm -f coverage.txt + go test ${TESTFLAGS} -cover -coverprofile=cover.out ./... + +.PHONY: help +help: + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_\/%-]+:.*?##/ { printf " \033[36m%-27s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) diff --git a/vendor/github.com/distribution/reference/README.md b/vendor/github.com/distribution/reference/README.md new file mode 100644 index 000000000..e2531e49c --- /dev/null +++ b/vendor/github.com/distribution/reference/README.md @@ -0,0 +1,30 @@ +# Distribution reference + +Go library to handle references to container images. 
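To make the purpose of the newly vendored package concrete, a minimal usage sketch follows; it assumes the `github.com/distribution/reference` import path used throughout this vendor tree, exercises only functions that appear in the package's normalize and helpers files (`ParseNormalizedNamed`, `TagNameOnly`, `FamiliarString`), and the output comments are illustrative rather than authoritative:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	// Normalize a familiar name to its fully qualified form.
	named, err := reference.ParseNormalizedNamed("ubuntu")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String()) // docker.io/library/ubuntu

	// Apply the default ":latest" tag when neither a tag nor a digest is set.
	tagged := reference.TagNameOnly(named)
	fmt.Println(tagged.String()) // docker.io/library/ubuntu:latest

	// Shorten back to the familiar form shown in Docker UIs.
	fmt.Println(reference.FamiliarString(tagged)) // ubuntu:latest
}
```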
+ + + +[![Build Status](https://github.com/distribution/reference/actions/workflows/test.yml/badge.svg?branch=main&event=push)](https://github.com/distribution/reference/actions?query=workflow%3ACI) +[![GoDoc](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/distribution/reference) +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache--2.0-blue.svg)](LICENSE) +[![codecov](https://codecov.io/gh/distribution/reference/branch/main/graph/badge.svg)](https://codecov.io/gh/distribution/reference) +[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference.svg?type=shield)](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference?ref=badge_shield) + +This repository contains a library for handling refrences to container images held in container registries. Please see [godoc](https://pkg.go.dev/github.com/distribution/reference) for details. + +## Contribution + +Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute +issues, fixes, and patches to this project. + +## Communication + +For async communication and long running discussions please use issues and pull requests on the github repo. +This will be the best place to discuss design and implementation. + +For sync communication we have a #distribution channel in the [CNCF Slack](https://slack.cncf.io/) +that everyone is welcome to join and chat about development. + +## Licenses + +The distribution codebase is released under the [Apache 2.0 license](LICENSE). diff --git a/vendor/github.com/distribution/reference/SECURITY.md b/vendor/github.com/distribution/reference/SECURITY.md new file mode 100644 index 000000000..aaf983c0f --- /dev/null +++ b/vendor/github.com/distribution/reference/SECURITY.md @@ -0,0 +1,7 @@ +# Security Policy + +## Reporting a Vulnerability + +The maintainers take security seriously. If you discover a security issue, please bring it to their attention right away! + +Please DO NOT file a public issue, instead send your report privately to cncf-distribution-security@lists.cncf.io. diff --git a/vendor/github.com/distribution/reference/distribution-logo.svg b/vendor/github.com/distribution/reference/distribution-logo.svg new file mode 100644 index 000000000..cc9f4073b --- /dev/null +++ b/vendor/github.com/distribution/reference/distribution-logo.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/reference/helpers.go b/vendor/github.com/distribution/reference/helpers.go similarity index 94% rename from vendor/github.com/docker/distribution/reference/helpers.go rename to vendor/github.com/distribution/reference/helpers.go index 978df7eab..d10c7ef83 100644 --- a/vendor/github.com/docker/distribution/reference/helpers.go +++ b/vendor/github.com/distribution/reference/helpers.go @@ -32,7 +32,7 @@ func FamiliarString(ref Reference) string { } // FamiliarMatch reports whether ref matches the specified pattern. -// See https://godoc.org/path#Match for supported patterns. +// See [path.Match] for supported patterns. 
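To make the [path.Match] pattern semantics concrete, a short sketch of matching a pattern against a reference's familiar form; the pattern and reference below are arbitrary examples, not values taken from this patch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/distribution/reference"
)

func main() {
	ref, err := reference.ParseNormalizedNamed("docker.io/library/ubuntu:latest")
	if err != nil {
		log.Fatal(err)
	}

	// The pattern is matched against the familiar form ("ubuntu:latest" here),
	// using path.Match semantics, so '*' does not cross '/' boundaries.
	ok, err := reference.FamiliarMatch("ubuntu:*", ref)
	fmt.Println(ok, err) // true <nil>
}
```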
func FamiliarMatch(pattern string, ref Reference) (bool, error) { matched, err := path.Match(pattern, FamiliarString(ref)) if namedRef, isNamed := ref.(Named); isNamed && !matched { diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/distribution/reference/normalize.go similarity index 52% rename from vendor/github.com/docker/distribution/reference/normalize.go rename to vendor/github.com/distribution/reference/normalize.go index b3dfb7a6d..a30229d01 100644 --- a/vendor/github.com/docker/distribution/reference/normalize.go +++ b/vendor/github.com/distribution/reference/normalize.go @@ -1,19 +1,42 @@ package reference import ( - "errors" "fmt" "strings" - "github.com/docker/distribution/digestset" "github.com/opencontainers/go-digest" ) -var ( +const ( + // legacyDefaultDomain is the legacy domain for Docker Hub (which was + // originally named "the Docker Index"). This domain is still used for + // authentication and image search, which were part of the "v1" Docker + // registry specification. + // + // This domain will continue to be supported, but there are plans to consolidate + // legacy domains to new "canonical" domains. Once those domains are decided + // on, we must update the normalization functions, but preserve compatibility + // with existing installs, clients, and user configuration. legacyDefaultDomain = "index.docker.io" - defaultDomain = "docker.io" - officialRepoName = "library" - defaultTag = "latest" + + // defaultDomain is the default domain used for images on Docker Hub. + // It is used to normalize "familiar" names to canonical names, for example, + // to convert "ubuntu" to "docker.io/library/ubuntu:latest". + // + // Note that actual domain of Docker Hub's registry is registry-1.docker.io. + // This domain will continue to be supported, but there are plans to consolidate + // legacy domains to new "canonical" domains. Once those domains are decided + // on, we must update the normalization functions, but preserve compatibility + // with existing installs, clients, and user configuration. + defaultDomain = "docker.io" + + // officialRepoPrefix is the namespace used for official images on Docker Hub. + // It is used to normalize "familiar" names to canonical names, for example, + // to convert "ubuntu" to "docker.io/library/ubuntu:latest". + officialRepoPrefix = "library/" + + // defaultTag is the default tag if no tag is provided. + defaultTag = "latest" ) // normalizedNamed represents a name which has been @@ -35,14 +58,14 @@ func ParseNormalizedNamed(s string) (Named, error) { return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) } domain, remainder := splitDockerDomain(s) - var remoteName string + var remote string if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { - remoteName = remainder[:tagSep] + remote = remainder[:tagSep] } else { - remoteName = remainder + remote = remainder } - if strings.ToLower(remoteName) != remoteName { - return nil, errors.New("invalid reference format: repository name must be lowercase") + if strings.ToLower(remote) != remote { + return nil, fmt.Errorf("invalid reference format: repository name (%s) must be lowercase", remote) } ref, err := Parse(domain + "/" + remainder) @@ -56,41 +79,53 @@ func ParseNormalizedNamed(s string) (Named, error) { return named, nil } -// ParseDockerRef normalizes the image reference following the docker convention. This is added -// mainly for backward compatibility. 
-// The reference returned can only be either tagged or digested. For reference contains both tag -// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@ -// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as -// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa. +// namedTaggedDigested is a reference that has both a tag and a digest. +type namedTaggedDigested interface { + NamedTagged + Digested +} + +// ParseDockerRef normalizes the image reference following the docker convention, +// which allows for references to contain both a tag and a digest. It returns a +// reference that is either tagged or digested. For references containing both +// a tag and a digest, it returns a digested reference. For example, the following +// reference: +// +// docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa +// +// Is returned as a digested reference (with the ":latest" tag removed): +// +// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa +// +// References that are already "tagged" or "digested" are returned unmodified: +// +// // Already a digested reference +// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa +// +// // Already a named reference +// docker.io/library/busybox:latest func ParseDockerRef(ref string) (Named, error) { named, err := ParseNormalizedNamed(ref) if err != nil { return nil, err } - if _, ok := named.(NamedTagged); ok { - if canonical, ok := named.(Canonical); ok { - // The reference is both tagged and digested, only - // return digested. - newNamed, err := WithName(canonical.Name()) - if err != nil { - return nil, err - } - newCanonical, err := WithDigest(newNamed, canonical.Digest()) - if err != nil { - return nil, err - } - return newCanonical, nil + if canonical, ok := named.(namedTaggedDigested); ok { + // The reference is both tagged and digested; only return digested. + newNamed, err := WithName(canonical.Name()) + if err != nil { + return nil, err } + return WithDigest(newNamed, canonical.Digest()) } return TagNameOnly(named), nil } -// splitDockerDomain splits a repository name to domain and remotename string. +// splitDockerDomain splits a repository name to domain and remote-name. // If no valid domain is found, the default domain is used. Repository name // needs to be already validated before. func splitDockerDomain(name string) (domain, remainder string) { i := strings.IndexRune(name, '/') - if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { + if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != localhost && strings.ToLower(name[:i]) == name[:i]) { domain, remainder = defaultDomain, name } else { domain, remainder = name[:i], name[i+1:] @@ -99,13 +134,13 @@ func splitDockerDomain(name string) (domain, remainder string) { domain = defaultDomain } if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { - remainder = officialRepoName + "/" + remainder + remainder = officialRepoPrefix + remainder } return } // familiarizeName returns a shortened version of the name familiar -// to to the Docker UI. Familiar names have the default domain +// to the Docker UI. Familiar names have the default domain // "docker.io" and "library/" repository prefix removed. 
// For example, "docker.io/library/redis" will have the familiar // name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". @@ -119,8 +154,15 @@ func familiarizeName(named namedRepository) repository { if repo.domain == defaultDomain { repo.domain = "" // Handle official repositories which have the pattern "library/" - if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { - repo.path = split[1] + if strings.HasPrefix(repo.path, officialRepoPrefix) { + // TODO(thaJeztah): this check may be too strict, as it assumes the + // "library/" namespace does not have nested namespaces. While this + // is true (currently), technically it would be possible for Docker + // Hub to use those (e.g. "library/distros/ubuntu:latest"). + // See https://github.com/distribution/distribution/pull/3769#issuecomment-1302031785. + if remainder := strings.TrimPrefix(repo.path, officialRepoPrefix); !strings.ContainsRune(remainder, '/') { + repo.path = remainder + } } } return repo @@ -180,20 +222,3 @@ func ParseAnyReference(ref string) (Reference, error) { return ParseNormalizedNamed(ref) } - -// ParseAnyReferenceWithSet parses a reference string as a possible short -// identifier to be matched in a digest set, a full digest, or familiar name. -func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { - if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { - dgst, err := ds.Lookup(ref) - if err == nil { - return digestReference(dgst), nil - } - } else { - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - } - - return ParseNormalizedNamed(ref) -} diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/distribution/reference/reference.go similarity index 93% rename from vendor/github.com/docker/distribution/reference/reference.go rename to vendor/github.com/distribution/reference/reference.go index b7cd00b0d..e98c44daa 100644 --- a/vendor/github.com/docker/distribution/reference/reference.go +++ b/vendor/github.com/distribution/reference/reference.go @@ -4,11 +4,14 @@ // Grammar // // reference := name [ ":" tag ] [ "@" digest ] -// name := [domain '/'] path-component ['/' path-component]* -// domain := domain-component ['.' domain-component]* [':' port-number] +// name := [domain '/'] remote-name +// domain := host [':' port-number] +// host := domain-name | IPv4address | \[ IPv6address \] ; rfc3986 appendix-A +// domain-name := domain-component ['.' domain-component]* // domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ // port-number := /[0-9]+/ // path-component := alpha-numeric [separator alpha-numeric]* +// path (or "remote-name") := path-component ['/' path-component]* // alpha-numeric := /[a-z0-9]+/ // separator := /[_.]|__|[-]*/ // @@ -21,7 +24,6 @@ // digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value // // identifier := /[a-f0-9]{64}/ -// short-identifier := /[a-f0-9]{6,64}/ package reference import ( @@ -145,7 +147,7 @@ type namedRepository interface { Path() string } -// Domain returns the domain part of the Named reference +// Domain returns the domain part of the [Named] reference. func Domain(named Named) string { if r, ok := named.(namedRepository); ok { return r.Domain() @@ -154,7 +156,7 @@ func Domain(named Named) string { return domain } -// Path returns the name without the domain part of the Named reference +// Path returns the name without the domain part of the [Named] reference. 
func Path(named Named) (name string) { if r, ok := named.(namedRepository); ok { return r.Path() @@ -175,7 +177,8 @@ func splitDomain(name string) (string, string) { // hostname and name string. If no valid hostname is // found, the hostname is empty and the full value // is returned as name -// DEPRECATED: Use Domain or Path +// +// Deprecated: Use [Domain] or [Path]. func SplitHostname(named Named) (string, string) { if r, ok := named.(namedRepository); ok { return r.Domain(), r.Path() @@ -185,7 +188,6 @@ func SplitHostname(named Named) (string, string) { // Parse parses s and returns a syntactically valid Reference. // If an error was encountered it is returned, along with a nil Reference. -// NOTE: Parse will not handle short digests. func Parse(s string) (Reference, error) { matches := ReferenceRegexp.FindStringSubmatch(s) if matches == nil { @@ -237,7 +239,6 @@ func Parse(s string) (Reference, error) { // the Named interface. The reference must have a name and be in the canonical // form, otherwise an error is returned. // If an error was encountered it is returned, along with a nil Reference. -// NOTE: ParseNamed will not handle short digests. func ParseNamed(s string) (Named, error) { named, err := ParseNormalizedNamed(s) if err != nil { @@ -320,11 +321,13 @@ func WithDigest(name Named, digest digest.Digest) (Canonical, error) { // TrimNamed removes any tag or digest from the named reference. func TrimNamed(ref Named) Named { - domain, path := SplitHostname(ref) - return repository{ - domain: domain, - path: path, + repo := repository{} + if r, ok := ref.(namedRepository); ok { + repo.domain, repo.path = r.Domain(), r.Path() + } else { + repo.domain, repo.path = splitDomain(ref.Name()) } + return repo } func getBestReferenceType(ref reference) Reference { diff --git a/vendor/github.com/distribution/reference/regexp.go b/vendor/github.com/distribution/reference/regexp.go new file mode 100644 index 000000000..65bc49d79 --- /dev/null +++ b/vendor/github.com/distribution/reference/regexp.go @@ -0,0 +1,163 @@ +package reference + +import ( + "regexp" + "strings" +) + +// DigestRegexp matches well-formed digests, including algorithm (e.g. "sha256:"). +var DigestRegexp = regexp.MustCompile(digestPat) + +// DomainRegexp matches hostname or IP-addresses, optionally including a port +// number. It defines the structure of potential domain components that may be +// part of image names. This is purposely a subset of what is allowed by DNS to +// ensure backwards compatibility with Docker image names. It may be a subset of +// DNS domain name, an IPv4 address in decimal format, or an IPv6 address between +// square brackets (excluding zone identifiers as defined by [RFC 6874] or special +// addresses such as IPv4-Mapped). +// +// [RFC 6874]: https://www.rfc-editor.org/rfc/rfc6874. +var DomainRegexp = regexp.MustCompile(domainAndPort) + +// IdentifierRegexp is the format for string identifier used as a +// content addressable identifier using sha256. These identifiers +// are like digests without the algorithm, since sha256 is used. +var IdentifierRegexp = regexp.MustCompile(identifier) + +// NameRegexp is the format for the name component of references, including +// an optional domain and port, but without tag or digest suffix. +var NameRegexp = regexp.MustCompile(namePat) + +// ReferenceRegexp is the full supported format of a reference. The regexp +// is anchored and has capturing groups for name, tag, and digest +// components. 
+var ReferenceRegexp = regexp.MustCompile(referencePat) + +// TagRegexp matches valid tag names. From [docker/docker:graph/tags.go]. +// +// [docker/docker:graph/tags.go]: https://github.com/moby/moby/blob/v1.6.0/graph/tags.go#L26-L28 +var TagRegexp = regexp.MustCompile(tag) + +const ( + // alphanumeric defines the alphanumeric atom, typically a + // component of names. This only allows lower case characters and digits. + alphanumeric = `[a-z0-9]+` + + // separator defines the separators allowed to be embedded in name + // components. This allows one period, one or two underscore and multiple + // dashes. Repeated dashes and underscores are intentionally treated + // differently. In order to support valid hostnames as name components, + // supporting repeated dash was added. Additionally double underscore is + // now allowed as a separator to loosen the restriction for previously + // supported names. + separator = `(?:[._]|__|[-]+)` + + // localhost is treated as a special value for domain-name. Any other + // domain-name without a "." or a ":port" are considered a path component. + localhost = `localhost` + + // domainNameComponent restricts the registry domain component of a + // repository name to start with a component as defined by DomainRegexp. + domainNameComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])` + + // optionalPort matches an optional port-number including the port separator + // (e.g. ":80"). + optionalPort = `(?::[0-9]+)?` + + // tag matches valid tag names. From docker/docker:graph/tags.go. + tag = `[\w][\w.-]{0,127}` + + // digestPat matches well-formed digests, including algorithm (e.g. "sha256:"). + // + // TODO(thaJeztah): this should follow the same rules as https://pkg.go.dev/github.com/opencontainers/go-digest@v1.0.0#DigestRegexp + // so that go-digest defines the canonical format. Note that the go-digest is + // more relaxed: + // - it allows multiple algorithms (e.g. "sha256+b64:") to allow + // future expansion of supported algorithms. + // - it allows the "" value to use urlsafe base64 encoding as defined + // in [rfc4648, section 5]. + // + // [rfc4648, section 5]: https://www.rfc-editor.org/rfc/rfc4648#section-5. + digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}` + + // identifier is the format for a content addressable identifier using sha256. + // These identifiers are like digests without the algorithm, since sha256 is used. + identifier = `([a-f0-9]{64})` + + // ipv6address are enclosed between square brackets and may be represented + // in many ways, see rfc5952. Only IPv6 in compressed or uncompressed format + // are allowed, IPv6 zone identifiers (rfc6874) or Special addresses such as + // IPv4-Mapped are deliberately excluded. + ipv6address = `\[(?:[a-fA-F0-9:]+)\]` +) + +var ( + // domainName defines the structure of potential domain components + // that may be part of image names. This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. This includes IPv4 addresses on decimal format. + domainName = domainNameComponent + anyTimes(`\.`+domainNameComponent) + + // host defines the structure of potential domains based on the URI + // Host subcomponent on rfc3986. It may be a subset of DNS domain name, + // or an IPv4 address in decimal format, or an IPv6 address between square + // brackets (excluding zone identifiers as defined by rfc6874 or special + // addresses such as IPv4-Mapped). 
+ host = `(?:` + domainName + `|` + ipv6address + `)` + + // allowed by the URI Host subcomponent on rfc3986 to ensure backwards + // compatibility with Docker image names. + domainAndPort = host + optionalPort + + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. + anchoredTagRegexp = regexp.MustCompile(anchored(tag)) + + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = regexp.MustCompile(anchored(digestPat)) + + // pathComponent restricts path-components to start with an alphanumeric + // character, with following parts able to be separated by a separator + // (one period, one or two underscore and multiple dashes). + pathComponent = alphanumeric + anyTimes(separator+alphanumeric) + + // remoteName matches the remote-name of a repository. It consists of one + // or more forward slash (/) delimited path-components: + // + // pathComponent[[/pathComponent] ...] // e.g., "library/ubuntu" + remoteName = pathComponent + anyTimes(`/`+pathComponent) + namePat = optional(domainAndPort+`/`) + remoteName + + // anchoredNameRegexp is used to parse a name value, capturing the + // domain and trailing components. + anchoredNameRegexp = regexp.MustCompile(anchored(optional(capture(domainAndPort), `/`), capture(remoteName))) + + referencePat = anchored(capture(namePat), optional(`:`, capture(tag)), optional(`@`, capture(digestPat))) + + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. + anchoredIdentifierRegexp = regexp.MustCompile(anchored(identifier)) +) + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...string) string { + return `(?:` + strings.Join(res, "") + `)?` +} + +// anyTimes wraps the expression in a non-capturing group that can occur +// any number of times. +func anyTimes(res ...string) string { + return `(?:` + strings.Join(res, "") + `)*` +} + +// capture wraps the expression in a capturing group. +func capture(res ...string) string { + return `(` + strings.Join(res, "") + `)` +} + +// anchored anchors the regular expression by adding start and end delimiters. +func anchored(res ...string) string { + return `^` + strings.Join(res, "") + `$` +} diff --git a/vendor/github.com/distribution/reference/sort.go b/vendor/github.com/distribution/reference/sort.go new file mode 100644 index 000000000..416c37b07 --- /dev/null +++ b/vendor/github.com/distribution/reference/sort.go @@ -0,0 +1,75 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package reference + +import ( + "sort" +) + +// Sort sorts string references preferring higher information references. +// +// The precedence is as follows: +// +// 1. [Named] + [Tagged] + [Digested] (e.g., "docker.io/library/busybox:latest@sha256:") +// 2. [Named] + [Tagged] (e.g., "docker.io/library/busybox:latest") +// 3. 
+// 4. [Named] (e.g., "docker.io/library/busybox")
+// 5. [Digested] (e.g., "docker.io@sha256:<digest>")
+// 6. Parse error
+func Sort(references []string) []string {
+	var prefs []Reference
+	var bad []string
+
+	for _, ref := range references {
+		pref, err := ParseAnyReference(ref)
+		if err != nil {
+			bad = append(bad, ref)
+		} else {
+			prefs = append(prefs, pref)
+		}
+	}
+	sort.Slice(prefs, func(a, b int) bool {
+		ar := refRank(prefs[a])
+		br := refRank(prefs[b])
+		if ar == br {
+			return prefs[a].String() < prefs[b].String()
+		}
+		return ar < br
+	})
+	sort.Strings(bad)
+	var refs []string
+	for _, pref := range prefs {
+		refs = append(refs, pref.String())
+	}
+	return append(refs, bad...)
+}
+
+func refRank(ref Reference) uint8 {
+	if _, ok := ref.(Named); ok {
+		if _, ok = ref.(Tagged); ok {
+			if _, ok = ref.(Digested); ok {
+				return 1
+			}
+			return 2
+		}
+		if _, ok = ref.(Digested); ok {
+			return 3
+		}
+		return 4
+	}
+	return 5
+}
diff --git a/vendor/github.com/docker/distribution/digestset/set.go b/vendor/github.com/docker/distribution/digestset/set.go
deleted file mode 100644
index 71327dca7..000000000
--- a/vendor/github.com/docker/distribution/digestset/set.go
+++ /dev/null
@@ -1,247 +0,0 @@
-package digestset
-
-import (
-	"errors"
-	"sort"
-	"strings"
-	"sync"
-
-	digest "github.com/opencontainers/go-digest"
-)
-
-var (
-	// ErrDigestNotFound is used when a matching digest
-	// could not be found in a set.
-	ErrDigestNotFound = errors.New("digest not found")
-
-	// ErrDigestAmbiguous is used when multiple digests
-	// are found in a set. None of the matching digests
-	// should be considered valid matches.
-	ErrDigestAmbiguous = errors.New("ambiguous digest string")
-)
-
-// Set is used to hold a unique set of digests which
-// may be easily referenced by easily referenced by a string
-// representation of the digest as well as short representation.
-// The uniqueness of the short representation is based on other
-// digests in the set. If digests are omitted from this set,
-// collisions in a larger set may not be detected, therefore it
-// is important to always do short representation lookups on
-// the complete set of digests. To mitigate collisions, an
-// appropriately long short code should be used.
-type Set struct {
-	mutex   sync.RWMutex
-	entries digestEntries
-}
-
-// NewSet creates an empty set of digests
-// which may have digests added.
-func NewSet() *Set {
-	return &Set{
-		entries: digestEntries{},
-	}
-}
-
-// checkShortMatch checks whether two digests match as either whole
-// values or short values. This function does not test equality,
-// rather whether the second value could match against the first
-// value.
-func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
-	if len(hex) == len(shortHex) {
-		if hex != shortHex {
-			return false
-		}
-		if len(shortAlg) > 0 && string(alg) != shortAlg {
-			return false
-		}
-	} else if !strings.HasPrefix(hex, shortHex) {
-		return false
-	} else if len(shortAlg) > 0 && string(alg) != shortAlg {
-		return false
-	}
-	return true
-}
-
-// Lookup looks for a digest matching the given string representation.
-// If no digests could be found ErrDigestNotFound will be returned
-// with an empty digest value. If multiple matches are found
-// ErrDigestAmbiguous will be returned with an empty digest value.
-func (dst *Set) Lookup(d string) (digest.Digest, error) { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - if len(dst.entries) == 0 { - return "", ErrDigestNotFound - } - var ( - searchFunc func(int) bool - alg digest.Algorithm - hex string - ) - dgst, err := digest.Parse(d) - if err == digest.ErrDigestInvalidFormat { - hex = d - searchFunc = func(i int) bool { - return dst.entries[i].val >= d - } - } else { - hex = dgst.Hex() - alg = dgst.Algorithm() - searchFunc = func(i int) bool { - if dst.entries[i].val == hex { - return dst.entries[i].alg >= alg - } - return dst.entries[i].val >= hex - } - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { - return "", ErrDigestNotFound - } - if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { - return dst.entries[idx].digest, nil - } - if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { - return "", ErrDigestAmbiguous - } - - return dst.entries[idx].digest, nil -} - -// Add adds the given digest to the set. An error will be returned -// if the given digest is invalid. If the digest already exists in the -// set, this operation will be a no-op. -func (dst *Set) Add(d digest.Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) { - dst.entries = append(dst.entries, entry) - return nil - } else if dst.entries[idx].digest == d { - return nil - } - - entries := append(dst.entries, nil) - copy(entries[idx+1:], entries[idx:len(entries)-1]) - entries[idx] = entry - dst.entries = entries - return nil -} - -// Remove removes the given digest from the set. An err will be -// returned if the given digest is invalid. If the digest does -// not exist in the set, this operation will be a no-op. -func (dst *Set) Remove(d digest.Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - // Not found if idx is after or value at idx is not digest - if idx == len(dst.entries) || dst.entries[idx].digest != d { - return nil - } - - entries := dst.entries - copy(entries[idx:], entries[idx+1:]) - entries = entries[:len(entries)-1] - dst.entries = entries - - return nil -} - -// All returns all the digests in the set -func (dst *Set) All() []digest.Digest { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - retValues := make([]digest.Digest, len(dst.entries)) - for i := range dst.entries { - retValues[i] = dst.entries[i].digest - } - - return retValues -} - -// ShortCodeTable returns a map of Digest to unique short codes. The -// length represents the minimum value, the maximum length may be the -// entire value of digest if uniqueness cannot be achieved without the -// full value. This function will attempt to make short codes as short -// as possible to be unique. 
-func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - m := make(map[digest.Digest]string, len(dst.entries)) - l := length - resetIdx := 0 - for i := 0; i < len(dst.entries); i++ { - var short string - extended := true - for extended { - extended = false - if len(dst.entries[i].val) <= l { - short = dst.entries[i].digest.String() - } else { - short = dst.entries[i].val[:l] - for j := i + 1; j < len(dst.entries); j++ { - if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { - if j > resetIdx { - resetIdx = j - } - extended = true - } else { - break - } - } - if extended { - l++ - } - } - } - m[dst.entries[i].digest] = short - if i >= resetIdx { - l = length - } - } - return m -} - -type digestEntry struct { - alg digest.Algorithm - val string - digest digest.Digest -} - -type digestEntries []*digestEntry - -func (d digestEntries) Len() int { - return len(d) -} - -func (d digestEntries) Less(i, j int) bool { - if d[i].val != d[j].val { - return d[i].val < d[j].val - } - return d[i].alg < d[j].alg -} - -func (d digestEntries) Swap(i, j int) { - d[i], d[j] = d[j], d[i] -} diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go deleted file mode 100644 index 786034932..000000000 --- a/vendor/github.com/docker/distribution/reference/regexp.go +++ /dev/null @@ -1,143 +0,0 @@ -package reference - -import "regexp" - -var ( - // alphaNumericRegexp defines the alpha numeric atom, typically a - // component of names. This only allows lower case characters and digits. - alphaNumericRegexp = match(`[a-z0-9]+`) - - // separatorRegexp defines the separators allowed to be embedded in name - // components. This allow one period, one or two underscore and multiple - // dashes. - separatorRegexp = match(`(?:[._]|__|[-]*)`) - - // nameComponentRegexp restricts registry path component names to start - // with at least one letter or number, with following parts able to be - // separated by one period, one or two underscore and multiple dashes. - nameComponentRegexp = expression( - alphaNumericRegexp, - optional(repeated(separatorRegexp, alphaNumericRegexp))) - - // domainComponentRegexp restricts the registry domain component of a - // repository name to start with a component as defined by DomainRegexp - // and followed by an optional port. - domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) - - // DomainRegexp defines the structure of potential domain components - // that may be part of image names. This is purposely a subset of what is - // allowed by DNS to ensure backwards compatibility with Docker image - // names. - DomainRegexp = expression( - domainComponentRegexp, - optional(repeated(literal(`.`), domainComponentRegexp)), - optional(literal(`:`), match(`[0-9]+`))) - - // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. - TagRegexp = match(`[\w][\w.-]{0,127}`) - - // anchoredTagRegexp matches valid tag names, anchored at the start and - // end of the matched string. - anchoredTagRegexp = anchored(TagRegexp) - - // DigestRegexp matches valid digests. - DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) - - // anchoredDigestRegexp matches valid digests, anchored at the start and - // end of the matched string. - anchoredDigestRegexp = anchored(DigestRegexp) - - // NameRegexp is the format for the name component of references. 
The - // regexp has capturing groups for the domain and name part omitting - // the separating forward slash from either. - NameRegexp = expression( - optional(DomainRegexp, literal(`/`)), - nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp))) - - // anchoredNameRegexp is used to parse a name value, capturing the - // domain and trailing components. - anchoredNameRegexp = anchored( - optional(capture(DomainRegexp), literal(`/`)), - capture(nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp)))) - - // ReferenceRegexp is the full supported format of a reference. The regexp - // is anchored and has capturing groups for name, tag, and digest - // components. - ReferenceRegexp = anchored(capture(NameRegexp), - optional(literal(":"), capture(TagRegexp)), - optional(literal("@"), capture(DigestRegexp))) - - // IdentifierRegexp is the format for string identifier used as a - // content addressable identifier using sha256. These identifiers - // are like digests without the algorithm, since sha256 is used. - IdentifierRegexp = match(`([a-f0-9]{64})`) - - // ShortIdentifierRegexp is the format used to represent a prefix - // of an identifier. A prefix may be used to match a sha256 identifier - // within a list of trusted identifiers. - ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) - - // anchoredIdentifierRegexp is used to check or match an - // identifier value, anchored at start and end of string. - anchoredIdentifierRegexp = anchored(IdentifierRegexp) - - // anchoredShortIdentifierRegexp is used to check if a value - // is a possible identifier prefix, anchored at start and end - // of string. - anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) -) - -// match compiles the string to a regular expression. -var match = regexp.MustCompile - -// literal compiles s into a literal regular expression, escaping any regexp -// reserved characters. -func literal(s string) *regexp.Regexp { - re := match(regexp.QuoteMeta(s)) - - if _, complete := re.LiteralPrefix(); !complete { - panic("must be a literal") - } - - return re -} - -// expression defines a full expression, where each regular expression must -// follow the previous. -func expression(res ...*regexp.Regexp) *regexp.Regexp { - var s string - for _, re := range res { - s += re.String() - } - - return match(s) -} - -// optional wraps the expression in a non-capturing group and makes the -// production optional. -func optional(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `?`) -} - -// repeated wraps the regexp in a non-capturing group to get one or more -// matches. -func repeated(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `+`) -} - -// group wraps the regexp in a non-capturing group. -func group(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(?:` + expression(res...).String() + `)`) -} - -// capture wraps the expression in a capturing group. -func capture(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(` + expression(res...).String() + `)`) -} - -// anchored anchors the regular expression by adding start and end delimiters. 
-func anchored(res ...*regexp.Regexp) *regexp.Regexp { - return match(`^` + expression(res...).String() + `$`) -} diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 352018e70..5edd5a7ca 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,11 +1,15 @@ # Change history of go-restful -## [v3.10.2] - 2023-03-09 +## [v3.11.0] - 2023-08-19 + +- restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled. + +## [v3.10.2] - 2023-03-09 - DO NOT USE - introduced MergePathStrategy to be able to revert behaviour of path concatenation to 3.9.0 see comment in Readme how to customize this behaviour. -## [v3.10.1] - 2022-11-19 +## [v3.10.1] - 2022-11-19 - DO NOT USE - fix broken 3.10.0 by using path package for joining paths diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md index 85da90128..e3e30080e 100644 --- a/vendor/github.com/emicklei/go-restful/v3/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -79,7 +79,7 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo - Content encoding (gzip,deflate) of request and response payloads - Automatic responses on OPTIONS (using a filter) - Automatic CORS request handling (using a filter) -- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi), see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12)) +- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi)) - Panic recovery to produce HTTP 500, customizable using RecoverHandler(...) - Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...) - Configurable (trace) logging @@ -96,10 +96,7 @@ There are several hooks to customize the behavior of the go-restful package. - Compression - Encoders for other serializers - Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .` -- Use the variable `MergePathStrategy` to change the behaviour of composing the Route path given a root path and a local route path - - versions >= 3.10.1 has set the value to `PathJoinStrategy` that fixes a reported [security issue](https://github.com/advisories/GHSA-r48q-9g5r-8q2h) but may cause your services not to work correctly anymore. - - versions <= 3.9 had the behaviour that can be restored in newer versions by setting the value to `TrimSlashStrategy`. - - you can set value to a custom implementation (must implement MergePathStrategyFunc) +- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/` ## Resources @@ -112,4 +109,4 @@ There are several hooks to customize the behavior of the go-restful package. Type ```git shortlog -s``` for a full list of contributors. -© 2012 - 2022, http://ernestmicklei.com. MIT License. Contributions are welcome. +© 2012 - 2023, http://ernestmicklei.com. MIT License. Contributions are welcome. 
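The README change above documents the restored `TrimRightSlashEnabled` package variable that the route.go and route_builder.go hunks below add back to go-restful. A minimal, hedged sketch of how a consumer of this vendored module would exercise it; the service path, handler body, and port are illustrative assumptions, not part of this patch:

```go
package main

import (
	"io"
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

func main() {
	// Default in v3.11.0; set to false to keep the stricter path.Join
	// strategy introduced in v3.10.x.
	restful.TrimRightSlashEnabled = true

	ws := new(restful.WebService)
	ws.Path("/users/") // trailing slash is trimmed when routes are built
	ws.Route(ws.GET("/{id}").To(func(req *restful.Request, resp *restful.Response) {
		io.WriteString(resp, req.PathParameter("id"))
	}))
	restful.Add(ws)

	log.Fatal(http.ListenAndServe(":8080", nil))
}
```

With the default shown here, the root "/users/" and the sub path "/{id}" are concatenated the way releases <= 3.9.0 did (right/left slashes trimmed); flipping the variable to false keeps the path.Join behaviour from v3.10.x, as the concatPath change in route_builder.go below shows.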
diff --git a/vendor/github.com/emicklei/go-restful/v3/route.go b/vendor/github.com/emicklei/go-restful/v3/route.go index ea05b3da8..306c44be7 100644 --- a/vendor/github.com/emicklei/go-restful/v3/route.go +++ b/vendor/github.com/emicklei/go-restful/v3/route.go @@ -40,7 +40,8 @@ type Route struct { ParameterDocs []*Parameter ResponseErrors map[int]ResponseError DefaultResponse *ResponseError - ReadSample, WriteSample interface{} // structs that model an example request or response payload + ReadSample, WriteSample interface{} // structs that model an example request or response payload + WriteSamples []interface{} // if more than one return types is possible (oneof) then this will contain multiple values // Extra information used to store custom information about the route. Metadata map[string]interface{} @@ -164,7 +165,13 @@ func tokenizePath(path string) []string { if "/" == path { return nil } - return strings.Split(strings.TrimLeft(path, "/"), "/") + if TrimRightSlashEnabled { + // 3.9.0 + return strings.Split(strings.Trim(path, "/"), "/") + } else { + // 3.10.2 + return strings.Split(strings.TrimLeft(path, "/"), "/") + } } // for debugging @@ -177,4 +184,8 @@ func (r *Route) EnableContentEncoding(enabled bool) { r.contentEncodingEnabled = &enabled } -var TrimRightSlashEnabled = false +// TrimRightSlashEnabled controls whether +// - path on route building is using path.Join +// - the path of the incoming request is trimmed of its slash suffux. +// Value of true matches the behavior of <= 3.9.0 +var TrimRightSlashEnabled = true diff --git a/vendor/github.com/emicklei/go-restful/v3/route_builder.go b/vendor/github.com/emicklei/go-restful/v3/route_builder.go index 827f471de..75168c12e 100644 --- a/vendor/github.com/emicklei/go-restful/v3/route_builder.go +++ b/vendor/github.com/emicklei/go-restful/v3/route_builder.go @@ -31,17 +31,18 @@ type RouteBuilder struct { typeNameHandleFunc TypeNameHandleFunction // required // documentation - doc string - notes string - operation string - readSample, writeSample interface{} - parameters []*Parameter - errorMap map[int]ResponseError - defaultResponse *ResponseError - metadata map[string]interface{} - extensions map[string]interface{} - deprecated bool - contentEncodingEnabled *bool + doc string + notes string + operation string + readSample interface{} + writeSamples []interface{} + parameters []*Parameter + errorMap map[int]ResponseError + defaultResponse *ResponseError + metadata map[string]interface{} + extensions map[string]interface{} + deprecated bool + contentEncodingEnabled *bool } // Do evaluates each argument with the RouteBuilder itself. @@ -135,9 +136,9 @@ func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) { return p } -// Writes tells what resource type will be written as the response payload. Optional. -func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder { - b.writeSample = sample +// Writes tells which one of the resource types will be written as the response payload. Optional. 
+func (b *RouteBuilder) Writes(samples ...interface{}) *RouteBuilder { + b.writeSamples = samples // oneof return b } @@ -342,39 +343,29 @@ func (b *RouteBuilder) Build() Route { ResponseErrors: b.errorMap, DefaultResponse: b.defaultResponse, ReadSample: b.readSample, - WriteSample: b.writeSample, + WriteSamples: b.writeSamples, Metadata: b.metadata, Deprecated: b.deprecated, contentEncodingEnabled: b.contentEncodingEnabled, allowedMethodsWithoutContentType: b.allowedMethodsWithoutContentType, } + // set WriteSample if one specified + if len(b.writeSamples) == 1 { + route.WriteSample = b.writeSamples[0] + } route.Extensions = b.extensions route.postBuild() return route } -type MergePathStrategyFunc func(rootPath, routePath string) string - -var ( - // behavior >= 3.10 - PathJoinStrategy = func(rootPath, routePath string) string { - return path.Join(rootPath, routePath) - } +// merge two paths using the current (package global) merge path strategy. +func concatPath(rootPath, routePath string) string { - // behavior <= 3.9 - TrimSlashStrategy = func(rootPath, routePath string) string { + if TrimRightSlashEnabled { return strings.TrimRight(rootPath, "/") + "/" + strings.TrimLeft(routePath, "/") + } else { + return path.Join(rootPath, routePath) } - - // MergePathStrategy is the active strategy for merging a Route path when building the routing of all WebServices. - // The value is set to PathJoinStrategy - // PathJoinStrategy is a strategy that is more strict [Security - PRISMA-2022-0227] - MergePathStrategy = PathJoinStrategy -) - -// merge two paths using the current (package global) merge path strategy. -func concatPath(rootPath, routePath string) string { - return MergePathStrategy(rootPath, routePath) } var anonymousFuncCount int32 diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index a8c29bfbd..8969526a6 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -91,11 +91,12 @@ logr design but also left out some parts and changed others: | Adding a name to a logger | `WithName` | no API | | Modify verbosity of log entries in a call chain | `V` | no API | | Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` | +| Pass context for extracting additional values | no API | API variants like `InfoCtx` | The high-level slog API is explicitly meant to be one of many different APIs that can be layered on top of a shared `slog.Handler`. logr is one such -alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr) -package. +alternative API, with [interoperability](#slog-interoperability) provided by +some conversion functions. ### Inspiration @@ -145,24 +146,24 @@ There are implementations for the following logging libraries: ## slog interoperability Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler` -and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and -`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`. +and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and +`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`. As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level -slog API. `slogr` itself leaves that to the caller. +slog API. 
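A hedged sketch of the round trip the paragraph above describes, using the `FromSlogHandler` and `ToSlogHandler` functions this patch introduces (assumes Go >= 1.21 and the standard library slog JSON handler; the output writer and messages are made up for illustration):

```go
package main

import (
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	// Use a slog backend through the logr API.
	logrLogger := logr.FromSlogHandler(slog.NewJSONHandler(os.Stderr, nil))
	logrLogger.Info("hello", "user", "alice")

	// Use that logr.Logger (and whatever sink backs it) through the slog API.
	slogLogger := slog.New(logr.ToSlogHandler(logrLogger))
	slogLogger.Info("hello again", "user", "bob")
}
```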
-## Using a `logr.Sink` as backend for slog +### Using a `logr.LogSink` as backend for slog Ideally, a logr sink implementation should support both logr and slog by -implementing both the normal logr interface(s) and `slogr.SlogSink`. Because +implementing both the normal logr interface(s) and `SlogSink`. Because of a conflict in the parameters of the common `Enabled` method, it is [not possible to implement both slog.Handler and logr.Sink in the same type](https://github.com/golang/go/issues/59110). If both are supported, log calls can go from the high-level APIs to the backend -without the need to convert parameters. `NewLogr` and `NewSlogHandler` can +without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can convert back and forth without adding additional wrappers, with one exception: when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then -`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future +`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future log calls. Such an implementation should also support values that implement specific @@ -187,13 +188,13 @@ Not supporting slog has several drawbacks: These drawbacks are severe enough that applications using a mixture of slog and logr should switch to a different backend. -## Using a `slog.Handler` as backend for logr +### Using a `slog.Handler` as backend for logr Using a plain `slog.Handler` without support for logr works better than the other direction: - All logr verbosity levels can be mapped 1:1 to their corresponding slog level by negating them. -- Stack unwinding is done by the `slogr.SlogSink` and the resulting program +- Stack unwinding is done by the `SlogSink` and the resulting program counter is passed to the `slog.Handler`. - Names added via `Logger.WithName` are gathered and recorded in an additional attribute with `logger` as key and the names separated by slash as value. @@ -205,27 +206,39 @@ ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility with logr implementations without slog support is not important, then `slog.Valuer` is sufficient. -## Context support for slog +### Context support for slog Storing a logger in a `context.Context` is not supported by -slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this -to fill this gap: - - func HandlerFromContext(ctx context.Context) slog.Handler { - logger, err := logr.FromContext(ctx) - if err == nil { - return slogr.NewSlogHandler(logger) - } - return slog.Default().Handler() - } - - func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context { - return logr.NewContext(ctx, slogr.NewLogr(handler)) - } - -The downside is that storing and retrieving a `slog.Handler` needs more -allocations compared to using a `logr.Logger`. Therefore the recommendation is -to use the `logr.Logger` API in code which uses contextual logging. +slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be +used to fill this gap. They store and retrieve a `slog.Logger` pointer +under the same context key that is also used by `NewContext` and +`FromContext` for `logr.Logger` value. + +When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will +automatically convert the `slog.Logger` to a +`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction. + +With this approach, binaries which use either slog or logr are as efficient as +possible with no unnecessary allocations. 
This is also why the API stores a +`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger` +on retrieval would need to allocate one. + +The downside is that switching back and forth needs more allocations. Because +logr is the API that is already in use by different packages, in particular +Kubernetes, the recommendation is to use the `logr.Logger` API in code which +uses contextual logging. + +An alternative to adding values to a logger and storing that logger in the +context is to store the values in the context and to configure a logging +backend to extract those values when emitting log entries. This only works when +log calls are passed the context, which is not supported by the logr API. + +With the slog API, it is possible, but not +required. https://github.com/veqryn/slog-context is a package for slog which +provides additional support code for this approach. It also contains wrappers +for the context functions in logr, so developers who prefer to not use the logr +APIs directly can use those instead and the resulting code will still be +interoperable with logr. ## FAQ diff --git a/vendor/github.com/go-logr/logr/context.go b/vendor/github.com/go-logr/logr/context.go new file mode 100644 index 000000000..de8bcc3ad --- /dev/null +++ b/vendor/github.com/go-logr/logr/context.go @@ -0,0 +1,33 @@ +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +// contextKey is how we find Loggers in a context.Context. With Go < 1.21, +// the value is always a Logger value. With Go >= 1.21, the value can be a +// Logger value or a slog.Logger pointer. +type contextKey struct{} + +// notFoundError exists to carry an IsNotFound method. +type notFoundError struct{} + +func (notFoundError) Error() string { + return "no logr.Logger was present" +} + +func (notFoundError) IsNotFound() bool { + return true +} diff --git a/vendor/github.com/go-logr/logr/context_noslog.go b/vendor/github.com/go-logr/logr/context_noslog.go new file mode 100644 index 000000000..f012f9a18 --- /dev/null +++ b/vendor/github.com/go-logr/logr/context_noslog.go @@ -0,0 +1,49 @@ +//go:build !go1.21 +// +build !go1.21 + +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" +) + +// FromContext returns a Logger from ctx or an error if no Logger is found. 
+func FromContext(ctx context.Context) (Logger, error) { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v, nil + } + + return Logger{}, notFoundError{} +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. +func FromContextOrDiscard(ctx context.Context) Logger { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v + } + + return Discard() +} + +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} diff --git a/vendor/github.com/go-logr/logr/context_slog.go b/vendor/github.com/go-logr/logr/context_slog.go new file mode 100644 index 000000000..065ef0b82 --- /dev/null +++ b/vendor/github.com/go-logr/logr/context_slog.go @@ -0,0 +1,83 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" + "fmt" + "log/slog" +) + +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { + v := ctx.Value(contextKey{}) + if v == nil { + return Logger{}, notFoundError{} + } + + switch v := v.(type) { + case Logger: + return v, nil + case *slog.Logger: + return FromSlogHandler(v.Handler()), nil + default: + // Not reached. + panic(fmt.Sprintf("unexpected value type for logr context key: %T", v)) + } +} + +// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found. +func FromContextAsSlogLogger(ctx context.Context) *slog.Logger { + v := ctx.Value(contextKey{}) + if v == nil { + return nil + } + + switch v := v.(type) { + case Logger: + return slog.New(ToSlogHandler(v)) + case *slog.Logger: + return v + default: + // Not reached. + panic(fmt.Sprintf("unexpected value type for logr context key: %T", v)) + } +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. +func FromContextOrDiscard(ctx context.Context) Logger { + if logger, err := FromContext(ctx); err == nil { + return logger + } + return Discard() +} + +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} + +// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the +// provided slog.Logger. 
+func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index 12e5807cc..fb2f866f4 100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -100,6 +100,11 @@ type Options struct { // details, see docs for Go's time.Layout. TimestampFormat string + // LogInfoLevel tells funcr what key to use to log the info level. + // If not specified, the info level will be logged as "level". + // If this is set to "", the info level will not be logged at all. + LogInfoLevel *string + // Verbosity tells funcr which V logs to produce. Higher values enable // more logs. Info logs at or below this level will be written, while logs // above this level will be discarded. @@ -213,6 +218,10 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { if opts.MaxLogDepth == 0 { opts.MaxLogDepth = defaultMaxLogDepth } + if opts.LogInfoLevel == nil { + opts.LogInfoLevel = new(string) + *opts.LogInfoLevel = "level" + } f := Formatter{ outputFormat: outfmt, prefix: "", @@ -227,12 +236,15 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { // implementation. It should be constructed with NewFormatter. Some of // its methods directly implement logr.LogSink. type Formatter struct { - outputFormat outputFormat - prefix string - values []any - valuesStr string - depth int - opts *Options + outputFormat outputFormat + prefix string + values []any + valuesStr string + parentValuesStr string + depth int + opts *Options + group string // for slog groups + groupDepth int } // outputFormat indicates which outputFormat to use. @@ -253,33 +265,62 @@ func (f Formatter) render(builtins, args []any) string { // Empirically bytes.Buffer is faster than strings.Builder for this. 
buf := bytes.NewBuffer(make([]byte, 0, 1024)) if f.outputFormat == outputJSON { - buf.WriteByte('{') + buf.WriteByte('{') // for the whole line } + vals := builtins if hook := f.opts.RenderBuiltinsHook; hook != nil { vals = hook(f.sanitize(vals)) } f.flatten(buf, vals, false, false) // keys are ours, no need to escape continuing := len(builtins) > 0 - if len(f.valuesStr) > 0 { + + if f.parentValuesStr != "" { if continuing { - if f.outputFormat == outputJSON { - buf.WriteByte(',') - } else { - buf.WriteByte(' ') - } + buf.WriteByte(f.comma()) } + buf.WriteString(f.parentValuesStr) continuing = true + } + + groupDepth := f.groupDepth + if f.group != "" { + if f.valuesStr != "" || len(args) != 0 { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys + buf.WriteByte(f.colon()) + buf.WriteByte('{') // for the group + continuing = false + } else { + // The group was empty + groupDepth-- + } + } + + if f.valuesStr != "" { + if continuing { + buf.WriteByte(f.comma()) + } buf.WriteString(f.valuesStr) + continuing = true } + vals = args if hook := f.opts.RenderArgsHook; hook != nil { vals = hook(f.sanitize(vals)) } f.flatten(buf, vals, continuing, true) // escape user-provided keys + + for i := 0; i < groupDepth; i++ { + buf.WriteByte('}') // for the groups + } + if f.outputFormat == outputJSON { - buf.WriteByte('}') + buf.WriteByte('}') // for the whole line } + return buf.String() } @@ -298,9 +339,16 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc if len(kvList)%2 != 0 { kvList = append(kvList, noValue) } + copied := false for i := 0; i < len(kvList); i += 2 { k, ok := kvList[i].(string) if !ok { + if !copied { + newList := make([]any, len(kvList)) + copy(newList, kvList) + kvList = newList + copied = true + } k = f.nonStringKey(kvList[i]) kvList[i] = k } @@ -308,7 +356,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc if i > 0 || continuing { if f.outputFormat == outputJSON { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } else { // In theory the format could be something we don't understand. In // practice, we control it, so it won't be. 
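For the `LogInfoLevel` option added earlier in this funcr.go diff, a small hedged sketch of its intended use (the key name "severity" and the exact printed shape are illustrative assumptions, not asserted output of this patch):

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	infoKey := "severity" // rename the default "level" key; "" would drop it entirely
	logger := funcr.NewJSON(func(obj string) {
		fmt.Println(obj)
	}, funcr.Options{LogInfoLevel: &infoKey})

	logger.Info("ready", "port", 8080)
	// Expected to print something like:
	// {"logger":"","severity":0,"msg":"ready","port":8080}
}
```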
@@ -316,24 +364,35 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc } } - if escapeKeys { - buf.WriteString(prettyString(k)) - } else { - // this is faster - buf.WriteByte('"') - buf.WriteString(k) - buf.WriteByte('"') - } - if f.outputFormat == outputJSON { - buf.WriteByte(':') - } else { - buf.WriteByte('=') - } + buf.WriteString(f.quoted(k, escapeKeys)) + buf.WriteByte(f.colon()) buf.WriteString(f.pretty(v)) } return kvList } +func (f Formatter) quoted(str string, escape bool) string { + if escape { + return prettyString(str) + } + // this is faster + return `"` + str + `"` +} + +func (f Formatter) comma() byte { + if f.outputFormat == outputJSON { + return ',' + } + return ' ' +} + +func (f Formatter) colon() byte { + if f.outputFormat == outputJSON { + return ':' + } + return '=' +} + func (f Formatter) pretty(value any) string { return f.prettyWithFlags(value, 0, 0) } @@ -407,12 +466,12 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { } for i := 0; i < len(v); i += 2 { if i > 0 { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } k, _ := v[i].(string) // sanitize() above means no need to check success // arbitrary keys might need escaping buf.WriteString(prettyString(k)) - buf.WriteByte(':') + buf.WriteByte(f.colon()) buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1)) } if flags&flagRawStruct == 0 { @@ -481,7 +540,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { continue } if printComma { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } printComma = true // if we got here, we are rendering a field if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" { @@ -492,10 +551,8 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { name = fld.Name } // field names can't contain characters which need escaping - buf.WriteByte('"') - buf.WriteString(name) - buf.WriteByte('"') - buf.WriteByte(':') + buf.WriteString(f.quoted(name, false)) + buf.WriteByte(f.colon()) buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1)) } if flags&flagRawStruct == 0 { @@ -520,7 +577,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { buf.WriteByte('[') for i := 0; i < v.Len(); i++ { if i > 0 { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } e := v.Index(i) buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1)) @@ -534,7 +591,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { i := 0 for it.Next() { if i > 0 { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } // If a map key supports TextMarshaler, use it. keystr := "" @@ -556,7 +613,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { } } buf.WriteString(keystr) - buf.WriteByte(':') + buf.WriteByte(f.colon()) buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1)) i++ } @@ -706,6 +763,53 @@ func (f Formatter) sanitize(kvList []any) []any { return kvList } +// startGroup opens a new group scope (basically a sub-struct), which locks all +// the current saved values and starts them anew. This is needed to satisfy +// slog. +func (f *Formatter) startGroup(group string) { + // Unnamed groups are just inlined. + if group == "" { + return + } + + // Any saved values can no longer be changed. 
+ buf := bytes.NewBuffer(make([]byte, 0, 1024)) + continuing := false + + if f.parentValuesStr != "" { + buf.WriteString(f.parentValuesStr) + continuing = true + } + + if f.group != "" && f.valuesStr != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys + buf.WriteByte(f.colon()) + buf.WriteByte('{') // for the group + continuing = false + } + + if f.valuesStr != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(f.valuesStr) + } + + // NOTE: We don't close the scope here - that's done later, when a log line + // is actually rendered (because we have N scopes to close). + + f.parentValuesStr = buf.String() + + // Start collecting new values. + f.group = group + f.groupDepth++ + f.valuesStr = "" + f.values = nil +} + // Init configures this Formatter from runtime info, such as the call depth // imposed by logr itself. // Note that this receiver is a pointer, so depth can be saved. @@ -740,7 +844,10 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, args if policy := f.opts.LogCaller; policy == All || policy == Info { args = append(args, "caller", f.caller()) } - args = append(args, "level", level, "msg", msg) + if key := *f.opts.LogInfoLevel; key != "" { + args = append(args, key, level) + } + args = append(args, "msg", msg) return prefix, f.render(args, kvList) } diff --git a/vendor/github.com/go-logr/logr/funcr/slogsink.go b/vendor/github.com/go-logr/logr/funcr/slogsink.go new file mode 100644 index 000000000..7bd84761e --- /dev/null +++ b/vendor/github.com/go-logr/logr/funcr/slogsink.go @@ -0,0 +1,105 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package funcr + +import ( + "context" + "log/slog" + + "github.com/go-logr/logr" +) + +var _ logr.SlogSink = &fnlogger{} + +const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink + +func (l fnlogger) Handle(_ context.Context, record slog.Record) error { + kvList := make([]any, 0, 2*record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + kvList = attrToKVs(attr, kvList) + return true + }) + + if record.Level >= slog.LevelError { + l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...) + } else { + level := l.levelFromSlog(record.Level) + l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...) + } + return nil +} + +func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink { + kvList := make([]any, 0, 2*len(attrs)) + for _, attr := range attrs { + kvList = attrToKVs(attr, kvList) + } + l.AddValues(kvList) + return &l +} + +func (l fnlogger) WithGroup(name string) logr.SlogSink { + l.startGroup(name) + return &l +} + +// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups +// and other details of slog. 
+func attrToKVs(attr slog.Attr, kvList []any) []any { + attrVal := attr.Value.Resolve() + if attrVal.Kind() == slog.KindGroup { + groupVal := attrVal.Group() + grpKVs := make([]any, 0, 2*len(groupVal)) + for _, attr := range groupVal { + grpKVs = attrToKVs(attr, grpKVs) + } + if attr.Key == "" { + // slog says we have to inline these + kvList = append(kvList, grpKVs...) + } else { + kvList = append(kvList, attr.Key, PseudoStruct(grpKVs)) + } + } else if attr.Key != "" { + kvList = append(kvList, attr.Key, attrVal.Any()) + } + + return kvList +} + +// levelFromSlog adjusts the level by the logger's verbosity and negates it. +// It ensures that the result is >= 0. This is necessary because the result is +// passed to a LogSink and that API did not historically document whether +// levels could be negative or what that meant. +// +// Some example usage: +// +// logrV0 := getMyLogger() +// logrV2 := logrV0.V(2) +// slogV2 := slog.New(logr.ToSlogHandler(logrV2)) +// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) +// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) +// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) +func (l fnlogger) levelFromSlog(level slog.Level) int { + result := -level + if result < 0 { + result = 0 // because LogSink doesn't expect negative V levels + } + return int(result) +} diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index 2a5075a18..b4428e105 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -207,10 +207,6 @@ limitations under the License. // those. package logr -import ( - "context" -) - // New returns a new Logger instance. This is primarily used by libraries // implementing LogSink, rather than end users. Passing a nil sink will create // a Logger which discards all log lines. @@ -410,45 +406,6 @@ func (l Logger) IsZero() bool { return l.sink == nil } -// contextKey is how we find Loggers in a context.Context. -type contextKey struct{} - -// FromContext returns a Logger from ctx or an error if no Logger is found. -func FromContext(ctx context.Context) (Logger, error) { - if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v, nil - } - - return Logger{}, notFoundError{} -} - -// notFoundError exists to carry an IsNotFound method. -type notFoundError struct{} - -func (notFoundError) Error() string { - return "no logr.Logger was present" -} - -func (notFoundError) IsNotFound() bool { - return true -} - -// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this -// returns a Logger that discards all log messages. -func FromContextOrDiscard(ctx context.Context) Logger { - if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v - } - - return Discard() -} - -// NewContext returns a new Context, derived from ctx, which carries the -// provided Logger. -func NewContext(ctx context.Context, logger Logger) context.Context { - return context.WithValue(ctx, contextKey{}, logger) -} - // RuntimeInfo holds information that the logr "core" library knows which // LogSinks might want to know. 
type RuntimeInfo struct { diff --git a/vendor/github.com/go-logr/logr/slogr/sloghandler.go b/vendor/github.com/go-logr/logr/sloghandler.go similarity index 63% rename from vendor/github.com/go-logr/logr/slogr/sloghandler.go rename to vendor/github.com/go-logr/logr/sloghandler.go index ec6725ce2..82d1ba494 100644 --- a/vendor/github.com/go-logr/logr/slogr/sloghandler.go +++ b/vendor/github.com/go-logr/logr/sloghandler.go @@ -17,18 +17,16 @@ See the License for the specific language governing permissions and limitations under the License. */ -package slogr +package logr import ( "context" "log/slog" - - "github.com/go-logr/logr" ) type slogHandler struct { // May be nil, in which case all logs get discarded. - sink logr.LogSink + sink LogSink // Non-nil if sink is non-nil and implements SlogSink. slogSink SlogSink @@ -54,7 +52,7 @@ func (l *slogHandler) GetLevel() slog.Level { return l.levelBias } -func (l *slogHandler) Enabled(ctx context.Context, level slog.Level) bool { +func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool { return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level))) } @@ -72,9 +70,7 @@ func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error { kvList := make([]any, 0, 2*record.NumAttrs()) record.Attrs(func(attr slog.Attr) bool { - if attr.Key != "" { - kvList = append(kvList, l.addGroupPrefix(attr.Key), attr.Value.Resolve().Any()) - } + kvList = attrToKVs(attr, l.groupPrefix, kvList) return true }) if record.Level >= slog.LevelError { @@ -90,15 +86,15 @@ func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error { // are called by Handle, code in slog gets skipped. // // This offset currently (Go 1.21.0) works for calls through -// slog.New(NewSlogHandler(...)). There's no guarantee that the call +// slog.New(ToSlogHandler(...)). There's no guarantee that the call // chain won't change. Wrapping the handler will also break unwinding. It's // still better than not adjusting at all.... // -// This cannot be done when constructing the handler because NewLogr needs +// This cannot be done when constructing the handler because FromSlogHandler needs // access to the original sink without this adjustment. A second copy would // work, but then WithAttrs would have to be called for both of them. -func (l *slogHandler) sinkWithCallDepth() logr.LogSink { - if sink, ok := l.sink.(logr.CallDepthLogSink); ok { +func (l *slogHandler) sinkWithCallDepth() LogSink { + if sink, ok := l.sink.(CallDepthLogSink); ok { return sink.WithCallDepth(2) } return l.sink @@ -109,60 +105,88 @@ func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler { return l } - copy := *l + clone := *l if l.slogSink != nil { - copy.slogSink = l.slogSink.WithAttrs(attrs) - copy.sink = copy.slogSink + clone.slogSink = l.slogSink.WithAttrs(attrs) + clone.sink = clone.slogSink } else { kvList := make([]any, 0, 2*len(attrs)) for _, attr := range attrs { - if attr.Key != "" { - kvList = append(kvList, l.addGroupPrefix(attr.Key), attr.Value.Resolve().Any()) - } + kvList = attrToKVs(attr, l.groupPrefix, kvList) } - copy.sink = l.sink.WithValues(kvList...) + clone.sink = l.sink.WithValues(kvList...) 
 	}
-	return &copy
+	return &clone
 }
 
 func (l *slogHandler) WithGroup(name string) slog.Handler {
 	if l.sink == nil {
 		return l
 	}
 
-	copy := *l
+	if name == "" {
+		// slog says to inline empty groups
+		return l
+	}
+	clone := *l
 	if l.slogSink != nil {
-		copy.slogSink = l.slogSink.WithGroup(name)
-		copy.sink = l.slogSink
+		clone.slogSink = l.slogSink.WithGroup(name)
+		clone.sink = clone.slogSink
 	} else {
-		copy.groupPrefix = copy.addGroupPrefix(name)
+		clone.groupPrefix = addPrefix(clone.groupPrefix, name)
+	}
+	return &clone
+}
+
+// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups
+// and other details of slog.
+func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any {
+	attrVal := attr.Value.Resolve()
+	if attrVal.Kind() == slog.KindGroup {
+		groupVal := attrVal.Group()
+		grpKVs := make([]any, 0, 2*len(groupVal))
+		prefix := groupPrefix
+		if attr.Key != "" {
+			prefix = addPrefix(groupPrefix, attr.Key)
+		}
+		for _, attr := range groupVal {
+			grpKVs = attrToKVs(attr, prefix, grpKVs)
+		}
+		kvList = append(kvList, grpKVs...)
+	} else if attr.Key != "" {
+		kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any())
 	}
-	return &copy
+
+	return kvList
 }
 
-func (l *slogHandler) addGroupPrefix(name string) string {
-	if l.groupPrefix == "" {
+func addPrefix(prefix, name string) string {
+	if prefix == "" {
 		return name
 	}
-	return l.groupPrefix + groupSeparator + name
+	if name == "" {
+		return prefix
+	}
+	return prefix + groupSeparator + name
 }
 
 // levelFromSlog adjusts the level by the logger's verbosity and negates it.
 // It ensures that the result is >= 0. This is necessary because the result is
-// passed to a logr.LogSink and that API did not historically document whether
+// passed to a LogSink and that API did not historically document whether
 // levels could be negative or what that meant.
 //
 // Some example usage:
-//	logrV0 := getMyLogger()
-//	logrV2 := logrV0.V(2)
-//	slogV2 := slog.New(slogr.NewSlogHandler(logrV2))
-//	slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
-//	slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
-//	slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
+//
+//	logrV0 := getMyLogger()
+//	logrV2 := logrV0.V(2)
+//	slogV2 := slog.New(logr.ToSlogHandler(logrV2))
+//	slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
+//	slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
+//	slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
 func (l *slogHandler) levelFromSlog(level slog.Level) int {
 	result := -level
-	result += l.levelBias // in case the original logr.Logger had a V level
+	result += l.levelBias // in case the original Logger had a V level
 	if result < 0 {
-		result = 0 // because logr.LogSink doesn't expect negative V levels
+		result = 0 // because LogSink doesn't expect negative V levels
 	}
 	return int(result)
 }
diff --git a/vendor/github.com/go-logr/logr/slogr.go b/vendor/github.com/go-logr/logr/slogr.go
new file mode 100644
index 000000000..28a83d024
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/slogr.go
@@ -0,0 +1,100 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" + "log/slog" +) + +// FromSlogHandler returns a Logger which writes to the slog.Handler. +// +// The logr verbosity level is mapped to slog levels such that V(0) becomes +// slog.LevelInfo and V(4) becomes slog.LevelDebug. +func FromSlogHandler(handler slog.Handler) Logger { + if handler, ok := handler.(*slogHandler); ok { + if handler.sink == nil { + return Discard() + } + return New(handler.sink).V(int(handler.levelBias)) + } + return New(&slogSink{handler: handler}) +} + +// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger. +// +// The returned logger writes all records with level >= slog.LevelError as +// error log entries with LogSink.Error, regardless of the verbosity level of +// the Logger: +// +// logger := +// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...) +// +// The level of all other records gets reduced by the verbosity +// level of the Logger and the result is negated. If it happens +// to be negative, then it gets replaced by zero because a LogSink +// is not expected to handled negative levels: +// +// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...) +// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...) +// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...) +// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...) +func ToSlogHandler(logger Logger) slog.Handler { + if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 { + return sink.handler + } + + handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())} + if slogSink, ok := handler.sink.(SlogSink); ok { + handler.slogSink = slogSink + } + return handler +} + +// SlogSink is an optional interface that a LogSink can implement to support +// logging through the slog.Logger or slog.Handler APIs better. It then should +// also support special slog values like slog.Group. When used as a +// slog.Handler, the advantages are: +// +// - stack unwinding gets avoided in favor of logging the pre-recorded PC, +// as intended by slog +// - proper grouping of key/value pairs via WithGroup +// - verbosity levels > slog.LevelInfo can be recorded +// - less overhead +// +// Both APIs (Logger and slog.Logger/Handler) then are supported equally +// well. Developers can pick whatever API suits them better and/or mix +// packages which use either API in the same binary with a common logging +// implementation. +// +// This interface is necessary because the type implementing the LogSink +// interface cannot also implement the slog.Handler interface due to the +// different prototype of the common Enabled method. +// +// An implementation could support both interfaces in two different types, but then +// additional interfaces would be needed to convert between those types in FromSlogHandler +// and ToSlogHandler. 
+type SlogSink interface { + LogSink + + Handle(ctx context.Context, record slog.Record) error + WithAttrs(attrs []slog.Attr) SlogSink + WithGroup(name string) SlogSink +} diff --git a/vendor/github.com/go-logr/logr/slogr/slogr.go b/vendor/github.com/go-logr/logr/slogr/slogr.go index eb519ae23..36432c56f 100644 --- a/vendor/github.com/go-logr/logr/slogr/slogr.go +++ b/vendor/github.com/go-logr/logr/slogr/slogr.go @@ -23,10 +23,11 @@ limitations under the License. // // See the README in the top-level [./logr] package for a discussion of // interoperability. +// +// Deprecated: use the main logr package instead. package slogr import ( - "context" "log/slog" "github.com/go-logr/logr" @@ -34,75 +35,27 @@ import ( // NewLogr returns a logr.Logger which writes to the slog.Handler. // -// The logr verbosity level is mapped to slog levels such that V(0) becomes -// slog.LevelInfo and V(4) becomes slog.LevelDebug. +// Deprecated: use [logr.FromSlogHandler] instead. func NewLogr(handler slog.Handler) logr.Logger { - if handler, ok := handler.(*slogHandler); ok { - if handler.sink == nil { - return logr.Discard() - } - return logr.New(handler.sink).V(int(handler.levelBias)) - } - return logr.New(&slogSink{handler: handler}) + return logr.FromSlogHandler(handler) } // NewSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger. // -// The returned logger writes all records with level >= slog.LevelError as -// error log entries with LogSink.Error, regardless of the verbosity level of -// the logr.Logger: -// -// logger := -// slog.New(NewSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...) -// -// The level of all other records gets reduced by the verbosity -// level of the logr.Logger and the result is negated. If it happens -// to be negative, then it gets replaced by zero because a LogSink -// is not expected to handled negative levels: -// -// slog.New(NewSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...) -// slog.New(NewSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...) -// slog.New(NewSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...) -// slog.New(NewSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...) +// Deprecated: use [logr.ToSlogHandler] instead. func NewSlogHandler(logger logr.Logger) slog.Handler { - if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 { - return sink.handler - } + return logr.ToSlogHandler(logger) +} - handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())} - if slogSink, ok := handler.sink.(SlogSink); ok { - handler.slogSink = slogSink - } - return handler +// ToSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger. +// +// Deprecated: use [logr.ToSlogHandler] instead. +func ToSlogHandler(logger logr.Logger) slog.Handler { + return logr.ToSlogHandler(logger) } // SlogSink is an optional interface that a LogSink can implement to support -// logging through the slog.Logger or slog.Handler APIs better. It then should -// also support special slog values like slog.Group. When used as a -// slog.Handler, the advantages are: +// logging through the slog.Logger or slog.Handler APIs better. 
// -// - stack unwinding gets avoided in favor of logging the pre-recorded PC, -// as intended by slog -// - proper grouping of key/value pairs via WithGroup -// - verbosity levels > slog.LevelInfo can be recorded -// - less overhead -// -// Both APIs (logr.Logger and slog.Logger/Handler) then are supported equally -// well. Developers can pick whatever API suits them better and/or mix -// packages which use either API in the same binary with a common logging -// implementation. -// -// This interface is necessary because the type implementing the LogSink -// interface cannot also implement the slog.Handler interface due to the -// different prototype of the common Enabled method. -// -// An implementation could support both interfaces in two different types, but then -// additional interfaces would be needed to convert between those types in NewLogr -// and NewSlogHandler. -type SlogSink interface { - logr.LogSink - - Handle(ctx context.Context, record slog.Record) error - WithAttrs(attrs []slog.Attr) SlogSink - WithGroup(name string) SlogSink -} +// Deprecated: use [logr.SlogSink] instead. +type SlogSink = logr.SlogSink diff --git a/vendor/github.com/go-logr/logr/slogr/slogsink.go b/vendor/github.com/go-logr/logr/slogsink.go similarity index 82% rename from vendor/github.com/go-logr/logr/slogr/slogsink.go rename to vendor/github.com/go-logr/logr/slogsink.go index 6fbac561d..4060fcbc2 100644 --- a/vendor/github.com/go-logr/logr/slogr/slogsink.go +++ b/vendor/github.com/go-logr/logr/slogsink.go @@ -17,24 +17,22 @@ See the License for the specific language governing permissions and limitations under the License. */ -package slogr +package logr import ( "context" "log/slog" "runtime" "time" - - "github.com/go-logr/logr" ) var ( - _ logr.LogSink = &slogSink{} - _ logr.CallDepthLogSink = &slogSink{} - _ Underlier = &slogSink{} + _ LogSink = &slogSink{} + _ CallDepthLogSink = &slogSink{} + _ Underlier = &slogSink{} ) -// Underlier is implemented by the LogSink returned by NewLogr. +// Underlier is implemented by the LogSink returned by NewFromLogHandler. type Underlier interface { // GetUnderlying returns the Handler used by the LogSink. GetUnderlying() slog.Handler @@ -54,7 +52,7 @@ type slogSink struct { handler slog.Handler } -func (l *slogSink) Init(info logr.RuntimeInfo) { +func (l *slogSink) Init(info RuntimeInfo) { l.callDepth = info.CallDepth } @@ -62,7 +60,7 @@ func (l *slogSink) GetUnderlying() slog.Handler { return l.handler } -func (l *slogSink) WithCallDepth(depth int) logr.LogSink { +func (l *slogSink) WithCallDepth(depth int) LogSink { newLogger := *l newLogger.callDepth += depth return &newLogger @@ -93,18 +91,18 @@ func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interf record.AddAttrs(slog.Any(errKey, err)) } record.Add(kvList...) 
- l.handler.Handle(context.Background(), record) + _ = l.handler.Handle(context.Background(), record) } -func (l slogSink) WithName(name string) logr.LogSink { +func (l slogSink) WithName(name string) LogSink { if l.name != "" { - l.name = l.name + "/" + l.name += "/" } l.name += name return &l } -func (l slogSink) WithValues(kvList ...interface{}) logr.LogSink { +func (l slogSink) WithValues(kvList ...interface{}) LogSink { l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...)) return &l } diff --git a/vendor/github.com/golang/mock/CONTRIBUTORS b/vendor/github.com/golang/mock/CONTRIBUTORS deleted file mode 100644 index def849cab..000000000 --- a/vendor/github.com/golang/mock/CONTRIBUTORS +++ /dev/null @@ -1,37 +0,0 @@ -# This is the official list of people who can contribute (and typically -# have contributed) code to the gomock repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address). -# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# http://code.google.com/legal/individual-cla-v1.0.html -# http://code.google.com/legal/corporate-cla-v1.0.html -# -# The agreement for individuals can be filled out on the web. -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. - -# Names should be added to this file like so: -# Name -# -# An entry with two email addresses specifies that the -# first address should be used in the submit logs and -# that the second address should be recognized as the -# same person when interacting with Rietveld. - -# Please keep the list sorted. 
- -Aaron Jacobs -Alex Reece -David Symonds -Ryan Barrett diff --git a/vendor/github.com/google/cel-go/cel/BUILD.bazel b/vendor/github.com/google/cel-go/cel/BUILD.bazel index 433132113..0905f6353 100644 --- a/vendor/github.com/google/cel-go/cel/BUILD.bazel +++ b/vendor/github.com/google/cel-go/cel/BUILD.bazel @@ -15,6 +15,7 @@ go_library( "macro.go", "options.go", "program.go", + "validator.go", ], importpath = "github.com/google/cel-go/cel", visibility = ["//visibility:public"], @@ -22,15 +23,18 @@ go_library( "//checker:go_default_library", "//checker/decls:go_default_library", "//common:go_default_library", + "//common/ast:go_default_library", "//common/containers:go_default_library", + "//common/decls:go_default_library", + "//common/functions:go_default_library", "//common/operators:go_default_library", "//common/overloads:go_default_library", + "//common/stdlib:go_default_library", "//common/types:go_default_library", "//common/types/pb:go_default_library", "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", "//interpreter:go_default_library", - "//interpreter/functions:go_default_library", "//parser:go_default_library", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", @@ -72,6 +76,8 @@ go_test( "@io_bazel_rules_go//proto/wkt:descriptor_go_proto", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", + "@org_golang_google_protobuf//encoding/prototext:go_default_library", "@org_golang_google_protobuf//types/known/structpb:go_default_library", + "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library", ], ) diff --git a/vendor/github.com/google/cel-go/cel/decls.go b/vendor/github.com/google/cel-go/cel/decls.go index c0624d1e5..0f9501341 100644 --- a/vendor/github.com/google/cel-go/cel/decls.go +++ b/vendor/github.com/google/cel-go/cel/decls.go @@ -16,341 +16,133 @@ package cel import ( "fmt" - "strings" - "github.com/google/cel-go/checker/decls" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/decls" + "github.com/google/cel-go/common/functions" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" - "github.com/google/cel-go/interpreter/functions" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) // Kind indicates a CEL type's kind which is used to differentiate quickly between simple and complex types. -type Kind uint +type Kind = types.Kind const ( // DynKind represents a dynamic type. This kind only exists at type-check time. - DynKind Kind = iota + DynKind Kind = types.DynKind // AnyKind represents a google.protobuf.Any type. This kind only exists at type-check time. - AnyKind + AnyKind = types.AnyKind // BoolKind represents a boolean type. - BoolKind + BoolKind = types.BoolKind // BytesKind represents a bytes type. - BytesKind + BytesKind = types.BytesKind // DoubleKind represents a double type. - DoubleKind + DoubleKind = types.DoubleKind // DurationKind represents a CEL duration type. - DurationKind + DurationKind = types.DurationKind // IntKind represents an integer type. - IntKind + IntKind = types.IntKind // ListKind represents a list type. - ListKind + ListKind = types.ListKind // MapKind represents a map type. - MapKind + MapKind = types.MapKind // NullTypeKind represents a null type. 
- NullTypeKind + NullTypeKind = types.NullTypeKind // OpaqueKind represents an abstract type which has no accessible fields. - OpaqueKind + OpaqueKind = types.OpaqueKind // StringKind represents a string type. - StringKind + StringKind = types.StringKind // StructKind represents a structured object with typed fields. - StructKind + StructKind = types.StructKind // TimestampKind represents a a CEL time type. - TimestampKind + TimestampKind = types.TimestampKind // TypeKind represents the CEL type. - TypeKind + TypeKind = types.TypeKind // TypeParamKind represents a parameterized type whose type name will be resolved at type-check time, if possible. - TypeParamKind + TypeParamKind = types.TypeParamKind // UintKind represents a uint type. - UintKind + UintKind = types.UintKind ) var ( // AnyType represents the google.protobuf.Any type. - AnyType = &Type{ - kind: AnyKind, - runtimeType: types.NewTypeValue("google.protobuf.Any"), - } + AnyType = types.AnyType // BoolType represents the bool type. - BoolType = &Type{ - kind: BoolKind, - runtimeType: types.BoolType, - } + BoolType = types.BoolType // BytesType represents the bytes type. - BytesType = &Type{ - kind: BytesKind, - runtimeType: types.BytesType, - } + BytesType = types.BytesType // DoubleType represents the double type. - DoubleType = &Type{ - kind: DoubleKind, - runtimeType: types.DoubleType, - } + DoubleType = types.DoubleType // DurationType represents the CEL duration type. - DurationType = &Type{ - kind: DurationKind, - runtimeType: types.DurationType, - } + DurationType = types.DurationType // DynType represents a dynamic CEL type whose type will be determined at runtime from context. - DynType = &Type{ - kind: DynKind, - runtimeType: types.NewTypeValue("dyn"), - } + DynType = types.DynType // IntType represents the int type. - IntType = &Type{ - kind: IntKind, - runtimeType: types.IntType, - } + IntType = types.IntType // NullType represents the type of a null value. - NullType = &Type{ - kind: NullTypeKind, - runtimeType: types.NullType, - } + NullType = types.NullType // StringType represents the string type. - StringType = &Type{ - kind: StringKind, - runtimeType: types.StringType, - } + StringType = types.StringType // TimestampType represents the time type. - TimestampType = &Type{ - kind: TimestampKind, - runtimeType: types.TimestampType, - } + TimestampType = types.TimestampType // TypeType represents a CEL type - TypeType = &Type{ - kind: TypeKind, - runtimeType: types.TypeType, - } + TypeType = types.TypeType // UintType represents a uint type. - UintType = &Type{ - kind: UintKind, - runtimeType: types.UintType, - } + UintType = types.UintType + + // function references for instantiating new types. + + // ListType creates an instances of a list type value with the provided element type. + ListType = types.NewListType + // MapType creates an instance of a map type value with the provided key and value types. + MapType = types.NewMapType + // NullableType creates an instance of a nullable type with the provided wrapped type. + // + // Note: only primitive types are supported as wrapped types. + NullableType = types.NewNullableType + // OptionalType creates an abstract parameterized type instance corresponding to CEL's notion of optional. + OptionalType = types.NewOptionalType + // OpaqueType creates an abstract parameterized type with a given name. + OpaqueType = types.NewOpaqueType + // ObjectType creates a type references to an externally defined type, e.g. a protobuf message type. 
+ ObjectType = types.NewObjectType + // TypeParamType creates a parameterized type instance. + TypeParamType = types.NewTypeParamType ) // Type holds a reference to a runtime type with an optional type-checked set of type parameters. -type Type struct { - // kind indicates general category of the type. - kind Kind - - // runtimeType is the runtime type of the declaration. - runtimeType ref.Type - - // parameters holds the optional type-checked set of type parameters that are used during static analysis. - parameters []*Type - - // isAssignableType function determines whether one type is assignable to this type. - // A nil value for the isAssignableType function falls back to equality of kind, runtimeType, and parameters. - isAssignableType func(other *Type) bool - - // isAssignableRuntimeType function determines whether the runtime type (with erasure) is assignable to this type. - // A nil value for the isAssignableRuntimeType function falls back to the equality of the type or type name. - isAssignableRuntimeType func(other ref.Val) bool -} - -// IsAssignableType determines whether the current type is type-check assignable from the input fromType. -func (t *Type) IsAssignableType(fromType *Type) bool { - if t.isAssignableType != nil { - return t.isAssignableType(fromType) - } - return t.defaultIsAssignableType(fromType) -} - -// IsAssignableRuntimeType determines whether the current type is runtime assignable from the input runtimeType. -// -// At runtime, parameterized types are erased and so a function which type-checks to support a map(string, string) -// will have a runtime assignable type of a map. -func (t *Type) IsAssignableRuntimeType(val ref.Val) bool { - if t.isAssignableRuntimeType != nil { - return t.isAssignableRuntimeType(val) - } - return t.defaultIsAssignableRuntimeType(val) -} - -// String returns a human-readable definition of the type name. -func (t *Type) String() string { - if len(t.parameters) == 0 { - return t.runtimeType.TypeName() - } - params := make([]string, len(t.parameters)) - for i, p := range t.parameters { - params[i] = p.String() - } - return fmt.Sprintf("%s(%s)", t.runtimeType.TypeName(), strings.Join(params, ", ")) -} - -// isDyn indicates whether the type is dynamic in any way. -func (t *Type) isDyn() bool { - return t.kind == DynKind || t.kind == AnyKind || t.kind == TypeParamKind -} - -// equals indicates whether two types have the same kind, type name, and parameters. -func (t *Type) equals(other *Type) bool { - if t.kind != other.kind || - t.runtimeType.TypeName() != other.runtimeType.TypeName() || - len(t.parameters) != len(other.parameters) { - return false - } - for i, p := range t.parameters { - if !p.equals(other.parameters[i]) { - return false - } - } - return true -} - -// defaultIsAssignableType provides the standard definition of what it means for one type to be assignable to another -// where any of the following may return a true result: -// - The from types are the same instance -// - The target type is dynamic -// - The fromType has the same kind and type name as the target type, and all parameters of the target type -// -// are IsAssignableType() from the parameters of the fromType. 
-func (t *Type) defaultIsAssignableType(fromType *Type) bool { - if t == fromType || t.isDyn() { - return true - } - if t.kind != fromType.kind || - t.runtimeType.TypeName() != fromType.runtimeType.TypeName() || - len(t.parameters) != len(fromType.parameters) { - return false - } - for i, tp := range t.parameters { - fp := fromType.parameters[i] - if !tp.IsAssignableType(fp) { - return false - } - } - return true -} - -// defaultIsAssignableRuntimeType inspects the type and in the case of list and map elements, the key and element types -// to determine whether a ref.Val is assignable to the declared type for a function signature. -func (t *Type) defaultIsAssignableRuntimeType(val ref.Val) bool { - valType := val.Type() - if !(t.runtimeType == valType || t.isDyn() || t.runtimeType.TypeName() == valType.TypeName()) { - return false - } - switch t.runtimeType { - case types.ListType: - elemType := t.parameters[0] - l := val.(traits.Lister) - if l.Size() == types.IntZero { - return true - } - it := l.Iterator() - for it.HasNext() == types.True { - elemVal := it.Next() - return elemType.IsAssignableRuntimeType(elemVal) - } - case types.MapType: - keyType := t.parameters[0] - elemType := t.parameters[1] - m := val.(traits.Mapper) - if m.Size() == types.IntZero { - return true - } - it := m.Iterator() - for it.HasNext() == types.True { - keyVal := it.Next() - elemVal := m.Get(keyVal) - return keyType.IsAssignableRuntimeType(keyVal) && elemType.IsAssignableRuntimeType(elemVal) - } - } - return true -} - -// ListType creates an instances of a list type value with the provided element type. -func ListType(elemType *Type) *Type { - return &Type{ - kind: ListKind, - runtimeType: types.ListType, - parameters: []*Type{elemType}, - } -} - -// MapType creates an instance of a map type value with the provided key and value types. -func MapType(keyType, valueType *Type) *Type { - return &Type{ - kind: MapKind, - runtimeType: types.MapType, - parameters: []*Type{keyType, valueType}, - } -} - -// NullableType creates an instance of a nullable type with the provided wrapped type. -// -// Note: only primitive types are supported as wrapped types. -func NullableType(wrapped *Type) *Type { - return &Type{ - kind: wrapped.kind, - runtimeType: wrapped.runtimeType, - parameters: wrapped.parameters, - isAssignableType: func(other *Type) bool { - return NullType.IsAssignableType(other) || wrapped.IsAssignableType(other) - }, - isAssignableRuntimeType: func(other ref.Val) bool { - return NullType.IsAssignableRuntimeType(other) || wrapped.IsAssignableRuntimeType(other) - }, - } -} - -// OptionalType creates an abstract parameterized type instance corresponding to CEL's notion of optional. -func OptionalType(param *Type) *Type { - return OpaqueType("optional", param) -} - -// OpaqueType creates an abstract parameterized type with a given name. -func OpaqueType(name string, params ...*Type) *Type { - return &Type{ - kind: OpaqueKind, - runtimeType: types.NewTypeValue(name), - parameters: params, - } -} - -// ObjectType creates a type references to an externally defined type, e.g. a protobuf message type. -func ObjectType(typeName string) *Type { - return &Type{ - kind: StructKind, - runtimeType: types.NewObjectTypeValue(typeName), - } -} +type Type = types.Type -// TypeParamType creates a parameterized type instance. 
-func TypeParamType(paramName string) *Type { - return &Type{ - kind: TypeParamKind, - runtimeType: types.NewTypeValue(paramName), +// Constant creates an instances of an identifier declaration with a variable name, type, and value. +func Constant(name string, t *Type, v ref.Val) EnvOption { + return func(e *Env) (*Env, error) { + e.variables = append(e.variables, decls.NewConstant(name, t, v)) + return e, nil } } // Variable creates an instance of a variable declaration with a variable name and type. func Variable(name string, t *Type) EnvOption { return func(e *Env) (*Env, error) { - et, err := TypeToExprType(t) - if err != nil { - return nil, err - } - e.declarations = append(e.declarations, decls.NewVar(name, et)) + e.variables = append(e.variables, decls.NewVariable(name, t)) return e, nil } } @@ -386,53 +178,30 @@ func Variable(name string, t *Type) EnvOption { // overload as CEL can only make inferences by type-name regarding such types. func Function(name string, opts ...FunctionOpt) EnvOption { return func(e *Env) (*Env, error) { - fn := &functionDecl{ - name: name, - overloads: []*overloadDecl{}, - options: opts, - } - err := fn.init() - if err != nil { - return nil, err - } - _, err = functionDeclToExprDecl(fn) + fn, err := decls.NewFunction(name, opts...) if err != nil { return nil, err } - if existing, found := e.functions[fn.name]; found { - fn, err = existing.merge(fn) + if existing, found := e.functions[fn.Name()]; found { + fn, err = existing.Merge(fn) if err != nil { return nil, err } } - e.functions[name] = fn + e.functions[fn.Name()] = fn return e, nil } } // FunctionOpt defines a functional option for configuring a function declaration. -type FunctionOpt func(*functionDecl) (*functionDecl, error) +type FunctionOpt = decls.FunctionOpt // SingletonUnaryBinding creates a singleton function definition to be used for all function overloads. // // Note, this approach works well if operand is expected to have a specific trait which it implements, // e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. func SingletonUnaryBinding(fn functions.UnaryOp, traits ...int) FunctionOpt { - trait := 0 - for _, t := range traits { - trait = trait | t - } - return func(f *functionDecl) (*functionDecl, error) { - if f.singleton != nil { - return nil, fmt.Errorf("function already has a singleton binding: %s", f.name) - } - f.singleton = &functions.Overload{ - Operator: f.name, - Unary: fn, - OperandTrait: trait, - } - return f, nil - } + return decls.SingletonUnaryBinding(fn, traits...) } // SingletonBinaryImpl creates a singleton function definition to be used with all function overloads. @@ -442,7 +211,7 @@ func SingletonUnaryBinding(fn functions.UnaryOp, traits ...int) FunctionOpt { // // Deprecated: use SingletonBinaryBinding func SingletonBinaryImpl(fn functions.BinaryOp, traits ...int) FunctionOpt { - return SingletonBinaryBinding(fn, traits...) + return decls.SingletonBinaryBinding(fn, traits...) } // SingletonBinaryBinding creates a singleton function definition to be used with all function overloads. @@ -450,21 +219,7 @@ func SingletonBinaryImpl(fn functions.BinaryOp, traits ...int) FunctionOpt { // Note, this approach works well if operand is expected to have a specific trait which it implements, // e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. 
func SingletonBinaryBinding(fn functions.BinaryOp, traits ...int) FunctionOpt { - trait := 0 - for _, t := range traits { - trait = trait | t - } - return func(f *functionDecl) (*functionDecl, error) { - if f.singleton != nil { - return nil, fmt.Errorf("function already has a singleton binding: %s", f.name) - } - f.singleton = &functions.Overload{ - Operator: f.name, - Binary: fn, - OperandTrait: trait, - } - return f, nil - } + return decls.SingletonBinaryBinding(fn, traits...) } // SingletonFunctionImpl creates a singleton function definition to be used with all function overloads. @@ -474,7 +229,7 @@ func SingletonBinaryBinding(fn functions.BinaryOp, traits ...int) FunctionOpt { // // Deprecated: use SingletonFunctionBinding func SingletonFunctionImpl(fn functions.FunctionOp, traits ...int) FunctionOpt { - return SingletonFunctionBinding(fn, traits...) + return decls.SingletonFunctionBinding(fn, traits...) } // SingletonFunctionBinding creates a singleton function definition to be used with all function overloads. @@ -482,21 +237,13 @@ func SingletonFunctionImpl(fn functions.FunctionOp, traits ...int) FunctionOpt { // Note, this approach works well if operand is expected to have a specific trait which it implements, // e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. func SingletonFunctionBinding(fn functions.FunctionOp, traits ...int) FunctionOpt { - trait := 0 - for _, t := range traits { - trait = trait | t - } - return func(f *functionDecl) (*functionDecl, error) { - if f.singleton != nil { - return nil, fmt.Errorf("function already has a singleton binding: %s", f.name) - } - f.singleton = &functions.Overload{ - Operator: f.name, - Function: fn, - OperandTrait: trait, - } - return f, nil - } + return decls.SingletonFunctionBinding(fn, traits...) +} + +// DisableDeclaration disables the function signatures, effectively removing them from the type-check +// environment while preserving the runtime bindings. +func DisableDeclaration(value bool) FunctionOpt { + return decls.DisableDeclaration(value) } // Overload defines a new global overload with an overload id, argument types, and result type. Through the @@ -506,7 +253,7 @@ func SingletonFunctionBinding(fn functions.FunctionOp, traits ...int) FunctionOp // Note: function bindings should be commonly configured with Overload instances whereas operand traits and // strict-ness should be rare occurrences. func Overload(overloadID string, args []*Type, resultType *Type, opts ...OverloadOpt) FunctionOpt { - return newOverload(overloadID, false, args, resultType, opts...) + return decls.Overload(overloadID, args, resultType, opts...) } // MemberOverload defines a new receiver-style overload (or member function) with an overload id, argument types, @@ -516,609 +263,51 @@ func Overload(overloadID string, args []*Type, resultType *Type, opts ...Overloa // Note: function bindings should be commonly configured with Overload instances whereas operand traits and // strict-ness should be rare occurrences. func MemberOverload(overloadID string, args []*Type, resultType *Type, opts ...OverloadOpt) FunctionOpt { - return newOverload(overloadID, true, args, resultType, opts...) + return decls.MemberOverload(overloadID, args, resultType, opts...) } // OverloadOpt is a functional option for configuring a function overload. -type OverloadOpt func(*overloadDecl) (*overloadDecl, error) +type OverloadOpt = decls.OverloadOpt // UnaryBinding provides the implementation of a unary overload. 
The provided function is protected by a runtime // type-guard which ensures runtime type agreement between the overload signature and runtime argument types. func UnaryBinding(binding functions.UnaryOp) OverloadOpt { - return func(o *overloadDecl) (*overloadDecl, error) { - if o.hasBinding() { - return nil, fmt.Errorf("overload already has a binding: %s", o.id) - } - if len(o.argTypes) != 1 { - return nil, fmt.Errorf("unary function bound to non-unary overload: %s", o.id) - } - o.unaryOp = binding - return o, nil - } + return decls.UnaryBinding(binding) } // BinaryBinding provides the implementation of a binary overload. The provided function is protected by a runtime // type-guard which ensures runtime type agreement between the overload signature and runtime argument types. func BinaryBinding(binding functions.BinaryOp) OverloadOpt { - return func(o *overloadDecl) (*overloadDecl, error) { - if o.hasBinding() { - return nil, fmt.Errorf("overload already has a binding: %s", o.id) - } - if len(o.argTypes) != 2 { - return nil, fmt.Errorf("binary function bound to non-binary overload: %s", o.id) - } - o.binaryOp = binding - return o, nil - } + return decls.BinaryBinding(binding) } // FunctionBinding provides the implementation of a variadic overload. The provided function is protected by a runtime // type-guard which ensures runtime type agreement between the overload signature and runtime argument types. func FunctionBinding(binding functions.FunctionOp) OverloadOpt { - return func(o *overloadDecl) (*overloadDecl, error) { - if o.hasBinding() { - return nil, fmt.Errorf("overload already has a binding: %s", o.id) - } - o.functionOp = binding - return o, nil - } + return decls.FunctionBinding(binding) } // OverloadIsNonStrict enables the function to be called with error and unknown argument values. // // Note: do not use this option unless absoluately necessary as it should be an uncommon feature. func OverloadIsNonStrict() OverloadOpt { - return func(o *overloadDecl) (*overloadDecl, error) { - o.nonStrict = true - return o, nil - } + return decls.OverloadIsNonStrict() } // OverloadOperandTrait configures a set of traits which the first argument to the overload must implement in order to be // successfully invoked. func OverloadOperandTrait(trait int) OverloadOpt { - return func(o *overloadDecl) (*overloadDecl, error) { - o.operandTrait = trait - return o, nil - } -} - -type functionDecl struct { - name string - overloads []*overloadDecl - options []FunctionOpt - singleton *functions.Overload - initialized bool -} - -// init ensures that a function's options have been applied. -// -// This function is used in both the environment configuration and internally for function merges. -func (f *functionDecl) init() error { - if f.initialized { - return nil - } - f.initialized = true - - var err error - for _, opt := range f.options { - f, err = opt(f) - if err != nil { - return err - } - } - if len(f.overloads) == 0 { - return fmt.Errorf("function %s must have at least one overload", f.name) - } - return nil -} - -// bindings produces a set of function bindings, if any are defined. 
-func (f *functionDecl) bindings() ([]*functions.Overload, error) { - overloads := []*functions.Overload{} - nonStrict := false - for _, o := range f.overloads { - if o.hasBinding() { - overload := &functions.Overload{ - Operator: o.id, - Unary: o.guardedUnaryOp(f.name), - Binary: o.guardedBinaryOp(f.name), - Function: o.guardedFunctionOp(f.name), - OperandTrait: o.operandTrait, - NonStrict: o.nonStrict, - } - overloads = append(overloads, overload) - nonStrict = nonStrict || o.nonStrict - } - } - if f.singleton != nil { - if len(overloads) != 0 { - return nil, fmt.Errorf("singleton function incompatible with specialized overloads: %s", f.name) - } - return []*functions.Overload{ - { - Operator: f.name, - Unary: f.singleton.Unary, - Binary: f.singleton.Binary, - Function: f.singleton.Function, - OperandTrait: f.singleton.OperandTrait, - }, - }, nil - } - if len(overloads) == 0 { - return overloads, nil - } - // Single overload. Replicate an entry for it using the function name as well. - if len(overloads) == 1 { - if overloads[0].Operator == f.name { - return overloads, nil - } - return append(overloads, &functions.Overload{ - Operator: f.name, - Unary: overloads[0].Unary, - Binary: overloads[0].Binary, - Function: overloads[0].Function, - NonStrict: overloads[0].NonStrict, - OperandTrait: overloads[0].OperandTrait, - }), nil - } - // All of the defined overloads are wrapped into a top-level function which - // performs dynamic dispatch to the proper overload based on the argument types. - bindings := append([]*functions.Overload{}, overloads...) - funcDispatch := func(args ...ref.Val) ref.Val { - for _, o := range f.overloads { - if !o.matchesRuntimeSignature(args...) { - continue - } - switch len(args) { - case 1: - if o.unaryOp != nil { - return o.unaryOp(args[0]) - } - case 2: - if o.binaryOp != nil { - return o.binaryOp(args[0], args[1]) - } - } - if o.functionOp != nil { - return o.functionOp(args...) - } - // eventually this will fall through to the noSuchOverload below. - } - return noSuchOverload(f.name, args...) - } - function := &functions.Overload{ - Operator: f.name, - Function: funcDispatch, - NonStrict: nonStrict, - } - return append(bindings, function), nil -} - -// merge one function declaration with another. -// -// If a function is extended, by say adding new overloads to an existing function, then it is merged with the -// prior definition of the function at which point its overloads must not collide with pre-existing overloads -// and its bindings (singleton, or per-overload) must not conflict with previous definitions either. -func (f *functionDecl) merge(other *functionDecl) (*functionDecl, error) { - if f.name != other.name { - return nil, fmt.Errorf("cannot merge unrelated functions. 
%s and %s", f.name, other.name) - } - err := f.init() - if err != nil { - return nil, err - } - err = other.init() - if err != nil { - return nil, err - } - merged := &functionDecl{ - name: f.name, - overloads: make([]*overloadDecl, len(f.overloads)), - options: []FunctionOpt{}, - initialized: true, - singleton: f.singleton, - } - copy(merged.overloads, f.overloads) - for _, o := range other.overloads { - err := merged.addOverload(o) - if err != nil { - return nil, fmt.Errorf("function declaration merge failed: %v", err) - } - } - if other.singleton != nil { - if merged.singleton != nil { - return nil, fmt.Errorf("function already has a binding: %s", f.name) - } - merged.singleton = other.singleton - } - return merged, nil -} - -// addOverload ensures that the new overload does not collide with an existing overload signature; -// however, if the function signatures are identical, the implementation may be rewritten as its -// difficult to compare functions by object identity. -func (f *functionDecl) addOverload(overload *overloadDecl) error { - for index, o := range f.overloads { - if o.id != overload.id && o.signatureOverlaps(overload) { - return fmt.Errorf("overload signature collision in function %s: %s collides with %s", f.name, o.id, overload.id) - } - if o.id == overload.id { - if o.signatureEquals(overload) && o.nonStrict == overload.nonStrict { - // Allow redefinition of an overload implementation so long as the signatures match. - f.overloads[index] = overload - return nil - } - return fmt.Errorf("overload redefinition in function. %s: %s has multiple definitions", f.name, o.id) - } - } - f.overloads = append(f.overloads, overload) - return nil -} - -func noSuchOverload(funcName string, args ...ref.Val) ref.Val { - argTypes := make([]string, len(args)) - for i, arg := range args { - argTypes[i] = arg.Type().TypeName() - } - signature := strings.Join(argTypes, ", ") - return types.NewErr("no such overload: %s(%s)", funcName, signature) -} - -// overloadDecl contains all of the relevant information regarding a specific function overload. -type overloadDecl struct { - id string - argTypes []*Type - resultType *Type - memberFunction bool - - // binding options, optional but encouraged. - unaryOp functions.UnaryOp - binaryOp functions.BinaryOp - functionOp functions.FunctionOp - - // behavioral options, uncommon - nonStrict bool - operandTrait int -} - -func (o *overloadDecl) hasBinding() bool { - return o.unaryOp != nil || o.binaryOp != nil || o.functionOp != nil -} - -// guardedUnaryOp creates an invocation guard around the provided unary operator, if one is defined. -func (o *overloadDecl) guardedUnaryOp(funcName string) functions.UnaryOp { - if o.unaryOp == nil { - return nil - } - return func(arg ref.Val) ref.Val { - if !o.matchesRuntimeUnarySignature(arg) { - return noSuchOverload(funcName, arg) - } - return o.unaryOp(arg) - } -} - -// guardedBinaryOp creates an invocation guard around the provided binary operator, if one is defined. -func (o *overloadDecl) guardedBinaryOp(funcName string) functions.BinaryOp { - if o.binaryOp == nil { - return nil - } - return func(arg1, arg2 ref.Val) ref.Val { - if !o.matchesRuntimeBinarySignature(arg1, arg2) { - return noSuchOverload(funcName, arg1, arg2) - } - return o.binaryOp(arg1, arg2) - } -} - -// guardedFunctionOp creates an invocation guard around the provided variadic function binding, if one is provided. 
-func (o *overloadDecl) guardedFunctionOp(funcName string) functions.FunctionOp { - if o.functionOp == nil { - return nil - } - return func(args ...ref.Val) ref.Val { - if !o.matchesRuntimeSignature(args...) { - return noSuchOverload(funcName, args...) - } - return o.functionOp(args...) - } -} - -// matchesRuntimeUnarySignature indicates whether the argument type is runtime assiganble to the overload's expected argument. -func (o *overloadDecl) matchesRuntimeUnarySignature(arg ref.Val) bool { - if o.nonStrict && types.IsUnknownOrError(arg) { - return true - } - return o.argTypes[0].IsAssignableRuntimeType(arg) && (o.operandTrait == 0 || arg.Type().HasTrait(o.operandTrait)) -} - -// matchesRuntimeBinarySignature indicates whether the argument types are runtime assiganble to the overload's expected arguments. -func (o *overloadDecl) matchesRuntimeBinarySignature(arg1, arg2 ref.Val) bool { - if o.nonStrict { - if types.IsUnknownOrError(arg1) { - return types.IsUnknownOrError(arg2) || o.argTypes[1].IsAssignableRuntimeType(arg2) - } - } else if !o.argTypes[1].IsAssignableRuntimeType(arg2) { - return false - } - return o.argTypes[0].IsAssignableRuntimeType(arg1) && (o.operandTrait == 0 || arg1.Type().HasTrait(o.operandTrait)) -} - -// matchesRuntimeSignature indicates whether the argument types are runtime assiganble to the overload's expected arguments. -func (o *overloadDecl) matchesRuntimeSignature(args ...ref.Val) bool { - if len(args) != len(o.argTypes) { - return false - } - if len(args) == 0 { - return true - } - allArgsMatch := true - for i, arg := range args { - if o.nonStrict && types.IsUnknownOrError(arg) { - continue - } - allArgsMatch = allArgsMatch && o.argTypes[i].IsAssignableRuntimeType(arg) - } - - arg := args[0] - return allArgsMatch && (o.operandTrait == 0 || (o.nonStrict && types.IsUnknownOrError(arg)) || arg.Type().HasTrait(o.operandTrait)) -} - -// signatureEquals indicates whether one overload has an identical signature to another overload. -// -// Providing a duplicate signature is not an issue, but an overloapping signature is problematic. -func (o *overloadDecl) signatureEquals(other *overloadDecl) bool { - if o.id != other.id || o.memberFunction != other.memberFunction || len(o.argTypes) != len(other.argTypes) { - return false - } - for i, at := range o.argTypes { - oat := other.argTypes[i] - if !at.equals(oat) { - return false - } - } - return o.resultType.equals(other.resultType) -} - -// signatureOverlaps indicates whether one overload has an overlapping signature with another overload. -// -// The 'other' overload must first be checked for equality before determining whether it overlaps in order to be completely accurate. 
-func (o *overloadDecl) signatureOverlaps(other *overloadDecl) bool { - if o.memberFunction != other.memberFunction || len(o.argTypes) != len(other.argTypes) { - return false - } - argsOverlap := true - for i, argType := range o.argTypes { - otherArgType := other.argTypes[i] - argsOverlap = argsOverlap && - (argType.IsAssignableType(otherArgType) || - otherArgType.IsAssignableType(argType)) - } - return argsOverlap -} - -func newOverload(overloadID string, memberFunction bool, args []*Type, resultType *Type, opts ...OverloadOpt) FunctionOpt { - return func(f *functionDecl) (*functionDecl, error) { - overload := &overloadDecl{ - id: overloadID, - argTypes: args, - resultType: resultType, - memberFunction: memberFunction, - } - var err error - for _, opt := range opts { - overload, err = opt(overload) - if err != nil { - return nil, err - } - } - err = f.addOverload(overload) - if err != nil { - return nil, err - } - return f, nil - } -} - -func maybeWrapper(t *Type, pbType *exprpb.Type) *exprpb.Type { - if t.IsAssignableType(NullType) { - return decls.NewWrapperType(pbType) - } - return pbType + return decls.OverloadOperandTrait(trait) } // TypeToExprType converts a CEL-native type representation to a protobuf CEL Type representation. func TypeToExprType(t *Type) (*exprpb.Type, error) { - switch t.kind { - case AnyKind: - return decls.Any, nil - case BoolKind: - return maybeWrapper(t, decls.Bool), nil - case BytesKind: - return maybeWrapper(t, decls.Bytes), nil - case DoubleKind: - return maybeWrapper(t, decls.Double), nil - case DurationKind: - return decls.Duration, nil - case DynKind: - return decls.Dyn, nil - case IntKind: - return maybeWrapper(t, decls.Int), nil - case ListKind: - et, err := TypeToExprType(t.parameters[0]) - if err != nil { - return nil, err - } - return decls.NewListType(et), nil - case MapKind: - kt, err := TypeToExprType(t.parameters[0]) - if err != nil { - return nil, err - } - vt, err := TypeToExprType(t.parameters[1]) - if err != nil { - return nil, err - } - return decls.NewMapType(kt, vt), nil - case NullTypeKind: - return decls.Null, nil - case OpaqueKind: - params := make([]*exprpb.Type, len(t.parameters)) - for i, p := range t.parameters { - pt, err := TypeToExprType(p) - if err != nil { - return nil, err - } - params[i] = pt - } - return decls.NewAbstractType(t.runtimeType.TypeName(), params...), nil - case StringKind: - return maybeWrapper(t, decls.String), nil - case StructKind: - switch t.runtimeType.TypeName() { - case "google.protobuf.Any": - return decls.Any, nil - case "google.protobuf.Duration": - return decls.Duration, nil - case "google.protobuf.Timestamp": - return decls.Timestamp, nil - case "google.protobuf.Value": - return decls.Dyn, nil - case "google.protobuf.ListValue": - return decls.NewListType(decls.Dyn), nil - case "google.protobuf.Struct": - return decls.NewMapType(decls.String, decls.Dyn), nil - case "google.protobuf.BoolValue": - return decls.NewWrapperType(decls.Bool), nil - case "google.protobuf.BytesValue": - return decls.NewWrapperType(decls.Bytes), nil - case "google.protobuf.DoubleValue", "google.protobuf.FloatValue": - return decls.NewWrapperType(decls.Double), nil - case "google.protobuf.Int32Value", "google.protobuf.Int64Value": - return decls.NewWrapperType(decls.Int), nil - case "google.protobuf.StringValue": - return decls.NewWrapperType(decls.String), nil - case "google.protobuf.UInt32Value", "google.protobuf.UInt64Value": - return decls.NewWrapperType(decls.Uint), nil - default: - return 
decls.NewObjectType(t.runtimeType.TypeName()), nil - } - case TimestampKind: - return decls.Timestamp, nil - case TypeParamKind: - return decls.NewTypeParamType(t.runtimeType.TypeName()), nil - case TypeKind: - return decls.NewTypeType(decls.Dyn), nil - case UintKind: - return maybeWrapper(t, decls.Uint), nil - } - return nil, fmt.Errorf("missing type conversion to proto: %v", t) + return types.TypeToExprType(t) } // ExprTypeToType converts a protobuf CEL type representation to a CEL-native type representation. func ExprTypeToType(t *exprpb.Type) (*Type, error) { - switch t.GetTypeKind().(type) { - case *exprpb.Type_Dyn: - return DynType, nil - case *exprpb.Type_AbstractType_: - paramTypes := make([]*Type, len(t.GetAbstractType().GetParameterTypes())) - for i, p := range t.GetAbstractType().GetParameterTypes() { - pt, err := ExprTypeToType(p) - if err != nil { - return nil, err - } - paramTypes[i] = pt - } - return OpaqueType(t.GetAbstractType().GetName(), paramTypes...), nil - case *exprpb.Type_ListType_: - et, err := ExprTypeToType(t.GetListType().GetElemType()) - if err != nil { - return nil, err - } - return ListType(et), nil - case *exprpb.Type_MapType_: - kt, err := ExprTypeToType(t.GetMapType().GetKeyType()) - if err != nil { - return nil, err - } - vt, err := ExprTypeToType(t.GetMapType().GetValueType()) - if err != nil { - return nil, err - } - return MapType(kt, vt), nil - case *exprpb.Type_MessageType: - switch t.GetMessageType() { - case "google.protobuf.Any": - return AnyType, nil - case "google.protobuf.Duration": - return DurationType, nil - case "google.protobuf.Timestamp": - return TimestampType, nil - case "google.protobuf.Value": - return DynType, nil - case "google.protobuf.ListValue": - return ListType(DynType), nil - case "google.protobuf.Struct": - return MapType(StringType, DynType), nil - case "google.protobuf.BoolValue": - return NullableType(BoolType), nil - case "google.protobuf.BytesValue": - return NullableType(BytesType), nil - case "google.protobuf.DoubleValue", "google.protobuf.FloatValue": - return NullableType(DoubleType), nil - case "google.protobuf.Int32Value", "google.protobuf.Int64Value": - return NullableType(IntType), nil - case "google.protobuf.StringValue": - return NullableType(StringType), nil - case "google.protobuf.UInt32Value", "google.protobuf.UInt64Value": - return NullableType(UintType), nil - default: - return ObjectType(t.GetMessageType()), nil - } - case *exprpb.Type_Null: - return NullType, nil - case *exprpb.Type_Primitive: - switch t.GetPrimitive() { - case exprpb.Type_BOOL: - return BoolType, nil - case exprpb.Type_BYTES: - return BytesType, nil - case exprpb.Type_DOUBLE: - return DoubleType, nil - case exprpb.Type_INT64: - return IntType, nil - case exprpb.Type_STRING: - return StringType, nil - case exprpb.Type_UINT64: - return UintType, nil - default: - return nil, fmt.Errorf("unsupported primitive type: %v", t) - } - case *exprpb.Type_TypeParam: - return TypeParamType(t.GetTypeParam()), nil - case *exprpb.Type_Type: - return TypeType, nil - case *exprpb.Type_WellKnown: - switch t.GetWellKnown() { - case exprpb.Type_ANY: - return AnyType, nil - case exprpb.Type_DURATION: - return DurationType, nil - case exprpb.Type_TIMESTAMP: - return TimestampType, nil - default: - return nil, fmt.Errorf("unsupported well-known type: %v", t) - } - case *exprpb.Type_Wrapper: - t, err := ExprTypeToType(&exprpb.Type{TypeKind: &exprpb.Type_Primitive{Primitive: t.GetWrapper()}}) - if err != nil { - return nil, err - } - return NullableType(t), nil - 
default: - return nil, fmt.Errorf("unsupported type: %v", t) - } + return types.ExprTypeToType(t) } // ExprDeclToDeclaration converts a protobuf CEL declaration to a CEL-native declaration, either a Variable or Function. @@ -1130,82 +319,42 @@ func ExprDeclToDeclaration(d *exprpb.Decl) (EnvOption, error) { for i, o := range overloads { args := make([]*Type, len(o.GetParams())) for j, p := range o.GetParams() { - a, err := ExprTypeToType(p) + a, err := types.ExprTypeToType(p) if err != nil { return nil, err } args[j] = a } - res, err := ExprTypeToType(o.GetResultType()) + res, err := types.ExprTypeToType(o.GetResultType()) if err != nil { return nil, err } - opts[i] = Overload(o.GetOverloadId(), args, res) + if o.IsInstanceFunction { + opts[i] = decls.MemberOverload(o.GetOverloadId(), args, res) + } else { + opts[i] = decls.Overload(o.GetOverloadId(), args, res) + } } return Function(d.GetName(), opts...), nil case *exprpb.Decl_Ident: - t, err := ExprTypeToType(d.GetIdent().GetType()) + t, err := types.ExprTypeToType(d.GetIdent().GetType()) if err != nil { return nil, err } - return Variable(d.GetName(), t), nil - default: - return nil, fmt.Errorf("unsupported decl: %v", d) - } - -} - -func functionDeclToExprDecl(f *functionDecl) (*exprpb.Decl, error) { - overloads := make([]*exprpb.Decl_FunctionDecl_Overload, len(f.overloads)) - i := 0 - for _, o := range f.overloads { - paramNames := map[string]struct{}{} - argTypes := make([]*exprpb.Type, len(o.argTypes)) - for j, a := range o.argTypes { - collectParamNames(paramNames, a) - at, err := TypeToExprType(a) - if err != nil { - return nil, err - } - argTypes[j] = at + if d.GetIdent().GetValue() == nil { + return Variable(d.GetName(), t), nil } - collectParamNames(paramNames, o.resultType) - resultType, err := TypeToExprType(o.resultType) + val, err := ast.ConstantToVal(d.GetIdent().GetValue()) if err != nil { return nil, err } - if len(paramNames) == 0 { - if o.memberFunction { - overloads[i] = decls.NewInstanceOverload(o.id, argTypes, resultType) - } else { - overloads[i] = decls.NewOverload(o.id, argTypes, resultType) - } - } else { - params := []string{} - for pn := range paramNames { - params = append(params, pn) - } - if o.memberFunction { - overloads[i] = decls.NewParameterizedInstanceOverload(o.id, argTypes, resultType, params) - } else { - overloads[i] = decls.NewParameterizedOverload(o.id, argTypes, resultType, params) - } - } - i++ - } - return decls.NewFunction(f.name, overloads...), nil -} - -func collectParamNames(paramNames map[string]struct{}, arg *Type) { - if arg.kind == TypeParamKind { - paramNames[arg.runtimeType.TypeName()] = struct{}{} - } - for _, param := range arg.parameters { - collectParamNames(paramNames, param) + return Constant(d.GetName(), t, val), nil + default: + return nil, fmt.Errorf("unsupported decl: %v", d) } } -func typeValueToKind(tv *types.TypeValue) (Kind, error) { +func typeValueToKind(tv ref.Type) (Kind, error) { switch tv { case types.BoolType: return BoolKind, nil diff --git a/vendor/github.com/google/cel-go/cel/env.go b/vendor/github.com/google/cel-go/cel/env.go index d9c2ef63f..b5c3b4cc5 100644 --- a/vendor/github.com/google/cel-go/cel/env.go +++ b/vendor/github.com/google/cel-go/cel/env.go @@ -16,13 +16,14 @@ package cel import ( "errors" - "fmt" "sync" "github.com/google/cel-go/checker" - "github.com/google/cel-go/checker/decls" + chkdecls "github.com/google/cel-go/checker/decls" "github.com/google/cel-go/common" + celast "github.com/google/cel-go/common/ast" 
"github.com/google/cel-go/common/containers" + "github.com/google/cel-go/common/decls" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/interpreter" @@ -40,8 +41,8 @@ type Ast struct { expr *exprpb.Expr info *exprpb.SourceInfo source Source - refMap map[int64]*exprpb.Reference - typeMap map[int64]*exprpb.Type + refMap map[int64]*celast.ReferenceInfo + typeMap map[int64]*types.Type } // Expr returns the proto serializable instance of the parsed/checked expression. @@ -60,21 +61,26 @@ func (ast *Ast) SourceInfo() *exprpb.SourceInfo { } // ResultType returns the output type of the expression if the Ast has been type-checked, else -// returns decls.Dyn as the parse step cannot infer the type. +// returns chkdecls.Dyn as the parse step cannot infer the type. // // Deprecated: use OutputType func (ast *Ast) ResultType() *exprpb.Type { if !ast.IsChecked() { - return decls.Dyn + return chkdecls.Dyn } - return ast.typeMap[ast.expr.GetId()] + out := ast.OutputType() + t, err := TypeToExprType(out) + if err != nil { + return chkdecls.Dyn + } + return t } // OutputType returns the output type of the expression if the Ast has been type-checked, else // returns cel.DynType as the parse step cannot infer types. func (ast *Ast) OutputType() *Type { - t, err := ExprTypeToType(ast.ResultType()) - if err != nil { + t, found := ast.typeMap[ast.expr.GetId()] + if !found { return DynType } return t @@ -87,22 +93,33 @@ func (ast *Ast) Source() Source { } // FormatType converts a type message into a string representation. +// +// Deprecated: prefer FormatCELType func FormatType(t *exprpb.Type) string { return checker.FormatCheckedType(t) } +// FormatCELType formats a cel.Type value to a string representation. +// +// The type formatting is identical to FormatType. +func FormatCELType(t *Type) string { + return checker.FormatCELType(t) +} + // Env encapsulates the context necessary to perform parsing, type checking, or generation of // evaluable programs for different expressions. type Env struct { Container *containers.Container - functions map[string]*functionDecl - declarations []*exprpb.Decl + variables []*decls.VariableDecl + functions map[string]*decls.FunctionDecl macros []parser.Macro - adapter ref.TypeAdapter - provider ref.TypeProvider + adapter types.Adapter + provider types.Provider features map[int]bool appliedFeatures map[int]bool libraries map[string]bool + validators []ASTValidator + costOptions []checker.CostOption // Internal parser representation prsr *parser.Parser @@ -154,8 +171,8 @@ func NewCustomEnv(opts ...EnvOption) (*Env, error) { return nil, err } return (&Env{ - declarations: []*exprpb.Decl{}, - functions: map[string]*functionDecl{}, + variables: []*decls.VariableDecl{}, + functions: map[string]*decls.FunctionDecl{}, macros: []parser.Macro{}, Container: containers.DefaultContainer, adapter: registry, @@ -163,14 +180,20 @@ func NewCustomEnv(opts ...EnvOption) (*Env, error) { features: map[int]bool{}, appliedFeatures: map[int]bool{}, libraries: map[string]bool{}, + validators: []ASTValidator{}, progOpts: []ProgramOption{}, + costOptions: []checker.CostOption{}, }).configure(opts) } // Check performs type-checking on the input Ast and yields a checked Ast and/or set of Issues. +// If any `ASTValidators` are configured on the environment, they will be applied after a valid +// type-check result. If any issues are detected, the validators will provide them on the +// output Issues object. 
// -// Checking has failed if the returned Issues value and its Issues.Err() value are non-nil. -// Issues should be inspected if they are non-nil, but may not represent a fatal error. +// Either checking or validation has failed if the returned Issues value and its Issues.Err() +// value are non-nil. Issues should be inspected if they are non-nil, but may not represent a +// fatal error. // // It is possible to have both non-nil Ast and Issues values returned from this call: however, // the mere presence of an Ast does not imply that it is valid for use. @@ -183,21 +206,38 @@ func (e *Env) Check(ast *Ast) (*Ast, *Issues) { if err != nil { errs := common.NewErrors(ast.Source()) errs.ReportError(common.NoLocation, err.Error()) - return nil, NewIssues(errs) + return nil, NewIssuesWithSourceInfo(errs, ast.SourceInfo()) } res, errs := checker.Check(pe, ast.Source(), chk) if len(errs.GetErrors()) > 0 { - return nil, NewIssues(errs) + return nil, NewIssuesWithSourceInfo(errs, ast.SourceInfo()) } // Manually create the Ast to ensure that the Ast source information (which may be more // detailed than the information provided by Check), is returned to the caller. - return &Ast{ + ast = &Ast{ source: ast.Source(), - expr: res.GetExpr(), - info: res.GetSourceInfo(), - refMap: res.GetReferenceMap(), - typeMap: res.GetTypeMap()}, nil + expr: res.Expr, + info: res.SourceInfo, + refMap: res.ReferenceMap, + typeMap: res.TypeMap} + + // Generate a validator configuration from the set of configured validators. + vConfig := newValidatorConfig() + for _, v := range e.validators { + if cv, ok := v.(ASTValidatorConfigurer); ok { + cv.Configure(vConfig) + } + } + // Apply additional validators on the type-checked result. + iss := NewIssuesWithSourceInfo(errs, ast.SourceInfo()) + for _, v := range e.validators { + v.Validate(e, vConfig, res, iss) + } + if iss.Err() != nil { + return nil, iss + } + return ast, nil } // Compile combines the Parse and Check phases CEL program compilation to produce an Ast and @@ -255,7 +295,7 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) { copy(chkOptsCopy, e.chkOpts) // Copy the declarations if needed. - decsCopy := []*exprpb.Decl{} + varsCopy := []*decls.VariableDecl{} if chk != nil { // If the type-checker has already been instantiated, then the e.declarations have been // validated within the chk instance. @@ -263,8 +303,8 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) { } else { // If the type-checker has not been instantiated, ensure the unvalidated declarations are // provided to the extended Env instance. - decsCopy = make([]*exprpb.Decl, len(e.declarations)) - copy(decsCopy, e.declarations) + varsCopy = make([]*decls.VariableDecl, len(e.variables)) + copy(varsCopy, e.variables) } // Copy macros and program options @@ -276,8 +316,8 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) { // Copy the adapter / provider if they appear to be mutable. adapter := e.adapter provider := e.provider - adapterReg, isAdapterReg := e.adapter.(ref.TypeRegistry) - providerReg, isProviderReg := e.provider.(ref.TypeRegistry) + adapterReg, isAdapterReg := e.adapter.(*types.Registry) + providerReg, isProviderReg := e.provider.(*types.Registry) // In most cases the provider and adapter will be a ref.TypeRegistry; // however, in the rare cases where they are not, they are assumed to // be immutable. 
Since it is possible to set the TypeProvider separately @@ -308,7 +348,7 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) { for k, v := range e.appliedFeatures { appliedFeaturesCopy[k] = v } - funcsCopy := make(map[string]*functionDecl, len(e.functions)) + funcsCopy := make(map[string]*decls.FunctionDecl, len(e.functions)) for k, v := range e.functions { funcsCopy[k] = v } @@ -316,10 +356,14 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) { for k, v := range e.libraries { libsCopy[k] = v } + validatorsCopy := make([]ASTValidator, len(e.validators)) + copy(validatorsCopy, e.validators) + costOptsCopy := make([]checker.CostOption, len(e.costOptions)) + copy(costOptsCopy, e.costOptions) ext := &Env{ Container: e.Container, - declarations: decsCopy, + variables: varsCopy, functions: funcsCopy, macros: macsCopy, progOpts: progOptsCopy, @@ -327,9 +371,11 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) { features: featuresCopy, appliedFeatures: appliedFeaturesCopy, libraries: libsCopy, + validators: validatorsCopy, provider: provider, chkOpts: chkOptsCopy, prsrOpts: prsrOptsCopy, + costOptions: costOptsCopy, } return ext.configure(opts) } @@ -347,6 +393,25 @@ func (e *Env) HasLibrary(libName string) bool { return exists && configured } +// Libraries returns a list of SingletonLibrary that have been configured in the environment. +func (e *Env) Libraries() []string { + libraries := make([]string, 0, len(e.libraries)) + for libName := range e.libraries { + libraries = append(libraries, libName) + } + return libraries +} + +// HasValidator returns whether a specific ASTValidator has been configured in the environment. +func (e *Env) HasValidator(name string) bool { + for _, v := range e.validators { + if v.Name() == name { + return true + } + } + return false +} + // Parse parses the input expression value `txt` to a Ast and/or a set of Issues. // // This form of Parse creates a Source value for the input `txt` and forwards to the @@ -388,36 +453,64 @@ func (e *Env) Program(ast *Ast, opts ...ProgramOption) (Program, error) { return newProgram(e, ast, optSet) } +// CELTypeAdapter returns the `types.Adapter` configured for the environment. +func (e *Env) CELTypeAdapter() types.Adapter { + return e.adapter +} + +// CELTypeProvider returns the `types.Provider` configured for the environment. +func (e *Env) CELTypeProvider() types.Provider { + return e.provider +} + // TypeAdapter returns the `ref.TypeAdapter` configured for the environment. +// +// Deprecated: use CELTypeAdapter() func (e *Env) TypeAdapter() ref.TypeAdapter { return e.adapter } // TypeProvider returns the `ref.TypeProvider` configured for the environment. +// +// Deprecated: use CELTypeProvider() func (e *Env) TypeProvider() ref.TypeProvider { - return e.provider + if legacyProvider, ok := e.provider.(ref.TypeProvider); ok { + return legacyProvider + } + return &interopLegacyTypeProvider{Provider: e.provider} } -// UnknownVars returns an interpreter.PartialActivation which marks all variables -// declared in the Env as unknown AttributePattern values. +// UnknownVars returns an interpreter.PartialActivation which marks all variables declared in the +// Env as unknown AttributePattern values. // -// Note, the UnknownVars will behave the same as an interpreter.EmptyActivation -// unless the PartialAttributes option is provided as a ProgramOption. +// Note, the UnknownVars will behave the same as an interpreter.EmptyActivation unless the +// PartialAttributes option is provided as a ProgramOption. 
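For reference, a minimal sketch of partial evaluation built on the helpers above; the variable names and expression are illustrative, and imports of the cel and common/types packages are assumed:

env, _ := cel.NewEnv(
	cel.Variable("user", cel.StringType),
	cel.Variable("resource", cel.StringType),
)
ast, _ := env.Compile(`user == "admin" && resource == "accounts"`)
// OptTrackState and OptPartialEval enable unknown tracking and residual ASTs.
prg, _ := env.Program(ast, cel.EvalOptions(cel.OptTrackState, cel.OptPartialEval))

// Only `resource` is supplied; `user` is inferred as unknown.
vars, _ := env.PartialVars(map[string]any{"resource": "accounts"})
out, det, _ := prg.Eval(vars)
if types.IsUnknown(out) {
	// The residual AST retains only the unresolved `user == "admin"` portion.
	residual, _ := env.ResidualAst(ast, det)
	_ = residual
}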
func (e *Env) UnknownVars() interpreter.PartialActivation { - var unknownPatterns []*interpreter.AttributePattern - for _, d := range e.declarations { - switch d.GetDeclKind().(type) { - case *exprpb.Decl_Ident: - unknownPatterns = append(unknownPatterns, - interpreter.NewAttributePattern(d.GetName())) - } - } - part, _ := PartialVars( - interpreter.EmptyActivation(), - unknownPatterns...) + act := interpreter.EmptyActivation() + part, _ := PartialVars(act, e.computeUnknownVars(act)...) return part } +// PartialVars returns an interpreter.PartialActivation where all variables not in the input variable +// set, but which have been configured in the environment, are marked as unknown. +// +// The `vars` value may either be an interpreter.Activation or any valid input to the +// interpreter.NewActivation call. +// +// Note, this is equivalent to calling cel.PartialVars and manually configuring the set of unknown +// variables. For more advanced use cases of partial state where portions of an object graph, rather +// than top-level variables, are missing the PartialVars() method may be a more suitable choice. +// +// Note, the PartialVars will behave the same as an interpreter.EmptyActivation unless the +// PartialAttributes option is provided as a ProgramOption. +func (e *Env) PartialVars(vars any) (interpreter.PartialActivation, error) { + act, err := interpreter.NewActivation(vars) + if err != nil { + return nil, err + } + return PartialVars(act, e.computeUnknownVars(act)...) +} + // ResidualAst takes an Ast and its EvalDetails to produce a new Ast which only contains the // attribute references which are unknown. // @@ -463,11 +556,16 @@ func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) { // EstimateCost estimates the cost of a type checked CEL expression using the length estimates of input data and // extension functions provided by estimator. func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator, opts ...checker.CostOption) (checker.CostEstimate, error) { - checked, err := AstToCheckedExpr(ast) - if err != nil { - return checker.CostEstimate{}, fmt.Errorf("EsimateCost could not inspect Ast: %v", err) + checked := &celast.CheckedAST{ + Expr: ast.Expr(), + SourceInfo: ast.SourceInfo(), + TypeMap: ast.typeMap, + ReferenceMap: ast.refMap, } - return checker.Cost(checked, estimator, opts...) + extendedOpts := make([]checker.CostOption, 0, len(e.costOptions)) + extendedOpts = append(extendedOpts, opts...) + extendedOpts = append(extendedOpts, e.costOptions...) + return checker.Cost(checked, estimator, extendedOpts...) } // configure applies a series of EnvOptions to the current environment. @@ -488,14 +586,6 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) { return nil, err } - // Initialize all of the functions configured within the environment. - for _, fn := range e.functions { - err = fn.init() - if err != nil { - return nil, err - } - } - // Configure the parser. prsrOpts := []parser.Option{} prsrOpts = append(prsrOpts, e.prsrOpts...) @@ -504,6 +594,9 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) { if e.HasFeature(featureEnableMacroCallTracking) { prsrOpts = append(prsrOpts, parser.PopulateMacroCalls(true)) } + if e.HasFeature(featureVariadicLogicalASTs) { + prsrOpts = append(prsrOpts, parser.EnableVariadicOperatorASTs(true)) + } e.prsr, err = parser.NewParser(prsrOpts...) 
if err != nil { return nil, err @@ -525,8 +618,6 @@ func (e *Env) initChecker() (*checker.Env, error) { chkOpts := []checker.Option{} chkOpts = append(chkOpts, e.chkOpts...) chkOpts = append(chkOpts, - checker.HomogeneousAggregateLiterals( - e.HasFeature(featureDisableDynamicAggregateLiterals)), checker.CrossTypeNumericComparisons( e.HasFeature(featureCrossTypeNumericComparisons))) @@ -536,19 +627,17 @@ func (e *Env) initChecker() (*checker.Env, error) { return } // Add the statically configured declarations. - err = ce.Add(e.declarations...) + err = ce.AddIdents(e.variables...) if err != nil { e.setCheckerOrError(nil, err) return } // Add the function declarations which are derived from the FunctionDecl instances. for _, fn := range e.functions { - fnDecl, err := functionDeclToExprDecl(fn) - if err != nil { - e.setCheckerOrError(nil, err) - return + if fn.IsDeclarationDisabled() { + continue } - err = ce.Add(fnDecl) + err = ce.AddFunctions(fn) if err != nil { e.setCheckerOrError(nil, err) return @@ -596,17 +685,43 @@ func (e *Env) maybeApplyFeature(feature int, option EnvOption) (*Env, error) { return e, nil } +// computeUnknownVars determines a set of missing variables based on the input activation and the +// environment's configured declaration set. +func (e *Env) computeUnknownVars(vars interpreter.Activation) []*interpreter.AttributePattern { + var unknownPatterns []*interpreter.AttributePattern + for _, v := range e.variables { + varName := v.Name() + if _, found := vars.ResolveName(varName); found { + continue + } + unknownPatterns = append(unknownPatterns, interpreter.NewAttributePattern(varName)) + } + return unknownPatterns +} + +// Error type which references an expression id, a location within source, and a message. +type Error = common.Error + // Issues defines methods for inspecting the error details of parse and check calls. // // Note: in the future, non-fatal warnings and notices may be inspectable via the Issues struct. type Issues struct { errs *common.Errors + info *exprpb.SourceInfo } // NewIssues returns an Issues struct from a common.Errors object. func NewIssues(errs *common.Errors) *Issues { + return NewIssuesWithSourceInfo(errs, nil) +} + +// NewIssuesWithSourceInfo returns an Issues struct from a common.Errors object with SourceInfo metatata +// which can be used with the `ReportErrorAtID` method for additional error reports within the context +// information that's inferred from an expression id. +func NewIssuesWithSourceInfo(errs *common.Errors, info *exprpb.SourceInfo) *Issues { return &Issues{ errs: errs, + info: info, } } @@ -622,9 +737,9 @@ func (i *Issues) Err() error { } // Errors returns the collection of errors encountered in more granular detail. -func (i *Issues) Errors() []common.Error { +func (i *Issues) Errors() []*Error { if i == nil { - return []common.Error{} + return []*Error{} } return i.errs.GetErrors() } @@ -648,6 +763,37 @@ func (i *Issues) String() string { return i.errs.ToDisplayString() } +// ReportErrorAtID reports an error message with an optional set of formatting arguments. +// +// The source metadata for the expression at `id`, if present, is attached to the error report. +// To ensure that source metadata is attached to error reports, use NewIssuesWithSourceInfo. +func (i *Issues) ReportErrorAtID(id int64, message string, args ...any) { + i.errs.ReportErrorAtID(id, locationByID(id, i.info), message, args...) +} + +// locationByID returns a common.Location given an expression id. 
+// +// TODO: move this functionality into the native SourceInfo and an overhaul of the common.Source +// as this implementation relies on the abstractions present in the protobuf SourceInfo object, +// and is replicated in the checker. +func locationByID(id int64, sourceInfo *exprpb.SourceInfo) common.Location { + positions := sourceInfo.GetPositions() + var line = 1 + if offset, found := positions[id]; found { + col := int(offset) + for _, lineOffset := range sourceInfo.GetLineOffsets() { + if lineOffset < offset { + line++ + col = int(offset - lineOffset) + } else { + break + } + } + return common.NewLocation(line, col) + } + return common.NoLocation +} + // getStdEnv lazy initializes the CEL standard environment. func getStdEnv() (*Env, error) { stdEnvInit.Do(func() { @@ -656,6 +802,90 @@ func getStdEnv() (*Env, error) { return stdEnv, stdEnvErr } +// interopCELTypeProvider layers support for the types.Provider interface on top of a ref.TypeProvider. +type interopCELTypeProvider struct { + ref.TypeProvider +} + +// FindStructType returns a types.Type instance for the given fully-qualified typeName if one exists. +// +// This method proxies to the underyling ref.TypeProvider's FindType method and converts protobuf type +// into a native type representation. If the conversion fails, the type is listed as not found. +func (p *interopCELTypeProvider) FindStructType(typeName string) (*types.Type, bool) { + if et, found := p.FindType(typeName); found { + t, err := types.ExprTypeToType(et) + if err != nil { + return nil, false + } + return t, true + } + return nil, false +} + +// FindStructFieldType returns a types.FieldType instance for the given fully-qualified typeName and field +// name, if one exists. +// +// This method proxies to the underyling ref.TypeProvider's FindFieldType method and converts protobuf type +// into a native type representation. If the conversion fails, the type is listed as not found. +func (p *interopCELTypeProvider) FindStructFieldType(structType, fieldName string) (*types.FieldType, bool) { + if ft, found := p.FindFieldType(structType, fieldName); found { + t, err := types.ExprTypeToType(ft.Type) + if err != nil { + return nil, false + } + return &types.FieldType{ + Type: t, + IsSet: ft.IsSet, + GetFrom: ft.GetFrom, + }, true + } + return nil, false +} + +// interopLegacyTypeProvider layers support for the ref.TypeProvider interface on top of a types.Provider. +type interopLegacyTypeProvider struct { + types.Provider +} + +// FindType retruns the protobuf Type representation for the input type name if one exists. +// +// This method proxies to the underlying types.Provider FindStructType method and converts the types.Type +// value to a protobuf Type representation. +// +// Failure to convert the type will result in the type not being found. +func (p *interopLegacyTypeProvider) FindType(typeName string) (*exprpb.Type, bool) { + if t, found := p.FindStructType(typeName); found { + et, err := types.TypeToExprType(t) + if err != nil { + return nil, false + } + return et, true + } + return nil, false +} + +// FindFieldType returns the protobuf-based FieldType representation for the input type name and field, +// if one exists. +// +// This call proxies to the types.Provider FindStructFieldType method and converts the types.FIeldType +// value to a protobuf-based ref.FieldType representation if found. +// +// Failure to convert the FieldType will result in the field not being found. 
+func (p *interopLegacyTypeProvider) FindFieldType(structType, fieldName string) (*ref.FieldType, bool) { + if cft, found := p.FindStructFieldType(structType, fieldName); found { + et, err := types.TypeToExprType(cft.Type) + if err != nil { + return nil, false + } + return &ref.FieldType{ + Type: et, + IsSet: cft.IsSet, + GetFrom: cft.GetFrom, + }, true + } + return nil, false +} + var ( stdEnvInit sync.Once stdEnv *Env diff --git a/vendor/github.com/google/cel-go/cel/io.go b/vendor/github.com/google/cel-go/cel/io.go index 93ded3cf1..80f63140e 100644 --- a/vendor/github.com/google/cel-go/cel/io.go +++ b/vendor/github.com/google/cel-go/cel/io.go @@ -22,6 +22,7 @@ import ( "google.golang.org/protobuf/proto" "github.com/google/cel-go/common" + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" @@ -33,7 +34,8 @@ import ( // CheckedExprToAst converts a checked expression proto message to an Ast. func CheckedExprToAst(checkedExpr *exprpb.CheckedExpr) *Ast { - return CheckedExprToAstWithSource(checkedExpr, nil) + checked, _ := CheckedExprToAstWithSource(checkedExpr, nil) + return checked } // CheckedExprToAstWithSource converts a checked expression proto message to an Ast, @@ -44,29 +46,18 @@ func CheckedExprToAst(checkedExpr *exprpb.CheckedExpr) *Ast { // through future calls. // // Prefer CheckedExprToAst if loading expressions from storage. -func CheckedExprToAstWithSource(checkedExpr *exprpb.CheckedExpr, src Source) *Ast { - refMap := checkedExpr.GetReferenceMap() - if refMap == nil { - refMap = map[int64]*exprpb.Reference{} - } - typeMap := checkedExpr.GetTypeMap() - if typeMap == nil { - typeMap = map[int64]*exprpb.Type{} - } - si := checkedExpr.GetSourceInfo() - if si == nil { - si = &exprpb.SourceInfo{} - } - if src == nil { - src = common.NewInfoSource(si) +func CheckedExprToAstWithSource(checkedExpr *exprpb.CheckedExpr, src Source) (*Ast, error) { + checkedAST, err := ast.CheckedExprToCheckedAST(checkedExpr) + if err != nil { + return nil, err } return &Ast{ - expr: checkedExpr.GetExpr(), - info: si, + expr: checkedAST.Expr, + info: checkedAST.SourceInfo, source: src, - refMap: refMap, - typeMap: typeMap, - } + refMap: checkedAST.ReferenceMap, + typeMap: checkedAST.TypeMap, + }, nil } // AstToCheckedExpr converts an Ast to an protobuf CheckedExpr value. @@ -76,12 +67,13 @@ func AstToCheckedExpr(a *Ast) (*exprpb.CheckedExpr, error) { if !a.IsChecked() { return nil, fmt.Errorf("cannot convert unchecked ast") } - return &exprpb.CheckedExpr{ - Expr: a.Expr(), - SourceInfo: a.SourceInfo(), + cAst := &ast.CheckedAST{ + Expr: a.expr, + SourceInfo: a.info, ReferenceMap: a.refMap, TypeMap: a.typeMap, - }, nil + } + return ast.CheckedASTToCheckedExpr(cAst) } // ParsedExprToAst converts a parsed expression proto message to an Ast. @@ -202,7 +194,7 @@ func RefValueToValue(res ref.Val) (*exprpb.Value, error) { } var ( - typeNameToTypeValue = map[string]*types.TypeValue{ + typeNameToTypeValue = map[string]ref.Val{ "bool": types.BoolType, "bytes": types.BytesType, "double": types.DoubleType, @@ -219,7 +211,7 @@ var ( ) // ValueToRefValue converts between exprpb.Value and ref.Val. 
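As a rough usage sketch of the conversion helpers above, an Ast can be serialized to its protobuf form for storage and later restored without re-checking; the `request` variable is illustrative:

env, _ := cel.NewEnv(cel.Variable("request", cel.StringType))
ast, iss := env.Compile(`request.size() > 0`)
if iss.Err() != nil {
	// handle compile issues
}
checkedExpr, _ := cel.AstToCheckedExpr(ast) // protobuf CheckedExpr, suitable for storage
restored := cel.CheckedExprToAst(checkedExpr)
prg, _ := env.Program(restored)
_ = prg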
-func ValueToRefValue(adapter ref.TypeAdapter, v *exprpb.Value) (ref.Val, error) { +func ValueToRefValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) { switch v.Kind.(type) { case *exprpb.Value_NullValue: return types.NullValue, nil diff --git a/vendor/github.com/google/cel-go/cel/library.go b/vendor/github.com/google/cel-go/cel/library.go index bcfd44f78..4d232085c 100644 --- a/vendor/github.com/google/cel-go/cel/library.go +++ b/vendor/github.com/google/cel-go/cel/library.go @@ -15,19 +15,18 @@ package cel import ( + "math" "strconv" "strings" "time" - "github.com/google/cel-go/checker" - "github.com/google/cel-go/common" "github.com/google/cel-go/common/operators" "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/stdlib" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" "github.com/google/cel-go/interpreter" - "github.com/google/cel-go/interpreter/functions" "github.com/google/cel-go/parser" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" @@ -35,6 +34,7 @@ import ( const ( optMapMacro = "optMap" + optFlatMapMacro = "optFlatMap" hasValueFunc = "hasValue" optionalNoneFunc = "optional.none" optionalOfFunc = "optional.of" @@ -106,44 +106,213 @@ func (stdLibrary) LibraryName() string { return "cel.lib.std" } -// EnvOptions returns options for the standard CEL function declarations and macros. +// CompileOptions returns options for the standard CEL function declarations and macros. func (stdLibrary) CompileOptions() []EnvOption { return []EnvOption{ - Declarations(checker.StandardDeclarations()...), + func(e *Env) (*Env, error) { + var err error + for _, fn := range stdlib.Functions() { + existing, found := e.functions[fn.Name()] + if found { + fn, err = existing.Merge(fn) + if err != nil { + return nil, err + } + } + e.functions[fn.Name()] = fn + } + return e, nil + }, + func(e *Env) (*Env, error) { + e.variables = append(e.variables, stdlib.Types()...) + return e, nil + }, Macros(StandardMacros...), } } // ProgramOptions returns function implementations for the standard CEL functions. func (stdLibrary) ProgramOptions() []ProgramOption { - return []ProgramOption{ - Functions(functions.StandardOverloads()...), + return []ProgramOption{} +} + +// OptionalTypes enable support for optional syntax and types in CEL. +// +// The optional value type makes it possible to express whether variables have +// been provided, whether a result has been computed, and in the future whether +// an object field path, map key value, or list index has a value. +// +// # Syntax Changes +// +// OptionalTypes are unlike other CEL extensions because they modify the CEL +// syntax itself, notably through the use of a `?` preceding a field name or +// index value. +// +// ## Field Selection +// +// The optional syntax in field selection is denoted as `obj.?field`. In other +// words, if a field is set, return `optional.of(obj.field)“, else +// `optional.none()`. The optional field selection is viral in the sense that +// after the first optional selection all subsequent selections or indices +// are treated as optional, i.e. 
the following expressions are equivalent: +// +// obj.?field.subfield +// obj.?field.?subfield +// +// ## Indexing +// +// Similar to field selection, the optional syntax can be used in index +// expressions on maps and lists: +// +// list[?0] +// map[?key] +// +// ## Optional Field Setting +// +// When creating map or message literals, if a field may be optionally set +// based on its presence, then placing a `?` before the field name or key +// will ensure the type on the right-hand side must be optional(T) where T +// is the type of the field or key-value. +// +// The following returns a map with the key expression set only if the +// subfield is present, otherwise an empty map is created: +// +// {?key: obj.?field.subfield} +// +// ## Optional Element Setting +// +// When creating list literals, an element in the list may be optionally added +// when the element expression is preceded by a `?`: +// +// [a, ?b, ?c] // return a list with either [a], [a, b], [a, b, c], or [a, c] +// +// # Optional.Of +// +// Create an optional(T) value of a given value with type T. +// +// optional.of(10) +// +// # Optional.OfNonZeroValue +// +// Create an optional(T) value of a given value with type T if it is not a +// zero-value. A zero-value the default empty value for any given CEL type, +// including empty protobuf message types. If the value is empty, the result +// of this call will be optional.none(). +// +// optional.ofNonZeroValue([1, 2, 3]) // optional(list(int)) +// optional.ofNonZeroValue([]) // optional.none() +// optional.ofNonZeroValue(0) // optional.none() +// optional.ofNonZeroValue("") // optional.none() +// +// # Optional.None +// +// Create an empty optional value. +// +// # HasValue +// +// Determine whether the optional contains a value. +// +// optional.of(b'hello').hasValue() // true +// optional.ofNonZeroValue({}).hasValue() // false +// +// # Value +// +// Get the value contained by the optional. If the optional does not have a +// value, the result will be a CEL error. +// +// optional.of(b'hello').value() // b'hello' +// optional.ofNonZeroValue({}).value() // error +// +// # Or +// +// If the value on the left-hand side is optional.none(), the optional value +// on the right hand side is returned. If the value on the left-hand set is +// valued, then it is returned. This operation is short-circuiting and will +// only evaluate as many links in the `or` chain as are needed to return a +// non-empty optional value. +// +// obj.?field.or(m[?key]) +// l[?index].or(obj.?field.subfield).or(obj.?other) +// +// # OrValue +// +// Either return the value contained within the optional on the left-hand side +// or return the alternative value on the right hand side. +// +// m[?key].orValue("none") +// +// # OptMap +// +// Apply a transformation to the optional's underlying value if it is not empty +// and return an optional typed result based on the transformation. The +// transformation expression type must return a type T which is wrapped into +// an optional. +// +// msg.?elements.optMap(e, e.size()).orValue(0) +// +// # OptFlatMap +// +// Introduced in version: 1 +// +// Apply a transformation to the optional's underlying value if it is not empty +// and return the result. The transform expression must return an optional(T) +// rather than type T. This can be useful when dealing with zero values and +// conditionally generating an empty or non-empty result in ways which cannot +// be expressed with `optMap`. 
+// +// msg.?elements.optFlatMap(e, e[?0]) // return the first element if present. +func OptionalTypes(opts ...OptionalTypesOption) EnvOption { + lib := &optionalLib{version: math.MaxUint32} + for _, opt := range opts { + lib = opt(lib) } + return Lib(lib) } -type optionalLibrary struct{} +type optionalLib struct { + version uint32 +} + +// OptionalTypesOption is a functional interface for configuring the strings library. +type OptionalTypesOption func(*optionalLib) *optionalLib + +// OptionalTypesVersion configures the version of the optional type library. +// +// The version limits which functions are available. Only functions introduced +// below or equal to the given version included in the library. If this option +// is not set, all functions are available. +// +// See the library documentation to determine which version a function was introduced. +// If the documentation does not state which version a function was introduced, it can +// be assumed to be introduced at version 0, when the library was first created. +func OptionalTypesVersion(version uint32) OptionalTypesOption { + return func(lib *optionalLib) *optionalLib { + lib.version = version + return lib + } +} // LibraryName implements the SingletonLibrary interface method. -func (optionalLibrary) LibraryName() string { +func (lib *optionalLib) LibraryName() string { return "cel.lib.optional" } // CompileOptions implements the Library interface method. -func (optionalLibrary) CompileOptions() []EnvOption { +func (lib *optionalLib) CompileOptions() []EnvOption { paramTypeK := TypeParamType("K") paramTypeV := TypeParamType("V") optionalTypeV := OptionalType(paramTypeV) listTypeV := ListType(paramTypeV) mapTypeKV := MapType(paramTypeK, paramTypeV) - return []EnvOption{ + opts := []EnvOption{ // Enable the optional syntax in the parser. enableOptionalSyntax(), // Introduce the optional type. Types(types.OptionalType), - // Configure the optMap macro. + // Configure the optMap and optFlatMap macros. Macros(NewReceiverMacro(optMapMacro, 2, optMap)), // Global and member functions for working with optional values. @@ -202,21 +371,29 @@ func (optionalLibrary) CompileOptions() []EnvOption { // Index overloads to accommodate using an optional value as the operand. Function(operators.Index, Overload("optional_list_index_int", []*Type{OptionalType(listTypeV), IntType}, optionalTypeV), - Overload("optional_map_index_optional_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)), + Overload("optional_map_index_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)), + } + if lib.version >= 1 { + opts = append(opts, Macros(NewReceiverMacro(optFlatMapMacro, 2, optFlatMap))) + } + return opts +} + +// ProgramOptions implements the Library interface method. 
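A small sketch of how the optional library documented above might be enabled and used; the map variable `m` is illustrative:

env, _ := cel.NewEnv(
	cel.OptionalTypes(cel.OptionalTypesVersion(1)), // version 1 includes optFlatMap
	cel.Variable("m", cel.MapType(cel.StringType, cel.StringType)),
)
ast, iss := env.Compile(`m[?"key"].optMap(v, v.size()).orValue(0)`)
if iss.Err() != nil {
	// handle compile issues
}
prg, _ := env.Program(ast)
out, _, _ := prg.Eval(map[string]any{"m": map[string]string{"key": "value"}})
// out == 5; with an empty map the orValue(0) default is returned instead.
_ = out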
+func (lib *optionalLib) ProgramOptions() []ProgramOption { + return []ProgramOption{ + CustomDecorator(decorateOptionalOr), } } -func optMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { +func optMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { varIdent := args[0] varName := "" switch varIdent.GetExprKind().(type) { case *exprpb.Expr_IdentExpr: varName = varIdent.GetIdentExpr().GetName() default: - return nil, &common.Error{ - Message: "optMap() variable name must be a simple identifier", - Location: meh.OffsetLocation(varIdent.GetId()), - } + return nil, meh.NewError(varIdent.GetId(), "optMap() variable name must be a simple identifier") } mapExpr := args[1] return meh.GlobalCall( @@ -237,11 +414,30 @@ func optMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exp ), nil } -// ProgramOptions implements the Library interface method. -func (optionalLibrary) ProgramOptions() []ProgramOption { - return []ProgramOption{ - CustomDecorator(decorateOptionalOr), +func optFlatMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { + varIdent := args[0] + varName := "" + switch varIdent.GetExprKind().(type) { + case *exprpb.Expr_IdentExpr: + varName = varIdent.GetIdentExpr().GetName() + default: + return nil, meh.NewError(varIdent.GetId(), "optFlatMap() variable name must be a simple identifier") } + mapExpr := args[1] + return meh.GlobalCall( + operators.Conditional, + meh.ReceiverCall(hasValueFunc, target), + meh.Fold( + unusedIterVar, + meh.NewList(), + varName, + meh.ReceiverCall(valueFunc, target), + meh.LiteralBool(false), + meh.Ident(varName), + mapExpr, + ), + meh.GlobalCall(optionalNoneFunc), + ), nil } func enableOptionalSyntax() EnvOption { @@ -358,28 +554,16 @@ var ( timeOverloadDeclarations = []EnvOption{ Function(overloads.TimeGetHours, MemberOverload(overloads.DurationToHours, []*Type{DurationType}, IntType, - UnaryBinding(func(dur ref.Val) ref.Val { - d := dur.(types.Duration) - return types.Int(d.Hours()) - }))), + UnaryBinding(types.DurationGetHours))), Function(overloads.TimeGetMinutes, MemberOverload(overloads.DurationToMinutes, []*Type{DurationType}, IntType, - UnaryBinding(func(dur ref.Val) ref.Val { - d := dur.(types.Duration) - return types.Int(d.Minutes()) - }))), + UnaryBinding(types.DurationGetMinutes))), Function(overloads.TimeGetSeconds, MemberOverload(overloads.DurationToSeconds, []*Type{DurationType}, IntType, - UnaryBinding(func(dur ref.Val) ref.Val { - d := dur.(types.Duration) - return types.Int(d.Seconds()) - }))), + UnaryBinding(types.DurationGetSeconds))), Function(overloads.TimeGetMilliseconds, MemberOverload(overloads.DurationToMilliseconds, []*Type{DurationType}, IntType, - UnaryBinding(func(dur ref.Val) ref.Val { - d := dur.(types.Duration) - return types.Int(d.Milliseconds()) - }))), + UnaryBinding(types.DurationGetMilliseconds))), Function(overloads.TimeGetFullYear, MemberOverload(overloads.TimestampToYear, []*Type{TimestampType}, IntType, UnaryBinding(func(ts ref.Val) ref.Val { diff --git a/vendor/github.com/google/cel-go/cel/macro.go b/vendor/github.com/google/cel-go/cel/macro.go index e48c5bf8e..1eb414c8b 100644 --- a/vendor/github.com/google/cel-go/cel/macro.go +++ b/vendor/github.com/google/cel-go/cel/macro.go @@ -15,7 +15,6 @@ package cel import ( - "github.com/google/cel-go/common" "github.com/google/cel-go/parser" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" @@ -63,21 +62,21 @@ func 
NewReceiverVarArgMacro(function string, expander MacroExpander) Macro { } // HasMacroExpander expands the input call arguments into a presence test, e.g. has(.field) -func HasMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { +func HasMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { return parser.MakeHas(meh, target, args) } // ExistsMacroExpander expands the input call arguments into a comprehension that returns true if any of the // elements in the range match the predicate expressions: // .exists(, ) -func ExistsMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { +func ExistsMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { return parser.MakeExists(meh, target, args) } // ExistsOneMacroExpander expands the input call arguments into a comprehension that returns true if exactly // one of the elements in the range match the predicate expressions: // .exists_one(, ) -func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { +func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { return parser.MakeExistsOne(meh, target, args) } @@ -91,14 +90,14 @@ func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*ex // // In the second form only iterVar values which return true when provided to the predicate expression // are transformed. -func MapMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { +func MapMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { return parser.MakeMap(meh, target, args) } // FilterMacroExpander expands the input call arguments into a comprehension which produces a list which contains // only elements which match the provided predicate expression: // .filter(, ) -func FilterMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { +func FilterMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { return parser.MakeFilter(meh, target, args) } diff --git a/vendor/github.com/google/cel-go/cel/options.go b/vendor/github.com/google/cel-go/cel/options.go index 07f3d6c71..05867730d 100644 --- a/vendor/github.com/google/cel-go/cel/options.go +++ b/vendor/github.com/google/cel-go/cel/options.go @@ -23,12 +23,13 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/types/dynamicpb" - "github.com/google/cel-go/checker/decls" + "github.com/google/cel-go/checker" "github.com/google/cel-go/common/containers" + "github.com/google/cel-go/common/functions" + "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/pb" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/interpreter" - "github.com/google/cel-go/interpreter/functions" "github.com/google/cel-go/parser" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" @@ -41,13 +42,6 @@ import ( const ( _ = iota - // Disallow heterogeneous aggregate (list, map) literals. - // Note, it is still possible to have heterogeneous aggregates when - // provided as variables to the expression, as well as via conversion - // of well-known dynamic types, or with unchecked expressions. 
- // Affects checking. Provides a subset of standard behavior. - featureDisableDynamicAggregateLiterals - // Enable the tracking of function call expressions replaced by macros. featureEnableMacroCallTracking @@ -63,9 +57,10 @@ const ( // is not already in UTC. featureDefaultUTCTimeZone - // Enable the use of optional types in the syntax, type-system, type-checking, - // and runtime. - featureOptionalTypes + // Enable the serialization of logical operator ASTs as variadic calls, thus + // compressing the logic graph to a single call when multiple like-operator + // expressions occur: e.g. a && b && c && d -> call(_&&_, [a, b, c, d]) + featureVariadicLogicalASTs ) // EnvOption is a functional interface for configuring the environment. @@ -82,23 +77,26 @@ func ClearMacros() EnvOption { } } -// CustomTypeAdapter swaps the default ref.TypeAdapter implementation with a custom one. +// CustomTypeAdapter swaps the default types.Adapter implementation with a custom one. // // Note: This option must be specified before the Types and TypeDescs options when used together. -func CustomTypeAdapter(adapter ref.TypeAdapter) EnvOption { +func CustomTypeAdapter(adapter types.Adapter) EnvOption { return func(e *Env) (*Env, error) { e.adapter = adapter return e, nil } } -// CustomTypeProvider swaps the default ref.TypeProvider implementation with a custom one. +// CustomTypeProvider replaces the types.Provider implementation with a custom one. +// +// The `provider` variable type may either be types.Provider or ref.TypeProvider (deprecated) // // Note: This option must be specified before the Types and TypeDescs options when used together. -func CustomTypeProvider(provider ref.TypeProvider) EnvOption { +func CustomTypeProvider(provider any) EnvOption { return func(e *Env) (*Env, error) { - e.provider = provider - return e, nil + var err error + e.provider, err = maybeInteropProvider(provider) + return e, err } } @@ -108,8 +106,28 @@ func CustomTypeProvider(provider ref.TypeProvider) EnvOption { // for the environment. The NewEnv call builds on top of the standard CEL declarations. For a // purely custom set of declarations use NewCustomEnv. func Declarations(decls ...*exprpb.Decl) EnvOption { + declOpts := []EnvOption{} + var err error + var opt EnvOption + // Convert the declarations to `EnvOption` values ahead of time. + // Surface any errors in conversion when the options are applied. + for _, d := range decls { + opt, err = ExprDeclToDeclaration(d) + if err != nil { + break + } + declOpts = append(declOpts, opt) + } return func(e *Env) (*Env, error) { - e.declarations = append(e.declarations, decls...) + if err != nil { + return nil, err + } + for _, o := range declOpts { + e, err = o(e) + if err != nil { + return nil, err + } + } return e, nil } } @@ -126,14 +144,25 @@ func EagerlyValidateDeclarations(enabled bool) EnvOption { return features(featureEagerlyValidateDeclarations, enabled) } -// HomogeneousAggregateLiterals option ensures that list and map literal entry types must agree -// during type-checking. +// HomogeneousAggregateLiterals disables mixed type list and map literal values. // // Note, it is still possible to have heterogeneous aggregates when provided as variables to the // expression, as well as via conversion of well-known dynamic types, or with unchecked // expressions. 
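For illustration, a mixed-type list literal that this option now flags through the validator; a sketch, with the error text following the validator's reporting format:

env, _ := cel.NewEnv(cel.HomogeneousAggregateLiterals())
_, iss := env.Compile(`[1, 2.0, "three"]`)
// iss.Err() is non-nil and reports a mismatch such as:
//   expected type 'int' but found 'double'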
func HomogeneousAggregateLiterals() EnvOption { - return features(featureDisableDynamicAggregateLiterals, true) + return ASTValidators(ValidateHomogeneousAggregateLiterals()) +} + +// variadicLogicalOperatorASTs flatten like-operator chained logical expressions into a single +// variadic call with N-terms. This behavior is useful when serializing to a protocol buffer as +// it will reduce the number of recursive calls needed to deserialize the AST later. +// +// For example, given the following expression the call graph will be rendered accordingly: +// +// expression: a && b && c && (d || e) +// ast: call(_&&_, [a, b, c, call(_||_, [d, e])]) +func variadicLogicalOperatorASTs() EnvOption { + return features(featureVariadicLogicalASTs, true) } // Macros option extends the macro set configured in the environment. @@ -226,7 +255,12 @@ func Abbrevs(qualifiedNames ...string) EnvOption { // Note: This option must be specified after the CustomTypeProvider option when used together. func Types(addTypes ...any) EnvOption { return func(e *Env) (*Env, error) { - reg, isReg := e.provider.(ref.TypeRegistry) + var reg ref.TypeRegistry + var isReg bool + reg, isReg = e.provider.(*types.Registry) + if !isReg { + reg, isReg = e.provider.(ref.TypeRegistry) + } if !isReg { return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider) } @@ -436,6 +470,24 @@ func InterruptCheckFrequency(checkFrequency uint) ProgramOption { } } +// CostEstimatorOptions configure type-check time options for estimating expression cost. +func CostEstimatorOptions(costOpts ...checker.CostOption) EnvOption { + return func(e *Env) (*Env, error) { + e.costOptions = append(e.costOptions, costOpts...) + return e, nil + } +} + +// CostTrackerOptions configures a set of options for cost-tracking. +// +// Note, CostTrackerOptions is a no-op unless CostTracking is also enabled. +func CostTrackerOptions(costOpts ...interpreter.CostTrackerOption) ProgramOption { + return func(p *prog) (*prog, error) { + p.costOptions = append(p.costOptions, costOpts...) + return p, nil + } +} + // CostTracking enables cost tracking and registers a ActualCostEstimator that can optionally provide a runtime cost estimate for any function calls. 
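A rough sketch of wiring the runtime cost controls together; the expression and limit are illustrative, and a nil estimator falls back to the default cost estimates:

env, _ := cel.NewEnv(cel.Variable("names", cel.ListType(cel.StringType)))
ast, _ := env.Compile(`names.exists(n, n == "admin")`)
prg, _ := env.Program(ast,
	cel.CostTracking(nil), // track the actual evaluation cost
	cel.CostLimit(1000),   // abort evaluation once the budget is exceeded
)
_, det, err := prg.Eval(map[string]any{"names": []string{"alice", "admin"}})
if err == nil && det.ActualCost() != nil {
	// *det.ActualCost() holds the cost actually incurred during evaluation.
}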
func CostTracking(costEstimator interpreter.ActualCostEstimator) ProgramOption { return func(p *prog) (*prog, error) { @@ -457,25 +509,21 @@ func CostLimit(costLimit uint64) ProgramOption { } } -func fieldToCELType(field protoreflect.FieldDescriptor) (*exprpb.Type, error) { +func fieldToCELType(field protoreflect.FieldDescriptor) (*Type, error) { if field.Kind() == protoreflect.MessageKind || field.Kind() == protoreflect.GroupKind { msgName := (string)(field.Message().FullName()) - wellKnownType, found := pb.CheckedWellKnowns[msgName] - if found { - return wellKnownType, nil - } - return decls.NewObjectType(msgName), nil + return ObjectType(msgName), nil } - if primitiveType, found := pb.CheckedPrimitives[field.Kind()]; found { + if primitiveType, found := types.ProtoCELPrimitives[field.Kind()]; found { return primitiveType, nil } if field.Kind() == protoreflect.EnumKind { - return decls.Int, nil + return IntType, nil } return nil, fmt.Errorf("field %s type %s not implemented", field.FullName(), field.Kind().String()) } -func fieldToDecl(field protoreflect.FieldDescriptor) (*exprpb.Decl, error) { +func fieldToVariable(field protoreflect.FieldDescriptor) (EnvOption, error) { name := string(field.Name()) if field.IsMap() { mapKey := field.MapKey() @@ -488,20 +536,20 @@ func fieldToDecl(field protoreflect.FieldDescriptor) (*exprpb.Decl, error) { if err != nil { return nil, err } - return decls.NewVar(name, decls.NewMapType(keyType, valueType)), nil + return Variable(name, MapType(keyType, valueType)), nil } if field.IsList() { elemType, err := fieldToCELType(field) if err != nil { return nil, err } - return decls.NewVar(name, decls.NewListType(elemType)), nil + return Variable(name, ListType(elemType)), nil } celType, err := fieldToCELType(field) if err != nil { return nil, err } - return decls.NewVar(name, celType), nil + return Variable(name, celType), nil } // DeclareContextProto returns an option to extend CEL environment with declarations from the given context proto. @@ -509,23 +557,51 @@ func fieldToDecl(field protoreflect.FieldDescriptor) (*exprpb.Decl, error) { // https://github.com/google/cel-spec/blob/master/doc/langdef.md#evaluation-environment func DeclareContextProto(descriptor protoreflect.MessageDescriptor) EnvOption { return func(e *Env) (*Env, error) { - var decls []*exprpb.Decl fields := descriptor.Fields() for i := 0; i < fields.Len(); i++ { field := fields.Get(i) - decl, err := fieldToDecl(field) + variable, err := fieldToVariable(field) + if err != nil { + return nil, err + } + e, err = variable(e) if err != nil { return nil, err } - decls = append(decls, decl) } - var err error - e, err = Declarations(decls...)(e) + return Types(dynamicpb.NewMessage(descriptor))(e) + } +} + +// ContextProtoVars uses the fields of the input proto.Messages as top-level variables within an Activation. +// +// Consider using with `DeclareContextProto` to simplify variable type declarations and publishing when using +// protocol buffers. 
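As a sketch, assuming a generated protobuf message type pb.RequestContext (hypothetical) whose fields should be exposed as top-level CEL variables:

env, _ := cel.NewEnv(
	cel.DeclareContextProto((&pb.RequestContext{}).ProtoReflect().Descriptor()),
)
ast, _ := env.Compile(`user == "admin"`) // `user` is a field of the hypothetical RequestContext
prg, _ := env.Program(ast)

// Bind the message fields as variables instead of building a map by hand.
vars, _ := cel.ContextProtoVars(&pb.RequestContext{User: "admin"})
out, _, _ := prg.Eval(vars)
_ = out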
+func ContextProtoVars(ctx proto.Message) (interpreter.Activation, error) { + if ctx == nil || !ctx.ProtoReflect().IsValid() { + return interpreter.EmptyActivation(), nil + } + reg, err := types.NewRegistry(ctx) + if err != nil { + return nil, err + } + pbRef := ctx.ProtoReflect() + typeName := string(pbRef.Descriptor().FullName()) + fields := pbRef.Descriptor().Fields() + vars := make(map[string]any, fields.Len()) + for i := 0; i < fields.Len(); i++ { + field := fields.Get(i) + sft, found := reg.FindStructFieldType(typeName, field.TextName()) + if !found { + return nil, fmt.Errorf("no such field: %s", field.TextName()) + } + fieldVal, err := sft.GetFrom(ctx) if err != nil { return nil, err } - return Types(dynamicpb.NewMessage(descriptor))(e) + vars[field.TextName()] = fieldVal } + return interpreter.NewActivation(vars) } // EnableMacroCallTracking ensures that call expressions which are replaced by macros @@ -545,13 +621,6 @@ func DefaultUTCTimeZone(enabled bool) EnvOption { return features(featureDefaultUTCTimeZone, enabled) } -// OptionalTypes enable support for optional syntax and types in CEL. The optional value type makes -// it possible to express whether variables have been provided, whether a result has been computed, -// and in the future whether an object field path, map key value, or list index has a value. -func OptionalTypes() EnvOption { - return Lib(optionalLibrary{}) -} - // features sets the given feature flags. See list of Feature constants above. func features(flag int, enabled bool) EnvOption { return func(e *Env) (*Env, error) { @@ -577,3 +646,14 @@ func ParserExpressionSizeLimit(limit int) EnvOption { return e, nil } } + +func maybeInteropProvider(provider any) (types.Provider, error) { + switch p := provider.(type) { + case types.Provider: + return p, nil + case ref.TypeProvider: + return &interopCELTypeProvider{TypeProvider: p}, nil + default: + return nil, fmt.Errorf("unsupported type provider: %T", provider) + } +} diff --git a/vendor/github.com/google/cel-go/cel/program.go b/vendor/github.com/google/cel-go/cel/program.go index a630f5bfa..2dd72f750 100644 --- a/vendor/github.com/google/cel-go/cel/program.go +++ b/vendor/github.com/google/cel-go/cel/program.go @@ -19,11 +19,10 @@ import ( "fmt" "sync" + celast "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/interpreter" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) // Program is an evaluable view of an Ast. @@ -62,6 +61,9 @@ func NoVars() interpreter.Activation { // PartialVars returns a PartialActivation which contains variables and a set of AttributePattern // values that indicate variables or parts of variables whose value are not yet known. // +// This method relies on manually configured sets of missing attribute patterns. For a method which +// infers the missing variables from the input and the configured environment, use Env.PartialVars(). +// // The `vars` value may either be an interpreter.Activation or any valid input to the // interpreter.NewActivation call. func PartialVars(vars any, @@ -104,7 +106,7 @@ func (ed *EvalDetails) State() interpreter.EvalState { // ActualCost returns the tracked cost through the course of execution when `CostTracking` is enabled. // Otherwise, returns nil if the cost was not enabled. 
func (ed *EvalDetails) ActualCost() *uint64 { - if ed.costTracker == nil { + if ed == nil || ed.costTracker == nil { return nil } cost := ed.costTracker.ActualCost() @@ -128,10 +130,14 @@ type prog struct { // Interpretable configured from an Ast and aggregate decorator set based on program options. interpretable interpreter.Interpretable callCostEstimator interpreter.ActualCostEstimator + costOptions []interpreter.CostTrackerOption costLimit *uint64 } func (p *prog) clone() *prog { + costOptsCopy := make([]interpreter.CostTrackerOption, len(p.costOptions)) + copy(costOptsCopy, p.costOptions) + return &prog{ Env: p.Env, evalOpts: p.evalOpts, @@ -153,9 +159,10 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) { // Ensure the default attribute factory is set after the adapter and provider are // configured. p := &prog{ - Env: e, - decorators: []interpreter.InterpretableDecorator{}, - dispatcher: disp, + Env: e, + decorators: []interpreter.InterpretableDecorator{}, + dispatcher: disp, + costOptions: []interpreter.CostTrackerOption{}, } // Configure the program via the ProgramOption values. @@ -169,7 +176,7 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) { // Add the function bindings created via Function() options. for _, fn := range e.functions { - bindings, err := fn.bindings() + bindings, err := fn.Bindings() if err != nil { return nil, err } @@ -208,14 +215,11 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) { } // Enable compile-time checking of syntax/cardinality for string.format calls. if p.evalOpts&OptCheckStringFormat == OptCheckStringFormat { - var isValidType func(id int64, validTypes ...*types.TypeValue) (bool, error) + var isValidType func(id int64, validTypes ...ref.Type) (bool, error) if ast.IsChecked() { - isValidType = func(id int64, validTypes ...*types.TypeValue) (bool, error) { - t, err := ExprTypeToType(ast.typeMap[id]) - if err != nil { - return false, err - } - if t.kind == DynKind { + isValidType = func(id int64, validTypes ...ref.Type) (bool, error) { + t := ast.typeMap[id] + if t.Kind() == DynKind { return true, nil } for _, vt := range validTypes { @@ -223,7 +227,7 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) { if err != nil { return false, err } - if k == t.kind { + if t.Kind() == k { return true, nil } } @@ -231,7 +235,7 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) { } } else { // if the AST isn't type-checked, short-circuit validation - isValidType = func(id int64, validTypes ...*types.TypeValue) (bool, error) { + isValidType = func(id int64, validTypes ...ref.Type) (bool, error) { return true, nil } } @@ -243,6 +247,12 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) { factory := func(state interpreter.EvalState, costTracker *interpreter.CostTracker) (Program, error) { costTracker.Estimator = p.callCostEstimator costTracker.Limit = p.costLimit + for _, costOpt := range p.costOptions { + err := costOpt(costTracker) + if err != nil { + return nil, err + } + } // Limit capacity to guarantee a reallocation when calling 'append(decs, ...)' below. This // prevents the underlying memory from being shared between factory function calls causing // undesired mutations. @@ -284,10 +294,11 @@ func (p *prog) initInterpretable(ast *Ast, decs []interpreter.InterpretableDecor } // When the AST has been checked it contains metadata that can be used to speed up program execution. 
- var checked *exprpb.CheckedExpr - checked, err := AstToCheckedExpr(ast) - if err != nil { - return nil, err + checked := &celast.CheckedAST{ + Expr: ast.Expr(), + SourceInfo: ast.SourceInfo(), + TypeMap: ast.typeMap, + ReferenceMap: ast.refMap, } interpretable, err := p.interpreter.NewInterpretable(checked, decs...) if err != nil { @@ -371,7 +382,11 @@ type progGen struct { // the test is successful. func newProgGen(factory progFactory) (Program, error) { // Test the factory to make sure that configuration errors are spotted at config - _, err := factory(interpreter.NewEvalState(), &interpreter.CostTracker{}) + tracker, err := interpreter.NewCostTracker(nil) + if err != nil { + return nil, err + } + _, err = factory(interpreter.NewEvalState(), tracker) if err != nil { return nil, err } @@ -384,7 +399,10 @@ func (gen *progGen) Eval(input any) (ref.Val, *EvalDetails, error) { // new EvalState instance for each call to ensure that unique evaluations yield unique stateful // results. state := interpreter.NewEvalState() - costTracker := &interpreter.CostTracker{} + costTracker, err := interpreter.NewCostTracker(nil) + if err != nil { + return nil, nil, err + } det := &EvalDetails{state: state, costTracker: costTracker} // Generate a new instance of the interpretable using the factory configured during the call to @@ -412,7 +430,10 @@ func (gen *progGen) ContextEval(ctx context.Context, input any) (ref.Val, *EvalD // new EvalState instance for each call to ensure that unique evaluations yield unique stateful // results. state := interpreter.NewEvalState() - costTracker := &interpreter.CostTracker{} + costTracker, err := interpreter.NewCostTracker(nil) + if err != nil { + return nil, nil, err + } det := &EvalDetails{state: state, costTracker: costTracker} // Generate a new instance of the interpretable using the factory configured during the call to @@ -498,7 +519,7 @@ type evalActivation struct { // The lazy binding will only be invoked once per evaluation. // // Values which are not represented as ref.Val types on input may be adapted to a ref.Val using -// the ref.TypeAdapter configured in the environment. +// the types.Adapter configured in the environment. func (a *evalActivation) ResolveName(name string) (any, bool) { v, found := a.vars[name] if !found { diff --git a/vendor/github.com/google/cel-go/cel/validator.go b/vendor/github.com/google/cel-go/cel/validator.go new file mode 100644 index 000000000..78b311381 --- /dev/null +++ b/vendor/github.com/google/cel-go/cel/validator.go @@ -0,0 +1,388 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cel + +import ( + "fmt" + "reflect" + "regexp" + + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/overloads" + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" +) + +const ( + homogeneousValidatorName = "cel.lib.std.validate.types.homogeneous" + + // HomogeneousAggregateLiteralExemptFunctions is the ValidatorConfig key used to configure + // the set of function names which are exempt from homogeneous type checks. The expected type + // is a string list of function names. + // + // As an example, the `.format([args])` call expects the input arguments list to be + // comprised of a variety of types which correspond to the types expected by the format control + // clauses; however, all other uses of a mixed element type list, would be unexpected. + HomogeneousAggregateLiteralExemptFunctions = homogeneousValidatorName + ".exempt" +) + +// ASTValidators configures a set of ASTValidator instances into the target environment. +// +// Validators are applied in the order in which the are specified and are treated as singletons. +// The same ASTValidator with a given name will not be applied more than once. +func ASTValidators(validators ...ASTValidator) EnvOption { + return func(e *Env) (*Env, error) { + for _, v := range validators { + if !e.HasValidator(v.Name()) { + e.validators = append(e.validators, v) + } + } + return e, nil + } +} + +// ASTValidator defines a singleton interface for validating a type-checked Ast against an environment. +// +// Note: the Issues argument is mutable in the sense that it is intended to collect errors which will be +// reported to the caller. +type ASTValidator interface { + // Name returns the name of the validator. Names must be unique. + Name() string + + // Validate validates a given Ast within an Environment and collects a set of potential issues. + // + // The ValidatorConfig is generated from the set of ASTValidatorConfigurer instances prior to + // the invocation of the Validate call. The expectation is that the validator configuration + // is created in sequence and immutable once provided to the Validate call. + // + // See individual validators for more information on their configuration keys and configuration + // properties. + Validate(*Env, ValidatorConfig, *ast.CheckedAST, *Issues) +} + +// ValidatorConfig provides an accessor method for querying validator configuration state. +type ValidatorConfig interface { + GetOrDefault(name string, value any) any +} + +// MutableValidatorConfig provides mutation methods for querying and updating validator configuration +// settings. +type MutableValidatorConfig interface { + ValidatorConfig + Set(name string, value any) error +} + +// ASTValidatorConfigurer indicates that this object, currently expected to be an ASTValidator, +// participates in validator configuration settings. +// +// This interface may be split from the expectation of being an ASTValidator instance in the future. +type ASTValidatorConfigurer interface { + Configure(MutableValidatorConfig) error +} + +// validatorConfig implements the ValidatorConfig and MutableValidatorConfig interfaces. +type validatorConfig struct { + data map[string]any +} + +// newValidatorConfig initializes the validator config with default values for core CEL validators. 
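To make the interface concrete, a minimal custom validator sketch that flags any use of division; the name and policy are illustrative, and imports of the cel, common/ast, and common/operators packages are assumed:

type noDivisionValidator struct{}

// Name returns a unique identifier for the validator.
func (noDivisionValidator) Name() string { return "example.validate.no_division" }

// Validate reports an issue for every division call found in the checked AST.
func (noDivisionValidator) Validate(_ *cel.Env, _ cel.ValidatorConfig, a *ast.CheckedAST, iss *cel.Issues) {
	root := ast.NavigateCheckedAST(a)
	for _, call := range ast.MatchDescendants(root, ast.FunctionMatcher(operators.Divide)) {
		iss.ReportErrorAtID(call.ID(), "division is not permitted in this environment")
	}
}

// Installed like any other validator:
//   env, _ := cel.NewEnv(cel.ASTValidators(noDivisionValidator{}))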
+func newValidatorConfig() *validatorConfig { + return &validatorConfig{ + data: map[string]any{ + HomogeneousAggregateLiteralExemptFunctions: []string{}, + }, + } +} + +// GetOrDefault returns the configured value for the name, if present, else the input default value. +// +// Note, the type-agreement between the input default and configured value is not checked on read. +func (config *validatorConfig) GetOrDefault(name string, value any) any { + v, found := config.data[name] + if !found { + return value + } + return v +} + +// Set configures a validator option with the given name and value. +// +// If the value had previously been set, the new value must have the same reflection type as the old one, +// or the call will error. +func (config *validatorConfig) Set(name string, value any) error { + v, found := config.data[name] + if found && reflect.TypeOf(v) != reflect.TypeOf(value) { + return fmt.Errorf("incompatible configuration type for %s, got %T, wanted %T", name, value, v) + } + config.data[name] = value + return nil +} + +// ExtendedValidations collects a set of common AST validations which reduce the likelihood of runtime errors. +// +// - Validate duration and timestamp literals +// - Ensure regex strings are valid +// - Disable mixed type list and map literals +func ExtendedValidations() EnvOption { + return ASTValidators( + ValidateDurationLiterals(), + ValidateTimestampLiterals(), + ValidateRegexLiterals(), + ValidateHomogeneousAggregateLiterals(), + ) +} + +// ValidateDurationLiterals ensures that duration literal arguments are valid immediately after type-check. +func ValidateDurationLiterals() ASTValidator { + return newFormatValidator(overloads.TypeConvertDuration, 0, evalCall) +} + +// ValidateTimestampLiterals ensures that timestamp literal arguments are valid immediately after type-check. +func ValidateTimestampLiterals() ASTValidator { + return newFormatValidator(overloads.TypeConvertTimestamp, 0, evalCall) +} + +// ValidateRegexLiterals ensures that regex patterns are validated after type-check. +func ValidateRegexLiterals() ASTValidator { + return newFormatValidator(overloads.Matches, 0, compileRegex) +} + +// ValidateHomogeneousAggregateLiterals checks that all list and map literals entries have the same types, i.e. +// no mixed list element types or mixed map key or map value types. +// +// Note: the string format call relies on a mixed element type list for ease of use, so this check skips all +// literals which occur within string format calls. +func ValidateHomogeneousAggregateLiterals() ASTValidator { + return homogeneousAggregateLiteralValidator{} +} + +// ValidateComprehensionNestingLimit ensures that comprehension nesting does not exceed the specified limit. +// +// This validator can be useful for preventing arbitrarily nested comprehensions which can take high polynomial +// time to complete. +// +// Note, this limit does not apply to comprehensions with an empty iteration range, as these comprehensions have +// no actual looping cost. The cel.bind() utilizes the comprehension structure to perform local variable +// assignments and supplies an empty iteration range, so they won't count against the nesting limit either. 
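A short configuration sketch combining the bundled validators described above; the nesting limit of 2 and the expression are illustrative:

env, _ := cel.NewEnv(
	cel.Variable("values", cel.ListType(cel.ListType(cel.IntType))),
	cel.ExtendedValidations(), // duration, timestamp, regex, and homogeneous-literal checks
	cel.ASTValidators(cel.ValidateComprehensionNestingLimit(2)),
)
// A triply nested comprehension should now be flagged at Compile time.
_, iss := env.Compile(`values.all(row, row.all(v, [v].all(x, x > 0)))`)
// iss.Err() != nil when the nesting limit is exceeded.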
+func ValidateComprehensionNestingLimit(limit int) ASTValidator { + return nestingLimitValidator{limit: limit} +} + +type argChecker func(env *Env, call, arg ast.NavigableExpr) error + +func newFormatValidator(funcName string, argNum int, check argChecker) formatValidator { + return formatValidator{ + funcName: funcName, + check: check, + argNum: argNum, + } +} + +type formatValidator struct { + funcName string + argNum int + check argChecker +} + +// Name returns the unique name of this function format validator. +func (v formatValidator) Name() string { + return fmt.Sprintf("cel.lib.std.validate.functions.%s", v.funcName) +} + +// Validate searches the AST for uses of a given function name with a constant argument and performs a check +// on whether the argument is a valid literal value. +func (v formatValidator) Validate(e *Env, _ ValidatorConfig, a *ast.CheckedAST, iss *Issues) { + root := ast.NavigateCheckedAST(a) + funcCalls := ast.MatchDescendants(root, ast.FunctionMatcher(v.funcName)) + for _, call := range funcCalls { + callArgs := call.AsCall().Args() + if len(callArgs) <= v.argNum { + continue + } + litArg := callArgs[v.argNum] + if litArg.Kind() != ast.LiteralKind { + continue + } + if err := v.check(e, call, litArg); err != nil { + iss.ReportErrorAtID(litArg.ID(), "invalid %s argument", v.funcName) + } + } +} + +func evalCall(env *Env, call, arg ast.NavigableExpr) error { + ast := ParsedExprToAst(&exprpb.ParsedExpr{Expr: call.ToExpr()}) + prg, err := env.Program(ast) + if err != nil { + return err + } + _, _, err = prg.Eval(NoVars()) + return err +} + +func compileRegex(_ *Env, _, arg ast.NavigableExpr) error { + pattern := arg.AsLiteral().Value().(string) + _, err := regexp.Compile(pattern) + return err +} + +type homogeneousAggregateLiteralValidator struct{} + +// Name returns the unique name of the homogeneous type validator. +func (homogeneousAggregateLiteralValidator) Name() string { + return homogeneousValidatorName +} + +// Configure implements the ASTValidatorConfigurer interface and currently sets the list of standard +// and exempt functions from homogeneous aggregate literal checks. +// +// TODO: Move this call into the string.format() ASTValidator once ported. +func (homogeneousAggregateLiteralValidator) Configure(c MutableValidatorConfig) error { + emptyList := []string{} + exemptFunctions := c.GetOrDefault(HomogeneousAggregateLiteralExemptFunctions, emptyList).([]string) + exemptFunctions = append(exemptFunctions, "format") + return c.Set(HomogeneousAggregateLiteralExemptFunctions, exemptFunctions) +} + +// Validate validates that all lists and map literals have homogeneous types, i.e. don't contain dyn types. +// +// This validator makes an exception for list and map literals which occur at any level of nesting within +// string format calls. 
+func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig, a *ast.CheckedAST, iss *Issues) { + var exemptedFunctions []string + exemptedFunctions = c.GetOrDefault(HomogeneousAggregateLiteralExemptFunctions, exemptedFunctions).([]string) + root := ast.NavigateCheckedAST(a) + listExprs := ast.MatchDescendants(root, ast.KindMatcher(ast.ListKind)) + for _, listExpr := range listExprs { + if inExemptFunction(listExpr, exemptedFunctions) { + continue + } + l := listExpr.AsList() + elements := l.Elements() + optIndices := l.OptionalIndices() + var elemType *Type + for i, e := range elements { + et := e.Type() + if isOptionalIndex(i, optIndices) { + et = et.Parameters()[0] + } + if elemType == nil { + elemType = et + continue + } + if !elemType.IsEquivalentType(et) { + v.typeMismatch(iss, e.ID(), elemType, et) + break + } + } + } + mapExprs := ast.MatchDescendants(root, ast.KindMatcher(ast.MapKind)) + for _, mapExpr := range mapExprs { + if inExemptFunction(mapExpr, exemptedFunctions) { + continue + } + m := mapExpr.AsMap() + entries := m.Entries() + var keyType, valType *Type + for _, e := range entries { + key, val := e.Key(), e.Value() + kt, vt := key.Type(), val.Type() + if e.IsOptional() { + vt = vt.Parameters()[0] + } + if keyType == nil && valType == nil { + keyType, valType = kt, vt + continue + } + if !keyType.IsEquivalentType(kt) { + v.typeMismatch(iss, key.ID(), keyType, kt) + } + if !valType.IsEquivalentType(vt) { + v.typeMismatch(iss, val.ID(), valType, vt) + } + } + } +} + +func inExemptFunction(e ast.NavigableExpr, exemptFunctions []string) bool { + if parent, found := e.Parent(); found { + if parent.Kind() == ast.CallKind { + fnName := parent.AsCall().FunctionName() + for _, exempt := range exemptFunctions { + if exempt == fnName { + return true + } + } + } + if parent.Kind() == ast.ListKind || parent.Kind() == ast.MapKind { + return inExemptFunction(parent, exemptFunctions) + } + } + return false +} + +func isOptionalIndex(i int, optIndices []int32) bool { + for _, optInd := range optIndices { + if i == int(optInd) { + return true + } + } + return false +} + +func (homogeneousAggregateLiteralValidator) typeMismatch(iss *Issues, id int64, expected, actual *Type) { + iss.ReportErrorAtID(id, "expected type '%s' but found '%s'", FormatCELType(expected), FormatCELType(actual)) +} + +type nestingLimitValidator struct { + limit int +} + +func (v nestingLimitValidator) Name() string { + return "cel.lib.std.validate.comprehension_nesting_limit" +} + +func (v nestingLimitValidator) Validate(e *Env, _ ValidatorConfig, a *ast.CheckedAST, iss *Issues) { + root := ast.NavigateCheckedAST(a) + comprehensions := ast.MatchDescendants(root, ast.KindMatcher(ast.ComprehensionKind)) + if len(comprehensions) <= v.limit { + return + } + for _, comp := range comprehensions { + count := 0 + e := comp + hasParent := true + for hasParent { + // When the expression is not a comprehension, continue to the next ancestor. + if e.Kind() != ast.ComprehensionKind { + e, hasParent = e.Parent() + continue + } + // When the comprehension has an empty range, continue to the next ancestor + // as this comprehension does not have any associated cost. + iterRange := e.AsComprehension().IterRange() + if iterRange.Kind() == ast.ListKind && iterRange.AsList().Size() == 0 { + e, hasParent = e.Parent() + continue + } + // Otherwise check the nesting limit. 
+ count++ + if count > v.limit { + iss.ReportErrorAtID(comp.ID(), "comprehension exceeds nesting limit") + break + } + e, hasParent = e.Parent() + } + } +} diff --git a/vendor/github.com/google/cel-go/checker/BUILD.bazel b/vendor/github.com/google/cel-go/checker/BUILD.bazel index 1c6ddb7f7..0459d3523 100644 --- a/vendor/github.com/google/cel-go/checker/BUILD.bazel +++ b/vendor/github.com/google/cel-go/checker/BUILD.bazel @@ -11,9 +11,11 @@ go_library( "cost.go", "env.go", "errors.go", + "format.go", "mapping.go", "options.go", "printer.go", + "scopes.go", "standard.go", "types.go", ], @@ -22,10 +24,13 @@ go_library( deps = [ "//checker/decls:go_default_library", "//common:go_default_library", + "//common/ast:go_default_library", "//common/containers:go_default_library", "//common/debug:go_default_library", + "//common/decls:go_default_library", "//common/operators:go_default_library", "//common/overloads:go_default_library", + "//common/stdlib:go_default_library", "//common/types:go_default_library", "//common/types/pb:go_default_library", "//common/types/ref:go_default_library", @@ -44,6 +49,7 @@ go_test( "checker_test.go", "cost_test.go", "env_test.go", + "format_test.go", ], embed = [ ":go_default_library", diff --git a/vendor/github.com/google/cel-go/checker/checker.go b/vendor/github.com/google/cel-go/checker/checker.go index 257cffecf..720e4fa96 100644 --- a/vendor/github.com/google/cel-go/checker/checker.go +++ b/vendor/github.com/google/cel-go/checker/checker.go @@ -18,15 +18,13 @@ package checker import ( "fmt" - "reflect" - "github.com/google/cel-go/checker/decls" "github.com/google/cel-go/common" + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/containers" + "github.com/google/cel-go/common/decls" "github.com/google/cel-go/common/operators" - "github.com/google/cel-go/common/types/ref" - - "google.golang.org/protobuf/proto" + "github.com/google/cel-go/common/types" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) @@ -37,8 +35,8 @@ type checker struct { mappings *mapping freeTypeVarCounter int sourceInfo *exprpb.SourceInfo - types map[int64]*exprpb.Type - references map[int64]*exprpb.Reference + types map[int64]*types.Type + references map[int64]*ast.ReferenceInfo } // Check performs type checking, giving a typed AST. @@ -47,40 +45,38 @@ type checker struct { // descriptions of protocol buffers, and a registry for errors. // Returns a CheckedExpr proto, which might not be usable if // there are errors in the error registry. -func Check(parsedExpr *exprpb.ParsedExpr, - source common.Source, - env *Env) (*exprpb.CheckedExpr, *common.Errors) { +func Check(parsedExpr *exprpb.ParsedExpr, source common.Source, env *Env) (*ast.CheckedAST, *common.Errors) { + errs := common.NewErrors(source) c := checker{ env: env, - errors: &typeErrors{common.NewErrors(source)}, + errors: &typeErrors{errs: errs}, mappings: newMapping(), freeTypeVarCounter: 0, sourceInfo: parsedExpr.GetSourceInfo(), - types: make(map[int64]*exprpb.Type), - references: make(map[int64]*exprpb.Reference), + types: make(map[int64]*types.Type), + references: make(map[int64]*ast.ReferenceInfo), } c.check(parsedExpr.GetExpr()) // Walk over the final type map substituting any type parameters either by their bound value or // by DYN. 
- m := make(map[int64]*exprpb.Type) - for k, v := range c.types { - m[k] = substitute(c.mappings, v, true) + m := make(map[int64]*types.Type) + for id, t := range c.types { + m[id] = substitute(c.mappings, t, true) } - return &exprpb.CheckedExpr{ + return &ast.CheckedAST{ Expr: parsedExpr.GetExpr(), SourceInfo: parsedExpr.GetSourceInfo(), TypeMap: m, ReferenceMap: c.references, - }, c.errors.Errors + }, errs } func (c *checker) check(e *exprpb.Expr) { if e == nil { return } - switch e.GetExprKind().(type) { case *exprpb.Expr_ConstExpr: literal := e.GetConstExpr() @@ -113,53 +109,51 @@ func (c *checker) check(e *exprpb.Expr) { case *exprpb.Expr_ComprehensionExpr: c.checkComprehension(e) default: - c.errors.ReportError( - c.location(e), "Unrecognized ast type: %v", reflect.TypeOf(e)) + c.errors.unexpectedASTType(e.GetId(), c.location(e), e) } } func (c *checker) checkInt64Literal(e *exprpb.Expr) { - c.setType(e, decls.Int) + c.setType(e, types.IntType) } func (c *checker) checkUint64Literal(e *exprpb.Expr) { - c.setType(e, decls.Uint) + c.setType(e, types.UintType) } func (c *checker) checkStringLiteral(e *exprpb.Expr) { - c.setType(e, decls.String) + c.setType(e, types.StringType) } func (c *checker) checkBytesLiteral(e *exprpb.Expr) { - c.setType(e, decls.Bytes) + c.setType(e, types.BytesType) } func (c *checker) checkDoubleLiteral(e *exprpb.Expr) { - c.setType(e, decls.Double) + c.setType(e, types.DoubleType) } func (c *checker) checkBoolLiteral(e *exprpb.Expr) { - c.setType(e, decls.Bool) + c.setType(e, types.BoolType) } func (c *checker) checkNullLiteral(e *exprpb.Expr) { - c.setType(e, decls.Null) + c.setType(e, types.NullType) } func (c *checker) checkIdent(e *exprpb.Expr) { identExpr := e.GetIdentExpr() // Check to see if the identifier is declared. if ident := c.env.LookupIdent(identExpr.GetName()); ident != nil { - c.setType(e, ident.GetIdent().GetType()) - c.setReference(e, newIdentReference(ident.GetName(), ident.GetIdent().GetValue())) + c.setType(e, ident.Type()) + c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value())) // Overwrite the identifier with its fully qualified name. - identExpr.Name = ident.GetName() + identExpr.Name = ident.Name() return } - c.setType(e, decls.Error) - c.errors.undeclaredReference( - c.location(e), c.env.container.Name(), identExpr.GetName()) + c.setType(e, types.ErrorType) + c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), identExpr.GetName()) } func (c *checker) checkSelect(e *exprpb.Expr) { @@ -174,9 +168,9 @@ func (c *checker) checkSelect(e *exprpb.Expr) { // Rewrite the node to be a variable reference to the resolved fully-qualified // variable name. 
- c.setType(e, ident.GetIdent().GetType()) - c.setReference(e, newIdentReference(ident.GetName(), ident.GetIdent().GetValue())) - identName := ident.GetName() + c.setType(e, ident.Type()) + c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value())) + identName := ident.Name() e.ExprKind = &exprpb.Expr_IdentExpr{ IdentExpr: &exprpb.Expr_Ident{ Name: identName, @@ -188,7 +182,7 @@ func (c *checker) checkSelect(e *exprpb.Expr) { resultType := c.checkSelectField(e, sel.GetOperand(), sel.GetField(), false) if sel.TestOnly { - resultType = decls.Bool + resultType = types.BoolType } c.setType(e, substitute(c.mappings, resultType, false)) } @@ -200,16 +194,17 @@ func (c *checker) checkOptSelect(e *exprpb.Expr) { field := call.GetArgs()[1] fieldName, isString := maybeUnwrapString(field) if !isString { - c.errors.ReportError(c.location(field), "unsupported optional field selection: %v", field) + c.errors.notAnOptionalFieldSelection(field.GetId(), c.location(field), field) return } // Perform type-checking using the field selection logic. resultType := c.checkSelectField(e, operand, fieldName, true) c.setType(e, substitute(c.mappings, resultType, false)) + c.setReference(e, ast.NewFunctionReference("select_optional_field")) } -func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, optional bool) *exprpb.Type { +func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, optional bool) *types.Type { // Interpret as field selection, first traversing down the operand. c.check(operand) operandType := substitute(c.mappings, c.getType(operand), false) @@ -218,38 +213,37 @@ func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, option targetType, isOpt := maybeUnwrapOptional(operandType) // Assume error type by default as most types do not support field selection. - resultType := decls.Error - switch kindOf(targetType) { - case kindMap: + resultType := types.ErrorType + switch targetType.Kind() { + case types.MapKind: // Maps yield their value type as the selection result type. - mapType := targetType.GetMapType() - resultType = mapType.GetValueType() - case kindObject: + resultType = targetType.Parameters()[1] + case types.StructKind: // Objects yield their field type declaration as the selection result type, but only if // the field is defined. messageType := targetType - if fieldType, found := c.lookupFieldType(c.location(e), messageType.GetMessageType(), field); found { - resultType = fieldType.Type + if fieldType, found := c.lookupFieldType(e.GetId(), messageType.TypeName(), field); found { + resultType = fieldType } - case kindTypeParam: + case types.TypeParamKind: // Set the operand type to DYN to prevent assignment to a potentially incorrect type // at a later point in type-checking. The isAssignable call will update the type // substitutions for the type param under the covers. - c.isAssignable(decls.Dyn, targetType) + c.isAssignable(types.DynType, targetType) // Also, set the result type to DYN. - resultType = decls.Dyn + resultType = types.DynType default: // Dynamic / error values are treated as DYN type. Errors are handled this way as well // in order to allow forward progress on the check. if !isDynOrError(targetType) { - c.errors.typeDoesNotSupportFieldSelection(c.location(e), targetType) + c.errors.typeDoesNotSupportFieldSelection(e.GetId(), c.location(e), targetType) } - resultType = decls.Dyn + resultType = types.DynType } // If the target type was optional coming in, then the result must be optional going out. 
if isOpt || optional { - return decls.NewOptionalType(resultType) + return types.NewOptionalType(resultType) } return resultType } @@ -277,15 +271,14 @@ func (c *checker) checkCall(e *exprpb.Expr) { // Check for the existence of the function. fn := c.env.LookupFunction(fnName) if fn == nil { - c.errors.undeclaredReference( - c.location(e), c.env.container.Name(), fnName) - c.setType(e, decls.Error) + c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), fnName) + c.setType(e, types.ErrorType) return } // Overwrite the function name with its fully qualified resolved name. - call.Function = fn.GetName() + call.Function = fn.Name() // Check to see whether the overload resolves. - c.resolveOverloadOrError(c.location(e), e, fn, nil, args) + c.resolveOverloadOrError(e, fn, nil, args) return } @@ -303,8 +296,8 @@ func (c *checker) checkCall(e *exprpb.Expr) { // be an inaccurate representation of the desired evaluation behavior. // Overwrite with fully-qualified resolved function name sans receiver target. call.Target = nil - call.Function = fn.GetName() - c.resolveOverloadOrError(c.location(e), e, fn, nil, args) + call.Function = fn.Name() + c.resolveOverloadOrError(e, fn, nil, args) return } } @@ -314,22 +307,21 @@ func (c *checker) checkCall(e *exprpb.Expr) { fn := c.env.LookupFunction(fnName) // Function found, attempt overload resolution. if fn != nil { - c.resolveOverloadOrError(c.location(e), e, fn, target, args) + c.resolveOverloadOrError(e, fn, target, args) return } // Function name not declared, record error. - c.errors.undeclaredReference(c.location(e), c.env.container.Name(), fnName) + c.setType(e, types.ErrorType) + c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), fnName) } func (c *checker) resolveOverloadOrError( - loc common.Location, - e *exprpb.Expr, - fn *exprpb.Decl, target *exprpb.Expr, args []*exprpb.Expr) { + e *exprpb.Expr, fn *decls.FunctionDecl, target *exprpb.Expr, args []*exprpb.Expr) { // Attempt to resolve the overload. - resolution := c.resolveOverload(loc, fn, target, args) + resolution := c.resolveOverload(e, fn, target, args) // No such overload, error noted in the resolveOverload call, type recorded here. if resolution == nil { - c.setType(e, decls.Error) + c.setType(e, types.ErrorType) return } // Overload found. @@ -338,10 +330,9 @@ func (c *checker) resolveOverloadOrError( } func (c *checker) resolveOverload( - loc common.Location, - fn *exprpb.Decl, target *exprpb.Expr, args []*exprpb.Expr) *overloadResolution { + call *exprpb.Expr, fn *decls.FunctionDecl, target *exprpb.Expr, args []*exprpb.Expr) *overloadResolution { - var argTypes []*exprpb.Type + var argTypes []*types.Type if target != nil { argTypes = append(argTypes, c.getType(target)) } @@ -349,55 +340,75 @@ func (c *checker) resolveOverload( argTypes = append(argTypes, c.getType(arg)) } - var resultType *exprpb.Type - var checkedRef *exprpb.Reference - for _, overload := range fn.GetFunction().GetOverloads() { + var resultType *types.Type + var checkedRef *ast.ReferenceInfo + for _, overload := range fn.OverloadDecls() { // Determine whether the overload is currently considered. - if c.env.isOverloadDisabled(overload.GetOverloadId()) { + if c.env.isOverloadDisabled(overload.ID()) { continue } // Ensure the call style for the overload matches. 
- if (target == nil && overload.GetIsInstanceFunction()) || - (target != nil && !overload.GetIsInstanceFunction()) { + if (target == nil && overload.IsMemberFunction()) || + (target != nil && !overload.IsMemberFunction()) { // not a compatible call style. continue } - overloadType := decls.NewFunctionType(overload.ResultType, overload.Params...) - if len(overload.GetTypeParams()) > 0 { + // Alternative type-checking behavior when the logical operators are compacted into + // variadic AST representations. + if fn.Name() == operators.LogicalAnd || fn.Name() == operators.LogicalOr { + checkedRef = ast.NewFunctionReference(overload.ID()) + for i, argType := range argTypes { + if !c.isAssignable(argType, types.BoolType) { + c.errors.typeMismatch( + args[i].GetId(), + c.locationByID(args[i].GetId()), + types.BoolType, + argType) + resultType = types.ErrorType + } + } + if isError(resultType) { + return nil + } + return newResolution(checkedRef, types.BoolType) + } + + overloadType := newFunctionType(overload.ResultType(), overload.ArgTypes()...) + typeParams := overload.TypeParams() + if len(typeParams) != 0 { // Instantiate overload's type with fresh type variables. substitutions := newMapping() - for _, typePar := range overload.GetTypeParams() { - substitutions.add(decls.NewTypeParamType(typePar), c.newTypeVar()) + for _, typePar := range typeParams { + substitutions.add(types.NewTypeParamType(typePar), c.newTypeVar()) } overloadType = substitute(substitutions, overloadType, false) } - candidateArgTypes := overloadType.GetFunction().GetArgTypes() + candidateArgTypes := overloadType.Parameters()[1:] if c.isAssignableList(argTypes, candidateArgTypes) { if checkedRef == nil { - checkedRef = newFunctionReference(overload.GetOverloadId()) + checkedRef = ast.NewFunctionReference(overload.ID()) } else { - checkedRef.OverloadId = append(checkedRef.GetOverloadId(), overload.GetOverloadId()) + checkedRef.AddOverload(overload.ID()) } // First matching overload, determines result type. 
- fnResultType := substitute(c.mappings, overloadType.GetFunction().GetResultType(), false) + fnResultType := substitute(c.mappings, overloadType.Parameters()[0], false) if resultType == nil { resultType = fnResultType - } else if !isDyn(resultType) && !proto.Equal(fnResultType, resultType) { - resultType = decls.Dyn + } else if !isDyn(resultType) && !fnResultType.IsExactType(resultType) { + resultType = types.DynType } } } if resultType == nil { - for i, arg := range argTypes { - argTypes[i] = substitute(c.mappings, arg, true) + for i, argType := range argTypes { + argTypes[i] = substitute(c.mappings, argType, true) } - c.errors.noMatchingOverload(loc, fn.GetName(), argTypes, target != nil) - resultType = decls.Error + c.errors.noMatchingOverload(call.GetId(), c.location(call), fn.Name(), argTypes, target != nil) return nil } @@ -406,7 +417,7 @@ func (c *checker) resolveOverload( func (c *checker) checkCreateList(e *exprpb.Expr) { create := e.GetListExpr() - var elemsType *exprpb.Type + var elemsType *types.Type optionalIndices := create.GetOptionalIndices() optionals := make(map[int32]bool, len(optionalIndices)) for _, optInd := range optionalIndices { @@ -419,16 +430,16 @@ func (c *checker) checkCreateList(e *exprpb.Expr) { var isOptional bool elemType, isOptional = maybeUnwrapOptional(elemType) if !isOptional && !isDyn(elemType) { - c.errors.typeMismatch(c.location(e), decls.NewOptionalType(elemType), elemType) + c.errors.typeMismatch(e.GetId(), c.location(e), types.NewOptionalType(elemType), elemType) } } - elemsType = c.joinTypes(c.location(e), elemsType, elemType) + elemsType = c.joinTypes(e, elemsType, elemType) } if elemsType == nil { // If the list is empty, assign free type var to elem type. elemsType = c.newTypeVar() } - c.setType(e, decls.NewListType(elemsType)) + c.setType(e, types.NewListType(elemsType)) } func (c *checker) checkCreateStruct(e *exprpb.Expr) { @@ -442,12 +453,12 @@ func (c *checker) checkCreateStruct(e *exprpb.Expr) { func (c *checker) checkCreateMap(e *exprpb.Expr) { mapVal := e.GetStructExpr() - var mapKeyType *exprpb.Type - var mapValueType *exprpb.Type + var mapKeyType *types.Type + var mapValueType *types.Type for _, ent := range mapVal.GetEntries() { key := ent.GetMapKey() c.check(key) - mapKeyType = c.joinTypes(c.location(key), mapKeyType, c.getType(key)) + mapKeyType = c.joinTypes(key, mapKeyType, c.getType(key)) val := ent.GetValue() c.check(val) @@ -456,50 +467,54 @@ func (c *checker) checkCreateMap(e *exprpb.Expr) { var isOptional bool valType, isOptional = maybeUnwrapOptional(valType) if !isOptional && !isDyn(valType) { - c.errors.typeMismatch(c.location(val), decls.NewOptionalType(valType), valType) + c.errors.typeMismatch(val.GetId(), c.location(val), types.NewOptionalType(valType), valType) } } - mapValueType = c.joinTypes(c.location(val), mapValueType, valType) + mapValueType = c.joinTypes(val, mapValueType, valType) } if mapKeyType == nil { // If the map is empty, assign free type variables to typeKey and value type. mapKeyType = c.newTypeVar() mapValueType = c.newTypeVar() } - c.setType(e, decls.NewMapType(mapKeyType, mapValueType)) + c.setType(e, types.NewMapType(mapKeyType, mapValueType)) } func (c *checker) checkCreateMessage(e *exprpb.Expr) { msgVal := e.GetStructExpr() // Determine the type of the message. 
- messageType := decls.Error - decl := c.env.LookupIdent(msgVal.GetMessageName()) - if decl == nil { + resultType := types.ErrorType + ident := c.env.LookupIdent(msgVal.GetMessageName()) + if ident == nil { c.errors.undeclaredReference( - c.location(e), c.env.container.Name(), msgVal.GetMessageName()) + e.GetId(), c.location(e), c.env.container.Name(), msgVal.GetMessageName()) + c.setType(e, types.ErrorType) return } // Ensure the type name is fully qualified in the AST. - msgVal.MessageName = decl.GetName() - c.setReference(e, newIdentReference(decl.GetName(), nil)) - ident := decl.GetIdent() - identKind := kindOf(ident.GetType()) - if identKind != kindError { - if identKind != kindType { - c.errors.notAType(c.location(e), ident.GetType()) + typeName := ident.Name() + msgVal.MessageName = typeName + c.setReference(e, ast.NewIdentReference(ident.Name(), nil)) + identKind := ident.Type().Kind() + if identKind != types.ErrorKind { + if identKind != types.TypeKind { + c.errors.notAType(e.GetId(), c.location(e), ident.Type().DeclaredTypeName()) } else { - messageType = ident.GetType().GetType() - if kindOf(messageType) != kindObject { - c.errors.notAMessageType(c.location(e), messageType) - messageType = decls.Error + resultType = ident.Type().Parameters()[0] + // Backwards compatibility test between well-known types and message types + // In this context, the type is being instantiated by its protobuf name which + // is not ideal or recommended, but some users expect this to work. + if isWellKnownType(resultType) { + typeName = getWellKnownTypeName(resultType) + } else if resultType.Kind() == types.StructKind { + typeName = resultType.DeclaredTypeName() + } else { + c.errors.notAMessageType(e.GetId(), c.location(e), resultType.DeclaredTypeName()) + resultType = types.ErrorType } } } - if isObjectWellKnownType(messageType) { - c.setType(e, getObjectWellKnownType(messageType)) - } else { - c.setType(e, messageType) - } + c.setType(e, resultType) // Check the field initializers. 
for _, ent := range msgVal.GetEntries() { @@ -507,10 +522,10 @@ func (c *checker) checkCreateMessage(e *exprpb.Expr) { value := ent.GetValue() c.check(value) - fieldType := decls.Error - ft, found := c.lookupFieldType(c.locationByID(ent.GetId()), messageType.GetMessageType(), field) + fieldType := types.ErrorType + ft, found := c.lookupFieldType(ent.GetId(), typeName, field) if found { - fieldType = ft.Type + fieldType = ft } valType := c.getType(value) @@ -518,11 +533,11 @@ func (c *checker) checkCreateMessage(e *exprpb.Expr) { var isOptional bool valType, isOptional = maybeUnwrapOptional(valType) if !isOptional && !isDyn(valType) { - c.errors.typeMismatch(c.location(value), decls.NewOptionalType(valType), valType) + c.errors.typeMismatch(value.GetId(), c.location(value), types.NewOptionalType(valType), valType) } } if !c.isAssignable(fieldType, valType) { - c.errors.fieldTypeMismatch(c.locationByID(ent.Id), field, fieldType, valType) + c.errors.fieldTypeMismatch(ent.GetId(), c.locationByID(ent.GetId()), field, fieldType, valType) } } } @@ -533,36 +548,36 @@ func (c *checker) checkComprehension(e *exprpb.Expr) { c.check(comp.GetAccuInit()) accuType := c.getType(comp.GetAccuInit()) rangeType := substitute(c.mappings, c.getType(comp.GetIterRange()), false) - var varType *exprpb.Type + var varType *types.Type - switch kindOf(rangeType) { - case kindList: - varType = rangeType.GetListType().GetElemType() - case kindMap: + switch rangeType.Kind() { + case types.ListKind: + varType = rangeType.Parameters()[0] + case types.MapKind: // Ranges over the keys. - varType = rangeType.GetMapType().GetKeyType() - case kindDyn, kindError, kindTypeParam: + varType = rangeType.Parameters()[0] + case types.DynKind, types.ErrorKind, types.TypeParamKind: // Set the range type to DYN to prevent assignment to a potentially incorrect type // at a later point in type-checking. The isAssignable call will update the type // substitutions for the type param under the covers. - c.isAssignable(decls.Dyn, rangeType) + c.isAssignable(types.DynType, rangeType) // Set the range iteration variable to type DYN as well. - varType = decls.Dyn + varType = types.DynType default: - c.errors.notAComprehensionRange(c.location(comp.GetIterRange()), rangeType) - varType = decls.Error + c.errors.notAComprehensionRange(comp.GetIterRange().GetId(), c.location(comp.GetIterRange()), rangeType) + varType = types.ErrorType } // Create a scope for the comprehension since it has a local accumulation variable. // This scope will contain the accumulation variable used to compute the result. c.env = c.env.enterScope() - c.env.Add(decls.NewVar(comp.GetAccuVar(), accuType)) + c.env.AddIdents(decls.NewVariable(comp.GetAccuVar(), accuType)) // Create a block scope for the loop. c.env = c.env.enterScope() - c.env.Add(decls.NewVar(comp.GetIterVar(), varType)) + c.env.AddIdents(decls.NewVariable(comp.GetIterVar(), varType)) // Check the variable references in the condition and step. c.check(comp.GetLoopCondition()) - c.assertType(comp.GetLoopCondition(), decls.Bool) + c.assertType(comp.GetLoopCondition(), types.BoolType) c.check(comp.GetLoopStep()) c.assertType(comp.GetLoopStep(), accuType) // Exit the loop's block scope before checking the result. @@ -574,9 +589,7 @@ func (c *checker) checkComprehension(e *exprpb.Expr) { } // Checks compatibility of joined types, and returns the most general common type. 
-func (c *checker) joinTypes(loc common.Location, - previous *exprpb.Type, - current *exprpb.Type) *exprpb.Type { +func (c *checker) joinTypes(e *exprpb.Expr, previous, current *types.Type) *types.Type { if previous == nil { return current } @@ -584,23 +597,23 @@ func (c *checker) joinTypes(loc common.Location, return mostGeneral(previous, current) } if c.dynAggregateLiteralElementTypesEnabled() { - return decls.Dyn + return types.DynType } - c.errors.typeMismatch(loc, previous, current) - return decls.Error + c.errors.typeMismatch(e.GetId(), c.location(e), previous, current) + return types.ErrorType } func (c *checker) dynAggregateLiteralElementTypesEnabled() bool { return c.env.aggLitElemType == dynElementType } -func (c *checker) newTypeVar() *exprpb.Type { +func (c *checker) newTypeVar() *types.Type { id := c.freeTypeVarCounter c.freeTypeVarCounter++ - return decls.NewTypeParamType(fmt.Sprintf("_var%d", id)) + return types.NewTypeParamType(fmt.Sprintf("_var%d", id)) } -func (c *checker) isAssignable(t1 *exprpb.Type, t2 *exprpb.Type) bool { +func (c *checker) isAssignable(t1, t2 *types.Type) bool { subs := isAssignable(c.mappings, t1, t2) if subs != nil { c.mappings = subs @@ -610,7 +623,7 @@ func (c *checker) isAssignable(t1 *exprpb.Type, t2 *exprpb.Type) bool { return false } -func (c *checker) isAssignableList(l1 []*exprpb.Type, l2 []*exprpb.Type) bool { +func (c *checker) isAssignableList(l1, l2 []*types.Type) bool { subs := isAssignableList(c.mappings, l1, l2) if subs != nil { c.mappings = subs @@ -620,57 +633,52 @@ func (c *checker) isAssignableList(l1 []*exprpb.Type, l2 []*exprpb.Type) bool { return false } -func (c *checker) lookupFieldType(l common.Location, messageType string, fieldName string) (*ref.FieldType, bool) { - if _, found := c.env.provider.FindType(messageType); !found { - // This should not happen, anyway, report an error. 
- c.errors.unexpectedFailedResolution(l, messageType) - return nil, false - } - - if ft, found := c.env.provider.FindFieldType(messageType, fieldName); found { - return ft, found +func maybeUnwrapString(e *exprpb.Expr) (string, bool) { + switch e.GetExprKind().(type) { + case *exprpb.Expr_ConstExpr: + literal := e.GetConstExpr() + switch literal.GetConstantKind().(type) { + case *exprpb.Constant_StringValue: + return literal.GetStringValue(), true + } } - - c.errors.undefinedField(l, fieldName) - return nil, false + return "", false } -func (c *checker) setType(e *exprpb.Expr, t *exprpb.Type) { - if old, found := c.types[e.GetId()]; found && !proto.Equal(old, t) { - c.errors.ReportError(c.location(e), - "(Incompatible) Type already exists for expression: %v(%d) old:%v, new:%v", e, e.GetId(), old, t) +func (c *checker) setType(e *exprpb.Expr, t *types.Type) { + if old, found := c.types[e.GetId()]; found && !old.IsExactType(t) { + c.errors.incompatibleType(e.GetId(), c.location(e), e, old, t) return } c.types[e.GetId()] = t } -func (c *checker) getType(e *exprpb.Expr) *exprpb.Type { +func (c *checker) getType(e *exprpb.Expr) *types.Type { return c.types[e.GetId()] } -func (c *checker) setReference(e *exprpb.Expr, r *exprpb.Reference) { - if old, found := c.references[e.GetId()]; found && !proto.Equal(old, r) { - c.errors.ReportError(c.location(e), - "Reference already exists for expression: %v(%d) old:%v, new:%v", e, e.GetId(), old, r) +func (c *checker) setReference(e *exprpb.Expr, r *ast.ReferenceInfo) { + if old, found := c.references[e.GetId()]; found && !old.Equals(r) { + c.errors.referenceRedefinition(e.GetId(), c.location(e), e, old, r) return } c.references[e.GetId()] = r } -func (c *checker) assertType(e *exprpb.Expr, t *exprpb.Type) { +func (c *checker) assertType(e *exprpb.Expr, t *types.Type) { if !c.isAssignable(t, c.getType(e)) { - c.errors.typeMismatch(c.location(e), t, c.getType(e)) + c.errors.typeMismatch(e.GetId(), c.location(e), t, c.getType(e)) } } type overloadResolution struct { - Reference *exprpb.Reference - Type *exprpb.Type + Type *types.Type + Reference *ast.ReferenceInfo } -func newResolution(checkedRef *exprpb.Reference, t *exprpb.Type) *overloadResolution { +func newResolution(r *ast.ReferenceInfo, t *types.Type) *overloadResolution { return &overloadResolution{ - Reference: checkedRef, + Reference: r, Type: t, } } @@ -697,10 +705,56 @@ func (c *checker) locationByID(id int64) common.Location { return common.NoLocation } -func newIdentReference(name string, value *exprpb.Constant) *exprpb.Reference { - return &exprpb.Reference{Name: name, Value: value} +func (c *checker) lookupFieldType(exprID int64, structType, fieldName string) (*types.Type, bool) { + if _, found := c.env.provider.FindStructType(structType); !found { + // This should not happen, anyway, report an error. 
+ c.errors.unexpectedFailedResolution(exprID, c.locationByID(exprID), structType) + return nil, false + } + + if ft, found := c.env.provider.FindStructFieldType(structType, fieldName); found { + return ft.Type, found + } + + c.errors.undefinedField(exprID, c.locationByID(exprID), fieldName) + return nil, false +} + +func isWellKnownType(t *types.Type) bool { + switch t.Kind() { + case types.AnyKind, types.TimestampKind, types.DurationKind, types.DynKind, types.NullTypeKind: + return true + case types.BoolKind, types.BytesKind, types.DoubleKind, types.IntKind, types.StringKind, types.UintKind: + return t.IsAssignableType(types.NullType) + case types.ListKind: + return t.Parameters()[0] == types.DynType + case types.MapKind: + return t.Parameters()[0] == types.StringType && t.Parameters()[1] == types.DynType + } + return false } -func newFunctionReference(overloads ...string) *exprpb.Reference { - return &exprpb.Reference{OverloadId: overloads} +func getWellKnownTypeName(t *types.Type) string { + if name, found := wellKnownTypes[t.Kind()]; found { + return name + } + return "" } + +var ( + wellKnownTypes = map[types.Kind]string{ + types.AnyKind: "google.protobuf.Any", + types.BoolKind: "google.protobuf.BoolValue", + types.BytesKind: "google.protobuf.BytesValue", + types.DoubleKind: "google.protobuf.DoubleValue", + types.DurationKind: "google.protobuf.Duration", + types.DynKind: "google.protobuf.Value", + types.IntKind: "google.protobuf.Int64Value", + types.ListKind: "google.protobuf.ListValue", + types.NullTypeKind: "google.protobuf.NullValue", + types.MapKind: "google.protobuf.Struct", + types.StringKind: "google.protobuf.StringValue", + types.TimestampKind: "google.protobuf.Timestamp", + types.UintKind: "google.protobuf.UInt64Value", + } +) diff --git a/vendor/github.com/google/cel-go/checker/cost.go b/vendor/github.com/google/cel-go/checker/cost.go index ef58df766..f232f30da 100644 --- a/vendor/github.com/google/cel-go/checker/cost.go +++ b/vendor/github.com/google/cel-go/checker/cost.go @@ -18,7 +18,9 @@ import ( "math" "github.com/google/cel-go/common" + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types" "github.com/google/cel-go/parser" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" @@ -54,7 +56,7 @@ type AstNode interface { // The first path element is a variable. All subsequent path elements are one of: field name, '@items', '@keys', '@values'. Path() []string // Type returns the deduced type of the AstNode. - Type() *exprpb.Type + Type() *types.Type // Expr returns the expression of the AstNode. Expr() *exprpb.Expr // ComputedSize returns a size estimate of the AstNode derived from information available in the CEL expression. @@ -66,7 +68,7 @@ type AstNode interface { type astNode struct { path []string - t *exprpb.Type + t *types.Type expr *exprpb.Expr derivedSize *SizeEstimate } @@ -75,7 +77,7 @@ func (e astNode) Path() []string { return e.path } -func (e astNode) Type() *exprpb.Type { +func (e astNode) Type() *types.Type { return e.t } @@ -228,7 +230,7 @@ func addUint64NoOverflow(x, y uint64) uint64 { // multiplyUint64NoOverflow multiplies non-negative ints. If the result is exceeds math.MaxUint64, math.MaxUint64 // is returned. 
func multiplyUint64NoOverflow(x, y uint64) uint64 { - if x > 0 && y > 0 && x > math.MaxUint64/y { + if y != 0 && x > math.MaxUint64/y { return math.MaxUint64 } return x * y @@ -240,7 +242,11 @@ func multiplyByCostFactor(x uint64, y float64) uint64 { if xFloat > 0 && y > 0 && xFloat > math.MaxUint64/y { return math.MaxUint64 } - return uint64(math.Ceil(xFloat * y)) + ceil := math.Ceil(xFloat * y) + if ceil >= doubleTwoTo64 { + return math.MaxUint64 + } + return uint64(ceil) } var ( @@ -258,9 +264,10 @@ type coster struct { // iterRanges tracks the iterRange of each iterVar. iterRanges iterRangeScopes // computedSizes tracks the computed sizes of call results. - computedSizes map[int64]SizeEstimate - checkedExpr *exprpb.CheckedExpr - estimator CostEstimator + computedSizes map[int64]SizeEstimate + checkedAST *ast.CheckedAST + estimator CostEstimator + overloadEstimators map[string]FunctionEstimator // presenceTestCost will either be a zero or one based on whether has() macros count against cost computations. presenceTestCost CostEstimate } @@ -289,6 +296,7 @@ func (vs iterRangeScopes) peek(varName string) (int64, bool) { type CostOption func(*coster) error // PresenceTestHasCost determines whether presence testing has a cost of one or zero. +// // Defaults to presence test has a cost of one. func PresenceTestHasCost(hasCost bool) CostOption { return func(c *coster) error { @@ -301,15 +309,30 @@ func PresenceTestHasCost(hasCost bool) CostOption { } } +// FunctionEstimator provides a CallEstimate given the target and arguments for a specific function, overload pair. +type FunctionEstimator func(estimator CostEstimator, target *AstNode, args []AstNode) *CallEstimate + +// OverloadCostEstimate binds a FunctionCoster to a specific function overload ID. +// +// When a OverloadCostEstimate is provided, it will override the cost calculation of the CostEstimator provided to +// the Cost() call. +func OverloadCostEstimate(overloadID string, functionCoster FunctionEstimator) CostOption { + return func(c *coster) error { + c.overloadEstimators[overloadID] = functionCoster + return nil + } +} + // Cost estimates the cost of the parsed and type checked CEL expression. 
-func Cost(checker *exprpb.CheckedExpr, estimator CostEstimator, opts ...CostOption) (CostEstimate, error) { +func Cost(checker *ast.CheckedAST, estimator CostEstimator, opts ...CostOption) (CostEstimate, error) { c := &coster{ - checkedExpr: checker, - estimator: estimator, - exprPath: map[int64][]string{}, - iterRanges: map[string][]int64{}, - computedSizes: map[int64]SizeEstimate{}, - presenceTestCost: CostEstimate{Min: 1, Max: 1}, + checkedAST: checker, + estimator: estimator, + overloadEstimators: map[string]FunctionEstimator{}, + exprPath: map[int64][]string{}, + iterRanges: map[string][]int64{}, + computedSizes: map[int64]SizeEstimate{}, + presenceTestCost: CostEstimate{Min: 1, Max: 1}, } for _, opt := range opts { err := opt(c) @@ -317,7 +340,7 @@ func Cost(checker *exprpb.CheckedExpr, estimator CostEstimator, opts ...CostOpti return CostEstimate{}, err } } - return c.cost(checker.GetExpr()), nil + return c.cost(checker.Expr), nil } func (c *coster) cost(e *exprpb.Expr) CostEstimate { @@ -351,10 +374,10 @@ func (c *coster) costIdent(e *exprpb.Expr) CostEstimate { // build and track the field path if iterRange, ok := c.iterRanges.peek(identExpr.GetName()); ok { - switch c.checkedExpr.TypeMap[iterRange].GetTypeKind().(type) { - case *exprpb.Type_ListType_: + switch c.checkedAST.TypeMap[iterRange].Kind() { + case types.ListKind: c.addPath(e, append(c.exprPath[iterRange], "@items")) - case *exprpb.Type_MapType_: + case types.MapKind: c.addPath(e, append(c.exprPath[iterRange], "@keys")) } } else { @@ -378,8 +401,8 @@ func (c *coster) costSelect(e *exprpb.Expr) CostEstimate { } sum = sum.Add(c.cost(sel.GetOperand())) targetType := c.getType(sel.GetOperand()) - switch kindOf(targetType) { - case kindMap, kindObject, kindTypeParam: + switch targetType.Kind() { + case types.MapKind, types.StructKind, types.TypeParamKind: sum = sum.Add(selectAndIdentCost) } @@ -403,8 +426,8 @@ func (c *coster) costCall(e *exprpb.Expr) CostEstimate { argTypes[i] = c.newAstNode(arg) } - ref := c.checkedExpr.ReferenceMap[e.GetId()] - if ref == nil || len(ref.GetOverloadId()) == 0 { + ref := c.checkedAST.ReferenceMap[e.GetId()] + if ref == nil || len(ref.OverloadIDs) == 0 { return CostEstimate{} } var targetType AstNode @@ -417,7 +440,7 @@ func (c *coster) costCall(e *exprpb.Expr) CostEstimate { // Pick a cost estimate range that covers all the overload cost estimation ranges fnCost := CostEstimate{Min: uint64(math.MaxUint64), Max: 0} var resultSize *SizeEstimate - for _, overload := range ref.GetOverloadId() { + for _, overload := range ref.OverloadIDs { overloadCost := c.functionCost(call.GetFunction(), overload, &targetType, argTypes, argCosts) fnCost = fnCost.Union(overloadCost.CostEstimate) if overloadCost.ResultSize != nil { @@ -530,7 +553,14 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args } return sum } - + if len(c.overloadEstimators) != 0 { + if estimator, found := c.overloadEstimators[overloadID]; found { + if est := estimator(c.estimator, target, args); est != nil { + callEst := *est + return CallEstimate{CostEstimate: callEst.Add(argCostSum()), ResultSize: est.ResultSize} + } + } + } if est := c.estimator.EstimateCallCost(function, overloadID, target, args); est != nil { callEst := *est return CallEstimate{CostEstimate: callEst.Add(argCostSum()), ResultSize: est.ResultSize} @@ -641,8 +671,8 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum())} } -func (c 
*coster) getType(e *exprpb.Expr) *exprpb.Type { - return c.checkedExpr.TypeMap[e.GetId()] +func (c *coster) getType(e *exprpb.Expr) *types.Type { + return c.checkedAST.TypeMap[e.GetId()] } func (c *coster) getPath(e *exprpb.Expr) []string { @@ -663,22 +693,24 @@ func (c *coster) newAstNode(e *exprpb.Expr) *astNode { if size, ok := c.computedSizes[e.GetId()]; ok { derivedSize = &size } - return &astNode{path: path, t: c.getType(e), expr: e, derivedSize: derivedSize} + return &astNode{ + path: path, + t: c.getType(e), + expr: e, + derivedSize: derivedSize} } // isScalar returns true if the given type is known to be of a constant size at // compile time. isScalar will return false for strings (they are variable-width) // in addition to protobuf.Any and protobuf.Value (their size is not knowable at compile time). -func isScalar(t *exprpb.Type) bool { - switch kindOf(t) { - case kindPrimitive: - if t.GetPrimitive() != exprpb.Type_STRING && t.GetPrimitive() != exprpb.Type_BYTES { - return true - } - case kindWellKnown: - if t.GetWellKnown() == exprpb.Type_DURATION || t.GetWellKnown() == exprpb.Type_TIMESTAMP { - return true - } +func isScalar(t *types.Type) bool { + switch t.Kind() { + case types.BoolKind, types.DoubleKind, types.DurationKind, types.IntKind, types.TimestampKind, types.UintKind: + return true } return false } + +var ( + doubleTwoTo64 = math.Ldexp(1.0, 64) +) diff --git a/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel b/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel index 9384be450..a6b0be292 100644 --- a/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel +++ b/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel @@ -9,7 +9,6 @@ go_library( name = "go_default_library", srcs = [ "decls.go", - "scopes.go", ], importpath = "github.com/google/cel-go/checker/decls", deps = [ diff --git a/vendor/github.com/google/cel-go/checker/env.go b/vendor/github.com/google/cel-go/checker/env.go index be89d2d68..70682b17c 100644 --- a/vendor/github.com/google/cel-go/checker/env.go +++ b/vendor/github.com/google/cel-go/checker/env.go @@ -18,17 +18,11 @@ import ( "fmt" "strings" - "google.golang.org/protobuf/proto" - - "github.com/google/cel-go/checker/decls" "github.com/google/cel-go/common/containers" + "github.com/google/cel-go/common/decls" "github.com/google/cel-go/common/overloads" "github.com/google/cel-go/common/types" - "github.com/google/cel-go/common/types/pb" - "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/parser" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) type aggregateLiteralElementType int @@ -76,15 +70,15 @@ var ( // which can be used to assist with type-checking. type Env struct { container *containers.Container - provider ref.TypeProvider - declarations *decls.Scopes + provider types.Provider + declarations *Scopes aggLitElemType aggregateLiteralElementType filteredOverloadIDs map[string]struct{} } // NewEnv returns a new *Env with the given parameters. -func NewEnv(container *containers.Container, provider ref.TypeProvider, opts ...Option) (*Env, error) { - declarations := decls.NewScopes() +func NewEnv(container *containers.Container, provider types.Provider, opts ...Option) (*Env, error) { + declarations := newScopes() declarations.Push() envOptions := &options{} @@ -113,24 +107,31 @@ func NewEnv(container *containers.Container, provider ref.TypeProvider, opts ... }, nil } -// Add adds new Decl protos to the Env. -// Returns an error for identifier redeclarations. 
-func (e *Env) Add(decls ...*exprpb.Decl) error { +// AddIdents configures the checker with a list of variable declarations. +// +// If there are overlapping declarations, the method will error. +func (e *Env) AddIdents(declarations ...*decls.VariableDecl) error { errMsgs := make([]errorMsg, 0) - for _, decl := range decls { - switch decl.DeclKind.(type) { - case *exprpb.Decl_Ident: - errMsgs = append(errMsgs, e.addIdent(sanitizeIdent(decl))) - case *exprpb.Decl_Function: - errMsgs = append(errMsgs, e.setFunction(sanitizeFunction(decl))...) - } + for _, d := range declarations { + errMsgs = append(errMsgs, e.addIdent(d)) + } + return formatError(errMsgs) +} + +// AddFunctions configures the checker with a list of function declarations. +// +// If there are overlapping declarations, the method will error. +func (e *Env) AddFunctions(declarations ...*decls.FunctionDecl) error { + errMsgs := make([]errorMsg, 0) + for _, d := range declarations { + errMsgs = append(errMsgs, e.setFunction(d)...) } return formatError(errMsgs) } // LookupIdent returns a Decl proto for typeName as an identifier in the Env. // Returns nil if no such identifier is found in the Env. -func (e *Env) LookupIdent(name string) *exprpb.Decl { +func (e *Env) LookupIdent(name string) *decls.VariableDecl { for _, candidate := range e.container.ResolveCandidateNames(name) { if ident := e.declarations.FindIdent(candidate); ident != nil { return ident @@ -139,8 +140,8 @@ func (e *Env) LookupIdent(name string) *exprpb.Decl { // Next try to import the name as a reference to a message type. If found, // the declaration is added to the outest (global) scope of the // environment, so next time we can access it faster. - if t, found := e.provider.FindType(candidate); found { - decl := decls.NewVar(candidate, t) + if t, found := e.provider.FindStructType(candidate); found { + decl := decls.NewVariable(candidate, t) e.declarations.AddIdent(decl) return decl } @@ -148,11 +149,7 @@ func (e *Env) LookupIdent(name string) *exprpb.Decl { // Next try to import this as an enum value by splitting the name in a type prefix and // the enum inside. if enumValue := e.provider.EnumValue(candidate); enumValue.Type() != types.ErrType { - decl := decls.NewIdent(candidate, - decls.Int, - &exprpb.Constant{ - ConstantKind: &exprpb.Constant_Int64Value{ - Int64Value: int64(enumValue.(types.Int))}}) + decl := decls.NewConstant(candidate, types.IntType, enumValue) e.declarations.AddIdent(decl) return decl } @@ -162,7 +159,7 @@ func (e *Env) LookupIdent(name string) *exprpb.Decl { // LookupFunction returns a Decl proto for typeName as a function in env. // Returns nil if no such function is found in env. -func (e *Env) LookupFunction(name string) *exprpb.Decl { +func (e *Env) LookupFunction(name string) *decls.FunctionDecl { for _, candidate := range e.container.ResolveCandidateNames(name) { if fn := e.declarations.FindFunction(candidate); fn != nil { return fn @@ -171,88 +168,46 @@ func (e *Env) LookupFunction(name string) *exprpb.Decl { return nil } -// addOverload adds overload to function declaration f. -// Returns one or more errorMsg values if the overload overlaps with an existing overload or macro. -func (e *Env) addOverload(f *exprpb.Decl, overload *exprpb.Decl_FunctionDecl_Overload) []errorMsg { - errMsgs := make([]errorMsg, 0) - function := f.GetFunction() - emptyMappings := newMapping() - overloadFunction := decls.NewFunctionType(overload.GetResultType(), - overload.GetParams()...) 
- overloadErased := substitute(emptyMappings, overloadFunction, true) - for _, existing := range function.GetOverloads() { - existingFunction := decls.NewFunctionType(existing.GetResultType(), existing.GetParams()...) - existingErased := substitute(emptyMappings, existingFunction, true) - overlap := isAssignable(emptyMappings, overloadErased, existingErased) != nil || - isAssignable(emptyMappings, existingErased, overloadErased) != nil - if overlap && - overload.GetIsInstanceFunction() == existing.GetIsInstanceFunction() { - errMsgs = append(errMsgs, - overlappingOverloadError(f.Name, - overload.GetOverloadId(), overloadFunction, - existing.GetOverloadId(), existingFunction)) - } - } - - for _, macro := range parser.AllMacros { - if macro.Function() == f.Name && - macro.IsReceiverStyle() == overload.GetIsInstanceFunction() && - macro.ArgCount() == len(overload.GetParams()) { - errMsgs = append(errMsgs, overlappingMacroError(f.Name, macro.ArgCount())) - } - } - if len(errMsgs) > 0 { - return errMsgs - } - function.Overloads = append(function.GetOverloads(), overload) - return errMsgs -} - // setFunction adds the function Decl to the Env. // Adds a function decl if one doesn't already exist, then adds all overloads from the Decl. // If overload overlaps with an existing overload, adds to the errors in the Env instead. -func (e *Env) setFunction(decl *exprpb.Decl) []errorMsg { - errorMsgs := make([]errorMsg, 0) - overloads := decl.GetFunction().GetOverloads() - current := e.declarations.FindFunction(decl.Name) - if current == nil { - //Add the function declaration without overloads and check the overloads below. - current = decls.NewFunction(decl.Name) - } else { - existingOverloads := map[string]*exprpb.Decl_FunctionDecl_Overload{} - for _, overload := range current.GetFunction().GetOverloads() { - existingOverloads[overload.GetOverloadId()] = overload +func (e *Env) setFunction(fn *decls.FunctionDecl) []errorMsg { + errMsgs := make([]errorMsg, 0) + current := e.declarations.FindFunction(fn.Name()) + if current != nil { + var err error + current, err = current.Merge(fn) + if err != nil { + return append(errMsgs, errorMsg(err.Error())) } - newOverloads := []*exprpb.Decl_FunctionDecl_Overload{} - for _, overload := range overloads { - existing, found := existingOverloads[overload.GetOverloadId()] - if !found || !overloadsEqual(existing, overload) { - newOverloads = append(newOverloads, overload) + } else { + current = fn + } + for _, overload := range current.OverloadDecls() { + for _, macro := range parser.AllMacros { + if macro.Function() == current.Name() && + macro.IsReceiverStyle() == overload.IsMemberFunction() && + macro.ArgCount() == len(overload.ArgTypes()) { + errMsgs = append(errMsgs, overlappingMacroError(current.Name(), macro.ArgCount())) } } - overloads = newOverloads - if len(newOverloads) == 0 { - return errorMsgs + if len(errMsgs) > 0 { + return errMsgs } - // Copy on write since we don't know where this original definition came from. - current = proto.Clone(current).(*exprpb.Decl) } e.declarations.SetFunction(current) - for _, overload := range overloads { - errorMsgs = append(errorMsgs, e.addOverload(current, overload)...) - } - return errorMsgs + return errMsgs } // addIdent adds the Decl to the declarations in the Env. // Returns a non-empty errorMsg if the identifier is already declared in the scope. 
-func (e *Env) addIdent(decl *exprpb.Decl) errorMsg { - current := e.declarations.FindIdentInScope(decl.Name) +func (e *Env) addIdent(decl *decls.VariableDecl) errorMsg { + current := e.declarations.FindIdentInScope(decl.Name()) if current != nil { - if proto.Equal(current, decl) { + if current.DeclarationIsEquivalent(decl) { return "" } - return overlappingIdentifierError(decl.Name) + return overlappingIdentifierError(decl.Name()) } e.declarations.AddIdent(decl) return "" @@ -264,111 +219,9 @@ func (e *Env) isOverloadDisabled(overloadID string) bool { return found } -// overloadsEqual returns whether two overloads have identical signatures. -// -// type parameter names are ignored as they may be specified in any order and have no bearing on overload -// equivalence -func overloadsEqual(o1, o2 *exprpb.Decl_FunctionDecl_Overload) bool { - return o1.GetOverloadId() == o2.GetOverloadId() && - o1.GetIsInstanceFunction() == o2.GetIsInstanceFunction() && - paramsEqual(o1.GetParams(), o2.GetParams()) && - proto.Equal(o1.GetResultType(), o2.GetResultType()) -} - -// paramsEqual returns whether two lists have equal length and all types are equal -func paramsEqual(p1, p2 []*exprpb.Type) bool { - if len(p1) != len(p2) { - return false - } - for i, a := range p1 { - b := p2[i] - if !proto.Equal(a, b) { - return false - } - } - return true -} - -// sanitizeFunction replaces well-known types referenced by message name with their equivalent -// CEL built-in type instances. -func sanitizeFunction(decl *exprpb.Decl) *exprpb.Decl { - fn := decl.GetFunction() - // Determine whether the declaration requires replacements from proto-based message type - // references to well-known CEL type references. - var needsSanitizing bool - for _, o := range fn.GetOverloads() { - if isObjectWellKnownType(o.GetResultType()) { - needsSanitizing = true - break - } - for _, p := range o.GetParams() { - if isObjectWellKnownType(p) { - needsSanitizing = true - break - } - } - } - - // Early return if the declaration requires no modification. - if !needsSanitizing { - return decl - } - - // Sanitize all of the overloads if any overload requires an update to its type references. - overloads := make([]*exprpb.Decl_FunctionDecl_Overload, len(fn.GetOverloads())) - for i, o := range fn.GetOverloads() { - rt := o.GetResultType() - if isObjectWellKnownType(rt) { - rt = getObjectWellKnownType(rt) - } - params := make([]*exprpb.Type, len(o.GetParams())) - copy(params, o.GetParams()) - for j, p := range params { - if isObjectWellKnownType(p) { - params[j] = getObjectWellKnownType(p) - } - } - // If sanitized, replace the overload definition. - if o.IsInstanceFunction { - overloads[i] = - decls.NewInstanceOverload(o.GetOverloadId(), params, rt) - } else { - overloads[i] = - decls.NewOverload(o.GetOverloadId(), params, rt) - } - } - return decls.NewFunction(decl.GetName(), overloads...) -} - -// sanitizeIdent replaces the identifier's well-known types referenced by message name with -// references to CEL built-in type instances. -func sanitizeIdent(decl *exprpb.Decl) *exprpb.Decl { - id := decl.GetIdent() - t := id.GetType() - if !isObjectWellKnownType(t) { - return decl - } - return decls.NewIdent(decl.GetName(), getObjectWellKnownType(t), id.GetValue()) -} - -// isObjectWellKnownType returns true if the input type is an OBJECT type with a message name -// that corresponds the message name of a built-in CEL type. 
-func isObjectWellKnownType(t *exprpb.Type) bool { - if kindOf(t) != kindObject { - return false - } - _, found := pb.CheckedWellKnowns[t.GetMessageType()] - return found -} - -// getObjectWellKnownType returns the built-in CEL type declaration for input type's message name. -func getObjectWellKnownType(t *exprpb.Type) *exprpb.Type { - return pb.CheckedWellKnowns[t.GetMessageType()] -} - // validatedDeclarations returns a reference to the validated variable and function declaration scope stack. // must be copied before use. -func (e *Env) validatedDeclarations() *decls.Scopes { +func (e *Env) validatedDeclarations() *Scopes { return e.declarations } @@ -402,19 +255,6 @@ func overlappingIdentifierError(name string) errorMsg { return errorMsg(fmt.Sprintf("overlapping identifier for name '%s'", name)) } -func overlappingOverloadError(name string, - overloadID1 string, f1 *exprpb.Type, - overloadID2 string, f2 *exprpb.Type) errorMsg { - return errorMsg(fmt.Sprintf( - "overlapping overload for name '%s' (type '%s' with overloadId: '%s' "+ - "cannot be distinguished from '%s' with overloadId: '%s')", - name, - FormatCheckedType(f1), - overloadID1, - FormatCheckedType(f2), - overloadID2)) -} - func overlappingMacroError(name string, argCount int) errorMsg { return errorMsg(fmt.Sprintf( "overlapping macro for name '%s' with %d args", name, argCount)) diff --git a/vendor/github.com/google/cel-go/checker/errors.go b/vendor/github.com/google/cel-go/checker/errors.go index 0014f9abe..c2b96498d 100644 --- a/vendor/github.com/google/cel-go/checker/errors.go +++ b/vendor/github.com/google/cel-go/checker/errors.go @@ -15,82 +15,78 @@ package checker import ( + "reflect" + "github.com/google/cel-go/common" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/types" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) // typeErrors is a specialization of Errors. 
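[editor's note, not part of the patch] The reworked helpers in errors.go wrap common.Errors and key every report to an expression id. A short sketch of the underlying reporting API with invented values; it assumes the "fmt" and "github.com/google/cel-go/common" imports:

	errs := common.NewErrors(common.NewTextSource("a + b"))
	errs.ReportErrorAtID(2, common.NoLocation,
		"found no matching overload for '%s' applied to '%s'", "_+_", "(int, string)")
	for _, err := range errs.GetErrors() {
		fmt.Println(err.Message)
	}
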
type typeErrors struct { - *common.Errors + errs *common.Errors +} + +func (e *typeErrors) fieldTypeMismatch(id int64, l common.Location, name string, field, value *types.Type) { + e.errs.ReportErrorAtID(id, l, "expected type of field '%s' is '%s' but provided type is '%s'", + name, FormatCELType(field), FormatCELType(value)) +} + +func (e *typeErrors) incompatibleType(id int64, l common.Location, ex *exprpb.Expr, prev, next *types.Type) { + e.errs.ReportErrorAtID(id, l, + "incompatible type already exists for expression: %v(%d) old:%v, new:%v", ex, ex.GetId(), prev, next) +} + +func (e *typeErrors) noMatchingOverload(id int64, l common.Location, name string, args []*types.Type, isInstance bool) { + signature := formatFunctionDeclType(nil, args, isInstance) + e.errs.ReportErrorAtID(id, l, "found no matching overload for '%s' applied to '%s'", name, signature) } -func (e *typeErrors) undeclaredReference(l common.Location, container string, name string) { - e.ReportError(l, "undeclared reference to '%s' (in container '%s')", name, container) +func (e *typeErrors) notAComprehensionRange(id int64, l common.Location, t *types.Type) { + e.errs.ReportErrorAtID(id, l, "expression of type '%s' cannot be range of a comprehension (must be list, map, or dynamic)", + FormatCELType(t)) } -func (e *typeErrors) typeDoesNotSupportFieldSelection(l common.Location, t *exprpb.Type) { - e.ReportError(l, "type '%s' does not support field selection", t) +func (e *typeErrors) notAnOptionalFieldSelection(id int64, l common.Location, field *exprpb.Expr) { + e.errs.ReportErrorAtID(id, l, "unsupported optional field selection: %v", field) } -func (e *typeErrors) undefinedField(l common.Location, field string) { - e.ReportError(l, "undefined field '%s'", field) +func (e *typeErrors) notAType(id int64, l common.Location, typeName string) { + e.errs.ReportErrorAtID(id, l, "'%s' is not a type", typeName) } -func (e *typeErrors) noMatchingOverload(l common.Location, name string, args []*exprpb.Type, isInstance bool) { - signature := formatFunction(nil, args, isInstance) - e.ReportError(l, "found no matching overload for '%s' applied to '%s'", name, signature) +func (e *typeErrors) notAMessageType(id int64, l common.Location, typeName string) { + e.errs.ReportErrorAtID(id, l, "'%s' is not a message type", typeName) } -func (e *typeErrors) notAType(l common.Location, t *exprpb.Type) { - e.ReportError(l, "'%s(%v)' is not a type", FormatCheckedType(t), t) +func (e *typeErrors) referenceRedefinition(id int64, l common.Location, ex *exprpb.Expr, prev, next *ast.ReferenceInfo) { + e.errs.ReportErrorAtID(id, l, + "reference already exists for expression: %v(%d) old:%v, new:%v", ex, ex.GetId(), prev, next) } -func (e *typeErrors) notAMessageType(l common.Location, t *exprpb.Type) { - e.ReportError(l, "'%s' is not a message type", FormatCheckedType(t)) +func (e *typeErrors) typeDoesNotSupportFieldSelection(id int64, l common.Location, t *types.Type) { + e.errs.ReportErrorAtID(id, l, "type '%s' does not support field selection", FormatCELType(t)) } -func (e *typeErrors) fieldTypeMismatch(l common.Location, name string, field *exprpb.Type, value *exprpb.Type) { - e.ReportError(l, "expected type of field '%s' is '%s' but provided type is '%s'", - name, FormatCheckedType(field), FormatCheckedType(value)) +func (e *typeErrors) typeMismatch(id int64, l common.Location, expected, actual *types.Type) { + e.errs.ReportErrorAtID(id, l, "expected type '%s' but found '%s'", + FormatCELType(expected), FormatCELType(actual)) } -func (e 
*typeErrors) unexpectedFailedResolution(l common.Location, typeName string) { - e.ReportError(l, "[internal] unexpected failed resolution of '%s'", typeName) +func (e *typeErrors) undefinedField(id int64, l common.Location, field string) { + e.errs.ReportErrorAtID(id, l, "undefined field '%s'", field) } -func (e *typeErrors) notAComprehensionRange(l common.Location, t *exprpb.Type) { - e.ReportError(l, "expression of type '%s' cannot be range of a comprehension (must be list, map, or dynamic)", - FormatCheckedType(t)) +func (e *typeErrors) undeclaredReference(id int64, l common.Location, container string, name string) { + e.errs.ReportErrorAtID(id, l, "undeclared reference to '%s' (in container '%s')", name, container) } -func (e *typeErrors) typeMismatch(l common.Location, expected *exprpb.Type, actual *exprpb.Type) { - e.ReportError(l, "expected type '%s' but found '%s'", - FormatCheckedType(expected), FormatCheckedType(actual)) +func (e *typeErrors) unexpectedFailedResolution(id int64, l common.Location, typeName string) { + e.errs.ReportErrorAtID(id, l, "unexpected failed resolution of '%s'", typeName) } -func formatFunction(resultType *exprpb.Type, argTypes []*exprpb.Type, isInstance bool) string { - result := "" - if isInstance { - target := argTypes[0] - argTypes = argTypes[1:] - - result += FormatCheckedType(target) - result += "." - } - - result += "(" - for i, arg := range argTypes { - if i > 0 { - result += ", " - } - result += FormatCheckedType(arg) - } - result += ")" - if resultType != nil { - result += " -> " - result += FormatCheckedType(resultType) - } - - return result +func (e *typeErrors) unexpectedASTType(id int64, l common.Location, ex *exprpb.Expr) { + e.errs.ReportErrorAtID(id, l, "unrecognized ast type: %v", reflect.TypeOf(ex)) } diff --git a/vendor/github.com/google/cel-go/checker/format.go b/vendor/github.com/google/cel-go/checker/format.go new file mode 100644 index 000000000..95842905e --- /dev/null +++ b/vendor/github.com/google/cel-go/checker/format.go @@ -0,0 +1,216 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package checker + +import ( + "fmt" + "strings" + + chkdecls "github.com/google/cel-go/checker/decls" + "github.com/google/cel-go/common/types" + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" +) + +const ( + kindUnknown = iota + 1 + kindError + kindFunction + kindDyn + kindPrimitive + kindWellKnown + kindWrapper + kindNull + kindAbstract + kindType + kindList + kindMap + kindObject + kindTypeParam +) + +// FormatCheckedType converts a type message into a string representation. 
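[editor's note, not part of the patch] The new format.go keeps FormatCheckedType for protobuf type messages and adds FormatCELType for CEL-native types; per the doc comment below, the two renderings are intended to be identical. An illustrative comparison, assuming the checker/decls (aliased chkdecls) and common/types imports:

	FormatCheckedType(chkdecls.NewMapType(chkdecls.String, chkdecls.Int)) // "map(string, int)"
	FormatCELType(types.NewMapType(types.StringType, types.IntType))      // "map(string, int)"
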
+func FormatCheckedType(t *exprpb.Type) string { + switch kindOf(t) { + case kindDyn: + return "dyn" + case kindFunction: + return formatFunctionExprType(t.GetFunction().GetResultType(), + t.GetFunction().GetArgTypes(), + false) + case kindList: + return fmt.Sprintf("list(%s)", FormatCheckedType(t.GetListType().GetElemType())) + case kindObject: + return t.GetMessageType() + case kindMap: + return fmt.Sprintf("map(%s, %s)", + FormatCheckedType(t.GetMapType().GetKeyType()), + FormatCheckedType(t.GetMapType().GetValueType())) + case kindNull: + return "null" + case kindPrimitive: + switch t.GetPrimitive() { + case exprpb.Type_UINT64: + return "uint" + case exprpb.Type_INT64: + return "int" + } + return strings.Trim(strings.ToLower(t.GetPrimitive().String()), " ") + case kindType: + if t.GetType() == nil || t.GetType().GetTypeKind() == nil { + return "type" + } + return fmt.Sprintf("type(%s)", FormatCheckedType(t.GetType())) + case kindWellKnown: + switch t.GetWellKnown() { + case exprpb.Type_ANY: + return "any" + case exprpb.Type_DURATION: + return "duration" + case exprpb.Type_TIMESTAMP: + return "timestamp" + } + case kindWrapper: + return fmt.Sprintf("wrapper(%s)", + FormatCheckedType(chkdecls.NewPrimitiveType(t.GetWrapper()))) + case kindError: + return "!error!" + case kindTypeParam: + return t.GetTypeParam() + case kindAbstract: + at := t.GetAbstractType() + params := at.GetParameterTypes() + paramStrs := make([]string, len(params)) + for i, p := range params { + paramStrs[i] = FormatCheckedType(p) + } + return fmt.Sprintf("%s(%s)", at.GetName(), strings.Join(paramStrs, ", ")) + } + return t.String() +} + +type formatter func(any) string + +// FormatCELType formats a types.Type value to a string representation. +// +// The type formatting is identical to FormatCheckedType. +func FormatCELType(t any) string { + dt := t.(*types.Type) + switch dt.Kind() { + case types.AnyKind: + return "any" + case types.DurationKind: + return "duration" + case types.ErrorKind: + return "!error!" + case types.NullTypeKind: + return "null" + case types.TimestampKind: + return "timestamp" + case types.TypeParamKind: + return dt.TypeName() + case types.OpaqueKind: + if dt.TypeName() == "function" { + // There is no explicit function type in the new types representation, so information like + // whether the function is a member function is absent. 
+ return formatFunctionDeclType(dt.Parameters()[0], dt.Parameters()[1:], false) + } + case types.UnspecifiedKind: + return "" + } + if len(dt.Parameters()) == 0 { + return dt.DeclaredTypeName() + } + paramTypeNames := make([]string, 0, len(dt.Parameters())) + for _, p := range dt.Parameters() { + paramTypeNames = append(paramTypeNames, FormatCELType(p)) + } + return fmt.Sprintf("%s(%s)", dt.TypeName(), strings.Join(paramTypeNames, ", ")) +} + +func formatExprType(t any) string { + if t == nil { + return "" + } + return FormatCheckedType(t.(*exprpb.Type)) +} + +func formatFunctionExprType(resultType *exprpb.Type, argTypes []*exprpb.Type, isInstance bool) string { + return formatFunctionInternal[*exprpb.Type](resultType, argTypes, isInstance, formatExprType) +} + +func formatFunctionDeclType(resultType *types.Type, argTypes []*types.Type, isInstance bool) string { + return formatFunctionInternal[*types.Type](resultType, argTypes, isInstance, FormatCELType) +} + +func formatFunctionInternal[T any](resultType T, argTypes []T, isInstance bool, format formatter) string { + result := "" + if isInstance { + target := argTypes[0] + argTypes = argTypes[1:] + result += format(target) + result += "." + } + result += "(" + for i, arg := range argTypes { + if i > 0 { + result += ", " + } + result += format(arg) + } + result += ")" + rt := format(resultType) + if rt != "" { + result += " -> " + result += rt + } + return result +} + +// kindOf returns the kind of the type as defined in the checked.proto. +func kindOf(t *exprpb.Type) int { + if t == nil || t.TypeKind == nil { + return kindUnknown + } + switch t.GetTypeKind().(type) { + case *exprpb.Type_Error: + return kindError + case *exprpb.Type_Function: + return kindFunction + case *exprpb.Type_Dyn: + return kindDyn + case *exprpb.Type_Primitive: + return kindPrimitive + case *exprpb.Type_WellKnown: + return kindWellKnown + case *exprpb.Type_Wrapper: + return kindWrapper + case *exprpb.Type_Null: + return kindNull + case *exprpb.Type_Type: + return kindType + case *exprpb.Type_ListType_: + return kindList + case *exprpb.Type_MapType_: + return kindMap + case *exprpb.Type_MessageType: + return kindObject + case *exprpb.Type_TypeParam: + return kindTypeParam + case *exprpb.Type_AbstractType_: + return kindAbstract + } + return kindUnknown +} diff --git a/vendor/github.com/google/cel-go/checker/mapping.go b/vendor/github.com/google/cel-go/checker/mapping.go index fbc55a28d..8163a908a 100644 --- a/vendor/github.com/google/cel-go/checker/mapping.go +++ b/vendor/github.com/google/cel-go/checker/mapping.go @@ -15,25 +15,25 @@ package checker import ( - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + "github.com/google/cel-go/common/types" ) type mapping struct { - mapping map[string]*exprpb.Type + mapping map[string]*types.Type } func newMapping() *mapping { return &mapping{ - mapping: make(map[string]*exprpb.Type), + mapping: make(map[string]*types.Type), } } -func (m *mapping) add(from *exprpb.Type, to *exprpb.Type) { - m.mapping[typeKey(from)] = to +func (m *mapping) add(from, to *types.Type) { + m.mapping[FormatCELType(from)] = to } -func (m *mapping) find(from *exprpb.Type) (*exprpb.Type, bool) { - if r, found := m.mapping[typeKey(from)]; found { +func (m *mapping) find(from *types.Type) (*types.Type, bool) { + if r, found := m.mapping[FormatCELType(from)]; found { return r, found } return nil, false diff --git a/vendor/github.com/google/cel-go/checker/options.go b/vendor/github.com/google/cel-go/checker/options.go index 
cded00a66..0560c3813 100644 --- a/vendor/github.com/google/cel-go/checker/options.go +++ b/vendor/github.com/google/cel-go/checker/options.go @@ -14,12 +14,10 @@ package checker -import "github.com/google/cel-go/checker/decls" - type options struct { crossTypeNumericComparisons bool homogeneousAggregateLiterals bool - validatedDeclarations *decls.Scopes + validatedDeclarations *Scopes } // Option is a functional option for configuring the type-checker @@ -34,15 +32,6 @@ func CrossTypeNumericComparisons(enabled bool) Option { } } -// HomogeneousAggregateLiterals toggles support for constructing lists and maps whose elements all -// have the same type. -func HomogeneousAggregateLiterals(enabled bool) Option { - return func(opts *options) error { - opts.homogeneousAggregateLiterals = enabled - return nil - } -} - // ValidatedDeclarations provides a references to validated declarations which will be copied // into new checker instances. func ValidatedDeclarations(env *Env) Option { diff --git a/vendor/github.com/google/cel-go/checker/printer.go b/vendor/github.com/google/cel-go/checker/printer.go index 0cecc5210..15cba06ee 100644 --- a/vendor/github.com/google/cel-go/checker/printer.go +++ b/vendor/github.com/google/cel-go/checker/printer.go @@ -15,6 +15,8 @@ package checker import ( + "sort" + "github.com/google/cel-go/common/debug" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" @@ -47,6 +49,7 @@ func (a *semanticAdorner) GetMetadata(elem any) string { if len(ref.GetOverloadId()) == 0 { result += "^" + ref.Name } else { + sort.Strings(ref.GetOverloadId()) for i, overload := range ref.GetOverloadId() { if i == 0 { result += "^" diff --git a/vendor/github.com/google/cel-go/checker/decls/scopes.go b/vendor/github.com/google/cel-go/checker/scopes.go similarity index 81% rename from vendor/github.com/google/cel-go/checker/decls/scopes.go rename to vendor/github.com/google/cel-go/checker/scopes.go index 608bca3e5..8bb73ddb6 100644 --- a/vendor/github.com/google/cel-go/checker/decls/scopes.go +++ b/vendor/github.com/google/cel-go/checker/scopes.go @@ -12,9 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -package decls +package checker -import exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" +import ( + "github.com/google/cel-go/common/decls" +) // Scopes represents nested Decl sets where the Scopes value contains a Groups containing all // identifiers in scope and an optional parent representing outer scopes. @@ -25,9 +27,9 @@ type Scopes struct { scopes *Group } -// NewScopes creates a new, empty Scopes. +// newScopes creates a new, empty Scopes. // Some operations can't be safely performed until a Group is added with Push. -func NewScopes() *Scopes { +func newScopes() *Scopes { return &Scopes{ scopes: newGroup(), } @@ -35,7 +37,7 @@ func NewScopes() *Scopes { // Copy creates a copy of the current Scopes values, including a copy of its parent if non-nil. func (s *Scopes) Copy() *Scopes { - cpy := NewScopes() + cpy := newScopes() if s == nil { return cpy } @@ -66,14 +68,14 @@ func (s *Scopes) Pop() *Scopes { // AddIdent adds the ident Decl in the current scope. // Note: If the name collides with an existing identifier in the scope, the Decl is overwritten. 
-func (s *Scopes) AddIdent(decl *exprpb.Decl) { - s.scopes.idents[decl.Name] = decl +func (s *Scopes) AddIdent(decl *decls.VariableDecl) { + s.scopes.idents[decl.Name()] = decl } // FindIdent finds the first ident Decl with a matching name in Scopes, or nil if one cannot be // found. // Note: The search is performed from innermost to outermost. -func (s *Scopes) FindIdent(name string) *exprpb.Decl { +func (s *Scopes) FindIdent(name string) *decls.VariableDecl { if ident, found := s.scopes.idents[name]; found { return ident } @@ -86,7 +88,7 @@ func (s *Scopes) FindIdent(name string) *exprpb.Decl { // FindIdentInScope finds the first ident Decl with a matching name in the current Scopes value, or // nil if one does not exist. // Note: The search is only performed on the current scope and does not search outer scopes. -func (s *Scopes) FindIdentInScope(name string) *exprpb.Decl { +func (s *Scopes) FindIdentInScope(name string) *decls.VariableDecl { if ident, found := s.scopes.idents[name]; found { return ident } @@ -95,14 +97,14 @@ func (s *Scopes) FindIdentInScope(name string) *exprpb.Decl { // SetFunction adds the function Decl to the current scope. // Note: Any previous entry for a function in the current scope with the same name is overwritten. -func (s *Scopes) SetFunction(fn *exprpb.Decl) { - s.scopes.functions[fn.Name] = fn +func (s *Scopes) SetFunction(fn *decls.FunctionDecl) { + s.scopes.functions[fn.Name()] = fn } // FindFunction finds the first function Decl with a matching name in Scopes. // The search is performed from innermost to outermost. // Returns nil if no such function in Scopes. -func (s *Scopes) FindFunction(name string) *exprpb.Decl { +func (s *Scopes) FindFunction(name string) *decls.FunctionDecl { if fn, found := s.scopes.functions[name]; found { return fn } @@ -116,16 +118,16 @@ func (s *Scopes) FindFunction(name string) *exprpb.Decl { // Contains separate namespaces for identifier and function Decls. // (Should be named "Scope" perhaps?) type Group struct { - idents map[string]*exprpb.Decl - functions map[string]*exprpb.Decl + idents map[string]*decls.VariableDecl + functions map[string]*decls.FunctionDecl } // copy creates a new Group instance with a shallow copy of the variables and functions. // If callers need to mutate the exprpb.Decl definitions for a Function, they should copy-on-write. func (g *Group) copy() *Group { cpy := &Group{ - idents: make(map[string]*exprpb.Decl, len(g.idents)), - functions: make(map[string]*exprpb.Decl, len(g.functions)), + idents: make(map[string]*decls.VariableDecl, len(g.idents)), + functions: make(map[string]*decls.FunctionDecl, len(g.functions)), } for n, id := range g.idents { cpy.idents[n] = id @@ -139,7 +141,7 @@ func (g *Group) copy() *Group { // newGroup creates a new Group with empty maps for identifiers and functions. 
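[editor's note, not part of the patch] Scopes now holds CEL-native variable and function declarations, and lookup still walks from the innermost Group outward. A conceptual fragment, which only compiles inside the checker package since newScopes is unexported; the variable names are invented:

	s := newScopes()
	s.AddIdent(decls.NewVariable("x", types.IntType))
	inner := s.Push()
	inner.AddIdent(decls.NewVariable("x", types.StringType))
	fmt.Println(FormatCELType(inner.FindIdent("x").Type()))       // string (innermost wins)
	fmt.Println(FormatCELType(inner.Pop().FindIdent("x").Type())) // int
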
func newGroup() *Group { return &Group{ - idents: make(map[string]*exprpb.Decl), - functions: make(map[string]*exprpb.Decl), + idents: make(map[string]*decls.VariableDecl), + functions: make(map[string]*decls.FunctionDecl), } } diff --git a/vendor/github.com/google/cel-go/checker/standard.go b/vendor/github.com/google/cel-go/checker/standard.go index e64337ba4..11b35b80e 100644 --- a/vendor/github.com/google/cel-go/checker/standard.go +++ b/vendor/github.com/google/cel-go/checker/standard.go @@ -15,480 +15,21 @@ package checker import ( - "github.com/google/cel-go/checker/decls" - "github.com/google/cel-go/common/operators" - "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/stdlib" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) -var ( - standardDeclarations []*exprpb.Decl -) - -func init() { - // Some shortcuts we use when building declarations. - paramA := decls.NewTypeParamType("A") - typeParamAList := []string{"A"} - listOfA := decls.NewListType(paramA) - paramB := decls.NewTypeParamType("B") - typeParamABList := []string{"A", "B"} - mapOfAB := decls.NewMapType(paramA, paramB) - - var idents []*exprpb.Decl - for _, t := range []*exprpb.Type{ - decls.Int, decls.Uint, decls.Bool, - decls.Double, decls.Bytes, decls.String} { - idents = append(idents, - decls.NewVar(FormatCheckedType(t), decls.NewTypeType(t))) - } - idents = append(idents, - decls.NewVar("list", decls.NewTypeType(listOfA)), - decls.NewVar("map", decls.NewTypeType(mapOfAB)), - decls.NewVar("null_type", decls.NewTypeType(decls.Null)), - decls.NewVar("type", decls.NewTypeType(decls.NewTypeType(nil)))) - - standardDeclarations = append(standardDeclarations, idents...) - standardDeclarations = append(standardDeclarations, []*exprpb.Decl{ - // Booleans - decls.NewFunction(operators.Conditional, - decls.NewParameterizedOverload(overloads.Conditional, - []*exprpb.Type{decls.Bool, paramA, paramA}, paramA, - typeParamAList)), - - decls.NewFunction(operators.LogicalAnd, - decls.NewOverload(overloads.LogicalAnd, - []*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool)), - - decls.NewFunction(operators.LogicalOr, - decls.NewOverload(overloads.LogicalOr, - []*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool)), - - decls.NewFunction(operators.LogicalNot, - decls.NewOverload(overloads.LogicalNot, - []*exprpb.Type{decls.Bool}, decls.Bool)), - - decls.NewFunction(operators.NotStrictlyFalse, - decls.NewOverload(overloads.NotStrictlyFalse, - []*exprpb.Type{decls.Bool}, decls.Bool)), - - decls.NewFunction(operators.Equals, - decls.NewParameterizedOverload(overloads.Equals, - []*exprpb.Type{paramA, paramA}, decls.Bool, - typeParamAList)), - - decls.NewFunction(operators.NotEquals, - decls.NewParameterizedOverload(overloads.NotEquals, - []*exprpb.Type{paramA, paramA}, decls.Bool, - typeParamAList)), - - // Algebra. 
- - decls.NewFunction(operators.Subtract, - decls.NewOverload(overloads.SubtractInt64, - []*exprpb.Type{decls.Int, decls.Int}, decls.Int), - decls.NewOverload(overloads.SubtractUint64, - []*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint), - decls.NewOverload(overloads.SubtractDouble, - []*exprpb.Type{decls.Double, decls.Double}, decls.Double), - decls.NewOverload(overloads.SubtractTimestampTimestamp, - []*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Duration), - decls.NewOverload(overloads.SubtractTimestampDuration, - []*exprpb.Type{decls.Timestamp, decls.Duration}, decls.Timestamp), - decls.NewOverload(overloads.SubtractDurationDuration, - []*exprpb.Type{decls.Duration, decls.Duration}, decls.Duration)), - - decls.NewFunction(operators.Multiply, - decls.NewOverload(overloads.MultiplyInt64, - []*exprpb.Type{decls.Int, decls.Int}, decls.Int), - decls.NewOverload(overloads.MultiplyUint64, - []*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint), - decls.NewOverload(overloads.MultiplyDouble, - []*exprpb.Type{decls.Double, decls.Double}, decls.Double)), - - decls.NewFunction(operators.Divide, - decls.NewOverload(overloads.DivideInt64, - []*exprpb.Type{decls.Int, decls.Int}, decls.Int), - decls.NewOverload(overloads.DivideUint64, - []*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint), - decls.NewOverload(overloads.DivideDouble, - []*exprpb.Type{decls.Double, decls.Double}, decls.Double)), - - decls.NewFunction(operators.Modulo, - decls.NewOverload(overloads.ModuloInt64, - []*exprpb.Type{decls.Int, decls.Int}, decls.Int), - decls.NewOverload(overloads.ModuloUint64, - []*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint)), - - decls.NewFunction(operators.Add, - decls.NewOverload(overloads.AddInt64, - []*exprpb.Type{decls.Int, decls.Int}, decls.Int), - decls.NewOverload(overloads.AddUint64, - []*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint), - decls.NewOverload(overloads.AddDouble, - []*exprpb.Type{decls.Double, decls.Double}, decls.Double), - decls.NewOverload(overloads.AddString, - []*exprpb.Type{decls.String, decls.String}, decls.String), - decls.NewOverload(overloads.AddBytes, - []*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bytes), - decls.NewParameterizedOverload(overloads.AddList, - []*exprpb.Type{listOfA, listOfA}, listOfA, - typeParamAList), - decls.NewOverload(overloads.AddTimestampDuration, - []*exprpb.Type{decls.Timestamp, decls.Duration}, decls.Timestamp), - decls.NewOverload(overloads.AddDurationTimestamp, - []*exprpb.Type{decls.Duration, decls.Timestamp}, decls.Timestamp), - decls.NewOverload(overloads.AddDurationDuration, - []*exprpb.Type{decls.Duration, decls.Duration}, decls.Duration)), - - decls.NewFunction(operators.Negate, - decls.NewOverload(overloads.NegateInt64, - []*exprpb.Type{decls.Int}, decls.Int), - decls.NewOverload(overloads.NegateDouble, - []*exprpb.Type{decls.Double}, decls.Double)), - - // Index. - - decls.NewFunction(operators.Index, - decls.NewParameterizedOverload(overloads.IndexList, - []*exprpb.Type{listOfA, decls.Int}, paramA, - typeParamAList), - decls.NewParameterizedOverload(overloads.IndexMap, - []*exprpb.Type{mapOfAB, paramA}, paramB, - typeParamABList)), - - // Collections. 
- - decls.NewFunction(overloads.Size, - decls.NewInstanceOverload(overloads.SizeStringInst, - []*exprpb.Type{decls.String}, decls.Int), - decls.NewInstanceOverload(overloads.SizeBytesInst, - []*exprpb.Type{decls.Bytes}, decls.Int), - decls.NewParameterizedInstanceOverload(overloads.SizeListInst, - []*exprpb.Type{listOfA}, decls.Int, typeParamAList), - decls.NewParameterizedInstanceOverload(overloads.SizeMapInst, - []*exprpb.Type{mapOfAB}, decls.Int, typeParamABList), - decls.NewOverload(overloads.SizeString, - []*exprpb.Type{decls.String}, decls.Int), - decls.NewOverload(overloads.SizeBytes, - []*exprpb.Type{decls.Bytes}, decls.Int), - decls.NewParameterizedOverload(overloads.SizeList, - []*exprpb.Type{listOfA}, decls.Int, typeParamAList), - decls.NewParameterizedOverload(overloads.SizeMap, - []*exprpb.Type{mapOfAB}, decls.Int, typeParamABList)), - - decls.NewFunction(operators.In, - decls.NewParameterizedOverload(overloads.InList, - []*exprpb.Type{paramA, listOfA}, decls.Bool, - typeParamAList), - decls.NewParameterizedOverload(overloads.InMap, - []*exprpb.Type{paramA, mapOfAB}, decls.Bool, - typeParamABList)), - - // Deprecated 'in()' function. - - decls.NewFunction(overloads.DeprecatedIn, - decls.NewParameterizedOverload(overloads.InList, - []*exprpb.Type{paramA, listOfA}, decls.Bool, - typeParamAList), - decls.NewParameterizedOverload(overloads.InMap, - []*exprpb.Type{paramA, mapOfAB}, decls.Bool, - typeParamABList)), - - // Conversions to type. - - decls.NewFunction(overloads.TypeConvertType, - decls.NewParameterizedOverload(overloads.TypeConvertType, - []*exprpb.Type{paramA}, decls.NewTypeType(paramA), typeParamAList)), - - // Conversions to int. - - decls.NewFunction(overloads.TypeConvertInt, - decls.NewOverload(overloads.IntToInt, []*exprpb.Type{decls.Int}, decls.Int), - decls.NewOverload(overloads.UintToInt, []*exprpb.Type{decls.Uint}, decls.Int), - decls.NewOverload(overloads.DoubleToInt, []*exprpb.Type{decls.Double}, decls.Int), - decls.NewOverload(overloads.StringToInt, []*exprpb.Type{decls.String}, decls.Int), - decls.NewOverload(overloads.TimestampToInt, []*exprpb.Type{decls.Timestamp}, decls.Int), - decls.NewOverload(overloads.DurationToInt, []*exprpb.Type{decls.Duration}, decls.Int)), - - // Conversions to uint. - - decls.NewFunction(overloads.TypeConvertUint, - decls.NewOverload(overloads.UintToUint, []*exprpb.Type{decls.Uint}, decls.Uint), - decls.NewOverload(overloads.IntToUint, []*exprpb.Type{decls.Int}, decls.Uint), - decls.NewOverload(overloads.DoubleToUint, []*exprpb.Type{decls.Double}, decls.Uint), - decls.NewOverload(overloads.StringToUint, []*exprpb.Type{decls.String}, decls.Uint)), - - // Conversions to double. - - decls.NewFunction(overloads.TypeConvertDouble, - decls.NewOverload(overloads.DoubleToDouble, []*exprpb.Type{decls.Double}, decls.Double), - decls.NewOverload(overloads.IntToDouble, []*exprpb.Type{decls.Int}, decls.Double), - decls.NewOverload(overloads.UintToDouble, []*exprpb.Type{decls.Uint}, decls.Double), - decls.NewOverload(overloads.StringToDouble, []*exprpb.Type{decls.String}, decls.Double)), - - // Conversions to bool. - - decls.NewFunction(overloads.TypeConvertBool, - decls.NewOverload(overloads.BoolToBool, []*exprpb.Type{decls.Bool}, decls.Bool), - decls.NewOverload(overloads.StringToBool, []*exprpb.Type{decls.String}, decls.Bool)), - - // Conversions to string. 
- - decls.NewFunction(overloads.TypeConvertString, - decls.NewOverload(overloads.StringToString, []*exprpb.Type{decls.String}, decls.String), - decls.NewOverload(overloads.BoolToString, []*exprpb.Type{decls.Bool}, decls.String), - decls.NewOverload(overloads.IntToString, []*exprpb.Type{decls.Int}, decls.String), - decls.NewOverload(overloads.UintToString, []*exprpb.Type{decls.Uint}, decls.String), - decls.NewOverload(overloads.DoubleToString, []*exprpb.Type{decls.Double}, decls.String), - decls.NewOverload(overloads.BytesToString, []*exprpb.Type{decls.Bytes}, decls.String), - decls.NewOverload(overloads.TimestampToString, []*exprpb.Type{decls.Timestamp}, decls.String), - decls.NewOverload(overloads.DurationToString, []*exprpb.Type{decls.Duration}, decls.String)), - - // Conversions to bytes. - - decls.NewFunction(overloads.TypeConvertBytes, - decls.NewOverload(overloads.BytesToBytes, []*exprpb.Type{decls.Bytes}, decls.Bytes), - decls.NewOverload(overloads.StringToBytes, []*exprpb.Type{decls.String}, decls.Bytes)), - - // Conversions to timestamps. - - decls.NewFunction(overloads.TypeConvertTimestamp, - decls.NewOverload(overloads.TimestampToTimestamp, - []*exprpb.Type{decls.Timestamp}, decls.Timestamp), - decls.NewOverload(overloads.StringToTimestamp, - []*exprpb.Type{decls.String}, decls.Timestamp), - decls.NewOverload(overloads.IntToTimestamp, - []*exprpb.Type{decls.Int}, decls.Timestamp)), - - // Conversions to durations. - - decls.NewFunction(overloads.TypeConvertDuration, - decls.NewOverload(overloads.DurationToDuration, - []*exprpb.Type{decls.Duration}, decls.Duration), - decls.NewOverload(overloads.StringToDuration, - []*exprpb.Type{decls.String}, decls.Duration), - decls.NewOverload(overloads.IntToDuration, - []*exprpb.Type{decls.Int}, decls.Duration)), - - // Conversions to Dyn. - - decls.NewFunction(overloads.TypeConvertDyn, - decls.NewParameterizedOverload(overloads.ToDyn, - []*exprpb.Type{paramA}, decls.Dyn, - typeParamAList)), - - // String functions. - - decls.NewFunction(overloads.Contains, - decls.NewInstanceOverload(overloads.ContainsString, - []*exprpb.Type{decls.String, decls.String}, decls.Bool)), - decls.NewFunction(overloads.EndsWith, - decls.NewInstanceOverload(overloads.EndsWithString, - []*exprpb.Type{decls.String, decls.String}, decls.Bool)), - decls.NewFunction(overloads.Matches, - decls.NewOverload(overloads.Matches, - []*exprpb.Type{decls.String, decls.String}, decls.Bool), - decls.NewInstanceOverload(overloads.MatchesString, - []*exprpb.Type{decls.String, decls.String}, decls.Bool)), - decls.NewFunction(overloads.StartsWith, - decls.NewInstanceOverload(overloads.StartsWithString, - []*exprpb.Type{decls.String, decls.String}, decls.Bool)), - - // Date/time functions. 
- - decls.NewFunction(overloads.TimeGetFullYear, - decls.NewInstanceOverload(overloads.TimestampToYear, - []*exprpb.Type{decls.Timestamp}, decls.Int), - decls.NewInstanceOverload(overloads.TimestampToYearWithTz, - []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)), - - decls.NewFunction(overloads.TimeGetMonth, - decls.NewInstanceOverload(overloads.TimestampToMonth, - []*exprpb.Type{decls.Timestamp}, decls.Int), - decls.NewInstanceOverload(overloads.TimestampToMonthWithTz, - []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)), - - decls.NewFunction(overloads.TimeGetDayOfYear, - decls.NewInstanceOverload(overloads.TimestampToDayOfYear, - []*exprpb.Type{decls.Timestamp}, decls.Int), - decls.NewInstanceOverload(overloads.TimestampToDayOfYearWithTz, - []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)), - - decls.NewFunction(overloads.TimeGetDayOfMonth, - decls.NewInstanceOverload(overloads.TimestampToDayOfMonthZeroBased, - []*exprpb.Type{decls.Timestamp}, decls.Int), - decls.NewInstanceOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz, - []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)), - - decls.NewFunction(overloads.TimeGetDate, - decls.NewInstanceOverload(overloads.TimestampToDayOfMonthOneBased, - []*exprpb.Type{decls.Timestamp}, decls.Int), - decls.NewInstanceOverload(overloads.TimestampToDayOfMonthOneBasedWithTz, - []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)), - - decls.NewFunction(overloads.TimeGetDayOfWeek, - decls.NewInstanceOverload(overloads.TimestampToDayOfWeek, - []*exprpb.Type{decls.Timestamp}, decls.Int), - decls.NewInstanceOverload(overloads.TimestampToDayOfWeekWithTz, - []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)), - - decls.NewFunction(overloads.TimeGetHours, - decls.NewInstanceOverload(overloads.TimestampToHours, - []*exprpb.Type{decls.Timestamp}, decls.Int), - decls.NewInstanceOverload(overloads.TimestampToHoursWithTz, - []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int), - decls.NewInstanceOverload(overloads.DurationToHours, - []*exprpb.Type{decls.Duration}, decls.Int)), - - decls.NewFunction(overloads.TimeGetMinutes, - decls.NewInstanceOverload(overloads.TimestampToMinutes, - []*exprpb.Type{decls.Timestamp}, decls.Int), - decls.NewInstanceOverload(overloads.TimestampToMinutesWithTz, - []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int), - decls.NewInstanceOverload(overloads.DurationToMinutes, - []*exprpb.Type{decls.Duration}, decls.Int)), - - decls.NewFunction(overloads.TimeGetSeconds, - decls.NewInstanceOverload(overloads.TimestampToSeconds, - []*exprpb.Type{decls.Timestamp}, decls.Int), - decls.NewInstanceOverload(overloads.TimestampToSecondsWithTz, - []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int), - decls.NewInstanceOverload(overloads.DurationToSeconds, - []*exprpb.Type{decls.Duration}, decls.Int)), - - decls.NewFunction(overloads.TimeGetMilliseconds, - decls.NewInstanceOverload(overloads.TimestampToMilliseconds, - []*exprpb.Type{decls.Timestamp}, decls.Int), - decls.NewInstanceOverload(overloads.TimestampToMillisecondsWithTz, - []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int), - decls.NewInstanceOverload(overloads.DurationToMilliseconds, - []*exprpb.Type{decls.Duration}, decls.Int)), - - // Relations. 
- decls.NewFunction(operators.Less, - decls.NewOverload(overloads.LessBool, - []*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool), - decls.NewOverload(overloads.LessInt64, - []*exprpb.Type{decls.Int, decls.Int}, decls.Bool), - decls.NewOverload(overloads.LessInt64Double, - []*exprpb.Type{decls.Int, decls.Double}, decls.Bool), - decls.NewOverload(overloads.LessInt64Uint64, - []*exprpb.Type{decls.Int, decls.Uint}, decls.Bool), - decls.NewOverload(overloads.LessUint64, - []*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool), - decls.NewOverload(overloads.LessUint64Double, - []*exprpb.Type{decls.Uint, decls.Double}, decls.Bool), - decls.NewOverload(overloads.LessUint64Int64, - []*exprpb.Type{decls.Uint, decls.Int}, decls.Bool), - decls.NewOverload(overloads.LessDouble, - []*exprpb.Type{decls.Double, decls.Double}, decls.Bool), - decls.NewOverload(overloads.LessDoubleInt64, - []*exprpb.Type{decls.Double, decls.Int}, decls.Bool), - decls.NewOverload(overloads.LessDoubleUint64, - []*exprpb.Type{decls.Double, decls.Uint}, decls.Bool), - decls.NewOverload(overloads.LessString, - []*exprpb.Type{decls.String, decls.String}, decls.Bool), - decls.NewOverload(overloads.LessBytes, - []*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool), - decls.NewOverload(overloads.LessTimestamp, - []*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool), - decls.NewOverload(overloads.LessDuration, - []*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)), - - decls.NewFunction(operators.LessEquals, - decls.NewOverload(overloads.LessEqualsBool, - []*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool), - decls.NewOverload(overloads.LessEqualsInt64, - []*exprpb.Type{decls.Int, decls.Int}, decls.Bool), - decls.NewOverload(overloads.LessEqualsInt64Double, - []*exprpb.Type{decls.Int, decls.Double}, decls.Bool), - decls.NewOverload(overloads.LessEqualsInt64Uint64, - []*exprpb.Type{decls.Int, decls.Uint}, decls.Bool), - decls.NewOverload(overloads.LessEqualsUint64, - []*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool), - decls.NewOverload(overloads.LessEqualsUint64Double, - []*exprpb.Type{decls.Uint, decls.Double}, decls.Bool), - decls.NewOverload(overloads.LessEqualsUint64Int64, - []*exprpb.Type{decls.Uint, decls.Int}, decls.Bool), - decls.NewOverload(overloads.LessEqualsDouble, - []*exprpb.Type{decls.Double, decls.Double}, decls.Bool), - decls.NewOverload(overloads.LessEqualsDoubleInt64, - []*exprpb.Type{decls.Double, decls.Int}, decls.Bool), - decls.NewOverload(overloads.LessEqualsDoubleUint64, - []*exprpb.Type{decls.Double, decls.Uint}, decls.Bool), - decls.NewOverload(overloads.LessEqualsString, - []*exprpb.Type{decls.String, decls.String}, decls.Bool), - decls.NewOverload(overloads.LessEqualsBytes, - []*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool), - decls.NewOverload(overloads.LessEqualsTimestamp, - []*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool), - decls.NewOverload(overloads.LessEqualsDuration, - []*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)), - - decls.NewFunction(operators.Greater, - decls.NewOverload(overloads.GreaterBool, - []*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool), - decls.NewOverload(overloads.GreaterInt64, - []*exprpb.Type{decls.Int, decls.Int}, decls.Bool), - decls.NewOverload(overloads.GreaterInt64Double, - []*exprpb.Type{decls.Int, decls.Double}, decls.Bool), - decls.NewOverload(overloads.GreaterInt64Uint64, - []*exprpb.Type{decls.Int, decls.Uint}, decls.Bool), - decls.NewOverload(overloads.GreaterUint64, - []*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool), 
- decls.NewOverload(overloads.GreaterUint64Double, - []*exprpb.Type{decls.Uint, decls.Double}, decls.Bool), - decls.NewOverload(overloads.GreaterUint64Int64, - []*exprpb.Type{decls.Uint, decls.Int}, decls.Bool), - decls.NewOverload(overloads.GreaterDouble, - []*exprpb.Type{decls.Double, decls.Double}, decls.Bool), - decls.NewOverload(overloads.GreaterDoubleInt64, - []*exprpb.Type{decls.Double, decls.Int}, decls.Bool), - decls.NewOverload(overloads.GreaterDoubleUint64, - []*exprpb.Type{decls.Double, decls.Uint}, decls.Bool), - decls.NewOverload(overloads.GreaterString, - []*exprpb.Type{decls.String, decls.String}, decls.Bool), - decls.NewOverload(overloads.GreaterBytes, - []*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool), - decls.NewOverload(overloads.GreaterTimestamp, - []*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool), - decls.NewOverload(overloads.GreaterDuration, - []*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)), - - decls.NewFunction(operators.GreaterEquals, - decls.NewOverload(overloads.GreaterEqualsBool, - []*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool), - decls.NewOverload(overloads.GreaterEqualsInt64, - []*exprpb.Type{decls.Int, decls.Int}, decls.Bool), - decls.NewOverload(overloads.GreaterEqualsInt64Double, - []*exprpb.Type{decls.Int, decls.Double}, decls.Bool), - decls.NewOverload(overloads.GreaterEqualsInt64Uint64, - []*exprpb.Type{decls.Int, decls.Uint}, decls.Bool), - decls.NewOverload(overloads.GreaterEqualsUint64, - []*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool), - decls.NewOverload(overloads.GreaterEqualsUint64Double, - []*exprpb.Type{decls.Uint, decls.Double}, decls.Bool), - decls.NewOverload(overloads.GreaterEqualsUint64Int64, - []*exprpb.Type{decls.Uint, decls.Int}, decls.Bool), - decls.NewOverload(overloads.GreaterEqualsDouble, - []*exprpb.Type{decls.Double, decls.Double}, decls.Bool), - decls.NewOverload(overloads.GreaterEqualsDoubleInt64, - []*exprpb.Type{decls.Double, decls.Int}, decls.Bool), - decls.NewOverload(overloads.GreaterEqualsDoubleUint64, - []*exprpb.Type{decls.Double, decls.Uint}, decls.Bool), - decls.NewOverload(overloads.GreaterEqualsString, - []*exprpb.Type{decls.String, decls.String}, decls.Bool), - decls.NewOverload(overloads.GreaterEqualsBytes, - []*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool), - decls.NewOverload(overloads.GreaterEqualsTimestamp, - []*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool), - decls.NewOverload(overloads.GreaterEqualsDuration, - []*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)), - }...) +// StandardFunctions returns the Decls for all functions in the evaluator. +// +// Deprecated: prefer stdlib.FunctionExprDecls() +func StandardFunctions() []*exprpb.Decl { + return stdlib.FunctionExprDecls() } -// StandardDeclarations returns the Decls for all functions and constants in the evaluator. -func StandardDeclarations() []*exprpb.Decl { - return standardDeclarations +// StandardTypes returns the set of type identifiers for standard library types. 
+// +// Deprecated: prefer stdlib.TypeExprDecls() +func StandardTypes() []*exprpb.Decl { + return stdlib.TypeExprDecls() } diff --git a/vendor/github.com/google/cel-go/checker/types.go b/vendor/github.com/google/cel-go/checker/types.go index 28d21c9d9..e2373d1b7 100644 --- a/vendor/github.com/google/cel-go/checker/types.go +++ b/vendor/github.com/google/cel-go/checker/types.go @@ -15,154 +15,54 @@ package checker import ( - "fmt" - "strings" - - "github.com/google/cel-go/checker/decls" - - "google.golang.org/protobuf/proto" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + "github.com/google/cel-go/common/types" ) -const ( - kindUnknown = iota + 1 - kindError - kindFunction - kindDyn - kindPrimitive - kindWellKnown - kindWrapper - kindNull - kindAbstract - kindType - kindList - kindMap - kindObject - kindTypeParam -) - -// FormatCheckedType converts a type message into a string representation. -func FormatCheckedType(t *exprpb.Type) string { - switch kindOf(t) { - case kindDyn: - return "dyn" - case kindFunction: - return formatFunction(t.GetFunction().GetResultType(), - t.GetFunction().GetArgTypes(), - false) - case kindList: - return fmt.Sprintf("list(%s)", FormatCheckedType(t.GetListType().GetElemType())) - case kindObject: - return t.GetMessageType() - case kindMap: - return fmt.Sprintf("map(%s, %s)", - FormatCheckedType(t.GetMapType().GetKeyType()), - FormatCheckedType(t.GetMapType().GetValueType())) - case kindNull: - return "null" - case kindPrimitive: - switch t.GetPrimitive() { - case exprpb.Type_UINT64: - return "uint" - case exprpb.Type_INT64: - return "int" - } - return strings.Trim(strings.ToLower(t.GetPrimitive().String()), " ") - case kindType: - if t.GetType() == nil { - return "type" - } - return fmt.Sprintf("type(%s)", FormatCheckedType(t.GetType())) - case kindWellKnown: - switch t.GetWellKnown() { - case exprpb.Type_ANY: - return "any" - case exprpb.Type_DURATION: - return "duration" - case exprpb.Type_TIMESTAMP: - return "timestamp" - } - case kindWrapper: - return fmt.Sprintf("wrapper(%s)", - FormatCheckedType(decls.NewPrimitiveType(t.GetWrapper()))) - case kindError: - return "!error!" - case kindTypeParam: - return t.GetTypeParam() - case kindAbstract: - at := t.GetAbstractType() - params := at.GetParameterTypes() - paramStrs := make([]string, len(params)) - for i, p := range params { - paramStrs[i] = FormatCheckedType(p) - } - return fmt.Sprintf("%s(%s)", at.GetName(), strings.Join(paramStrs, ", ")) - } - return t.String() -} - // isDyn returns true if the input t is either type DYN or a well-known ANY message. -func isDyn(t *exprpb.Type) bool { +func isDyn(t *types.Type) bool { // Note: object type values that are well-known and map to a DYN value in practice // are sanitized prior to being added to the environment. - switch kindOf(t) { - case kindDyn: + switch t.Kind() { + case types.DynKind, types.AnyKind: return true - case kindWellKnown: - return t.GetWellKnown() == exprpb.Type_ANY default: return false } } // isDynOrError returns true if the input is either an Error, DYN, or well-known ANY message. 
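[editor's note, not part of the patch] The assignability helpers below now switch on types.Type kinds and parameters rather than the protobuf type_kind oneof. For orientation, an illustrative look at the CEL-native type values involved, using only APIs that appear in this hunk:

	lt := types.NewListType(types.NewTypeParamType("T"))
	fmt.Println(lt.Kind() == types.ListKind)       // true
	fmt.Println(FormatCELType(lt.Parameters()[0])) // T
	fmt.Println(FormatCELType(types.DynType))      // dyn
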
-func isDynOrError(t *exprpb.Type) bool { +func isDynOrError(t *types.Type) bool { return isError(t) || isDyn(t) } -func isError(t *exprpb.Type) bool { - return kindOf(t) == kindError +func isError(t *types.Type) bool { + return t.Kind() == types.ErrorKind } -func isOptional(t *exprpb.Type) bool { - if kindOf(t) == kindAbstract { - at := t.GetAbstractType() - return at.GetName() == "optional" +func isOptional(t *types.Type) bool { + if t.Kind() == types.OpaqueKind { + return t.TypeName() == "optional" } return false } -func maybeUnwrapOptional(t *exprpb.Type) (*exprpb.Type, bool) { +func maybeUnwrapOptional(t *types.Type) (*types.Type, bool) { if isOptional(t) { - at := t.GetAbstractType() - return at.GetParameterTypes()[0], true + return t.Parameters()[0], true } return t, false } -func maybeUnwrapString(e *exprpb.Expr) (string, bool) { - switch e.GetExprKind().(type) { - case *exprpb.Expr_ConstExpr: - literal := e.GetConstExpr() - switch literal.GetConstantKind().(type) { - case *exprpb.Constant_StringValue: - return literal.GetStringValue(), true - } - } - return "", false -} - // isEqualOrLessSpecific checks whether one type is equal or less specific than the other one. // A type is less specific if it matches the other type using the DYN type. -func isEqualOrLessSpecific(t1 *exprpb.Type, t2 *exprpb.Type) bool { - kind1, kind2 := kindOf(t1), kindOf(t2) +func isEqualOrLessSpecific(t1, t2 *types.Type) bool { + kind1, kind2 := t1.Kind(), t2.Kind() // The first type is less specific. - if isDyn(t1) || kind1 == kindTypeParam { + if isDyn(t1) || kind1 == types.TypeParamKind { return true } // The first type is not less specific. - if isDyn(t2) || kind2 == kindTypeParam { + if isDyn(t2) || kind2 == types.TypeParamKind { return false } // Types must be of the same kind to be equal. @@ -173,38 +73,34 @@ func isEqualOrLessSpecific(t1 *exprpb.Type, t2 *exprpb.Type) bool { // With limited exceptions for ANY and JSON values, the types must agree and be equivalent in // order to return true. switch kind1 { - case kindAbstract: - a1 := t1.GetAbstractType() - a2 := t2.GetAbstractType() - if a1.GetName() != a2.GetName() || - len(a1.GetParameterTypes()) != len(a2.GetParameterTypes()) { + case types.OpaqueKind: + if t1.TypeName() != t2.TypeName() || + len(t1.Parameters()) != len(t2.Parameters()) { return false } - for i, p1 := range a1.GetParameterTypes() { - if !isEqualOrLessSpecific(p1, a2.GetParameterTypes()[i]) { + for i, p1 := range t1.Parameters() { + if !isEqualOrLessSpecific(p1, t2.Parameters()[i]) { return false } } return true - case kindList: - return isEqualOrLessSpecific(t1.GetListType().GetElemType(), t2.GetListType().GetElemType()) - case kindMap: - m1 := t1.GetMapType() - m2 := t2.GetMapType() - return isEqualOrLessSpecific(m1.GetKeyType(), m2.GetKeyType()) && - isEqualOrLessSpecific(m1.GetValueType(), m2.GetValueType()) - case kindType: + case types.ListKind: + return isEqualOrLessSpecific(t1.Parameters()[0], t2.Parameters()[0]) + case types.MapKind: + return isEqualOrLessSpecific(t1.Parameters()[0], t2.Parameters()[0]) && + isEqualOrLessSpecific(t1.Parameters()[1], t2.Parameters()[1]) + case types.TypeKind: return true default: - return proto.Equal(t1, t2) + return t1.IsExactType(t2) } } // / internalIsAssignable returns true if t1 is assignable to t2. -func internalIsAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) bool { +func internalIsAssignable(m *mapping, t1, t2 *types.Type) bool { // Process type parameters. 
- kind1, kind2 := kindOf(t1), kindOf(t2) - if kind2 == kindTypeParam { + kind1, kind2 := t1.Kind(), t2.Kind() + if kind2 == types.TypeParamKind { // If t2 is a valid type substitution for t1, return true. valid, t2HasSub := isValidTypeSubstitution(m, t1, t2) if valid { @@ -217,7 +113,7 @@ func internalIsAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) bool { } // Otherwise, fall through to check whether t1 is a possible substitution for t2. } - if kind1 == kindTypeParam { + if kind1 == types.TypeParamKind { // Return whether t1 is a valid substitution for t2. If not, do no additional checks as the // possible type substitutions have been searched in both directions. valid, _ := isValidTypeSubstitution(m, t2, t1) @@ -228,40 +124,25 @@ func internalIsAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) bool { if isDynOrError(t1) || isDynOrError(t2) { return true } - - // Test for when the types do not need to agree, but are more specific than dyn. - switch kind1 { - case kindNull: + // Preserve the nullness checks of the legacy type-checker. + if kind1 == types.NullTypeKind { return internalIsAssignableNull(t2) - case kindPrimitive: - return internalIsAssignablePrimitive(t1.GetPrimitive(), t2) - case kindWrapper: - return internalIsAssignable(m, decls.NewPrimitiveType(t1.GetWrapper()), t2) - default: - if kind1 != kind2 { - return false - } + } + if kind2 == types.NullTypeKind { + return internalIsAssignableNull(t1) } - // Test for when the types must agree. + // Test for when the types do not need to agree, but are more specific than dyn. switch kind1 { - // ERROR, TYPE_PARAM, and DYN handled above. - case kindAbstract: - return internalIsAssignableAbstractType(m, t1.GetAbstractType(), t2.GetAbstractType()) - case kindFunction: - return internalIsAssignableFunction(m, t1.GetFunction(), t2.GetFunction()) - case kindList: - return internalIsAssignable(m, t1.GetListType().GetElemType(), t2.GetListType().GetElemType()) - case kindMap: - return internalIsAssignableMap(m, t1.GetMapType(), t2.GetMapType()) - case kindObject: - return t1.GetMessageType() == t2.GetMessageType() - case kindType: - // A type is a type is a type, any additional parameterization of the - // type cannot affect method resolution or assignability. - return true - case kindWellKnown: - return t1.GetWellKnown() == t2.GetWellKnown() + case types.BoolKind, types.BytesKind, types.DoubleKind, types.IntKind, types.StringKind, types.UintKind, + types.AnyKind, types.DurationKind, types.TimestampKind, + types.StructKind: + return t1.IsAssignableType(t2) + case types.TypeKind: + return kind2 == types.TypeKind + case types.OpaqueKind, types.ListKind, types.MapKind: + return t1.Kind() == t2.Kind() && t1.TypeName() == t2.TypeName() && + internalIsAssignableList(m, t1.Parameters(), t2.Parameters()) default: return false } @@ -274,16 +155,16 @@ func internalIsAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) bool { // - t2 has a type substitution (t2sub) equal to t1 // - t2 has a type substitution (t2sub) assignable to t1 // - t2 does not occur within t1. -func isValidTypeSubstitution(m *mapping, t1, t2 *exprpb.Type) (valid, hasSub bool) { +func isValidTypeSubstitution(m *mapping, t1, t2 *types.Type) (valid, hasSub bool) { // Early return if the t1 and t2 are the same instance. 
- kind1, kind2 := kindOf(t1), kindOf(t2) - if kind1 == kind2 && (t1 == t2 || proto.Equal(t1, t2)) { + kind1, kind2 := t1.Kind(), t2.Kind() + if kind1 == kind2 && t1.IsExactType(t2) { return true, true } if t2Sub, found := m.find(t2); found { // Early return if t1 and t2Sub are the same instance as otherwise the mapping // might mark a type as being a subtitution for itself. - if kind1 == kindOf(t2Sub) && (t1 == t2Sub || proto.Equal(t1, t2Sub)) { + if kind1 == t2Sub.Kind() && t1.IsExactType(t2Sub) { return true, true } // If the types are compatible, pick the more general type and return true @@ -305,28 +186,10 @@ func isValidTypeSubstitution(m *mapping, t1, t2 *exprpb.Type) (valid, hasSub boo return false, false } -// internalIsAssignableAbstractType returns true if the abstract type names agree and all type -// parameters are assignable. -func internalIsAssignableAbstractType(m *mapping, a1 *exprpb.Type_AbstractType, a2 *exprpb.Type_AbstractType) bool { - return a1.GetName() == a2.GetName() && - internalIsAssignableList(m, a1.GetParameterTypes(), a2.GetParameterTypes()) -} - -// internalIsAssignableFunction returns true if the function return type and arg types are -// assignable. -func internalIsAssignableFunction(m *mapping, f1 *exprpb.Type_FunctionType, f2 *exprpb.Type_FunctionType) bool { - f1ArgTypes := flattenFunctionTypes(f1) - f2ArgTypes := flattenFunctionTypes(f2) - if internalIsAssignableList(m, f1ArgTypes, f2ArgTypes) { - return true - } - return false -} - // internalIsAssignableList returns true if the element types at each index in the list are // assignable from l1[i] to l2[i]. The list lengths must also agree for the lists to be // assignable. -func internalIsAssignableList(m *mapping, l1 []*exprpb.Type, l2 []*exprpb.Type) bool { +func internalIsAssignableList(m *mapping, l1, l2 []*types.Type) bool { if len(l1) != len(l2) { return false } @@ -338,41 +201,22 @@ func internalIsAssignableList(m *mapping, l1 []*exprpb.Type, l2 []*exprpb.Type) return true } -// internalIsAssignableMap returns true if map m1 may be assigned to map m2. -func internalIsAssignableMap(m *mapping, m1 *exprpb.Type_MapType, m2 *exprpb.Type_MapType) bool { - if internalIsAssignableList(m, - []*exprpb.Type{m1.GetKeyType(), m1.GetValueType()}, - []*exprpb.Type{m2.GetKeyType(), m2.GetValueType()}) { - return true - } - return false -} - // internalIsAssignableNull returns true if the type is nullable. -func internalIsAssignableNull(t *exprpb.Type) bool { - switch kindOf(t) { - case kindAbstract, kindObject, kindNull, kindWellKnown, kindWrapper: - return true - default: - return false - } +func internalIsAssignableNull(t *types.Type) bool { + return isLegacyNullable(t) || t.IsAssignableType(types.NullType) } -// internalIsAssignablePrimitive returns true if the target type is the same or if it is a wrapper -// for the primitive type. -func internalIsAssignablePrimitive(p exprpb.Type_PrimitiveType, target *exprpb.Type) bool { - switch kindOf(target) { - case kindPrimitive: - return p == target.GetPrimitive() - case kindWrapper: - return p == target.GetWrapper() - default: - return false +// isLegacyNullable preserves the null-ness compatibility of the original type-checker implementation. +func isLegacyNullable(t *types.Type) bool { + switch t.Kind() { + case types.OpaqueKind, types.StructKind, types.AnyKind, types.DurationKind, types.TimestampKind: + return true } + return false } // isAssignable returns an updated type substitution mapping if t1 is assignable to t2. 
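[editor's note, not part of the patch] Type-parameter unification still flows through the mapping and substitute helpers, now keyed by FormatCELType strings. A conceptual, checker-internal fragment showing a successful binding; it assumes isValidTypeSubstitution records the binding on success, as in the proto-based version it replaces:

	m := newMapping()
	tParam := types.NewTypeParamType("T")
	if out := isAssignable(m, types.IntType, tParam); out != nil {
		// 'T' is bound to int in the returned mapping copy.
		fmt.Println(FormatCELType(substitute(out, types.NewListType(tParam), false))) // list(int)
	}
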
-func isAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) *mapping { +func isAssignable(m *mapping, t1, t2 *types.Type) *mapping { mCopy := m.copy() if internalIsAssignable(mCopy, t1, t2) { return mCopy @@ -381,7 +225,7 @@ func isAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) *mapping { } // isAssignableList returns an updated type substitution mapping if l1 is assignable to l2. -func isAssignableList(m *mapping, l1 []*exprpb.Type, l2 []*exprpb.Type) *mapping { +func isAssignableList(m *mapping, l1, l2 []*types.Type) *mapping { mCopy := m.copy() if internalIsAssignableList(mCopy, l1, l2) { return mCopy @@ -389,44 +233,8 @@ func isAssignableList(m *mapping, l1 []*exprpb.Type, l2 []*exprpb.Type) *mapping return nil } -// kindOf returns the kind of the type as defined in the checked.proto. -func kindOf(t *exprpb.Type) int { - if t == nil || t.TypeKind == nil { - return kindUnknown - } - switch t.GetTypeKind().(type) { - case *exprpb.Type_Error: - return kindError - case *exprpb.Type_Function: - return kindFunction - case *exprpb.Type_Dyn: - return kindDyn - case *exprpb.Type_Primitive: - return kindPrimitive - case *exprpb.Type_WellKnown: - return kindWellKnown - case *exprpb.Type_Wrapper: - return kindWrapper - case *exprpb.Type_Null: - return kindNull - case *exprpb.Type_Type: - return kindType - case *exprpb.Type_ListType_: - return kindList - case *exprpb.Type_MapType_: - return kindMap - case *exprpb.Type_MessageType: - return kindObject - case *exprpb.Type_TypeParam: - return kindTypeParam - case *exprpb.Type_AbstractType_: - return kindAbstract - } - return kindUnknown -} - // mostGeneral returns the more general of two types which are known to unify. -func mostGeneral(t1 *exprpb.Type, t2 *exprpb.Type) *exprpb.Type { +func mostGeneral(t1, t2 *types.Type) *types.Type { if isEqualOrLessSpecific(t1, t2) { return t1 } @@ -436,32 +244,25 @@ func mostGeneral(t1 *exprpb.Type, t2 *exprpb.Type) *exprpb.Type { // notReferencedIn checks whether the type doesn't appear directly or transitively within the other // type. This is a standard requirement for type unification, commonly referred to as the "occurs // check". -func notReferencedIn(m *mapping, t *exprpb.Type, withinType *exprpb.Type) bool { - if proto.Equal(t, withinType) { +func notReferencedIn(m *mapping, t, withinType *types.Type) bool { + if t.IsExactType(withinType) { return false } - withinKind := kindOf(withinType) + withinKind := withinType.Kind() switch withinKind { - case kindTypeParam: + case types.TypeParamKind: wtSub, found := m.find(withinType) if !found { return true } return notReferencedIn(m, t, wtSub) - case kindAbstract: - for _, pt := range withinType.GetAbstractType().GetParameterTypes() { + case types.OpaqueKind, types.ListKind, types.MapKind: + for _, pt := range withinType.Parameters() { if !notReferencedIn(m, t, pt) { return false } } return true - case kindList: - return notReferencedIn(m, t, withinType.GetListType().GetElemType()) - case kindMap: - mt := withinType.GetMapType() - return notReferencedIn(m, t, mt.GetKeyType()) && notReferencedIn(m, t, mt.GetValueType()) - case kindWrapper: - return notReferencedIn(m, t, decls.NewPrimitiveType(withinType.GetWrapper())) default: return true } @@ -469,39 +270,25 @@ func notReferencedIn(m *mapping, t *exprpb.Type, withinType *exprpb.Type) bool { // substitute replaces all direct and indirect occurrences of bound type parameters. Unbound type // parameters are replaced by DYN if typeParamToDyn is true. 
-func substitute(m *mapping, t *exprpb.Type, typeParamToDyn bool) *exprpb.Type { +func substitute(m *mapping, t *types.Type, typeParamToDyn bool) *types.Type { if tSub, found := m.find(t); found { return substitute(m, tSub, typeParamToDyn) } - kind := kindOf(t) - if typeParamToDyn && kind == kindTypeParam { - return decls.Dyn + kind := t.Kind() + if typeParamToDyn && kind == types.TypeParamKind { + return types.DynType } switch kind { - case kindAbstract: - at := t.GetAbstractType() - params := make([]*exprpb.Type, len(at.GetParameterTypes())) - for i, p := range at.GetParameterTypes() { - params[i] = substitute(m, p, typeParamToDyn) - } - return decls.NewAbstractType(at.GetName(), params...) - case kindFunction: - fn := t.GetFunction() - rt := substitute(m, fn.ResultType, typeParamToDyn) - args := make([]*exprpb.Type, len(fn.GetArgTypes())) - for i, a := range fn.ArgTypes { - args[i] = substitute(m, a, typeParamToDyn) - } - return decls.NewFunctionType(rt, args...) - case kindList: - return decls.NewListType(substitute(m, t.GetListType().GetElemType(), typeParamToDyn)) - case kindMap: - mt := t.GetMapType() - return decls.NewMapType(substitute(m, mt.GetKeyType(), typeParamToDyn), - substitute(m, mt.GetValueType(), typeParamToDyn)) - case kindType: - if t.GetType() != nil { - return decls.NewTypeType(substitute(m, t.GetType(), typeParamToDyn)) + case types.OpaqueKind: + return types.NewOpaqueType(t.TypeName(), substituteParams(m, t.Parameters(), typeParamToDyn)...) + case types.ListKind: + return types.NewListType(substitute(m, t.Parameters()[0], typeParamToDyn)) + case types.MapKind: + return types.NewMapType(substitute(m, t.Parameters()[0], typeParamToDyn), + substitute(m, t.Parameters()[1], typeParamToDyn)) + case types.TypeKind: + if len(t.Parameters()) > 0 { + return types.NewTypeTypeWithParam(substitute(m, t.Parameters()[0], typeParamToDyn)) } return t default: @@ -509,21 +296,14 @@ func substitute(m *mapping, t *exprpb.Type, typeParamToDyn bool) *exprpb.Type { } } -func typeKey(t *exprpb.Type) string { - return FormatCheckedType(t) +func substituteParams(m *mapping, typeParams []*types.Type, typeParamToDyn bool) []*types.Type { + subParams := make([]*types.Type, len(typeParams)) + for i, tp := range typeParams { + subParams[i] = substitute(m, tp, typeParamToDyn) + } + return subParams } -// flattenFunctionTypes takes a function with arg types T1, T2, ..., TN and result type TR -// and returns a slice containing {T1, T2, ..., TN, TR}. -func flattenFunctionTypes(f *exprpb.Type_FunctionType) []*exprpb.Type { - argTypes := f.GetArgTypes() - if len(argTypes) == 0 { - return []*exprpb.Type{f.GetResultType()} - } - flattend := make([]*exprpb.Type, len(argTypes)+1, len(argTypes)+1) - for i, at := range argTypes { - flattend[i] = at - } - flattend[len(argTypes)] = f.GetResultType() - return flattend +func newFunctionType(resultType *types.Type, argTypes ...*types.Type) *types.Type { + return types.NewOpaqueType("function", append([]*types.Type{resultType}, argTypes...)...) 
} diff --git a/vendor/github.com/google/cel-go/common/ast/BUILD.bazel b/vendor/github.com/google/cel-go/common/ast/BUILD.bazel new file mode 100644 index 000000000..7269cdff5 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/ast/BUILD.bazel @@ -0,0 +1,52 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + default_visibility = [ + "//cel:__subpackages__", + "//checker:__subpackages__", + "//common:__subpackages__", + "//interpreter:__subpackages__", + ], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "ast.go", + "expr.go", + ], + importpath = "github.com/google/cel-go/common/ast", + deps = [ + "//common/types:go_default_library", + "//common/types/ref:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//types/known/structpb:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "ast_test.go", + "expr_test.go", + ], + embed = [ + ":go_default_library", + ], + deps = [ + "//checker:go_default_library", + "//checker/decls:go_default_library", + "//common:go_default_library", + "//common/containers:go_default_library", + "//common/decls:go_default_library", + "//common/overloads:go_default_library", + "//common/stdlib:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", + "//parser:go_default_library", + "//test/proto3pb:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + ], +) \ No newline at end of file diff --git a/vendor/github.com/google/cel-go/common/ast/ast.go b/vendor/github.com/google/cel-go/common/ast/ast.go new file mode 100644 index 000000000..b3c150793 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/ast/ast.go @@ -0,0 +1,226 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ast declares data structures useful for parsed and checked abstract syntax trees +package ast + +import ( + "fmt" + + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + + structpb "google.golang.org/protobuf/types/known/structpb" + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" +) + +// CheckedAST contains a protobuf expression and source info along with CEL-native type and reference information. +type CheckedAST struct { + Expr *exprpb.Expr + SourceInfo *exprpb.SourceInfo + TypeMap map[int64]*types.Type + ReferenceMap map[int64]*ReferenceInfo +} + +// CheckedASTToCheckedExpr converts a CheckedAST to a CheckedExpr protobouf. 
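// A minimal sketch of the intended round trip, assuming `checkedAST` is a *CheckedAST produced by an
// earlier type-check (the variable name is illustrative only):
//
//	pbChecked, err := CheckedASTToCheckedExpr(checkedAST) // CEL-native -> protobuf
//	if err == nil {
//		roundTripped, _ := CheckedExprToCheckedAST(pbChecked) // protobuf -> CEL-native
//		_ = roundTripped
//	}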
+func CheckedASTToCheckedExpr(ast *CheckedAST) (*exprpb.CheckedExpr, error) { + refMap := make(map[int64]*exprpb.Reference, len(ast.ReferenceMap)) + for id, ref := range ast.ReferenceMap { + r, err := ReferenceInfoToReferenceExpr(ref) + if err != nil { + return nil, err + } + refMap[id] = r + } + typeMap := make(map[int64]*exprpb.Type, len(ast.TypeMap)) + for id, typ := range ast.TypeMap { + t, err := types.TypeToExprType(typ) + if err != nil { + return nil, err + } + typeMap[id] = t + } + return &exprpb.CheckedExpr{ + Expr: ast.Expr, + SourceInfo: ast.SourceInfo, + ReferenceMap: refMap, + TypeMap: typeMap, + }, nil +} + +// CheckedExprToCheckedAST converts a CheckedExpr protobuf to a CheckedAST instance. +func CheckedExprToCheckedAST(checked *exprpb.CheckedExpr) (*CheckedAST, error) { + refMap := make(map[int64]*ReferenceInfo, len(checked.GetReferenceMap())) + for id, ref := range checked.GetReferenceMap() { + r, err := ReferenceExprToReferenceInfo(ref) + if err != nil { + return nil, err + } + refMap[id] = r + } + typeMap := make(map[int64]*types.Type, len(checked.GetTypeMap())) + for id, typ := range checked.GetTypeMap() { + t, err := types.ExprTypeToType(typ) + if err != nil { + return nil, err + } + typeMap[id] = t + } + return &CheckedAST{ + Expr: checked.GetExpr(), + SourceInfo: checked.GetSourceInfo(), + ReferenceMap: refMap, + TypeMap: typeMap, + }, nil +} + +// ReferenceInfo contains a CEL native representation of an identifier reference which may refer to +// either a qualified identifier name, a set of overload ids, or a constant value from an enum. +type ReferenceInfo struct { + Name string + OverloadIDs []string + Value ref.Val +} + +// NewIdentReference creates a ReferenceInfo instance for an identifier with an optional constant value. +func NewIdentReference(name string, value ref.Val) *ReferenceInfo { + return &ReferenceInfo{Name: name, Value: value} +} + +// NewFunctionReference creates a ReferenceInfo instance for a set of function overloads. +func NewFunctionReference(overloads ...string) *ReferenceInfo { + info := &ReferenceInfo{} + for _, id := range overloads { + info.AddOverload(id) + } + return info +} + +// AddOverload appends a function overload ID to the ReferenceInfo. +func (r *ReferenceInfo) AddOverload(overloadID string) { + for _, id := range r.OverloadIDs { + if id == overloadID { + return + } + } + r.OverloadIDs = append(r.OverloadIDs, overloadID) +} + +// Equals returns whether two references are identical to each other. +func (r *ReferenceInfo) Equals(other *ReferenceInfo) bool { + if r.Name != other.Name { + return false + } + if len(r.OverloadIDs) != len(other.OverloadIDs) { + return false + } + if len(r.OverloadIDs) != 0 { + overloadMap := make(map[string]struct{}, len(r.OverloadIDs)) + for _, id := range r.OverloadIDs { + overloadMap[id] = struct{}{} + } + for _, id := range other.OverloadIDs { + _, found := overloadMap[id] + if !found { + return false + } + } + } + if r.Value == nil && other.Value == nil { + return true + } + if r.Value == nil && other.Value != nil || + r.Value != nil && other.Value == nil || + r.Value.Equal(other.Value) != types.True { + return false + } + return true +} + +// ReferenceInfoToReferenceExpr converts a ReferenceInfo instance to a protobuf Reference suitable for serialization. 
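// As a rough sketch, a function reference with de-duplicated overload ids can be built and then
// converted with the helper below (the overload ids here are only illustrative):
//
//	fnRef := NewFunctionReference("size_string", "size_bytes")
//	fnRef.AddOverload("size_string") // no-op, the id is already present
//	pbRef, err := ReferenceInfoToReferenceExpr(fnRef)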
+func ReferenceInfoToReferenceExpr(info *ReferenceInfo) (*exprpb.Reference, error) { + c, err := ValToConstant(info.Value) + if err != nil { + return nil, err + } + return &exprpb.Reference{ + Name: info.Name, + OverloadId: info.OverloadIDs, + Value: c, + }, nil +} + +// ReferenceExprToReferenceInfo converts a protobuf Reference into a CEL-native ReferenceInfo instance. +func ReferenceExprToReferenceInfo(ref *exprpb.Reference) (*ReferenceInfo, error) { + v, err := ConstantToVal(ref.GetValue()) + if err != nil { + return nil, err + } + return &ReferenceInfo{ + Name: ref.GetName(), + OverloadIDs: ref.GetOverloadId(), + Value: v, + }, nil +} + +// ValToConstant converts a CEL-native ref.Val to a protobuf Constant. +// +// Only simple scalar types are supported by this method. +func ValToConstant(v ref.Val) (*exprpb.Constant, error) { + if v == nil { + return nil, nil + } + switch v.Type() { + case types.BoolType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: v.Value().(bool)}}, nil + case types.BytesType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: v.Value().([]byte)}}, nil + case types.DoubleType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: v.Value().(float64)}}, nil + case types.IntType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: v.Value().(int64)}}, nil + case types.NullType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: structpb.NullValue_NULL_VALUE}}, nil + case types.StringType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: v.Value().(string)}}, nil + case types.UintType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: v.Value().(uint64)}}, nil + } + return nil, fmt.Errorf("unsupported constant kind: %v", v.Type()) +} + +// ConstantToVal converts a protobuf Constant to a CEL-native ref.Val. +func ConstantToVal(c *exprpb.Constant) (ref.Val, error) { + if c == nil { + return nil, nil + } + switch c.GetConstantKind().(type) { + case *exprpb.Constant_BoolValue: + return types.Bool(c.GetBoolValue()), nil + case *exprpb.Constant_BytesValue: + return types.Bytes(c.GetBytesValue()), nil + case *exprpb.Constant_DoubleValue: + return types.Double(c.GetDoubleValue()), nil + case *exprpb.Constant_Int64Value: + return types.Int(c.GetInt64Value()), nil + case *exprpb.Constant_NullValue: + return types.NullValue, nil + case *exprpb.Constant_StringValue: + return types.String(c.GetStringValue()), nil + case *exprpb.Constant_Uint64Value: + return types.Uint(c.GetUint64Value()), nil + } + return nil, fmt.Errorf("unsupported constant kind: %v", c.GetConstantKind()) +} diff --git a/vendor/github.com/google/cel-go/common/ast/expr.go b/vendor/github.com/google/cel-go/common/ast/expr.go new file mode 100644 index 000000000..b63884a60 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/ast/expr.go @@ -0,0 +1,709 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import ( + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" +) + +// ExprKind represents the expression node kind. +type ExprKind int + +const ( + // UnspecifiedKind represents an unset expression with no specified properties. + UnspecifiedKind ExprKind = iota + + // LiteralKind represents a primitive scalar literal. + LiteralKind + + // IdentKind represents a simple variable, constant, or type identifier. + IdentKind + + // SelectKind represents a field selection expression. + SelectKind + + // CallKind represents a function call. + CallKind + + // ListKind represents a list literal expression. + ListKind + + // MapKind represents a map literal expression. + MapKind + + // StructKind represents a struct literal expression. + StructKind + + // ComprehensionKind represents a comprehension expression generated by a macro. + ComprehensionKind +) + +// NavigateCheckedAST converts a CheckedAST to a NavigableExpr +func NavigateCheckedAST(ast *CheckedAST) NavigableExpr { + return newNavigableExpr(nil, ast.Expr, ast.TypeMap) +} + +// ExprMatcher takes a NavigableExpr in and indicates whether the value is a match. +// +// This function type should be use with the `Match` and `MatchList` calls. +type ExprMatcher func(NavigableExpr) bool + +// ConstantValueMatcher returns an ExprMatcher which will return true if the input NavigableExpr +// is comprised of all constant values, such as a simple literal or even list and map literal. +func ConstantValueMatcher() ExprMatcher { + return matchIsConstantValue +} + +// KindMatcher returns an ExprMatcher which will return true if the input NavigableExpr.Kind() matches +// the specified `kind`. +func KindMatcher(kind ExprKind) ExprMatcher { + return func(e NavigableExpr) bool { + return e.Kind() == kind + } +} + +// FunctionMatcher returns an ExprMatcher which will match NavigableExpr nodes of CallKind type whose +// function name is equal to `funcName`. +func FunctionMatcher(funcName string) ExprMatcher { + return func(e NavigableExpr) bool { + if e.Kind() != CallKind { + return false + } + return e.AsCall().FunctionName() == funcName + } +} + +// AllMatcher returns true for all descendants of a NavigableExpr, effectively flattening them into a list. +// +// Such a result would work well with subsequent MatchList calls. +func AllMatcher() ExprMatcher { + return func(NavigableExpr) bool { + return true + } +} + +// MatchDescendants takes a NavigableExpr and ExprMatcher and produces a list of NavigableExpr values of the +// descendants which match. +func MatchDescendants(expr NavigableExpr, matcher ExprMatcher) []NavigableExpr { + return matchListInternal([]NavigableExpr{expr}, matcher, true) +} + +// MatchSubset applies an ExprMatcher to a list of NavigableExpr values and their descendants, producing a +// subset of NavigableExpr values which match. 
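// A short sketch of how the matchers compose, assuming `checked` is a *CheckedAST from a prior
// type-check (only the variable name is assumed):
//
//	root := NavigateCheckedAST(checked)
//	listLiterals := MatchDescendants(root, KindMatcher(ListKind))
//	constantLists := MatchSubset(listLiterals, ConstantValueMatcher())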
+func MatchSubset(exprs []NavigableExpr, matcher ExprMatcher) []NavigableExpr { + visit := make([]NavigableExpr, len(exprs)) + copy(visit, exprs) + return matchListInternal(visit, matcher, false) +} + +func matchListInternal(visit []NavigableExpr, matcher ExprMatcher, visitDescendants bool) []NavigableExpr { + var matched []NavigableExpr + for len(visit) != 0 { + e := visit[0] + if matcher(e) { + matched = append(matched, e) + } + if visitDescendants { + visit = append(visit[1:], e.Children()...) + } else { + visit = visit[1:] + } + } + return matched +} + +func matchIsConstantValue(e NavigableExpr) bool { + if e.Kind() == LiteralKind { + return true + } + if e.Kind() == StructKind || e.Kind() == MapKind || e.Kind() == ListKind { + for _, child := range e.Children() { + if !matchIsConstantValue(child) { + return false + } + } + return true + } + return false +} + +// NavigableExpr represents the base navigable expression value. +// +// Depending on the `Kind()` value, the NavigableExpr may be converted to a concrete expression types +// as indicated by the `As` methods. +// +// NavigableExpr values and their concrete expression types should be nil-safe. Conversion of an expr +// to the wrong kind should produce a nil value. +type NavigableExpr interface { + // ID of the expression as it appears in the AST + ID() int64 + + // Kind of the expression node. See ExprKind for the valid enum values. + Kind() ExprKind + + // Type of the expression node. + Type() *types.Type + + // Parent returns the parent expression node, if one exists. + Parent() (NavigableExpr, bool) + + // Children returns a list of child expression nodes. + Children() []NavigableExpr + + // ToExpr adapts this NavigableExpr to a protobuf representation. + ToExpr() *exprpb.Expr + + // AsCall adapts the expr into a NavigableCallExpr + // + // The Kind() must be equal to a CallKind for the conversion to be well-defined. + AsCall() NavigableCallExpr + + // AsComprehension adapts the expr into a NavigableComprehensionExpr. + // + // The Kind() must be equal to a ComprehensionKind for the conversion to be well-defined. + AsComprehension() NavigableComprehensionExpr + + // AsIdent adapts the expr into an identifier string. + // + // The Kind() must be equal to an IdentKind for the conversion to be well-defined. + AsIdent() string + + // AsLiteral adapts the expr into a constant ref.Val. + // + // The Kind() must be equal to a LiteralKind for the conversion to be well-defined. + AsLiteral() ref.Val + + // AsList adapts the expr into a NavigableListExpr. + // + // The Kind() must be equal to a ListKind for the conversion to be well-defined. + AsList() NavigableListExpr + + // AsMap adapts the expr into a NavigableMapExpr. + // + // The Kind() must be equal to a MapKind for the conversion to be well-defined. + AsMap() NavigableMapExpr + + // AsSelect adapts the expr into a NavigableSelectExpr. + // + // The Kind() must be equal to a SelectKind for the conversion to be well-defined. + AsSelect() NavigableSelectExpr + + // AsStruct adapts the expr into a NavigableStructExpr. + // + // The Kind() must be equal to a StructKind for the conversion to be well-defined. + AsStruct() NavigableStructExpr + + // marker interface method + isNavigable() +} + +// NavigableCallExpr defines an interface for inspecting a function call and its arugments. +type NavigableCallExpr interface { + // FunctionName returns the name of the function. + FunctionName() string + + // Target returns the target of the expression if one is present. 
+ Target() NavigableExpr + + // Args returns the list of call arguments, excluding the target. + Args() []NavigableExpr + + // ReturnType returns the result type of the call. + ReturnType() *types.Type + + // marker interface method + isNavigable() +} + +// NavigableListExpr defines an interface for inspecting a list literal expression. +type NavigableListExpr interface { + // Elements returns the list elements as navigable expressions. + Elements() []NavigableExpr + + // OptionalIndicies returns the list of optional indices in the list literal. + OptionalIndices() []int32 + + // Size returns the number of elements in the list. + Size() int + + // marker interface method + isNavigable() +} + +// NavigableSelectExpr defines an interface for inspecting a select expression. +type NavigableSelectExpr interface { + // Operand returns the selection operand expression. + Operand() NavigableExpr + + // FieldName returns the field name being selected from the operand. + FieldName() string + + // IsTestOnly indicates whether the select expression is a presence test generated by a macro. + IsTestOnly() bool + + // marker interface method + isNavigable() +} + +// NavigableMapExpr defines an interface for inspecting a map expression. +type NavigableMapExpr interface { + // Entries returns the map key value pairs as NavigableEntry values. + Entries() []NavigableEntry + + // Size returns the number of entries in the map. + Size() int + + // marker interface method + isNavigable() +} + +// NavigableEntry defines an interface for inspecting a map entry. +type NavigableEntry interface { + // Key returns the map entry key expression. + Key() NavigableExpr + + // Value returns the map entry value expression. + Value() NavigableExpr + + // IsOptional returns whether the entry is optional. + IsOptional() bool + + // marker interface method + isNavigable() +} + +// NavigableStructExpr defines an interfaces for inspecting a struct and its field initializers. +type NavigableStructExpr interface { + // TypeName returns the struct type name. + TypeName() string + + // Fields returns the set of field initializers in the struct expression as NavigableField values. + Fields() []NavigableField + + // marker interface method + isNavigable() +} + +// NavigableField defines an interface for inspecting a struct field initialization. +type NavigableField interface { + // FieldName returns the name of the field. + FieldName() string + + // Value returns the field initialization expression. + Value() NavigableExpr + + // IsOptional returns whether the field is optional. + IsOptional() bool + + // marker interface method + isNavigable() +} + +// NavigableComprehensionExpr defines an interface for inspecting a comprehension expression. +type NavigableComprehensionExpr interface { + // IterRange returns the iteration range expression. + IterRange() NavigableExpr + + // IterVar returns the iteration variable name. + IterVar() string + + // AccuVar returns the accumulation variable name. + AccuVar() string + + // AccuInit returns the accumulation variable initialization expression. + AccuInit() NavigableExpr + + // LoopCondition returns the loop condition expression. + LoopCondition() NavigableExpr + + // LoopStep returns the loop step expression. + LoopStep() NavigableExpr + + // Result returns the comprehension result expression. 
+ Result() NavigableExpr + + // marker interface method + isNavigable() +} + +func newNavigableExpr(parent NavigableExpr, expr *exprpb.Expr, typeMap map[int64]*types.Type) NavigableExpr { + kind, factory := kindOf(expr) + nav := &navigableExprImpl{ + parent: parent, + kind: kind, + expr: expr, + typeMap: typeMap, + createChildren: factory, + } + return nav +} + +type navigableExprImpl struct { + parent NavigableExpr + kind ExprKind + expr *exprpb.Expr + typeMap map[int64]*types.Type + createChildren childFactory +} + +func (nav *navigableExprImpl) ID() int64 { + return nav.ToExpr().GetId() +} + +func (nav *navigableExprImpl) Kind() ExprKind { + return nav.kind +} + +func (nav *navigableExprImpl) Type() *types.Type { + if t, found := nav.typeMap[nav.ID()]; found { + return t + } + return types.DynType +} + +func (nav *navigableExprImpl) Parent() (NavigableExpr, bool) { + if nav.parent != nil { + return nav.parent, true + } + return nil, false +} + +func (nav *navigableExprImpl) Children() []NavigableExpr { + return nav.createChildren(nav) +} + +func (nav *navigableExprImpl) ToExpr() *exprpb.Expr { + return nav.expr +} + +func (nav *navigableExprImpl) AsCall() NavigableCallExpr { + return navigableCallImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsComprehension() NavigableComprehensionExpr { + return navigableComprehensionImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsIdent() string { + return nav.ToExpr().GetIdentExpr().GetName() +} + +func (nav *navigableExprImpl) AsLiteral() ref.Val { + if nav.Kind() != LiteralKind { + return nil + } + val, err := ConstantToVal(nav.ToExpr().GetConstExpr()) + if err != nil { + panic(err) + } + return val +} + +func (nav *navigableExprImpl) AsList() NavigableListExpr { + return navigableListImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsMap() NavigableMapExpr { + return navigableMapImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsSelect() NavigableSelectExpr { + return navigableSelectImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsStruct() NavigableStructExpr { + return navigableStructImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) createChild(e *exprpb.Expr) NavigableExpr { + return newNavigableExpr(nav, e, nav.typeMap) +} + +func (nav *navigableExprImpl) isNavigable() {} + +type navigableCallImpl struct { + *navigableExprImpl +} + +func (call navigableCallImpl) FunctionName() string { + return call.ToExpr().GetCallExpr().GetFunction() +} + +func (call navigableCallImpl) Target() NavigableExpr { + t := call.ToExpr().GetCallExpr().GetTarget() + if t != nil { + return call.createChild(t) + } + return nil +} + +func (call navigableCallImpl) Args() []NavigableExpr { + args := call.ToExpr().GetCallExpr().GetArgs() + navArgs := make([]NavigableExpr, len(args)) + for i, a := range args { + navArgs[i] = call.createChild(a) + } + return navArgs +} + +func (call navigableCallImpl) ReturnType() *types.Type { + return call.Type() +} + +type navigableComprehensionImpl struct { + *navigableExprImpl +} + +func (comp navigableComprehensionImpl) IterRange() NavigableExpr { + return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetIterRange()) +} + +func (comp navigableComprehensionImpl) IterVar() string { + return comp.ToExpr().GetComprehensionExpr().GetIterVar() +} + +func (comp navigableComprehensionImpl) AccuVar() string { + return comp.ToExpr().GetComprehensionExpr().GetAccuVar() +} + +func (comp navigableComprehensionImpl) AccuInit() NavigableExpr 
{ + return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetAccuInit()) +} + +func (comp navigableComprehensionImpl) LoopCondition() NavigableExpr { + return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetLoopCondition()) +} + +func (comp navigableComprehensionImpl) LoopStep() NavigableExpr { + return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetLoopStep()) +} + +func (comp navigableComprehensionImpl) Result() NavigableExpr { + return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetResult()) +} + +type navigableListImpl struct { + *navigableExprImpl +} + +func (l navigableListImpl) Elements() []NavigableExpr { + return l.Children() +} + +func (l navigableListImpl) OptionalIndices() []int32 { + return l.ToExpr().GetListExpr().GetOptionalIndices() +} + +func (l navigableListImpl) Size() int { + return len(l.ToExpr().GetListExpr().GetElements()) +} + +type navigableMapImpl struct { + *navigableExprImpl +} + +func (m navigableMapImpl) Entries() []NavigableEntry { + mapExpr := m.ToExpr().GetStructExpr() + entries := make([]NavigableEntry, len(mapExpr.GetEntries())) + for i, e := range mapExpr.GetEntries() { + entries[i] = navigableEntryImpl{ + key: m.createChild(e.GetMapKey()), + val: m.createChild(e.GetValue()), + isOpt: e.GetOptionalEntry(), + } + } + return entries +} + +func (m navigableMapImpl) Size() int { + return len(m.ToExpr().GetStructExpr().GetEntries()) +} + +type navigableEntryImpl struct { + key NavigableExpr + val NavigableExpr + isOpt bool +} + +func (e navigableEntryImpl) Key() NavigableExpr { + return e.key +} + +func (e navigableEntryImpl) Value() NavigableExpr { + return e.val +} + +func (e navigableEntryImpl) IsOptional() bool { + return e.isOpt +} + +func (e navigableEntryImpl) isNavigable() {} + +type navigableSelectImpl struct { + *navigableExprImpl +} + +func (sel navigableSelectImpl) FieldName() string { + return sel.ToExpr().GetSelectExpr().GetField() +} + +func (sel navigableSelectImpl) IsTestOnly() bool { + return sel.ToExpr().GetSelectExpr().GetTestOnly() +} + +func (sel navigableSelectImpl) Operand() NavigableExpr { + return sel.createChild(sel.ToExpr().GetSelectExpr().GetOperand()) +} + +type navigableStructImpl struct { + *navigableExprImpl +} + +func (s navigableStructImpl) TypeName() string { + return s.ToExpr().GetStructExpr().GetMessageName() +} + +func (s navigableStructImpl) Fields() []NavigableField { + fieldInits := s.ToExpr().GetStructExpr().GetEntries() + fields := make([]NavigableField, len(fieldInits)) + for i, f := range fieldInits { + fields[i] = navigableFieldImpl{ + name: f.GetFieldKey(), + val: s.createChild(f.GetValue()), + isOpt: f.GetOptionalEntry(), + } + } + return fields +} + +type navigableFieldImpl struct { + name string + val NavigableExpr + isOpt bool +} + +func (f navigableFieldImpl) FieldName() string { + return f.name +} + +func (f navigableFieldImpl) Value() NavigableExpr { + return f.val +} + +func (f navigableFieldImpl) IsOptional() bool { + return f.isOpt +} + +func (f navigableFieldImpl) isNavigable() {} + +func kindOf(expr *exprpb.Expr) (ExprKind, childFactory) { + switch expr.GetExprKind().(type) { + case *exprpb.Expr_ConstExpr: + return LiteralKind, noopFactory + case *exprpb.Expr_IdentExpr: + return IdentKind, noopFactory + case *exprpb.Expr_SelectExpr: + return SelectKind, selectFactory + case *exprpb.Expr_CallExpr: + return CallKind, callArgFactory + case *exprpb.Expr_ListExpr: + return ListKind, listElemFactory + case *exprpb.Expr_StructExpr: + if 
expr.GetStructExpr().GetMessageName() != "" { + return StructKind, structEntryFactory + } + return MapKind, mapEntryFactory + case *exprpb.Expr_ComprehensionExpr: + return ComprehensionKind, comprehensionFactory + default: + return UnspecifiedKind, noopFactory + } +} + +type childFactory func(*navigableExprImpl) []NavigableExpr + +func noopFactory(*navigableExprImpl) []NavigableExpr { + return nil +} + +func selectFactory(nav *navigableExprImpl) []NavigableExpr { + return []NavigableExpr{ + nav.createChild(nav.ToExpr().GetSelectExpr().GetOperand()), + } +} + +func callArgFactory(nav *navigableExprImpl) []NavigableExpr { + call := nav.ToExpr().GetCallExpr() + argCount := len(call.GetArgs()) + if call.GetTarget() != nil { + argCount++ + } + navExprs := make([]NavigableExpr, argCount) + i := 0 + if call.GetTarget() != nil { + navExprs[i] = nav.createChild(call.GetTarget()) + i++ + } + for _, arg := range call.GetArgs() { + navExprs[i] = nav.createChild(arg) + i++ + } + return navExprs +} + +func listElemFactory(nav *navigableExprImpl) []NavigableExpr { + l := nav.ToExpr().GetListExpr() + navExprs := make([]NavigableExpr, len(l.GetElements())) + for i, e := range l.GetElements() { + navExprs[i] = nav.createChild(e) + } + return navExprs +} + +func structEntryFactory(nav *navigableExprImpl) []NavigableExpr { + s := nav.ToExpr().GetStructExpr() + entries := make([]NavigableExpr, len(s.GetEntries())) + for i, e := range s.GetEntries() { + + entries[i] = nav.createChild(e.GetValue()) + } + return entries +} + +func mapEntryFactory(nav *navigableExprImpl) []NavigableExpr { + s := nav.ToExpr().GetStructExpr() + entries := make([]NavigableExpr, len(s.GetEntries())*2) + j := 0 + for _, e := range s.GetEntries() { + entries[j] = nav.createChild(e.GetMapKey()) + entries[j+1] = nav.createChild(e.GetValue()) + j += 2 + } + return entries +} + +func comprehensionFactory(nav *navigableExprImpl) []NavigableExpr { + compre := nav.ToExpr().GetComprehensionExpr() + return []NavigableExpr{ + nav.createChild(compre.GetIterRange()), + nav.createChild(compre.GetAccuInit()), + nav.createChild(compre.GetLoopCondition()), + nav.createChild(compre.GetLoopStep()), + nav.createChild(compre.GetResult()), + } +} diff --git a/vendor/github.com/google/cel-go/common/decls/BUILD.bazel b/vendor/github.com/google/cel-go/common/decls/BUILD.bazel new file mode 100644 index 000000000..17791dce6 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/decls/BUILD.bazel @@ -0,0 +1,39 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "decls.go", + ], + importpath = "github.com/google/cel-go/common/decls", + deps = [ + "//checker/decls:go_default_library", + "//common/functions:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", + "//common/types/traits:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "decls_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//checker/decls:go_default_library", + "//common/overloads:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", + "//common/types/traits:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + 
"@org_golang_google_protobuf//proto:go_default_library", + ], +) diff --git a/vendor/github.com/google/cel-go/common/decls/decls.go b/vendor/github.com/google/cel-go/common/decls/decls.go new file mode 100644 index 000000000..734ebe57e --- /dev/null +++ b/vendor/github.com/google/cel-go/common/decls/decls.go @@ -0,0 +1,844 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package decls contains function and variable declaration structs and helper methods. +package decls + +import ( + "fmt" + "strings" + + chkdecls "github.com/google/cel-go/checker/decls" + "github.com/google/cel-go/common/functions" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" +) + +// NewFunction creates a new function declaration with a set of function options to configure overloads +// and function definitions (implementations). +// +// Functions are checked for name collisions and singleton redefinition. +func NewFunction(name string, opts ...FunctionOpt) (*FunctionDecl, error) { + fn := &FunctionDecl{ + name: name, + overloads: map[string]*OverloadDecl{}, + overloadOrdinals: []string{}, + } + var err error + for _, opt := range opts { + fn, err = opt(fn) + if err != nil { + return nil, err + } + } + if len(fn.overloads) == 0 { + return nil, fmt.Errorf("function %s must have at least one overload", name) + } + return fn, nil +} + +// FunctionDecl defines a function name, overload set, and optionally a singleton definition for all +// overload instances. +type FunctionDecl struct { + name string + + // overloads associated with the function name. + overloads map[string]*OverloadDecl + + // singleton implementation of the function for all overloads. + // + // If this option is set, an error will occur if any overloads specify a per-overload implementation + // or if another function with the same name attempts to redefine the singleton. + singleton *functions.Overload + + // disableTypeGuards is a performance optimization to disable detailed runtime type checks which could + // add overhead on common operations. Setting this option true leaves error checks and argument checks + // intact. + disableTypeGuards bool + + // state indicates that the binding should be provided as a declaration, as a runtime binding, or both. + state declarationState + + // overloadOrdinals indicates the order in which the overload was declared. + overloadOrdinals []string +} + +type declarationState int + +const ( + declarationStateUnset declarationState = iota + declarationDisabled + declarationEnabled +) + +// Name returns the function name in human-readable terms, e.g. 'contains' of 'math.least' +func (f *FunctionDecl) Name() string { + if f == nil { + return "" + } + return f.name +} + +// IsDeclarationDisabled indicates that the function implementation should be added to the dispatcher, but the +// declaration should not be exposed for use in expressions. 
+func (f *FunctionDecl) IsDeclarationDisabled() bool { + return f.state == declarationDisabled +} + +// Merge combines an existing function declaration with another. +// +// If a function is extended, by say adding new overloads to an existing function, then it is merged with the +// prior definition of the function at which point its overloads must not collide with pre-existing overloads +// and its bindings (singleton, or per-overload) must not conflict with previous definitions either. +func (f *FunctionDecl) Merge(other *FunctionDecl) (*FunctionDecl, error) { + if f == other { + return f, nil + } + if f.Name() != other.Name() { + return nil, fmt.Errorf("cannot merge unrelated functions. %s and %s", f.Name(), other.Name()) + } + merged := &FunctionDecl{ + name: f.Name(), + overloads: make(map[string]*OverloadDecl, len(f.overloads)), + singleton: f.singleton, + overloadOrdinals: make([]string, len(f.overloads)), + // if one function is expecting type-guards and the other is not, then they + // must not be disabled. + disableTypeGuards: f.disableTypeGuards && other.disableTypeGuards, + // default to the current functions declaration state. + state: f.state, + } + // If the other state indicates that the declaration should be explicitly enabled or + // disabled, then update the merged state with the most recent value. + if other.state != declarationStateUnset { + merged.state = other.state + } + // baseline copy of the overloads and their ordinals + copy(merged.overloadOrdinals, f.overloadOrdinals) + for oID, o := range f.overloads { + merged.overloads[oID] = o + } + // overloads and their ordinals are added from the left + for _, oID := range other.overloadOrdinals { + o := other.overloads[oID] + err := merged.AddOverload(o) + if err != nil { + return nil, fmt.Errorf("function declaration merge failed: %v", err) + } + } + if other.singleton != nil { + if merged.singleton != nil && merged.singleton != other.singleton { + return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name()) + } + merged.singleton = other.singleton + } + return merged, nil +} + +// AddOverload ensures that the new overload does not collide with an existing overload signature; +// however, if the function signatures are identical, the implementation may be rewritten as its +// difficult to compare functions by object identity. +func (f *FunctionDecl) AddOverload(overload *OverloadDecl) error { + if f == nil { + return fmt.Errorf("nil function cannot add overload: %s", overload.ID()) + } + for oID, o := range f.overloads { + if oID != overload.ID() && o.SignatureOverlaps(overload) { + return fmt.Errorf("overload signature collision in function %s: %s collides with %s", f.Name(), oID, overload.ID()) + } + if oID == overload.ID() { + if o.SignatureEquals(overload) && o.IsNonStrict() == overload.IsNonStrict() { + // Allow redefinition of an overload implementation so long as the signatures match. + f.overloads[oID] = overload + return nil + } + return fmt.Errorf("overload redefinition in function. %s: %s has multiple definitions", f.Name(), oID) + } + } + f.overloadOrdinals = append(f.overloadOrdinals, overload.ID()) + f.overloads[overload.ID()] = overload + return nil +} + +// OverloadDecls returns the overload declarations in the order in which they were declared. 
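// A compact sketch of extending a function by merging declarations (the names below are
// illustrative; error handling is elided):
//
//	base, _ := NewFunction("size", Overload("size_string", []*types.Type{types.StringType}, types.IntType))
//	ext, _ := NewFunction("size", Overload("size_bytes", []*types.Type{types.BytesType}, types.IntType))
//	merged, _ := base.Merge(ext)
//	_ = merged.OverloadDecls() // size_string, size_bytes in declaration order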
+func (f *FunctionDecl) OverloadDecls() []*OverloadDecl { + if f == nil { + return []*OverloadDecl{} + } + overloads := make([]*OverloadDecl, 0, len(f.overloads)) + for _, oID := range f.overloadOrdinals { + overloads = append(overloads, f.overloads[oID]) + } + return overloads +} + +// Bindings produces a set of function bindings, if any are defined. +func (f *FunctionDecl) Bindings() ([]*functions.Overload, error) { + if f == nil { + return []*functions.Overload{}, nil + } + overloads := []*functions.Overload{} + nonStrict := false + for _, oID := range f.overloadOrdinals { + o := f.overloads[oID] + if o.hasBinding() { + overload := &functions.Overload{ + Operator: o.ID(), + Unary: o.guardedUnaryOp(f.Name(), f.disableTypeGuards), + Binary: o.guardedBinaryOp(f.Name(), f.disableTypeGuards), + Function: o.guardedFunctionOp(f.Name(), f.disableTypeGuards), + OperandTrait: o.OperandTrait(), + NonStrict: o.IsNonStrict(), + } + overloads = append(overloads, overload) + nonStrict = nonStrict || o.IsNonStrict() + } + } + if f.singleton != nil { + if len(overloads) != 0 { + return nil, fmt.Errorf("singleton function incompatible with specialized overloads: %s", f.Name()) + } + overloads = []*functions.Overload{ + { + Operator: f.Name(), + Unary: f.singleton.Unary, + Binary: f.singleton.Binary, + Function: f.singleton.Function, + OperandTrait: f.singleton.OperandTrait, + }, + } + // fall-through to return single overload case. + } + if len(overloads) == 0 { + return overloads, nil + } + // Single overload. Replicate an entry for it using the function name as well. + if len(overloads) == 1 { + if overloads[0].Operator == f.Name() { + return overloads, nil + } + return append(overloads, &functions.Overload{ + Operator: f.Name(), + Unary: overloads[0].Unary, + Binary: overloads[0].Binary, + Function: overloads[0].Function, + NonStrict: overloads[0].NonStrict, + OperandTrait: overloads[0].OperandTrait, + }), nil + } + // All of the defined overloads are wrapped into a top-level function which + // performs dynamic dispatch to the proper overload based on the argument types. + bindings := append([]*functions.Overload{}, overloads...) + funcDispatch := func(args ...ref.Val) ref.Val { + for _, oID := range f.overloadOrdinals { + o := f.overloads[oID] + // During dynamic dispatch over multiple functions, signature agreement checks + // are preserved in order to assist with the function resolution step. + switch len(args) { + case 1: + if o.unaryOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) { + return o.unaryOp(args[0]) + } + case 2: + if o.binaryOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) { + return o.binaryOp(args[0], args[1]) + } + } + if o.functionOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) { + return o.functionOp(args...) + } + // eventually this will fall through to the noSuchOverload below. + } + return MaybeNoSuchOverload(f.Name(), args...) + } + function := &functions.Overload{ + Operator: f.Name(), + Function: funcDispatch, + NonStrict: nonStrict, + } + return append(bindings, function), nil +} + +// MaybeNoSuchOverload determines whether to propagate an error if one is provided as an argument, or +// to return an unknown set, or to produce a new error for a missing function signature. 
+func MaybeNoSuchOverload(funcName string, args ...ref.Val) ref.Val { + argTypes := make([]string, len(args)) + var unk *types.Unknown = nil + for i, arg := range args { + if types.IsError(arg) { + return arg + } + if types.IsUnknown(arg) { + unk = types.MergeUnknowns(arg.(*types.Unknown), unk) + } + argTypes[i] = arg.Type().TypeName() + } + if unk != nil { + return unk + } + signature := strings.Join(argTypes, ", ") + return types.NewErr("no such overload: %s(%s)", funcName, signature) +} + +// FunctionOpt defines a functional option for mutating a function declaration. +type FunctionOpt func(*FunctionDecl) (*FunctionDecl, error) + +// DisableTypeGuards disables automatically generated function invocation guards on direct overload calls. +// Type guards remain on during dynamic dispatch for parsed-only expressions. +func DisableTypeGuards(value bool) FunctionOpt { + return func(fn *FunctionDecl) (*FunctionDecl, error) { + fn.disableTypeGuards = value + return fn, nil + } +} + +// DisableDeclaration indicates that the function declaration should be disabled, but the runtime function +// binding should be provided. Marking a function as runtime-only is a safe way to manage deprecations +// of function declarations while still preserving the runtime behavior for previously compiled expressions. +func DisableDeclaration(value bool) FunctionOpt { + return func(fn *FunctionDecl) (*FunctionDecl, error) { + if value { + fn.state = declarationDisabled + } else { + fn.state = declarationEnabled + } + return fn, nil + } +} + +// SingletonUnaryBinding creates a singleton function definition to be used for all function overloads. +// +// Note, this approach works well if operand is expected to have a specific trait which it implements, +// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. +func SingletonUnaryBinding(fn functions.UnaryOp, traits ...int) FunctionOpt { + trait := 0 + for _, t := range traits { + trait = trait | t + } + return func(f *FunctionDecl) (*FunctionDecl, error) { + if f.singleton != nil { + return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name()) + } + f.singleton = &functions.Overload{ + Operator: f.Name(), + Unary: fn, + OperandTrait: trait, + } + return f, nil + } +} + +// SingletonBinaryBinding creates a singleton function definition to be used with all function overloads. +// +// Note, this approach works well if operand is expected to have a specific trait which it implements, +// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. +func SingletonBinaryBinding(fn functions.BinaryOp, traits ...int) FunctionOpt { + trait := 0 + for _, t := range traits { + trait = trait | t + } + return func(f *FunctionDecl) (*FunctionDecl, error) { + if f.singleton != nil { + return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name()) + } + f.singleton = &functions.Overload{ + Operator: f.Name(), + Binary: fn, + OperandTrait: trait, + } + return f, nil + } +} + +// SingletonFunctionBinding creates a singleton function definition to be used with all function overloads. +// +// Note, this approach works well if operand is expected to have a specific trait which it implements, +// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. 
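// For example, a variadic singleton binding shared by every overload of a function might look like
// the following (the function name and overload id are placeholders; error handling is elided):
//
//	leastFn, _ := NewFunction("least",
//		Overload("least_int_int", []*types.Type{types.IntType, types.IntType}, types.IntType),
//		SingletonFunctionBinding(func(args ...ref.Val) ref.Val {
//			if args[0].(types.Int) < args[1].(types.Int) {
//				return args[0]
//			}
//			return args[1]
//		}))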
+func SingletonFunctionBinding(fn functions.FunctionOp, traits ...int) FunctionOpt { + trait := 0 + for _, t := range traits { + trait = trait | t + } + return func(f *FunctionDecl) (*FunctionDecl, error) { + if f.singleton != nil { + return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name()) + } + f.singleton = &functions.Overload{ + Operator: f.Name(), + Function: fn, + OperandTrait: trait, + } + return f, nil + } +} + +// Overload defines a new global overload with an overload id, argument types, and result type. Through the +// use of OverloadOpt options, the overload may also be configured with a binding, an operand trait, and to +// be non-strict. +// +// Note: function bindings should be commonly configured with Overload instances whereas operand traits and +// strict-ness should be rare occurrences. +func Overload(overloadID string, + args []*types.Type, resultType *types.Type, + opts ...OverloadOpt) FunctionOpt { + return newOverload(overloadID, false, args, resultType, opts...) +} + +// MemberOverload defines a new receiver-style overload (or member function) with an overload id, argument types, +// and result type. Through the use of OverloadOpt options, the overload may also be configured with a binding, +// an operand trait, and to be non-strict. +// +// Note: function bindings should be commonly configured with Overload instances whereas operand traits and +// strict-ness should be rare occurrences. +func MemberOverload(overloadID string, + args []*types.Type, resultType *types.Type, + opts ...OverloadOpt) FunctionOpt { + return newOverload(overloadID, true, args, resultType, opts...) +} + +func newOverload(overloadID string, + memberFunction bool, args []*types.Type, resultType *types.Type, + opts ...OverloadOpt) FunctionOpt { + return func(f *FunctionDecl) (*FunctionDecl, error) { + overload, err := newOverloadInternal(overloadID, memberFunction, args, resultType, opts...) + if err != nil { + return nil, err + } + err = f.AddOverload(overload) + if err != nil { + return nil, err + } + return f, nil + } +} + +func newOverloadInternal(overloadID string, + memberFunction bool, args []*types.Type, resultType *types.Type, + opts ...OverloadOpt) (*OverloadDecl, error) { + overload := &OverloadDecl{ + id: overloadID, + argTypes: args, + resultType: resultType, + isMemberFunction: memberFunction, + } + var err error + for _, opt := range opts { + overload, err = opt(overload) + if err != nil { + return nil, err + } + } + return overload, nil +} + +// OverloadDecl contains the definition of a single overload id with a specific signature, and an optional +// implementation. +type OverloadDecl struct { + id string + argTypes []*types.Type + resultType *types.Type + isMemberFunction bool + // nonStrict indicates that the function will accept error and unknown arguments as inputs. + nonStrict bool + // operandTrait indicates whether the member argument should have a specific type-trait. + // + // This is useful for creating overloads which operate on a type-interface rather than a concrete type. + operandTrait int + + // Function implementation options. Optional, but encouraged. + // unaryOp is a function binding that takes a single argument. + unaryOp functions.UnaryOp + // binaryOp is a function binding that takes two arguments. + binaryOp functions.BinaryOp + // functionOp is a catch-all for zero-arity and three-plus arity functions. 
+ functionOp functions.FunctionOp +} + +// ID mirrors the overload signature and provides a unique id which may be referenced within the type-checker +// and interpreter to optimize performance. +// +// The ID format is usually one of two styles: +// global: __ +// member: ___ +func (o *OverloadDecl) ID() string { + if o == nil { + return "" + } + return o.id +} + +// ArgTypes contains the set of argument types expected by the overload. +// +// For member functions ArgTypes[0] represents the member operand type. +func (o *OverloadDecl) ArgTypes() []*types.Type { + if o == nil { + return emptyArgs + } + return o.argTypes +} + +// IsMemberFunction indicates whether the overload is a member function +func (o *OverloadDecl) IsMemberFunction() bool { + if o == nil { + return false + } + return o.isMemberFunction +} + +// IsNonStrict returns whether the overload accepts errors and unknown values as arguments. +func (o *OverloadDecl) IsNonStrict() bool { + if o == nil { + return false + } + return o.nonStrict +} + +// OperandTrait returns the trait mask of the first operand to the overload call, e.g. +// `traits.Indexer` +func (o *OverloadDecl) OperandTrait() int { + if o == nil { + return 0 + } + return o.operandTrait +} + +// ResultType indicates the output type from calling the function. +func (o *OverloadDecl) ResultType() *types.Type { + if o == nil { + // *types.Type is nil-safe + return nil + } + return o.resultType +} + +// TypeParams returns the type parameter names associated with the overload. +func (o *OverloadDecl) TypeParams() []string { + typeParams := map[string]struct{}{} + collectParamNames(typeParams, o.ResultType()) + for _, arg := range o.ArgTypes() { + collectParamNames(typeParams, arg) + } + params := make([]string, 0, len(typeParams)) + for param := range typeParams { + params = append(params, param) + } + return params +} + +// SignatureEquals determines whether the incoming overload declaration signature is equal to the current signature. +// +// Result type, operand trait, and strict-ness are not considered as part of signature equality. +func (o *OverloadDecl) SignatureEquals(other *OverloadDecl) bool { + if o == other { + return true + } + if o.ID() != other.ID() || o.IsMemberFunction() != other.IsMemberFunction() || len(o.ArgTypes()) != len(other.ArgTypes()) { + return false + } + for i, at := range o.ArgTypes() { + oat := other.ArgTypes()[i] + if !at.IsEquivalentType(oat) { + return false + } + } + return o.ResultType().IsEquivalentType(other.ResultType()) +} + +// SignatureOverlaps indicates whether two functions have non-equal, but overloapping function signatures. +// +// For example, list(dyn) collides with list(string) since the 'dyn' type can contain a 'string' type. +func (o *OverloadDecl) SignatureOverlaps(other *OverloadDecl) bool { + if o.IsMemberFunction() != other.IsMemberFunction() || len(o.ArgTypes()) != len(other.ArgTypes()) { + return false + } + argsOverlap := true + for i, argType := range o.ArgTypes() { + otherArgType := other.ArgTypes()[i] + argsOverlap = argsOverlap && + (argType.IsAssignableType(otherArgType) || + otherArgType.IsAssignableType(argType)) + } + return argsOverlap +} + +// hasBinding indicates whether the overload already has a definition. +func (o *OverloadDecl) hasBinding() bool { + return o != nil && (o.unaryOp != nil || o.binaryOp != nil || o.functionOp != nil) +} + +// guardedUnaryOp creates an invocation guard around the provided unary operator, if one is defined. 
+func (o *OverloadDecl) guardedUnaryOp(funcName string, disableTypeGuards bool) functions.UnaryOp { + if o.unaryOp == nil { + return nil + } + return func(arg ref.Val) ref.Val { + if !o.matchesRuntimeUnarySignature(disableTypeGuards, arg) { + return MaybeNoSuchOverload(funcName, arg) + } + return o.unaryOp(arg) + } +} + +// guardedBinaryOp creates an invocation guard around the provided binary operator, if one is defined. +func (o *OverloadDecl) guardedBinaryOp(funcName string, disableTypeGuards bool) functions.BinaryOp { + if o.binaryOp == nil { + return nil + } + return func(arg1, arg2 ref.Val) ref.Val { + if !o.matchesRuntimeBinarySignature(disableTypeGuards, arg1, arg2) { + return MaybeNoSuchOverload(funcName, arg1, arg2) + } + return o.binaryOp(arg1, arg2) + } +} + +// guardedFunctionOp creates an invocation guard around the provided variadic function binding, if one is provided. +func (o *OverloadDecl) guardedFunctionOp(funcName string, disableTypeGuards bool) functions.FunctionOp { + if o.functionOp == nil { + return nil + } + return func(args ...ref.Val) ref.Val { + if !o.matchesRuntimeSignature(disableTypeGuards, args...) { + return MaybeNoSuchOverload(funcName, args...) + } + return o.functionOp(args...) + } +} + +// matchesRuntimeUnarySignature indicates whether the argument type is runtime assiganble to the overload's expected argument. +func (o *OverloadDecl) matchesRuntimeUnarySignature(disableTypeGuards bool, arg ref.Val) bool { + return matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[0], arg) && + matchOperandTrait(o.OperandTrait(), arg) +} + +// matchesRuntimeBinarySignature indicates whether the argument types are runtime assiganble to the overload's expected arguments. +func (o *OverloadDecl) matchesRuntimeBinarySignature(disableTypeGuards bool, arg1, arg2 ref.Val) bool { + return matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[0], arg1) && + matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[1], arg2) && + matchOperandTrait(o.OperandTrait(), arg1) +} + +// matchesRuntimeSignature indicates whether the argument types are runtime assiganble to the overload's expected arguments. +func (o *OverloadDecl) matchesRuntimeSignature(disableTypeGuards bool, args ...ref.Val) bool { + if len(args) != len(o.ArgTypes()) { + return false + } + if len(args) == 0 { + return true + } + for i, arg := range args { + if !matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[i], arg) { + return false + } + } + return matchOperandTrait(o.OperandTrait(), args[0]) +} + +func matchRuntimeArgType(nonStrict, disableTypeGuards bool, argType *types.Type, arg ref.Val) bool { + if nonStrict && (disableTypeGuards || types.IsUnknownOrError(arg)) { + return true + } + if types.IsUnknownOrError(arg) { + return false + } + return disableTypeGuards || argType.IsAssignableRuntimeType(arg) +} + +func matchOperandTrait(trait int, arg ref.Val) bool { + return trait == 0 || arg.Type().HasTrait(trait) || types.IsUnknownOrError(arg) +} + +// OverloadOpt is a functional option for configuring a function overload. +type OverloadOpt func(*OverloadDecl) (*OverloadDecl, error) + +// UnaryBinding provides the implementation of a unary overload. The provided function is protected by a runtime +// type-guard which ensures runtime type agreement between the overload signature and runtime argument types. 
+func UnaryBinding(binding functions.UnaryOp) OverloadOpt { + return func(o *OverloadDecl) (*OverloadDecl, error) { + if o.hasBinding() { + return nil, fmt.Errorf("overload already has a binding: %s", o.ID()) + } + if len(o.ArgTypes()) != 1 { + return nil, fmt.Errorf("unary function bound to non-unary overload: %s", o.ID()) + } + o.unaryOp = binding + return o, nil + } +} + +// BinaryBinding provides the implementation of a binary overload. The provided function is protected by a runtime +// type-guard which ensures runtime type agreement between the overload signature and runtime argument types. +func BinaryBinding(binding functions.BinaryOp) OverloadOpt { + return func(o *OverloadDecl) (*OverloadDecl, error) { + if o.hasBinding() { + return nil, fmt.Errorf("overload already has a binding: %s", o.ID()) + } + if len(o.ArgTypes()) != 2 { + return nil, fmt.Errorf("binary function bound to non-binary overload: %s", o.ID()) + } + o.binaryOp = binding + return o, nil + } +} + +// FunctionBinding provides the implementation of a variadic overload. The provided function is protected by a runtime +// type-guard which ensures runtime type agreement between the overload signature and runtime argument types. +func FunctionBinding(binding functions.FunctionOp) OverloadOpt { + return func(o *OverloadDecl) (*OverloadDecl, error) { + if o.hasBinding() { + return nil, fmt.Errorf("overload already has a binding: %s", o.ID()) + } + o.functionOp = binding + return o, nil + } +} + +// OverloadIsNonStrict enables the function to be called with error and unknown argument values. +// +// Note: do not use this option unless absoluately necessary as it should be an uncommon feature. +func OverloadIsNonStrict() OverloadOpt { + return func(o *OverloadDecl) (*OverloadDecl, error) { + o.nonStrict = true + return o, nil + } +} + +// OverloadOperandTrait configures a set of traits which the first argument to the overload must implement in order to be +// successfully invoked. +func OverloadOperandTrait(trait int) OverloadOpt { + return func(o *OverloadDecl) (*OverloadDecl, error) { + o.operandTrait = trait + return o, nil + } +} + +// NewConstant creates a new constant declaration. +func NewConstant(name string, t *types.Type, v ref.Val) *VariableDecl { + return &VariableDecl{name: name, varType: t, value: v} +} + +// NewVariable creates a new variable declaration. +func NewVariable(name string, t *types.Type) *VariableDecl { + return &VariableDecl{name: name, varType: t} +} + +// VariableDecl defines a variable declaration which may optionally have a constant value. +type VariableDecl struct { + name string + varType *types.Type + value ref.Val +} + +// Name returns the fully-qualified variable name +func (v *VariableDecl) Name() string { + if v == nil { + return "" + } + return v.name +} + +// Type returns the types.Type value associated with the variable. +func (v *VariableDecl) Type() *types.Type { + if v == nil { + // types.Type is nil-safe + return nil + } + return v.varType +} + +// Value returns the constant value associated with the declaration. +func (v *VariableDecl) Value() ref.Val { + if v == nil { + return nil + } + return v.value +} + +// DeclarationIsEquivalent returns true if one variable declaration has the same name and same type as the input. 
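A short sketch of the variable and constant declarations introduced here; only a constant carries a non-nil Value(). The names are illustrative:

// Assumed imports:
//   "github.com/google/cel-go/common/decls"
//   "github.com/google/cel-go/common/types"
func exampleVariableDecls() []*decls.VariableDecl {
    return []*decls.VariableDecl{
        // A plain variable declaration: Value() returns nil.
        decls.NewVariable("request.size", types.IntType),
        // A constant declaration: Value() returns types.Double(3.14159).
        decls.NewConstant("pi", types.DoubleType, types.Double(3.14159)),
    }
}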
+func (v *VariableDecl) DeclarationIsEquivalent(other *VariableDecl) bool { + if v == other { + return true + } + return v.Name() == other.Name() && v.Type().IsEquivalentType(other.Type()) +} + +// VariableDeclToExprDecl converts a go-native variable declaration into a protobuf-type variable declaration. +func VariableDeclToExprDecl(v *VariableDecl) (*exprpb.Decl, error) { + varType, err := types.TypeToExprType(v.Type()) + if err != nil { + return nil, err + } + return chkdecls.NewVar(v.Name(), varType), nil +} + +// TypeVariable creates a new type identifier for use within a types.Provider +func TypeVariable(t *types.Type) *VariableDecl { + return NewVariable(t.TypeName(), types.NewTypeTypeWithParam(t)) +} + +// FunctionDeclToExprDecl converts a go-native function declaration into a protobuf-typed function declaration. +func FunctionDeclToExprDecl(f *FunctionDecl) (*exprpb.Decl, error) { + overloads := make([]*exprpb.Decl_FunctionDecl_Overload, len(f.overloads)) + for i, oID := range f.overloadOrdinals { + o := f.overloads[oID] + paramNames := map[string]struct{}{} + argTypes := make([]*exprpb.Type, len(o.ArgTypes())) + for j, a := range o.ArgTypes() { + collectParamNames(paramNames, a) + at, err := types.TypeToExprType(a) + if err != nil { + return nil, err + } + argTypes[j] = at + } + collectParamNames(paramNames, o.ResultType()) + resultType, err := types.TypeToExprType(o.ResultType()) + if err != nil { + return nil, err + } + if len(paramNames) == 0 { + if o.IsMemberFunction() { + overloads[i] = chkdecls.NewInstanceOverload(oID, argTypes, resultType) + } else { + overloads[i] = chkdecls.NewOverload(oID, argTypes, resultType) + } + } else { + params := []string{} + for pn := range paramNames { + params = append(params, pn) + } + if o.IsMemberFunction() { + overloads[i] = chkdecls.NewParameterizedInstanceOverload(oID, argTypes, resultType, params) + } else { + overloads[i] = chkdecls.NewParameterizedOverload(oID, argTypes, resultType, params) + } + } + } + return chkdecls.NewFunction(f.Name(), overloads...), nil +} + +func collectParamNames(paramNames map[string]struct{}, arg *types.Type) { + if arg.Kind() == types.TypeParamKind { + paramNames[arg.TypeName()] = struct{}{} + } + for _, param := range arg.Parameters() { + collectParamNames(paramNames, param) + } +} + +var ( + emptyArgs = []*types.Type{} +) diff --git a/vendor/github.com/google/cel-go/common/error.go b/vendor/github.com/google/cel-go/common/error.go index f91f7f8d1..774dcb5b4 100644 --- a/vendor/github.com/google/cel-go/common/error.go +++ b/vendor/github.com/google/cel-go/common/error.go @@ -22,10 +22,16 @@ import ( "golang.org/x/text/width" ) -// Error type which references a location within source and a message. +// NewError creates an error associated with an expression id with the given message at the given location. +func NewError(id int64, message string, location Location) *Error { + return &Error{Message: message, Location: location, ExprID: id} +} + +// Error type which references an expression id, a location within source, and a message. type Error struct { Location Location Message string + ExprID int64 } const ( diff --git a/vendor/github.com/google/cel-go/common/errors.go b/vendor/github.com/google/cel-go/common/errors.go index 1565085ab..63919714e 100644 --- a/vendor/github.com/google/cel-go/common/errors.go +++ b/vendor/github.com/google/cel-go/common/errors.go @@ -22,7 +22,7 @@ import ( // Errors type which contains a list of errors observed during parsing. 
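The ExprID added to Error lets a diagnostic be tied back to the expression node that produced it rather than only to a source offset. A minimal sketch, with a hypothetical message:

// Assumed import:
//   "github.com/google/cel-go/common"
func exampleExprError(id int64, loc common.Location) *common.Error {
    // Associates the error with the offending expression id as well as its location.
    return common.NewError(id, "undeclared reference to 'x'", loc)
}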
type Errors struct { - errors []Error + errors []*Error source Source numErrors int maxErrorsToReport int @@ -31,7 +31,7 @@ type Errors struct { // NewErrors creates a new instance of the Errors type. func NewErrors(source Source) *Errors { return &Errors{ - errors: []Error{}, + errors: []*Error{}, source: source, maxErrorsToReport: 100, } @@ -39,11 +39,17 @@ func NewErrors(source Source) *Errors { // ReportError records an error at a source location. func (e *Errors) ReportError(l Location, format string, args ...any) { + e.ReportErrorAtID(0, l, format, args...) +} + +// ReportErrorAtID records an error at a source location and expression id. +func (e *Errors) ReportErrorAtID(id int64, l Location, format string, args ...any) { e.numErrors++ if e.numErrors > e.maxErrorsToReport { return } - err := Error{ + err := &Error{ + ExprID: id, Location: l, Message: fmt.Sprintf(format, args...), } @@ -51,12 +57,12 @@ func (e *Errors) ReportError(l Location, format string, args ...any) { } // GetErrors returns the list of observed errors. -func (e *Errors) GetErrors() []Error { +func (e *Errors) GetErrors() []*Error { return e.errors[:] } // Append creates a new Errors object with the current and input errors. -func (e *Errors) Append(errs []Error) *Errors { +func (e *Errors) Append(errs []*Error) *Errors { return &Errors{ errors: append(e.errors, errs...), source: e.source, diff --git a/vendor/github.com/google/cel-go/common/functions/BUILD.bazel b/vendor/github.com/google/cel-go/common/functions/BUILD.bazel new file mode 100644 index 000000000..3cc27d60c --- /dev/null +++ b/vendor/github.com/google/cel-go/common/functions/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "functions.go", + ], + importpath = "github.com/google/cel-go/common/functions", + deps = [ + "//common/types/ref:go_default_library", + ], +) diff --git a/vendor/github.com/google/cel-go/common/functions/functions.go b/vendor/github.com/google/cel-go/common/functions/functions.go new file mode 100644 index 000000000..67f4a5944 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/functions/functions.go @@ -0,0 +1,61 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package functions defines the standard builtin functions supported by the interpreter +package functions + +import "github.com/google/cel-go/common/types/ref" + +// Overload defines a named overload of a function, indicating an operand trait +// which must be present on the first argument to the overload as well as one +// of either a unary, binary, or function implementation. +// +// The majority of operators within the expression language are unary or binary +// and the specializations simplify the call contract for implementers of +// types with operator overloads. 
Any added complexity is assumed to be handled +// by the generic FunctionOp. +type Overload struct { + // Operator name as written in an expression or defined within + // operators.go. + Operator string + + // Operand trait used to dispatch the call. The zero-value indicates a + // global function overload or that one of the Unary / Binary / Function + // definitions should be used to execute the call. + OperandTrait int + + // Unary defines the overload with a UnaryOp implementation. May be nil. + Unary UnaryOp + + // Binary defines the overload with a BinaryOp implementation. May be nil. + Binary BinaryOp + + // Function defines the overload with a FunctionOp implementation. May be + // nil. + Function FunctionOp + + // NonStrict specifies whether the Overload will tolerate arguments that + // are types.Err or types.Unknown. + NonStrict bool +} + +// UnaryOp is a function that takes a single value and produces an output. +type UnaryOp func(value ref.Val) ref.Val + +// BinaryOp is a function that takes two values and produces an output. +type BinaryOp func(lhs ref.Val, rhs ref.Val) ref.Val + +// FunctionOp is a function with accepts zero or more arguments and produces +// a value or error as a result. +type FunctionOp func(values ...ref.Val) ref.Val diff --git a/vendor/github.com/google/cel-go/common/source.go b/vendor/github.com/google/cel-go/common/source.go index 52377d930..acf22bdf1 100644 --- a/vendor/github.com/google/cel-go/common/source.go +++ b/vendor/github.com/google/cel-go/common/source.go @@ -64,7 +64,6 @@ type sourceImpl struct { runes.Buffer description string lineOffsets []int32 - idOffsets map[int64]int32 } var _ runes.Buffer = &sourceImpl{} @@ -92,7 +91,6 @@ func NewStringSource(contents string, description string) Source { Buffer: runes.NewBuffer(contents), description: description, lineOffsets: offsets, - idOffsets: map[int64]int32{}, } } @@ -102,7 +100,6 @@ func NewInfoSource(info *exprpb.SourceInfo) Source { Buffer: runes.NewBuffer(""), description: info.GetLocation(), lineOffsets: info.GetLineOffsets(), - idOffsets: info.GetPositions(), } } diff --git a/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel b/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel new file mode 100644 index 000000000..c130a93f6 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "standard.go", + ], + importpath = "github.com/google/cel-go/common/stdlib", + deps = [ + "//checker/decls:go_default_library", + "//common/decls:go_default_library", + "//common/functions:go_default_library", + "//common/operators:go_default_library", + "//common/overloads:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", + "//common/types/traits:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + ], +) \ No newline at end of file diff --git a/vendor/github.com/google/cel-go/common/stdlib/standard.go b/vendor/github.com/google/cel-go/common/stdlib/standard.go new file mode 100644 index 000000000..d02cb64bf --- /dev/null +++ b/vendor/github.com/google/cel-go/common/stdlib/standard.go @@ -0,0 +1,661 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file 
except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package stdlib contains all of the standard library function declarations and definitions for CEL. +package stdlib + +import ( + "github.com/google/cel-go/common/decls" + "github.com/google/cel-go/common/functions" + "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" +) + +var ( + stdFunctions []*decls.FunctionDecl + stdFnDecls []*exprpb.Decl + stdTypes []*decls.VariableDecl + stdTypeDecls []*exprpb.Decl +) + +func init() { + paramA := types.NewTypeParamType("A") + paramB := types.NewTypeParamType("B") + listOfA := types.NewListType(paramA) + mapOfAB := types.NewMapType(paramA, paramB) + + stdTypes = []*decls.VariableDecl{ + decls.TypeVariable(types.BoolType), + decls.TypeVariable(types.BytesType), + decls.TypeVariable(types.DoubleType), + decls.TypeVariable(types.DurationType), + decls.TypeVariable(types.IntType), + decls.TypeVariable(listOfA), + decls.TypeVariable(mapOfAB), + decls.TypeVariable(types.NullType), + decls.TypeVariable(types.StringType), + decls.TypeVariable(types.TimestampType), + decls.TypeVariable(types.TypeType), + decls.TypeVariable(types.UintType), + } + + stdTypeDecls = make([]*exprpb.Decl, 0, len(stdTypes)) + for _, stdType := range stdTypes { + typeVar, err := decls.VariableDeclToExprDecl(stdType) + if err != nil { + panic(err) + } + stdTypeDecls = append(stdTypeDecls, typeVar) + } + + stdFunctions = []*decls.FunctionDecl{ + // Logical operators. Special-cased within the interpreter. + // Note, the singleton binding prevents extensions from overriding the operator behavior. 
+ function(operators.Conditional, + decls.Overload(overloads.Conditional, argTypes(types.BoolType, paramA, paramA), paramA, + decls.OverloadIsNonStrict()), + decls.SingletonFunctionBinding(noFunctionOverrides)), + function(operators.LogicalAnd, + decls.Overload(overloads.LogicalAnd, argTypes(types.BoolType, types.BoolType), types.BoolType, + decls.OverloadIsNonStrict()), + decls.SingletonBinaryBinding(noBinaryOverrides)), + function(operators.LogicalOr, + decls.Overload(overloads.LogicalOr, argTypes(types.BoolType, types.BoolType), types.BoolType, + decls.OverloadIsNonStrict()), + decls.SingletonBinaryBinding(noBinaryOverrides)), + function(operators.LogicalNot, + decls.Overload(overloads.LogicalNot, argTypes(types.BoolType), types.BoolType), + decls.SingletonUnaryBinding(func(val ref.Val) ref.Val { + b, ok := val.(types.Bool) + if !ok { + return types.MaybeNoSuchOverloadErr(val) + } + return b.Negate() + })), + + // Comprehension short-circuiting related function + function(operators.NotStrictlyFalse, + decls.Overload(overloads.NotStrictlyFalse, argTypes(types.BoolType), types.BoolType, + decls.OverloadIsNonStrict(), + decls.UnaryBinding(notStrictlyFalse))), + // Deprecated: __not_strictly_false__ + function(operators.OldNotStrictlyFalse, + decls.DisableDeclaration(true), // safe deprecation + decls.Overload(operators.OldNotStrictlyFalse, argTypes(types.BoolType), types.BoolType, + decls.OverloadIsNonStrict(), + decls.UnaryBinding(notStrictlyFalse))), + + // Equality / inequality. Special-cased in the interpreter + function(operators.Equals, + decls.Overload(overloads.Equals, argTypes(paramA, paramA), types.BoolType), + decls.SingletonBinaryBinding(noBinaryOverrides)), + function(operators.NotEquals, + decls.Overload(overloads.NotEquals, argTypes(paramA, paramA), types.BoolType), + decls.SingletonBinaryBinding(noBinaryOverrides)), + + // Mathematical operators + function(operators.Add, + decls.Overload(overloads.AddBytes, + argTypes(types.BytesType, types.BytesType), types.BytesType), + decls.Overload(overloads.AddDouble, + argTypes(types.DoubleType, types.DoubleType), types.DoubleType), + decls.Overload(overloads.AddDurationDuration, + argTypes(types.DurationType, types.DurationType), types.DurationType), + decls.Overload(overloads.AddDurationTimestamp, + argTypes(types.DurationType, types.TimestampType), types.TimestampType), + decls.Overload(overloads.AddTimestampDuration, + argTypes(types.TimestampType, types.DurationType), types.TimestampType), + decls.Overload(overloads.AddInt64, + argTypes(types.IntType, types.IntType), types.IntType), + decls.Overload(overloads.AddList, + argTypes(listOfA, listOfA), listOfA), + decls.Overload(overloads.AddString, + argTypes(types.StringType, types.StringType), types.StringType), + decls.Overload(overloads.AddUint64, + argTypes(types.UintType, types.UintType), types.UintType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + return lhs.(traits.Adder).Add(rhs) + }, traits.AdderType)), + function(operators.Divide, + decls.Overload(overloads.DivideDouble, + argTypes(types.DoubleType, types.DoubleType), types.DoubleType), + decls.Overload(overloads.DivideInt64, + argTypes(types.IntType, types.IntType), types.IntType), + decls.Overload(overloads.DivideUint64, + argTypes(types.UintType, types.UintType), types.UintType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + return lhs.(traits.Divider).Divide(rhs) + }, traits.DividerType)), + function(operators.Modulo, + decls.Overload(overloads.ModuloInt64, + 
argTypes(types.IntType, types.IntType), types.IntType), + decls.Overload(overloads.ModuloUint64, + argTypes(types.UintType, types.UintType), types.UintType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + return lhs.(traits.Modder).Modulo(rhs) + }, traits.ModderType)), + function(operators.Multiply, + decls.Overload(overloads.MultiplyDouble, + argTypes(types.DoubleType, types.DoubleType), types.DoubleType), + decls.Overload(overloads.MultiplyInt64, + argTypes(types.IntType, types.IntType), types.IntType), + decls.Overload(overloads.MultiplyUint64, + argTypes(types.UintType, types.UintType), types.UintType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + return lhs.(traits.Multiplier).Multiply(rhs) + }, traits.MultiplierType)), + function(operators.Negate, + decls.Overload(overloads.NegateDouble, argTypes(types.DoubleType), types.DoubleType), + decls.Overload(overloads.NegateInt64, argTypes(types.IntType), types.IntType), + decls.SingletonUnaryBinding(func(val ref.Val) ref.Val { + if types.IsBool(val) { + return types.MaybeNoSuchOverloadErr(val) + } + return val.(traits.Negater).Negate() + }, traits.NegatorType)), + function(operators.Subtract, + decls.Overload(overloads.SubtractDouble, + argTypes(types.DoubleType, types.DoubleType), types.DoubleType), + decls.Overload(overloads.SubtractDurationDuration, + argTypes(types.DurationType, types.DurationType), types.DurationType), + decls.Overload(overloads.SubtractInt64, + argTypes(types.IntType, types.IntType), types.IntType), + decls.Overload(overloads.SubtractTimestampDuration, + argTypes(types.TimestampType, types.DurationType), types.TimestampType), + decls.Overload(overloads.SubtractTimestampTimestamp, + argTypes(types.TimestampType, types.TimestampType), types.DurationType), + decls.Overload(overloads.SubtractUint64, + argTypes(types.UintType, types.UintType), types.UintType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + return lhs.(traits.Subtractor).Subtract(rhs) + }, traits.SubtractorType)), + + // Relations operators + + function(operators.Less, + decls.Overload(overloads.LessBool, + argTypes(types.BoolType, types.BoolType), types.BoolType), + decls.Overload(overloads.LessInt64, + argTypes(types.IntType, types.IntType), types.BoolType), + decls.Overload(overloads.LessInt64Double, + argTypes(types.IntType, types.DoubleType), types.BoolType), + decls.Overload(overloads.LessInt64Uint64, + argTypes(types.IntType, types.UintType), types.BoolType), + decls.Overload(overloads.LessUint64, + argTypes(types.UintType, types.UintType), types.BoolType), + decls.Overload(overloads.LessUint64Double, + argTypes(types.UintType, types.DoubleType), types.BoolType), + decls.Overload(overloads.LessUint64Int64, + argTypes(types.UintType, types.IntType), types.BoolType), + decls.Overload(overloads.LessDouble, + argTypes(types.DoubleType, types.DoubleType), types.BoolType), + decls.Overload(overloads.LessDoubleInt64, + argTypes(types.DoubleType, types.IntType), types.BoolType), + decls.Overload(overloads.LessDoubleUint64, + argTypes(types.DoubleType, types.UintType), types.BoolType), + decls.Overload(overloads.LessString, + argTypes(types.StringType, types.StringType), types.BoolType), + decls.Overload(overloads.LessBytes, + argTypes(types.BytesType, types.BytesType), types.BoolType), + decls.Overload(overloads.LessTimestamp, + argTypes(types.TimestampType, types.TimestampType), types.BoolType), + decls.Overload(overloads.LessDuration, + argTypes(types.DurationType, types.DurationType), 
types.BoolType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + cmp := lhs.(traits.Comparer).Compare(rhs) + if cmp == types.IntNegOne { + return types.True + } + if cmp == types.IntOne || cmp == types.IntZero { + return types.False + } + return cmp + }, traits.ComparerType)), + + function(operators.LessEquals, + decls.Overload(overloads.LessEqualsBool, + argTypes(types.BoolType, types.BoolType), types.BoolType), + decls.Overload(overloads.LessEqualsInt64, + argTypes(types.IntType, types.IntType), types.BoolType), + decls.Overload(overloads.LessEqualsInt64Double, + argTypes(types.IntType, types.DoubleType), types.BoolType), + decls.Overload(overloads.LessEqualsInt64Uint64, + argTypes(types.IntType, types.UintType), types.BoolType), + decls.Overload(overloads.LessEqualsUint64, + argTypes(types.UintType, types.UintType), types.BoolType), + decls.Overload(overloads.LessEqualsUint64Double, + argTypes(types.UintType, types.DoubleType), types.BoolType), + decls.Overload(overloads.LessEqualsUint64Int64, + argTypes(types.UintType, types.IntType), types.BoolType), + decls.Overload(overloads.LessEqualsDouble, + argTypes(types.DoubleType, types.DoubleType), types.BoolType), + decls.Overload(overloads.LessEqualsDoubleInt64, + argTypes(types.DoubleType, types.IntType), types.BoolType), + decls.Overload(overloads.LessEqualsDoubleUint64, + argTypes(types.DoubleType, types.UintType), types.BoolType), + decls.Overload(overloads.LessEqualsString, + argTypes(types.StringType, types.StringType), types.BoolType), + decls.Overload(overloads.LessEqualsBytes, + argTypes(types.BytesType, types.BytesType), types.BoolType), + decls.Overload(overloads.LessEqualsTimestamp, + argTypes(types.TimestampType, types.TimestampType), types.BoolType), + decls.Overload(overloads.LessEqualsDuration, + argTypes(types.DurationType, types.DurationType), types.BoolType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + cmp := lhs.(traits.Comparer).Compare(rhs) + if cmp == types.IntNegOne || cmp == types.IntZero { + return types.True + } + if cmp == types.IntOne { + return types.False + } + return cmp + }, traits.ComparerType)), + + function(operators.Greater, + decls.Overload(overloads.GreaterBool, + argTypes(types.BoolType, types.BoolType), types.BoolType), + decls.Overload(overloads.GreaterInt64, + argTypes(types.IntType, types.IntType), types.BoolType), + decls.Overload(overloads.GreaterInt64Double, + argTypes(types.IntType, types.DoubleType), types.BoolType), + decls.Overload(overloads.GreaterInt64Uint64, + argTypes(types.IntType, types.UintType), types.BoolType), + decls.Overload(overloads.GreaterUint64, + argTypes(types.UintType, types.UintType), types.BoolType), + decls.Overload(overloads.GreaterUint64Double, + argTypes(types.UintType, types.DoubleType), types.BoolType), + decls.Overload(overloads.GreaterUint64Int64, + argTypes(types.UintType, types.IntType), types.BoolType), + decls.Overload(overloads.GreaterDouble, + argTypes(types.DoubleType, types.DoubleType), types.BoolType), + decls.Overload(overloads.GreaterDoubleInt64, + argTypes(types.DoubleType, types.IntType), types.BoolType), + decls.Overload(overloads.GreaterDoubleUint64, + argTypes(types.DoubleType, types.UintType), types.BoolType), + decls.Overload(overloads.GreaterString, + argTypes(types.StringType, types.StringType), types.BoolType), + decls.Overload(overloads.GreaterBytes, + argTypes(types.BytesType, types.BytesType), types.BoolType), + decls.Overload(overloads.GreaterTimestamp, + argTypes(types.TimestampType, 
types.TimestampType), types.BoolType), + decls.Overload(overloads.GreaterDuration, + argTypes(types.DurationType, types.DurationType), types.BoolType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + cmp := lhs.(traits.Comparer).Compare(rhs) + if cmp == types.IntOne { + return types.True + } + if cmp == types.IntNegOne || cmp == types.IntZero { + return types.False + } + return cmp + }, traits.ComparerType)), + + function(operators.GreaterEquals, + decls.Overload(overloads.GreaterEqualsBool, + argTypes(types.BoolType, types.BoolType), types.BoolType), + decls.Overload(overloads.GreaterEqualsInt64, + argTypes(types.IntType, types.IntType), types.BoolType), + decls.Overload(overloads.GreaterEqualsInt64Double, + argTypes(types.IntType, types.DoubleType), types.BoolType), + decls.Overload(overloads.GreaterEqualsInt64Uint64, + argTypes(types.IntType, types.UintType), types.BoolType), + decls.Overload(overloads.GreaterEqualsUint64, + argTypes(types.UintType, types.UintType), types.BoolType), + decls.Overload(overloads.GreaterEqualsUint64Double, + argTypes(types.UintType, types.DoubleType), types.BoolType), + decls.Overload(overloads.GreaterEqualsUint64Int64, + argTypes(types.UintType, types.IntType), types.BoolType), + decls.Overload(overloads.GreaterEqualsDouble, + argTypes(types.DoubleType, types.DoubleType), types.BoolType), + decls.Overload(overloads.GreaterEqualsDoubleInt64, + argTypes(types.DoubleType, types.IntType), types.BoolType), + decls.Overload(overloads.GreaterEqualsDoubleUint64, + argTypes(types.DoubleType, types.UintType), types.BoolType), + decls.Overload(overloads.GreaterEqualsString, + argTypes(types.StringType, types.StringType), types.BoolType), + decls.Overload(overloads.GreaterEqualsBytes, + argTypes(types.BytesType, types.BytesType), types.BoolType), + decls.Overload(overloads.GreaterEqualsTimestamp, + argTypes(types.TimestampType, types.TimestampType), types.BoolType), + decls.Overload(overloads.GreaterEqualsDuration, + argTypes(types.DurationType, types.DurationType), types.BoolType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + cmp := lhs.(traits.Comparer).Compare(rhs) + if cmp == types.IntOne || cmp == types.IntZero { + return types.True + } + if cmp == types.IntNegOne { + return types.False + } + return cmp + }, traits.ComparerType)), + + // Indexing + function(operators.Index, + decls.Overload(overloads.IndexList, argTypes(listOfA, types.IntType), paramA), + decls.Overload(overloads.IndexMap, argTypes(mapOfAB, paramA), paramB), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + return lhs.(traits.Indexer).Get(rhs) + }, traits.IndexerType)), + + // Collections operators + function(operators.In, + decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType), + decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType), + decls.SingletonBinaryBinding(inAggregate)), + function(operators.OldIn, + decls.DisableDeclaration(true), // safe deprecation + decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType), + decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType), + decls.SingletonBinaryBinding(inAggregate)), + function(overloads.DeprecatedIn, + decls.DisableDeclaration(true), // safe deprecation + decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType), + decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType), + decls.SingletonBinaryBinding(inAggregate)), + function(overloads.Size, + 
decls.Overload(overloads.SizeBytes, argTypes(types.BytesType), types.IntType), + decls.MemberOverload(overloads.SizeBytesInst, argTypes(types.BytesType), types.IntType), + decls.Overload(overloads.SizeList, argTypes(listOfA), types.IntType), + decls.MemberOverload(overloads.SizeListInst, argTypes(listOfA), types.IntType), + decls.Overload(overloads.SizeMap, argTypes(mapOfAB), types.IntType), + decls.MemberOverload(overloads.SizeMapInst, argTypes(mapOfAB), types.IntType), + decls.Overload(overloads.SizeString, argTypes(types.StringType), types.IntType), + decls.MemberOverload(overloads.SizeStringInst, argTypes(types.StringType), types.IntType), + decls.SingletonUnaryBinding(func(val ref.Val) ref.Val { + return val.(traits.Sizer).Size() + }, traits.SizerType)), + + // Type conversions + function(overloads.TypeConvertType, + decls.Overload(overloads.TypeConvertType, argTypes(paramA), types.NewTypeTypeWithParam(paramA)), + decls.SingletonUnaryBinding(convertToType(types.TypeType))), + + // Bool conversions + function(overloads.TypeConvertBool, + decls.Overload(overloads.BoolToBool, argTypes(types.BoolType), types.BoolType, + decls.UnaryBinding(identity)), + decls.Overload(overloads.StringToBool, argTypes(types.StringType), types.BoolType, + decls.UnaryBinding(convertToType(types.BoolType)))), + + // Bytes conversions + function(overloads.TypeConvertBytes, + decls.Overload(overloads.BytesToBytes, argTypes(types.BytesType), types.BytesType, + decls.UnaryBinding(identity)), + decls.Overload(overloads.StringToBytes, argTypes(types.StringType), types.BytesType, + decls.UnaryBinding(convertToType(types.BytesType)))), + + // Double conversions + function(overloads.TypeConvertDouble, + decls.Overload(overloads.DoubleToDouble, argTypes(types.DoubleType), types.DoubleType, + decls.UnaryBinding(identity)), + decls.Overload(overloads.IntToDouble, argTypes(types.IntType), types.DoubleType, + decls.UnaryBinding(convertToType(types.DoubleType))), + decls.Overload(overloads.StringToDouble, argTypes(types.StringType), types.DoubleType, + decls.UnaryBinding(convertToType(types.DoubleType))), + decls.Overload(overloads.UintToDouble, argTypes(types.UintType), types.DoubleType, + decls.UnaryBinding(convertToType(types.DoubleType)))), + + // Duration conversions + function(overloads.TypeConvertDuration, + decls.Overload(overloads.DurationToDuration, argTypes(types.DurationType), types.DurationType, + decls.UnaryBinding(identity)), + decls.Overload(overloads.IntToDuration, argTypes(types.IntType), types.DurationType, + decls.UnaryBinding(convertToType(types.DurationType))), + decls.Overload(overloads.StringToDuration, argTypes(types.StringType), types.DurationType, + decls.UnaryBinding(convertToType(types.DurationType)))), + + // Dyn conversions + function(overloads.TypeConvertDyn, + decls.Overload(overloads.ToDyn, argTypes(paramA), types.DynType), + decls.SingletonUnaryBinding(identity)), + + // Int conversions + function(overloads.TypeConvertInt, + decls.Overload(overloads.IntToInt, argTypes(types.IntType), types.IntType, + decls.UnaryBinding(identity)), + decls.Overload(overloads.DoubleToInt, argTypes(types.DoubleType), types.IntType, + decls.UnaryBinding(convertToType(types.IntType))), + decls.Overload(overloads.DurationToInt, argTypes(types.DurationType), types.IntType, + decls.UnaryBinding(convertToType(types.IntType))), + decls.Overload(overloads.StringToInt, argTypes(types.StringType), types.IntType, + decls.UnaryBinding(convertToType(types.IntType))), + decls.Overload(overloads.TimestampToInt, 
argTypes(types.TimestampType), types.IntType, + decls.UnaryBinding(convertToType(types.IntType))), + decls.Overload(overloads.UintToInt, argTypes(types.UintType), types.IntType, + decls.UnaryBinding(convertToType(types.IntType))), + ), + + // String conversions + function(overloads.TypeConvertString, + decls.Overload(overloads.StringToString, argTypes(types.StringType), types.StringType, + decls.UnaryBinding(identity)), + decls.Overload(overloads.BoolToString, argTypes(types.BoolType), types.StringType, + decls.UnaryBinding(convertToType(types.StringType))), + decls.Overload(overloads.BytesToString, argTypes(types.BytesType), types.StringType, + decls.UnaryBinding(convertToType(types.StringType))), + decls.Overload(overloads.DoubleToString, argTypes(types.DoubleType), types.StringType, + decls.UnaryBinding(convertToType(types.StringType))), + decls.Overload(overloads.DurationToString, argTypes(types.DurationType), types.StringType, + decls.UnaryBinding(convertToType(types.StringType))), + decls.Overload(overloads.IntToString, argTypes(types.IntType), types.StringType, + decls.UnaryBinding(convertToType(types.StringType))), + decls.Overload(overloads.TimestampToString, argTypes(types.TimestampType), types.StringType, + decls.UnaryBinding(convertToType(types.StringType))), + decls.Overload(overloads.UintToString, argTypes(types.UintType), types.StringType, + decls.UnaryBinding(convertToType(types.StringType)))), + + // Timestamp conversions + function(overloads.TypeConvertTimestamp, + decls.Overload(overloads.TimestampToTimestamp, argTypes(types.TimestampType), types.TimestampType, + decls.UnaryBinding(identity)), + decls.Overload(overloads.IntToTimestamp, argTypes(types.IntType), types.TimestampType, + decls.UnaryBinding(convertToType(types.TimestampType))), + decls.Overload(overloads.StringToTimestamp, argTypes(types.StringType), types.TimestampType, + decls.UnaryBinding(convertToType(types.TimestampType)))), + + // Uint conversions + function(overloads.TypeConvertUint, + decls.Overload(overloads.UintToUint, argTypes(types.UintType), types.UintType, + decls.UnaryBinding(identity)), + decls.Overload(overloads.DoubleToUint, argTypes(types.DoubleType), types.UintType, + decls.UnaryBinding(convertToType(types.UintType))), + decls.Overload(overloads.IntToUint, argTypes(types.IntType), types.UintType, + decls.UnaryBinding(convertToType(types.UintType))), + decls.Overload(overloads.StringToUint, argTypes(types.StringType), types.UintType, + decls.UnaryBinding(convertToType(types.UintType)))), + + // String functions + function(overloads.Contains, + decls.MemberOverload(overloads.ContainsString, + argTypes(types.StringType, types.StringType), types.BoolType, + decls.BinaryBinding(types.StringContains)), + decls.DisableTypeGuards(true)), + function(overloads.EndsWith, + decls.MemberOverload(overloads.EndsWithString, + argTypes(types.StringType, types.StringType), types.BoolType, + decls.BinaryBinding(types.StringEndsWith)), + decls.DisableTypeGuards(true)), + function(overloads.StartsWith, + decls.MemberOverload(overloads.StartsWithString, + argTypes(types.StringType, types.StringType), types.BoolType, + decls.BinaryBinding(types.StringStartsWith)), + decls.DisableTypeGuards(true)), + function(overloads.Matches, + decls.Overload(overloads.Matches, argTypes(types.StringType, types.StringType), types.BoolType), + decls.MemberOverload(overloads.MatchesString, + argTypes(types.StringType, types.StringType), types.BoolType), + decls.SingletonBinaryBinding(func(str, pat ref.Val) ref.Val { + return 
str.(traits.Matcher).Match(pat) + }, traits.MatcherType)), + + // Timestamp / duration functions + function(overloads.TimeGetFullYear, + decls.MemberOverload(overloads.TimestampToYear, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToYearWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType)), + + function(overloads.TimeGetMonth, + decls.MemberOverload(overloads.TimestampToMonth, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToMonthWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType)), + + function(overloads.TimeGetDayOfYear, + decls.MemberOverload(overloads.TimestampToDayOfYear, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToDayOfYearWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType)), + + function(overloads.TimeGetDayOfMonth, + decls.MemberOverload(overloads.TimestampToDayOfMonthZeroBased, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType)), + + function(overloads.TimeGetDate, + decls.MemberOverload(overloads.TimestampToDayOfMonthOneBased, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToDayOfMonthOneBasedWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType)), + + function(overloads.TimeGetDayOfWeek, + decls.MemberOverload(overloads.TimestampToDayOfWeek, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToDayOfWeekWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType)), + + function(overloads.TimeGetHours, + decls.MemberOverload(overloads.TimestampToHours, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToHoursWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType), + decls.MemberOverload(overloads.DurationToHours, + argTypes(types.DurationType), types.IntType)), + + function(overloads.TimeGetMinutes, + decls.MemberOverload(overloads.TimestampToMinutes, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToMinutesWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType), + decls.MemberOverload(overloads.DurationToMinutes, + argTypes(types.DurationType), types.IntType)), + + function(overloads.TimeGetSeconds, + decls.MemberOverload(overloads.TimestampToSeconds, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToSecondsWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType), + decls.MemberOverload(overloads.DurationToSeconds, + argTypes(types.DurationType), types.IntType)), + + function(overloads.TimeGetMilliseconds, + decls.MemberOverload(overloads.TimestampToMilliseconds, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToMillisecondsWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType), + decls.MemberOverload(overloads.DurationToMilliseconds, + argTypes(types.DurationType), types.IntType)), + } + + stdFnDecls = make([]*exprpb.Decl, 0, len(stdFunctions)) + for _, fn := range stdFunctions { + if fn.IsDeclarationDisabled() { + continue + } + ed, err := decls.FunctionDeclToExprDecl(fn) + if err != nil { + panic(err) + } + stdFnDecls = append(stdFnDecls, ed) + } +} + +// Functions returns the set of standard 
library function declarations and definitions for CEL. +func Functions() []*decls.FunctionDecl { + return stdFunctions +} + +// FunctionExprDecls returns the legacy style protobuf-typed declarations for all functions and overloads +// in the CEL standard environment. +// +// Deprecated: use Functions +func FunctionExprDecls() []*exprpb.Decl { + return stdFnDecls +} + +// Types returns the set of standard library types for CEL. +func Types() []*decls.VariableDecl { + return stdTypes +} + +// TypeExprDecls returns the legacy style protobuf-typed declarations for all types in the CEL +// standard environment. +// +// Deprecated: use Types +func TypeExprDecls() []*exprpb.Decl { + return stdTypeDecls +} + +func notStrictlyFalse(value ref.Val) ref.Val { + if types.IsBool(value) { + return value + } + return types.True +} + +func inAggregate(lhs ref.Val, rhs ref.Val) ref.Val { + if rhs.Type().HasTrait(traits.ContainerType) { + return rhs.(traits.Container).Contains(lhs) + } + return types.ValOrErr(rhs, "no such overload") +} + +func function(name string, opts ...decls.FunctionOpt) *decls.FunctionDecl { + fn, err := decls.NewFunction(name, opts...) + if err != nil { + panic(err) + } + return fn +} + +func argTypes(args ...*types.Type) []*types.Type { + return args +} + +func noBinaryOverrides(rhs, lhs ref.Val) ref.Val { + return types.NoSuchOverloadErr() +} + +func noFunctionOverrides(args ...ref.Val) ref.Val { + return types.NoSuchOverloadErr() +} + +func identity(val ref.Val) ref.Val { + return val +} + +func convertToType(t ref.Type) functions.UnaryOp { + return func(val ref.Val) ref.Val { + return val.ConvertToType(t) + } +} diff --git a/vendor/github.com/google/cel-go/common/types/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/BUILD.bazel index 89c4feacb..b5e44ffbf 100644 --- a/vendor/github.com/google/cel-go/common/types/BUILD.bazel +++ b/vendor/github.com/google/cel-go/common/types/BUILD.bazel @@ -27,20 +27,20 @@ go_library( "provider.go", "string.go", "timestamp.go", - "type.go", + "types.go", "uint.go", "unknown.go", "util.go", ], importpath = "github.com/google/cel-go/common/types", deps = [ + "//checker/decls:go_default_library", "//common/overloads:go_default_library", "//common/types/pb:go_default_library", "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", "@com_github_stoewer_go_strcase//:go_default_library", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", - "@org_golang_google_genproto_googleapis_rpc//status:go_default_library", "@org_golang_google_protobuf//encoding/protojson:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", @@ -71,8 +71,9 @@ go_test( "provider_test.go", "string_test.go", "timestamp_test.go", - "type_test.go", + "types_test.go", "uint_test.go", + "unknown_test.go", "util_test.go", ], embed = [":go_default_library"], diff --git a/vendor/github.com/google/cel-go/common/types/bool.go b/vendor/github.com/google/cel-go/common/types/bool.go index a634ecc28..565734f3f 100644 --- a/vendor/github.com/google/cel-go/common/types/bool.go +++ b/vendor/github.com/google/cel-go/common/types/bool.go @@ -20,7 +20,6 @@ import ( "strconv" "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" anypb "google.golang.org/protobuf/types/known/anypb" structpb "google.golang.org/protobuf/types/known/structpb" @@ -31,11 +30,6 @@ import ( type Bool bool var ( - // BoolType 
singleton. - BoolType = NewTypeValue("bool", - traits.ComparerType, - traits.NegatorType) - // boolWrapperType golang reflected type for protobuf bool wrapper type. boolWrapperType = reflect.TypeOf(&wrapperspb.BoolValue{}) ) diff --git a/vendor/github.com/google/cel-go/common/types/bytes.go b/vendor/github.com/google/cel-go/common/types/bytes.go index bef190759..5838755f8 100644 --- a/vendor/github.com/google/cel-go/common/types/bytes.go +++ b/vendor/github.com/google/cel-go/common/types/bytes.go @@ -22,7 +22,6 @@ import ( "unicode/utf8" "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" anypb "google.golang.org/protobuf/types/known/anypb" structpb "google.golang.org/protobuf/types/known/structpb" @@ -34,12 +33,6 @@ import ( type Bytes []byte var ( - // BytesType singleton. - BytesType = NewTypeValue("bytes", - traits.AdderType, - traits.ComparerType, - traits.SizerType) - // byteWrapperType golang reflected type for protobuf bytes wrapper type. byteWrapperType = reflect.TypeOf(&wrapperspb.BytesValue{}) ) diff --git a/vendor/github.com/google/cel-go/common/types/double.go b/vendor/github.com/google/cel-go/common/types/double.go index bda9f31a6..027e78978 100644 --- a/vendor/github.com/google/cel-go/common/types/double.go +++ b/vendor/github.com/google/cel-go/common/types/double.go @@ -20,7 +20,6 @@ import ( "reflect" "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" anypb "google.golang.org/protobuf/types/known/anypb" structpb "google.golang.org/protobuf/types/known/structpb" @@ -32,15 +31,6 @@ import ( type Double float64 var ( - // DoubleType singleton. - DoubleType = NewTypeValue("double", - traits.AdderType, - traits.ComparerType, - traits.DividerType, - traits.MultiplierType, - traits.NegatorType, - traits.SubtractorType) - // doubleWrapperType reflected type for protobuf double wrapper type. doubleWrapperType = reflect.TypeOf(&wrapperspb.DoubleValue{}) diff --git a/vendor/github.com/google/cel-go/common/types/duration.go b/vendor/github.com/google/cel-go/common/types/duration.go index c90ac1bee..596e56d6b 100644 --- a/vendor/github.com/google/cel-go/common/types/duration.go +++ b/vendor/github.com/google/cel-go/common/types/duration.go @@ -22,7 +22,6 @@ import ( "github.com/google/cel-go/common/overloads" "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" anypb "google.golang.org/protobuf/types/known/anypb" dpb "google.golang.org/protobuf/types/known/durationpb" @@ -41,13 +40,14 @@ func durationOf(d time.Duration) Duration { } var ( - // DurationType singleton. - DurationType = NewTypeValue("google.protobuf.Duration", - traits.AdderType, - traits.ComparerType, - traits.NegatorType, - traits.ReceiverType, - traits.SubtractorType) + durationValueType = reflect.TypeOf(&dpb.Duration{}) + + durationZeroArgOverloads = map[string]func(ref.Val) ref.Val{ + overloads.TimeGetHours: DurationGetHours, + overloads.TimeGetMinutes: DurationGetMinutes, + overloads.TimeGetSeconds: DurationGetSeconds, + overloads.TimeGetMilliseconds: DurationGetMilliseconds, + } ) // Add implements traits.Adder.Add. 
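The durationZeroArgOverloads table above maps overload names to the exported accessor helpers defined in the next hunk, and Receive dispatches through it for zero-argument calls. A small usage sketch, assuming a 90-minute duration:

// Assumed imports:
//   "time"
//   "github.com/google/cel-go/common/overloads"
//   "github.com/google/cel-go/common/types"
//   "github.com/google/cel-go/common/types/ref"
func exampleDurationAccessors() (ref.Val, ref.Val) {
    d := types.Duration{Duration: 90 * time.Minute}
    hours := d.Receive(overloads.TimeGetHours, "", []ref.Val{})     // types.Int(1)
    minutes := d.Receive(overloads.TimeGetMinutes, "", []ref.Val{}) // types.Int(90)
    return hours, minutes
}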
@@ -156,7 +156,7 @@ func (d Duration) Negate() ref.Val { func (d Duration) Receive(function string, overload string, args []ref.Val) ref.Val { if len(args) == 0 { if f, found := durationZeroArgOverloads[function]; found { - return f(d.Duration) + return f(d) } } return NoSuchOverloadErr() @@ -185,20 +185,38 @@ func (d Duration) Value() any { return d.Duration } -var ( - durationValueType = reflect.TypeOf(&dpb.Duration{}) +// DurationGetHours returns the duration in hours. +func DurationGetHours(val ref.Val) ref.Val { + dur, ok := val.(Duration) + if !ok { + return MaybeNoSuchOverloadErr(val) + } + return Int(dur.Hours()) +} - durationZeroArgOverloads = map[string]func(time.Duration) ref.Val{ - overloads.TimeGetHours: func(dur time.Duration) ref.Val { - return Int(dur.Hours()) - }, - overloads.TimeGetMinutes: func(dur time.Duration) ref.Val { - return Int(dur.Minutes()) - }, - overloads.TimeGetSeconds: func(dur time.Duration) ref.Val { - return Int(dur.Seconds()) - }, - overloads.TimeGetMilliseconds: func(dur time.Duration) ref.Val { - return Int(dur.Milliseconds()) - }} -) +// DurationGetMinutes returns duration in minutes. +func DurationGetMinutes(val ref.Val) ref.Val { + dur, ok := val.(Duration) + if !ok { + return MaybeNoSuchOverloadErr(val) + } + return Int(dur.Minutes()) +} + +// DurationGetSeconds returns duration in seconds. +func DurationGetSeconds(val ref.Val) ref.Val { + dur, ok := val.(Duration) + if !ok { + return MaybeNoSuchOverloadErr(val) + } + return Int(dur.Seconds()) +} + +// DurationGetMilliseconds returns duration in milliseconds. +func DurationGetMilliseconds(val ref.Val) ref.Val { + dur, ok := val.(Duration) + if !ok { + return MaybeNoSuchOverloadErr(val) + } + return Int(dur.Milliseconds()) +} diff --git a/vendor/github.com/google/cel-go/common/types/err.go b/vendor/github.com/google/cel-go/common/types/err.go index b4874d9d4..aa8f94b4f 100644 --- a/vendor/github.com/google/cel-go/common/types/err.go +++ b/vendor/github.com/google/cel-go/common/types/err.go @@ -35,7 +35,7 @@ type Err struct { var ( // ErrType singleton. - ErrType = NewTypeValue("error") + ErrType = NewOpaqueType("error") // errDivideByZero is an error indicating a division by zero of an integer value. errDivideByZero = errors.New("division by zero") @@ -129,6 +129,11 @@ func (e *Err) Is(target error) bool { return e.error.Error() == target.Error() } +// Unwrap implements errors.Unwrap. +func (e *Err) Unwrap() error { + return e.error +} + // IsError returns whether the input element ref.Type or ref.Val is equal to // the ErrType singleton. func IsError(val ref.Val) bool { diff --git a/vendor/github.com/google/cel-go/common/types/int.go b/vendor/github.com/google/cel-go/common/types/int.go index f5a9511c8..940772aed 100644 --- a/vendor/github.com/google/cel-go/common/types/int.go +++ b/vendor/github.com/google/cel-go/common/types/int.go @@ -22,7 +22,6 @@ import ( "time" "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" anypb "google.golang.org/protobuf/types/known/anypb" structpb "google.golang.org/protobuf/types/known/structpb" @@ -41,16 +40,6 @@ const ( ) var ( - // IntType singleton. - IntType = NewTypeValue("int", - traits.AdderType, - traits.ComparerType, - traits.DividerType, - traits.ModderType, - traits.MultiplierType, - traits.NegatorType, - traits.SubtractorType) - // int32WrapperType reflected type for protobuf int32 wrapper type. 
int32WrapperType = reflect.TypeOf(&wrapperspb.Int32Value{}) diff --git a/vendor/github.com/google/cel-go/common/types/iterator.go b/vendor/github.com/google/cel-go/common/types/iterator.go index 9f224ad4f..98e9147b6 100644 --- a/vendor/github.com/google/cel-go/common/types/iterator.go +++ b/vendor/github.com/google/cel-go/common/types/iterator.go @@ -24,7 +24,7 @@ import ( var ( // IteratorType singleton. - IteratorType = NewTypeValue("iterator", traits.IteratorType) + IteratorType = NewObjectType("iterator", traits.IteratorType) ) // baseIterator is the basis for list, map, and object iterators. diff --git a/vendor/github.com/google/cel-go/common/types/list.go b/vendor/github.com/google/cel-go/common/types/list.go index de5f2099b..d4932b4a9 100644 --- a/vendor/github.com/google/cel-go/common/types/list.go +++ b/vendor/github.com/google/cel-go/common/types/list.go @@ -29,25 +29,15 @@ import ( structpb "google.golang.org/protobuf/types/known/structpb" ) -var ( - // ListType singleton. - ListType = NewTypeValue("list", - traits.AdderType, - traits.ContainerType, - traits.IndexerType, - traits.IterableType, - traits.SizerType) -) - // NewDynamicList returns a traits.Lister with heterogenous elements. // value should be an array of "native" types, i.e. any type that // NativeToValue() can convert to a ref.Val. -func NewDynamicList(adapter ref.TypeAdapter, value any) traits.Lister { +func NewDynamicList(adapter Adapter, value any) traits.Lister { refValue := reflect.ValueOf(value) return &baseList{ - TypeAdapter: adapter, - value: value, - size: refValue.Len(), + Adapter: adapter, + value: value, + size: refValue.Len(), get: func(i int) any { return refValue.Index(i).Interface() }, @@ -55,56 +45,56 @@ func NewDynamicList(adapter ref.TypeAdapter, value any) traits.Lister { } // NewStringList returns a traits.Lister containing only strings. -func NewStringList(adapter ref.TypeAdapter, elems []string) traits.Lister { +func NewStringList(adapter Adapter, elems []string) traits.Lister { return &baseList{ - TypeAdapter: adapter, - value: elems, - size: len(elems), - get: func(i int) any { return elems[i] }, + Adapter: adapter, + value: elems, + size: len(elems), + get: func(i int) any { return elems[i] }, } } // NewRefValList returns a traits.Lister with ref.Val elements. // // This type specialization is used with list literals within CEL expressions. -func NewRefValList(adapter ref.TypeAdapter, elems []ref.Val) traits.Lister { +func NewRefValList(adapter Adapter, elems []ref.Val) traits.Lister { return &baseList{ - TypeAdapter: adapter, - value: elems, - size: len(elems), - get: func(i int) any { return elems[i] }, + Adapter: adapter, + value: elems, + size: len(elems), + get: func(i int) any { return elems[i] }, } } // NewProtoList returns a traits.Lister based on a pb.List instance. -func NewProtoList(adapter ref.TypeAdapter, list protoreflect.List) traits.Lister { +func NewProtoList(adapter Adapter, list protoreflect.List) traits.Lister { return &baseList{ - TypeAdapter: adapter, - value: list, - size: list.Len(), - get: func(i int) any { return list.Get(i).Interface() }, + Adapter: adapter, + value: list, + size: list.Len(), + get: func(i int) any { return list.Get(i).Interface() }, } } // NewJSONList returns a traits.Lister based on structpb.ListValue instance. 
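The list constructors now take the package-local Adapter interface instead of ref.TypeAdapter; DefaultTypeAdapter, which this package already uses internally, satisfies it. A brief sketch:

// Assumed imports:
//   "github.com/google/cel-go/common/types"
//   "github.com/google/cel-go/common/types/traits"
func exampleStringList() traits.Lister {
    l := types.NewStringList(types.DefaultTypeAdapter, []string{"a", "b"})
    _ = l.Size() // types.Int(2)
    return l
}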
-func NewJSONList(adapter ref.TypeAdapter, l *structpb.ListValue) traits.Lister { +func NewJSONList(adapter Adapter, l *structpb.ListValue) traits.Lister { vals := l.GetValues() return &baseList{ - TypeAdapter: adapter, - value: l, - size: len(vals), - get: func(i int) any { return vals[i] }, + Adapter: adapter, + value: l, + size: len(vals), + get: func(i int) any { return vals[i] }, } } // NewMutableList creates a new mutable list whose internal state can be modified. -func NewMutableList(adapter ref.TypeAdapter) traits.MutableLister { +func NewMutableList(adapter Adapter) traits.MutableLister { var mutableValues []ref.Val l := &mutableList{ baseList: &baseList{ - TypeAdapter: adapter, - value: mutableValues, - size: 0, + Adapter: adapter, + value: mutableValues, + size: 0, }, mutableValues: mutableValues, } @@ -116,9 +106,9 @@ func NewMutableList(adapter ref.TypeAdapter) traits.MutableLister { // baseList points to a list containing elements of any type. // The `value` is an array of native values, and refValue is its reflection object. -// The `ref.TypeAdapter` enables native type to CEL type conversions. +// The `Adapter` enables native type to CEL type conversions. type baseList struct { - ref.TypeAdapter + Adapter value any // size indicates the number of elements within the list. @@ -143,9 +133,9 @@ func (l *baseList) Add(other ref.Val) ref.Val { return l } return &concatList{ - TypeAdapter: l.TypeAdapter, - prevList: l, - nextList: otherList} + Adapter: l.Adapter, + prevList: l, + nextList: otherList} } // Contains implements the traits.Container interface method. @@ -322,13 +312,13 @@ func (l *mutableList) Add(other ref.Val) ref.Val { func (l *mutableList) ToImmutableList() traits.Lister { // The reference to internal state is guaranteed to be safe as this call is only performed // when mutations have been completed. - return NewRefValList(l.TypeAdapter, l.mutableValues) + return NewRefValList(l.Adapter, l.mutableValues) } // concatList combines two list implementations together into a view. -// The `ref.TypeAdapter` enables native type to CEL type conversions. +// The `Adapter` enables native type to CEL type conversions. type concatList struct { - ref.TypeAdapter + Adapter value any prevList traits.Lister nextList traits.Lister @@ -347,9 +337,9 @@ func (l *concatList) Add(other ref.Val) ref.Val { return l } return &concatList{ - TypeAdapter: l.TypeAdapter, - prevList: l, - nextList: otherList} + Adapter: l.Adapter, + prevList: l, + nextList: otherList} } // Contains implements the traits.Container interface method. @@ -376,7 +366,7 @@ func (l *concatList) Contains(elem ref.Val) ref.Val { // ConvertToNative implements the ref.Val interface method. func (l *concatList) ConvertToNative(typeDesc reflect.Type) (any, error) { - combined := NewDynamicList(l.TypeAdapter, l.Value().([]any)) + combined := NewDynamicList(l.Adapter, l.Value().([]any)) return combined.ConvertToNative(typeDesc) } diff --git a/vendor/github.com/google/cel-go/common/types/map.go b/vendor/github.com/google/cel-go/common/types/map.go index 213be4ac9..739b7aab0 100644 --- a/vendor/github.com/google/cel-go/common/types/map.go +++ b/vendor/github.com/google/cel-go/common/types/map.go @@ -32,10 +32,10 @@ import ( ) // NewDynamicMap returns a traits.Mapper value with dynamic key, value pairs. 
-func NewDynamicMap(adapter ref.TypeAdapter, value any) traits.Mapper { +func NewDynamicMap(adapter Adapter, value any) traits.Mapper { refValue := reflect.ValueOf(value) return &baseMap{ - TypeAdapter: adapter, + Adapter: adapter, mapAccessor: newReflectMapAccessor(adapter, refValue), value: value, size: refValue.Len(), @@ -46,10 +46,10 @@ func NewDynamicMap(adapter ref.TypeAdapter, value any) traits.Mapper { // encoded in protocol buffer form. // // The `adapter` argument provides type adaptation capabilities from proto to CEL. -func NewJSONStruct(adapter ref.TypeAdapter, value *structpb.Struct) traits.Mapper { +func NewJSONStruct(adapter Adapter, value *structpb.Struct) traits.Mapper { fields := value.GetFields() return &baseMap{ - TypeAdapter: adapter, + Adapter: adapter, mapAccessor: newJSONStructAccessor(adapter, fields), value: value, size: len(fields), @@ -57,9 +57,9 @@ func NewJSONStruct(adapter ref.TypeAdapter, value *structpb.Struct) traits.Mappe } // NewRefValMap returns a specialized traits.Mapper with CEL valued keys and values. -func NewRefValMap(adapter ref.TypeAdapter, value map[ref.Val]ref.Val) traits.Mapper { +func NewRefValMap(adapter Adapter, value map[ref.Val]ref.Val) traits.Mapper { return &baseMap{ - TypeAdapter: adapter, + Adapter: adapter, mapAccessor: newRefValMapAccessor(value), value: value, size: len(value), @@ -67,9 +67,9 @@ func NewRefValMap(adapter ref.TypeAdapter, value map[ref.Val]ref.Val) traits.Map } // NewStringInterfaceMap returns a specialized traits.Mapper with string keys and interface values. -func NewStringInterfaceMap(adapter ref.TypeAdapter, value map[string]any) traits.Mapper { +func NewStringInterfaceMap(adapter Adapter, value map[string]any) traits.Mapper { return &baseMap{ - TypeAdapter: adapter, + Adapter: adapter, mapAccessor: newStringIfaceMapAccessor(adapter, value), value: value, size: len(value), @@ -77,9 +77,9 @@ func NewStringInterfaceMap(adapter ref.TypeAdapter, value map[string]any) traits } // NewStringStringMap returns a specialized traits.Mapper with string keys and values. -func NewStringStringMap(adapter ref.TypeAdapter, value map[string]string) traits.Mapper { +func NewStringStringMap(adapter Adapter, value map[string]string) traits.Mapper { return &baseMap{ - TypeAdapter: adapter, + Adapter: adapter, mapAccessor: newStringMapAccessor(value), value: value, size: len(value), @@ -87,22 +87,13 @@ func NewStringStringMap(adapter ref.TypeAdapter, value map[string]string) traits } // NewProtoMap returns a specialized traits.Mapper for handling protobuf map values. -func NewProtoMap(adapter ref.TypeAdapter, value *pb.Map) traits.Mapper { +func NewProtoMap(adapter Adapter, value *pb.Map) traits.Mapper { return &protoMap{ - TypeAdapter: adapter, - value: value, + Adapter: adapter, + value: value, } } -var ( - // MapType singleton. - MapType = NewTypeValue("map", - traits.ContainerType, - traits.IndexerType, - traits.IterableType, - traits.SizerType) -) - // mapAccessor is a private interface for finding values within a map and iterating over the keys. // This interface implements portions of the API surface area required by the traits.Mapper // interface. @@ -121,7 +112,7 @@ type mapAccessor interface { // Since CEL is side-effect free, the base map represents an immutable object. type baseMap struct { // TypeAdapter used to convert keys and values accessed within the map. - ref.TypeAdapter + Adapter // mapAccessor interface implementation used to find and iterate over map keys. 
mapAccessor @@ -316,15 +307,15 @@ func (m *baseMap) Value() any { return m.value } -func newJSONStructAccessor(adapter ref.TypeAdapter, st map[string]*structpb.Value) mapAccessor { +func newJSONStructAccessor(adapter Adapter, st map[string]*structpb.Value) mapAccessor { return &jsonStructAccessor{ - TypeAdapter: adapter, - st: st, + Adapter: adapter, + st: st, } } type jsonStructAccessor struct { - ref.TypeAdapter + Adapter st map[string]*structpb.Value } @@ -359,17 +350,17 @@ func (a *jsonStructAccessor) Iterator() traits.Iterator { } } -func newReflectMapAccessor(adapter ref.TypeAdapter, value reflect.Value) mapAccessor { +func newReflectMapAccessor(adapter Adapter, value reflect.Value) mapAccessor { keyType := value.Type().Key() return &reflectMapAccessor{ - TypeAdapter: adapter, - refValue: value, - keyType: keyType, + Adapter: adapter, + refValue: value, + keyType: keyType, } } type reflectMapAccessor struct { - ref.TypeAdapter + Adapter refValue reflect.Value keyType reflect.Type } @@ -427,9 +418,9 @@ func (m *reflectMapAccessor) findInternal(key ref.Val) (ref.Val, bool) { // Iterator creates a Golang reflection based traits.Iterator. func (m *reflectMapAccessor) Iterator() traits.Iterator { return &mapIterator{ - TypeAdapter: m.TypeAdapter, - mapKeys: m.refValue.MapRange(), - len: m.refValue.Len(), + Adapter: m.Adapter, + mapKeys: m.refValue.MapRange(), + len: m.refValue.Len(), } } @@ -480,9 +471,9 @@ func (a *refValMapAccessor) Find(key ref.Val) (ref.Val, bool) { // Iterator produces a new traits.Iterator which iterates over the map keys via Golang reflection. func (a *refValMapAccessor) Iterator() traits.Iterator { return &mapIterator{ - TypeAdapter: DefaultTypeAdapter, - mapKeys: reflect.ValueOf(a.mapVal).MapRange(), - len: len(a.mapVal), + Adapter: DefaultTypeAdapter, + mapKeys: reflect.ValueOf(a.mapVal).MapRange(), + len: len(a.mapVal), } } @@ -524,15 +515,15 @@ func (a *stringMapAccessor) Iterator() traits.Iterator { } } -func newStringIfaceMapAccessor(adapter ref.TypeAdapter, mapVal map[string]any) mapAccessor { +func newStringIfaceMapAccessor(adapter Adapter, mapVal map[string]any) mapAccessor { return &stringIfaceMapAccessor{ - TypeAdapter: adapter, - mapVal: mapVal, + Adapter: adapter, + mapVal: mapVal, } } type stringIfaceMapAccessor struct { - ref.TypeAdapter + Adapter mapVal map[string]any } @@ -569,7 +560,7 @@ func (a *stringIfaceMapAccessor) Iterator() traits.Iterator { // protoMap is a specialized, separate implementation of the traits.Mapper interfaces tailored to // accessing protoreflect.Map values. 
type protoMap struct { - ref.TypeAdapter + Adapter value *pb.Map } @@ -772,9 +763,9 @@ func (m *protoMap) Iterator() traits.Iterator { return true }) return &protoMapIterator{ - TypeAdapter: m.TypeAdapter, - mapKeys: mapKeys, - len: m.value.Len(), + Adapter: m.Adapter, + mapKeys: mapKeys, + len: m.value.Len(), } } @@ -795,7 +786,7 @@ func (m *protoMap) Value() any { type mapIterator struct { *baseIterator - ref.TypeAdapter + Adapter mapKeys *reflect.MapIter cursor int len int @@ -818,7 +809,7 @@ func (it *mapIterator) Next() ref.Val { type protoMapIterator struct { *baseIterator - ref.TypeAdapter + Adapter mapKeys []protoreflect.MapKey cursor int len int diff --git a/vendor/github.com/google/cel-go/common/types/null.go b/vendor/github.com/google/cel-go/common/types/null.go index 38927a112..926ca3dc9 100644 --- a/vendor/github.com/google/cel-go/common/types/null.go +++ b/vendor/github.com/google/cel-go/common/types/null.go @@ -30,8 +30,6 @@ import ( type Null structpb.NullValue var ( - // NullType singleton. - NullType = NewTypeValue("null_type") // NullValue singleton. NullValue = Null(structpb.NullValue_NULL_VALUE) diff --git a/vendor/github.com/google/cel-go/common/types/object.go b/vendor/github.com/google/cel-go/common/types/object.go index 9955e2dce..8ba0af9fb 100644 --- a/vendor/github.com/google/cel-go/common/types/object.go +++ b/vendor/github.com/google/cel-go/common/types/object.go @@ -29,10 +29,10 @@ import ( ) type protoObj struct { - ref.TypeAdapter + Adapter value proto.Message typeDesc *pb.TypeDescription - typeValue *TypeValue + typeValue ref.Val } // NewObject returns an object based on a proto.Message value which handles @@ -42,15 +42,15 @@ type protoObj struct { // Note: the type value is pulled from the list of registered types within the // type provider. If the proto type is not registered within the type provider, // then this will result in an error within the type adapter / provider. -func NewObject(adapter ref.TypeAdapter, +func NewObject(adapter Adapter, typeDesc *pb.TypeDescription, - typeValue *TypeValue, + typeValue ref.Val, value proto.Message) ref.Val { return &protoObj{ - TypeAdapter: adapter, - value: value, - typeDesc: typeDesc, - typeValue: typeValue} + Adapter: adapter, + value: value, + typeDesc: typeDesc, + typeValue: typeValue} } func (o *protoObj) ConvertToNative(typeDesc reflect.Type) (any, error) { @@ -157,7 +157,7 @@ func (o *protoObj) Get(index ref.Val) ref.Val { } func (o *protoObj) Type() ref.Type { - return o.typeValue + return o.typeValue.(ref.Type) } func (o *protoObj) Value() any { diff --git a/vendor/github.com/google/cel-go/common/types/optional.go b/vendor/github.com/google/cel-go/common/types/optional.go index 54cb35b1a..a9f30aed0 100644 --- a/vendor/github.com/google/cel-go/common/types/optional.go +++ b/vendor/github.com/google/cel-go/common/types/optional.go @@ -24,7 +24,7 @@ import ( var ( // OptionalType indicates the runtime type of an optional value. - OptionalType = NewTypeValue("optional") + OptionalType = NewOpaqueType("optional") // OptionalNone is a sentinel value which is used to indicate an empty optional value. 
OptionalNone = &Optional{} diff --git a/vendor/github.com/google/cel-go/common/types/pb/type.go b/vendor/github.com/google/cel-go/common/types/pb/type.go index df9532156..6cc95c276 100644 --- a/vendor/github.com/google/cel-go/common/types/pb/type.go +++ b/vendor/github.com/google/cel-go/common/types/pb/type.go @@ -285,7 +285,7 @@ func (fd *FieldDescription) GetFrom(target any) (any, error) { // IsEnum returns true if the field type refers to an enum value. func (fd *FieldDescription) IsEnum() bool { - return fd.desc.Kind() == protoreflect.EnumKind + return fd.ProtoKind() == protoreflect.EnumKind } // IsMap returns true if the field is of map type. @@ -295,7 +295,7 @@ func (fd *FieldDescription) IsMap() bool { // IsMessage returns true if the field is of message type. func (fd *FieldDescription) IsMessage() bool { - kind := fd.desc.Kind() + kind := fd.ProtoKind() return kind == protoreflect.MessageKind || kind == protoreflect.GroupKind } @@ -326,6 +326,11 @@ func (fd *FieldDescription) Name() string { return string(fd.desc.Name()) } +// ProtoKind returns the protobuf reflected kind of the field. +func (fd *FieldDescription) ProtoKind() protoreflect.Kind { + return fd.desc.Kind() +} + // ReflectType returns the Golang reflect.Type for this field. func (fd *FieldDescription) ReflectType() reflect.Type { return fd.reflectType @@ -345,17 +350,17 @@ func (fd *FieldDescription) Zero() proto.Message { } func (fd *FieldDescription) typeDefToType() *exprpb.Type { - if fd.desc.Kind() == protoreflect.MessageKind || fd.desc.Kind() == protoreflect.GroupKind { + if fd.IsMessage() { msgType := string(fd.desc.Message().FullName()) if wk, found := CheckedWellKnowns[msgType]; found { return wk } return checkedMessageType(msgType) } - if fd.desc.Kind() == protoreflect.EnumKind { + if fd.IsEnum() { return checkedInt } - return CheckedPrimitives[fd.desc.Kind()] + return CheckedPrimitives[fd.ProtoKind()] } // Map wraps the protoreflect.Map object with a key and value FieldDescription for use in @@ -463,13 +468,13 @@ func unwrapDynamic(desc description, refMsg protoreflect.Message) (any, bool, er unwrappedAny := &anypb.Any{} err := Merge(unwrappedAny, msg) if err != nil { - return nil, false, err + return nil, false, fmt.Errorf("unwrap dynamic field failed: %v", err) } dynMsg, err := unwrappedAny.UnmarshalNew() if err != nil { // Allow the error to move further up the stack as it should result in an type // conversion error if the caller does not recover it somehow. - return nil, false, err + return nil, false, fmt.Errorf("unmarshal dynamic any failed: %v", err) } // Attempt to unwrap the dynamic type, otherwise return the dynamic message. 
unwrapped, nested, err := unwrapDynamic(desc, dynMsg.ProtoReflect()) @@ -560,8 +565,10 @@ func zeroValueOf(msg proto.Message) proto.Message { } var ( + jsonValueTypeURL = "types.googleapis.com/google.protobuf.Value" + zeroValueMap = map[string]proto.Message{ - "google.protobuf.Any": &anypb.Any{}, + "google.protobuf.Any": &anypb.Any{TypeUrl: jsonValueTypeURL}, "google.protobuf.Duration": &dpb.Duration{}, "google.protobuf.ListValue": &structpb.ListValue{}, "google.protobuf.Struct": &structpb.Struct{}, diff --git a/vendor/github.com/google/cel-go/common/types/provider.go b/vendor/github.com/google/cel-go/common/types/provider.go index e66951f5b..e80b4622e 100644 --- a/vendor/github.com/google/cel-go/common/types/provider.go +++ b/vendor/github.com/google/cel-go/common/types/provider.go @@ -33,17 +33,64 @@ import ( tpb "google.golang.org/protobuf/types/known/timestamppb" ) -type protoTypeRegistry struct { - revTypeMap map[string]ref.Type +// Adapter converts native Go values of varying type and complexity to equivalent CEL values. +type Adapter = ref.TypeAdapter + +// Provider specifies functions for creating new object instances and for resolving +// enum values by name. +type Provider interface { + // EnumValue returns the numeric value of the given enum value name. + EnumValue(enumName string) ref.Val + + // FindIdent takes a qualified identifier name and returns a ref.Val if one exists. + FindIdent(identName string) (ref.Val, bool) + + // FindStructType returns the Type give a qualified type name. + // + // For historical reasons, only struct types are expected to be returned through this + // method, and the type values are expected to be wrapped in a TypeType instance using + // TypeTypeWithParam(). + // + // Returns false if not found. + FindStructType(structType string) (*Type, bool) + + // FieldStructFieldType returns the field type for a checked type value. Returns + // false if the field could not be found. + FindStructFieldType(structType, fieldName string) (*FieldType, bool) + + // NewValue creates a new type value from a qualified name and map of field + // name to value. + // + // Note, for each value, the Val.ConvertToNative function will be invoked + // to convert the Val to the field's native type. If an error occurs during + // conversion, the NewValue will be a types.Err. + NewValue(structType string, fields map[string]ref.Val) ref.Val +} + +// FieldType represents a field's type value and whether that field supports presence detection. +type FieldType struct { + // Type of the field as a CEL native type value. + Type *Type + + // IsSet indicates whether the field is set on an input object. + IsSet ref.FieldTester + + // GetFrom retrieves the field value on the input object, if set. + GetFrom ref.FieldGetter +} + +// Registry provides type information for a set of registered types. +type Registry struct { + revTypeMap map[string]*Type pbdb *pb.Db } // NewRegistry accepts a list of proto message instances and returns a type // provider which can create new instances of the provided message or any // message that proto depends upon in its FileDescriptor. 
-func NewRegistry(types ...proto.Message) (ref.TypeRegistry, error) { - p := &protoTypeRegistry{ - revTypeMap: make(map[string]ref.Type), +func NewRegistry(types ...proto.Message) (*Registry, error) { + p := &Registry{ + revTypeMap: make(map[string]*Type), pbdb: pb.NewDb(), } err := p.RegisterType( @@ -79,18 +126,17 @@ func NewRegistry(types ...proto.Message) (ref.TypeRegistry, error) { } // NewEmptyRegistry returns a registry which is completely unconfigured. -func NewEmptyRegistry() ref.TypeRegistry { - return &protoTypeRegistry{ - revTypeMap: make(map[string]ref.Type), +func NewEmptyRegistry() *Registry { + return &Registry{ + revTypeMap: make(map[string]*Type), pbdb: pb.NewDb(), } } -// Copy implements the ref.TypeRegistry interface method which copies the current state of the -// registry into its own memory space. -func (p *protoTypeRegistry) Copy() ref.TypeRegistry { - copy := &protoTypeRegistry{ - revTypeMap: make(map[string]ref.Type), +// Copy copies the current state of the registry into its own memory space. +func (p *Registry) Copy() *Registry { + copy := &Registry{ + revTypeMap: make(map[string]*Type), pbdb: p.pbdb.Copy(), } for k, v := range p.revTypeMap { @@ -99,7 +145,8 @@ func (p *protoTypeRegistry) Copy() ref.TypeRegistry { return copy } -func (p *protoTypeRegistry) EnumValue(enumName string) ref.Val { +// EnumValue returns the numeric value of the given enum value name. +func (p *Registry) EnumValue(enumName string) ref.Val { enumVal, found := p.pbdb.DescribeEnum(enumName) if !found { return NewErr("unknown enum name '%s'", enumName) @@ -107,9 +154,12 @@ func (p *protoTypeRegistry) EnumValue(enumName string) ref.Val { return Int(enumVal.Value()) } -func (p *protoTypeRegistry) FindFieldType(messageType string, - fieldName string) (*ref.FieldType, bool) { - msgType, found := p.pbdb.DescribeType(messageType) +// FieldFieldType returns the field type for a checked type value. Returns false if +// the field could not be found. +// +// Deprecated: use FindStructFieldType +func (p *Registry) FindFieldType(structType, fieldName string) (*ref.FieldType, bool) { + msgType, found := p.pbdb.DescribeType(structType) if !found { return nil, false } @@ -118,15 +168,32 @@ func (p *protoTypeRegistry) FindFieldType(messageType string, return nil, false } return &ref.FieldType{ - Type: field.CheckedType(), - IsSet: field.IsSet, - GetFrom: field.GetFrom}, - true + Type: field.CheckedType(), + IsSet: field.IsSet, + GetFrom: field.GetFrom}, true +} + +// FieldStructFieldType returns the field type for a checked type value. Returns +// false if the field could not be found. +func (p *Registry) FindStructFieldType(structType, fieldName string) (*FieldType, bool) { + msgType, found := p.pbdb.DescribeType(structType) + if !found { + return nil, false + } + field, found := msgType.FieldByName(fieldName) + if !found { + return nil, false + } + return &FieldType{ + Type: fieldDescToCELType(field), + IsSet: field.IsSet, + GetFrom: field.GetFrom}, true } -func (p *protoTypeRegistry) FindIdent(identName string) (ref.Val, bool) { +// FindIdent takes a qualified identifier name and returns a ref.Val if one exists. 
+func (p *Registry) FindIdent(identName string) (ref.Val, bool) { if t, found := p.revTypeMap[identName]; found { - return t.(ref.Val), true + return t, true } if enumVal, found := p.pbdb.DescribeEnum(identName); found { return Int(enumVal.Value()), true @@ -134,24 +201,50 @@ func (p *protoTypeRegistry) FindIdent(identName string) (ref.Val, bool) { return nil, false } -func (p *protoTypeRegistry) FindType(typeName string) (*exprpb.Type, bool) { - if _, found := p.pbdb.DescribeType(typeName); !found { +// FindType looks up the Type given a qualified typeName. Returns false if not found. +// +// Deprecated: use FindStructType +func (p *Registry) FindType(structType string) (*exprpb.Type, bool) { + if _, found := p.pbdb.DescribeType(structType); !found { return nil, false } - if typeName != "" && typeName[0] == '.' { - typeName = typeName[1:] + if structType != "" && structType[0] == '.' { + structType = structType[1:] } return &exprpb.Type{ TypeKind: &exprpb.Type_Type{ Type: &exprpb.Type{ TypeKind: &exprpb.Type_MessageType{ - MessageType: typeName}}}}, true + MessageType: structType}}}}, true +} + +// FindStructType returns the Type give a qualified type name. +// +// For historical reasons, only struct types are expected to be returned through this +// method, and the type values are expected to be wrapped in a TypeType instance using +// TypeTypeWithParam(). +// +// Returns false if not found. +func (p *Registry) FindStructType(structType string) (*Type, bool) { + if _, found := p.pbdb.DescribeType(structType); !found { + return nil, false + } + if structType != "" && structType[0] == '.' { + structType = structType[1:] + } + return NewTypeTypeWithParam(NewObjectType(structType)), true } -func (p *protoTypeRegistry) NewValue(typeName string, fields map[string]ref.Val) ref.Val { - td, found := p.pbdb.DescribeType(typeName) +// NewValue creates a new type value from a qualified name and map of field +// name to value. +// +// Note, for each value, the Val.ConvertToNative function will be invoked +// to convert the Val to the field's native type. If an error occurs during +// conversion, the NewValue will be a types.Err. +func (p *Registry) NewValue(structType string, fields map[string]ref.Val) ref.Val { + td, found := p.pbdb.DescribeType(structType) if !found { - return NewErr("unknown type '%s'", typeName) + return NewErr("unknown type '%s'", structType) } msg := td.New() fieldMap := td.FieldMap() @@ -168,7 +261,8 @@ func (p *protoTypeRegistry) NewValue(typeName string, fields map[string]ref.Val) return p.NativeToValue(msg.Interface()) } -func (p *protoTypeRegistry) RegisterDescriptor(fileDesc protoreflect.FileDescriptor) error { +// RegisterDescriptor registers the contents of a protocol buffer `FileDescriptor`. +func (p *Registry) RegisterDescriptor(fileDesc protoreflect.FileDescriptor) error { fd, err := p.pbdb.RegisterDescriptor(fileDesc) if err != nil { return err @@ -176,7 +270,8 @@ func (p *protoTypeRegistry) RegisterDescriptor(fileDesc protoreflect.FileDescrip return p.registerAllTypes(fd) } -func (p *protoTypeRegistry) RegisterMessage(message proto.Message) error { +// RegisterMessage registers a protocol buffer message and its dependencies. 
+func (p *Registry) RegisterMessage(message proto.Message) error { fd, err := p.pbdb.RegisterMessage(message) if err != nil { return err @@ -184,11 +279,32 @@ func (p *protoTypeRegistry) RegisterMessage(message proto.Message) error { return p.registerAllTypes(fd) } -func (p *protoTypeRegistry) RegisterType(types ...ref.Type) error { +// RegisterType registers a type value with the provider which ensures the provider is aware of how to +// map the type to an identifier. +// +// If the `ref.Type` value is a `*types.Type` it will be registered directly by its runtime type name. +// If the `ref.Type` value is not a `*types.Type` instance, a `*types.Type` instance which reflects the +// traits present on the input and the runtime type name. By default this foreign type will be treated +// as a types.StructKind. To avoid potential issues where the `ref.Type` values does not match the +// generated `*types.Type` instance, consider always using the `*types.Type` to represent type extensions +// to CEL, even when they're not based on protobuf types. +func (p *Registry) RegisterType(types ...ref.Type) error { for _, t := range types { - p.revTypeMap[t.TypeName()] = t + celType := maybeForeignType(t) + existing, found := p.revTypeMap[t.TypeName()] + if !found { + p.revTypeMap[t.TypeName()] = celType + continue + } + if !existing.IsEquivalentType(celType) { + return fmt.Errorf("type registration conflict. found: %v, input: %v", existing, celType) + } + if existing.traitMask != celType.traitMask { + return fmt.Errorf( + "type registered with conflicting traits: %v with traits %v, input: %v", + existing.TypeName(), existing.traitMask, celType.traitMask) + } } - // TODO: generate an error when the type name is registered more than once. return nil } @@ -196,7 +312,7 @@ func (p *protoTypeRegistry) RegisterType(types ...ref.Type) error { // providing support for custom proto-based types. // // This method should be the inverse of ref.Val.ConvertToNative. -func (p *protoTypeRegistry) NativeToValue(value any) ref.Val { +func (p *Registry) NativeToValue(value any) ref.Val { if val, found := nativeToValue(p, value); found { return val } @@ -218,7 +334,7 @@ func (p *protoTypeRegistry) NativeToValue(value any) ref.Val { if !found { return NewErr("unknown type: '%s'", typeName) } - return NewObject(p, td, typeVal.(*TypeValue), v) + return NewObject(p, td, typeVal, v) case *pb.Map: return NewProtoMap(p, v) case protoreflect.List: @@ -231,8 +347,13 @@ func (p *protoTypeRegistry) NativeToValue(value any) ref.Val { return UnsupportedRefValConversionErr(value) } -func (p *protoTypeRegistry) registerAllTypes(fd *pb.FileDescription) error { +func (p *Registry) registerAllTypes(fd *pb.FileDescription) error { for _, typeName := range fd.GetTypeNames() { + // skip well-known type names since they're automatically sanitized + // during NewObjectType() calls. 
+ if _, found := checkedWellKnowns[typeName]; found { + continue + } err := p.RegisterType(NewObjectTypeValue(typeName)) if err != nil { return err @@ -241,6 +362,28 @@ func (p *protoTypeRegistry) registerAllTypes(fd *pb.FileDescription) error { return nil } +func fieldDescToCELType(field *pb.FieldDescription) *Type { + if field.IsMap() { + return NewMapType( + singularFieldDescToCELType(field.KeyType), + singularFieldDescToCELType(field.ValueType)) + } + if field.IsList() { + return NewListType(singularFieldDescToCELType(field)) + } + return singularFieldDescToCELType(field) +} + +func singularFieldDescToCELType(field *pb.FieldDescription) *Type { + if field.IsMessage() { + return NewObjectType(string(field.Descriptor().Message().FullName())) + } + if field.IsEnum() { + return IntType + } + return ProtoCELPrimitives[field.ProtoKind()] +} + // defaultTypeAdapter converts go native types to CEL values. type defaultTypeAdapter struct{} @@ -259,7 +402,7 @@ func (a *defaultTypeAdapter) NativeToValue(value any) ref.Val { // nativeToValue returns the converted (ref.Val, true) of a conversion is found, // otherwise (nil, false) -func nativeToValue(a ref.TypeAdapter, value any) (ref.Val, bool) { +func nativeToValue(a Adapter, value any) (ref.Val, bool) { switch v := value.(type) { case nil: return NullValue, true @@ -547,3 +690,24 @@ func fieldTypeConversionError(field *pb.FieldDescription, err error) error { msgName := field.Descriptor().ContainingMessage().FullName() return fmt.Errorf("field type conversion error for %v.%v value type: %v", msgName, field.Name(), err) } + +var ( + // ProtoCELPrimitives provides a map from the protoreflect Kind to the equivalent CEL type. + ProtoCELPrimitives = map[protoreflect.Kind]*Type{ + protoreflect.BoolKind: BoolType, + protoreflect.BytesKind: BytesType, + protoreflect.DoubleKind: DoubleType, + protoreflect.FloatKind: DoubleType, + protoreflect.Int32Kind: IntType, + protoreflect.Int64Kind: IntType, + protoreflect.Sint32Kind: IntType, + protoreflect.Sint64Kind: IntType, + protoreflect.Uint32Kind: UintType, + protoreflect.Uint64Kind: UintType, + protoreflect.Fixed32Kind: UintType, + protoreflect.Fixed64Kind: UintType, + protoreflect.Sfixed32Kind: IntType, + protoreflect.Sfixed64Kind: IntType, + protoreflect.StringKind: StringType, + } +) diff --git a/vendor/github.com/google/cel-go/common/types/ref/provider.go b/vendor/github.com/google/cel-go/common/types/ref/provider.go index 7eabbb9ca..b9820023d 100644 --- a/vendor/github.com/google/cel-go/common/types/ref/provider.go +++ b/vendor/github.com/google/cel-go/common/types/ref/provider.go @@ -23,34 +23,34 @@ import ( // TypeProvider specifies functions for creating new object instances and for // resolving enum values by name. +// +// Deprecated: use types.Provider type TypeProvider interface { // EnumValue returns the numeric value of the given enum value name. EnumValue(enumName string) Val - // FindIdent takes a qualified identifier name and returns a Value if one - // exists. + // FindIdent takes a qualified identifier name and returns a Value if one exists. FindIdent(identName string) (Val, bool) - // FindType looks up the Type given a qualified typeName. Returns false - // if not found. - // - // Used during type-checking only. + // FindType looks up the Type given a qualified typeName. Returns false if not found. FindType(typeName string) (*exprpb.Type, bool) - // FieldFieldType returns the field type for a checked type value. Returns - // false if the field could not be found. 
- FindFieldType(messageType string, fieldName string) (*FieldType, bool) + // FieldFieldType returns the field type for a checked type value. Returns false if + // the field could not be found. + FindFieldType(messageType, fieldName string) (*FieldType, bool) - // NewValue creates a new type value from a qualified name and map of field - // name to value. + // NewValue creates a new type value from a qualified name and map of field name + // to value. // - // Note, for each value, the Val.ConvertToNative function will be invoked - // to convert the Val to the field's native type. If an error occurs during - // conversion, the NewValue will be a types.Err. + // Note, for each value, the Val.ConvertToNative function will be invoked to convert + // the Val to the field's native type. If an error occurs during conversion, the + // NewValue will be a types.Err. NewValue(typeName string, fields map[string]Val) Val } // TypeAdapter converts native Go values of varying type and complexity to equivalent CEL values. +// +// Deprecated: use types.Adapter type TypeAdapter interface { // NativeToValue converts the input `value` to a CEL `ref.Val`. NativeToValue(value any) Val @@ -60,6 +60,8 @@ type TypeAdapter interface { // implementations support type-customization, so these features are optional. However, a // `TypeRegistry` should be a `TypeProvider` and a `TypeAdapter` to ensure that types // which are registered can be converted to CEL representations. +// +// Deprecated: use types.Registry type TypeRegistry interface { TypeAdapter TypeProvider @@ -76,15 +78,14 @@ type TypeRegistry interface { // If a type is provided more than once with an alternative definition, the // call will result in an error. RegisterType(types ...Type) error - - // Copy the TypeRegistry and return a new registry whose mutable state is isolated. - Copy() TypeRegistry } // FieldType represents a field's type value and whether that field supports // presence detection. +// +// Deprecated: use types.FieldType type FieldType struct { - // Type of the field. + // Type of the field as a protobuf type value. Type *exprpb.Type // IsSet indicates whether the field is set on an input object. diff --git a/vendor/github.com/google/cel-go/common/types/string.go b/vendor/github.com/google/cel-go/common/types/string.go index a65cc14e4..028e6824d 100644 --- a/vendor/github.com/google/cel-go/common/types/string.go +++ b/vendor/github.com/google/cel-go/common/types/string.go @@ -24,7 +24,6 @@ import ( "github.com/google/cel-go/common/overloads" "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" anypb "google.golang.org/protobuf/types/known/anypb" structpb "google.golang.org/protobuf/types/known/structpb" @@ -36,18 +35,10 @@ import ( type String string var ( - // StringType singleton. 
- StringType = NewTypeValue("string", - traits.AdderType, - traits.ComparerType, - traits.MatcherType, - traits.ReceiverType, - traits.SizerType) - - stringOneArgOverloads = map[string]func(String, ref.Val) ref.Val{ - overloads.Contains: stringContains, - overloads.EndsWith: stringEndsWith, - overloads.StartsWith: stringStartsWith, + stringOneArgOverloads = map[string]func(ref.Val, ref.Val) ref.Val{ + overloads.Contains: StringContains, + overloads.EndsWith: StringEndsWith, + overloads.StartsWith: StringStartsWith, } stringWrapperType = reflect.TypeOf(&wrapperspb.StringValue{}) @@ -198,26 +189,41 @@ func (s String) Value() any { return string(s) } -func stringContains(s String, sub ref.Val) ref.Val { +// StringContains returns whether the string contains a substring. +func StringContains(s, sub ref.Val) ref.Val { + str, ok := s.(String) + if !ok { + return MaybeNoSuchOverloadErr(s) + } subStr, ok := sub.(String) if !ok { return MaybeNoSuchOverloadErr(sub) } - return Bool(strings.Contains(string(s), string(subStr))) + return Bool(strings.Contains(string(str), string(subStr))) } -func stringEndsWith(s String, suf ref.Val) ref.Val { +// StringEndsWith returns whether the target string contains the input suffix. +func StringEndsWith(s, suf ref.Val) ref.Val { + str, ok := s.(String) + if !ok { + return MaybeNoSuchOverloadErr(s) + } sufStr, ok := suf.(String) if !ok { return MaybeNoSuchOverloadErr(suf) } - return Bool(strings.HasSuffix(string(s), string(sufStr))) + return Bool(strings.HasSuffix(string(str), string(sufStr))) } -func stringStartsWith(s String, pre ref.Val) ref.Val { +// StringStartsWith returns whether the target string contains the input prefix. +func StringStartsWith(s, pre ref.Val) ref.Val { + str, ok := s.(String) + if !ok { + return MaybeNoSuchOverloadErr(s) + } preStr, ok := pre.(String) if !ok { return MaybeNoSuchOverloadErr(pre) } - return Bool(strings.HasPrefix(string(s), string(preStr))) + return Bool(strings.HasPrefix(string(str), string(preStr))) } diff --git a/vendor/github.com/google/cel-go/common/types/timestamp.go b/vendor/github.com/google/cel-go/common/types/timestamp.go index c784f2e54..33acdea8e 100644 --- a/vendor/github.com/google/cel-go/common/types/timestamp.go +++ b/vendor/github.com/google/cel-go/common/types/timestamp.go @@ -23,7 +23,6 @@ import ( "github.com/google/cel-go/common/overloads" "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" anypb "google.golang.org/protobuf/types/known/anypb" structpb "google.golang.org/protobuf/types/known/structpb" @@ -53,15 +52,6 @@ const ( maxUnixTime int64 = 253402300799 ) -var ( - // TimestampType singleton. - TimestampType = NewTypeValue("google.protobuf.Timestamp", - traits.AdderType, - traits.ComparerType, - traits.ReceiverType, - traits.SubtractorType) -) - // Add implements traits.Adder.Add. func (t Timestamp) Add(other ref.Val) ref.Val { switch other.Type() { diff --git a/vendor/github.com/google/cel-go/common/types/type.go b/vendor/github.com/google/cel-go/common/types/type.go deleted file mode 100644 index 164a46050..000000000 --- a/vendor/github.com/google/cel-go/common/types/type.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "reflect" - - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" -) - -var ( - // TypeType is the type of a TypeValue. - TypeType = NewTypeValue("type") -) - -// TypeValue is an instance of a Value that describes a value's type. -type TypeValue struct { - name string - traitMask int -} - -// NewTypeValue returns *TypeValue which is both a ref.Type and ref.Val. -func NewTypeValue(name string, traits ...int) *TypeValue { - traitMask := 0 - for _, trait := range traits { - traitMask |= trait - } - return &TypeValue{ - name: name, - traitMask: traitMask} -} - -// NewObjectTypeValue returns a *TypeValue based on the input name, which is -// annotated with the traits relevant to all objects. -func NewObjectTypeValue(name string) *TypeValue { - return NewTypeValue(name, - traits.FieldTesterType, - traits.IndexerType) -} - -// ConvertToNative implements ref.Val.ConvertToNative. -func (t *TypeValue) ConvertToNative(typeDesc reflect.Type) (any, error) { - // TODO: replace the internal type representation with a proto-value. - return nil, fmt.Errorf("type conversion not supported for 'type'") -} - -// ConvertToType implements ref.Val.ConvertToType. -func (t *TypeValue) ConvertToType(typeVal ref.Type) ref.Val { - switch typeVal { - case TypeType: - return TypeType - case StringType: - return String(t.TypeName()) - } - return NewErr("type conversion error from '%s' to '%s'", TypeType, typeVal) -} - -// Equal implements ref.Val.Equal. -func (t *TypeValue) Equal(other ref.Val) ref.Val { - otherType, ok := other.(ref.Type) - return Bool(ok && t.TypeName() == otherType.TypeName()) -} - -// HasTrait indicates whether the type supports the given trait. -// Trait codes are defined in the traits package, e.g. see traits.AdderType. -func (t *TypeValue) HasTrait(trait int) bool { - return trait&t.traitMask == trait -} - -// String implements fmt.Stringer. -func (t *TypeValue) String() string { - return t.name -} - -// Type implements ref.Val.Type. -func (t *TypeValue) Type() ref.Type { - return TypeType -} - -// TypeName gives the type's name as a string. -func (t *TypeValue) TypeName() string { - return t.name -} - -// Value implements ref.Val.Value. -func (t *TypeValue) Value() any { - return t.name -} diff --git a/vendor/github.com/google/cel-go/common/types/types.go b/vendor/github.com/google/cel-go/common/types/types.go new file mode 100644 index 000000000..76624eefd --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/types.go @@ -0,0 +1,806 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "reflect" + "strings" + + chkdecls "github.com/google/cel-go/checker/decls" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" +) + +// Kind indicates a CEL type's kind which is used to differentiate quickly between simple +// and complex types. +type Kind uint + +const ( + // UnspecifiedKind is returned when the type is nil or its kind is not specified. + UnspecifiedKind Kind = iota + + // DynKind represents a dynamic type. This kind only exists at type-check time. + DynKind + + // AnyKind represents a google.protobuf.Any type. This kind only exists at type-check time. + // Prefer DynKind to AnyKind as AnyKind has a specific meaning which is based on protobuf + // well-known types. + AnyKind + + // BoolKind represents a boolean type. + BoolKind + + // BytesKind represents a bytes type. + BytesKind + + // DoubleKind represents a double type. + DoubleKind + + // DurationKind represents a CEL duration type. + DurationKind + + // ErrorKind represents a CEL error type. + ErrorKind + + // IntKind represents an integer type. + IntKind + + // ListKind represents a list type. + ListKind + + // MapKind represents a map type. + MapKind + + // NullTypeKind represents a null type. + NullTypeKind + + // OpaqueKind represents an abstract type which has no accessible fields. + OpaqueKind + + // StringKind represents a string type. + StringKind + + // StructKind represents a structured object with typed fields. + StructKind + + // TimestampKind represents a a CEL time type. + TimestampKind + + // TypeKind represents the CEL type. + TypeKind + + // TypeParamKind represents a parameterized type whose type name will be resolved at type-check time, if possible. + TypeParamKind + + // UintKind represents a uint type. + UintKind + + // UnknownKind represents an unknown value type. + UnknownKind +) + +var ( + // AnyType represents the google.protobuf.Any type. + AnyType = &Type{ + kind: AnyKind, + runtimeTypeName: "google.protobuf.Any", + traitMask: traits.FieldTesterType | + traits.IndexerType, + } + // BoolType represents the bool type. + BoolType = &Type{ + kind: BoolKind, + runtimeTypeName: "bool", + traitMask: traits.ComparerType | + traits.NegatorType, + } + // BytesType represents the bytes type. + BytesType = &Type{ + kind: BytesKind, + runtimeTypeName: "bytes", + traitMask: traits.AdderType | + traits.ComparerType | + traits.SizerType, + } + // DoubleType represents the double type. + DoubleType = &Type{ + kind: DoubleKind, + runtimeTypeName: "double", + traitMask: traits.AdderType | + traits.ComparerType | + traits.DividerType | + traits.MultiplierType | + traits.NegatorType | + traits.SubtractorType, + } + // DurationType represents the CEL duration type. + DurationType = &Type{ + kind: DurationKind, + runtimeTypeName: "google.protobuf.Duration", + traitMask: traits.AdderType | + traits.ComparerType | + traits.NegatorType | + traits.ReceiverType | + traits.SubtractorType, + } + // DynType represents a dynamic CEL type whose type will be determined at runtime from context. + DynType = &Type{ + kind: DynKind, + runtimeTypeName: "dyn", + } + // ErrorType represents a CEL error value. + ErrorType = &Type{ + kind: ErrorKind, + runtimeTypeName: "error", + } + // IntType represents the int type. 
+ IntType = &Type{ + kind: IntKind, + runtimeTypeName: "int", + traitMask: traits.AdderType | + traits.ComparerType | + traits.DividerType | + traits.ModderType | + traits.MultiplierType | + traits.NegatorType | + traits.SubtractorType, + } + // ListType represents the runtime list type. + ListType = NewListType(nil) + // MapType represents the runtime map type. + MapType = NewMapType(nil, nil) + // NullType represents the type of a null value. + NullType = &Type{ + kind: NullTypeKind, + runtimeTypeName: "null_type", + } + // StringType represents the string type. + StringType = &Type{ + kind: StringKind, + runtimeTypeName: "string", + traitMask: traits.AdderType | + traits.ComparerType | + traits.MatcherType | + traits.ReceiverType | + traits.SizerType, + } + // TimestampType represents the time type. + TimestampType = &Type{ + kind: TimestampKind, + runtimeTypeName: "google.protobuf.Timestamp", + traitMask: traits.AdderType | + traits.ComparerType | + traits.ReceiverType | + traits.SubtractorType, + } + // TypeType represents a CEL type + TypeType = &Type{ + kind: TypeKind, + runtimeTypeName: "type", + } + // UintType represents a uint type. + UintType = &Type{ + kind: UintKind, + runtimeTypeName: "uint", + traitMask: traits.AdderType | + traits.ComparerType | + traits.DividerType | + traits.ModderType | + traits.MultiplierType | + traits.SubtractorType, + } + // UnknownType represents an unknown value type. + UnknownType = &Type{ + kind: UnknownKind, + runtimeTypeName: "unknown", + } +) + +var _ ref.Type = &Type{} +var _ ref.Val = &Type{} + +// Type holds a reference to a runtime type with an optional type-checked set of type parameters. +type Type struct { + // kind indicates general category of the type. + kind Kind + + // parameters holds the optional type-checked set of type Parameters that are used during static analysis. + parameters []*Type + + // runtimeTypeName indicates the runtime type name of the type. + runtimeTypeName string + + // isAssignableType function determines whether one type is assignable to this type. + // A nil value for the isAssignableType function falls back to equality of kind, runtimeType, and parameters. + isAssignableType func(other *Type) bool + + // isAssignableRuntimeType function determines whether the runtime type (with erasure) is assignable to this type. + // A nil value for the isAssignableRuntimeType function falls back to the equality of the type or type name. + isAssignableRuntimeType func(other ref.Val) bool + + // traitMask is a mask of flags which indicate the capabilities of the type. + traitMask int +} + +// ConvertToNative implements ref.Val.ConvertToNative. +func (t *Type) ConvertToNative(typeDesc reflect.Type) (any, error) { + return nil, fmt.Errorf("type conversion not supported for 'type'") +} + +// ConvertToType implements ref.Val.ConvertToType. +func (t *Type) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case TypeType: + return TypeType + case StringType: + return String(t.TypeName()) + } + return NewErr("type conversion error from '%s' to '%s'", TypeType, typeVal) +} + +// Equal indicates whether two types have the same runtime type name. +// +// The name Equal is a bit of a misnomer, but for historical reasons, this is the +// runtime behavior. For a more accurate definition see IsType(). +func (t *Type) Equal(other ref.Val) ref.Val { + otherType, ok := other.(ref.Type) + return Bool(ok && t.TypeName() == otherType.TypeName()) +} + +// HasTrait implements the ref.Type interface method. 
+func (t *Type) HasTrait(trait int) bool { + return trait&t.traitMask == trait +} + +// IsExactType indicates whether the two types are exactly the same. This check also verifies type parameter type names. +func (t *Type) IsExactType(other *Type) bool { + return t.isTypeInternal(other, true) +} + +// IsEquivalentType indicates whether two types are equivalent. This check ignores type parameter type names. +func (t *Type) IsEquivalentType(other *Type) bool { + return t.isTypeInternal(other, false) +} + +// Kind indicates general category of the type. +func (t *Type) Kind() Kind { + if t == nil { + return UnspecifiedKind + } + return t.kind +} + +// isTypeInternal checks whether the two types are equivalent or exactly the same based on the checkTypeParamName flag. +func (t *Type) isTypeInternal(other *Type, checkTypeParamName bool) bool { + if t == nil { + return false + } + if t == other { + return true + } + if t.Kind() != other.Kind() || len(t.Parameters()) != len(other.Parameters()) { + return false + } + if (checkTypeParamName || t.Kind() != TypeParamKind) && t.TypeName() != other.TypeName() { + return false + } + for i, p := range t.Parameters() { + if !p.isTypeInternal(other.Parameters()[i], checkTypeParamName) { + return false + } + } + return true +} + +// IsAssignableType determines whether the current type is type-check assignable from the input fromType. +func (t *Type) IsAssignableType(fromType *Type) bool { + if t == nil { + return false + } + if t.isAssignableType != nil { + return t.isAssignableType(fromType) + } + return t.defaultIsAssignableType(fromType) +} + +// IsAssignableRuntimeType determines whether the current type is runtime assignable from the input runtimeType. +// +// At runtime, parameterized types are erased and so a function which type-checks to support a map(string, string) +// will have a runtime assignable type of a map. +func (t *Type) IsAssignableRuntimeType(val ref.Val) bool { + if t == nil { + return false + } + if t.isAssignableRuntimeType != nil { + return t.isAssignableRuntimeType(val) + } + return t.defaultIsAssignableRuntimeType(val) +} + +// Parameters returns the list of type parameters if set. +// +// For ListKind, Parameters()[0] represents the list element type +// For MapKind, Parameters()[0] represents the map key type, and Parameters()[1] represents the map +// value type. +func (t *Type) Parameters() []*Type { + if t == nil { + return emptyParams + } + return t.parameters +} + +// DeclaredTypeName indicates the fully qualified and parameterized type-check type name. +func (t *Type) DeclaredTypeName() string { + // if the type itself is neither null, nor dyn, but is assignable to null, then it's a wrapper type. + if t.Kind() != NullTypeKind && !t.isDyn() && t.IsAssignableType(NullType) { + return fmt.Sprintf("wrapper(%s)", t.TypeName()) + } + return t.TypeName() +} + +// Type implements the ref.Val interface method. +func (t *Type) Type() ref.Type { + return TypeType +} + +// Value implements the ref.Val interface method. +func (t *Type) Value() any { + return t.TypeName() +} + +// TypeName returns the type-erased fully qualified runtime type name. +// +// TypeName implements the ref.Type interface method. +func (t *Type) TypeName() string { + if t == nil { + return "" + } + return t.runtimeTypeName +} + +// String returns a human-readable definition of the type name. 
+func (t *Type) String() string { + if len(t.Parameters()) == 0 { + return t.DeclaredTypeName() + } + params := make([]string, len(t.Parameters())) + for i, p := range t.Parameters() { + params[i] = p.String() + } + return fmt.Sprintf("%s(%s)", t.DeclaredTypeName(), strings.Join(params, ", ")) +} + +// isDyn indicates whether the type is dynamic in any way. +func (t *Type) isDyn() bool { + k := t.Kind() + return k == DynKind || k == AnyKind || k == TypeParamKind +} + +// defaultIsAssignableType provides the standard definition of what it means for one type to be assignable to another +// where any of the following may return a true result: +// - The from types are the same instance +// - The target type is dynamic +// - The fromType has the same kind and type name as the target type, and all parameters of the target type +// +// are IsAssignableType() from the parameters of the fromType. +func (t *Type) defaultIsAssignableType(fromType *Type) bool { + if t == fromType || t.isDyn() { + return true + } + if t.Kind() != fromType.Kind() || + t.TypeName() != fromType.TypeName() || + len(t.Parameters()) != len(fromType.Parameters()) { + return false + } + for i, tp := range t.Parameters() { + fp := fromType.Parameters()[i] + if !tp.IsAssignableType(fp) { + return false + } + } + return true +} + +// defaultIsAssignableRuntimeType inspects the type and in the case of list and map elements, the key and element types +// to determine whether a ref.Val is assignable to the declared type for a function signature. +func (t *Type) defaultIsAssignableRuntimeType(val ref.Val) bool { + valType := val.Type() + // If the current type and value type don't agree, then return + if !(t.isDyn() || t.TypeName() == valType.TypeName()) { + return false + } + switch t.Kind() { + case ListKind: + elemType := t.Parameters()[0] + l := val.(traits.Lister) + if l.Size() == IntZero { + return true + } + it := l.Iterator() + elemVal := it.Next() + return elemType.IsAssignableRuntimeType(elemVal) + case MapKind: + keyType := t.Parameters()[0] + elemType := t.Parameters()[1] + m := val.(traits.Mapper) + if m.Size() == IntZero { + return true + } + it := m.Iterator() + keyVal := it.Next() + elemVal := m.Get(keyVal) + return keyType.IsAssignableRuntimeType(keyVal) && elemType.IsAssignableRuntimeType(elemVal) + } + return true +} + +// NewListType creates an instances of a list type value with the provided element type. +func NewListType(elemType *Type) *Type { + return &Type{ + kind: ListKind, + parameters: []*Type{elemType}, + runtimeTypeName: "list", + traitMask: traits.AdderType | + traits.ContainerType | + traits.IndexerType | + traits.IterableType | + traits.SizerType, + } +} + +// NewMapType creates an instance of a map type value with the provided key and value types. +func NewMapType(keyType, valueType *Type) *Type { + return &Type{ + kind: MapKind, + parameters: []*Type{keyType, valueType}, + runtimeTypeName: "map", + traitMask: traits.ContainerType | + traits.IndexerType | + traits.IterableType | + traits.SizerType, + } +} + +// NewNullableType creates an instance of a nullable type with the provided wrapped type. +// +// Note: only primitive types are supported as wrapped types. 
+func NewNullableType(wrapped *Type) *Type { + return &Type{ + kind: wrapped.Kind(), + parameters: wrapped.Parameters(), + runtimeTypeName: wrapped.TypeName(), + traitMask: wrapped.traitMask, + isAssignableType: func(other *Type) bool { + return NullType.IsAssignableType(other) || wrapped.IsAssignableType(other) + }, + isAssignableRuntimeType: func(other ref.Val) bool { + return NullType.IsAssignableRuntimeType(other) || wrapped.IsAssignableRuntimeType(other) + }, + } +} + +// NewOptionalType creates an abstract parameterized type instance corresponding to CEL's notion of optional. +func NewOptionalType(param *Type) *Type { + return NewOpaqueType("optional", param) +} + +// NewOpaqueType creates an abstract parameterized type with a given name. +func NewOpaqueType(name string, params ...*Type) *Type { + return &Type{ + kind: OpaqueKind, + parameters: params, + runtimeTypeName: name, + } +} + +// NewObjectType creates a type reference to an externally defined type, e.g. a protobuf message type. +// +// An object type is assumed to support field presence testing and field indexing. Additionally, the +// type may also indicate additional traits through the use of the optional traits vararg argument. +func NewObjectType(typeName string, traits ...int) *Type { + // Function sanitizes object types on the fly + if wkt, found := checkedWellKnowns[typeName]; found { + return wkt + } + traitMask := 0 + for _, trait := range traits { + traitMask |= trait + } + return &Type{ + kind: StructKind, + parameters: emptyParams, + runtimeTypeName: typeName, + traitMask: structTypeTraitMask | traitMask, + } +} + +// NewObjectTypeValue creates a type reference to an externally defined type. +// +// Deprecated: use cel.ObjectType(typeName) +func NewObjectTypeValue(typeName string) *Type { + return NewObjectType(typeName) +} + +// NewTypeValue creates an opaque type which has a set of optional type traits as defined in +// the common/types/traits package. +// +// Deprecated: use cel.ObjectType(typeName, traits) +func NewTypeValue(typeName string, traits ...int) *Type { + traitMask := 0 + for _, trait := range traits { + traitMask |= trait + } + return &Type{ + kind: StructKind, + parameters: emptyParams, + runtimeTypeName: typeName, + traitMask: traitMask, + } +} + +// NewTypeParamType creates a parameterized type instance. +func NewTypeParamType(paramName string) *Type { + return &Type{ + kind: TypeParamKind, + runtimeTypeName: paramName, + } +} + +// NewTypeTypeWithParam creates a type with a type parameter. +// Used for type-checking purposes, but equivalent to TypeType otherwise. +func NewTypeTypeWithParam(param *Type) *Type { + return &Type{ + kind: TypeKind, + runtimeTypeName: "type", + parameters: []*Type{param}, + } +} + +// TypeToExprType converts a CEL-native type representation to a protobuf CEL Type representation. 
+func TypeToExprType(t *Type) (*exprpb.Type, error) { + switch t.Kind() { + case AnyKind: + return chkdecls.Any, nil + case BoolKind: + return maybeWrapper(t, chkdecls.Bool), nil + case BytesKind: + return maybeWrapper(t, chkdecls.Bytes), nil + case DoubleKind: + return maybeWrapper(t, chkdecls.Double), nil + case DurationKind: + return chkdecls.Duration, nil + case DynKind: + return chkdecls.Dyn, nil + case ErrorKind: + return chkdecls.Error, nil + case IntKind: + return maybeWrapper(t, chkdecls.Int), nil + case ListKind: + if len(t.Parameters()) != 1 { + return nil, fmt.Errorf("invalid list, got %d parameters, wanted one", len(t.Parameters())) + } + et, err := TypeToExprType(t.Parameters()[0]) + if err != nil { + return nil, err + } + return chkdecls.NewListType(et), nil + case MapKind: + if len(t.Parameters()) != 2 { + return nil, fmt.Errorf("invalid map, got %d parameters, wanted two", len(t.Parameters())) + } + kt, err := TypeToExprType(t.Parameters()[0]) + if err != nil { + return nil, err + } + vt, err := TypeToExprType(t.Parameters()[1]) + if err != nil { + return nil, err + } + return chkdecls.NewMapType(kt, vt), nil + case NullTypeKind: + return chkdecls.Null, nil + case OpaqueKind: + params := make([]*exprpb.Type, len(t.Parameters())) + for i, p := range t.Parameters() { + pt, err := TypeToExprType(p) + if err != nil { + return nil, err + } + params[i] = pt + } + return chkdecls.NewAbstractType(t.TypeName(), params...), nil + case StringKind: + return maybeWrapper(t, chkdecls.String), nil + case StructKind: + return chkdecls.NewObjectType(t.TypeName()), nil + case TimestampKind: + return chkdecls.Timestamp, nil + case TypeParamKind: + return chkdecls.NewTypeParamType(t.TypeName()), nil + case TypeKind: + if len(t.Parameters()) == 1 { + p, err := TypeToExprType(t.Parameters()[0]) + if err != nil { + return nil, err + } + return chkdecls.NewTypeType(p), nil + } + return chkdecls.NewTypeType(nil), nil + case UintKind: + return maybeWrapper(t, chkdecls.Uint), nil + } + return nil, fmt.Errorf("missing type conversion to proto: %v", t) +} + +// ExprTypeToType converts a protobuf CEL type representation to a CEL-native type representation. 
+func ExprTypeToType(t *exprpb.Type) (*Type, error) { + switch t.GetTypeKind().(type) { + case *exprpb.Type_Dyn: + return DynType, nil + case *exprpb.Type_AbstractType_: + paramTypes := make([]*Type, len(t.GetAbstractType().GetParameterTypes())) + for i, p := range t.GetAbstractType().GetParameterTypes() { + pt, err := ExprTypeToType(p) + if err != nil { + return nil, err + } + paramTypes[i] = pt + } + return NewOpaqueType(t.GetAbstractType().GetName(), paramTypes...), nil + case *exprpb.Type_ListType_: + et, err := ExprTypeToType(t.GetListType().GetElemType()) + if err != nil { + return nil, err + } + return NewListType(et), nil + case *exprpb.Type_MapType_: + kt, err := ExprTypeToType(t.GetMapType().GetKeyType()) + if err != nil { + return nil, err + } + vt, err := ExprTypeToType(t.GetMapType().GetValueType()) + if err != nil { + return nil, err + } + return NewMapType(kt, vt), nil + case *exprpb.Type_MessageType: + return NewObjectType(t.GetMessageType()), nil + case *exprpb.Type_Null: + return NullType, nil + case *exprpb.Type_Primitive: + switch t.GetPrimitive() { + case exprpb.Type_BOOL: + return BoolType, nil + case exprpb.Type_BYTES: + return BytesType, nil + case exprpb.Type_DOUBLE: + return DoubleType, nil + case exprpb.Type_INT64: + return IntType, nil + case exprpb.Type_STRING: + return StringType, nil + case exprpb.Type_UINT64: + return UintType, nil + default: + return nil, fmt.Errorf("unsupported primitive type: %v", t) + } + case *exprpb.Type_TypeParam: + return NewTypeParamType(t.GetTypeParam()), nil + case *exprpb.Type_Type: + if t.GetType().GetTypeKind() != nil { + p, err := ExprTypeToType(t.GetType()) + if err != nil { + return nil, err + } + return NewTypeTypeWithParam(p), nil + } + return TypeType, nil + case *exprpb.Type_WellKnown: + switch t.GetWellKnown() { + case exprpb.Type_ANY: + return AnyType, nil + case exprpb.Type_DURATION: + return DurationType, nil + case exprpb.Type_TIMESTAMP: + return TimestampType, nil + default: + return nil, fmt.Errorf("unsupported well-known type: %v", t) + } + case *exprpb.Type_Wrapper: + t, err := ExprTypeToType(&exprpb.Type{TypeKind: &exprpb.Type_Primitive{Primitive: t.GetWrapper()}}) + if err != nil { + return nil, err + } + return NewNullableType(t), nil + case *exprpb.Type_Error: + return ErrorType, nil + default: + return nil, fmt.Errorf("unsupported type: %v", t) + } +} + +func maybeWrapper(t *Type, pbType *exprpb.Type) *exprpb.Type { + if t.IsAssignableType(NullType) { + return chkdecls.NewWrapperType(pbType) + } + return pbType +} + +func maybeForeignType(t ref.Type) *Type { + if celType, ok := t.(*Type); ok { + return celType + } + // Inspect the incoming type to determine its traits. The assumption will be that the incoming + // type does not have any field values; however, if the trait mask indicates that field testing + // and indexing are supported, the foreign type is marked as a struct. + traitMask := 0 + for _, trait := range allTraits { + if t.HasTrait(trait) { + traitMask |= trait + } + } + // Treat the value like a struct. If it has no fields, this is harmless to denote the type + // as such since it basically becomes an opaque type by convention. + return NewObjectType(t.TypeName(), traitMask) +} + +var ( + checkedWellKnowns = map[string]*Type{ + // Wrapper types. 
+ "google.protobuf.BoolValue": NewNullableType(BoolType), + "google.protobuf.BytesValue": NewNullableType(BytesType), + "google.protobuf.DoubleValue": NewNullableType(DoubleType), + "google.protobuf.FloatValue": NewNullableType(DoubleType), + "google.protobuf.Int64Value": NewNullableType(IntType), + "google.protobuf.Int32Value": NewNullableType(IntType), + "google.protobuf.UInt64Value": NewNullableType(UintType), + "google.protobuf.UInt32Value": NewNullableType(UintType), + "google.protobuf.StringValue": NewNullableType(StringType), + // Well-known types. + "google.protobuf.Any": AnyType, + "google.protobuf.Duration": DurationType, + "google.protobuf.Timestamp": TimestampType, + // Json types. + "google.protobuf.ListValue": NewListType(DynType), + "google.protobuf.NullValue": NullType, + "google.protobuf.Struct": NewMapType(StringType, DynType), + "google.protobuf.Value": DynType, + } + + emptyParams = []*Type{} + + allTraits = []int{ + traits.AdderType, + traits.ComparerType, + traits.ContainerType, + traits.DividerType, + traits.FieldTesterType, + traits.IndexerType, + traits.IterableType, + traits.IteratorType, + traits.MatcherType, + traits.ModderType, + traits.MultiplierType, + traits.NegatorType, + traits.ReceiverType, + traits.SizerType, + traits.SubtractorType, + } + + structTypeTraitMask = traits.FieldTesterType | traits.IndexerType +) diff --git a/vendor/github.com/google/cel-go/common/types/uint.go b/vendor/github.com/google/cel-go/common/types/uint.go index 615c7ec52..3257f9ade 100644 --- a/vendor/github.com/google/cel-go/common/types/uint.go +++ b/vendor/github.com/google/cel-go/common/types/uint.go @@ -21,7 +21,6 @@ import ( "strconv" "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" anypb "google.golang.org/protobuf/types/known/anypb" structpb "google.golang.org/protobuf/types/known/structpb" @@ -32,15 +31,6 @@ import ( type Uint uint64 var ( - // UintType singleton. - UintType = NewTypeValue("uint", - traits.AdderType, - traits.ComparerType, - traits.DividerType, - traits.ModderType, - traits.MultiplierType, - traits.SubtractorType) - uint32WrapperType = reflect.TypeOf(&wrapperspb.UInt32Value{}) uint64WrapperType = reflect.TypeOf(&wrapperspb.UInt64Value{}) diff --git a/vendor/github.com/google/cel-go/common/types/unknown.go b/vendor/github.com/google/cel-go/common/types/unknown.go index bc411c15b..9dd2b2579 100644 --- a/vendor/github.com/google/cel-go/common/types/unknown.go +++ b/vendor/github.com/google/cel-go/common/types/unknown.go @@ -15,52 +15,312 @@ package types import ( + "fmt" + "math" "reflect" + "sort" + "strings" + "unicode" "github.com/google/cel-go/common/types/ref" ) -// Unknown type implementation which collects expression ids which caused the -// current value to become unknown. -type Unknown []int64 - var ( - // UnknownType singleton. - UnknownType = NewTypeValue("unknown") + unspecifiedAttribute = &AttributeTrail{qualifierPath: []any{}} ) +// NewAttributeTrail creates a new simple attribute from a variable name. +func NewAttributeTrail(variable string) *AttributeTrail { + if variable == "" { + return unspecifiedAttribute + } + return &AttributeTrail{variable: variable} +} + +// AttributeTrail specifies a variable with an optional qualifier path. An attribute value is expected to +// correspond to an AbsoluteAttribute, meaning a field selection which starts with a top-level variable. +// +// The qualifer path elements adhere to the AttributeQualifier type constraint. 
+type AttributeTrail struct { + variable string + qualifierPath []any +} + +// Equal returns whether two attribute values have the same variable name and qualifier paths. +func (a *AttributeTrail) Equal(other *AttributeTrail) bool { + if a.Variable() != other.Variable() || len(a.QualifierPath()) != len(other.QualifierPath()) { + return false + } + for i, q := range a.QualifierPath() { + qual := other.QualifierPath()[i] + if !qualifiersEqual(q, qual) { + return false + } + } + return true +} + +func qualifiersEqual(a, b any) bool { + if a == b { + return true + } + switch numA := a.(type) { + case int64: + numB, ok := b.(uint64) + if !ok { + return false + } + return intUintEqual(numA, numB) + case uint64: + numB, ok := b.(int64) + if !ok { + return false + } + return intUintEqual(numB, numA) + default: + return false + } +} + +func intUintEqual(i int64, u uint64) bool { + if i < 0 || u > math.MaxInt64 { + return false + } + return i == int64(u) +} + +// Variable returns the variable name associated with the attribute. +func (a *AttributeTrail) Variable() string { + return a.variable +} + +// QualifierPath returns the optional set of qualifying fields or indices applied to the variable. +func (a *AttributeTrail) QualifierPath() []any { + return a.qualifierPath +} + +// String returns the string representation of the Attribute. +func (a *AttributeTrail) String() string { + if a.variable == "" { + return "" + } + var str strings.Builder + str.WriteString(a.variable) + for _, q := range a.qualifierPath { + switch q := q.(type) { + case bool, int64: + str.WriteString(fmt.Sprintf("[%v]", q)) + case uint64: + str.WriteString(fmt.Sprintf("[%vu]", q)) + case string: + if isIdentifierCharacter(q) { + str.WriteString(fmt.Sprintf(".%v", q)) + } else { + str.WriteString(fmt.Sprintf("[%q]", q)) + } + } + } + return str.String() +} + +func isIdentifierCharacter(str string) bool { + for _, c := range str { + if unicode.IsLetter(c) || unicode.IsDigit(c) || string(c) == "_" { + continue + } + return false + } + return true +} + +// AttributeQualifier constrains the possible types which may be used to qualify an attribute. +type AttributeQualifier interface { + bool | int64 | uint64 | string +} + +// QualifyAttribute qualifies an attribute using a valid AttributeQualifier type. +func QualifyAttribute[T AttributeQualifier](attr *AttributeTrail, qualifier T) *AttributeTrail { + attr.qualifierPath = append(attr.qualifierPath, qualifier) + return attr +} + +// Unknown type which collects expression ids which caused the current value to become unknown. +type Unknown struct { + attributeTrails map[int64][]*AttributeTrail +} + +// NewUnknown creates a new unknown at a given expression id for an attribute. +// +// If the attribute is nil, the attribute value will be the `unspecifiedAttribute`. +func NewUnknown(id int64, attr *AttributeTrail) *Unknown { + if attr == nil { + attr = unspecifiedAttribute + } + return &Unknown{ + attributeTrails: map[int64][]*AttributeTrail{id: {attr}}, + } +} + +// IDs returns the set of unknown expression ids contained by this value. +// +// Numeric identifiers are guaranteed to be in sorted order. +func (u *Unknown) IDs() []int64 { + ids := make(int64Slice, len(u.attributeTrails)) + i := 0 + for id := range u.attributeTrails { + ids[i] = id + i++ + } + ids.Sort() + return ids +} + +// GetAttributeTrails returns the attribute trails, if present, missing for a given expression id. 
+func (u *Unknown) GetAttributeTrails(id int64) ([]*AttributeTrail, bool) { + trails, found := u.attributeTrails[id] + return trails, found +} + +// Contains returns true if the input unknown is a subset of the current unknown. +func (u *Unknown) Contains(other *Unknown) bool { + for id, otherTrails := range other.attributeTrails { + trails, found := u.attributeTrails[id] + if !found || len(otherTrails) != len(trails) { + return false + } + for _, ot := range otherTrails { + found := false + for _, t := range trails { + if t.Equal(ot) { + found = true + break + } + } + if !found { + return false + } + } + } + return true +} + // ConvertToNative implements ref.Val.ConvertToNative. -func (u Unknown) ConvertToNative(typeDesc reflect.Type) (any, error) { +func (u *Unknown) ConvertToNative(typeDesc reflect.Type) (any, error) { return u.Value(), nil } // ConvertToType is an identity function since unknown values cannot be modified. -func (u Unknown) ConvertToType(typeVal ref.Type) ref.Val { +func (u *Unknown) ConvertToType(typeVal ref.Type) ref.Val { return u } // Equal is an identity function since unknown values cannot be modified. -func (u Unknown) Equal(other ref.Val) ref.Val { +func (u *Unknown) Equal(other ref.Val) ref.Val { return u } +// String implements the Stringer interface +func (u *Unknown) String() string { + var str strings.Builder + for id, attrs := range u.attributeTrails { + if str.Len() != 0 { + str.WriteString(", ") + } + if len(attrs) == 1 { + str.WriteString(fmt.Sprintf("%v (%d)", attrs[0], id)) + } else { + str.WriteString(fmt.Sprintf("%v (%d)", attrs, id)) + } + } + return str.String() +} + // Type implements ref.Val.Type. -func (u Unknown) Type() ref.Type { +func (u *Unknown) Type() ref.Type { return UnknownType } // Value implements ref.Val.Value. -func (u Unknown) Value() any { - return []int64(u) +func (u *Unknown) Value() any { + return u } -// IsUnknown returns whether the element ref.Type or ref.Val is equal to the -// UnknownType singleton. +// IsUnknown returns whether the element ref.Val is in instance of *types.Unknown func IsUnknown(val ref.Val) bool { switch val.(type) { - case Unknown: + case *Unknown: return true default: return false } } + +// MaybeMergeUnknowns determines whether an input value and another, possibly nil, unknown will produce +// an unknown result. +// +// If the input `val` is another Unknown, then the result will be the merge of the `val` and the input +// `unk`. If the `val` is not unknown, then the result will depend on whether the input `unk` is nil. +// If both values are non-nil and unknown, then the return value will be a merge of both unknowns. +func MaybeMergeUnknowns(val ref.Val, unk *Unknown) (*Unknown, bool) { + src, isUnk := val.(*Unknown) + if !isUnk { + if unk != nil { + return unk, true + } + return unk, false + } + return MergeUnknowns(src, unk), true +} + +// MergeUnknowns combines two unknown values into a new unknown value. 
+func MergeUnknowns(unk1, unk2 *Unknown) *Unknown { + if unk1 == nil { + return unk2 + } + if unk2 == nil { + return unk1 + } + out := &Unknown{ + attributeTrails: make(map[int64][]*AttributeTrail, len(unk1.attributeTrails)+len(unk2.attributeTrails)), + } + for id, ats := range unk1.attributeTrails { + out.attributeTrails[id] = ats + } + for id, ats := range unk2.attributeTrails { + existing, found := out.attributeTrails[id] + if !found { + out.attributeTrails[id] = ats + continue + } + + for _, at := range ats { + found := false + for _, et := range existing { + if at.Equal(et) { + found = true + break + } + } + if !found { + existing = append(existing, at) + } + } + out.attributeTrails[id] = existing + } + return out +} + +// int64Slice is an implementation of the sort.Interface +type int64Slice []int64 + +// Len returns the number of elements in the slice. +func (x int64Slice) Len() int { return len(x) } + +// Less indicates whether the value at index i is less than the value at index j. +func (x int64Slice) Less(i, j int) bool { return x[i] < x[j] } + +// Swap swaps the values at indices i and j in place. +func (x int64Slice) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +// Sort is a convenience method: x.Sort() calls Sort(x). +func (x int64Slice) Sort() { sort.Sort(x) } diff --git a/vendor/github.com/google/cel-go/common/types/util.go b/vendor/github.com/google/cel-go/common/types/util.go index a8e9afa9e..71662eee3 100644 --- a/vendor/github.com/google/cel-go/common/types/util.go +++ b/vendor/github.com/google/cel-go/common/types/util.go @@ -21,7 +21,7 @@ import ( // IsUnknownOrError returns whether the input element ref.Val is an ErrType or UnknownType. func IsUnknownOrError(val ref.Val) bool { switch val.(type) { - case Unknown, *Err: + case *Unknown, *Err: return true } return false diff --git a/vendor/github.com/google/cel-go/ext/BUILD.bazel b/vendor/github.com/google/cel-go/ext/BUILD.bazel index 4bcf8a283..6fdcc60c6 100644 --- a/vendor/github.com/google/cel-go/ext/BUILD.bazel +++ b/vendor/github.com/google/cel-go/ext/BUILD.bazel @@ -9,6 +9,7 @@ go_library( srcs = [ "encoders.go", "guards.go", + "lists.go", "math.go", "native.go", "protos.go", @@ -19,8 +20,8 @@ go_library( visibility = ["//visibility:public"], deps = [ "//cel:go_default_library", + "//checker:go_default_library", "//checker/decls:go_default_library", - "//common:go_default_library", "//common/overloads:go_default_library", "//common/types:go_default_library", "//common/types/pb:go_default_library", @@ -41,6 +42,7 @@ go_test( size = "small", srcs = [ "encoders_test.go", + "lists_test.go", "math_test.go", "native_test.go", "protos_test.go", @@ -53,7 +55,6 @@ go_test( deps = [ "//cel:go_default_library", "//checker:go_default_library", - "//common:go_default_library", "//common/types:go_default_library", "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", diff --git a/vendor/github.com/google/cel-go/ext/README.md b/vendor/github.com/google/cel-go/ext/README.md index ef0eb2ab7..6f621ac4a 100644 --- a/vendor/github.com/google/cel-go/ext/README.md +++ b/vendor/github.com/google/cel-go/ext/README.md @@ -149,6 +149,23 @@ Example: proto.hasExt(msg, google.expr.proto2.test.int32_ext) // returns true || false +## Lists + +Extended functions for list manipulation. As a general note, all indices are +zero-based. + +### Slice + + +Returns a new sub-list using the indexes provided. 
+ + .slice(, ) -> + +Examples: + + [1,2,3,4].slice(1, 3) // return [2, 3] + [1,2,3,4].slice(2, 4) // return [3 ,4] + ## Sets Sets provides set relationship tests. diff --git a/vendor/github.com/google/cel-go/ext/bindings.go b/vendor/github.com/google/cel-go/ext/bindings.go index 9cc3c3efe..4ac9a7f07 100644 --- a/vendor/github.com/google/cel-go/ext/bindings.go +++ b/vendor/github.com/google/cel-go/ext/bindings.go @@ -16,7 +16,6 @@ package ext import ( "github.com/google/cel-go/cel" - "github.com/google/cel-go/common" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) @@ -71,7 +70,7 @@ func (celBindings) ProgramOptions() []cel.ProgramOption { return []cel.ProgramOption{} } -func celBind(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { +func celBind(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) { if !macroTargetMatchesNamespace(celNamespace, target) { return nil, nil } @@ -81,10 +80,7 @@ func celBind(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) case *exprpb.Expr_IdentExpr: varName = varIdent.GetIdentExpr().GetName() default: - return nil, &common.Error{ - Message: "cel.bind() variable names must be simple identifers", - Location: meh.OffsetLocation(varIdent.GetId()), - } + return nil, meh.NewError(varIdent.GetId(), "cel.bind() variable names must be simple identifiers") } varInit := args[1] resultExpr := args[2] diff --git a/vendor/github.com/google/cel-go/ext/encoders.go b/vendor/github.com/google/cel-go/ext/encoders.go index d9f9cb515..61ac0b777 100644 --- a/vendor/github.com/google/cel-go/ext/encoders.go +++ b/vendor/github.com/google/cel-go/ext/encoders.go @@ -16,7 +16,6 @@ package ext import ( "encoding/base64" - "reflect" "github.com/google/cel-go/cel" "github.com/google/cel-go/common/types" @@ -86,7 +85,3 @@ func base64DecodeString(str string) ([]byte, error) { func base64EncodeBytes(bytes []byte) (string, error) { return base64.StdEncoding.EncodeToString(bytes), nil } - -var ( - bytesListType = reflect.TypeOf([]byte{}) -) diff --git a/vendor/github.com/google/cel-go/ext/guards.go b/vendor/github.com/google/cel-go/ext/guards.go index 4c7786a69..785c8675b 100644 --- a/vendor/github.com/google/cel-go/ext/guards.go +++ b/vendor/github.com/google/cel-go/ext/guards.go @@ -17,6 +17,7 @@ package ext import ( "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) diff --git a/vendor/github.com/google/cel-go/ext/lists.go b/vendor/github.com/google/cel-go/ext/lists.go new file mode 100644 index 000000000..08751d08a --- /dev/null +++ b/vendor/github.com/google/cel-go/ext/lists.go @@ -0,0 +1,94 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ext + +import ( + "fmt" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" +) + +// Lists returns a cel.EnvOption to configure extended functions for list manipulation. +// As a general note, all indices are zero-based. +// # Slice +// +// Returns a new sub-list using the indexes provided. +// +// .slice(, ) -> +// +// Examples: +// +// [1,2,3,4].slice(1, 3) // return [2, 3] +// [1,2,3,4].slice(2, 4) // return [3 ,4] +func Lists() cel.EnvOption { + return cel.Lib(listsLib{}) +} + +type listsLib struct{} + +// LibraryName implements the SingletonLibrary interface method. +func (listsLib) LibraryName() string { + return "cel.lib.ext.lists" +} + +// CompileOptions implements the Library interface method. +func (listsLib) CompileOptions() []cel.EnvOption { + listType := cel.ListType(cel.TypeParamType("T")) + return []cel.EnvOption{ + cel.Function("slice", + cel.MemberOverload("list_slice", + []*cel.Type{listType, cel.IntType, cel.IntType}, listType, + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + list := args[0].(traits.Lister) + start := args[1].(types.Int) + end := args[2].(types.Int) + result, err := slice(list, start, end) + if err != nil { + return types.WrapErr(err) + } + return result + }), + ), + ), + } +} + +// ProgramOptions implements the Library interface method. +func (listsLib) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +func slice(list traits.Lister, start, end types.Int) (ref.Val, error) { + listLength := list.Size().(types.Int) + if start < 0 || end < 0 { + return nil, fmt.Errorf("cannot slice(%d, %d), negative indexes not supported", start, end) + } + if start > end { + return nil, fmt.Errorf("cannot slice(%d, %d), start index must be less than or equal to end index", start, end) + } + if listLength < end { + return nil, fmt.Errorf("cannot slice(%d, %d), list is length %d", start, end, listLength) + } + + var newList []ref.Val + for i := types.Int(start); i < end; i++ { + val := list.Get(i) + newList = append(newList, val) + } + return types.DefaultTypeAdapter.NativeToValue(newList), nil +} diff --git a/vendor/github.com/google/cel-go/ext/math.go b/vendor/github.com/google/cel-go/ext/math.go index 1c8ad585a..0b9a36103 100644 --- a/vendor/github.com/google/cel-go/ext/math.go +++ b/vendor/github.com/google/cel-go/ext/math.go @@ -19,10 +19,10 @@ import ( "strings" "github.com/google/cel-go/cel" - "github.com/google/cel-go/common" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) @@ -187,24 +187,18 @@ func (mathLib) ProgramOptions() []cel.ProgramOption { return []cel.ProgramOption{} } -func mathLeast(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { +func mathLeast(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) { if !macroTargetMatchesNamespace(mathNamespace, target) { return nil, nil } switch len(args) { case 0: - return nil, &common.Error{ - Message: "math.least() requires at least one argument", - Location: meh.OffsetLocation(target.GetId()), - } + return nil, meh.NewError(target.GetId(), "math.least() requires at least one argument") case 1: if isListLiteralWithValidArgs(args[0]) || isValidArgType(args[0]) { return meh.GlobalCall(minFunc, args[0]), nil } - return 
nil, &common.Error{ - Message: "math.least() invalid single argument value", - Location: meh.OffsetLocation(args[0].GetId()), - } + return nil, meh.NewError(args[0].GetId(), "math.least() invalid single argument value") case 2: err := checkInvalidArgs(meh, "math.least()", args) if err != nil { @@ -220,24 +214,18 @@ func mathLeast(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr } } -func mathGreatest(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { +func mathGreatest(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) { if !macroTargetMatchesNamespace(mathNamespace, target) { return nil, nil } switch len(args) { case 0: - return nil, &common.Error{ - Message: "math.greatest() requires at least one argument", - Location: meh.OffsetLocation(target.GetId()), - } + return nil, meh.NewError(target.GetId(), "math.greatest() requires at least one argument") case 1: if isListLiteralWithValidArgs(args[0]) || isValidArgType(args[0]) { return meh.GlobalCall(maxFunc, args[0]), nil } - return nil, &common.Error{ - Message: "math.greatest() invalid single argument value", - Location: meh.OffsetLocation(args[0].GetId()), - } + return nil, meh.NewError(args[0].GetId(), "math.greatest() invalid single argument value") case 2: err := checkInvalidArgs(meh, "math.greatest()", args) if err != nil { @@ -323,14 +311,11 @@ func maxList(numList ref.Val) ref.Val { } } -func checkInvalidArgs(meh cel.MacroExprHelper, funcName string, args []*exprpb.Expr) *common.Error { +func checkInvalidArgs(meh cel.MacroExprHelper, funcName string, args []*exprpb.Expr) *cel.Error { for _, arg := range args { err := checkInvalidArgLiteral(funcName, arg) if err != nil { - return &common.Error{ - Message: err.Error(), - Location: meh.OffsetLocation(arg.GetId()), - } + return meh.NewError(arg.GetId(), err.Error()) } } return nil diff --git a/vendor/github.com/google/cel-go/ext/native.go b/vendor/github.com/google/cel-go/ext/native.go index acbc44b6d..0b5fc38ca 100644 --- a/vendor/github.com/google/cel-go/ext/native.go +++ b/vendor/github.com/google/cel-go/ext/native.go @@ -24,13 +24,11 @@ import ( "google.golang.org/protobuf/reflect/protoreflect" "github.com/google/cel-go/cel" - "github.com/google/cel-go/checker/decls" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/pb" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" structpb "google.golang.org/protobuf/types/known/structpb" ) @@ -81,7 +79,7 @@ var ( // the time that it is invoked. func NativeTypes(refTypes ...any) cel.EnvOption { return func(env *cel.Env) (*cel.Env, error) { - tp, err := newNativeTypeProvider(env.TypeAdapter(), env.TypeProvider(), refTypes...) + tp, err := newNativeTypeProvider(env.CELTypeAdapter(), env.CELTypeProvider(), refTypes...) 
if err != nil { return nil, err } @@ -93,7 +91,7 @@ func NativeTypes(refTypes ...any) cel.EnvOption { } } -func newNativeTypeProvider(adapter ref.TypeAdapter, provider ref.TypeProvider, refTypes ...any) (*nativeTypeProvider, error) { +func newNativeTypeProvider(adapter types.Adapter, provider types.Provider, refTypes ...any) (*nativeTypeProvider, error) { nativeTypes := make(map[string]*nativeType, len(refTypes)) for _, refType := range refTypes { switch rt := refType.(type) { @@ -122,18 +120,18 @@ func newNativeTypeProvider(adapter ref.TypeAdapter, provider ref.TypeProvider, r type nativeTypeProvider struct { nativeTypes map[string]*nativeType - baseAdapter ref.TypeAdapter - baseProvider ref.TypeProvider + baseAdapter types.Adapter + baseProvider types.Provider } -// EnumValue proxies to the ref.TypeProvider configured at the times the NativeTypes +// EnumValue proxies to the types.Provider configured at the times the NativeTypes // option was configured. func (tp *nativeTypeProvider) EnumValue(enumName string) ref.Val { return tp.baseProvider.EnumValue(enumName) } // FindIdent looks up natives type instances by qualified identifier, and if not found -// proxies to the composed ref.TypeProvider. +// proxies to the composed types.Provider. func (tp *nativeTypeProvider) FindIdent(typeName string) (ref.Val, bool) { if t, found := tp.nativeTypes[typeName]; found { return t, true @@ -141,32 +139,35 @@ func (tp *nativeTypeProvider) FindIdent(typeName string) (ref.Val, bool) { return tp.baseProvider.FindIdent(typeName) } -// FindType looks up CEL type-checker type definition by qualified identifier, and if not found -// proxies to the composed ref.TypeProvider. -func (tp *nativeTypeProvider) FindType(typeName string) (*exprpb.Type, bool) { +// FindStructType looks up the CEL type definition by qualified identifier, and if not found +// proxies to the composed types.Provider. 
+func (tp *nativeTypeProvider) FindStructType(typeName string) (*types.Type, bool) { if _, found := tp.nativeTypes[typeName]; found { - return decls.NewTypeType(decls.NewObjectType(typeName)), true + return types.NewTypeTypeWithParam(types.NewObjectType(typeName)), true } - return tp.baseProvider.FindType(typeName) + if celType, found := tp.baseProvider.FindStructType(typeName); found { + return celType, true + } + return tp.baseProvider.FindStructType(typeName) } -// FindFieldType looks up a native type's field definition, and if the type name is not a native -// type then proxies to the composed ref.TypeProvider -func (tp *nativeTypeProvider) FindFieldType(typeName, fieldName string) (*ref.FieldType, bool) { +// FindStructFieldType looks up a native type's field definition, and if the type name is not a native +// type then proxies to the composed types.Provider +func (tp *nativeTypeProvider) FindStructFieldType(typeName, fieldName string) (*types.FieldType, bool) { t, found := tp.nativeTypes[typeName] if !found { - return tp.baseProvider.FindFieldType(typeName, fieldName) + return tp.baseProvider.FindStructFieldType(typeName, fieldName) } refField, isDefined := t.hasField(fieldName) if !found || !isDefined { return nil, false } - exprType, ok := convertToExprType(refField.Type) + celType, ok := convertToCelType(refField.Type) if !ok { return nil, false } - return &ref.FieldType{ - Type: exprType, + return &types.FieldType{ + Type: celType, IsSet: func(obj any) bool { refVal := reflect.Indirect(reflect.ValueOf(obj)) refField := refVal.FieldByName(fieldName) @@ -243,75 +244,74 @@ func (tp *nativeTypeProvider) NativeToValue(val any) ref.Val { } } -// convertToExprType converts the Golang reflect.Type to a protobuf exprpb.Type. -func convertToExprType(refType reflect.Type) (*exprpb.Type, bool) { +func convertToCelType(refType reflect.Type) (*cel.Type, bool) { switch refType.Kind() { case reflect.Bool: - return decls.Bool, true + return cel.BoolType, true case reflect.Float32, reflect.Float64: - return decls.Double, true + return cel.DoubleType, true case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: if refType == durationType { - return decls.Duration, true + return cel.DurationType, true } - return decls.Int, true + return cel.IntType, true case reflect.String: - return decls.String, true + return cel.StringType, true case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return decls.Uint, true + return cel.UintType, true case reflect.Array, reflect.Slice: refElem := refType.Elem() if refElem == reflect.TypeOf(byte(0)) { - return decls.Bytes, true + return cel.BytesType, true } - elemType, ok := convertToExprType(refElem) + elemType, ok := convertToCelType(refElem) if !ok { return nil, false } - return decls.NewListType(elemType), true + return cel.ListType(elemType), true case reflect.Map: - keyType, ok := convertToExprType(refType.Key()) + keyType, ok := convertToCelType(refType.Key()) if !ok { return nil, false } // Ensure the key type is a int, bool, uint, string - elemType, ok := convertToExprType(refType.Elem()) + elemType, ok := convertToCelType(refType.Elem()) if !ok { return nil, false } - return decls.NewMapType(keyType, elemType), true + return cel.MapType(keyType, elemType), true case reflect.Struct: if refType == timestampType { - return decls.Timestamp, true + return cel.TimestampType, true } - return decls.NewObjectType( + return cel.ObjectType( fmt.Sprintf("%s.%s", simplePkgAlias(refType.PkgPath()), refType.Name()), ), 
true case reflect.Pointer: if refType.Implements(pbMsgInterfaceType) { pbMsg := reflect.New(refType.Elem()).Interface().(protoreflect.ProtoMessage) - return decls.NewObjectType(string(pbMsg.ProtoReflect().Descriptor().FullName())), true + return cel.ObjectType(string(pbMsg.ProtoReflect().Descriptor().FullName())), true } - return convertToExprType(refType.Elem()) + return convertToCelType(refType.Elem()) } return nil, false } -func newNativeObject(adapter ref.TypeAdapter, val any, refValue reflect.Value) ref.Val { +func newNativeObject(adapter types.Adapter, val any, refValue reflect.Value) ref.Val { valType, err := newNativeType(refValue.Type()) if err != nil { return types.NewErr(err.Error()) } return &nativeObj{ - TypeAdapter: adapter, - val: val, - valType: valType, - refValue: refValue, + Adapter: adapter, + val: val, + valType: valType, + refValue: refValue, } } type nativeObj struct { - ref.TypeAdapter + types.Adapter val any valType *nativeType refValue reflect.Value @@ -520,11 +520,11 @@ func (t *nativeType) hasField(fieldName string) (reflect.StructField, bool) { return f, true } -func adaptFieldValue(adapter ref.TypeAdapter, refField reflect.Value) ref.Val { +func adaptFieldValue(adapter types.Adapter, refField reflect.Value) ref.Val { return adapter.NativeToValue(getFieldValue(adapter, refField)) } -func getFieldValue(adapter ref.TypeAdapter, refField reflect.Value) any { +func getFieldValue(adapter types.Adapter, refField reflect.Value) any { if refField.IsZero() { switch refField.Kind() { case reflect.Array, reflect.Slice: diff --git a/vendor/github.com/google/cel-go/ext/protos.go b/vendor/github.com/google/cel-go/ext/protos.go index b905e710c..a7ca27a6a 100644 --- a/vendor/github.com/google/cel-go/ext/protos.go +++ b/vendor/github.com/google/cel-go/ext/protos.go @@ -16,7 +16,6 @@ package ext import ( "github.com/google/cel-go/cel" - "github.com/google/cel-go/common" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) @@ -86,7 +85,7 @@ func (protoLib) ProgramOptions() []cel.ProgramOption { } // hasProtoExt generates a test-only select expression for a fully-qualified extension name on a protobuf message. -func hasProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { +func hasProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) { if !macroTargetMatchesNamespace(protoNamespace, target) { return nil, nil } @@ -98,7 +97,7 @@ func hasProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Ex } // getProtoExt generates a select expression for a fully-qualified extension name on a protobuf message. 
-func getProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { +func getProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) { if !macroTargetMatchesNamespace(protoNamespace, target) { return nil, nil } @@ -109,7 +108,7 @@ func getProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Ex return meh.Select(args[0], extFieldName), nil } -func getExtFieldName(meh cel.MacroExprHelper, expr *exprpb.Expr) (string, *common.Error) { +func getExtFieldName(meh cel.MacroExprHelper, expr *exprpb.Expr) (string, *cel.Error) { isValid := false extensionField := "" switch expr.GetExprKind().(type) { @@ -117,10 +116,7 @@ func getExtFieldName(meh cel.MacroExprHelper, expr *exprpb.Expr) (string, *commo extensionField, isValid = validateIdentifier(expr) } if !isValid { - return "", &common.Error{ - Message: "invalid extension field", - Location: meh.OffsetLocation(expr.GetId()), - } + return "", meh.NewError(expr.GetId(), "invalid extension field") } return extensionField, nil } diff --git a/vendor/github.com/google/cel-go/ext/sets.go b/vendor/github.com/google/cel-go/ext/sets.go index 4820d6199..833c15f61 100644 --- a/vendor/github.com/google/cel-go/ext/sets.go +++ b/vendor/github.com/google/cel-go/ext/sets.go @@ -15,10 +15,14 @@ package ext import ( + "math" + "github.com/google/cel-go/cel" + "github.com/google/cel-go/checker" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" + "github.com/google/cel-go/interpreter" ) // Sets returns a cel.EnvOption to configure namespaced set relationship @@ -95,12 +99,24 @@ func (setsLib) CompileOptions() []cel.EnvOption { cel.Function("sets.intersects", cel.Overload("list_sets_intersects_list", []*cel.Type{listType, listType}, cel.BoolType, cel.BinaryBinding(setsIntersects))), + cel.CostEstimatorOptions( + checker.OverloadCostEstimate("list_sets_contains_list", estimateSetsCost(1)), + checker.OverloadCostEstimate("list_sets_intersects_list", estimateSetsCost(1)), + // equivalence requires potentially two m*n comparisons to ensure each list is contained by the other + checker.OverloadCostEstimate("list_sets_equivalent_list", estimateSetsCost(2)), + ), } } // ProgramOptions implements the Library interface method. 
func (setsLib) ProgramOptions() []cel.ProgramOption { - return []cel.ProgramOption{} + return []cel.ProgramOption{ + cel.CostTrackerOptions( + interpreter.OverloadCostTracker("list_sets_contains_list", trackSetsCost(1)), + interpreter.OverloadCostTracker("list_sets_intersects_list", trackSetsCost(1)), + interpreter.OverloadCostTracker("list_sets_equivalent_list", trackSetsCost(2)), + ), + } } func setsIntersects(listA, listB ref.Val) ref.Val { @@ -136,3 +152,46 @@ func setsEquivalent(listA, listB ref.Val) ref.Val { } return setsContains(listB, listA) } + +func estimateSetsCost(costFactor float64) checker.FunctionEstimator { + return func(estimator checker.CostEstimator, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate { + if len(args) == 2 { + arg0Size := estimateSize(estimator, args[0]) + arg1Size := estimateSize(estimator, args[1]) + costEstimate := arg0Size.Multiply(arg1Size).MultiplyByCostFactor(costFactor).Add(callCostEstimate) + return &checker.CallEstimate{CostEstimate: costEstimate} + } + return nil + } +} + +func estimateSize(estimator checker.CostEstimator, node checker.AstNode) checker.SizeEstimate { + if l := node.ComputedSize(); l != nil { + return *l + } + if l := estimator.EstimateSize(node); l != nil { + return *l + } + return checker.SizeEstimate{Min: 0, Max: math.MaxUint64} +} + +func trackSetsCost(costFactor float64) interpreter.FunctionTracker { + return func(args []ref.Val, _ ref.Val) *uint64 { + lhsSize := actualSize(args[0]) + rhsSize := actualSize(args[1]) + cost := callCost + uint64(float64(lhsSize*rhsSize)*costFactor) + return &cost + } +} + +func actualSize(value ref.Val) uint64 { + if sz, ok := value.(traits.Sizer); ok { + return uint64(sz.Size().(types.Int)) + } + return 1 +} + +var ( + callCostEstimate = checker.CostEstimate{Min: 1, Max: 1} + callCost = uint64(1) +) diff --git a/vendor/github.com/google/cel-go/ext/strings.go b/vendor/github.com/google/cel-go/ext/strings.go index 8455d5829..88c119f2b 100644 --- a/vendor/github.com/google/cel-go/ext/strings.go +++ b/vendor/github.com/google/cel-go/ext/strings.go @@ -173,7 +173,7 @@ const ( // 'TacoCat'.lowerAscii() // returns 'tacocat' // 'TacoCÆt Xii'.lowerAscii() // returns 'tacocÆt xii' // -// # Quote +// # Strings.Quote // // Introduced in version: 1 // @@ -301,26 +301,28 @@ func StringsLocale(locale string) StringsOption { } } -// StringsVersion configures the version of the string library. The version limits which -// functions are available. Only functions introduced below or equal to the given -// version included in the library. See the library documentation to determine -// which version a function was introduced at. If the documentation does not -// state which version a function was introduced at, it can be assumed to be -// introduced at version 0, when the library was first created. -// If this option is not set, all functions are available. -func StringsVersion(version uint32) func(lib *stringLib) *stringLib { - return func(sl *stringLib) *stringLib { - sl.version = version - return sl +// StringsVersion configures the version of the string library. +// +// The version limits which functions are available. Only functions introduced +// below or equal to the given version included in the library. If this option +// is not set, all functions are available. +// +// See the library documentation to determine which version a function was introduced. 
+// If the documentation does not state which version a function was introduced, it can +// be assumed to be introduced at version 0, when the library was first created. +func StringsVersion(version uint32) StringsOption { + return func(lib *stringLib) *stringLib { + lib.version = version + return lib } } // CompileOptions implements the Library interface method. -func (sl *stringLib) CompileOptions() []cel.EnvOption { +func (lib *stringLib) CompileOptions() []cel.EnvOption { formatLocale := "en_US" - if sl.locale != "" { + if lib.locale != "" { // ensure locale is properly-formed if set - _, err := language.Parse(sl.locale) + _, err := language.Parse(lib.locale) if err != nil { return []cel.EnvOption{ func(e *cel.Env) (*cel.Env, error) { @@ -328,7 +330,7 @@ func (sl *stringLib) CompileOptions() []cel.EnvOption { }, } } - formatLocale = sl.locale + formatLocale = lib.locale } opts := []cel.EnvOption{ @@ -432,7 +434,7 @@ func (sl *stringLib) CompileOptions() []cel.EnvOption { return stringOrError(upperASCII(string(s))) }))), } - if sl.version >= 1 { + if lib.version >= 1 { opts = append(opts, cel.Function("format", cel.MemberOverload("string_format", []*cel.Type{cel.StringType, cel.ListType(cel.DynType)}, cel.StringType, cel.FunctionBinding(func(args ...ref.Val) ref.Val { @@ -447,7 +449,7 @@ func (sl *stringLib) CompileOptions() []cel.EnvOption { })))) } - if sl.version >= 2 { + if lib.version >= 2 { opts = append(opts, cel.Function("join", cel.MemberOverload("list_join", []*cel.Type{cel.ListType(cel.StringType)}, cel.StringType, diff --git a/vendor/github.com/google/cel-go/interpreter/BUILD.bazel b/vendor/github.com/google/cel-go/interpreter/BUILD.bazel index b6d04e000..3a5219eb5 100644 --- a/vendor/github.com/google/cel-go/interpreter/BUILD.bazel +++ b/vendor/github.com/google/cel-go/interpreter/BUILD.bazel @@ -25,13 +25,14 @@ go_library( importpath = "github.com/google/cel-go/interpreter", deps = [ "//common:go_default_library", + "//common/ast:go_default_library", "//common/containers:go_default_library", + "//common/functions:go_default_library", "//common/operators:go_default_library", "//common/overloads:go_default_library", "//common/types:go_default_library", "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", - "//interpreter/functions:go_default_library", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//types/known/durationpb:go_default_library", @@ -56,12 +57,13 @@ go_test( ], deps = [ "//checker:go_default_library", - "//checker/decls:go_default_library", "//common/containers:go_default_library", "//common/debug:go_default_library", + "//common/decls:go_default_library", + "//common/functions:go_default_library", "//common/operators:go_default_library", + "//common/stdlib:go_default_library", "//common/types:go_default_library", - "//interpreter/functions:go_default_library", "//parser:go_default_library", "//test:go_default_library", "//test/proto2pb:go_default_library", diff --git a/vendor/github.com/google/cel-go/interpreter/activation.go b/vendor/github.com/google/cel-go/interpreter/activation.go index f82e4e903..a80264451 100644 --- a/vendor/github.com/google/cel-go/interpreter/activation.go +++ b/vendor/github.com/google/cel-go/interpreter/activation.go @@ -58,7 +58,7 @@ func (emptyActivation) Parent() Activation { return nil } // The output of the lazy binding will overwrite the variable reference in the internal map. 
// // Values which are not represented as ref.Val types on input may be adapted to a ref.Val using -// the ref.TypeAdapter configured in the environment. +// the types.Adapter configured in the environment. func NewActivation(bindings any) (Activation, error) { if bindings == nil { return nil, errors.New("bindings must be non-nil") diff --git a/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go b/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go index afb7c8d5b..1fbaaf17e 100644 --- a/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go +++ b/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go @@ -15,6 +15,8 @@ package interpreter import ( + "fmt" + "github.com/google/cel-go/common/containers" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" @@ -177,8 +179,8 @@ func numericValueEquals(value any, celValue ref.Val) bool { // NewPartialAttributeFactory returns an AttributeFactory implementation capable of performing // AttributePattern matches with PartialActivation inputs. func NewPartialAttributeFactory(container *containers.Container, - adapter ref.TypeAdapter, - provider ref.TypeProvider) AttributeFactory { + adapter types.Adapter, + provider types.Provider) AttributeFactory { fac := NewAttributeFactory(container, adapter, provider) return &partialAttributeFactory{ AttributeFactory: fac, @@ -191,8 +193,8 @@ func NewPartialAttributeFactory(container *containers.Container, type partialAttributeFactory struct { AttributeFactory container *containers.Container - adapter ref.TypeAdapter - provider ref.TypeProvider + adapter types.Adapter + provider types.Provider } // AbsoluteAttribute implementation of the AttributeFactory interface which wraps the @@ -241,12 +243,15 @@ func (fac *partialAttributeFactory) matchesUnknownPatterns( vars PartialActivation, attrID int64, variableNames []string, - qualifiers []Qualifier) (types.Unknown, error) { + qualifiers []Qualifier) (*types.Unknown, error) { patterns := vars.UnknownAttributePatterns() candidateIndices := map[int]struct{}{} for _, variable := range variableNames { for i, pat := range patterns { if pat.VariableMatches(variable) { + if len(qualifiers) == 0 { + return types.NewUnknown(attrID, types.NewAttributeTrail(variable)), nil + } candidateIndices[i] = struct{}{} } } @@ -255,10 +260,6 @@ func (fac *partialAttributeFactory) matchesUnknownPatterns( if len(candidateIndices) == 0 { return nil, nil } - // Determine whether to return early if there are no qualifiers. - if len(qualifiers) == 0 { - return types.Unknown{attrID}, nil - } // Resolve the attribute qualifiers into a static set. This prevents more dynamic // Attribute resolutions than necessary when there are multiple unknown patterns // that traverse the same Attribute-based qualifier field. 
@@ -300,7 +301,28 @@ func (fac *partialAttributeFactory) matchesUnknownPatterns( } } if isUnk { - return types.Unknown{matchExprID}, nil + attr := types.NewAttributeTrail(pat.variable) + for i := 0; i < len(qualPats) && i < len(newQuals); i++ { + if qual, ok := newQuals[i].(ConstantQualifier); ok { + switch v := qual.Value().Value().(type) { + case bool: + types.QualifyAttribute[bool](attr, v) + case float64: + types.QualifyAttribute[int64](attr, int64(v)) + case int64: + types.QualifyAttribute[int64](attr, v) + case string: + types.QualifyAttribute[string](attr, v) + case uint64: + types.QualifyAttribute[uint64](attr, v) + default: + types.QualifyAttribute[string](attr, fmt.Sprintf("%v", v)) + } + } else { + types.QualifyAttribute[string](attr, "*") + } + } + return types.NewUnknown(matchExprID, attr), nil } } return nil, nil diff --git a/vendor/github.com/google/cel-go/interpreter/attributes.go b/vendor/github.com/google/cel-go/interpreter/attributes.go index 1b19dc2b5..ca97bdfcf 100644 --- a/vendor/github.com/google/cel-go/interpreter/attributes.go +++ b/vendor/github.com/google/cel-go/interpreter/attributes.go @@ -22,8 +22,6 @@ import ( "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) // AttributeFactory provides methods creating Attribute and Qualifier values. @@ -61,7 +59,7 @@ type AttributeFactory interface { // The qualifier may consider the object type being qualified, if present. If absent, the // qualification should be considered dynamic and the qualification should still work, though // it may be sub-optimal. - NewQualifier(objType *exprpb.Type, qualID int64, val any, opt bool) (Qualifier, error) + NewQualifier(objType *types.Type, qualID int64, val any, opt bool) (Qualifier, error) } // Qualifier marker interface for designating different qualifier values and where they appear @@ -131,7 +129,7 @@ type NamespacedAttribute interface { // NewAttributeFactory returns a default AttributeFactory which is produces Attribute values // capable of resolving types by simple names and qualify the values using the supported qualifier // types: bool, int, string, and uint. -func NewAttributeFactory(cont *containers.Container, a ref.TypeAdapter, p ref.TypeProvider) AttributeFactory { +func NewAttributeFactory(cont *containers.Container, a types.Adapter, p types.Provider) AttributeFactory { return &attrFactory{ container: cont, adapter: a, @@ -141,8 +139,8 @@ func NewAttributeFactory(cont *containers.Container, a ref.TypeAdapter, p ref.Ty type attrFactory struct { container *containers.Container - adapter ref.TypeAdapter - provider ref.TypeProvider + adapter types.Adapter + provider types.Provider } // AbsoluteAttribute refers to a variable value and an optional qualifier path. @@ -199,13 +197,13 @@ func (r *attrFactory) RelativeAttribute(id int64, operand Interpretable) Attribu } // NewQualifier is an implementation of the AttributeFactory interface. -func (r *attrFactory) NewQualifier(objType *exprpb.Type, qualID int64, val any, opt bool) (Qualifier, error) { +func (r *attrFactory) NewQualifier(objType *types.Type, qualID int64, val any, opt bool) (Qualifier, error) { // Before creating a new qualifier check to see if this is a protobuf message field access. // If so, use the precomputed GetFrom qualification method rather than the standard // stringQualifier. 
str, isStr := val.(string) - if isStr && objType != nil && objType.GetMessageType() != "" { - ft, found := r.provider.FindFieldType(objType.GetMessageType(), str) + if isStr && objType != nil && objType.Kind() == types.StructKind { + ft, found := r.provider.FindStructFieldType(objType.TypeName(), str) if found && ft.IsSet != nil && ft.GetFrom != nil { return &fieldQualifier{ id: qualID, @@ -225,8 +223,8 @@ type absoluteAttribute struct { // (package) of the expression. namespaceNames []string qualifiers []Qualifier - adapter ref.TypeAdapter - provider ref.TypeProvider + adapter types.Adapter + provider types.Provider fac AttributeFactory } @@ -325,7 +323,7 @@ type conditionalAttribute struct { expr Interpretable truthy Attribute falsy Attribute - adapter ref.TypeAdapter + adapter types.Adapter fac AttributeFactory } @@ -393,8 +391,8 @@ func (a *conditionalAttribute) String() string { type maybeAttribute struct { id int64 attrs []NamespacedAttribute - adapter ref.TypeAdapter - provider ref.TypeProvider + adapter types.Adapter + provider types.Provider fac AttributeFactory } @@ -511,7 +509,7 @@ type relativeAttribute struct { id int64 operand Interpretable qualifiers []Qualifier - adapter ref.TypeAdapter + adapter types.Adapter fac AttributeFactory } @@ -576,7 +574,7 @@ func (a *relativeAttribute) String() string { return fmt.Sprintf("id: %v, operand: %v", a.id, a.operand) } -func newQualifier(adapter ref.TypeAdapter, id int64, v any, opt bool) (Qualifier, error) { +func newQualifier(adapter types.Adapter, id int64, v any, opt bool) (Qualifier, error) { var qual Qualifier switch val := v.(type) { case Attribute: @@ -657,7 +655,7 @@ func newQualifier(adapter ref.TypeAdapter, id int64, v any, opt bool) (Qualifier qual = &doubleQualifier{ id: id, value: float64(val), celValue: val, adapter: adapter, optional: opt, } - case types.Unknown: + case *types.Unknown: qual = &unknownQualifier{id: id, value: val} default: if q, ok := v.(Qualifier); ok { @@ -689,7 +687,7 @@ type stringQualifier struct { id int64 value string celValue ref.Val - adapter ref.TypeAdapter + adapter types.Adapter optional bool } @@ -790,7 +788,7 @@ type intQualifier struct { id int64 value int64 celValue ref.Val - adapter ref.TypeAdapter + adapter types.Adapter optional bool } @@ -917,7 +915,7 @@ type uintQualifier struct { id int64 value uint64 celValue ref.Val - adapter ref.TypeAdapter + adapter types.Adapter optional bool } @@ -982,7 +980,7 @@ type boolQualifier struct { id int64 value bool celValue ref.Val - adapter ref.TypeAdapter + adapter types.Adapter optional bool } @@ -1035,8 +1033,8 @@ func (q *boolQualifier) Value() ref.Val { type fieldQualifier struct { id int64 Name string - FieldType *ref.FieldType - adapter ref.TypeAdapter + FieldType *types.FieldType + adapter types.Adapter optional bool } @@ -1094,7 +1092,7 @@ type doubleQualifier struct { id int64 value float64 celValue ref.Val - adapter ref.TypeAdapter + adapter types.Adapter optional bool } @@ -1131,7 +1129,7 @@ func (q *doubleQualifier) Value() ref.Val { // for any value subject to qualification. This is consistent with CEL's unknown handling elsewhere. type unknownQualifier struct { id int64 - value types.Unknown + value *types.Unknown } // ID is an implementation of the Qualifier interface method. 
@@ -1225,10 +1223,10 @@ func attrQualifyIfPresent(fac AttributeFactory, vars Activation, obj any, qualAt // refQualify attempts to convert the value to a CEL value and then uses reflection methods to try and // apply the qualifier with the option to presence test field accesses before retrieving field values. -func refQualify(adapter ref.TypeAdapter, obj any, idx ref.Val, presenceTest, presenceOnly bool) (ref.Val, bool, error) { +func refQualify(adapter types.Adapter, obj any, idx ref.Val, presenceTest, presenceOnly bool) (ref.Val, bool, error) { celVal := adapter.NativeToValue(obj) switch v := celVal.(type) { - case types.Unknown: + case *types.Unknown: return v, true, nil case *types.Err: return nil, false, v diff --git a/vendor/github.com/google/cel-go/interpreter/decorators.go b/vendor/github.com/google/cel-go/interpreter/decorators.go index 208487b7d..502db35fc 100644 --- a/vendor/github.com/google/cel-go/interpreter/decorators.go +++ b/vendor/github.com/google/cel-go/interpreter/decorators.go @@ -75,15 +75,13 @@ func decDisableShortcircuits() InterpretableDecorator { switch expr := i.(type) { case *evalOr: return &evalExhaustiveOr{ - id: expr.id, - lhs: expr.lhs, - rhs: expr.rhs, + id: expr.id, + terms: expr.terms, }, nil case *evalAnd: return &evalExhaustiveAnd{ - id: expr.id, - lhs: expr.lhs, - rhs: expr.rhs, + id: expr.id, + terms: expr.terms, }, nil case *evalFold: expr.exhaustive = true diff --git a/vendor/github.com/google/cel-go/interpreter/dispatcher.go b/vendor/github.com/google/cel-go/interpreter/dispatcher.go index febf9d8a8..8f0bdb7b8 100644 --- a/vendor/github.com/google/cel-go/interpreter/dispatcher.go +++ b/vendor/github.com/google/cel-go/interpreter/dispatcher.go @@ -17,7 +17,7 @@ package interpreter import ( "fmt" - "github.com/google/cel-go/interpreter/functions" + "github.com/google/cel-go/common/functions" ) // Dispatcher resolves function calls to their appropriate overload. diff --git a/vendor/github.com/google/cel-go/interpreter/evalstate.go b/vendor/github.com/google/cel-go/interpreter/evalstate.go index cc0d3e6f9..4bdd1fdc7 100644 --- a/vendor/github.com/google/cel-go/interpreter/evalstate.go +++ b/vendor/github.com/google/cel-go/interpreter/evalstate.go @@ -66,7 +66,11 @@ func (s *evalState) Value(exprID int64) (ref.Val, bool) { // SetValue is an implementation of the EvalState interface method. func (s *evalState) SetValue(exprID int64, val ref.Val) { - s.values[exprID] = val + if val == nil { + delete(s.values, exprID) + } else { + s.values[exprID] = val + } } // Reset implements the EvalState interface method. diff --git a/vendor/github.com/google/cel-go/interpreter/formatting.go b/vendor/github.com/google/cel-go/interpreter/formatting.go index 6a98f6fa5..e3f753374 100644 --- a/vendor/github.com/google/cel-go/interpreter/formatting.go +++ b/vendor/github.com/google/cel-go/interpreter/formatting.go @@ -25,7 +25,7 @@ import ( "github.com/google/cel-go/common/types/ref" ) -type typeVerifier func(int64, ...*types.TypeValue) (bool, error) +type typeVerifier func(int64, ...ref.Type) (bool, error) // InterpolateFormattedString checks the syntax and cardinality of any string.format calls present in the expression and reports // any errors at compile time. 
diff --git a/vendor/github.com/google/cel-go/interpreter/functions/BUILD.bazel b/vendor/github.com/google/cel-go/interpreter/functions/BUILD.bazel index 846d11bf4..4a80c3ea0 100644 --- a/vendor/github.com/google/cel-go/interpreter/functions/BUILD.bazel +++ b/vendor/github.com/google/cel-go/interpreter/functions/BUILD.bazel @@ -7,16 +7,11 @@ package( go_library( name = "go_default_library", - srcs = [ + srcs = [ "functions.go", - "standard.go", ], importpath = "github.com/google/cel-go/interpreter/functions", deps = [ - "//common/operators:go_default_library", - "//common/overloads:go_default_library", - "//common/types:go_default_library", - "//common/types/ref:go_default_library", - "//common/types/traits:go_default_library", + "//common/functions:go_default_library", ], ) diff --git a/vendor/github.com/google/cel-go/interpreter/functions/functions.go b/vendor/github.com/google/cel-go/interpreter/functions/functions.go index 981601752..21ffb6924 100644 --- a/vendor/github.com/google/cel-go/interpreter/functions/functions.go +++ b/vendor/github.com/google/cel-go/interpreter/functions/functions.go @@ -16,7 +16,7 @@ // interpreter and as declared within the checker#StandardDeclarations. package functions -import "github.com/google/cel-go/common/types/ref" +import fn "github.com/google/cel-go/common/functions" // Overload defines a named overload of a function, indicating an operand trait // which must be present on the first argument to the overload as well as one @@ -26,37 +26,14 @@ import "github.com/google/cel-go/common/types/ref" // and the specializations simplify the call contract for implementers of // types with operator overloads. Any added complexity is assumed to be handled // by the generic FunctionOp. -type Overload struct { - // Operator name as written in an expression or defined within - // operators.go. - Operator string - - // Operand trait used to dispatch the call. The zero-value indicates a - // global function overload or that one of the Unary / Binary / Function - // definitions should be used to execute the call. - OperandTrait int - - // Unary defines the overload with a UnaryOp implementation. May be nil. - Unary UnaryOp - - // Binary defines the overload with a BinaryOp implementation. May be nil. - Binary BinaryOp - - // Function defines the overload with a FunctionOp implementation. May be - // nil. - Function FunctionOp - - // NonStrict specifies whether the Overload will tolerate arguments that - // are types.Err or types.Unknown. - NonStrict bool -} +type Overload = fn.Overload // UnaryOp is a function that takes a single value and produces an output. -type UnaryOp func(value ref.Val) ref.Val +type UnaryOp = fn.UnaryOp // BinaryOp is a function that takes two values and produces an output. -type BinaryOp func(lhs ref.Val, rhs ref.Val) ref.Val +type BinaryOp = fn.BinaryOp // FunctionOp is a function with accepts zero or more arguments and produces // a value or error as a result. -type FunctionOp func(values ...ref.Val) ref.Val +type FunctionOp = fn.FunctionOp diff --git a/vendor/github.com/google/cel-go/interpreter/functions/standard.go b/vendor/github.com/google/cel-go/interpreter/functions/standard.go deleted file mode 100644 index 73e936114..000000000 --- a/vendor/github.com/google/cel-go/interpreter/functions/standard.go +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package functions - -import ( - "github.com/google/cel-go/common/operators" - "github.com/google/cel-go/common/overloads" - "github.com/google/cel-go/common/types" - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" -) - -// StandardOverloads returns the definitions of the built-in overloads. -func StandardOverloads() []*Overload { - return []*Overload{ - // Logical not (!a) - { - Operator: operators.LogicalNot, - OperandTrait: traits.NegatorType, - Unary: func(value ref.Val) ref.Val { - if !types.IsBool(value) { - return types.ValOrErr(value, "no such overload") - } - return value.(traits.Negater).Negate() - }}, - // Not strictly false: IsBool(a) ? a : true - { - Operator: operators.NotStrictlyFalse, - Unary: notStrictlyFalse}, - // Deprecated: not strictly false, may be overridden in the environment. - { - Operator: operators.OldNotStrictlyFalse, - Unary: notStrictlyFalse}, - - // Less than operator - {Operator: operators.Less, - OperandTrait: traits.ComparerType, - Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { - cmp := lhs.(traits.Comparer).Compare(rhs) - if cmp == types.IntNegOne { - return types.True - } - if cmp == types.IntOne || cmp == types.IntZero { - return types.False - } - return cmp - }}, - - // Less than or equal operator - {Operator: operators.LessEquals, - OperandTrait: traits.ComparerType, - Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { - cmp := lhs.(traits.Comparer).Compare(rhs) - if cmp == types.IntNegOne || cmp == types.IntZero { - return types.True - } - if cmp == types.IntOne { - return types.False - } - return cmp - }}, - - // Greater than operator - {Operator: operators.Greater, - OperandTrait: traits.ComparerType, - Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { - cmp := lhs.(traits.Comparer).Compare(rhs) - if cmp == types.IntOne { - return types.True - } - if cmp == types.IntNegOne || cmp == types.IntZero { - return types.False - } - return cmp - }}, - - // Greater than equal operators - {Operator: operators.GreaterEquals, - OperandTrait: traits.ComparerType, - Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { - cmp := lhs.(traits.Comparer).Compare(rhs) - if cmp == types.IntOne || cmp == types.IntZero { - return types.True - } - if cmp == types.IntNegOne { - return types.False - } - return cmp - }}, - - // Add operator - {Operator: operators.Add, - OperandTrait: traits.AdderType, - Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { - return lhs.(traits.Adder).Add(rhs) - }}, - - // Subtract operators - {Operator: operators.Subtract, - OperandTrait: traits.SubtractorType, - Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { - return lhs.(traits.Subtractor).Subtract(rhs) - }}, - - // Multiply operator - {Operator: operators.Multiply, - OperandTrait: traits.MultiplierType, - Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { - return lhs.(traits.Multiplier).Multiply(rhs) - }}, - - // Divide operator - {Operator: operators.Divide, - OperandTrait: traits.DividerType, - Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { - return lhs.(traits.Divider).Divide(rhs) - }}, - - // Modulo operator - {Operator: 
operators.Modulo, - OperandTrait: traits.ModderType, - Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { - return lhs.(traits.Modder).Modulo(rhs) - }}, - - // Negate operator - {Operator: operators.Negate, - OperandTrait: traits.NegatorType, - Unary: func(value ref.Val) ref.Val { - if types.IsBool(value) { - return types.ValOrErr(value, "no such overload") - } - return value.(traits.Negater).Negate() - }}, - - // Index operator - {Operator: operators.Index, - OperandTrait: traits.IndexerType, - Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { - return lhs.(traits.Indexer).Get(rhs) - }}, - - // Size function - {Operator: overloads.Size, - OperandTrait: traits.SizerType, - Unary: func(value ref.Val) ref.Val { - return value.(traits.Sizer).Size() - }}, - - // In operator - {Operator: operators.In, Binary: inAggregate}, - // Deprecated: in operator, may be overridden in the environment. - {Operator: operators.OldIn, Binary: inAggregate}, - - // Matches function - {Operator: overloads.Matches, - OperandTrait: traits.MatcherType, - Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { - return lhs.(traits.Matcher).Match(rhs) - }}, - - // Type conversion functions - // TODO: verify type conversion safety of numeric values. - - // Int conversions. - {Operator: overloads.TypeConvertInt, - Unary: func(value ref.Val) ref.Val { - return value.ConvertToType(types.IntType) - }}, - - // Uint conversions. - {Operator: overloads.TypeConvertUint, - Unary: func(value ref.Val) ref.Val { - return value.ConvertToType(types.UintType) - }}, - - // Double conversions. - {Operator: overloads.TypeConvertDouble, - Unary: func(value ref.Val) ref.Val { - return value.ConvertToType(types.DoubleType) - }}, - - // Bool conversions. - {Operator: overloads.TypeConvertBool, - Unary: func(value ref.Val) ref.Val { - return value.ConvertToType(types.BoolType) - }}, - - // Bytes conversions. - {Operator: overloads.TypeConvertBytes, - Unary: func(value ref.Val) ref.Val { - return value.ConvertToType(types.BytesType) - }}, - - // String conversions. - {Operator: overloads.TypeConvertString, - Unary: func(value ref.Val) ref.Val { - return value.ConvertToType(types.StringType) - }}, - - // Timestamp conversions. - {Operator: overloads.TypeConvertTimestamp, - Unary: func(value ref.Val) ref.Val { - return value.ConvertToType(types.TimestampType) - }}, - - // Duration conversions. - {Operator: overloads.TypeConvertDuration, - Unary: func(value ref.Val) ref.Val { - return value.ConvertToType(types.DurationType) - }}, - - // Type operations. - {Operator: overloads.TypeConvertType, - Unary: func(value ref.Val) ref.Val { - return value.ConvertToType(types.TypeType) - }}, - - // Dyn conversion (identity function). 
- {Operator: overloads.TypeConvertDyn, - Unary: func(value ref.Val) ref.Val { - return value - }}, - - {Operator: overloads.Iterator, - OperandTrait: traits.IterableType, - Unary: func(value ref.Val) ref.Val { - return value.(traits.Iterable).Iterator() - }}, - - {Operator: overloads.HasNext, - OperandTrait: traits.IteratorType, - Unary: func(value ref.Val) ref.Val { - return value.(traits.Iterator).HasNext() - }}, - - {Operator: overloads.Next, - OperandTrait: traits.IteratorType, - Unary: func(value ref.Val) ref.Val { - return value.(traits.Iterator).Next() - }}, - } - -} - -func notStrictlyFalse(value ref.Val) ref.Val { - if types.IsBool(value) { - return value - } - return types.True -} - -func inAggregate(lhs ref.Val, rhs ref.Val) ref.Val { - if rhs.Type().HasTrait(traits.ContainerType) { - return rhs.(traits.Container).Contains(lhs) - } - return types.ValOrErr(rhs, "no such overload") -} diff --git a/vendor/github.com/google/cel-go/interpreter/interpretable.go b/vendor/github.com/google/cel-go/interpreter/interpretable.go index 32e2bcb7d..c4598dfa7 100644 --- a/vendor/github.com/google/cel-go/interpreter/interpretable.go +++ b/vendor/github.com/google/cel-go/interpreter/interpretable.go @@ -17,12 +17,12 @@ package interpreter import ( "fmt" + "github.com/google/cel-go/common/functions" "github.com/google/cel-go/common/operators" "github.com/google/cel-go/common/overloads" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" - "github.com/google/cel-go/interpreter/functions" ) // Interpretable can accept a given Activation and produce a value along with @@ -52,7 +52,7 @@ type InterpretableAttribute interface { Attr() Attribute // Adapter returns the type adapter to be used for adapting resolved Attribute values. - Adapter() ref.TypeAdapter + Adapter() types.Adapter // AddQualifier proxies the Attribute.AddQualifier method. // @@ -202,9 +202,8 @@ func (cons *evalConst) Value() ref.Val { } type evalOr struct { - id int64 - lhs Interpretable - rhs Interpretable + id int64 + terms []Interpretable } // ID implements the Interpretable interface method. @@ -214,41 +213,39 @@ func (or *evalOr) ID() int64 { // Eval implements the Interpretable interface method. func (or *evalOr) Eval(ctx Activation) ref.Val { - // short-circuit lhs. - lVal := or.lhs.Eval(ctx) - lBool, lok := lVal.(types.Bool) - if lok && lBool == types.True { - return types.True - } - // short-circuit on rhs. - rVal := or.rhs.Eval(ctx) - rBool, rok := rVal.(types.Bool) - if rok && rBool == types.True { - return types.True - } - // return if both sides are bool false. - if lok && rok { - return types.False + var err ref.Val = nil + var unk *types.Unknown + for _, term := range or.terms { + val := term.Eval(ctx) + boolVal, ok := val.(types.Bool) + // short-circuit on true. + if ok && boolVal == types.True { + return types.True + } + if !ok { + isUnk := false + unk, isUnk = types.MaybeMergeUnknowns(val, unk) + if !isUnk && err == nil { + if types.IsError(val) { + err = val + } else { + err = types.MaybeNoSuchOverloadErr(val) + } + } + } } - // TODO: return both values as a set if both are unknown or error. - // prefer left unknown to right unknown. - if types.IsUnknown(lVal) { - return lVal + if unk != nil { + return unk } - if types.IsUnknown(rVal) { - return rVal - } - // If the left-hand side is non-boolean return it as the error. 
- if types.IsError(lVal) { - return lVal + if err != nil { + return err } - return types.ValOrErr(rVal, "no such overload") + return types.False } type evalAnd struct { - id int64 - lhs Interpretable - rhs Interpretable + id int64 + terms []Interpretable } // ID implements the Interpretable interface method. @@ -258,35 +255,34 @@ func (and *evalAnd) ID() int64 { // Eval implements the Interpretable interface method. func (and *evalAnd) Eval(ctx Activation) ref.Val { - // short-circuit lhs. - lVal := and.lhs.Eval(ctx) - lBool, lok := lVal.(types.Bool) - if lok && lBool == types.False { - return types.False - } - // short-circuit on rhs. - rVal := and.rhs.Eval(ctx) - rBool, rok := rVal.(types.Bool) - if rok && rBool == types.False { - return types.False - } - // return if both sides are bool true. - if lok && rok { - return types.True - } - // TODO: return both values as a set if both are unknown or error. - // prefer left unknown to right unknown. - if types.IsUnknown(lVal) { - return lVal + var err ref.Val = nil + var unk *types.Unknown + for _, term := range and.terms { + val := term.Eval(ctx) + boolVal, ok := val.(types.Bool) + // short-circuit on false. + if ok && boolVal == types.False { + return types.False + } + if !ok { + isUnk := false + unk, isUnk = types.MaybeMergeUnknowns(val, unk) + if !isUnk && err == nil { + if types.IsError(val) { + err = val + } else { + err = types.MaybeNoSuchOverloadErr(val) + } + } + } } - if types.IsUnknown(rVal) { - return rVal + if unk != nil { + return unk } - // If the left-hand side is non-boolean return it as the error. - if types.IsError(lVal) { - return lVal + if err != nil { + return err } - return types.ValOrErr(rVal, "no such overload") + return types.True } type evalEq struct { @@ -579,7 +575,7 @@ type evalList struct { elems []Interpretable optionals []bool hasOptionals bool - adapter ref.TypeAdapter + adapter types.Adapter } // ID implements the Interpretable interface method. @@ -625,7 +621,7 @@ type evalMap struct { vals []Interpretable optionals []bool hasOptionals bool - adapter ref.TypeAdapter + adapter types.Adapter } // ID implements the Interpretable interface method. @@ -689,7 +685,7 @@ type evalObj struct { vals []Interpretable optionals []bool hasOptionals bool - provider ref.TypeProvider + provider types.Provider } // ID implements the Interpretable interface method. @@ -739,7 +735,7 @@ type evalFold struct { cond Interpretable step Interpretable result Interpretable - adapter ref.TypeAdapter + adapter types.Adapter exhaustive bool interruptable bool } @@ -865,18 +861,40 @@ type evalWatchAttr struct { // AddQualifier creates a wrapper over the incoming qualifier which observes the qualification // result. func (e *evalWatchAttr) AddQualifier(q Qualifier) (Attribute, error) { - cq, isConst := q.(ConstantQualifier) - if isConst { + switch qual := q.(type) { + // By default, the qualifier is either a constant or an attribute + // There may be some custom cases where the attribute is neither. + case ConstantQualifier: + // Expose a method to test whether the qualifier matches the input pattern. q = &evalWatchConstQual{ - ConstantQualifier: cq, + ConstantQualifier: qual, observer: e.observer, - adapter: e.InterpretableAttribute.Adapter(), + adapter: e.Adapter(), } - } else { + case *evalWatchAttr: + // Unwrap the evalWatchAttr since the observation will be applied during Qualify or + // QualifyIfPresent rather than Eval. 
+ q = &evalWatchAttrQual{ + Attribute: qual.InterpretableAttribute, + observer: e.observer, + adapter: e.Adapter(), + } + case Attribute: + // Expose methods which intercept the qualification prior to being applied as a qualifier. + // Using this interface ensures that the qualifier is converted to a constant value one + // time during attribute pattern matching as the method embeds the Attribute interface + // needed to trip the conversion to a constant. + q = &evalWatchAttrQual{ + Attribute: qual, + observer: e.observer, + adapter: e.Adapter(), + } + default: + // This is likely a custom qualifier type. q = &evalWatchQual{ - Qualifier: q, + Qualifier: qual, observer: e.observer, - adapter: e.InterpretableAttribute.Adapter(), + adapter: e.Adapter(), } } _, err := e.InterpretableAttribute.AddQualifier(q) @@ -895,7 +913,7 @@ func (e *evalWatchAttr) Eval(vars Activation) ref.Val { type evalWatchConstQual struct { ConstantQualifier observer EvalObserver - adapter ref.TypeAdapter + adapter types.Adapter } // Qualify observes the qualification of a object via a constant boolean, int, string, or uint. @@ -934,11 +952,48 @@ func (e *evalWatchConstQual) QualifierValueEquals(value any) bool { return ok && qve.QualifierValueEquals(value) } +// evalWatchAttrQual observes the qualification of an object by a value computed at runtime. +type evalWatchAttrQual struct { + Attribute + observer EvalObserver + adapter ref.TypeAdapter +} + +// Qualify observes the qualification of a object via a value computed at runtime. +func (e *evalWatchAttrQual) Qualify(vars Activation, obj any) (any, error) { + out, err := e.Attribute.Qualify(vars, obj) + var val ref.Val + if err != nil { + val = types.WrapErr(err) + } else { + val = e.adapter.NativeToValue(out) + } + e.observer(e.ID(), e.Attribute, val) + return out, err +} + +// QualifyIfPresent conditionally qualifies the variable and only records a value if one is present. +func (e *evalWatchAttrQual) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) { + out, present, err := e.Attribute.QualifyIfPresent(vars, obj, presenceOnly) + var val ref.Val + if err != nil { + val = types.WrapErr(err) + } else if out != nil { + val = e.adapter.NativeToValue(out) + } else if presenceOnly { + val = types.Bool(present) + } + if present || presenceOnly { + e.observer(e.ID(), e.Attribute, val) + } + return out, present, err +} + // evalWatchQual observes the qualification of an object by a value computed at runtime. type evalWatchQual struct { Qualifier observer EvalObserver - adapter ref.TypeAdapter + adapter types.Adapter } // Qualify observes the qualification of a object via a value computed at runtime. @@ -986,9 +1041,8 @@ func (e *evalWatchConst) Eval(vars Activation) ref.Val { // evalExhaustiveOr is just like evalOr, but does not short-circuit argument evaluation. type evalExhaustiveOr struct { - id int64 - lhs Interpretable - rhs Interpretable + id int64 + terms []Interpretable } // ID implements the Interpretable interface method. @@ -998,38 +1052,44 @@ func (or *evalExhaustiveOr) ID() int64 { // Eval implements the Interpretable interface method. 
func (or *evalExhaustiveOr) Eval(ctx Activation) ref.Val { - lVal := or.lhs.Eval(ctx) - rVal := or.rhs.Eval(ctx) - lBool, lok := lVal.(types.Bool) - if lok && lBool == types.True { - return types.True + var err ref.Val = nil + var unk *types.Unknown + isTrue := false + for _, term := range or.terms { + val := term.Eval(ctx) + boolVal, ok := val.(types.Bool) + // flag the result as true + if ok && boolVal == types.True { + isTrue = true + } + if !ok && !isTrue { + isUnk := false + unk, isUnk = types.MaybeMergeUnknowns(val, unk) + if !isUnk && err == nil { + if types.IsError(val) { + err = val + } else { + err = types.MaybeNoSuchOverloadErr(val) + } + } + } } - rBool, rok := rVal.(types.Bool) - if rok && rBool == types.True { + if isTrue { return types.True } - if lok && rok { - return types.False - } - if types.IsUnknown(lVal) { - return lVal - } - if types.IsUnknown(rVal) { - return rVal + if unk != nil { + return unk } - // TODO: Combine the errors into a set in the future. - // If the left-hand side is non-boolean return it as the error. - if types.IsError(lVal) { - return lVal + if err != nil { + return err } - return types.MaybeNoSuchOverloadErr(rVal) + return types.False } // evalExhaustiveAnd is just like evalAnd, but does not short-circuit argument evaluation. type evalExhaustiveAnd struct { - id int64 - lhs Interpretable - rhs Interpretable + id int64 + terms []Interpretable } // ID implements the Interpretable interface method. @@ -1039,38 +1099,45 @@ func (and *evalExhaustiveAnd) ID() int64 { // Eval implements the Interpretable interface method. func (and *evalExhaustiveAnd) Eval(ctx Activation) ref.Val { - lVal := and.lhs.Eval(ctx) - rVal := and.rhs.Eval(ctx) - lBool, lok := lVal.(types.Bool) - if lok && lBool == types.False { - return types.False + var err ref.Val = nil + var unk *types.Unknown + isFalse := false + for _, term := range and.terms { + val := term.Eval(ctx) + boolVal, ok := val.(types.Bool) + // short-circuit on false. + if ok && boolVal == types.False { + isFalse = true + } + if !ok && !isFalse { + isUnk := false + unk, isUnk = types.MaybeMergeUnknowns(val, unk) + if !isUnk && err == nil { + if types.IsError(val) { + err = val + } else { + err = types.MaybeNoSuchOverloadErr(val) + } + } + } } - rBool, rok := rVal.(types.Bool) - if rok && rBool == types.False { + if isFalse { return types.False } - if lok && rok { - return types.True - } - if types.IsUnknown(lVal) { - return lVal - } - if types.IsUnknown(rVal) { - return rVal + if unk != nil { + return unk } - // TODO: Combine the errors into a set in the future. - // If the left-hand side is non-boolean return it as the error. - if types.IsError(lVal) { - return lVal + if err != nil { + return err } - return types.MaybeNoSuchOverloadErr(rVal) + return types.True } // evalExhaustiveConditional is like evalConditional, but does not short-circuit argument // evaluation. type evalExhaustiveConditional struct { id int64 - adapter ref.TypeAdapter + adapter types.Adapter attr *conditionalAttribute } @@ -1102,7 +1169,7 @@ func (cond *evalExhaustiveConditional) Eval(ctx Activation) ref.Val { // evalAttr evaluates an Attribute value. type evalAttr struct { - adapter ref.TypeAdapter + adapter types.Adapter attr Attribute optional bool } @@ -1127,7 +1194,7 @@ func (a *evalAttr) Attr() Attribute { } // Adapter implements the InterpretableAttribute interface method. 
-func (a *evalAttr) Adapter() ref.TypeAdapter { +func (a *evalAttr) Adapter() types.Adapter { return a.adapter } diff --git a/vendor/github.com/google/cel-go/interpreter/interpreter.go b/vendor/github.com/google/cel-go/interpreter/interpreter.go index 707a6105a..00fc74732 100644 --- a/vendor/github.com/google/cel-go/interpreter/interpreter.go +++ b/vendor/github.com/google/cel-go/interpreter/interpreter.go @@ -18,9 +18,10 @@ package interpreter import ( + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/containers" + "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/interpreter/functions" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) @@ -29,7 +30,7 @@ import ( type Interpreter interface { // NewInterpretable creates an Interpretable from a checked expression and an // optional list of InterpretableDecorator values. - NewInterpretable(checked *exprpb.CheckedExpr, decorators ...InterpretableDecorator) (Interpretable, error) + NewInterpretable(checked *ast.CheckedAST, decorators ...InterpretableDecorator) (Interpretable, error) // NewUncheckedInterpretable returns an Interpretable from a parsed expression // and an optional list of InterpretableDecorator values. @@ -154,8 +155,8 @@ func CompileRegexConstants(regexOptimizations ...*RegexOptimization) Interpretab type exprInterpreter struct { dispatcher Dispatcher container *containers.Container - provider ref.TypeProvider - adapter ref.TypeAdapter + provider types.Provider + adapter types.Adapter attrFactory AttributeFactory } @@ -163,8 +164,8 @@ type exprInterpreter struct { // throughout the Eval of all Interpretable instances generated from it. func NewInterpreter(dispatcher Dispatcher, container *containers.Container, - provider ref.TypeProvider, - adapter ref.TypeAdapter, + provider types.Provider, + adapter types.Adapter, attrFactory AttributeFactory) Interpreter { return &exprInterpreter{ dispatcher: dispatcher, @@ -174,20 +175,9 @@ func NewInterpreter(dispatcher Dispatcher, attrFactory: attrFactory} } -// NewStandardInterpreter builds a Dispatcher and TypeProvider with support for all of the CEL -// builtins defined in the language definition. -func NewStandardInterpreter(container *containers.Container, - provider ref.TypeProvider, - adapter ref.TypeAdapter, - resolver AttributeFactory) Interpreter { - dispatcher := NewDispatcher() - dispatcher.Add(functions.StandardOverloads()...) - return NewInterpreter(dispatcher, container, provider, adapter, resolver) -} - // NewIntepretable implements the Interpreter interface method. func (i *exprInterpreter) NewInterpretable( - checked *exprpb.CheckedExpr, + checked *ast.CheckedAST, decorators ...InterpretableDecorator) (Interpretable, error) { p := newPlanner( i.dispatcher, @@ -197,7 +187,7 @@ func (i *exprInterpreter) NewInterpretable( i.container, checked, decorators...) - return p.Plan(checked.GetExpr()) + return p.Plan(checked.Expr) } // NewUncheckedIntepretable implements the Interpreter interface method. 
diff --git a/vendor/github.com/google/cel-go/interpreter/planner.go b/vendor/github.com/google/cel-go/interpreter/planner.go index 0b65d0fa9..757cd080e 100644 --- a/vendor/github.com/google/cel-go/interpreter/planner.go +++ b/vendor/github.com/google/cel-go/interpreter/planner.go @@ -18,10 +18,12 @@ import ( "fmt" "strings" + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/containers" + "github.com/google/cel-go/common/functions" "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/interpreter/functions" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) @@ -37,11 +39,11 @@ type interpretablePlanner interface { // functions, types, and namespaced identifiers at plan time rather than at runtime since // it only needs to be done once and may be semi-expensive to compute. func newPlanner(disp Dispatcher, - provider ref.TypeProvider, - adapter ref.TypeAdapter, + provider types.Provider, + adapter types.Adapter, attrFactory AttributeFactory, cont *containers.Container, - checked *exprpb.CheckedExpr, + checked *ast.CheckedAST, decorators ...InterpretableDecorator) interpretablePlanner { return &planner{ disp: disp, @@ -49,8 +51,8 @@ func newPlanner(disp Dispatcher, adapter: adapter, attrFactory: attrFactory, container: cont, - refMap: checked.GetReferenceMap(), - typeMap: checked.GetTypeMap(), + refMap: checked.ReferenceMap, + typeMap: checked.TypeMap, decorators: decorators, } } @@ -59,8 +61,8 @@ func newPlanner(disp Dispatcher, // TypeAdapter, and Container to resolve functions and types at plan time. Namespaces present in // Select expressions are resolved lazily at evaluation time. func newUncheckedPlanner(disp Dispatcher, - provider ref.TypeProvider, - adapter ref.TypeAdapter, + provider types.Provider, + adapter types.Adapter, attrFactory AttributeFactory, cont *containers.Container, decorators ...InterpretableDecorator) interpretablePlanner { @@ -70,8 +72,8 @@ func newUncheckedPlanner(disp Dispatcher, adapter: adapter, attrFactory: attrFactory, container: cont, - refMap: make(map[int64]*exprpb.Reference), - typeMap: make(map[int64]*exprpb.Type), + refMap: make(map[int64]*ast.ReferenceInfo), + typeMap: make(map[int64]*types.Type), decorators: decorators, } } @@ -79,12 +81,12 @@ func newUncheckedPlanner(disp Dispatcher, // planner is an implementation of the interpretablePlanner interface. type planner struct { disp Dispatcher - provider ref.TypeProvider - adapter ref.TypeAdapter + provider types.Provider + adapter types.Adapter attrFactory AttributeFactory container *containers.Container - refMap map[int64]*exprpb.Reference - typeMap map[int64]*exprpb.Type + refMap map[int64]*ast.ReferenceInfo + typeMap map[int64]*types.Type decorators []InterpretableDecorator } @@ -143,22 +145,19 @@ func (p *planner) planIdent(expr *exprpb.Expr) (Interpretable, error) { }, nil } -func (p *planner) planCheckedIdent(id int64, identRef *exprpb.Reference) (Interpretable, error) { +func (p *planner) planCheckedIdent(id int64, identRef *ast.ReferenceInfo) (Interpretable, error) { // Plan a constant reference if this is the case for this simple identifier. - if identRef.GetValue() != nil { - return p.Plan(&exprpb.Expr{Id: id, - ExprKind: &exprpb.Expr_ConstExpr{ - ConstExpr: identRef.GetValue(), - }}) + if identRef.Value != nil { + return NewConstValue(id, identRef.Value), nil } // Check to see whether the type map indicates this is a type name. 
All types should be // registered with the provider. cType := p.typeMap[id] - if cType.GetType() != nil { - cVal, found := p.provider.FindIdent(identRef.GetName()) + if cType.Kind() == types.TypeKind { + cVal, found := p.provider.FindIdent(identRef.Name) if !found { - return nil, fmt.Errorf("reference to undefined type: %s", identRef.GetName()) + return nil, fmt.Errorf("reference to undefined type: %s", identRef.Name) } return NewConstValue(id, cVal), nil } @@ -166,7 +165,7 @@ func (p *planner) planCheckedIdent(id int64, identRef *exprpb.Reference) (Interp // Otherwise, return the attribute for the resolved identifier name. return &evalAttr{ adapter: p.adapter, - attr: p.attrFactory.AbsoluteAttribute(id, identRef.GetName()), + attr: p.attrFactory.AbsoluteAttribute(id, identRef.Name), }, nil } @@ -429,18 +428,16 @@ func (p *planner) planCallNotEqual(expr *exprpb.Expr, args []Interpretable) (Int // planCallLogicalAnd generates a logical and (&&) Interpretable. func (p *planner) planCallLogicalAnd(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) { return &evalAnd{ - id: expr.GetId(), - lhs: args[0], - rhs: args[1], + id: expr.GetId(), + terms: args, }, nil } // planCallLogicalOr generates a logical or (||) Interpretable. func (p *planner) planCallLogicalOr(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) { return &evalOr{ - id: expr.GetId(), - lhs: args[0], - rhs: args[1], + id: expr.GetId(), + terms: args, }, nil } @@ -476,7 +473,7 @@ func (p *planner) planCallConditional(expr *exprpb.Expr, args []Interpretable) ( func (p *planner) planCallIndex(expr *exprpb.Expr, args []Interpretable, optional bool) (Interpretable, error) { op := args[0] ind := args[1] - opType := p.typeMap[expr.GetCallExpr().GetTarget().GetId()] + opType := p.typeMap[op.ID()] // Establish the attribute reference. var err error @@ -675,7 +672,7 @@ func (p *planner) constValue(c *exprpb.Constant) (ref.Val, error) { // namespace resolution rules to it in a scan over possible matching types in the TypeProvider. func (p *planner) resolveTypeName(typeName string) (string, bool) { for _, qualifiedTypeName := range p.container.ResolveCandidateNames(typeName) { - if _, found := p.provider.FindType(qualifiedTypeName); found { + if _, found := p.provider.FindStructType(qualifiedTypeName); found { return qualifiedTypeName, true } } @@ -702,8 +699,8 @@ func (p *planner) resolveFunction(expr *exprpb.Expr) (*exprpb.Expr, string, stri // function name as the fnName value. 
oRef, hasOverload := p.refMap[expr.GetId()] if hasOverload { - if len(oRef.GetOverloadId()) == 1 { - return target, fnName, oRef.GetOverloadId()[0] + if len(oRef.OverloadIDs) == 1 { + return target, fnName, oRef.OverloadIDs[0] } // Note, this namespaced function name will not appear as a fully qualified name in ASTs // built and stored before cel-go v0.5.0; however, this functionality did not work at all diff --git a/vendor/github.com/google/cel-go/interpreter/prune.go b/vendor/github.com/google/cel-go/interpreter/prune.go index d1b5d6bd6..b8834b1cb 100644 --- a/vendor/github.com/google/cel-go/interpreter/prune.go +++ b/vendor/github.com/google/cel-go/interpreter/prune.go @@ -341,6 +341,11 @@ func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) { } } if macro, found := p.macroCalls[node.GetId()]; found { + // Ensure that intermediate values for the comprehension are cleared during pruning + compre := node.GetComprehensionExpr() + if compre != nil { + visit(macro, clearIterVarVisitor(compre.IterVar, p.state)) + } // prune the expression in terms of the macro call instead of the expanded form. if newMacro, pruned := p.prune(macro); pruned { p.macroCalls[node.GetId()] = newMacro @@ -488,6 +493,27 @@ func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) { }, }, true } + case *exprpb.Expr_ComprehensionExpr: + compre := node.GetComprehensionExpr() + // Only the range of the comprehension is pruned since the state tracking only records + // the last iteration of the comprehension and not each step in the evaluation which + // means that the any residuals computed in between might be inaccurate. + if newRange, pruned := p.maybePrune(compre.GetIterRange()); pruned { + return &exprpb.Expr{ + Id: node.GetId(), + ExprKind: &exprpb.Expr_ComprehensionExpr{ + ComprehensionExpr: &exprpb.Expr_Comprehension{ + IterVar: compre.GetIterVar(), + IterRange: newRange, + AccuVar: compre.GetAccuVar(), + AccuInit: compre.GetAccuInit(), + LoopCondition: compre.GetLoopCondition(), + LoopStep: compre.GetLoopStep(), + Result: compre.GetResult(), + }, + }, + }, true + } } return node, false } @@ -524,6 +550,17 @@ func getMaxID(expr *exprpb.Expr) int64 { return maxID } +func clearIterVarVisitor(varName string, state EvalState) astVisitor { + return astVisitor{ + visitExpr: func(e *exprpb.Expr) { + ident := e.GetIdentExpr() + if ident != nil && ident.GetName() == varName { + state.SetValue(e.GetId(), nil) + } + }, + } +} + func maxIDVisitor(maxID *int64) astVisitor { return astVisitor{ visitExpr: func(e *exprpb.Expr) { @@ -543,7 +580,9 @@ func visit(expr *exprpb.Expr, visitor astVisitor) { exprs := []*exprpb.Expr{expr} for len(exprs) != 0 { e := exprs[0] - visitor.visitExpr(e) + if visitor.visitExpr != nil { + visitor.visitExpr(e) + } exprs = exprs[1:] switch e.GetExprKind().(type) { case *exprpb.Expr_SelectExpr: @@ -567,7 +606,9 @@ func visit(expr *exprpb.Expr, visitor astVisitor) { exprs = append(exprs, list.GetElements()...) 
case *exprpb.Expr_StructExpr: for _, entry := range e.GetStructExpr().GetEntries() { - visitor.visitEntry(entry) + if visitor.visitEntry != nil { + visitor.visitEntry(entry) + } if entry.GetMapKey() != nil { exprs = append(exprs, entry.GetMapKey()) } diff --git a/vendor/github.com/google/cel-go/interpreter/runtimecost.go b/vendor/github.com/google/cel-go/interpreter/runtimecost.go index 80e7f6134..b9b307c15 100644 --- a/vendor/github.com/google/cel-go/interpreter/runtimecost.go +++ b/vendor/github.com/google/cel-go/interpreter/runtimecost.go @@ -65,13 +65,21 @@ func CostObserver(tracker *CostTracker) EvalObserver { // While the field names are identical, the boolean operation eval structs do not share an interface and so // must be handled individually. case *evalOr: - tracker.stack.drop(t.rhs.ID(), t.lhs.ID()) + for _, term := range t.terms { + tracker.stack.drop(term.ID()) + } case *evalAnd: - tracker.stack.drop(t.rhs.ID(), t.lhs.ID()) + for _, term := range t.terms { + tracker.stack.drop(term.ID()) + } case *evalExhaustiveOr: - tracker.stack.drop(t.rhs.ID(), t.lhs.ID()) + for _, term := range t.terms { + tracker.stack.drop(term.ID()) + } case *evalExhaustiveAnd: - tracker.stack.drop(t.rhs.ID(), t.lhs.ID()) + for _, term := range t.terms { + tracker.stack.drop(term.ID()) + } case *evalFold: tracker.stack.drop(t.iterRange.ID()) case Qualifier: @@ -125,6 +133,7 @@ func PresenceTestHasCost(hasCost bool) CostTrackerOption { func NewCostTracker(estimator ActualCostEstimator, opts ...CostTrackerOption) (*CostTracker, error) { tracker := &CostTracker{ Estimator: estimator, + overloadTrackers: map[string]FunctionTracker{}, presenceTestHasCost: true, } for _, opt := range opts { @@ -136,9 +145,24 @@ func NewCostTracker(estimator ActualCostEstimator, opts ...CostTrackerOption) (* return tracker, nil } +// OverloadCostTracker binds an overload ID to a runtime FunctionTracker implementation. +// +// OverloadCostTracker instances augment or override ActualCostEstimator decisions, allowing for versioned and/or +// optional cost tracking changes. +func OverloadCostTracker(overloadID string, fnTracker FunctionTracker) CostTrackerOption { + return func(tracker *CostTracker) error { + tracker.overloadTrackers[overloadID] = fnTracker + return nil + } +} + +// FunctionTracker computes the actual cost of evaluating the functions with the given arguments and result. +type FunctionTracker func(args []ref.Val, result ref.Val) *uint64 + // CostTracker represents the information needed for tracking runtime cost. 
type CostTracker struct { Estimator ActualCostEstimator + overloadTrackers map[string]FunctionTracker Limit *uint64 presenceTestHasCost bool @@ -151,10 +175,19 @@ func (c *CostTracker) ActualCost() uint64 { return c.cost } -func (c *CostTracker) costCall(call InterpretableCall, argValues []ref.Val, result ref.Val) uint64 { +func (c *CostTracker) costCall(call InterpretableCall, args []ref.Val, result ref.Val) uint64 { var cost uint64 + if len(c.overloadTrackers) != 0 { + if tracker, found := c.overloadTrackers[call.OverloadID()]; found { + callCost := tracker(args, result) + if callCost != nil { + cost += *callCost + return cost + } + } + } if c.Estimator != nil { - callCost := c.Estimator.CallCost(call.Function(), call.OverloadID(), argValues, result) + callCost := c.Estimator.CallCost(call.Function(), call.OverloadID(), args, result) if callCost != nil { cost += *callCost return cost @@ -165,11 +198,11 @@ func (c *CostTracker) costCall(call InterpretableCall, argValues []ref.Val, resu switch call.OverloadID() { // O(n) functions case overloads.StartsWithString, overloads.EndsWithString, overloads.StringToBytes, overloads.BytesToString, overloads.ExtQuoteString, overloads.ExtFormatString: - cost += uint64(math.Ceil(float64(c.actualSize(argValues[0])) * common.StringTraversalCostFactor)) + cost += uint64(math.Ceil(float64(c.actualSize(args[0])) * common.StringTraversalCostFactor)) case overloads.InList: // If a list is composed entirely of constant values this is O(1), but we don't account for that here. // We just assume all list containment checks are O(n). - cost += c.actualSize(argValues[1]) + cost += c.actualSize(args[1]) // O(min(m, n)) functions case overloads.LessString, overloads.GreaterString, overloads.LessEqualsString, overloads.GreaterEqualsString, overloads.LessBytes, overloads.GreaterBytes, overloads.LessEqualsBytes, overloads.GreaterEqualsBytes, @@ -177,8 +210,8 @@ func (c *CostTracker) costCall(call InterpretableCall, argValues []ref.Val, resu // When we check the equality of 2 scalar values (e.g. 2 integers, 2 floating-point numbers, 2 booleans etc.), // the CostTracker.actualSize() function by definition returns 1 for each operand, resulting in an overall cost // of 1. - lhsSize := c.actualSize(argValues[0]) - rhsSize := c.actualSize(argValues[1]) + lhsSize := c.actualSize(args[0]) + rhsSize := c.actualSize(args[1]) minSize := lhsSize if rhsSize < minSize { minSize = rhsSize @@ -187,23 +220,23 @@ func (c *CostTracker) costCall(call InterpretableCall, argValues []ref.Val, resu // O(m+n) functions case overloads.AddString, overloads.AddBytes: // In the worst case scenario, we would need to reallocate a new backing store and copy both operands over. - cost += uint64(math.Ceil(float64(c.actualSize(argValues[0])+c.actualSize(argValues[1])) * common.StringTraversalCostFactor)) + cost += uint64(math.Ceil(float64(c.actualSize(args[0])+c.actualSize(args[1])) * common.StringTraversalCostFactor)) // O(nm) functions case overloads.MatchesString: // https://swtch.com/~rsc/regexp/regexp1.html applies to RE2 implementation supported by CEL // Add one to string length for purposes of cost calculation to prevent product of string and regex to be 0 // in case where string is empty but regex is still expensive. 
- strCost := uint64(math.Ceil((1.0 + float64(c.actualSize(argValues[0]))) * common.StringTraversalCostFactor)) + strCost := uint64(math.Ceil((1.0 + float64(c.actualSize(args[0]))) * common.StringTraversalCostFactor)) // We don't know how many expressions are in the regex, just the string length (a huge // improvement here would be to somehow get a count the number of expressions in the regex or // how many states are in the regex state machine and use that to measure regex cost). // For now, we're making a guess that each expression in a regex is typically at least 4 chars // in length. - regexCost := uint64(math.Ceil(float64(c.actualSize(argValues[1])) * common.RegexStringLengthCostFactor)) + regexCost := uint64(math.Ceil(float64(c.actualSize(args[1])) * common.RegexStringLengthCostFactor)) cost += strCost * regexCost case overloads.ContainsString: - strCost := uint64(math.Ceil(float64(c.actualSize(argValues[0])) * common.StringTraversalCostFactor)) - substrCost := uint64(math.Ceil(float64(c.actualSize(argValues[1])) * common.StringTraversalCostFactor)) + strCost := uint64(math.Ceil(float64(c.actualSize(args[0])) * common.StringTraversalCostFactor)) + substrCost := uint64(math.Ceil(float64(c.actualSize(args[1])) * common.StringTraversalCostFactor)) cost += strCost * substrCost default: diff --git a/vendor/github.com/google/cel-go/parser/errors.go b/vendor/github.com/google/cel-go/parser/errors.go index ce49bb87f..93ae7a3ad 100644 --- a/vendor/github.com/google/cel-go/parser/errors.go +++ b/vendor/github.com/google/cel-go/parser/errors.go @@ -22,9 +22,22 @@ import ( // parseErrors is a specialization of Errors. type parseErrors struct { - *common.Errors + errs *common.Errors +} + +// errorCount indicates the number of errors reported. +func (e *parseErrors) errorCount() int { + return len(e.errs.GetErrors()) +} + +func (e *parseErrors) internalError(message string) { + e.errs.ReportErrorAtID(0, common.NoLocation, message) } func (e *parseErrors) syntaxError(l common.Location, message string) { - e.ReportError(l, fmt.Sprintf("Syntax error: %s", message)) + e.errs.ReportErrorAtID(0, l, fmt.Sprintf("Syntax error: %s", message)) +} + +func (e *parseErrors) reportErrorAtID(id int64, l common.Location, message string, args ...any) { + e.errs.ReportErrorAtID(id, l, message, args...) } diff --git a/vendor/github.com/google/cel-go/parser/helper.go b/vendor/github.com/google/cel-go/parser/helper.go index 8f8f478ed..a5f29e3d7 100644 --- a/vendor/github.com/google/cel-go/parser/helper.go +++ b/vendor/github.com/google/cel-go/parser/helper.go @@ -193,15 +193,15 @@ func (p *parserHelper) newExpr(ctx any) *exprpb.Expr { func (p *parserHelper) id(ctx any) int64 { var location common.Location - switch ctx.(type) { + switch c := ctx.(type) { case antlr.ParserRuleContext: - token := (ctx.(antlr.ParserRuleContext)).GetStart() + token := c.GetStart() location = p.source.NewLocation(token.GetLine(), token.GetColumn()) case antlr.Token: - token := ctx.(antlr.Token) + token := c location = p.source.NewLocation(token.GetLine(), token.GetColumn()) case common.Location: - location = ctx.(common.Location) + location = c default: // This should only happen if the ctx is nil return -1 @@ -297,67 +297,83 @@ func (p *parserHelper) addMacroCall(exprID int64, function string, target *exprp } } -// balancer performs tree balancing on operators whose arguments are of equal precedence. 
+// logicManager compacts logical trees into a more efficient structure which is semantically +// equivalent with how the logic graph is constructed by the ANTLR parser. // -// The purpose of the balancer is to ensure a compact serialization format for the logical &&, || +// The purpose of the logicManager is to ensure a compact serialization format for the logical &&, || // operators which have a tendency to create long DAGs which are skewed in one direction. Since the // operators are commutative re-ordering the terms *must not* affect the evaluation result. // -// Re-balancing the terms is a safe, if somewhat controversial choice. A better solution would be -// to make these functions variadic and update both the checker and interpreter to understand this; -// however, this is a more complex change. -// -// TODO: Consider replacing tree-balancing with variadic logical &&, || within the parser, checker, -// and interpreter. -type balancer struct { - helper *parserHelper - function string - terms []*exprpb.Expr - ops []int64 -} - -// newBalancer creates a balancer instance bound to a specific function and its first term. -func newBalancer(h *parserHelper, function string, term *exprpb.Expr) *balancer { - return &balancer{ - helper: h, - function: function, - terms: []*exprpb.Expr{term}, - ops: []int64{}, +// The logic manager will either render the terms to N-chained && / || operators as a single logical +// call with N-terms, or will rebalance the tree. Rebalancing the terms is a safe, if somewhat +// controversial choice as it alters the traditional order of execution assumptions present in most +// expressions. +type logicManager struct { + helper *parserHelper + function string + terms []*exprpb.Expr + ops []int64 + variadicASTs bool +} + +// newVariadicLogicManager creates a logic manager instance bound to a specific function and its first term. +func newVariadicLogicManager(h *parserHelper, function string, term *exprpb.Expr) *logicManager { + return &logicManager{ + helper: h, + function: function, + terms: []*exprpb.Expr{term}, + ops: []int64{}, + variadicASTs: true, + } +} + +// newBalancingLogicManager creates a logic manager instance bound to a specific function and its first term. +func newBalancingLogicManager(h *parserHelper, function string, term *exprpb.Expr) *logicManager { + return &logicManager{ + helper: h, + function: function, + terms: []*exprpb.Expr{term}, + ops: []int64{}, + variadicASTs: false, } } // addTerm adds an operation identifier and term to the set of terms to be balanced. -func (b *balancer) addTerm(op int64, term *exprpb.Expr) { - b.terms = append(b.terms, term) - b.ops = append(b.ops, op) +func (l *logicManager) addTerm(op int64, term *exprpb.Expr) { + l.terms = append(l.terms, term) + l.ops = append(l.ops, op) } -// balance creates a balanced tree from the sub-terms and returns the final Expr value. -func (b *balancer) balance() *exprpb.Expr { - if len(b.terms) == 1 { - return b.terms[0] +// toExpr renders the logic graph into an Expr value, either balancing a tree of logical +// operations or creating a variadic representation of the logical operator. +func (l *logicManager) toExpr() *exprpb.Expr { + if len(l.terms) == 1 { + return l.terms[0] + } + if l.variadicASTs { + return l.helper.newGlobalCall(l.ops[0], l.function, l.terms...) } - return b.balancedTree(0, len(b.ops)-1) + return l.balancedTree(0, len(l.ops)-1) } // balancedTree recursively balances the terms provided to a commutative operator. 
-func (b *balancer) balancedTree(lo, hi int) *exprpb.Expr { +func (l *logicManager) balancedTree(lo, hi int) *exprpb.Expr { mid := (lo + hi + 1) / 2 var left *exprpb.Expr if mid == lo { - left = b.terms[mid] + left = l.terms[mid] } else { - left = b.balancedTree(lo, mid-1) + left = l.balancedTree(lo, mid-1) } var right *exprpb.Expr if mid == hi { - right = b.terms[mid+1] + right = l.terms[mid+1] } else { - right = b.balancedTree(mid+1, hi) + right = l.balancedTree(mid+1, hi) } - return b.helper.newGlobalCall(b.ops[mid], b.function, left, right) + return l.helper.newGlobalCall(l.ops[mid], l.function, left, right) } type exprHelper struct { @@ -370,7 +386,7 @@ func (e *exprHelper) nextMacroID() int64 { } // Copy implements the ExprHelper interface method by producing a copy of the input Expr value -// with a fresh set of numeric identifiers the Expr and all its descendents. +// with a fresh set of numeric identifiers the Expr and all its descendants. func (e *exprHelper) Copy(expr *exprpb.Expr) *exprpb.Expr { copy := e.parserHelper.newExpr(e.parserHelper.getLocation(expr.GetId())) switch expr.GetExprKind().(type) { @@ -558,11 +574,22 @@ func (e *exprHelper) Select(operand *exprpb.Expr, field string) *exprpb.Expr { // OffsetLocation implements the ExprHelper interface method. func (e *exprHelper) OffsetLocation(exprID int64) common.Location { - offset := e.parserHelper.positions[exprID] - location, _ := e.parserHelper.source.OffsetLocation(offset) + offset, found := e.parserHelper.positions[exprID] + if !found { + return common.NoLocation + } + location, found := e.parserHelper.source.OffsetLocation(offset) + if !found { + return common.NoLocation + } return location } +// NewError associates an error message with a given expression id, populating the source offset location of the error if possible. +func (e *exprHelper) NewError(exprID int64, message string) *common.Error { + return common.NewError(exprID, message, e.OffsetLocation(exprID)) +} + var ( // Thread-safe pool of ExprHelper values to minimize alloc overhead of ExprHelper creations. exprHelperPool = &sync.Pool{ diff --git a/vendor/github.com/google/cel-go/parser/macro.go b/vendor/github.com/google/cel-go/parser/macro.go index 80e5c66c6..6066e8ef4 100644 --- a/vendor/github.com/google/cel-go/parser/macro.go +++ b/vendor/github.com/google/cel-go/parser/macro.go @@ -232,6 +232,9 @@ type ExprHelper interface { // OffsetLocation returns the Location of the expression identifier. OffsetLocation(exprID int64) common.Location + + // NewError associates an error message with a given expression id. 
+ NewError(exprID int64, message string) *common.Error } var ( @@ -324,7 +327,7 @@ func MakeExistsOne(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*ex func MakeMap(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { v, found := extractIdent(args[0]) if !found { - return nil, &common.Error{Message: "argument is not an identifier"} + return nil, eh.NewError(args[0].GetId(), "argument is not an identifier") } var fn *exprpb.Expr @@ -355,7 +358,7 @@ func MakeMap(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.E func MakeFilter(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { v, found := extractIdent(args[0]) if !found { - return nil, &common.Error{Message: "argument is not an identifier"} + return nil, eh.NewError(args[0].GetId(), "argument is not an identifier") } filter := args[1] @@ -372,17 +375,13 @@ func MakeHas(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.E if s, ok := args[0].ExprKind.(*exprpb.Expr_SelectExpr); ok { return eh.PresenceTest(s.SelectExpr.GetOperand(), s.SelectExpr.GetField()), nil } - return nil, &common.Error{Message: "invalid argument to has() macro"} + return nil, eh.NewError(args[0].GetId(), "invalid argument to has() macro") } func makeQuantifier(kind quantifierKind, eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { v, found := extractIdent(args[0]) if !found { - location := eh.OffsetLocation(args[0].GetId()) - return nil, &common.Error{ - Message: "argument must be a simple name", - Location: location, - } + return nil, eh.NewError(args[0].GetId(), "argument must be a simple name") } var init *exprpb.Expr @@ -411,7 +410,7 @@ func makeQuantifier(kind quantifierKind, eh ExprHelper, target *exprpb.Expr, arg eh.GlobalCall(operators.Add, eh.AccuIdent(), oneExpr), eh.AccuIdent()) result = eh.GlobalCall(operators.Equals, eh.AccuIdent(), oneExpr) default: - return nil, &common.Error{Message: fmt.Sprintf("unrecognized quantifier '%v'", kind)} + return nil, eh.NewError(args[0].GetId(), fmt.Sprintf("unrecognized quantifier '%v'", kind)) } return eh.Fold(v, target, AccumulatorName, init, condition, step, result), nil } diff --git a/vendor/github.com/google/cel-go/parser/options.go b/vendor/github.com/google/cel-go/parser/options.go index 674c697c5..61fc3adec 100644 --- a/vendor/github.com/google/cel-go/parser/options.go +++ b/vendor/github.com/google/cel-go/parser/options.go @@ -25,6 +25,7 @@ type options struct { macros map[string]Macro populateMacroCalls bool enableOptionalSyntax bool + enableVariadicOperatorASTs bool } // Option configures the behavior of the parser. @@ -125,3 +126,15 @@ func EnableOptionalSyntax(optionalSyntax bool) Option { return nil } } + +// EnableVariadicOperatorASTs enables a compact representation of chained like-kind commutative +// operators. e.g. `a || b || c || d` -> `call(op='||', args=[a, b, c, d])` +// +// The benefit of enabling variadic operators ASTs is a more compact representation deeply nested +// logic graphs. 
+func EnableVariadicOperatorASTs(varArgASTs bool) Option { + return func(opts *options) error { + opts.enableVariadicOperatorASTs = varArgASTs + return nil + } +} diff --git a/vendor/github.com/google/cel-go/parser/parser.go b/vendor/github.com/google/cel-go/parser/parser.go index e6f70f906..109326a93 100644 --- a/vendor/github.com/google/cel-go/parser/parser.go +++ b/vendor/github.com/google/cel-go/parser/parser.go @@ -89,8 +89,9 @@ func mustNewParser(opts ...Option) *Parser { // Parse parses the expression represented by source and returns the result. func (p *Parser) Parse(source common.Source) (*exprpb.ParsedExpr, *common.Errors) { + errs := common.NewErrors(source) impl := parser{ - errors: &parseErrors{common.NewErrors(source)}, + errors: &parseErrors{errs}, helper: newParserHelper(source), macros: p.macros, maxRecursionDepth: p.maxRecursionDepth, @@ -99,6 +100,7 @@ func (p *Parser) Parse(source common.Source) (*exprpb.ParsedExpr, *common.Errors errorRecoveryLookaheadTokenLimit: p.errorRecoveryTokenLookaheadLimit, populateMacroCalls: p.populateMacroCalls, enableOptionalSyntax: p.enableOptionalSyntax, + enableVariadicOperatorASTs: p.enableVariadicOperatorASTs, } buf, ok := source.(runes.Buffer) if !ok { @@ -115,7 +117,7 @@ func (p *Parser) Parse(source common.Source) (*exprpb.ParsedExpr, *common.Errors return &exprpb.ParsedExpr{ Expr: e, SourceInfo: impl.helper.getSourceInfo(), - }, impl.errors.Errors + }, errs } // reservedIds are not legal to use as variables. We exclude them post-parse, as they *are* valid @@ -295,6 +297,7 @@ type parser struct { errorRecoveryLookaheadTokenLimit int populateMacroCalls bool enableOptionalSyntax bool + enableVariadicOperatorASTs bool } var ( @@ -357,9 +360,9 @@ func (p *parser) parse(expr runes.Buffer, desc string) *exprpb.Expr { if val := recover(); val != nil { switch err := val.(type) { case *lookaheadLimitError: - p.errors.ReportError(common.NoLocation, err.Error()) + p.errors.internalError(err.Error()) case *recursionError: - p.errors.ReportError(common.NoLocation, err.Error()) + p.errors.internalError(err.Error()) case *tooManyErrors: // do nothing case *recoveryLimitError: @@ -449,7 +452,7 @@ func (p *parser) Visit(tree antlr.ParseTree) any { // Report at least one error if the parser reaches an unknown parse element. // Typically, this happens if the parser has already encountered a syntax error elsewhere. - if len(p.errors.GetErrors()) == 0 { + if p.errors.errorCount() == 0 { txt := "<>" if t != nil { txt = fmt.Sprintf("<<%T>>", t) @@ -480,7 +483,7 @@ func (p *parser) VisitExpr(ctx *gen.ExprContext) any { // Visit a parse tree produced by CELParser#conditionalOr. func (p *parser) VisitConditionalOr(ctx *gen.ConditionalOrContext) any { result := p.Visit(ctx.GetE()).(*exprpb.Expr) - b := newBalancer(p.helper, operators.LogicalOr, result) + l := p.newLogicManager(operators.LogicalOr, result) rest := ctx.GetE1() for i, op := range ctx.GetOps() { if i >= len(rest) { @@ -488,15 +491,15 @@ func (p *parser) VisitConditionalOr(ctx *gen.ConditionalOrContext) any { } next := p.Visit(rest[i]).(*exprpb.Expr) opID := p.helper.id(op) - b.addTerm(opID, next) + l.addTerm(opID, next) } - return b.balance() + return l.toExpr() } // Visit a parse tree produced by CELParser#conditionalAnd. 
func (p *parser) VisitConditionalAnd(ctx *gen.ConditionalAndContext) any { result := p.Visit(ctx.GetE()).(*exprpb.Expr) - b := newBalancer(p.helper, operators.LogicalAnd, result) + l := p.newLogicManager(operators.LogicalAnd, result) rest := ctx.GetE1() for i, op := range ctx.GetOps() { if i >= len(rest) { @@ -504,9 +507,9 @@ func (p *parser) VisitConditionalAnd(ctx *gen.ConditionalAndContext) any { } next := p.Visit(rest[i]).(*exprpb.Expr) opID := p.helper.id(op) - b.addTerm(opID, next) + l.addTerm(opID, next) } - return b.balance() + return l.toExpr() } // Visit a parse tree produced by CELParser#relation. @@ -867,18 +870,24 @@ func (p *parser) unquote(ctx any, value string, isBytes bool) string { return text } +func (p *parser) newLogicManager(function string, term *exprpb.Expr) *logicManager { + if p.enableVariadicOperatorASTs { + return newVariadicLogicManager(p.helper, function, term) + } + return newBalancingLogicManager(p.helper, function, term) +} + func (p *parser) reportError(ctx any, format string, args ...any) *exprpb.Expr { var location common.Location - switch ctx.(type) { + err := p.helper.newExpr(ctx) + switch c := ctx.(type) { case common.Location: - location = ctx.(common.Location) + location = c case antlr.Token, antlr.ParserRuleContext: - err := p.helper.newExpr(ctx) location = p.helper.getLocation(err.GetId()) } - err := p.helper.newExpr(ctx) // Provide arguments to the report error. - p.errors.ReportError(location, format, args...) + p.errors.reportErrorAtID(err.GetId(), location, format, args...) return err } diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md index 7ed347d3a..c9fb829dc 100644 --- a/vendor/github.com/google/uuid/CHANGELOG.md +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog +## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) + + +### Features + +* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29)) + ## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26) diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go index e6ef06cdc..c35112927 100644 --- a/vendor/github.com/google/uuid/time.go +++ b/vendor/github.com/google/uuid/time.go @@ -108,12 +108,23 @@ func setClockSequence(seq int) { } // Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1 and 2 UUIDs. +// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs. func (uuid UUID) Time() Time { - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time) + var t Time + switch uuid.Version() { + case 6: + time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110 + t = Time(time) + case 7: + time := binary.BigEndian.Uint64(uuid[:8]) + t = Time((time>>16)*10000 + g1582ns100) + default: // forward compatible + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + t = Time(time) + } + return t } // ClockSequence returns the clock sequence encoded in uuid. 
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index dc75f7d99..5232b4867 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -186,6 +186,59 @@ func Must(uuid UUID, err error) UUID { return uuid } +// Validate returns an error if s is not a properly formatted UUID in one of the following formats: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} +// It returns an error if the format is invalid, otherwise nil. +func Validate(s string) error { + switch len(s) { + // Standard UUID format + case 36: + + // UUID with "urn:uuid:" prefix + case 36 + 9: + if !strings.EqualFold(s[:9], "urn:uuid:") { + return fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // UUID enclosed in braces + case 36 + 2: + if s[0] != '{' || s[len(s)-1] != '}' { + return fmt.Errorf("invalid bracketed UUID format") + } + s = s[1 : len(s)-1] + + // UUID without hyphens + case 32: + for i := 0; i < len(s); i += 2 { + _, ok := xtob(s[i], s[i+1]) + if !ok { + return errors.New("invalid UUID format") + } + } + + default: + return invalidLengthError{len(s)} + } + + // Check for standard UUID format + if len(s) == 36 { + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return errors.New("invalid UUID format") + } + for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} { + if _, ok := xtob(s[x], s[x+1]); !ok { + return errors.New("invalid UUID format") + } + } + } + + return nil +} + // String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx // , or "" if uuid is invalid. func (uuid UUID) String() string { diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go new file mode 100644 index 000000000..339a959a7 --- /dev/null +++ b/vendor/github.com/google/uuid/version6.go @@ -0,0 +1,56 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "encoding/binary" + +// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality. +// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs. +// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6 +// +// NewV6 returns a Version 6 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewV6 set NodeID is random bits automatically . If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewV6 returns Nil and an error. 
+func NewV6() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_high | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_mid | time_low_and_version | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |clk_seq_hi_res | clk_seq_low | node (0-1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | node (2-5) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + + binary.BigEndian.PutUint64(uuid[0:], uint64(now)) + binary.BigEndian.PutUint16(uuid[8:], seq) + + uuid[6] = 0x60 | (uuid[6] & 0x0F) + uuid[8] = 0x80 | (uuid[8] & 0x3F) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go new file mode 100644 index 000000000..ba9dd5eb6 --- /dev/null +++ b/vendor/github.com/google/uuid/version7.go @@ -0,0 +1,75 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// UUID version 7 features a time-ordered value field derived from the widely +// implemented and well known Unix Epoch timestamp source, +// the number of milliseconds seconds since midnight 1 Jan 1970 UTC, leap seconds excluded. +// As well as improved entropy characteristics over versions 1 or 6. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7 +// +// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible. +// +// NewV7 returns a Version 7 UUID based on the current time(Unix Epoch). +// Uses the randomness pool if it was enabled with EnableRandPool. +// On error, NewV7 returns Nil and an error +func NewV7() (UUID, error) { + uuid, err := NewRandom() + if err != nil { + return uuid, err + } + makeV7(uuid[:]) + return uuid, nil +} + +// NewV7FromReader returns a Version 7 UUID based on the current time(Unix Epoch). +// it use NewRandomFromReader fill random bits. +// On error, NewV7FromReader returns Nil and an error. 
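The time-ordering property the NewV7 comments describe is easy to see from the caller's side; a minimal sketch (hypothetical code), assuming the system clock does not step backwards between the two calls:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	a, _ := uuid.NewV7()
	b, _ := uuid.NewV7()

	fmt.Println(a.Version() == 7 && b.Version() == 7) // true

	// The leading 48 bits are Unix milliseconds, so string order tracks
	// creation order; IDs minted within the same millisecond share that
	// prefix and are ordered only by their random bits.
	fmt.Println(a.String() <= b.String() || a.String()[:13] == b.String()[:13]) // true
}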
+func NewV7FromReader(r io.Reader) (UUID, error) { + uuid, err := NewRandomFromReader(r) + if err != nil { + return uuid, err + } + + makeV7(uuid[:]) + return uuid, nil +} + +// makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) +// uuid[8] already has the right version number (Variant is 10) +// see function NewV7 and NewV7FromReader +func makeV7(uuid []byte) { + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | ver | rand_a | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |var| rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + _ = uuid[15] // bounds check + + t := timeNow().UnixMilli() + + uuid[0] = byte(t >> 40) + uuid[1] = byte(t >> 32) + uuid[2] = byte(t >> 24) + uuid[3] = byte(t >> 16) + uuid[4] = byte(t >> 8) + uuid[5] = byte(t) + + uuid[6] = 0x70 | (uuid[6] & 0x0F) + // uuid[8] has already has right version +} diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore new file mode 100644 index 000000000..cd3fcd1ef --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +.idea/ +*.iml diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS new file mode 100644 index 000000000..1931f4006 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/AUTHORS @@ -0,0 +1,9 @@ +# This is the official list of Gorilla WebSocket authors for copyright +# purposes. +# +# Please keep the list sorted. + +Gary Burd +Google LLC (https://opensource.google.com/) +Joachim Bauch + diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE new file mode 100644 index 000000000..9171c9722 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md new file mode 100644 index 000000000..2517a2871 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/README.md @@ -0,0 +1,39 @@ +# Gorilla WebSocket + +[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) +[![CircleCI](https://circleci.com/gh/gorilla/websocket.svg?style=svg)](https://circleci.com/gh/gorilla/websocket) + +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the +[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. + + +--- + +âš ï¸ **[The Gorilla WebSocket Package is looking for a new maintainer](https://github.com/gorilla/websocket/issues/370)** + +--- + +### Documentation + +* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc) +* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) +* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) +* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) +* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) + +### Status + +The Gorilla WebSocket package provides a complete and tested implementation of +the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The +package API is stable. + +### Installation + + go get github.com/gorilla/websocket + +### Protocol Compliance + +The Gorilla WebSocket package passes the server tests in the [Autobahn Test +Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn +subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). + diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go new file mode 100644 index 000000000..2efd83555 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client.go @@ -0,0 +1,422 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "context" + "crypto/tls" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptrace" + "net/url" + "strings" + "time" +) + +// ErrBadHandshake is returned when the server response to opening handshake is +// invalid. +var ErrBadHandshake = errors.New("websocket: bad handshake") + +var errInvalidCompression = errors.New("websocket: invalid compression negotiation") + +// NewClient creates a new client connection using the given net connection. +// The URL u specifies the host and request URI. Use requestHeader to specify +// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies +// (Cookie). Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). 
+// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etc. +// +// Deprecated: Use Dialer instead. +func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { + d := Dialer{ + ReadBufferSize: readBufSize, + WriteBufferSize: writeBufSize, + NetDial: func(net, addr string) (net.Conn, error) { + return netConn, nil + }, + } + return d.Dial(u.String(), requestHeader) +} + +// A Dialer contains options for connecting to WebSocket server. +// +// It is safe to call Dialer's methods concurrently. +type Dialer struct { + // NetDial specifies the dial function for creating TCP connections. If + // NetDial is nil, net.Dial is used. + NetDial func(network, addr string) (net.Conn, error) + + // NetDialContext specifies the dial function for creating TCP connections. If + // NetDialContext is nil, NetDial is used. + NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error) + + // NetDialTLSContext specifies the dial function for creating TLS/TCP connections. If + // NetDialTLSContext is nil, NetDialContext is used. + // If NetDialTLSContext is set, Dial assumes the TLS handshake is done there and + // TLSClientConfig is ignored. + NetDialTLSContext func(ctx context.Context, network, addr string) (net.Conn, error) + + // Proxy specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. + // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxy func(*http.Request) (*url.URL, error) + + // TLSClientConfig specifies the TLS configuration to use with tls.Client. + // If nil, the default configuration is used. + // If either NetDialTLS or NetDialTLSContext are set, Dial assumes the TLS handshake + // is done there and TLSClientConfig is ignored. + TLSClientConfig *tls.Config + + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer + // size is zero, then a useful default size is used. The I/O buffer sizes + // do not limit the size of the messages that can be sent or received. + ReadBufferSize, WriteBufferSize int + + // WriteBufferPool is a pool of buffers for write operations. If the value + // is not set, then write buffers are allocated to the connection for the + // lifetime of the connection. + // + // A pool is most useful when the application has a modest volume of writes + // across a large number of connections. + // + // Applications should use a single pool for each unique value of + // WriteBufferSize. + WriteBufferPool BufferPool + + // Subprotocols specifies the client's requested subprotocols. + Subprotocols []string + + // EnableCompression specifies if the client should attempt to negotiate + // per message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool + + // Jar specifies the cookie jar. + // If Jar is nil, cookies are not sent in requests and ignored + // in responses. + Jar http.CookieJar +} + +// Dial creates a new client connection by calling DialContext with a background context. 
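A hypothetical client-side sketch using the Dialer fields documented above; the URL, subprotocol name and timeouts are placeholders, not values taken from this repository:

package main

import (
	"context"
	"log"
	"time"

	"github.com/gorilla/websocket"
)

func main() {
	d := websocket.Dialer{
		HandshakeTimeout:  10 * time.Second,
		Subprotocols:      []string{"chat"},
		EnableCompression: true,
	}

	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	conn, resp, err := d.DialContext(ctx, "wss://example.com/ws", nil)
	if err != nil {
		// On handshake failure resp may be non-nil (e.g. a redirect or 401)
		// and can be inspected before giving up.
		if resp != nil {
			log.Fatalf("dial: %v (HTTP %d)", err, resp.StatusCode)
		}
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	log.Println("negotiated subprotocol:", conn.Subprotocol())
}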
+func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + return d.DialContext(context.Background(), urlStr, requestHeader) +} + +var errMalformedURL = errors.New("malformed ws or wss URL") + +func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { + hostPort = u.Host + hostNoPort = u.Host + if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { + hostNoPort = hostNoPort[:i] + } else { + switch u.Scheme { + case "wss": + hostPort += ":443" + case "https": + hostPort += ":443" + default: + hostPort += ":80" + } + } + return hostPort, hostNoPort +} + +// DefaultDialer is a dialer with all fields set to the default values. +var DefaultDialer = &Dialer{ + Proxy: http.ProxyFromEnvironment, + HandshakeTimeout: 45 * time.Second, +} + +// nilDialer is dialer to use when receiver is nil. +var nilDialer = *DefaultDialer + +// DialContext creates a new client connection. Use requestHeader to specify the +// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). +// Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// The context will be used in the request and in the Dialer. +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etcetera. The response body may not contain the entire response and does not +// need to be closed by the application. +func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + if d == nil { + d = &nilDialer + } + + challengeKey, err := generateChallengeKey() + if err != nil { + return nil, nil, err + } + + u, err := url.Parse(urlStr) + if err != nil { + return nil, nil, err + } + + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + default: + return nil, nil, errMalformedURL + } + + if u.User != nil { + // User name and password are not allowed in websocket URIs. + return nil, nil, errMalformedURL + } + + req := &http.Request{ + Method: http.MethodGet, + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: u.Host, + } + req = req.WithContext(ctx) + + // Set the cookies present in the cookie jar of the dialer + if d.Jar != nil { + for _, cookie := range d.Jar.Cookies(u) { + req.AddCookie(cookie) + } + } + + // Set the request headers using the capitalization for names and values in + // RFC examples. Although the capitalization shouldn't matter, there are + // servers that depend on it. The Header.Set method is not used because the + // method canonicalizes the header names. 
+ req.Header["Upgrade"] = []string{"websocket"} + req.Header["Connection"] = []string{"Upgrade"} + req.Header["Sec-WebSocket-Key"] = []string{challengeKey} + req.Header["Sec-WebSocket-Version"] = []string{"13"} + if len(d.Subprotocols) > 0 { + req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} + } + for k, vs := range requestHeader { + switch { + case k == "Host": + if len(vs) > 0 { + req.Host = vs[0] + } + case k == "Upgrade" || + k == "Connection" || + k == "Sec-Websocket-Key" || + k == "Sec-Websocket-Version" || + k == "Sec-Websocket-Extensions" || + (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): + return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) + case k == "Sec-Websocket-Protocol": + req.Header["Sec-WebSocket-Protocol"] = vs + default: + req.Header[k] = vs + } + } + + if d.EnableCompression { + req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"} + } + + if d.HandshakeTimeout != 0 { + var cancel func() + ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout) + defer cancel() + } + + // Get network dial function. + var netDial func(network, add string) (net.Conn, error) + + switch u.Scheme { + case "http": + if d.NetDialContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialContext(ctx, network, addr) + } + } else if d.NetDial != nil { + netDial = d.NetDial + } + case "https": + if d.NetDialTLSContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialTLSContext(ctx, network, addr) + } + } else if d.NetDialContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialContext(ctx, network, addr) + } + } else if d.NetDial != nil { + netDial = d.NetDial + } + default: + return nil, nil, errMalformedURL + } + + if netDial == nil { + netDialer := &net.Dialer{} + netDial = func(network, addr string) (net.Conn, error) { + return netDialer.DialContext(ctx, network, addr) + } + } + + // If needed, wrap the dial function to set the connection deadline. + if deadline, ok := ctx.Deadline(); ok { + forwardDial := netDial + netDial = func(network, addr string) (net.Conn, error) { + c, err := forwardDial(network, addr) + if err != nil { + return nil, err + } + err = c.SetDeadline(deadline) + if err != nil { + c.Close() + return nil, err + } + return c, nil + } + } + + // If needed, wrap the dial function to connect through a proxy. 
+ if d.Proxy != nil { + proxyURL, err := d.Proxy(req) + if err != nil { + return nil, nil, err + } + if proxyURL != nil { + dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial)) + if err != nil { + return nil, nil, err + } + netDial = dialer.Dial + } + } + + hostPort, hostNoPort := hostPortNoPort(u) + trace := httptrace.ContextClientTrace(ctx) + if trace != nil && trace.GetConn != nil { + trace.GetConn(hostPort) + } + + netConn, err := netDial("tcp", hostPort) + if trace != nil && trace.GotConn != nil { + trace.GotConn(httptrace.GotConnInfo{ + Conn: netConn, + }) + } + if err != nil { + return nil, nil, err + } + + defer func() { + if netConn != nil { + netConn.Close() + } + }() + + if u.Scheme == "https" && d.NetDialTLSContext == nil { + // If NetDialTLSContext is set, assume that the TLS handshake has already been done + + cfg := cloneTLSConfig(d.TLSClientConfig) + if cfg.ServerName == "" { + cfg.ServerName = hostNoPort + } + tlsConn := tls.Client(netConn, cfg) + netConn = tlsConn + + if trace != nil && trace.TLSHandshakeStart != nil { + trace.TLSHandshakeStart() + } + err := doHandshake(ctx, tlsConn, cfg) + if trace != nil && trace.TLSHandshakeDone != nil { + trace.TLSHandshakeDone(tlsConn.ConnectionState(), err) + } + + if err != nil { + return nil, nil, err + } + } + + conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil) + + if err := req.Write(netConn); err != nil { + return nil, nil, err + } + + if trace != nil && trace.GotFirstResponseByte != nil { + if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 { + trace.GotFirstResponseByte() + } + } + + resp, err := http.ReadResponse(conn.br, req) + if err != nil { + return nil, nil, err + } + + if d.Jar != nil { + if rc := resp.Cookies(); len(rc) > 0 { + d.Jar.SetCookies(u, rc) + } + } + + if resp.StatusCode != 101 || + !tokenListContainsValue(resp.Header, "Upgrade", "websocket") || + !tokenListContainsValue(resp.Header, "Connection", "upgrade") || + resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { + // Before closing the network connection on return from this + // function, slurp up some of the response to aid application + // debugging. + buf := make([]byte, 1024) + n, _ := io.ReadFull(resp.Body, buf) + resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) + return nil, resp, ErrBadHandshake + } + + for _, ext := range parseExtensions(resp.Header) { + if ext[""] != "permessage-deflate" { + continue + } + _, snct := ext["server_no_context_takeover"] + _, cnct := ext["client_no_context_takeover"] + if !snct || !cnct { + return nil, resp, errInvalidCompression + } + conn.newCompressionWriter = compressNoContextTakeover + conn.newDecompressionReader = decompressNoContextTakeover + break + } + + resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) + conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") + + netConn.SetDeadline(time.Time{}) + netConn = nil // to avoid close in defer. + return conn, resp, nil +} + +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return cfg.Clone() +} diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go new file mode 100644 index 000000000..813ffb1e8 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/compression.go @@ -0,0 +1,148 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "compress/flate" + "errors" + "io" + "strings" + "sync" +) + +const ( + minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 + maxCompressionLevel = flate.BestCompression + defaultCompressionLevel = 1 +) + +var ( + flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool + flateReaderPool = sync.Pool{New: func() interface{} { + return flate.NewReader(nil) + }} +) + +func decompressNoContextTakeover(r io.Reader) io.ReadCloser { + const tail = + // Add four bytes as specified in RFC + "\x00\x00\xff\xff" + + // Add final block to squelch unexpected EOF error from flate reader. + "\x01\x00\x00\xff\xff" + + fr, _ := flateReaderPool.Get().(io.ReadCloser) + fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) + return &flateReadWrapper{fr} +} + +func isValidCompressionLevel(level int) bool { + return minCompressionLevel <= level && level <= maxCompressionLevel +} + +func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { + p := &flateWriterPools[level-minCompressionLevel] + tw := &truncWriter{w: w} + fw, _ := p.Get().(*flate.Writer) + if fw == nil { + fw, _ = flate.NewWriter(tw, level) + } else { + fw.Reset(tw) + } + return &flateWriteWrapper{fw: fw, tw: tw, p: p} +} + +// truncWriter is an io.Writer that writes all but the last four bytes of the +// stream to another io.Writer. +type truncWriter struct { + w io.WriteCloser + n int + p [4]byte +} + +func (w *truncWriter) Write(p []byte) (int, error) { + n := 0 + + // fill buffer first for simplicity. + if w.n < len(w.p) { + n = copy(w.p[w.n:], p) + p = p[n:] + w.n += n + if len(p) == 0 { + return n, nil + } + } + + m := len(p) + if m > len(w.p) { + m = len(w.p) + } + + if nn, err := w.w.Write(w.p[:m]); err != nil { + return n + nn, err + } + + copy(w.p[:], w.p[m:]) + copy(w.p[len(w.p)-m:], p[len(p)-m:]) + nn, err := w.w.Write(p[:len(p)-m]) + return n + nn, err +} + +type flateWriteWrapper struct { + fw *flate.Writer + tw *truncWriter + p *sync.Pool +} + +func (w *flateWriteWrapper) Write(p []byte) (int, error) { + if w.fw == nil { + return 0, errWriteClosed + } + return w.fw.Write(p) +} + +func (w *flateWriteWrapper) Close() error { + if w.fw == nil { + return errWriteClosed + } + err1 := w.fw.Flush() + w.p.Put(w.fw) + w.fw = nil + if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { + return errors.New("websocket: internal error, unexpected bytes at end of flate stream") + } + err2 := w.tw.w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +type flateReadWrapper struct { + fr io.ReadCloser +} + +func (r *flateReadWrapper) Read(p []byte) (int, error) { + if r.fr == nil { + return 0, io.ErrClosedPipe + } + n, err := r.fr.Read(p) + if err == io.EOF { + // Preemptively place the reader back in the pool. This helps with + // scenarios where the application does not call NextReader() soon after + // this final read. + r.Close() + } + return n, err +} + +func (r *flateReadWrapper) Close() error { + if r.fr == nil { + return io.ErrClosedPipe + } + err := r.fr.Close() + flateReaderPool.Put(r.fr) + r.fr = nil + return err +} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go new file mode 100644 index 000000000..331eebc85 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -0,0 +1,1230 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "encoding/binary" + "errors" + "io" + "io/ioutil" + "math/rand" + "net" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" +) + +const ( + // Frame header byte 0 bits from Section 5.2 of RFC 6455 + finalBit = 1 << 7 + rsv1Bit = 1 << 6 + rsv2Bit = 1 << 5 + rsv3Bit = 1 << 4 + + // Frame header byte 1 bits from Section 5.2 of RFC 6455 + maskBit = 1 << 7 + + maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask + maxControlFramePayloadSize = 125 + + writeWait = time.Second + + defaultReadBufferSize = 4096 + defaultWriteBufferSize = 4096 + + continuationFrame = 0 + noFrame = -1 +) + +// Close codes defined in RFC 6455, section 11.7. +const ( + CloseNormalClosure = 1000 + CloseGoingAway = 1001 + CloseProtocolError = 1002 + CloseUnsupportedData = 1003 + CloseNoStatusReceived = 1005 + CloseAbnormalClosure = 1006 + CloseInvalidFramePayloadData = 1007 + ClosePolicyViolation = 1008 + CloseMessageTooBig = 1009 + CloseMandatoryExtension = 1010 + CloseInternalServerErr = 1011 + CloseServiceRestart = 1012 + CloseTryAgainLater = 1013 + CloseTLSHandshake = 1015 +) + +// The message types are defined in RFC 6455, section 11.8. +const ( + // TextMessage denotes a text data message. The text message payload is + // interpreted as UTF-8 encoded text data. + TextMessage = 1 + + // BinaryMessage denotes a binary data message. + BinaryMessage = 2 + + // CloseMessage denotes a close control message. The optional message + // payload contains a numeric code and text. Use the FormatCloseMessage + // function to format a close message payload. + CloseMessage = 8 + + // PingMessage denotes a ping control message. The optional message payload + // is UTF-8 encoded text. + PingMessage = 9 + + // PongMessage denotes a pong control message. The optional message payload + // is UTF-8 encoded text. + PongMessage = 10 +) + +// ErrCloseSent is returned when the application writes a message to the +// connection after sending a close message. +var ErrCloseSent = errors.New("websocket: close sent") + +// ErrReadLimit is returned when reading a message that is larger than the +// read limit set for the connection. +var ErrReadLimit = errors.New("websocket: read limit exceeded") + +// netError satisfies the net Error interface. +type netError struct { + msg string + temporary bool + timeout bool +} + +func (e *netError) Error() string { return e.msg } +func (e *netError) Temporary() bool { return e.temporary } +func (e *netError) Timeout() bool { return e.timeout } + +// CloseError represents a close message. +type CloseError struct { + // Code is defined in RFC 6455, section 11.7. + Code int + + // Text is the optional text payload. + Text string +} + +func (e *CloseError) Error() string { + s := []byte("websocket: close ") + s = strconv.AppendInt(s, int64(e.Code), 10) + switch e.Code { + case CloseNormalClosure: + s = append(s, " (normal)"...) + case CloseGoingAway: + s = append(s, " (going away)"...) + case CloseProtocolError: + s = append(s, " (protocol error)"...) + case CloseUnsupportedData: + s = append(s, " (unsupported data)"...) + case CloseNoStatusReceived: + s = append(s, " (no status)"...) + case CloseAbnormalClosure: + s = append(s, " (abnormal closure)"...) + case CloseInvalidFramePayloadData: + s = append(s, " (invalid payload data)"...) + case ClosePolicyViolation: + s = append(s, " (policy violation)"...) 
+ case CloseMessageTooBig: + s = append(s, " (message too big)"...) + case CloseMandatoryExtension: + s = append(s, " (mandatory extension missing)"...) + case CloseInternalServerErr: + s = append(s, " (internal server error)"...) + case CloseTLSHandshake: + s = append(s, " (TLS handshake error)"...) + } + if e.Text != "" { + s = append(s, ": "...) + s = append(s, e.Text...) + } + return string(s) +} + +// IsCloseError returns boolean indicating whether the error is a *CloseError +// with one of the specified codes. +func IsCloseError(err error, codes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range codes { + if e.Code == code { + return true + } + } + } + return false +} + +// IsUnexpectedCloseError returns boolean indicating whether the error is a +// *CloseError with a code not in the list of expected codes. +func IsUnexpectedCloseError(err error, expectedCodes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range expectedCodes { + if e.Code == code { + return false + } + } + return true + } + return false +} + +var ( + errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true} + errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()} + errBadWriteOpCode = errors.New("websocket: bad write message type") + errWriteClosed = errors.New("websocket: write closed") + errInvalidControlFrame = errors.New("websocket: invalid control frame") +) + +func newMaskKey() [4]byte { + n := rand.Uint32() + return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} +} + +func hideTempErr(err error) error { + if e, ok := err.(net.Error); ok && e.Temporary() { + err = &netError{msg: e.Error(), timeout: e.Timeout()} + } + return err +} + +func isControl(frameType int) bool { + return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage +} + +func isData(frameType int) bool { + return frameType == TextMessage || frameType == BinaryMessage +} + +var validReceivedCloseCodes = map[int]bool{ + // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number + + CloseNormalClosure: true, + CloseGoingAway: true, + CloseProtocolError: true, + CloseUnsupportedData: true, + CloseNoStatusReceived: false, + CloseAbnormalClosure: false, + CloseInvalidFramePayloadData: true, + ClosePolicyViolation: true, + CloseMessageTooBig: true, + CloseMandatoryExtension: true, + CloseInternalServerErr: true, + CloseServiceRestart: true, + CloseTryAgainLater: true, + CloseTLSHandshake: false, +} + +func isValidReceivedCloseCode(code int) bool { + return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) +} + +// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this +// interface. The type of the value stored in a pool is not specified. +type BufferPool interface { + // Get gets a value from the pool or returns nil if the pool is empty. + Get() interface{} + // Put adds a value to the pool. + Put(interface{}) +} + +// writePoolData is the type added to the write buffer pool. This wrapper is +// used to prevent applications from peeking at and depending on the values +// added to the pool. +type writePoolData struct{ buf []byte } + +// The Conn type represents a WebSocket connection. +type Conn struct { + conn net.Conn + isServer bool + subprotocol string + + // Write fields + mu chan struct{} // used as mutex to protect write to conn + writeBuf []byte // frame is constructed in this buffer. 
+ writePool BufferPool + writeBufSize int + writeDeadline time.Time + writer io.WriteCloser // the current writer returned to the application + isWriting bool // for best-effort concurrent write detection + + writeErrMu sync.Mutex + writeErr error + + enableWriteCompression bool + compressionLevel int + newCompressionWriter func(io.WriteCloser, int) io.WriteCloser + + // Read fields + reader io.ReadCloser // the current reader returned to the application + readErr error + br *bufio.Reader + // bytes remaining in current frame. + // set setReadRemaining to safely update this value and prevent overflow + readRemaining int64 + readFinal bool // true the current message has more frames. + readLength int64 // Message size. + readLimit int64 // Maximum message size. + readMaskPos int + readMaskKey [4]byte + handlePong func(string) error + handlePing func(string) error + handleClose func(int, string) error + readErrCount int + messageReader *messageReader // the current low-level reader + + readDecompress bool // whether last read frame had RSV1 set + newDecompressionReader func(io.Reader) io.ReadCloser +} + +func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn { + + if br == nil { + if readBufferSize == 0 { + readBufferSize = defaultReadBufferSize + } else if readBufferSize < maxControlFramePayloadSize { + // must be large enough for control frame + readBufferSize = maxControlFramePayloadSize + } + br = bufio.NewReaderSize(conn, readBufferSize) + } + + if writeBufferSize <= 0 { + writeBufferSize = defaultWriteBufferSize + } + writeBufferSize += maxFrameHeaderSize + + if writeBuf == nil && writeBufferPool == nil { + writeBuf = make([]byte, writeBufferSize) + } + + mu := make(chan struct{}, 1) + mu <- struct{}{} + c := &Conn{ + isServer: isServer, + br: br, + conn: conn, + mu: mu, + readFinal: true, + writeBuf: writeBuf, + writePool: writeBufferPool, + writeBufSize: writeBufferSize, + enableWriteCompression: true, + compressionLevel: defaultCompressionLevel, + } + c.SetCloseHandler(nil) + c.SetPingHandler(nil) + c.SetPongHandler(nil) + return c +} + +// setReadRemaining tracks the number of bytes remaining on the connection. If n +// overflows, an ErrReadLimit is returned. +func (c *Conn) setReadRemaining(n int64) error { + if n < 0 { + return ErrReadLimit + } + + c.readRemaining = n + return nil +} + +// Subprotocol returns the negotiated protocol for the connection. +func (c *Conn) Subprotocol() string { + return c.subprotocol +} + +// Close closes the underlying network connection without sending or waiting +// for a close message. +func (c *Conn) Close() error { + return c.conn.Close() +} + +// LocalAddr returns the local network address. +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +// RemoteAddr returns the remote network address. 
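newConn above primes mu, a buffered channel of capacity one, with a single token and uses it as the connection's write lock; a channel rather than sync.Mutex lets acquisition be bounded by a timer via select, which is how WriteControl (further below) honours its deadline. A standalone sketch of the same pattern under hypothetical names:

package main

import (
	"errors"
	"time"
)

type timedLock chan struct{}

func newTimedLock() timedLock {
	l := make(timedLock, 1)
	l <- struct{}{} // prime with one token: the lock starts out free
	return l
}

// Acquire takes the token or gives up after d, mirroring the select on
// c.mu and a timer in the connection's control-frame write path.
func (l timedLock) Acquire(d time.Duration) error {
	t := time.NewTimer(d)
	defer t.Stop()
	select {
	case <-l:
		return nil
	case <-t.C:
		return errors.New("lock acquisition timed out")
	}
}

func (l timedLock) Release() { l <- struct{}{} } // put the token back

func main() {
	l := newTimedLock()
	if err := l.Acquire(time.Second); err == nil {
		// ... critical section ...
		l.Release()
	}
}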
+func (c *Conn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +// Write methods + +func (c *Conn) writeFatal(err error) error { + err = hideTempErr(err) + c.writeErrMu.Lock() + if c.writeErr == nil { + c.writeErr = err + } + c.writeErrMu.Unlock() + return err +} + +func (c *Conn) read(n int) ([]byte, error) { + p, err := c.br.Peek(n) + if err == io.EOF { + err = errUnexpectedEOF + } + c.br.Discard(len(p)) + return p, err +} + +func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error { + <-c.mu + defer func() { c.mu <- struct{}{} }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + c.conn.SetWriteDeadline(deadline) + if len(buf1) == 0 { + _, err = c.conn.Write(buf0) + } else { + err = c.writeBufs(buf0, buf1) + } + if err != nil { + return c.writeFatal(err) + } + if frameType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return nil +} + +func (c *Conn) writeBufs(bufs ...[]byte) error { + b := net.Buffers(bufs) + _, err := b.WriteTo(c.conn) + return err +} + +// WriteControl writes a control message with the given deadline. The allowed +// message types are CloseMessage, PingMessage and PongMessage. +func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { + if !isControl(messageType) { + return errBadWriteOpCode + } + if len(data) > maxControlFramePayloadSize { + return errInvalidControlFrame + } + + b0 := byte(messageType) | finalBit + b1 := byte(len(data)) + if !c.isServer { + b1 |= maskBit + } + + buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) + buf = append(buf, b0, b1) + + if c.isServer { + buf = append(buf, data...) + } else { + key := newMaskKey() + buf = append(buf, key[:]...) + buf = append(buf, data...) + maskBytes(key, 0, buf[6:]) + } + + d := 1000 * time.Hour + if !deadline.IsZero() { + d = deadline.Sub(time.Now()) + if d < 0 { + return errWriteTimeout + } + } + + timer := time.NewTimer(d) + select { + case <-c.mu: + timer.Stop() + case <-timer.C: + return errWriteTimeout + } + defer func() { c.mu <- struct{}{} }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + c.conn.SetWriteDeadline(deadline) + _, err = c.conn.Write(buf) + if err != nil { + return c.writeFatal(err) + } + if messageType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return err +} + +// beginMessage prepares a connection and message writer for a new message. +func (c *Conn) beginMessage(mw *messageWriter, messageType int) error { + // Close previous writer if not already closed by the application. It's + // probably better to return an error in this situation, but we cannot + // change this without breaking existing applications. + if c.writer != nil { + c.writer.Close() + c.writer = nil + } + + if !isControl(messageType) && !isData(messageType) { + return errBadWriteOpCode + } + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + mw.c = c + mw.frameType = messageType + mw.pos = maxFrameHeaderSize + + if c.writeBuf == nil { + wpd, ok := c.writePool.Get().(writePoolData) + if ok { + c.writeBuf = wpd.buf + } else { + c.writeBuf = make([]byte, c.writeBufSize) + } + } + return nil +} + +// NextWriter returns a writer for the next message to send. The writer's Close +// method flushes the complete message to the network. +// +// There can be at most one open writer on a connection. 
NextWriter closes the +// previous writer if the application has not already done so. +// +// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and +// PongMessage) are supported. +func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { + var mw messageWriter + if err := c.beginMessage(&mw, messageType); err != nil { + return nil, err + } + c.writer = &mw + if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { + w := c.newCompressionWriter(c.writer, c.compressionLevel) + mw.compress = true + c.writer = w + } + return c.writer, nil +} + +type messageWriter struct { + c *Conn + compress bool // whether next call to flushFrame should set RSV1 + pos int // end of data in writeBuf. + frameType int // type of the current frame. + err error +} + +func (w *messageWriter) endMessage(err error) error { + if w.err != nil { + return err + } + c := w.c + w.err = err + c.writer = nil + if c.writePool != nil { + c.writePool.Put(writePoolData{buf: c.writeBuf}) + c.writeBuf = nil + } + return err +} + +// flushFrame writes buffered data and extra as a frame to the network. The +// final argument indicates that this is the last frame in the message. +func (w *messageWriter) flushFrame(final bool, extra []byte) error { + c := w.c + length := w.pos - maxFrameHeaderSize + len(extra) + + // Check for invalid control frames. + if isControl(w.frameType) && + (!final || length > maxControlFramePayloadSize) { + return w.endMessage(errInvalidControlFrame) + } + + b0 := byte(w.frameType) + if final { + b0 |= finalBit + } + if w.compress { + b0 |= rsv1Bit + } + w.compress = false + + b1 := byte(0) + if !c.isServer { + b1 |= maskBit + } + + // Assume that the frame starts at beginning of c.writeBuf. + framePos := 0 + if c.isServer { + // Adjust up if mask not included in the header. + framePos = 4 + } + + switch { + case length >= 65536: + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 127 + binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) + case length > 125: + framePos += 6 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 126 + binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) + default: + framePos += 8 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | byte(length) + } + + if !c.isServer { + key := newMaskKey() + copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) + maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) + if len(extra) > 0 { + return w.endMessage(c.writeFatal(errors.New("websocket: internal error, extra used in client mode"))) + } + } + + // Write the buffers to the connection with best-effort detection of + // concurrent writes. See the concurrency section in the package + // documentation for more info. + + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + + err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) + + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + + if err != nil { + return w.endMessage(err) + } + + if final { + w.endMessage(errWriteClosed) + return nil + } + + // Setup for next frame. 
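+	// Any further frames of this message are sent as continuation frames.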
+ w.pos = maxFrameHeaderSize + w.frameType = continuationFrame + return nil +} + +func (w *messageWriter) ncopy(max int) (int, error) { + n := len(w.c.writeBuf) - w.pos + if n <= 0 { + if err := w.flushFrame(false, nil); err != nil { + return 0, err + } + n = len(w.c.writeBuf) - w.pos + } + if n > max { + n = max + } + return n, nil +} + +func (w *messageWriter) Write(p []byte) (int, error) { + if w.err != nil { + return 0, w.err + } + + if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { + // Don't buffer large messages. + err := w.flushFrame(false, p) + if err != nil { + return 0, err + } + return len(p), nil + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) WriteString(p string) (int, error) { + if w.err != nil { + return 0, w.err + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { + if w.err != nil { + return 0, w.err + } + for { + if w.pos == len(w.c.writeBuf) { + err = w.flushFrame(false, nil) + if err != nil { + break + } + } + var n int + n, err = r.Read(w.c.writeBuf[w.pos:]) + w.pos += n + nn += int64(n) + if err != nil { + if err == io.EOF { + err = nil + } + break + } + } + return nn, err +} + +func (w *messageWriter) Close() error { + if w.err != nil { + return w.err + } + return w.flushFrame(true, nil) +} + +// WritePreparedMessage writes prepared message into connection. +func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { + frameType, frameData, err := pm.frame(prepareKey{ + isServer: c.isServer, + compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), + compressionLevel: c.compressionLevel, + }) + if err != nil { + return err + } + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + err = c.write(frameType, c.writeDeadline, frameData, nil) + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + return err +} + +// WriteMessage is a helper method for getting a writer using NextWriter, +// writing the message and closing the writer. +func (c *Conn) WriteMessage(messageType int, data []byte) error { + + if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { + // Fast path with no allocations and single frame. + + var mw messageWriter + if err := c.beginMessage(&mw, messageType); err != nil { + return err + } + n := copy(c.writeBuf[mw.pos:], data) + mw.pos += n + data = data[n:] + return mw.flushFrame(true, data) + } + + w, err := c.NextWriter(messageType) + if err != nil { + return err + } + if _, err = w.Write(data); err != nil { + return err + } + return w.Close() +} + +// SetWriteDeadline sets the write deadline on the underlying network +// connection. After a write has timed out, the websocket state is corrupt and +// all future writes will return an error. A zero value for t means writes will +// not time out. +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = t + return nil +} + +// Read methods + +func (c *Conn) advanceFrame() (int, error) { + // 1. Skip remainder of previous frame. 
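+	// (bytes remain here when the previous message reader was not fully drained)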
+ + if c.readRemaining > 0 { + if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { + return noFrame, err + } + } + + // 2. Read and parse first two bytes of frame header. + // To aid debugging, collect and report all errors in the first two bytes + // of the header. + + var errors []string + + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + frameType := int(p[0] & 0xf) + final := p[0]&finalBit != 0 + rsv1 := p[0]&rsv1Bit != 0 + rsv2 := p[0]&rsv2Bit != 0 + rsv3 := p[0]&rsv3Bit != 0 + mask := p[1]&maskBit != 0 + c.setReadRemaining(int64(p[1] & 0x7f)) + + c.readDecompress = false + if rsv1 { + if c.newDecompressionReader != nil { + c.readDecompress = true + } else { + errors = append(errors, "RSV1 set") + } + } + + if rsv2 { + errors = append(errors, "RSV2 set") + } + + if rsv3 { + errors = append(errors, "RSV3 set") + } + + switch frameType { + case CloseMessage, PingMessage, PongMessage: + if c.readRemaining > maxControlFramePayloadSize { + errors = append(errors, "len > 125 for control") + } + if !final { + errors = append(errors, "FIN not set on control") + } + case TextMessage, BinaryMessage: + if !c.readFinal { + errors = append(errors, "data before FIN") + } + c.readFinal = final + case continuationFrame: + if c.readFinal { + errors = append(errors, "continuation after FIN") + } + c.readFinal = final + default: + errors = append(errors, "bad opcode "+strconv.Itoa(frameType)) + } + + if mask != c.isServer { + errors = append(errors, "bad MASK") + } + + if len(errors) > 0 { + return noFrame, c.handleProtocolError(strings.Join(errors, ", ")) + } + + // 3. Read and parse frame length as per + // https://tools.ietf.org/html/rfc6455#section-5.2 + // + // The length of the "Payload data", in bytes: if 0-125, that is the payload + // length. + // - If 126, the following 2 bytes interpreted as a 16-bit unsigned + // integer are the payload length. + // - If 127, the following 8 bytes interpreted as + // a 64-bit unsigned integer (the most significant bit MUST be 0) are the + // payload length. Multibyte length quantities are expressed in network byte + // order. + + switch c.readRemaining { + case 126: + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + if err := c.setReadRemaining(int64(binary.BigEndian.Uint16(p))); err != nil { + return noFrame, err + } + case 127: + p, err := c.read(8) + if err != nil { + return noFrame, err + } + + if err := c.setReadRemaining(int64(binary.BigEndian.Uint64(p))); err != nil { + return noFrame, err + } + } + + // 4. Handle frame masking. + + if mask { + c.readMaskPos = 0 + p, err := c.read(len(c.readMaskKey)) + if err != nil { + return noFrame, err + } + copy(c.readMaskKey[:], p) + } + + // 5. For text and binary messages, enforce read limit and return. + + if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { + + c.readLength += c.readRemaining + // Don't allow readLength to overflow in the presence of a large readRemaining + // counter. + if c.readLength < 0 { + return noFrame, ErrReadLimit + } + + if c.readLimit > 0 && c.readLength > c.readLimit { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) + return noFrame, ErrReadLimit + } + + return frameType, nil + } + + // 6. Read control frame payload. 
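+	// (at most maxControlFramePayloadSize bytes, validated in step 2, so one buffered read suffices)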
+ + var payload []byte + if c.readRemaining > 0 { + payload, err = c.read(int(c.readRemaining)) + c.setReadRemaining(0) + if err != nil { + return noFrame, err + } + if c.isServer { + maskBytes(c.readMaskKey, 0, payload) + } + } + + // 7. Process control frame payload. + + switch frameType { + case PongMessage: + if err := c.handlePong(string(payload)); err != nil { + return noFrame, err + } + case PingMessage: + if err := c.handlePing(string(payload)); err != nil { + return noFrame, err + } + case CloseMessage: + closeCode := CloseNoStatusReceived + closeText := "" + if len(payload) >= 2 { + closeCode = int(binary.BigEndian.Uint16(payload)) + if !isValidReceivedCloseCode(closeCode) { + return noFrame, c.handleProtocolError("bad close code " + strconv.Itoa(closeCode)) + } + closeText = string(payload[2:]) + if !utf8.ValidString(closeText) { + return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") + } + } + if err := c.handleClose(closeCode, closeText); err != nil { + return noFrame, err + } + return noFrame, &CloseError{Code: closeCode, Text: closeText} + } + + return frameType, nil +} + +func (c *Conn) handleProtocolError(message string) error { + data := FormatCloseMessage(CloseProtocolError, message) + if len(data) > maxControlFramePayloadSize { + data = data[:maxControlFramePayloadSize] + } + c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)) + return errors.New("websocket: " + message) +} + +// NextReader returns the next data message received from the peer. The +// returned messageType is either TextMessage or BinaryMessage. +// +// There can be at most one open reader on a connection. NextReader discards +// the previous message if the application has not already consumed it. +// +// Applications must break out of the application's read loop when this method +// returns a non-nil error value. Errors returned from this method are +// permanent. Once this method returns a non-nil error, all subsequent calls to +// this method return the same error. +func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { + // Close previous reader, only relevant for decompression. + if c.reader != nil { + c.reader.Close() + c.reader = nil + } + + c.messageReader = nil + c.readLength = 0 + + for c.readErr == nil { + frameType, err := c.advanceFrame() + if err != nil { + c.readErr = hideTempErr(err) + break + } + + if frameType == TextMessage || frameType == BinaryMessage { + c.messageReader = &messageReader{c} + c.reader = c.messageReader + if c.readDecompress { + c.reader = c.newDecompressionReader(c.reader) + } + return frameType, c.reader, nil + } + } + + // Applications that do handle the error returned from this method spin in + // tight loop on connection failure. To help application developers detect + // this error, panic on repeated reads to the failed connection. 
+ c.readErrCount++ + if c.readErrCount >= 1000 { + panic("repeated read on failed websocket connection") + } + + return noFrame, nil, c.readErr +} + +type messageReader struct{ c *Conn } + +func (r *messageReader) Read(b []byte) (int, error) { + c := r.c + if c.messageReader != r { + return 0, io.EOF + } + + for c.readErr == nil { + + if c.readRemaining > 0 { + if int64(len(b)) > c.readRemaining { + b = b[:c.readRemaining] + } + n, err := c.br.Read(b) + c.readErr = hideTempErr(err) + if c.isServer { + c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) + } + rem := c.readRemaining + rem -= int64(n) + c.setReadRemaining(rem) + if c.readRemaining > 0 && c.readErr == io.EOF { + c.readErr = errUnexpectedEOF + } + return n, c.readErr + } + + if c.readFinal { + c.messageReader = nil + return 0, io.EOF + } + + frameType, err := c.advanceFrame() + switch { + case err != nil: + c.readErr = hideTempErr(err) + case frameType == TextMessage || frameType == BinaryMessage: + c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") + } + } + + err := c.readErr + if err == io.EOF && c.messageReader == r { + err = errUnexpectedEOF + } + return 0, err +} + +func (r *messageReader) Close() error { + return nil +} + +// ReadMessage is a helper method for getting a reader using NextReader and +// reading from that reader to a buffer. +func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { + var r io.Reader + messageType, r, err = c.NextReader() + if err != nil { + return messageType, nil, err + } + p, err = ioutil.ReadAll(r) + return messageType, p, err +} + +// SetReadDeadline sets the read deadline on the underlying network connection. +// After a read has timed out, the websocket connection state is corrupt and +// all future reads will return an error. A zero value for t means reads will +// not time out. +func (c *Conn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +// SetReadLimit sets the maximum size in bytes for a message read from the peer. If a +// message exceeds the limit, the connection sends a close message to the peer +// and returns ErrReadLimit to the application. +func (c *Conn) SetReadLimit(limit int64) { + c.readLimit = limit +} + +// CloseHandler returns the current close handler +func (c *Conn) CloseHandler() func(code int, text string) error { + return c.handleClose +} + +// SetCloseHandler sets the handler for close messages received from the peer. +// The code argument to h is the received close code or CloseNoStatusReceived +// if the close message is empty. The default close handler sends a close +// message back to the peer. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// close messages as described in the section on Control Messages above. +// +// The connection read methods return a CloseError when a close message is +// received. Most applications should handle close messages as part of their +// normal error handling. Applications should only set a close handler when the +// application must perform some action before sending a close message back to +// the peer. 
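A hypothetical read-loop sketch combining the read-side APIs above: a read limit, a sliding read deadline, and IsUnexpectedCloseError to log only abnormal closures. Connection setup is elided; the limit and timeout values are placeholders.

package main

import (
	"log"
	"time"

	"github.com/gorilla/websocket"
)

func readLoop(conn *websocket.Conn) {
	conn.SetReadLimit(1 << 20) // refuse messages larger than 1 MiB
	for {
		conn.SetReadDeadline(time.Now().Add(60 * time.Second))
		msgType, msg, err := conn.ReadMessage()
		if err != nil {
			if websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) {
				log.Printf("unexpected close: %v", err)
			}
			return // errors from the read methods are permanent
		}
		log.Printf("received %d-byte message of type %d", len(msg), msgType)
	}
}

func main() {} // dialing or upgrading a connection is elided in this sketch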
+func (c *Conn) SetCloseHandler(h func(code int, text string) error) { + if h == nil { + h = func(code int, text string) error { + message := FormatCloseMessage(code, "") + c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) + return nil + } + } + c.handleClose = h +} + +// PingHandler returns the current ping handler +func (c *Conn) PingHandler() func(appData string) error { + return c.handlePing +} + +// SetPingHandler sets the handler for ping messages received from the peer. +// The appData argument to h is the PING message application data. The default +// ping handler sends a pong to the peer. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// ping messages as described in the section on Control Messages above. +func (c *Conn) SetPingHandler(h func(appData string) error) { + if h == nil { + h = func(message string) error { + err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) + if err == ErrCloseSent { + return nil + } else if e, ok := err.(net.Error); ok && e.Temporary() { + return nil + } + return err + } + } + c.handlePing = h +} + +// PongHandler returns the current pong handler +func (c *Conn) PongHandler() func(appData string) error { + return c.handlePong +} + +// SetPongHandler sets the handler for pong messages received from the peer. +// The appData argument to h is the PONG message application data. The default +// pong handler does nothing. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// pong messages as described in the section on Control Messages above. +func (c *Conn) SetPongHandler(h func(appData string) error) { + if h == nil { + h = func(string) error { return nil } + } + c.handlePong = h +} + +// UnderlyingConn returns the internal net.Conn. This can be used to further +// modifications to connection specific flags. +func (c *Conn) UnderlyingConn() net.Conn { + return c.conn +} + +// EnableWriteCompression enables and disables write compression of +// subsequent text and binary messages. This function is a noop if +// compression was not negotiated with the peer. +func (c *Conn) EnableWriteCompression(enable bool) { + c.enableWriteCompression = enable +} + +// SetCompressionLevel sets the flate compression level for subsequent text and +// binary messages. This function is a noop if compression was not negotiated +// with the peer. See the compress/flate package for a description of +// compression levels. +func (c *Conn) SetCompressionLevel(level int) error { + if !isValidCompressionLevel(level) { + return errors.New("websocket: invalid compression level") + } + c.compressionLevel = level + return nil +} + +// FormatCloseMessage formats closeCode and text as a WebSocket close message. +// An empty message is returned for code CloseNoStatusReceived. +func FormatCloseMessage(closeCode int, text string) []byte { + if closeCode == CloseNoStatusReceived { + // Return empty message because it's illegal to send + // CloseNoStatusReceived. Return non-nil value in case application + // checks for nil. 
+ return []byte{} + } + buf := make([]byte, 2+len(text)) + binary.BigEndian.PutUint16(buf, uint16(closeCode)) + copy(buf[2:], text) + return buf +} diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go new file mode 100644 index 000000000..8db0cef95 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/doc.go @@ -0,0 +1,227 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements the WebSocket protocol defined in RFC 6455. +// +// Overview +// +// The Conn type represents a WebSocket connection. A server application calls +// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn: +// +// var upgrader = websocket.Upgrader{ +// ReadBufferSize: 1024, +// WriteBufferSize: 1024, +// } +// +// func handler(w http.ResponseWriter, r *http.Request) { +// conn, err := upgrader.Upgrade(w, r, nil) +// if err != nil { +// log.Println(err) +// return +// } +// ... Use conn to send and receive messages. +// } +// +// Call the connection's WriteMessage and ReadMessage methods to send and +// receive messages as a slice of bytes. This snippet of code shows how to echo +// messages using these methods: +// +// for { +// messageType, p, err := conn.ReadMessage() +// if err != nil { +// log.Println(err) +// return +// } +// if err := conn.WriteMessage(messageType, p); err != nil { +// log.Println(err) +// return +// } +// } +// +// In above snippet of code, p is a []byte and messageType is an int with value +// websocket.BinaryMessage or websocket.TextMessage. +// +// An application can also send and receive messages using the io.WriteCloser +// and io.Reader interfaces. To send a message, call the connection NextWriter +// method to get an io.WriteCloser, write the message to the writer and close +// the writer when done. To receive a message, call the connection NextReader +// method to get an io.Reader and read until io.EOF is returned. This snippet +// shows how to echo messages using the NextWriter and NextReader methods: +// +// for { +// messageType, r, err := conn.NextReader() +// if err != nil { +// return +// } +// w, err := conn.NextWriter(messageType) +// if err != nil { +// return err +// } +// if _, err := io.Copy(w, r); err != nil { +// return err +// } +// if err := w.Close(); err != nil { +// return err +// } +// } +// +// Data Messages +// +// The WebSocket protocol distinguishes between text and binary data messages. +// Text messages are interpreted as UTF-8 encoded text. The interpretation of +// binary messages is left to the application. +// +// This package uses the TextMessage and BinaryMessage integer constants to +// identify the two data message types. The ReadMessage and NextReader methods +// return the type of the received message. The messageType argument to the +// WriteMessage and NextWriter methods specifies the type of a sent message. +// +// It is the application's responsibility to ensure that text messages are +// valid UTF-8 encoded text. +// +// Control Messages +// +// The WebSocket protocol defines three types of control messages: close, ping +// and pong. Call the connection WriteControl, WriteMessage or NextWriter +// methods to send a control message to the peer. 
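+//
+// For example, an application-initiated ping could look like this (a sketch;
+// conn is an established *Conn and the deadline value is illustrative):
+//
+//	deadline := time.Now().Add(10 * time.Second)
+//	if err := conn.WriteControl(websocket.PingMessage, []byte("keepalive"), deadline); err != nil {
+//		log.Println("write ping:", err)
+//	}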
+// +// Connections handle received close messages by calling the handler function +// set with the SetCloseHandler method and by returning a *CloseError from the +// NextReader, ReadMessage or the message Read method. The default close +// handler sends a close message to the peer. +// +// Connections handle received ping messages by calling the handler function +// set with the SetPingHandler method. The default ping handler sends a pong +// message to the peer. +// +// Connections handle received pong messages by calling the handler function +// set with the SetPongHandler method. The default pong handler does nothing. +// If an application sends ping messages, then the application should set a +// pong handler to receive the corresponding pong. +// +// The control message handler functions are called from the NextReader, +// ReadMessage and message reader Read methods. The default close and ping +// handlers can block these methods for a short time when the handler writes to +// the connection. +// +// The application must read the connection to process close, ping and pong +// messages sent from the peer. If the application is not otherwise interested +// in messages from the peer, then the application should start a goroutine to +// read and discard messages from the peer. A simple example is: +// +// func readLoop(c *websocket.Conn) { +// for { +// if _, _, err := c.NextReader(); err != nil { +// c.Close() +// break +// } +// } +// } +// +// Concurrency +// +// Connections support one concurrent reader and one concurrent writer. +// +// Applications are responsible for ensuring that no more than one goroutine +// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, +// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and +// that no more than one goroutine calls the read methods (NextReader, +// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) +// concurrently. +// +// The Close and WriteControl methods can be called concurrently with all other +// methods. +// +// Origin Considerations +// +// Web browsers allow Javascript applications to open a WebSocket connection to +// any host. It's up to the server to enforce an origin policy using the Origin +// request header sent by the browser. +// +// The Upgrader calls the function specified in the CheckOrigin field to check +// the origin. If the CheckOrigin function returns false, then the Upgrade +// method fails the WebSocket handshake with HTTP status 403. +// +// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail +// the handshake if the Origin request header is present and the Origin host is +// not equal to the Host request header. +// +// The deprecated package-level Upgrade function does not perform origin +// checking. The application is responsible for checking the Origin header +// before calling the Upgrade function. +// +// Buffers +// +// Connections buffer network input and output to reduce the number +// of system calls when reading or writing messages. +// +// Write buffers are also used for constructing WebSocket frames. See RFC 6455, +// Section 5 for a discussion of message framing. A WebSocket frame header is +// written to the network each time a write buffer is flushed to the network. +// Decreasing the size of the write buffer can increase the amount of framing +// overhead on the connection. +// +// The buffer sizes in bytes are specified by the ReadBufferSize and +// WriteBufferSize fields in the Dialer and Upgrader. 
The Dialer uses a default
+// size of 4096 when a buffer size field is set to zero. The Upgrader reuses
+// buffers created by the HTTP server when a buffer size field is set to zero.
+// The HTTP server buffers have a size of 4096 at the time of this writing.
+//
+// The buffer sizes do not limit the size of a message that can be read or
+// written by a connection.
+//
+// Buffers are held for the lifetime of the connection by default. If the
+// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the
+// write buffer only when writing a message.
+//
+// Applications should tune the buffer sizes to balance memory use and
+// performance. Increasing the buffer size uses more memory, but can reduce the
+// number of system calls to read or write the network. In the case of writing,
+// increasing the buffer size can reduce the number of frame headers written to
+// the network.
+//
+// Some guidelines for setting buffer parameters are:
+//
+// Limit the buffer sizes to the maximum expected message size. Buffers larger
+// than the largest message do not provide any benefit.
+//
+// Depending on the distribution of message sizes, setting the buffer size to
+// a value less than the maximum expected message size can greatly reduce memory
+// use with a small impact on performance. Here's an example: If 99% of the
+// messages are smaller than 256 bytes and the maximum message size is 512
+// bytes, then a buffer size of 256 bytes will result in 1.01 times as many
+// system calls as a buffer size of 512 bytes. The memory savings is 50%.
+//
+// A write buffer pool is useful when the application has a modest number of
+// writes over a large number of connections. When buffers are pooled, a larger
+// buffer size has a reduced impact on total memory use and has the benefit of
+// reducing system calls and frame overhead.
+//
+// Compression EXPERIMENTAL
+//
+// Per message compression extensions (RFC 7692) are experimentally supported
+// by this package in a limited capacity. Setting the EnableCompression option
+// to true in Dialer or Upgrader will attempt to negotiate per message deflate
+// support.
+//
+//	var upgrader = websocket.Upgrader{
+//		EnableCompression: true,
+//	}
+//
+// If compression was successfully negotiated with the connection's peer, any
+// message received in compressed form will be automatically decompressed.
+// All Read methods will return uncompressed bytes.
+//
+// Per message compression of messages written to a connection can be enabled
+// or disabled by calling the corresponding Conn method:
+//
+//	conn.EnableWriteCompression(false)
+//
+// Currently this package does not support compression with "context takeover".
+// This means that messages must be compressed and decompressed in isolation,
+// without retaining sliding window or dictionary state across messages. For
+// more details refer to RFC 7692.
+//
+// Use of compression is experimental and may result in decreased performance.
+package websocket
diff --git a/vendor/github.com/gorilla/websocket/join.go b/vendor/github.com/gorilla/websocket/join.go
new file mode 100644
index 000000000..c64f8c829
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/join.go
@@ -0,0 +1,42 @@
+// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"io"
+	"strings"
+)
+
+// JoinMessages concatenates received messages to create a single io.Reader.
+// The string term is appended to each message. The returned reader does not +// support concurrent calls to the Read method. +func JoinMessages(c *Conn, term string) io.Reader { + return &joinReader{c: c, term: term} +} + +type joinReader struct { + c *Conn + term string + r io.Reader +} + +func (r *joinReader) Read(p []byte) (int, error) { + if r.r == nil { + var err error + _, r.r, err = r.c.NextReader() + if err != nil { + return 0, err + } + if r.term != "" { + r.r = io.MultiReader(r.r, strings.NewReader(r.term)) + } + } + n, err := r.r.Read(p) + if err == io.EOF { + err = nil + r.r = nil + } + return n, err +} diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go new file mode 100644 index 000000000..dc2c1f641 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/json.go @@ -0,0 +1,60 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "encoding/json" + "io" +) + +// WriteJSON writes the JSON encoding of v as a message. +// +// Deprecated: Use c.WriteJSON instead. +func WriteJSON(c *Conn, v interface{}) error { + return c.WriteJSON(v) +} + +// WriteJSON writes the JSON encoding of v as a message. +// +// See the documentation for encoding/json Marshal for details about the +// conversion of Go values to JSON. +func (c *Conn) WriteJSON(v interface{}) error { + w, err := c.NextWriter(TextMessage) + if err != nil { + return err + } + err1 := json.NewEncoder(w).Encode(v) + err2 := w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// Deprecated: Use c.ReadJSON instead. +func ReadJSON(c *Conn, v interface{}) error { + return c.ReadJSON(v) +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// See the documentation for the encoding/json Unmarshal function for details +// about the conversion of JSON to a Go value. +func (c *Conn) ReadJSON(v interface{}) error { + _, r, err := c.NextReader() + if err != nil { + return err + } + err = json.NewDecoder(r).Decode(v) + if err == io.EOF { + // One value is expected in the message. + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go new file mode 100644 index 000000000..d0742bf2a --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -0,0 +1,55 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +//go:build !appengine +// +build !appengine + +package websocket + +import "unsafe" + +const wordSize = int(unsafe.Sizeof(uintptr(0))) + +func maskBytes(key [4]byte, pos int, b []byte) int { + // Mask one byte at a time for small buffers. + if len(b) < 2*wordSize { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 + } + + // Mask one byte at a time to word boundary. + if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { + n = wordSize - n + for i := range b[:n] { + b[i] ^= key[pos&3] + pos++ + } + b = b[n:] + } + + // Create aligned word size key. 
+ var k [wordSize]byte + for i := range k { + k[i] = key[(pos+i)&3] + } + kw := *(*uintptr)(unsafe.Pointer(&k)) + + // Mask one word at a time. + n := (len(b) / wordSize) * wordSize + for i := 0; i < n; i += wordSize { + *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw + } + + // Mask one byte at a time for remaining bytes. + b = b[n:] + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go new file mode 100644 index 000000000..36250ca7c --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask_safe.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +//go:build appengine +// +build appengine + +package websocket + +func maskBytes(key [4]byte, pos int, b []byte) int { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go new file mode 100644 index 000000000..c854225e9 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/prepared.go @@ -0,0 +1,102 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "net" + "sync" + "time" +) + +// PreparedMessage caches on the wire representations of a message payload. +// Use PreparedMessage to efficiently send a message payload to multiple +// connections. PreparedMessage is especially useful when compression is used +// because the CPU and memory expensive compression operation can be executed +// once for a given set of compression options. +type PreparedMessage struct { + messageType int + data []byte + mu sync.Mutex + frames map[prepareKey]*preparedFrame +} + +// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage. +type prepareKey struct { + isServer bool + compress bool + compressionLevel int +} + +// preparedFrame contains data in wire representation. +type preparedFrame struct { + once sync.Once + data []byte +} + +// NewPreparedMessage returns an initialized PreparedMessage. You can then send +// it to connection using WritePreparedMessage method. Valid wire +// representation will be calculated lazily only once for a set of current +// connection options. +func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) { + pm := &PreparedMessage{ + messageType: messageType, + frames: make(map[prepareKey]*preparedFrame), + data: data, + } + + // Prepare a plain server frame. + _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false}) + if err != nil { + return nil, err + } + + // To protect against caller modifying the data argument, remember the data + // copied to the plain server frame. + pm.data = frameData[len(frameData)-len(data):] + return pm, nil +} + +func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { + pm.mu.Lock() + frame, ok := pm.frames[key] + if !ok { + frame = &preparedFrame{} + pm.frames[key] = frame + } + pm.mu.Unlock() + + var err error + frame.once.Do(func() { + // Prepare a frame using a 'fake' connection. + // TODO: Refactor code in conn.go to allow more direct construction of + // the frame. 
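+		// The prepareConn defined below only records the bytes written to it,
+		// so WriteMessage on this throwaway Conn captures the frame's wire
+		// bytes without touching a real network connection.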
+		mu := make(chan struct{}, 1)
+		mu <- struct{}{}
+		var nc prepareConn
+		c := &Conn{
+			conn:                   &nc,
+			mu:                     mu,
+			isServer:               key.isServer,
+			compressionLevel:       key.compressionLevel,
+			enableWriteCompression: true,
+			writeBuf:               make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
+		}
+		if key.compress {
+			c.newCompressionWriter = compressNoContextTakeover
+		}
+		err = c.WriteMessage(pm.messageType, pm.data)
+		frame.data = nc.buf.Bytes()
+	})
+	return pm.messageType, frame.data, err
+}
+
+type prepareConn struct {
+	buf bytes.Buffer
+	net.Conn
+}
+
+func (pc *prepareConn) Write(p []byte) (int, error)        { return pc.buf.Write(p) }
+func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go
new file mode 100644
index 000000000..e0f466b72
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/proxy.go
@@ -0,0 +1,77 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"bufio"
+	"encoding/base64"
+	"errors"
+	"net"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+type netDialerFunc func(network, addr string) (net.Conn, error)
+
+func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
+	return fn(network, addr)
+}
+
+func init() {
+	proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
+		return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil
+	})
+}
+
+type httpProxyDialer struct {
+	proxyURL    *url.URL
+	forwardDial func(network, addr string) (net.Conn, error)
+}
+
+func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
+	hostPort, _ := hostPortNoPort(hpd.proxyURL)
+	conn, err := hpd.forwardDial(network, hostPort)
+	if err != nil {
+		return nil, err
+	}
+
+	connectHeader := make(http.Header)
+	if user := hpd.proxyURL.User; user != nil {
+		proxyUser := user.Username()
+		if proxyPassword, passwordSet := user.Password(); passwordSet {
+			credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
+			connectHeader.Set("Proxy-Authorization", "Basic "+credential)
+		}
+	}
+
+	connectReq := &http.Request{
+		Method: http.MethodConnect,
+		URL:    &url.URL{Opaque: addr},
+		Host:   addr,
+		Header: connectHeader,
+	}
+
+	if err := connectReq.Write(conn); err != nil {
+		conn.Close()
+		return nil, err
+	}
+
+	// Read response. It's OK to use and discard buffered reader here because
+	// the remote server does not speak until spoken to.
+	br := bufio.NewReader(conn)
+	resp, err := http.ReadResponse(br, connectReq)
+	if err != nil {
+		conn.Close()
+		return nil, err
+	}
+
+	if resp.StatusCode != 200 {
+		conn.Close()
+		f := strings.SplitN(resp.Status, " ", 2)
+		return nil, errors.New(f[1])
+	}
+	return conn, nil
+}
diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go
new file mode 100644
index 000000000..24d53b38a
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/server.go
@@ -0,0 +1,365 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"bufio"
+	"errors"
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+)
+
+// HandshakeError describes an error with the handshake from the peer.
+type HandshakeError struct {
+	message string
+}
+
+func (e HandshakeError) Error() string { return e.message }
+
+// Upgrader specifies parameters for upgrading an HTTP connection to a
+// WebSocket connection.
+//
+// It is safe to call Upgrader's methods concurrently.
+type Upgrader struct {
+	// HandshakeTimeout specifies the duration for the handshake to complete.
+	HandshakeTimeout time.Duration
+
+	// ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
+	// size is zero, then buffers allocated by the HTTP server are used. The
+	// I/O buffer sizes do not limit the size of the messages that can be sent
+	// or received.
+	ReadBufferSize, WriteBufferSize int
+
+	// WriteBufferPool is a pool of buffers for write operations. If the value
+	// is not set, then write buffers are allocated to the connection for the
+	// lifetime of the connection.
+	//
+	// A pool is most useful when the application has a modest volume of writes
+	// across a large number of connections.
+	//
+	// Applications should use a single pool for each unique value of
+	// WriteBufferSize.
+	WriteBufferPool BufferPool
+
+	// Subprotocols specifies the server's supported protocols in order of
+	// preference. If this field is not nil, then the Upgrade method negotiates a
+	// subprotocol by selecting the first match in this list with a protocol
+	// requested by the client. If there's no match, then no protocol is
+	// negotiated (the Sec-Websocket-Protocol header is not included in the
+	// handshake response).
+	Subprotocols []string
+
+	// Error specifies the function for generating HTTP error responses. If Error
+	// is nil, then http.Error is used to generate the HTTP response.
+	Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
+
+	// CheckOrigin returns true if the request Origin header is acceptable. If
+	// CheckOrigin is nil, then a safe default is used: return false if the
+	// Origin request header is present and the origin host is not equal to
+	// the request Host header.
+	//
+	// A CheckOrigin function should carefully validate the request origin to
+	// prevent cross-site request forgery.
+	CheckOrigin func(r *http.Request) bool
+
+	// EnableCompression specifies if the server should attempt to negotiate per
+	// message compression (RFC 7692). Setting this value to true does not
+	// guarantee that compression will be supported. Currently only "no context
+	// takeover" modes are supported.
+	EnableCompression bool
+}
+
+func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
+	err := HandshakeError{reason}
+	if u.Error != nil {
+		u.Error(w, r, status, err)
+	} else {
+		w.Header().Set("Sec-Websocket-Version", "13")
+		http.Error(w, http.StatusText(status), status)
+	}
+	return nil, err
+}
+
+// checkSameOrigin returns true if the origin is not set or is equal to the request host.
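+// This is the default check used when Upgrader.CheckOrigin is nil. An
+// application that needs a different policy supplies its own function, for
+// example (a sketch; the allowed origin is illustrative, not a package default):
+//
+//	var upgrader = websocket.Upgrader{
+//		CheckOrigin: func(r *http.Request) bool {
+//			return r.Header.Get("Origin") == "https://app.example.com"
+//		},
+//	}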
+func checkSameOrigin(r *http.Request) bool { + origin := r.Header["Origin"] + if len(origin) == 0 { + return true + } + u, err := url.Parse(origin[0]) + if err != nil { + return false + } + return equalASCIIFold(u.Host, r.Host) +} + +func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { + if u.Subprotocols != nil { + clientProtocols := Subprotocols(r) + for _, serverProtocol := range u.Subprotocols { + for _, clientProtocol := range clientProtocols { + if clientProtocol == serverProtocol { + return clientProtocol + } + } + } + } else if responseHeader != nil { + return responseHeader.Get("Sec-Websocket-Protocol") + } + return "" +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie). To specify +// subprotocols supported by the server, set Upgrader.Subprotocols directly. +// +// If the upgrade fails, then Upgrade replies to the client with an HTTP error +// response. +func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { + const badHandshake = "websocket: the client is not using the websocket protocol: " + + if !tokenListContainsValue(r.Header, "Connection", "upgrade") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header") + } + + if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header") + } + + if r.Method != http.MethodGet { + return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET") + } + + if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") + } + + if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported") + } + + checkOrigin := u.CheckOrigin + if checkOrigin == nil { + checkOrigin = checkSameOrigin + } + if !checkOrigin(r) { + return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin") + } + + challengeKey := r.Header.Get("Sec-Websocket-Key") + if challengeKey == "" { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank") + } + + subprotocol := u.selectSubprotocol(r, responseHeader) + + // Negotiate PMCE + var compress bool + if u.EnableCompression { + for _, ext := range parseExtensions(r.Header) { + if ext[""] != "permessage-deflate" { + continue + } + compress = true + break + } + } + + h, ok := w.(http.Hijacker) + if !ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker") + } + var brw *bufio.ReadWriter + netConn, brw, err := h.Hijack() + if err != nil { + return u.returnError(w, r, http.StatusInternalServerError, err.Error()) + } + + if brw.Reader.Buffered() > 0 { + netConn.Close() + return nil, errors.New("websocket: client sent data before handshake is complete") + } + + var br *bufio.Reader + if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 { + // Reuse hijacked buffered 
reader as connection reader. + br = brw.Reader + } + + buf := bufioWriterBuffer(netConn, brw.Writer) + + var writeBuf []byte + if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 { + // Reuse hijacked write buffer as connection buffer. + writeBuf = buf + } + + c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf) + c.subprotocol = subprotocol + + if compress { + c.newCompressionWriter = compressNoContextTakeover + c.newDecompressionReader = decompressNoContextTakeover + } + + // Use larger of hijacked buffer and connection write buffer for header. + p := buf + if len(c.writeBuf) > len(p) { + p = c.writeBuf + } + p = p[:0] + + p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) + p = append(p, computeAcceptKey(challengeKey)...) + p = append(p, "\r\n"...) + if c.subprotocol != "" { + p = append(p, "Sec-WebSocket-Protocol: "...) + p = append(p, c.subprotocol...) + p = append(p, "\r\n"...) + } + if compress { + p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) + } + for k, vs := range responseHeader { + if k == "Sec-Websocket-Protocol" { + continue + } + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) + for i := 0; i < len(v); i++ { + b := v[i] + if b <= 31 { + // prevent response splitting. + b = ' ' + } + p = append(p, b) + } + p = append(p, "\r\n"...) + } + } + p = append(p, "\r\n"...) + + // Clear deadlines set by HTTP server. + netConn.SetDeadline(time.Time{}) + + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) + } + if _, err = netConn.Write(p); err != nil { + netConn.Close() + return nil, err + } + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Time{}) + } + + return c, nil +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// Deprecated: Use websocket.Upgrader instead. +// +// Upgrade does not perform origin checking. The application is responsible for +// checking the Origin header before calling Upgrade. An example implementation +// of the same origin policy check is: +// +// if req.Header.Get("Origin") != "http://"+req.Host { +// http.Error(w, "Origin not allowed", http.StatusForbidden) +// return +// } +// +// If the endpoint supports subprotocols, then the application is responsible +// for negotiating the protocol used on the connection. Use the Subprotocols() +// function to get the subprotocols requested by the client. Use the +// Sec-Websocket-Protocol response header to specify the subprotocol selected +// by the application. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// negotiated subprotocol (Sec-Websocket-Protocol). +// +// The connection buffers IO to the underlying network connection. The +// readBufSize and writeBufSize parameters specify the size of the buffers to +// use. Messages can be larger than the buffers. +// +// If the request is not a valid WebSocket handshake, then Upgrade returns an +// error of type HandshakeError. Applications should handle this error by +// replying to the client with an HTTP error response. 
+func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
+	u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
+	u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
+		// don't return errors to maintain backwards compatibility
+	}
+	u.CheckOrigin = func(r *http.Request) bool {
+		// allow all connections by default
+		return true
+	}
+	return u.Upgrade(w, r, responseHeader)
+}
+
+// Subprotocols returns the subprotocols requested by the client in the
+// Sec-Websocket-Protocol header.
+func Subprotocols(r *http.Request) []string {
+	h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
+	if h == "" {
+		return nil
+	}
+	protocols := strings.Split(h, ",")
+	for i := range protocols {
+		protocols[i] = strings.TrimSpace(protocols[i])
+	}
+	return protocols
+}
+
+// IsWebSocketUpgrade returns true if the client requested upgrade to the
+// WebSocket protocol.
+func IsWebSocketUpgrade(r *http.Request) bool {
+	return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
+		tokenListContainsValue(r.Header, "Upgrade", "websocket")
+}
+
+// bufioReaderSize returns the size of a bufio.Reader.
+func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
+	// This code assumes that peek on a reset reader returns
+	// bufio.Reader.buf[:0].
+	// TODO: Use bufio.Reader.Size() after Go 1.10
+	br.Reset(originalReader)
+	if p, err := br.Peek(0); err == nil {
+		return cap(p)
+	}
+	return 0
+}
+
+// writeHook is an io.Writer that records the last slice passed to it via
+// io.Writer.Write.
+type writeHook struct {
+	p []byte
+}
+
+func (wh *writeHook) Write(p []byte) (int, error) {
+	wh.p = p
+	return len(p), nil
+}
+
+// bufioWriterBuffer grabs the buffer from a bufio.Writer.
+func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
+	// This code assumes that bufio.Writer.buf[:1] is passed to the
+	// bufio.Writer's underlying writer.
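+	// Resetting bw to a writeHook and flushing a single byte causes bufio to
+	// hand its internal buffer to writeHook.Write, which records the slice so
+	// that its full capacity can be returned for reuse below.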
+ var wh writeHook + bw.Reset(&wh) + bw.WriteByte(0) + bw.Flush() + + bw.Reset(originalWriter) + + return wh.p[:cap(wh.p)] +} diff --git a/vendor/github.com/gorilla/websocket/tls_handshake.go b/vendor/github.com/gorilla/websocket/tls_handshake.go new file mode 100644 index 000000000..a62b68ccb --- /dev/null +++ b/vendor/github.com/gorilla/websocket/tls_handshake.go @@ -0,0 +1,21 @@ +//go:build go1.17 +// +build go1.17 + +package websocket + +import ( + "context" + "crypto/tls" +) + +func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { + if err := tlsConn.HandshakeContext(ctx); err != nil { + return err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/tls_handshake_116.go b/vendor/github.com/gorilla/websocket/tls_handshake_116.go new file mode 100644 index 000000000..e1b2b44f6 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/tls_handshake_116.go @@ -0,0 +1,21 @@ +//go:build !go1.17 +// +build !go1.17 + +package websocket + +import ( + "context" + "crypto/tls" +) + +func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { + if err := tlsConn.Handshake(); err != nil { + return err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go new file mode 100644 index 000000000..7bf2f66c6 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/util.go @@ -0,0 +1,283 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "io" + "net/http" + "strings" + "unicode/utf8" +) + +var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") + +func computeAcceptKey(challengeKey string) string { + h := sha1.New() + h.Write([]byte(challengeKey)) + h.Write(keyGUID) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func generateChallengeKey() (string, error) { + p := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, p); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(p), nil +} + +// Token octets per RFC 2616. 
+var isTokenOctet = [256]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +// skipSpace returns a slice of the string s with all leading RFC 2616 linear +// whitespace removed. +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if b := s[i]; b != ' ' && b != '\t' { + break + } + } + return s[i:] +} + +// nextToken returns the leading RFC 2616 token of s and the string following +// the token. +func nextToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if !isTokenOctet[s[i]] { + break + } + } + return s[:i], s[i:] +} + +// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616 +// and the string following the token or quoted string. +func nextTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return nextToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} + +// equalASCIIFold returns true if s is equal to t with ASCII case folding as +// defined in RFC 4790. +func equalASCIIFold(s, t string) bool { + for s != "" && t != "" { + sr, size := utf8.DecodeRuneInString(s) + s = s[size:] + tr, size := utf8.DecodeRuneInString(t) + t = t[size:] + if sr == tr { + continue + } + if 'A' <= sr && sr <= 'Z' { + sr = sr + 'a' - 'A' + } + if 'A' <= tr && tr <= 'Z' { + tr = tr + 'a' - 'A' + } + if sr != tr { + return false + } + } + return s == t +} + +// tokenListContainsValue returns true if the 1#token header with the given +// name contains a token equal to value with ASCII case folding. +func tokenListContainsValue(header http.Header, name string, value string) bool { +headers: + for _, s := range header[name] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + s = skipSpace(s) + if s != "" && s[0] != ',' { + continue headers + } + if equalASCIIFold(t, value) { + return true + } + if s == "" { + continue headers + } + s = s[1:] + } + } + return false +} + +// parseExtensions parses WebSocket extensions from a header. 
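+// For example, a header value of
+//
+//	permessage-deflate; client_max_window_bits
+//
+// yields a single map whose "" key holds the extension name
+// ("permessage-deflate") and whose "client_max_window_bits" key holds an
+// empty value.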
+func parseExtensions(header http.Header) []map[string]string { + // From RFC 6455: + // + // Sec-WebSocket-Extensions = extension-list + // extension-list = 1#extension + // extension = extension-token *( ";" extension-param ) + // extension-token = registered-token + // registered-token = token + // extension-param = token [ "=" (token | quoted-string) ] + // ;When using the quoted-string syntax variant, the value + // ;after quoted-string unescaping MUST conform to the + // ;'token' ABNF. + + var result []map[string]string +headers: + for _, s := range header["Sec-Websocket-Extensions"] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + ext := map[string]string{"": t} + for { + s = skipSpace(s) + if !strings.HasPrefix(s, ";") { + break + } + var k string + k, s = nextToken(skipSpace(s[1:])) + if k == "" { + continue headers + } + s = skipSpace(s) + var v string + if strings.HasPrefix(s, "=") { + v, s = nextTokenOrQuoted(skipSpace(s[1:])) + s = skipSpace(s) + } + if s != "" && s[0] != ',' && s[0] != ';' { + continue headers + } + ext[k] = v + } + if s != "" && s[0] != ',' { + continue headers + } + result = append(result, ext) + if s == "" { + continue headers + } + s = s[1:] + } + } + return result +} diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go new file mode 100644 index 000000000..2e668f6b8 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/x_net_proxy.go @@ -0,0 +1,473 @@ +// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. +//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy + +// Package proxy provides support for a variety of protocols to proxy network +// data. +// + +package websocket + +import ( + "errors" + "io" + "net" + "net/url" + "os" + "strconv" + "strings" + "sync" +) + +type proxy_direct struct{} + +// Direct is a direct proxy: one that makes network connections directly. +var proxy_Direct = proxy_direct{} + +func (proxy_direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type proxy_PerHost struct { + def, bypass proxy_Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. +func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { + return &proxy_PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. 
+func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *proxy_PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *proxy_PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *proxy_PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. +func (p *proxy_PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} + +// A Dialer is a means to establish a connection. +type proxy_Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type proxy_Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy related variables in +// the environment. 
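+// It consults ALL_PROXY/all_proxy for the proxy URL and NO_PROXY/no_proxy for
+// bypass rules; if ALL_PROXY is unset or cannot be parsed, connections are
+// made directly.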
+func proxy_FromEnvironment() proxy_Dialer { + allProxy := proxy_allProxyEnv.Get() + if len(allProxy) == 0 { + return proxy_Direct + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return proxy_Direct + } + proxy, err := proxy_FromURL(proxyURL, proxy_Direct) + if err != nil { + return proxy_Direct + } + + noProxy := proxy_noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := proxy_NewPerHost(proxy, proxy_Direct) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) { + if proxy_proxySchemes == nil { + proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) + } + proxy_proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. +func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) { + var auth *proxy_Auth + if u.User != nil { + auth = new(proxy_Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5": + return proxy_SOCKS5("tcp", u.Host, auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. + if proxy_proxySchemes != nil { + if f, ok := proxy_proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + proxy_allProxyEnv = &proxy_envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + proxy_noProxyEnv = &proxy_envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type proxy_envOnce struct { + names []string + once sync.Once + val string +} + +func (e *proxy_envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *proxy_envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address +// with an optional username and password. See RFC 1928 and RFC 1929. 
+func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) { + s := &proxy_socks5{ + network: network, + addr: addr, + forward: forward, + } + if auth != nil { + s.user = auth.User + s.password = auth.Password + } + + return s, nil +} + +type proxy_socks5 struct { + user, password string + network, addr string + forward proxy_Dialer +} + +const proxy_socks5Version = 5 + +const ( + proxy_socks5AuthNone = 0 + proxy_socks5AuthPassword = 2 +) + +const proxy_socks5Connect = 1 + +const ( + proxy_socks5IP4 = 1 + proxy_socks5Domain = 3 + proxy_socks5IP6 = 4 +) + +var proxy_socks5Errors = []string{ + "", + "general failure", + "connection forbidden", + "network unreachable", + "host unreachable", + "connection refused", + "TTL expired", + "command not supported", + "address type not supported", +} + +// Dial connects to the address addr on the given network via the SOCKS5 proxy. +func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) + } + + conn, err := s.forward.Dial(s.network, s.addr) + if err != nil { + return nil, err + } + if err := s.connect(conn, addr); err != nil { + conn.Close() + return nil, err + } + return conn, nil +} + +// connect takes an existing connection to a socks5 proxy server, +// and commands the server to extend that connection to target, +// which must be a canonical address with a host and port. +func (s *proxy_socks5) connect(conn net.Conn, target string) error { + host, portStr, err := net.SplitHostPort(target) + if err != nil { + return err + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return errors.New("proxy: failed to parse port number: " + portStr) + } + if port < 1 || port > 0xffff { + return errors.New("proxy: port number out of range: " + portStr) + } + + // the size here is just an estimate + buf := make([]byte, 0, 6+len(host)) + + buf = append(buf, proxy_socks5Version) + if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { + buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword) + } else { + buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone) + } + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + if buf[0] != 5 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + } + if buf[1] == 0xff { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + } + + // See RFC 1929 + if buf[1] == proxy_socks5AuthPassword { + buf = buf[:0] + buf = append(buf, 1 /* password protocol version */) + buf = append(buf, uint8(len(s.user))) + buf = append(buf, s.user...) + buf = append(buf, uint8(len(s.password))) + buf = append(buf, s.password...) 
+ + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if buf[1] != 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + } + } + + buf = buf[:0] + buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */) + + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + buf = append(buf, proxy_socks5IP4) + ip = ip4 + } else { + buf = append(buf, proxy_socks5IP6) + } + buf = append(buf, ip...) + } else { + if len(host) > 255 { + return errors.New("proxy: destination host name too long: " + host) + } + buf = append(buf, proxy_socks5Domain) + buf = append(buf, byte(len(host))) + buf = append(buf, host...) + } + buf = append(buf, byte(port>>8), byte(port)) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:4]); err != nil { + return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + failure := "unknown error" + if int(buf[1]) < len(proxy_socks5Errors) { + failure = proxy_socks5Errors[buf[1]] + } + + if len(failure) > 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + } + + bytesToDiscard := 0 + switch buf[3] { + case proxy_socks5IP4: + bytesToDiscard = net.IPv4len + case proxy_socks5IP6: + bytesToDiscard = net.IPv6len + case proxy_socks5Domain: + _, err := io.ReadFull(conn, buf[:1]) + if err != nil { + return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + bytesToDiscard = int(buf[0]) + default: + return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + } + + if cap(buf) < bytesToDiscard { + buf = make([]byte, bytesToDiscard) + } else { + buf = buf[:bytesToDiscard] + } + if _, err := io.ReadFull(conn, buf); err != nil { + return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + // Also need to discard the port number + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + return nil +} diff --git a/vendor/github.com/mxk/go-flowrate/LICENSE b/vendor/github.com/mxk/go-flowrate/LICENSE new file mode 100644 index 000000000..e9f9f628b --- /dev/null +++ b/vendor/github.com/mxk/go-flowrate/LICENSE @@ -0,0 +1,29 @@ +Copyright (c) 2014 The Go-FlowRate Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the + distribution. 
+ + * Neither the name of the go-flowrate project nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/mxk/go-flowrate/flowrate/flowrate.go b/vendor/github.com/mxk/go-flowrate/flowrate/flowrate.go new file mode 100644 index 000000000..1b727721e --- /dev/null +++ b/vendor/github.com/mxk/go-flowrate/flowrate/flowrate.go @@ -0,0 +1,267 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +// Package flowrate provides the tools for monitoring and limiting the flow rate +// of an arbitrary data stream. +package flowrate + +import ( + "math" + "sync" + "time" +) + +// Monitor monitors and limits the transfer rate of a data stream. +type Monitor struct { + mu sync.Mutex // Mutex guarding access to all internal fields + active bool // Flag indicating an active transfer + start time.Duration // Transfer start time (clock() value) + bytes int64 // Total number of bytes transferred + samples int64 // Total number of samples taken + + rSample float64 // Most recent transfer rate sample (bytes per second) + rEMA float64 // Exponential moving average of rSample + rPeak float64 // Peak transfer rate (max of all rSamples) + rWindow float64 // rEMA window (seconds) + + sBytes int64 // Number of bytes transferred since sLast + sLast time.Duration // Most recent sample time (stop time when inactive) + sRate time.Duration // Sampling rate + + tBytes int64 // Number of bytes expected in the current transfer + tLast time.Duration // Time of the most recent transfer of at least 1 byte +} + +// New creates a new flow control monitor. Instantaneous transfer rate is +// measured and updated for each sampleRate interval. windowSize determines the +// weight of each sample in the exponential moving average (EMA) calculation. +// The exact formulas are: +// +// sampleTime = currentTime - prevSampleTime +// sampleRate = byteCount / sampleTime +// weight = 1 - exp(-sampleTime/windowSize) +// newRate = weight*sampleRate + (1-weight)*oldRate +// +// The default values for sampleRate and windowSize (if <= 0) are 100ms and 1s, +// respectively. +func New(sampleRate, windowSize time.Duration) *Monitor { + if sampleRate = clockRound(sampleRate); sampleRate <= 0 { + sampleRate = 5 * clockRate + } + if windowSize <= 0 { + windowSize = 1 * time.Second + } + now := clock() + return &Monitor{ + active: true, + start: now, + rWindow: windowSize.Seconds(), + sLast: now, + sRate: sampleRate, + tLast: now, + } +} + +// Update records the transfer of n bytes and returns n. It should be called +// after each Read/Write operation, even if n is 0. 
+func (m *Monitor) Update(n int) int { + m.mu.Lock() + m.update(n) + m.mu.Unlock() + return n +} + +// IO is a convenience method intended to wrap io.Reader and io.Writer method +// execution. It calls m.Update(n) and then returns (n, err) unmodified. +func (m *Monitor) IO(n int, err error) (int, error) { + return m.Update(n), err +} + +// Done marks the transfer as finished and prevents any further updates or +// limiting. Instantaneous and current transfer rates drop to 0. Update, IO, and +// Limit methods become NOOPs. It returns the total number of bytes transferred. +func (m *Monitor) Done() int64 { + m.mu.Lock() + if now := m.update(0); m.sBytes > 0 { + m.reset(now) + } + m.active = false + m.tLast = 0 + n := m.bytes + m.mu.Unlock() + return n +} + +// timeRemLimit is the maximum Status.TimeRem value. +const timeRemLimit = 999*time.Hour + 59*time.Minute + 59*time.Second + +// Status represents the current Monitor status. All transfer rates are in bytes +// per second rounded to the nearest byte. +type Status struct { + Active bool // Flag indicating an active transfer + Start time.Time // Transfer start time + Duration time.Duration // Time period covered by the statistics + Idle time.Duration // Time since the last transfer of at least 1 byte + Bytes int64 // Total number of bytes transferred + Samples int64 // Total number of samples taken + InstRate int64 // Instantaneous transfer rate + CurRate int64 // Current transfer rate (EMA of InstRate) + AvgRate int64 // Average transfer rate (Bytes / Duration) + PeakRate int64 // Maximum instantaneous transfer rate + BytesRem int64 // Number of bytes remaining in the transfer + TimeRem time.Duration // Estimated time to completion + Progress Percent // Overall transfer progress +} + +// Status returns current transfer status information. The returned value +// becomes static after a call to Done. +func (m *Monitor) Status() Status { + m.mu.Lock() + now := m.update(0) + s := Status{ + Active: m.active, + Start: clockToTime(m.start), + Duration: m.sLast - m.start, + Idle: now - m.tLast, + Bytes: m.bytes, + Samples: m.samples, + PeakRate: round(m.rPeak), + BytesRem: m.tBytes - m.bytes, + Progress: percentOf(float64(m.bytes), float64(m.tBytes)), + } + if s.BytesRem < 0 { + s.BytesRem = 0 + } + if s.Duration > 0 { + rAvg := float64(s.Bytes) / s.Duration.Seconds() + s.AvgRate = round(rAvg) + if s.Active { + s.InstRate = round(m.rSample) + s.CurRate = round(m.rEMA) + if s.BytesRem > 0 { + if tRate := 0.8*m.rEMA + 0.2*rAvg; tRate > 0 { + ns := float64(s.BytesRem) / tRate * 1e9 + if ns > float64(timeRemLimit) { + ns = float64(timeRemLimit) + } + s.TimeRem = clockRound(time.Duration(ns)) + } + } + } + } + m.mu.Unlock() + return s +} + +// Limit restricts the instantaneous (per-sample) data flow to rate bytes per +// second. It returns the maximum number of bytes (0 <= n <= want) that may be +// transferred immediately without exceeding the limit. If block == true, the +// call blocks until n > 0. want is returned unmodified if want < 1, rate < 1, +// or the transfer is inactive (after a call to Done). +// +// At least one byte is always allowed to be transferred in any given sampling +// period. Thus, if the sampling rate is 100ms, the lowest achievable flow rate +// is 10 bytes per second. +// +// For usage examples, see the implementation of Reader and Writer in io.go. 
+func (m *Monitor) Limit(want int, rate int64, block bool) (n int) { + if want < 1 || rate < 1 { + return want + } + m.mu.Lock() + + // Determine the maximum number of bytes that can be sent in one sample + limit := round(float64(rate) * m.sRate.Seconds()) + if limit <= 0 { + limit = 1 + } + + // If block == true, wait until m.sBytes < limit + if now := m.update(0); block { + for m.sBytes >= limit && m.active { + now = m.waitNextSample(now) + } + } + + // Make limit <= want (unlimited if the transfer is no longer active) + if limit -= m.sBytes; limit > int64(want) || !m.active { + limit = int64(want) + } + m.mu.Unlock() + + if limit < 0 { + limit = 0 + } + return int(limit) +} + +// SetTransferSize specifies the total size of the data transfer, which allows +// the Monitor to calculate the overall progress and time to completion. +func (m *Monitor) SetTransferSize(bytes int64) { + if bytes < 0 { + bytes = 0 + } + m.mu.Lock() + m.tBytes = bytes + m.mu.Unlock() +} + +// update accumulates the transferred byte count for the current sample until +// clock() - m.sLast >= m.sRate. The monitor status is updated once the current +// sample is done. +func (m *Monitor) update(n int) (now time.Duration) { + if !m.active { + return + } + if now = clock(); n > 0 { + m.tLast = now + } + m.sBytes += int64(n) + if sTime := now - m.sLast; sTime >= m.sRate { + t := sTime.Seconds() + if m.rSample = float64(m.sBytes) / t; m.rSample > m.rPeak { + m.rPeak = m.rSample + } + + // Exponential moving average using a method similar to *nix load + // average calculation. Longer sampling periods carry greater weight. + if m.samples > 0 { + w := math.Exp(-t / m.rWindow) + m.rEMA = m.rSample + w*(m.rEMA-m.rSample) + } else { + m.rEMA = m.rSample + } + m.reset(now) + } + return +} + +// reset clears the current sample state in preparation for the next sample. +func (m *Monitor) reset(sampleTime time.Duration) { + m.bytes += m.sBytes + m.samples++ + m.sBytes = 0 + m.sLast = sampleTime +} + +// waitNextSample sleeps for the remainder of the current sample. The lock is +// released and reacquired during the actual sleep period, so it's possible for +// the transfer to be inactive when this method returns. +func (m *Monitor) waitNextSample(now time.Duration) time.Duration { + const minWait = 5 * time.Millisecond + current := m.sLast + + // sleep until the last sample time changes (ideally, just one iteration) + for m.sLast == current && m.active { + d := current + m.sRate - now + m.mu.Unlock() + if d < minWait { + d = minWait + } + time.Sleep(d) + m.mu.Lock() + now = m.update(0) + } + return now +} diff --git a/vendor/github.com/mxk/go-flowrate/flowrate/io.go b/vendor/github.com/mxk/go-flowrate/flowrate/io.go new file mode 100644 index 000000000..fbe090972 --- /dev/null +++ b/vendor/github.com/mxk/go-flowrate/flowrate/io.go @@ -0,0 +1,133 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +package flowrate + +import ( + "errors" + "io" +) + +// ErrLimit is returned by the Writer when a non-blocking write is short due to +// the transfer rate limit. +var ErrLimit = errors.New("flowrate: flow rate limit exceeded") + +// Limiter is implemented by the Reader and Writer to provide a consistent +// interface for monitoring and controlling data transfer. +type Limiter interface { + Done() int64 + Status() Status + SetTransferSize(bytes int64) + SetLimit(new int64) (old int64) + SetBlocking(new bool) (old bool) +} + +// Reader implements io.ReadCloser with a restriction on the rate of data +// transfer. 
+type Reader struct { + io.Reader // Data source + *Monitor // Flow control monitor + + limit int64 // Rate limit in bytes per second (unlimited when <= 0) + block bool // What to do when no new bytes can be read due to the limit +} + +// NewReader restricts all Read operations on r to limit bytes per second. +func NewReader(r io.Reader, limit int64) *Reader { + return &Reader{r, New(0, 0), limit, true} +} + +// Read reads up to len(p) bytes into p without exceeding the current transfer +// rate limit. It returns (0, nil) immediately if r is non-blocking and no new +// bytes can be read at this time. +func (r *Reader) Read(p []byte) (n int, err error) { + p = p[:r.Limit(len(p), r.limit, r.block)] + if len(p) > 0 { + n, err = r.IO(r.Reader.Read(p)) + } + return +} + +// SetLimit changes the transfer rate limit to new bytes per second and returns +// the previous setting. +func (r *Reader) SetLimit(new int64) (old int64) { + old, r.limit = r.limit, new + return +} + +// SetBlocking changes the blocking behavior and returns the previous setting. A +// Read call on a non-blocking reader returns immediately if no additional bytes +// may be read at this time due to the rate limit. +func (r *Reader) SetBlocking(new bool) (old bool) { + old, r.block = r.block, new + return +} + +// Close closes the underlying reader if it implements the io.Closer interface. +func (r *Reader) Close() error { + defer r.Done() + if c, ok := r.Reader.(io.Closer); ok { + return c.Close() + } + return nil +} + +// Writer implements io.WriteCloser with a restriction on the rate of data +// transfer. +type Writer struct { + io.Writer // Data destination + *Monitor // Flow control monitor + + limit int64 // Rate limit in bytes per second (unlimited when <= 0) + block bool // What to do when no new bytes can be written due to the limit +} + +// NewWriter restricts all Write operations on w to limit bytes per second. The +// transfer rate and the default blocking behavior (true) can be changed +// directly on the returned *Writer. +func NewWriter(w io.Writer, limit int64) *Writer { + return &Writer{w, New(0, 0), limit, true} +} + +// Write writes len(p) bytes from p to the underlying data stream without +// exceeding the current transfer rate limit. It returns (n, ErrLimit) if w is +// non-blocking and no additional bytes can be written at this time. +func (w *Writer) Write(p []byte) (n int, err error) { + var c int + for len(p) > 0 && err == nil { + s := p[:w.Limit(len(p), w.limit, w.block)] + if len(s) > 0 { + c, err = w.IO(w.Writer.Write(s)) + } else { + return n, ErrLimit + } + p = p[c:] + n += c + } + return +} + +// SetLimit changes the transfer rate limit to new bytes per second and returns +// the previous setting. +func (w *Writer) SetLimit(new int64) (old int64) { + old, w.limit = w.limit, new + return +} + +// SetBlocking changes the blocking behavior and returns the previous setting. A +// Write call on a non-blocking writer returns as soon as no additional bytes +// may be written at this time due to the rate limit. +func (w *Writer) SetBlocking(new bool) (old bool) { + old, w.block = w.block, new + return +} + +// Close closes the underlying writer if it implements the io.Closer interface. 
+func (w *Writer) Close() error { + defer w.Done() + if c, ok := w.Writer.(io.Closer); ok { + return c.Close() + } + return nil +} diff --git a/vendor/github.com/mxk/go-flowrate/flowrate/util.go b/vendor/github.com/mxk/go-flowrate/flowrate/util.go new file mode 100644 index 000000000..4caac583f --- /dev/null +++ b/vendor/github.com/mxk/go-flowrate/flowrate/util.go @@ -0,0 +1,67 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +package flowrate + +import ( + "math" + "strconv" + "time" +) + +// clockRate is the resolution and precision of clock(). +const clockRate = 20 * time.Millisecond + +// czero is the process start time rounded down to the nearest clockRate +// increment. +var czero = time.Duration(time.Now().UnixNano()) / clockRate * clockRate + +// clock returns a low resolution timestamp relative to the process start time. +func clock() time.Duration { + return time.Duration(time.Now().UnixNano())/clockRate*clockRate - czero +} + +// clockToTime converts a clock() timestamp to an absolute time.Time value. +func clockToTime(c time.Duration) time.Time { + return time.Unix(0, int64(czero+c)) +} + +// clockRound returns d rounded to the nearest clockRate increment. +func clockRound(d time.Duration) time.Duration { + return (d + clockRate>>1) / clockRate * clockRate +} + +// round returns x rounded to the nearest int64 (non-negative values only). +func round(x float64) int64 { + if _, frac := math.Modf(x); frac >= 0.5 { + return int64(math.Ceil(x)) + } + return int64(math.Floor(x)) +} + +// Percent represents a percentage in increments of 1/1000th of a percent. +type Percent uint32 + +// percentOf calculates what percent of the total is x. +func percentOf(x, total float64) Percent { + if x < 0 || total <= 0 { + return 0 + } else if p := round(x / total * 1e5); p <= math.MaxUint32 { + return Percent(p) + } + return Percent(math.MaxUint32) +} + +func (p Percent) Float() float64 { + return float64(p) * 1e-3 +} + +func (p Percent) String() string { + var buf [12]byte + b := strconv.AppendUint(buf[:0], uint64(p)/1000, 10) + n := len(b) + b = strconv.AppendUint(b, 1000+uint64(p)%1000, 10) + b[n] = '.' + return string(append(b, '%')) +} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/doc.go b/vendor/github.com/opencontainers/selinux/go-selinux/doc.go index 0ac7d819e..57a15c9a1 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/doc.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/doc.go @@ -9,6 +9,5 @@ Usage: if selinux.EnforceMode() != selinux.Enforcing { selinux.SetEnforceMode(selinux.Enforcing) } - */ package selinux diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go index fea096c18..07e0f77dc 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go @@ -78,6 +78,9 @@ func ReleaseLabel(label string) error { // Deprecated: use selinux.DupSecOpt var DupSecOpt = selinux.DupSecOpt +// FormatMountLabel returns a string to be used by the mount command. Using +// the SELinux `context` mount option. Changing labels of files on mount +// points with this option can never be changed. // FormatMountLabel returns a string to be used by the mount command. // The format of this string will be used to alter the labeling of the mountpoint. // The string returned is suitable to be used as the options field of the mount command. 
@@ -85,12 +88,27 @@ var DupSecOpt = selinux.DupSecOpt // the first parameter. Second parameter is the label that you wish to apply // to all content in the mount point. func FormatMountLabel(src, mountLabel string) string { + return FormatMountLabelByType(src, mountLabel, "context") +} + +// FormatMountLabelByType returns a string to be used by the mount command. +// Allow caller to specify the mount options. For example using the SELinux +// `fscontext` mount option would allow certain container processes to change +// labels of files created on the mount points, where as `context` option does +// not. +// FormatMountLabelByType returns a string to be used by the mount command. +// The format of this string will be used to alter the labeling of the mountpoint. +// The string returned is suitable to be used as the options field of the mount command. +// If you need to have additional mount point options, you can pass them in as +// the first parameter. Second parameter is the label that you wish to apply +// to all content in the mount point. +func FormatMountLabelByType(src, mountLabel, contextType string) string { if mountLabel != "" { switch src { case "": - src = fmt.Sprintf("context=%q", mountLabel) + src = fmt.Sprintf("%s=%q", contextType, mountLabel) default: - src = fmt.Sprintf("%s,context=%q", src, mountLabel) + src = fmt.Sprintf("%s,%s=%q", src, contextType, mountLabel) } } return src diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go index 12de0ae5d..f61a56015 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go @@ -3,8 +3,6 @@ package label import ( "errors" "fmt" - "os" - "os/user" "strings" "github.com/opencontainers/selinux/go-selinux" @@ -113,50 +111,6 @@ func Relabel(path string, fileLabel string, shared bool) error { return nil } - exclude_paths := map[string]bool{ - "/": true, - "/bin": true, - "/boot": true, - "/dev": true, - "/etc": true, - "/etc/passwd": true, - "/etc/pki": true, - "/etc/shadow": true, - "/home": true, - "/lib": true, - "/lib64": true, - "/media": true, - "/opt": true, - "/proc": true, - "/root": true, - "/run": true, - "/sbin": true, - "/srv": true, - "/sys": true, - "/tmp": true, - "/usr": true, - "/var": true, - "/var/lib": true, - "/var/log": true, - } - - if home := os.Getenv("HOME"); home != "" { - exclude_paths[home] = true - } - - if sudoUser := os.Getenv("SUDO_USER"); sudoUser != "" { - if usr, err := user.Lookup(sudoUser); err == nil { - exclude_paths[usr.HomeDir] = true - } - } - - if path != "/" { - path = strings.TrimSuffix(path, "/") - } - if exclude_paths[path] { - return fmt.Errorf("SELinux relabeling of %s is not allowed", path) - } - if shared { c, err := selinux.NewContext(fileLabel) if err != nil { diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go index 02d206239..f21c80c5a 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package label diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go b/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go deleted file mode 100644 index 897ecbac4..000000000 --- 
a/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build linux,go1.16 - -package selinux - -import ( - "errors" - "io/fs" - "os" - - "github.com/opencontainers/selinux/pkg/pwalkdir" -) - -func rchcon(fpath, label string) error { - return pwalkdir.Walk(fpath, func(p string, _ fs.DirEntry, _ error) error { - e := setFileLabel(p, label) - // Walk a file tree can race with removal, so ignore ENOENT. - if errors.Is(e, os.ErrNotExist) { - return nil - } - return e - }) -} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go b/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go deleted file mode 100644 index 2c8b033ce..000000000 --- a/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build linux,!go1.16 - -package selinux - -import ( - "errors" - "os" - - "github.com/opencontainers/selinux/pkg/pwalk" -) - -func rchcon(fpath, label string) error { - return pwalk.Walk(fpath, func(p string, _ os.FileInfo, _ error) error { - e := setFileLabel(p, label) - // Walk a file tree can race with removal, so ignore ENOENT. - if errors.Is(e, os.ErrNotExist) { - return nil - } - return e - }) -} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go index 5a59d151f..af058b84b 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go @@ -23,8 +23,13 @@ var ( // ErrEmptyPath is returned when an empty path has been specified. ErrEmptyPath = errors.New("empty path") + // ErrInvalidLabel is returned when an invalid label is specified. + ErrInvalidLabel = errors.New("invalid Label") + // InvalidLabel is returned when an invalid label is specified. - InvalidLabel = errors.New("Invalid Label") + // + // Deprecated: use [ErrInvalidLabel]. + InvalidLabel = ErrInvalidLabel // ErrIncomparable is returned two levels are not comparable ErrIncomparable = errors.New("incomparable levels") @@ -144,7 +149,7 @@ func CalculateGlbLub(sourceRange, targetRange string) (string, error) { // of the program is finished to guarantee another goroutine does not migrate to the current // thread before execution is complete. func SetExecLabel(label string) error { - return setExecLabel(label) + return writeCon(attrPath("exec"), label) } // SetTaskLabel sets the SELinux label for the current thread, or an error. @@ -152,21 +157,21 @@ func SetExecLabel(label string) error { // be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() to guarantee // the current thread does not run in a new mislabeled thread. func SetTaskLabel(label string) error { - return setTaskLabel(label) + return writeCon(attrPath("current"), label) } // SetSocketLabel takes a process label and tells the kernel to assign the // label to the next socket that gets created. Calls to SetSocketLabel // should be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() until -// the the socket is created to guarantee another goroutine does not migrate +// the socket is created to guarantee another goroutine does not migrate // to the current thread before execution is complete. 
func SetSocketLabel(label string) error { - return setSocketLabel(label) + return writeCon(attrPath("sockcreate"), label) } // SocketLabel retrieves the current socket label setting func SocketLabel() (string, error) { - return socketLabel() + return readCon(attrPath("sockcreate")) } // PeerLabel retrieves the label of the client on the other side of a socket @@ -185,7 +190,7 @@ func SetKeyLabel(label string) error { // KeyLabel retrieves the current kernel keyring label setting func KeyLabel() (string, error) { - return keyLabel() + return readCon("/proc/self/attr/keycreate") } // Get returns the Context as a string @@ -208,6 +213,11 @@ func ReserveLabel(label string) { reserveLabel(label) } +// MLSEnabled checks if MLS is enabled. +func MLSEnabled() bool { + return isMLSEnabled() +} + // EnforceMode returns the current SELinux mode Enforcing, Permissive, Disabled func EnforceMode() int { return enforceMode() @@ -220,7 +230,7 @@ func SetEnforceMode(mode int) error { } // DefaultEnforceMode returns the systems default SELinux mode Enforcing, -// Permissive or Disabled. Note this is is just the default at boot time. +// Permissive or Disabled. Note this is just the default at boot time. // EnforceMode tells you the systems current mode. func DefaultEnforceMode() int { return defaultEnforceMode() @@ -266,7 +276,7 @@ func CopyLevel(src, dest string) (string, error) { return copyLevel(src, dest) } -// Chcon changes the fpath file object to the SELinux label label. +// Chcon changes the fpath file object to the SELinux label. // If fpath is a directory and recurse is true, then Chcon walks the // directory tree setting the label. // @@ -284,7 +294,7 @@ func DupSecOpt(src string) ([]string, error) { // DisableSecOpt returns a security opt that can be used to disable SELinux // labeling support for future container processes. 
func DisableSecOpt() []string { - return disableSecOpt() + return []string{"disable"} } // GetDefaultContextWithLevel gets a single context for the specified SELinux user diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go index ee602ab96..f1e95977d 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go @@ -8,15 +8,16 @@ import ( "errors" "fmt" "io" - "io/ioutil" + "io/fs" "math/big" "os" - "path" + "os/user" "path/filepath" "strconv" "strings" "sync" + "github.com/opencontainers/selinux/pkg/pwalkdir" "golang.org/x/sys/unix" ) @@ -34,17 +35,17 @@ const ( ) type selinuxState struct { + mcsList map[string]bool + selinuxfs string + selinuxfsOnce sync.Once enabledSet bool enabled bool - selinuxfsOnce sync.Once - selinuxfs string - mcsList map[string]bool sync.Mutex } type level struct { - sens uint cats *big.Int + sens uint } type mlsRange struct { @@ -53,10 +54,10 @@ type mlsRange struct { } type defaultSECtx struct { - user, level, scon string - userRdr, defaultRdr io.Reader - - verifier func(string) error + userRdr io.Reader + verifier func(string) error + defaultRdr io.Reader + user, level, scon string } type levelItem byte @@ -154,7 +155,7 @@ func findSELinuxfs() string { } // check if selinuxfs is available before going the slow path - fs, err := ioutil.ReadFile("/proc/filesystems") + fs, err := os.ReadFile("/proc/filesystems") if err != nil { return "" } @@ -291,7 +292,7 @@ func readCon(fpath string) (string, error) { } func readConFd(in *os.File) (string, error) { - data, err := ioutil.ReadAll(in) + data, err := io.ReadAll(in) if err != nil { return "", err } @@ -304,7 +305,7 @@ func classIndex(class string) (int, error) { permpath := fmt.Sprintf("class/%s/index", class) indexpath := filepath.Join(getSelinuxMountPoint(), permpath) - indexB, err := ioutil.ReadFile(indexpath) + indexB, err := os.ReadFile(indexpath) if err != nil { return -1, err } @@ -390,21 +391,19 @@ func lFileLabel(fpath string) (string, error) { return string(label), nil } -// setFSCreateLabel tells kernel the label to create all file system objects -// created by this task. Setting label="" to return to default. func setFSCreateLabel(label string) error { - return writeAttr("fscreate", label) + return writeCon(attrPath("fscreate"), label) } // fsCreateLabel returns the default label the kernel which the kernel is using // for file system objects created by this task. "" indicates default. func fsCreateLabel() (string, error) { - return readAttr("fscreate") + return readCon(attrPath("fscreate")) } // currentLabel returns the SELinux label of the current process thread, or an error. func currentLabel() (string, error) { - return readAttr("current") + return readCon(attrPath("current")) } // pidLabel returns the SELinux label of the given pid, or an error. @@ -415,7 +414,7 @@ func pidLabel(pid int) (string, error) { // ExecLabel returns the SELinux label that the kernel will use for any programs // that are executed by the current process thread, or an error. 
func execLabel() (string, error) { - return readAttr("exec") + return readCon(attrPath("exec")) } func writeCon(fpath, val string) error { @@ -461,18 +460,10 @@ func attrPath(attr string) string { }) if haveThreadSelf { - return path.Join(threadSelfPrefix, attr) + return filepath.Join(threadSelfPrefix, attr) } - return path.Join("/proc/self/task/", strconv.Itoa(unix.Gettid()), "/attr/", attr) -} - -func readAttr(attr string) (string, error) { - return readCon(attrPath(attr)) -} - -func writeAttr(attr, val string) error { - return writeCon(attrPath(attr), val) + return filepath.Join("/proc/self/task", strconv.Itoa(unix.Gettid()), "attr", attr) } // canonicalizeContext takes a context string and writes it to the kernel @@ -559,30 +550,30 @@ func (l *level) parseLevel(levelStr string) error { // rangeStrToMLSRange marshals a string representation of a range. func rangeStrToMLSRange(rangeStr string) (*mlsRange, error) { - mlsRange := &mlsRange{} - levelSlice := strings.SplitN(rangeStr, "-", 2) + r := &mlsRange{} + l := strings.SplitN(rangeStr, "-", 2) - switch len(levelSlice) { + switch len(l) { // rangeStr that has a low and a high level, e.g. s4:c0.c1023-s6:c0.c1023 case 2: - mlsRange.high = &level{} - if err := mlsRange.high.parseLevel(levelSlice[1]); err != nil { - return nil, fmt.Errorf("failed to parse high level %q: %w", levelSlice[1], err) + r.high = &level{} + if err := r.high.parseLevel(l[1]); err != nil { + return nil, fmt.Errorf("failed to parse high level %q: %w", l[1], err) } fallthrough // rangeStr that is single level, e.g. s6:c0,c3,c5,c30.c1023 case 1: - mlsRange.low = &level{} - if err := mlsRange.low.parseLevel(levelSlice[0]); err != nil { - return nil, fmt.Errorf("failed to parse low level %q: %w", levelSlice[0], err) + r.low = &level{} + if err := r.low.parseLevel(l[0]); err != nil { + return nil, fmt.Errorf("failed to parse low level %q: %w", l[0], err) } } - if mlsRange.high == nil { - mlsRange.high = mlsRange.low + if r.high == nil { + r.high = r.low } - return mlsRange, nil + return r, nil } // bitsetToStr takes a category bitset and returns it in the @@ -616,17 +607,17 @@ func bitsetToStr(c *big.Int) string { return str } -func (l1 *level) equal(l2 *level) bool { - if l2 == nil || l1 == nil { - return l1 == l2 +func (l *level) equal(l2 *level) bool { + if l2 == nil || l == nil { + return l == l2 } - if l1.sens != l2.sens { + if l2.sens != l.sens { return false } - if l2.cats == nil || l1.cats == nil { - return l2.cats == l1.cats + if l2.cats == nil || l.cats == nil { + return l2.cats == l.cats } - return l1.cats.Cmp(l2.cats) == 0 + return l.cats.Cmp(l2.cats) == 0 } // String returns an mlsRange as a string. @@ -720,36 +711,13 @@ func readWriteCon(fpath string, val string) (string, error) { return readConFd(f) } -// setExecLabel sets the SELinux label that the kernel will use for any programs -// that are executed by the current process thread, or an error. -func setExecLabel(label string) error { - return writeAttr("exec", label) -} - -// setTaskLabel sets the SELinux label for the current thread, or an error. -// This requires the dyntransition permission. 
-func setTaskLabel(label string) error { - return writeAttr("current", label) -} - -// setSocketLabel takes a process label and tells the kernel to assign the -// label to the next socket that gets created -func setSocketLabel(label string) error { - return writeAttr("sockcreate", label) -} - -// socketLabel retrieves the current socket label setting -func socketLabel() (string, error) { - return readAttr("sockcreate") -} - // peerLabel retrieves the label of the client on the other side of a socket func peerLabel(fd uintptr) (string, error) { - label, err := unix.GetsockoptString(int(fd), unix.SOL_SOCKET, unix.SO_PEERSEC) + l, err := unix.GetsockoptString(int(fd), unix.SOL_SOCKET, unix.SO_PEERSEC) if err != nil { return "", &os.PathError{Op: "getsockopt", Path: "fd " + strconv.Itoa(int(fd)), Err: err} } - return label, nil + return l, nil } // setKeyLabel takes a process label and tells the kernel to assign the @@ -765,15 +733,10 @@ func setKeyLabel(label string) error { return err } -// keyLabel retrieves the current kernel keyring label setting -func keyLabel() (string, error) { - return readCon("/proc/self/attr/keycreate") -} - // get returns the Context as a string func (c Context) get() string { - if level := c["level"]; level != "" { - return c["user"] + ":" + c["role"] + ":" + c["type"] + ":" + level + if l := c["level"]; l != "" { + return c["user"] + ":" + c["role"] + ":" + c["type"] + ":" + l } return c["user"] + ":" + c["role"] + ":" + c["type"] } @@ -785,7 +748,7 @@ func newContext(label string) (Context, error) { if len(label) != 0 { con := strings.SplitN(label, ":", 4) if len(con) < 3 { - return c, InvalidLabel + return c, ErrInvalidLabel } c["user"] = con[0] c["role"] = con[1] @@ -815,14 +778,23 @@ func reserveLabel(label string) { } func selinuxEnforcePath() string { - return path.Join(getSelinuxMountPoint(), "enforce") + return filepath.Join(getSelinuxMountPoint(), "enforce") +} + +// isMLSEnabled checks if MLS is enabled. +func isMLSEnabled() bool { + enabledB, err := os.ReadFile(filepath.Join(getSelinuxMountPoint(), "mls")) + if err != nil { + return false + } + return bytes.Equal(enabledB, []byte{'1'}) } // enforceMode returns the current SELinux mode Enforcing, Permissive, Disabled func enforceMode() int { var enforce int - enforceB, err := ioutil.ReadFile(selinuxEnforcePath()) + enforceB, err := os.ReadFile(selinuxEnforcePath()) if err != nil { return -1 } @@ -836,11 +808,12 @@ func enforceMode() int { // setEnforceMode sets the current SELinux mode Enforcing, Permissive. // Disabled is not valid, since this needs to be set at boot time. func setEnforceMode(mode int) error { - return ioutil.WriteFile(selinuxEnforcePath(), []byte(strconv.Itoa(mode)), 0o644) + //nolint:gosec // ignore G306: permissions to be 0600 or less. + return os.WriteFile(selinuxEnforcePath(), []byte(strconv.Itoa(mode)), 0o644) } // defaultEnforceMode returns the systems default SELinux mode Enforcing, -// Permissive or Disabled. Note this is is just the default at boot time. +// Permissive or Disabled. Note this is just the default at boot time. // EnforceMode tells you the systems current mode. 
func defaultEnforceMode() int { switch readConfig(selinuxTag) { @@ -940,7 +913,7 @@ func openContextFile() (*os.File, error) { if f, err := os.Open(contextFile); err == nil { return f, nil } - return os.Open(filepath.Join(policyRoot(), "/contexts/lxc_contexts")) + return os.Open(filepath.Join(policyRoot(), "contexts", "lxc_contexts")) } func loadLabels() { @@ -1043,7 +1016,8 @@ func addMcs(processLabel, fileLabel string) (string, string) { // securityCheckContext validates that the SELinux label is understood by the kernel func securityCheckContext(val string) error { - return ioutil.WriteFile(path.Join(getSelinuxMountPoint(), "context"), []byte(val), 0o644) + //nolint:gosec // ignore G306: permissions to be 0600 or less. + return os.WriteFile(filepath.Join(getSelinuxMountPoint(), "context"), []byte(val), 0o644) } // copyLevel returns a label with the MLS/MCS level from src label replaced on @@ -1072,22 +1046,7 @@ func copyLevel(src, dest string) (string, error) { return tcon.Get(), nil } -// Prevent users from relabeling system files -func badPrefix(fpath string) error { - if fpath == "" { - return ErrEmptyPath - } - - badPrefixes := []string{"/usr"} - for _, prefix := range badPrefixes { - if strings.HasPrefix(fpath, prefix) { - return fmt.Errorf("relabeling content in %s is not allowed", prefix) - } - } - return nil -} - -// chcon changes the fpath file object to the SELinux label label. +// chcon changes the fpath file object to the SELinux label. // If fpath is a directory and recurse is true, then chcon walks the // directory tree setting the label. func chcon(fpath string, label string, recurse bool) error { @@ -1097,17 +1056,97 @@ func chcon(fpath string, label string, recurse bool) error { if label == "" { return nil } - if err := badPrefix(fpath); err != nil { - return err + + excludePaths := map[string]bool{ + "/": true, + "/bin": true, + "/boot": true, + "/dev": true, + "/etc": true, + "/etc/passwd": true, + "/etc/pki": true, + "/etc/shadow": true, + "/home": true, + "/lib": true, + "/lib64": true, + "/media": true, + "/opt": true, + "/proc": true, + "/root": true, + "/run": true, + "/sbin": true, + "/srv": true, + "/sys": true, + "/tmp": true, + "/usr": true, + "/var": true, + "/var/lib": true, + "/var/log": true, + } + + if home := os.Getenv("HOME"); home != "" { + excludePaths[home] = true + } + + if sudoUser := os.Getenv("SUDO_USER"); sudoUser != "" { + if usr, err := user.Lookup(sudoUser); err == nil { + excludePaths[usr.HomeDir] = true + } + } + + if fpath != "/" { + fpath = strings.TrimSuffix(fpath, "/") + } + if excludePaths[fpath] { + return fmt.Errorf("SELinux relabeling of %s is not allowed", fpath) } if !recurse { - return setFileLabel(fpath, label) + err := lSetFileLabel(fpath, label) + if err != nil { + // Check if file doesn't exist, must have been removed + if errors.Is(err, os.ErrNotExist) { + return nil + } + // Check if current label is correct on disk + flabel, nerr := lFileLabel(fpath) + if nerr == nil && flabel == label { + return nil + } + // Check if file doesn't exist, must have been removed + if errors.Is(nerr, os.ErrNotExist) { + return nil + } + return err + } + return nil } return rchcon(fpath, label) } +func rchcon(fpath, label string) error { //revive:disable:cognitive-complexity + fastMode := false + // If the current label matches the new label, assume + // other labels are correct. 
+ if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label { + fastMode = true + } + return pwalkdir.Walk(fpath, func(p string, _ fs.DirEntry, _ error) error { + if fastMode { + if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label { + return nil + } + } + err := lSetFileLabel(p, label) + // Walk a file tree can race with removal, so ignore ENOENT. + if errors.Is(err, os.ErrNotExist) { + return nil + } + return err + }) +} + // dupSecOpt takes an SELinux process label and returns security options that // can be used to set the SELinux Type and Level for future container processes. func dupSecOpt(src string) ([]string, error) { @@ -1136,12 +1175,6 @@ func dupSecOpt(src string) ([]string, error) { return dup, nil } -// disableSecOpt returns a security opt that can be used to disable SELinux -// labeling support for future container processes. -func disableSecOpt() []string { - return []string{"disable"} -} - // findUserInContext scans the reader for a valid SELinux context // match that is verified with the verifier. Invalid contexts are // skipped. It returns a matched context or an empty string if no diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go index 78743b020..bc3fd3b37 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go @@ -1,10 +1,22 @@ +//go:build !linux // +build !linux package selinux -func setDisabled() { +func attrPath(string) string { + return "" +} + +func readCon(fpath string) (string, error) { + return "", nil +} + +func writeCon(string, string) error { + return nil } +func setDisabled() {} + func getEnabled() bool { return false } @@ -61,22 +73,6 @@ func calculateGlbLub(sourceRange, targetRange string) (string, error) { return "", nil } -func setExecLabel(label string) error { - return nil -} - -func setTaskLabel(label string) error { - return nil -} - -func setSocketLabel(label string) error { - return nil -} - -func socketLabel() (string, error) { - return "", nil -} - func peerLabel(fd uintptr) (string, error) { return "", nil } @@ -85,17 +81,12 @@ func setKeyLabel(label string) error { return nil } -func keyLabel() (string, error) { - return "", nil -} - func (c Context) get() string { return "" } func newContext(label string) (Context, error) { - c := make(Context) - return c, nil + return Context{}, nil } func clearLabels() { @@ -104,6 +95,10 @@ func clearLabels() { func reserveLabel(label string) { } +func isMLSEnabled() bool { + return false +} + func enforceMode() int { return Disabled } @@ -151,10 +146,6 @@ func dupSecOpt(src string) ([]string, error) { return nil, nil } -func disableSecOpt() []string { - return []string{"disable"} -} - func getDefaultContextWithLevel(user, level, scon string) (string, error) { return "", nil } diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md b/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md deleted file mode 100644 index 7e78dce01..000000000 --- a/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md +++ /dev/null @@ -1,48 +0,0 @@ -## pwalk: parallel implementation of filepath.Walk - -This is a wrapper for [filepath.Walk](https://pkg.go.dev/path/filepath?tab=doc#Walk) -which may speed it up by calling multiple callback functions (WalkFunc) in parallel, -utilizing goroutines. - -By default, it utilizes 2\*runtime.NumCPU() goroutines for callbacks. 
-This can be changed by using WalkN function which has the additional -parameter, specifying the number of goroutines (concurrency). - -### pwalk vs pwalkdir - -This package is deprecated in favor of -[pwalkdir](https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalkdir), -which is faster, but requires at least Go 1.16. - -### Caveats - -Please note the following limitations of this code: - -* Unlike filepath.Walk, the order of calls is non-deterministic; - -* Only primitive error handling is supported: - - * filepath.SkipDir is not supported; - - * no errors are ever passed to WalkFunc; - - * once any error is returned from any WalkFunc instance, no more new calls - to WalkFunc are made, and the error is returned to the caller of Walk; - - * if more than one walkFunc instance will return an error, only one - of such errors will be propagated and returned by Walk, others - will be silently discarded. - -### Documentation - -For the official documentation, see -https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalk?tab=doc - -### Benchmarks - -For a WalkFunc that consists solely of the return statement, this -implementation is about 10% slower than the standard library's -filepath.Walk. - -Otherwise (if a WalkFunc is doing something) this is usually faster, -except when the WalkN(..., 1) is used. diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go b/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go deleted file mode 100644 index 202c80da5..000000000 --- a/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go +++ /dev/null @@ -1,115 +0,0 @@ -package pwalk - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "sync" -) - -type WalkFunc = filepath.WalkFunc - -// Walk is a wrapper for filepath.Walk which can call multiple walkFn -// in parallel, allowing to handle each item concurrently. A maximum of -// twice the runtime.NumCPU() walkFn will be called at any one time. -// If you want to change the maximum, use WalkN instead. -// -// The order of calls is non-deterministic. -// -// Note that this implementation only supports primitive error handling: -// -// - no errors are ever passed to walkFn; -// -// - once a walkFn returns any error, all further processing stops -// and the error is returned to the caller of Walk; -// -// - filepath.SkipDir is not supported; -// -// - if more than one walkFn instance will return an error, only one -// of such errors will be propagated and returned by Walk, others -// will be silently discarded. -func Walk(root string, walkFn WalkFunc) error { - return WalkN(root, walkFn, runtime.NumCPU()*2) -} - -// WalkN is a wrapper for filepath.Walk which can call multiple walkFn -// in parallel, allowing to handle each item concurrently. A maximum of -// num walkFn will be called at any one time. -// -// Please see Walk documentation for caveats of using this function. -func WalkN(root string, walkFn WalkFunc, num int) error { - // make sure limit is sensible - if num < 1 { - return fmt.Errorf("walk(%q): num must be > 0", root) - } - - files := make(chan *walkArgs, 2*num) - errCh := make(chan error, 1) // get the first error, ignore others - - // Start walking a tree asap - var ( - err error - wg sync.WaitGroup - - rootLen = len(root) - rootEntry *walkArgs - ) - wg.Add(1) - go func() { - err = filepath.Walk(root, func(p string, info os.FileInfo, err error) error { - if err != nil { - close(files) - return err - } - if len(p) == rootLen { - // Root entry is processed separately below. 
- rootEntry = &walkArgs{path: p, info: &info} - return nil - } - // add a file to the queue unless a callback sent an error - select { - case e := <-errCh: - close(files) - return e - default: - files <- &walkArgs{path: p, info: &info} - return nil - } - }) - if err == nil { - close(files) - } - wg.Done() - }() - - wg.Add(num) - for i := 0; i < num; i++ { - go func() { - for file := range files { - if e := walkFn(file.path, *file.info, nil); e != nil { - select { - case errCh <- e: // sent ok - default: // buffer full - } - } - } - wg.Done() - }() - } - - wg.Wait() - - if err == nil { - err = walkFn(rootEntry.path, *rootEntry.info, nil) - } - - return err -} - -// walkArgs holds the arguments that were passed to the Walk or WalkN -// functions. -type walkArgs struct { - path string - info *os.FileInfo -} diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go index a5796b2c4..0f5d9f580 100644 --- a/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go +++ b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go @@ -111,6 +111,6 @@ func WalkN(root string, walkFn fs.WalkDirFunc, num int) error { // walkArgs holds the arguments that were passed to the Walk or WalkN // functions. type walkArgs struct { - path string entry fs.DirEntry + path string } diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.pb.go b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.pb.go index 34c182442..8120907d9 100644 --- a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.pb.go +++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.pb.go @@ -6154,264 +6154,264 @@ func init() { func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) } var fileDescriptor_77a6da22d6a3feb1 = []byte{ - // 4107 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0x5b, 0x73, 0x1b, 0xc9, - 0x75, 0xe6, 0x00, 0xc4, 0xed, 0xe0, 0x42, 0xb0, 0x79, 0x11, 0x84, 0x95, 0x28, 0x6e, 0x6b, 0xa5, - 0xe5, 0x4a, 0xbb, 0xc4, 0x9a, 0xb6, 0xb3, 0x55, 0x4a, 0xe2, 0x18, 0x22, 0xb1, 0x12, 0x97, 0x14, - 0xc9, 0x1d, 0x42, 0xda, 0x4b, 0xb9, 0xc2, 0x1a, 0x02, 0x2d, 0x72, 0x42, 0x60, 0x06, 0x9e, 0x19, - 0x40, 0xe4, 0xe6, 0xe2, 0x94, 0xcb, 0x71, 0x25, 0xaf, 0x76, 0x55, 0x2a, 0x79, 0x48, 0x5e, 0x52, - 0x29, 0x97, 0x1f, 0xfc, 0x9c, 0xbf, 0x90, 0xa7, 0x5c, 0x2a, 0x7f, 0x20, 0xb5, 0xf1, 0x4b, 0xf2, - 0x23, 0x52, 0xae, 0xbe, 0xcd, 0xf4, 0xdc, 0x40, 0xd9, 0xd8, 0xdd, 0x17, 0x11, 0x7d, 0xfa, 0xf4, - 0xf9, 0x4e, 0x9f, 0xee, 0x3e, 0xe7, 0xf4, 0xe9, 0x11, 0x94, 0x9c, 0x51, 0x6f, 0x73, 0xe4, 0xd8, - 0x9e, 0x8d, 0x2a, 0xc4, 0xeb, 0xf5, 0x5d, 0xe2, 0x4c, 0x88, 0x33, 0x3a, 0x6d, 0x2e, 0x9f, 0xd9, - 0x67, 0x36, 0xeb, 0x68, 0xd1, 0x5f, 0x9c, 0xa7, 0xd9, 0xa0, 0x3c, 0x2d, 0x63, 0x64, 0xb6, 0x86, - 0x93, 0x5e, 0x6f, 0x74, 0xda, 0xba, 0x98, 0x88, 0x9e, 0xa6, 0xdf, 0x63, 0x8c, 0xbd, 0xf3, 0xd1, - 0x29, 0xfb, 0x23, 0xfa, 0x6e, 0x9d, 0xd9, 0xf6, 0xd9, 0x80, 0xf0, 0x5e, 0xcb, 0xb2, 0x3d, 0xc3, - 0x33, 0x6d, 0xcb, 0xe5, 0xbd, 0xf8, 0xaf, 0x34, 0xa8, 0xe9, 0xc4, 0x1d, 0xd9, 0x96, 0x4b, 0x9e, - 0x12, 0xa3, 0x4f, 0x1c, 0x74, 0x1b, 0xa0, 0x37, 0x18, 0xbb, 0x1e, 0x71, 0x4e, 0xcc, 0x7e, 0x43, - 0x5b, 0xd7, 0x36, 0xe6, 0xf5, 0x92, 0xa0, 0xec, 0xf6, 0xd1, 0x1b, 0x50, 0x1a, 0x92, 0xe1, 0x29, - 0xef, 0xcd, 0xb0, 0xde, 0x22, 0x27, 0xec, 0xf6, 0x51, 0x13, 0x8a, 0x0e, 0x99, 0x98, 0xae, 0x69, - 0x5b, 0x8d, 0xec, 0xba, 0xb6, 0x91, 0xd5, 0xfd, 0x36, 0x1d, 0xe8, 0x18, 0x2f, 0xbd, 0x13, 0x8f, - 0x38, 0xc3, 0xc6, 
0x3c, 0x1f, 0x48, 0x09, 0x5d, 0xe2, 0x0c, 0xf1, 0x4f, 0x72, 0x50, 0xd1, 0x0d, - 0xeb, 0x8c, 0xe8, 0xe4, 0x87, 0x63, 0xe2, 0x7a, 0xa8, 0x0e, 0xd9, 0x0b, 0x72, 0xc5, 0xe0, 0x2b, - 0x3a, 0xfd, 0xc9, 0xc7, 0x5b, 0x67, 0xe4, 0x84, 0x58, 0x1c, 0xb8, 0x42, 0xc7, 0x5b, 0x67, 0xa4, - 0x63, 0xf5, 0xd1, 0x32, 0xe4, 0x06, 0xe6, 0xd0, 0xf4, 0x04, 0x2a, 0x6f, 0x84, 0xd4, 0x99, 0x8f, - 0xa8, 0xb3, 0x0d, 0xe0, 0xda, 0x8e, 0x77, 0x62, 0x3b, 0x7d, 0xe2, 0x34, 0x72, 0xeb, 0xda, 0x46, - 0x6d, 0xeb, 0xad, 0x4d, 0x75, 0x19, 0x36, 0x55, 0x85, 0x36, 0x8f, 0x6d, 0xc7, 0x3b, 0xa4, 0xbc, - 0x7a, 0xc9, 0x95, 0x3f, 0xd1, 0x87, 0x50, 0x66, 0x42, 0x3c, 0xc3, 0x39, 0x23, 0x5e, 0x23, 0xcf, - 0xa4, 0xdc, 0xbb, 0x46, 0x4a, 0x97, 0x31, 0xeb, 0x0c, 0x9e, 0xff, 0x46, 0x18, 0x2a, 0x2e, 0x71, - 0x4c, 0x63, 0x60, 0x7e, 0x61, 0x9c, 0x0e, 0x48, 0xa3, 0xb0, 0xae, 0x6d, 0x14, 0xf5, 0x10, 0x8d, - 0xce, 0xff, 0x82, 0x5c, 0xb9, 0x27, 0xb6, 0x35, 0xb8, 0x6a, 0x14, 0x19, 0x43, 0x91, 0x12, 0x0e, - 0xad, 0xc1, 0x15, 0x5b, 0x34, 0x7b, 0x6c, 0x79, 0xbc, 0xb7, 0xc4, 0x7a, 0x4b, 0x8c, 0xc2, 0xba, - 0x37, 0xa0, 0x3e, 0x34, 0xad, 0x93, 0xa1, 0xdd, 0x3f, 0xf1, 0x0d, 0x02, 0xcc, 0x20, 0xb5, 0xa1, - 0x69, 0x3d, 0xb3, 0xfb, 0xba, 0x34, 0x0b, 0xe5, 0x34, 0x2e, 0xc3, 0x9c, 0x65, 0xc1, 0x69, 0x5c, - 0xaa, 0x9c, 0x9b, 0xb0, 0x44, 0x65, 0xf6, 0x1c, 0x62, 0x78, 0x24, 0x60, 0xae, 0x30, 0xe6, 0xc5, - 0xa1, 0x69, 0x6d, 0xb3, 0x9e, 0x10, 0xbf, 0x71, 0x19, 0xe3, 0xaf, 0x0a, 0x7e, 0xe3, 0x32, 0xcc, - 0x8f, 0x37, 0xa1, 0xe4, 0xdb, 0x1c, 0x15, 0x61, 0xfe, 0xe0, 0xf0, 0xa0, 0x53, 0x9f, 0x43, 0x00, - 0xf9, 0xf6, 0xf1, 0x76, 0xe7, 0x60, 0xa7, 0xae, 0xa1, 0x32, 0x14, 0x76, 0x3a, 0xbc, 0x91, 0xc1, - 0x8f, 0x01, 0x02, 0xeb, 0xa2, 0x02, 0x64, 0xf7, 0x3a, 0x9f, 0xd5, 0xe7, 0x28, 0xcf, 0x8b, 0x8e, - 0x7e, 0xbc, 0x7b, 0x78, 0x50, 0xd7, 0xe8, 0xe0, 0x6d, 0xbd, 0xd3, 0xee, 0x76, 0xea, 0x19, 0xca, - 0xf1, 0xec, 0x70, 0xa7, 0x9e, 0x45, 0x25, 0xc8, 0xbd, 0x68, 0xef, 0x3f, 0xef, 0xd4, 0xe7, 0xf1, - 0xcf, 0x35, 0xa8, 0x8a, 0xf5, 0xe2, 0x67, 0x02, 0x7d, 0x07, 0xf2, 0xe7, 0xec, 0x5c, 0xb0, 0xad, - 0x58, 0xde, 0xba, 0x15, 0x59, 0xdc, 0xd0, 0xd9, 0xd1, 0x05, 0x2f, 0xc2, 0x90, 0xbd, 0x98, 0xb8, - 0x8d, 0xcc, 0x7a, 0x76, 0xa3, 0xbc, 0x55, 0xdf, 0xe4, 0xe7, 0x75, 0x73, 0x8f, 0x5c, 0xbd, 0x30, - 0x06, 0x63, 0xa2, 0xd3, 0x4e, 0x84, 0x60, 0x7e, 0x68, 0x3b, 0x84, 0xed, 0xd8, 0xa2, 0xce, 0x7e, - 0xd3, 0x6d, 0xcc, 0x16, 0x4d, 0xec, 0x56, 0xde, 0xc0, 0xbf, 0xd4, 0x00, 0x8e, 0xc6, 0x5e, 0xfa, - 0xd1, 0x58, 0x86, 0xdc, 0x84, 0x0a, 0x16, 0xc7, 0x82, 0x37, 0xd8, 0x99, 0x20, 0x86, 0x4b, 0xfc, - 0x33, 0x41, 0x1b, 0xe8, 0x06, 0x14, 0x46, 0x0e, 0x99, 0x9c, 0x5c, 0x4c, 0x18, 0x48, 0x51, 0xcf, - 0xd3, 0xe6, 0xde, 0x04, 0xbd, 0x09, 0x15, 0xf3, 0xcc, 0xb2, 0x1d, 0x72, 0xc2, 0x65, 0xe5, 0x58, - 0x6f, 0x99, 0xd3, 0x98, 0xde, 0x0a, 0x0b, 0x17, 0x9c, 0x57, 0x59, 0xf6, 0x29, 0x09, 0x5b, 0x50, - 0x66, 0xaa, 0xce, 0x64, 0xbe, 0x77, 0x02, 0x1d, 0x33, 0x6c, 0x58, 0xdc, 0x84, 0x42, 0x6b, 0xfc, - 0x03, 0x40, 0x3b, 0x64, 0x40, 0x3c, 0x32, 0x8b, 0xf7, 0x50, 0x6c, 0x92, 0x55, 0x6d, 0x82, 0x7f, - 0xa6, 0xc1, 0x52, 0x48, 0xfc, 0x4c, 0xd3, 0x6a, 0x40, 0xa1, 0xcf, 0x84, 0x71, 0x0d, 0xb2, 0xba, - 0x6c, 0xa2, 0x87, 0x50, 0x14, 0x0a, 0xb8, 0x8d, 0x6c, 0xca, 0xa6, 0x29, 0x70, 0x9d, 0x5c, 0xfc, - 0xcb, 0x0c, 0x94, 0xc4, 0x44, 0x0f, 0x47, 0xa8, 0x0d, 0x55, 0x87, 0x37, 0x4e, 0xd8, 0x7c, 0x84, - 0x46, 0xcd, 0x74, 0x27, 0xf4, 0x74, 0x4e, 0xaf, 0x88, 0x21, 0x8c, 0x8c, 0x7e, 0x1f, 0xca, 0x52, - 0xc4, 0x68, 0xec, 0x09, 0x93, 0x37, 0xc2, 0x02, 0x82, 0xfd, 0xf7, 0x74, 0x4e, 0x07, 0xc1, 0x7e, - 0x34, 0xf6, 0x50, 0x17, 0x96, 0xe5, 0x60, 
0x3e, 0x1b, 0xa1, 0x46, 0x96, 0x49, 0x59, 0x0f, 0x4b, - 0x89, 0x2f, 0xd5, 0xd3, 0x39, 0x1d, 0x89, 0xf1, 0x4a, 0xa7, 0xaa, 0x92, 0x77, 0xc9, 0x9d, 0x77, - 0x4c, 0xa5, 0xee, 0xa5, 0x15, 0x57, 0xa9, 0x7b, 0x69, 0x3d, 0x2e, 0x41, 0x41, 0xb4, 0xf0, 0xbf, - 0x64, 0x00, 0xe4, 0x6a, 0x1c, 0x8e, 0xd0, 0x0e, 0xd4, 0x1c, 0xd1, 0x0a, 0x59, 0xeb, 0x8d, 0x44, - 0x6b, 0x89, 0x45, 0x9c, 0xd3, 0xab, 0x72, 0x10, 0x57, 0xee, 0x7b, 0x50, 0xf1, 0xa5, 0x04, 0x06, - 0xbb, 0x99, 0x60, 0x30, 0x5f, 0x42, 0x59, 0x0e, 0xa0, 0x26, 0xfb, 0x04, 0x56, 0xfc, 0xf1, 0x09, - 0x36, 0x7b, 0x73, 0x8a, 0xcd, 0x7c, 0x81, 0x4b, 0x52, 0x82, 0x6a, 0x35, 0x55, 0xb1, 0xc0, 0x6c, - 0x37, 0x13, 0xcc, 0x16, 0x57, 0x8c, 0x1a, 0x0e, 0x68, 0xbc, 0xe4, 0x4d, 0xfc, 0xbf, 0x59, 0x28, - 0x6c, 0xdb, 0xc3, 0x91, 0xe1, 0xd0, 0xd5, 0xc8, 0x3b, 0xc4, 0x1d, 0x0f, 0x3c, 0x66, 0xae, 0xda, - 0xd6, 0xdd, 0xb0, 0x44, 0xc1, 0x26, 0xff, 0xea, 0x8c, 0x55, 0x17, 0x43, 0xe8, 0x60, 0x11, 0x1e, - 0x33, 0xaf, 0x31, 0x58, 0x04, 0x47, 0x31, 0x44, 0x1e, 0xe4, 0x6c, 0x70, 0x90, 0x9b, 0x50, 0x98, - 0x10, 0x27, 0x08, 0xe9, 0x4f, 0xe7, 0x74, 0x49, 0x40, 0xef, 0xc0, 0x42, 0x34, 0xbc, 0xe4, 0x04, - 0x4f, 0xad, 0x17, 0x8e, 0x46, 0x77, 0xa1, 0x12, 0x8a, 0x71, 0x79, 0xc1, 0x57, 0x1e, 0x2a, 0x21, - 0x6e, 0x55, 0xfa, 0x55, 0x1a, 0x8f, 0x2b, 0x4f, 0xe7, 0xa4, 0x67, 0x5d, 0x95, 0x9e, 0xb5, 0x28, - 0x46, 0x09, 0xdf, 0x1a, 0x72, 0x32, 0xdf, 0x0f, 0x3b, 0x19, 0xfc, 0x7d, 0xa8, 0x86, 0x0c, 0x44, - 0xe3, 0x4e, 0xe7, 0xe3, 0xe7, 0xed, 0x7d, 0x1e, 0xa4, 0x9e, 0xb0, 0xb8, 0xa4, 0xd7, 0x35, 0x1a, - 0xeb, 0xf6, 0x3b, 0xc7, 0xc7, 0xf5, 0x0c, 0xaa, 0x42, 0xe9, 0xe0, 0xb0, 0x7b, 0xc2, 0xb9, 0xb2, - 0xf8, 0x89, 0x2f, 0x41, 0x04, 0x39, 0x25, 0xb6, 0xcd, 0x29, 0xb1, 0x4d, 0x93, 0xb1, 0x2d, 0x13, - 0xc4, 0x36, 0x16, 0xe6, 0xf6, 0x3b, 0xed, 0xe3, 0x4e, 0x7d, 0xfe, 0x71, 0x0d, 0x2a, 0xdc, 0xbe, - 0x27, 0x63, 0x8b, 0x86, 0xda, 0x7f, 0xd2, 0x00, 0x82, 0xd3, 0x84, 0x5a, 0x50, 0xe8, 0x71, 0x9c, - 0x86, 0xc6, 0x9c, 0xd1, 0x4a, 0xe2, 0x92, 0xe9, 0x92, 0x0b, 0x7d, 0x0b, 0x0a, 0xee, 0xb8, 0xd7, - 0x23, 0xae, 0x0c, 0x79, 0x37, 0xa2, 0xfe, 0x50, 0x78, 0x2b, 0x5d, 0xf2, 0xd1, 0x21, 0x2f, 0x0d, - 0x73, 0x30, 0x66, 0x01, 0x70, 0xfa, 0x10, 0xc1, 0x87, 0xff, 0x5e, 0x83, 0xb2, 0xb2, 0x79, 0x7f, - 0x47, 0x27, 0x7c, 0x0b, 0x4a, 0x4c, 0x07, 0xd2, 0x17, 0x6e, 0xb8, 0xa8, 0x07, 0x04, 0xf4, 0x7b, - 0x50, 0x92, 0x27, 0x40, 0x7a, 0xe2, 0x46, 0xb2, 0xd8, 0xc3, 0x91, 0x1e, 0xb0, 0xe2, 0x3d, 0x58, - 0x64, 0x56, 0xe9, 0xd1, 0xe4, 0x5a, 0xda, 0x51, 0x4d, 0x3f, 0xb5, 0x48, 0xfa, 0xd9, 0x84, 0xe2, - 0xe8, 0xfc, 0xca, 0x35, 0x7b, 0xc6, 0x40, 0x68, 0xe1, 0xb7, 0xf1, 0x47, 0x80, 0x54, 0x61, 0xb3, - 0x4c, 0x17, 0x57, 0xa1, 0xfc, 0xd4, 0x70, 0xcf, 0x85, 0x4a, 0xf8, 0x21, 0x54, 0x69, 0x73, 0xef, - 0xc5, 0x6b, 0xe8, 0xc8, 0x2e, 0x07, 0x92, 0x7b, 0x26, 0x9b, 0x23, 0x98, 0x3f, 0x37, 0xdc, 0x73, - 0x36, 0xd1, 0xaa, 0xce, 0x7e, 0xa3, 0x77, 0xa0, 0xde, 0xe3, 0x93, 0x3c, 0x89, 0x5c, 0x19, 0x16, - 0x04, 0xdd, 0xcf, 0x04, 0x3f, 0x85, 0x0a, 0x9f, 0xc3, 0x57, 0xad, 0x04, 0x5e, 0x84, 0x85, 0x63, - 0xcb, 0x18, 0xb9, 0xe7, 0xb6, 0x8c, 0x6e, 0x74, 0xd2, 0xf5, 0x80, 0x36, 0x13, 0xe2, 0xdb, 0xb0, - 0xe0, 0x90, 0xa1, 0x61, 0x5a, 0xa6, 0x75, 0x76, 0x72, 0x7a, 0xe5, 0x11, 0x57, 0x5c, 0x98, 0x6a, - 0x3e, 0xf9, 0x31, 0xa5, 0x52, 0xd5, 0x4e, 0x07, 0xf6, 0xa9, 0x70, 0x73, 0xec, 0x37, 0xfe, 0x69, - 0x06, 0x2a, 0x9f, 0x18, 0x5e, 0x4f, 0x2e, 0x1d, 0xda, 0x85, 0x9a, 0xef, 0xdc, 0x18, 0x45, 0xe8, - 0x12, 0x09, 0xb1, 0x6c, 0x8c, 0x4c, 0xa5, 0x65, 0x74, 0xac, 0xf6, 0x54, 0x02, 0x13, 0x65, 0x58, - 0x3d, 0x32, 0xf0, 0x45, 0x65, 0xd2, 0x45, 0x31, 0x46, 0x55, 0x94, 
0x4a, 0x40, 0x87, 0x50, 0x1f, - 0x39, 0xf6, 0x99, 0x43, 0x5c, 0xd7, 0x17, 0xc6, 0xc3, 0x18, 0x4e, 0x10, 0x76, 0x24, 0x58, 0x03, - 0x71, 0x0b, 0xa3, 0x30, 0xe9, 0xf1, 0x42, 0x90, 0xcf, 0x70, 0xe7, 0xf4, 0x9f, 0x19, 0x40, 0xf1, - 0x49, 0xfd, 0xb6, 0x29, 0xde, 0x3d, 0xa8, 0xb9, 0x9e, 0xe1, 0xc4, 0x36, 0x5b, 0x95, 0x51, 0x7d, - 0x8f, 0xff, 0x36, 0xf8, 0x0a, 0x9d, 0x58, 0xb6, 0x67, 0xbe, 0xbc, 0x12, 0x59, 0x72, 0x4d, 0x92, - 0x0f, 0x18, 0x15, 0x75, 0xa0, 0xf0, 0xd2, 0x1c, 0x78, 0xc4, 0x71, 0x1b, 0xb9, 0xf5, 0xec, 0x46, - 0x6d, 0xeb, 0xe1, 0x75, 0xcb, 0xb0, 0xf9, 0x21, 0xe3, 0xef, 0x5e, 0x8d, 0x88, 0x2e, 0xc7, 0xaa, - 0x99, 0x67, 0x3e, 0x94, 0x8d, 0xdf, 0x84, 0xe2, 0x2b, 0x2a, 0x82, 0xde, 0xb2, 0x0b, 0x3c, 0x59, - 0x64, 0x6d, 0x7e, 0xc9, 0x7e, 0xe9, 0x18, 0x67, 0x43, 0x62, 0x79, 0xf2, 0x1e, 0x28, 0xdb, 0xf8, - 0x1e, 0x40, 0x00, 0x43, 0x5d, 0xfe, 0xc1, 0xe1, 0xd1, 0xf3, 0x6e, 0x7d, 0x0e, 0x55, 0xa0, 0x78, - 0x70, 0xb8, 0xd3, 0xd9, 0xef, 0xd0, 0xf8, 0x80, 0x5b, 0xd2, 0xa4, 0xa1, 0xb5, 0x54, 0x31, 0xb5, - 0x10, 0x26, 0x5e, 0x85, 0xe5, 0xa4, 0x05, 0xa4, 0xb9, 0x68, 0x55, 0xec, 0xd2, 0x99, 0x8e, 0x8a, - 0x0a, 0x9d, 0x09, 0x4f, 0xb7, 0x01, 0x05, 0xbe, 0x7b, 0xfb, 0x22, 0x39, 0x97, 0x4d, 0x6a, 0x08, - 0xbe, 0x19, 0x49, 0x5f, 0xac, 0x92, 0xdf, 0x4e, 0x74, 0x2f, 0xb9, 0x44, 0xf7, 0x82, 0xee, 0x42, - 0xd5, 0x3f, 0x0d, 0x86, 0x2b, 0x72, 0x81, 0x92, 0x5e, 0x91, 0x1b, 0x9d, 0xd2, 0x42, 0x46, 0x2f, - 0x84, 0x8d, 0x8e, 0xee, 0x41, 0x9e, 0x4c, 0x88, 0xe5, 0xb9, 0x8d, 0x32, 0x8b, 0x18, 0x55, 0x99, - 0xbb, 0x77, 0x28, 0x55, 0x17, 0x9d, 0xf8, 0xbb, 0xb0, 0xc8, 0xee, 0x48, 0x4f, 0x1c, 0xc3, 0x52, - 0x2f, 0x73, 0xdd, 0xee, 0xbe, 0x30, 0x37, 0xfd, 0x89, 0x6a, 0x90, 0xd9, 0xdd, 0x11, 0x46, 0xc8, - 0xec, 0xee, 0xe0, 0x1f, 0x6b, 0x80, 0xd4, 0x71, 0x33, 0xd9, 0x39, 0x22, 0x5c, 0xc2, 0x67, 0x03, - 0xf8, 0x65, 0xc8, 0x11, 0xc7, 0xb1, 0x1d, 0x66, 0xd1, 0x92, 0xce, 0x1b, 0xf8, 0x2d, 0xa1, 0x83, - 0x4e, 0x26, 0xf6, 0x85, 0x7f, 0x06, 0xb9, 0x34, 0xcd, 0x57, 0x75, 0x0f, 0x96, 0x42, 0x5c, 0x33, - 0x45, 0xae, 0x0f, 0x61, 0x81, 0x09, 0xdb, 0x3e, 0x27, 0xbd, 0x8b, 0x91, 0x6d, 0x5a, 0x31, 0x3c, - 0xba, 0x72, 0x81, 0x83, 0xa5, 0xf3, 0xe0, 0x13, 0xab, 0xf8, 0xc4, 0x6e, 0x77, 0x1f, 0x7f, 0x06, - 0xab, 0x11, 0x39, 0x52, 0xfd, 0x3f, 0x82, 0x72, 0xcf, 0x27, 0xba, 0x22, 0xd7, 0xb9, 0x1d, 0x56, - 0x2e, 0x3a, 0x54, 0x1d, 0x81, 0x0f, 0xe1, 0x46, 0x4c, 0xf4, 0x4c, 0x73, 0x7e, 0x1b, 0x56, 0x98, - 0xc0, 0x3d, 0x42, 0x46, 0xed, 0x81, 0x39, 0x49, 0xb5, 0xf4, 0x48, 0x4c, 0x4a, 0x61, 0xfc, 0x7a, - 0xf7, 0x05, 0xfe, 0x03, 0x81, 0xd8, 0x35, 0x87, 0xa4, 0x6b, 0xef, 0xa7, 0xeb, 0x46, 0xa3, 0xd9, - 0x05, 0xb9, 0x72, 0x45, 0x5a, 0xc3, 0x7e, 0xe3, 0x7f, 0xd6, 0x84, 0xa9, 0xd4, 0xe1, 0x5f, 0xf3, - 0x4e, 0x5e, 0x03, 0x38, 0xa3, 0x47, 0x86, 0xf4, 0x69, 0x07, 0xaf, 0xa8, 0x28, 0x14, 0x5f, 0x4f, - 0xea, 0xbf, 0x2b, 0x42, 0xcf, 0x65, 0xb1, 0xcf, 0xd9, 0x3f, 0xbe, 0x97, 0xbb, 0x0d, 0x65, 0x46, - 0x38, 0xf6, 0x0c, 0x6f, 0xec, 0xc6, 0x16, 0xe3, 0x2f, 0xc4, 0xb6, 0x97, 0x83, 0x66, 0x9a, 0xd7, - 0xb7, 0x20, 0xcf, 0x2e, 0x13, 0x32, 0x95, 0xbe, 0x99, 0xb0, 0x1f, 0xb9, 0x1e, 0xba, 0x60, 0xc4, - 0x3f, 0xd5, 0x20, 0xff, 0x8c, 0x95, 0x60, 0x15, 0xd5, 0xe6, 0xe5, 0x5a, 0x58, 0xc6, 0x90, 0x17, - 0x86, 0x4a, 0x3a, 0xfb, 0xcd, 0x52, 0x4f, 0x42, 0x9c, 0xe7, 0xfa, 0x3e, 0x4f, 0x71, 0x4b, 0xba, - 0xdf, 0xa6, 0x36, 0xeb, 0x0d, 0x4c, 0x62, 0x79, 0xac, 0x77, 0x9e, 0xf5, 0x2a, 0x14, 0x9a, 0x3d, - 0x9b, 0xee, 0x3e, 0x31, 0x1c, 0x4b, 0x14, 0x4d, 0x8b, 0x7a, 0x40, 0xc0, 0xfb, 0x50, 0xe7, 0x7a, - 0xb4, 0xfb, 0x7d, 0x25, 0xc1, 0xf4, 0xd1, 0xb4, 0x08, 0x5a, 0x48, 0x5a, 0x26, 0x2a, 0xed, 
0x17, - 0x1a, 0x2c, 0x2a, 0xe2, 0x66, 0xb2, 0xea, 0xbb, 0x90, 0xe7, 0x45, 0x6a, 0x91, 0xe9, 0x2c, 0x87, - 0x47, 0x71, 0x18, 0x5d, 0xf0, 0xa0, 0x4d, 0x28, 0xf0, 0x5f, 0xf2, 0x0e, 0x90, 0xcc, 0x2e, 0x99, - 0xf0, 0x3d, 0x58, 0x12, 0x24, 0x32, 0xb4, 0x93, 0x0e, 0x06, 0x5b, 0x0c, 0xfc, 0x67, 0xb0, 0x1c, - 0x66, 0x9b, 0x69, 0x4a, 0x8a, 0x92, 0x99, 0xd7, 0x51, 0xb2, 0x2d, 0x95, 0x7c, 0x3e, 0xea, 0x2b, - 0x79, 0x54, 0x74, 0xc7, 0xa8, 0xeb, 0x95, 0x09, 0xaf, 0x57, 0x30, 0x01, 0x29, 0xe2, 0x1b, 0x9d, - 0xc0, 0x07, 0x72, 0x3b, 0xec, 0x9b, 0xae, 0xef, 0xc3, 0x31, 0x54, 0x06, 0xa6, 0x45, 0x0c, 0x47, - 0x54, 0xce, 0x35, 0x5e, 0x39, 0x57, 0x69, 0xf8, 0x0b, 0x40, 0xea, 0xc0, 0x6f, 0x54, 0xe9, 0xfb, - 0xd2, 0x64, 0x47, 0x8e, 0x3d, 0xb4, 0x53, 0xcd, 0x8e, 0xff, 0x1c, 0x56, 0x22, 0x7c, 0xdf, 0xa8, - 0x9a, 0x4b, 0xb0, 0xb8, 0x43, 0x64, 0x42, 0x23, 0xdd, 0xde, 0x47, 0x80, 0x54, 0xe2, 0x4c, 0x91, - 0xad, 0x05, 0x8b, 0xcf, 0xec, 0x09, 0x75, 0x91, 0x94, 0x1a, 0xf8, 0x06, 0x5e, 0x87, 0xf0, 0x4d, - 0xe1, 0xb7, 0x29, 0xb8, 0x3a, 0x60, 0x26, 0xf0, 0x7f, 0xd7, 0xa0, 0xd2, 0x1e, 0x18, 0xce, 0x50, - 0x02, 0x7f, 0x0f, 0xf2, 0xfc, 0x76, 0x2d, 0x0a, 0x5a, 0xf7, 0xc3, 0x62, 0x54, 0x5e, 0xde, 0x68, - 0xf3, 0xbb, 0xb8, 0x18, 0x45, 0x15, 0x17, 0x6f, 0x5e, 0x3b, 0x91, 0x37, 0xb0, 0x1d, 0xf4, 0x1e, - 0xe4, 0x0c, 0x3a, 0x84, 0x85, 0xa2, 0x5a, 0xb4, 0xae, 0xc1, 0xa4, 0xb1, 0x3b, 0x00, 0xe7, 0xc2, - 0xdf, 0x81, 0xb2, 0x82, 0x80, 0x0a, 0x90, 0x7d, 0xd2, 0x11, 0x09, 0x7b, 0x7b, 0xbb, 0xbb, 0xfb, - 0x82, 0x17, 0x74, 0x6a, 0x00, 0x3b, 0x1d, 0xbf, 0x9d, 0xc1, 0x9f, 0x8a, 0x51, 0xc2, 0xed, 0xab, - 0xfa, 0x68, 0x69, 0xfa, 0x64, 0x5e, 0x4b, 0x9f, 0x4b, 0xa8, 0x8a, 0xe9, 0xcf, 0x1a, 0xc6, 0x98, - 0xbc, 0x94, 0x30, 0xa6, 0x28, 0xaf, 0x0b, 0x46, 0xfc, 0x2b, 0x0d, 0xea, 0x3b, 0xf6, 0x2b, 0xeb, - 0xcc, 0x31, 0xfa, 0xfe, 0x39, 0xf9, 0x30, 0xb2, 0x52, 0x9b, 0x91, 0xe2, 0x68, 0x84, 0x3f, 0x20, - 0x44, 0x56, 0xac, 0x11, 0x94, 0x0d, 0x79, 0x2c, 0x94, 0x4d, 0xfc, 0x01, 0x2c, 0x44, 0x06, 0x51, - 0xdb, 0xbf, 0x68, 0xef, 0xef, 0xee, 0x50, 0x5b, 0xb3, 0xc2, 0x5a, 0xe7, 0xa0, 0xfd, 0x78, 0xbf, - 0x23, 0x1e, 0x90, 0xda, 0x07, 0xdb, 0x9d, 0xfd, 0x7a, 0x06, 0xf7, 0x60, 0x51, 0x81, 0x9f, 0xf5, - 0x65, 0x20, 0x45, 0xbb, 0x05, 0xa8, 0x8a, 0x68, 0x2f, 0x0e, 0xe5, 0xbf, 0x65, 0xa0, 0x26, 0x29, - 0x5f, 0x0f, 0x26, 0x5a, 0x85, 0x7c, 0xff, 0xf4, 0xd8, 0xfc, 0x42, 0xbe, 0x1c, 0x89, 0x16, 0xa5, - 0x0f, 0x38, 0x0e, 0x7f, 0xbe, 0x15, 0x2d, 0x1a, 0xc6, 0x1d, 0xe3, 0xa5, 0xb7, 0x6b, 0xf5, 0xc9, - 0x25, 0x4b, 0x0a, 0xe6, 0xf5, 0x80, 0xc0, 0x2a, 0x4c, 0xe2, 0x99, 0x97, 0xdd, 0xac, 0x94, 0x67, - 0x5f, 0xf4, 0x00, 0xea, 0xf4, 0x77, 0x7b, 0x34, 0x1a, 0x98, 0xa4, 0xcf, 0x05, 0x14, 0x18, 0x4f, - 0x8c, 0x4e, 0xd1, 0xd9, 0x5d, 0xc4, 0x6d, 0x14, 0x59, 0x58, 0x12, 0x2d, 0xb4, 0x0e, 0x65, 0xae, - 0xdf, 0xae, 0xf5, 0xdc, 0x25, 0xec, 0xed, 0x33, 0xab, 0xab, 0xa4, 0x70, 0x9a, 0x01, 0xd1, 0x34, - 0x63, 0x09, 0x16, 0xdb, 0x63, 0xef, 0xbc, 0x63, 0xd1, 0x58, 0x21, 0xad, 0xbc, 0x0c, 0x88, 0x12, - 0x77, 0x4c, 0x57, 0xa5, 0x0a, 0xd6, 0xf0, 0x82, 0x74, 0x60, 0x89, 0x12, 0x89, 0xe5, 0x99, 0x3d, - 0x25, 0xae, 0xca, 0xcc, 0x4b, 0x8b, 0x64, 0x5e, 0x86, 0xeb, 0xbe, 0xb2, 0x9d, 0xbe, 0xb0, 0xb9, - 0xdf, 0xc6, 0xff, 0xa8, 0x71, 0xc8, 0xe7, 0x6e, 0x28, 0x7d, 0xfa, 0x2d, 0xc5, 0xa0, 0xf7, 0xa1, - 0x60, 0x8f, 0xd8, 0x0b, 0xbf, 0x28, 0xc3, 0xac, 0x6e, 0xf2, 0x6f, 0x02, 0x36, 0x85, 0xe0, 0x43, - 0xde, 0xab, 0x4b, 0x36, 0x74, 0x1f, 0x6a, 0xe7, 0x86, 0x7b, 0x4e, 0xfa, 0x47, 0x52, 0x26, 0xbf, - 0xf9, 0x45, 0xa8, 0x78, 0x23, 0xd0, 0xef, 0x09, 0xf1, 0xa6, 0xe8, 0x87, 0x1f, 0xc2, 0x8a, 0xe4, - 0x14, 0xaf, 0x13, 
0x53, 0x98, 0x5f, 0xc1, 0x6d, 0xc9, 0xbc, 0x7d, 0x6e, 0x58, 0x67, 0x44, 0x02, - 0xfe, 0xae, 0x16, 0x88, 0xcf, 0x27, 0x9b, 0x38, 0x9f, 0xc7, 0xd0, 0xf0, 0xe7, 0xc3, 0x6e, 0xd6, - 0xf6, 0x40, 0x55, 0x74, 0xec, 0x8a, 0xf3, 0x54, 0xd2, 0xd9, 0x6f, 0x4a, 0x73, 0xec, 0x81, 0x9f, - 0x4a, 0xd3, 0xdf, 0x78, 0x1b, 0x6e, 0x4a, 0x19, 0xe2, 0xce, 0x1b, 0x16, 0x12, 0x53, 0x3c, 0x49, - 0x88, 0x30, 0x2c, 0x1d, 0x3a, 0x7d, 0xe1, 0x55, 0xce, 0xf0, 0x12, 0x30, 0x99, 0x9a, 0x22, 0x73, - 0x85, 0x6f, 0x4a, 0xaa, 0x98, 0x92, 0x2d, 0x49, 0x32, 0x15, 0xa0, 0x92, 0xc5, 0x82, 0x51, 0x72, - 0x6c, 0xc1, 0x62, 0xa2, 0x7f, 0x00, 0x6b, 0xbe, 0x12, 0xd4, 0x6e, 0x47, 0xc4, 0x19, 0x9a, 0xae, - 0xab, 0xd4, 0xbd, 0x93, 0x26, 0x7e, 0x1f, 0xe6, 0x47, 0x44, 0x04, 0xa1, 0xf2, 0x16, 0x92, 0x9b, - 0x52, 0x19, 0xcc, 0xfa, 0x71, 0x1f, 0xee, 0x48, 0xe9, 0xdc, 0xa2, 0x89, 0xe2, 0xa3, 0x4a, 0xc9, - 0x6a, 0x60, 0x26, 0xa5, 0x1a, 0x98, 0x8d, 0xbc, 0xc5, 0x7c, 0xc4, 0x0d, 0x29, 0xcf, 0xfc, 0x4c, - 0xc9, 0xc5, 0x1e, 0xb7, 0xa9, 0xef, 0x2a, 0x66, 0x12, 0xf6, 0xd7, 0xc2, 0x0b, 0x7c, 0x55, 0x1e, - 0x9e, 0xb0, 0x19, 0xca, 0x87, 0x0e, 0xd9, 0xa4, 0x59, 0x33, 0x5d, 0x00, 0x5d, 0xad, 0x85, 0xce, - 0xeb, 0x21, 0x1a, 0x3e, 0x85, 0xe5, 0xb0, 0x5f, 0x9b, 0x49, 0x97, 0x65, 0xc8, 0x79, 0xf6, 0x05, - 0x91, 0xb1, 0x86, 0x37, 0xa4, 0xed, 0x7c, 0x9f, 0x37, 0x93, 0xed, 0x8c, 0x40, 0x18, 0x3b, 0x1d, - 0xb3, 0xea, 0x4b, 0x37, 0x96, 0xbc, 0x03, 0xf1, 0x06, 0x3e, 0x80, 0xd5, 0xa8, 0x67, 0x9b, 0x49, - 0xe5, 0x17, 0xfc, 0x2c, 0x25, 0x39, 0xbf, 0x99, 0xe4, 0x7e, 0x1c, 0xf8, 0x25, 0xc5, 0xb7, 0xcd, - 0x24, 0x52, 0x87, 0x66, 0x92, 0xab, 0xfb, 0x2a, 0x8e, 0x8e, 0xef, 0xf9, 0x66, 0x12, 0xe6, 0x06, - 0xc2, 0x66, 0x5f, 0xfe, 0xc0, 0x5d, 0x65, 0xa7, 0xba, 0x2b, 0x71, 0x48, 0x02, 0x87, 0xfa, 0x35, - 0x6c, 0x3a, 0x81, 0x11, 0xf8, 0xf2, 0x59, 0x31, 0x68, 0x38, 0xf3, 0x31, 0x58, 0x43, 0x6e, 0x6c, - 0x35, 0x02, 0xcc, 0xb4, 0x18, 0x9f, 0x04, 0x6e, 0x3c, 0x16, 0x24, 0x66, 0x12, 0xfc, 0x29, 0xac, - 0xa7, 0xc7, 0x87, 0x59, 0x24, 0x3f, 0x68, 0x41, 0xc9, 0xbf, 0x0c, 0x29, 0xdf, 0x9b, 0x95, 0xa1, - 0x70, 0x70, 0x78, 0x7c, 0xd4, 0xde, 0xee, 0xf0, 0x0f, 0xce, 0xb6, 0x0f, 0x75, 0xfd, 0xf9, 0x51, - 0xb7, 0x9e, 0xd9, 0xfa, 0x75, 0x16, 0x32, 0x7b, 0x2f, 0xd0, 0x67, 0x90, 0xe3, 0x5f, 0x5f, 0x4c, - 0xf9, 0xe4, 0xa6, 0x39, 0xed, 0x03, 0x13, 0x7c, 0xe3, 0xc7, 0xff, 0xf5, 0xeb, 0x9f, 0x67, 0x16, - 0x71, 0xa5, 0x35, 0xf9, 0x76, 0xeb, 0x62, 0xd2, 0x62, 0x61, 0xea, 0x91, 0xf6, 0x00, 0x7d, 0x0c, - 0xd9, 0xa3, 0xb1, 0x87, 0x52, 0x3f, 0xc5, 0x69, 0xa6, 0x7f, 0x73, 0x82, 0x57, 0x98, 0xd0, 0x05, - 0x0c, 0x42, 0xe8, 0x68, 0xec, 0x51, 0x91, 0x3f, 0x84, 0xb2, 0xfa, 0xc5, 0xc8, 0xb5, 0xdf, 0xe7, - 0x34, 0xaf, 0xff, 0x1a, 0x05, 0xdf, 0x66, 0x50, 0x37, 0x30, 0x12, 0x50, 0xfc, 0x9b, 0x16, 0x75, - 0x16, 0xdd, 0x4b, 0x0b, 0xa5, 0x7e, 0xbd, 0xd3, 0x4c, 0xff, 0x40, 0x25, 0x36, 0x0b, 0xef, 0xd2, - 0xa2, 0x22, 0xff, 0x44, 0x7c, 0x9b, 0xd2, 0xf3, 0xd0, 0x9d, 0x84, 0x6f, 0x13, 0xd4, 0x57, 0xf8, - 0xe6, 0x7a, 0x3a, 0x83, 0x00, 0xb9, 0xc5, 0x40, 0x56, 0xf1, 0xa2, 0x00, 0xe9, 0xf9, 0x2c, 0x8f, - 0xb4, 0x07, 0x5b, 0x3d, 0xc8, 0xb1, 0x17, 0x2e, 0xf4, 0xb9, 0xfc, 0xd1, 0x4c, 0x78, 0xea, 0x4b, - 0x59, 0xe8, 0xd0, 0xdb, 0x18, 0x5e, 0x66, 0x40, 0x35, 0x5c, 0xa2, 0x40, 0xec, 0x7d, 0xeb, 0x91, - 0xf6, 0x60, 0x43, 0x7b, 0x5f, 0xdb, 0xfa, 0x55, 0x0e, 0x72, 0xac, 0xb4, 0x8b, 0x2e, 0x00, 0x82, - 0xd7, 0x9e, 0xe8, 0xec, 0x62, 0xef, 0x47, 0xd1, 0xd9, 0xc5, 0x1f, 0x8a, 0x70, 0x93, 0x81, 0x2e, - 0xe3, 0x05, 0x0a, 0xca, 0x2a, 0xc6, 0x2d, 0x56, 0x04, 0xa7, 0x76, 0xfc, 0x1b, 0x4d, 0x54, 0xb6, - 0xf9, 0x59, 0x42, 0x49, 0xd2, 0x42, 0x4f, 
0x3e, 0xd1, 0xed, 0x90, 0xf0, 0xdc, 0x83, 0xbf, 0xcb, - 0x00, 0x5b, 0xb8, 0x1e, 0x00, 0x3a, 0x8c, 0xe3, 0x91, 0xf6, 0xe0, 0xf3, 0x06, 0x5e, 0x12, 0x56, - 0x8e, 0xf4, 0xa0, 0x1f, 0x41, 0x2d, 0xfc, 0xa4, 0x81, 0xee, 0x26, 0x60, 0x45, 0x5f, 0x46, 0x9a, - 0x6f, 0x4d, 0x67, 0x12, 0x3a, 0xad, 0x31, 0x9d, 0x04, 0x38, 0x47, 0xbe, 0x20, 0x64, 0x64, 0x50, - 0x26, 0xb1, 0x06, 0xe8, 0x1f, 0x34, 0xf1, 0xe2, 0x14, 0xbc, 0x51, 0xa0, 0x24, 0xe9, 0xb1, 0x17, - 0x90, 0xe6, 0xbd, 0x6b, 0xb8, 0x84, 0x12, 0x7f, 0xc8, 0x94, 0xf8, 0x00, 0x2f, 0x07, 0x4a, 0x78, - 0xe6, 0x90, 0x78, 0xb6, 0xd0, 0xe2, 0xf3, 0x5b, 0xf8, 0x46, 0xc8, 0x38, 0xa1, 0xde, 0x60, 0xb1, - 0xf8, 0x3b, 0x43, 0xe2, 0x62, 0x85, 0xde, 0x2d, 0x12, 0x17, 0x2b, 0xfc, 0x48, 0x91, 0xb4, 0x58, - 0xfc, 0x55, 0x21, 0x69, 0xb1, 0xfc, 0x9e, 0xad, 0xff, 0x9b, 0x87, 0xc2, 0x36, 0xff, 0x26, 0x1c, - 0xd9, 0x50, 0xf2, 0xcb, 0xf4, 0x68, 0x2d, 0xa9, 0xce, 0x18, 0x5c, 0x6b, 0x9a, 0x77, 0x52, 0xfb, - 0x85, 0x42, 0x6f, 0x32, 0x85, 0xde, 0xc0, 0xab, 0x14, 0x59, 0x7c, 0x76, 0xde, 0xe2, 0xc5, 0xac, - 0x96, 0xd1, 0xef, 0x53, 0x43, 0xfc, 0x29, 0x54, 0xd4, 0x3a, 0x3a, 0x7a, 0x33, 0xb1, 0xb6, 0xa9, - 0x96, 0xe2, 0x9b, 0x78, 0x1a, 0x8b, 0x40, 0x7e, 0x8b, 0x21, 0xaf, 0xe1, 0x9b, 0x09, 0xc8, 0x0e, - 0x63, 0x0d, 0x81, 0xf3, 0x1a, 0x78, 0x32, 0x78, 0xa8, 0xc4, 0x9e, 0x0c, 0x1e, 0x2e, 0xa1, 0x4f, - 0x05, 0x1f, 0x33, 0x56, 0x0a, 0xee, 0x02, 0x04, 0x95, 0x6c, 0x94, 0x68, 0x4b, 0xe5, 0x5e, 0x17, - 0x75, 0x0e, 0xf1, 0x22, 0x38, 0xc6, 0x0c, 0x56, 0xec, 0xbb, 0x08, 0xec, 0xc0, 0x74, 0x3d, 0x7e, - 0x30, 0xab, 0xa1, 0xd2, 0x34, 0x4a, 0x9c, 0x4f, 0xb8, 0xbe, 0xdd, 0xbc, 0x3b, 0x95, 0x47, 0xa0, - 0xdf, 0x63, 0xe8, 0x77, 0x70, 0x33, 0x01, 0x7d, 0xc4, 0x79, 0xe9, 0x66, 0xfb, 0xff, 0x3c, 0x94, - 0x9f, 0x19, 0xa6, 0xe5, 0x11, 0xcb, 0xb0, 0x7a, 0x04, 0x9d, 0x42, 0x8e, 0x45, 0xea, 0xa8, 0x23, - 0x56, 0xcb, 0xb6, 0x51, 0x47, 0x1c, 0xaa, 0x69, 0xe2, 0x75, 0x06, 0xdc, 0xc4, 0x2b, 0x14, 0x78, - 0x18, 0x88, 0x6e, 0xb1, 0x52, 0x24, 0x9d, 0xf4, 0x4b, 0xc8, 0x8b, 0xd7, 0xbe, 0x88, 0xa0, 0x50, - 0xf1, 0xa7, 0x79, 0x2b, 0xb9, 0x33, 0x69, 0x2f, 0xab, 0x30, 0x2e, 0xe3, 0xa3, 0x38, 0x13, 0x80, - 0xa0, 0xc6, 0x1e, 0x5d, 0xd1, 0x58, 0x49, 0xbe, 0xb9, 0x9e, 0xce, 0x90, 0x64, 0x53, 0x15, 0xb3, - 0xef, 0xf3, 0x52, 0xdc, 0x3f, 0x86, 0xf9, 0xa7, 0x86, 0x7b, 0x8e, 0x22, 0xb1, 0x57, 0xf9, 0x56, - 0xac, 0xd9, 0x4c, 0xea, 0x12, 0x28, 0x77, 0x18, 0xca, 0x4d, 0xee, 0xca, 0x54, 0x94, 0x73, 0xc3, - 0xa5, 0x41, 0x0d, 0xf5, 0x21, 0xcf, 0x3f, 0x1d, 0x8b, 0xda, 0x2f, 0xf4, 0xf9, 0x59, 0xd4, 0x7e, - 0xe1, 0xaf, 0xcd, 0xae, 0x47, 0x19, 0x41, 0x51, 0x7e, 0xab, 0x85, 0x22, 0x0f, 0xf7, 0x91, 0xef, - 0xba, 0x9a, 0x6b, 0x69, 0xdd, 0x02, 0xeb, 0x2e, 0xc3, 0xba, 0x8d, 0x1b, 0xb1, 0xb5, 0x12, 0x9c, - 0x8f, 0xb4, 0x07, 0xef, 0x6b, 0xe8, 0x47, 0x00, 0xc1, 0xb3, 0x44, 0xec, 0x04, 0x46, 0x5f, 0x38, - 0x62, 0x27, 0x30, 0xf6, 0xa2, 0x81, 0x37, 0x19, 0xee, 0x06, 0xbe, 0x1b, 0xc5, 0xf5, 0x1c, 0xc3, - 0x72, 0x5f, 0x12, 0xe7, 0x3d, 0x5e, 0x65, 0x75, 0xcf, 0xcd, 0x11, 0x9d, 0xb2, 0x03, 0x25, 0xbf, - 0xea, 0x1c, 0xf5, 0xb6, 0xd1, 0x6a, 0x78, 0xd4, 0xdb, 0xc6, 0xca, 0xd5, 0x61, 0xb7, 0x13, 0xda, - 0x2d, 0x92, 0x95, 0x1e, 0xc0, 0x5f, 0xd4, 0x61, 0x9e, 0x66, 0xdd, 0x34, 0x39, 0x09, 0xea, 0x26, - 0xd1, 0xd9, 0xc7, 0xaa, 0xa8, 0xd1, 0xd9, 0xc7, 0x4b, 0x2e, 0xe1, 0xe4, 0x84, 0x5e, 0xb2, 0x5a, - 0xbc, 0x44, 0x41, 0x67, 0x6a, 0x43, 0x59, 0x29, 0xac, 0xa0, 0x04, 0x61, 0xe1, 0xf2, 0x6c, 0x34, - 0xdc, 0x25, 0x54, 0x65, 0xf0, 0x1b, 0x0c, 0x6f, 0x85, 0x87, 0x3b, 0x86, 0xd7, 0xe7, 0x1c, 0x14, - 0x50, 0xcc, 0x4e, 0x9c, 0xfb, 0x84, 0xd9, 0x85, 0xcf, 0xfe, 0x7a, 
0x3a, 0x43, 0xea, 0xec, 0x82, - 0x83, 0xff, 0x0a, 0x2a, 0x6a, 0x79, 0x05, 0x25, 0x28, 0x1f, 0x29, 0x29, 0x47, 0xe3, 0x48, 0x52, - 0x75, 0x26, 0xec, 0xd9, 0x18, 0xa4, 0xa1, 0xb0, 0x51, 0xe0, 0x01, 0x14, 0x44, 0xbd, 0x25, 0xc9, - 0xa4, 0xe1, 0xf2, 0x73, 0x92, 0x49, 0x23, 0xc5, 0x9a, 0x70, 0xf6, 0xcc, 0x10, 0xe9, 0x95, 0x52, - 0xc6, 0x6a, 0x81, 0xf6, 0x84, 0x78, 0x69, 0x68, 0x41, 0x25, 0x33, 0x0d, 0x4d, 0xb9, 0xce, 0xa7, - 0xa1, 0x9d, 0x11, 0x4f, 0xf8, 0x03, 0x79, 0x4d, 0x46, 0x29, 0xc2, 0xd4, 0xf8, 0x88, 0xa7, 0xb1, - 0x24, 0x5d, 0x6e, 0x02, 0x40, 0x19, 0x1c, 0x2f, 0x01, 0x82, 0x6a, 0x50, 0x34, 0x63, 0x4d, 0xac, - 0x82, 0x47, 0x33, 0xd6, 0xe4, 0x82, 0x52, 0xd8, 0xf7, 0x05, 0xb8, 0xfc, 0x6e, 0x45, 0x91, 0x7f, - 0xa6, 0x01, 0x8a, 0x17, 0x8e, 0xd0, 0xc3, 0x64, 0xe9, 0x89, 0xb5, 0xf5, 0xe6, 0xbb, 0xaf, 0xc7, - 0x9c, 0x14, 0xce, 0x02, 0x95, 0x7a, 0x8c, 0x7b, 0xf4, 0x8a, 0x2a, 0xf5, 0x97, 0x1a, 0x54, 0x43, - 0x55, 0x27, 0x74, 0x3f, 0x65, 0x4d, 0x23, 0x25, 0xf7, 0xe6, 0xdb, 0xd7, 0xf2, 0x25, 0xa5, 0xf2, - 0xca, 0x0e, 0x90, 0x77, 0x9a, 0x9f, 0x68, 0x50, 0x0b, 0x57, 0xa9, 0x50, 0x8a, 0xec, 0x58, 0xc9, - 0xbe, 0xb9, 0x71, 0x3d, 0xe3, 0xf4, 0xe5, 0x09, 0xae, 0x33, 0x03, 0x28, 0x88, 0xba, 0x56, 0xd2, - 0xc6, 0x0f, 0x17, 0xfb, 0x93, 0x36, 0x7e, 0xa4, 0x28, 0x96, 0xb0, 0xf1, 0x1d, 0x7b, 0x40, 0x94, - 0x63, 0x26, 0x0a, 0x5f, 0x69, 0x68, 0xd3, 0x8f, 0x59, 0xa4, 0x6a, 0x96, 0x86, 0x16, 0x1c, 0x33, - 0x59, 0xf1, 0x42, 0x29, 0xc2, 0xae, 0x39, 0x66, 0xd1, 0x82, 0x59, 0xc2, 0x31, 0x63, 0x80, 0xca, - 0x31, 0x0b, 0x6a, 0x53, 0x49, 0xc7, 0x2c, 0xf6, 0x76, 0x91, 0x74, 0xcc, 0xe2, 0xe5, 0xad, 0x84, - 0x75, 0x64, 0xb8, 0xa1, 0x63, 0xb6, 0x94, 0x50, 0xc6, 0x42, 0xef, 0xa6, 0x18, 0x31, 0xf1, 0x49, - 0xa4, 0xf9, 0xde, 0x6b, 0x72, 0xa7, 0xee, 0x71, 0x6e, 0x7e, 0xb9, 0xc7, 0xff, 0x56, 0x83, 0xe5, - 0xa4, 0x12, 0x18, 0x4a, 0xc1, 0x49, 0x79, 0x4a, 0x69, 0x6e, 0xbe, 0x2e, 0xfb, 0x74, 0x6b, 0xf9, - 0xbb, 0xfe, 0x71, 0xfd, 0x5f, 0xbf, 0x5c, 0xd3, 0xfe, 0xe3, 0xcb, 0x35, 0xed, 0xbf, 0xbf, 0x5c, - 0xd3, 0xfe, 0xee, 0x7f, 0xd6, 0xe6, 0x4e, 0xf3, 0xec, 0x3f, 0x1a, 0x7f, 0xfb, 0x37, 0x01, 0x00, - 0x00, 0xff, 0xff, 0xee, 0x4f, 0x63, 0x90, 0xed, 0x3c, 0x00, 0x00, + // 4110 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0xdd, 0x73, 0x1b, 0xc9, + 0x71, 0xe7, 0x02, 0x24, 0x01, 0x34, 0x3e, 0x08, 0x0e, 0x29, 0x0a, 0xc2, 0x49, 0x14, 0x6f, 0x74, + 0xd2, 0xf1, 0xa4, 0x3b, 0xe2, 0x4c, 0xdb, 0xb9, 0x2a, 0x25, 0x71, 0x0c, 0x91, 0x38, 0x89, 0x47, + 0x8a, 0xe4, 0x2d, 0x21, 0xdd, 0x47, 0xb9, 0xc2, 0x5a, 0x02, 0x23, 0x72, 0x43, 0x60, 0x17, 0xde, + 0x5d, 0x40, 0xe4, 0xe5, 0xc3, 0x2e, 0x97, 0xe3, 0x4a, 0x5e, 0xed, 0xaa, 0x54, 0xf2, 0x90, 0xbc, + 0xa4, 0x52, 0x2e, 0x3f, 0xf8, 0x39, 0xff, 0x42, 0x9e, 0xf2, 0x51, 0xf9, 0x07, 0x52, 0x17, 0xbf, + 0x24, 0x7f, 0x85, 0x6b, 0xbe, 0x76, 0x67, 0xf6, 0x83, 0x92, 0x8d, 0xbb, 0x7b, 0x11, 0x31, 0x3d, + 0x3d, 0xfd, 0xeb, 0xe9, 0x99, 0xe9, 0xee, 0xe9, 0x59, 0x41, 0xc9, 0x1b, 0xf5, 0x36, 0x46, 0x9e, + 0x1b, 0xb8, 0xa8, 0x42, 0x82, 0x5e, 0xdf, 0x27, 0xde, 0x84, 0x78, 0xa3, 0x93, 0xe6, 0xf2, 0xa9, + 0x7b, 0xea, 0xb2, 0x8e, 0x16, 0xfd, 0xc5, 0x79, 0x9a, 0x0d, 0xca, 0xd3, 0xb2, 0x46, 0x76, 0x6b, + 0x38, 0xe9, 0xf5, 0x46, 0x27, 0xad, 0xf3, 0x89, 0xe8, 0x69, 0x86, 0x3d, 0xd6, 0x38, 0x38, 0x1b, + 0x9d, 0xb0, 0x3f, 0xa2, 0xef, 0xe6, 0xa9, 0xeb, 0x9e, 0x0e, 0x08, 0xef, 0x75, 0x1c, 0x37, 0xb0, + 0x02, 0xdb, 0x75, 0x7c, 0xde, 0x8b, 0xff, 0xda, 0x80, 0x9a, 0x49, 0xfc, 0x91, 0xeb, 0xf8, 0xe4, + 0x09, 0xb1, 0xfa, 0xc4, 0x43, 0xb7, 0x00, 0x7a, 0x83, 0xb1, 0x1f, 0x10, 
0xef, 0xd8, 0xee, 0x37, + 0x8c, 0x35, 0x63, 0x7d, 0xd6, 0x2c, 0x09, 0xca, 0x4e, 0x1f, 0xbd, 0x01, 0xa5, 0x21, 0x19, 0x9e, + 0xf0, 0xde, 0x1c, 0xeb, 0x2d, 0x72, 0xc2, 0x4e, 0x1f, 0x35, 0xa1, 0xe8, 0x91, 0x89, 0xed, 0xdb, + 0xae, 0xd3, 0xc8, 0xaf, 0x19, 0xeb, 0x79, 0x33, 0x6c, 0xd3, 0x81, 0x9e, 0xf5, 0x22, 0x38, 0x0e, + 0x88, 0x37, 0x6c, 0xcc, 0xf2, 0x81, 0x94, 0xd0, 0x25, 0xde, 0x10, 0xff, 0x74, 0x0e, 0x2a, 0xa6, + 0xe5, 0x9c, 0x12, 0x93, 0xfc, 0x70, 0x4c, 0xfc, 0x00, 0xd5, 0x21, 0x7f, 0x4e, 0x2e, 0x19, 0x7c, + 0xc5, 0xa4, 0x3f, 0xf9, 0x78, 0xe7, 0x94, 0x1c, 0x13, 0x87, 0x03, 0x57, 0xe8, 0x78, 0xe7, 0x94, + 0x74, 0x9c, 0x3e, 0x5a, 0x86, 0xb9, 0x81, 0x3d, 0xb4, 0x03, 0x81, 0xca, 0x1b, 0x9a, 0x3a, 0xb3, + 0x31, 0x75, 0xb6, 0x00, 0x7c, 0xd7, 0x0b, 0x8e, 0x5d, 0xaf, 0x4f, 0xbc, 0xc6, 0xdc, 0x9a, 0xb1, + 0x5e, 0xdb, 0x7c, 0x6b, 0x43, 0x5d, 0x86, 0x0d, 0x55, 0xa1, 0x8d, 0x23, 0xd7, 0x0b, 0x0e, 0x28, + 0xaf, 0x59, 0xf2, 0xe5, 0x4f, 0xf4, 0x21, 0x94, 0x99, 0x90, 0xc0, 0xf2, 0x4e, 0x49, 0xd0, 0x98, + 0x67, 0x52, 0xee, 0xbe, 0x42, 0x4a, 0x97, 0x31, 0x9b, 0x0c, 0x9e, 0xff, 0x46, 0x18, 0x2a, 0x3e, + 0xf1, 0x6c, 0x6b, 0x60, 0x7f, 0x61, 0x9d, 0x0c, 0x48, 0xa3, 0xb0, 0x66, 0xac, 0x17, 0x4d, 0x8d, + 0x46, 0xe7, 0x7f, 0x4e, 0x2e, 0xfd, 0x63, 0xd7, 0x19, 0x5c, 0x36, 0x8a, 0x8c, 0xa1, 0x48, 0x09, + 0x07, 0xce, 0xe0, 0x92, 0x2d, 0x9a, 0x3b, 0x76, 0x02, 0xde, 0x5b, 0x62, 0xbd, 0x25, 0x46, 0x61, + 0xdd, 0xeb, 0x50, 0x1f, 0xda, 0xce, 0xf1, 0xd0, 0xed, 0x1f, 0x87, 0x06, 0x01, 0x66, 0x90, 0xda, + 0xd0, 0x76, 0x9e, 0xba, 0x7d, 0x53, 0x9a, 0x85, 0x72, 0x5a, 0x17, 0x3a, 0x67, 0x59, 0x70, 0x5a, + 0x17, 0x2a, 0xe7, 0x06, 0x2c, 0x51, 0x99, 0x3d, 0x8f, 0x58, 0x01, 0x89, 0x98, 0x2b, 0x8c, 0x79, + 0x71, 0x68, 0x3b, 0x5b, 0xac, 0x47, 0xe3, 0xb7, 0x2e, 0x12, 0xfc, 0x55, 0xc1, 0x6f, 0x5d, 0xe8, + 0xfc, 0x78, 0x03, 0x4a, 0xa1, 0xcd, 0x51, 0x11, 0x66, 0xf7, 0x0f, 0xf6, 0x3b, 0xf5, 0x19, 0x04, + 0x30, 0xdf, 0x3e, 0xda, 0xea, 0xec, 0x6f, 0xd7, 0x0d, 0x54, 0x86, 0xc2, 0x76, 0x87, 0x37, 0x72, + 0xf8, 0x11, 0x40, 0x64, 0x5d, 0x54, 0x80, 0xfc, 0x6e, 0xe7, 0xb3, 0xfa, 0x0c, 0xe5, 0x79, 0xde, + 0x31, 0x8f, 0x76, 0x0e, 0xf6, 0xeb, 0x06, 0x1d, 0xbc, 0x65, 0x76, 0xda, 0xdd, 0x4e, 0x3d, 0x47, + 0x39, 0x9e, 0x1e, 0x6c, 0xd7, 0xf3, 0xa8, 0x04, 0x73, 0xcf, 0xdb, 0x7b, 0xcf, 0x3a, 0xf5, 0x59, + 0xfc, 0x0b, 0x03, 0xaa, 0x62, 0xbd, 0xf8, 0x99, 0x40, 0xdf, 0x81, 0xf9, 0x33, 0x76, 0x2e, 0xd8, + 0x56, 0x2c, 0x6f, 0xde, 0x8c, 0x2d, 0xae, 0x76, 0x76, 0x4c, 0xc1, 0x8b, 0x30, 0xe4, 0xcf, 0x27, + 0x7e, 0x23, 0xb7, 0x96, 0x5f, 0x2f, 0x6f, 0xd6, 0x37, 0xf8, 0x79, 0xdd, 0xd8, 0x25, 0x97, 0xcf, + 0xad, 0xc1, 0x98, 0x98, 0xb4, 0x13, 0x21, 0x98, 0x1d, 0xba, 0x1e, 0x61, 0x3b, 0xb6, 0x68, 0xb2, + 0xdf, 0x74, 0x1b, 0xb3, 0x45, 0x13, 0xbb, 0x95, 0x37, 0xf0, 0xaf, 0x0c, 0x80, 0xc3, 0x71, 0x90, + 0x7d, 0x34, 0x96, 0x61, 0x6e, 0x42, 0x05, 0x8b, 0x63, 0xc1, 0x1b, 0xec, 0x4c, 0x10, 0xcb, 0x27, + 0xe1, 0x99, 0xa0, 0x0d, 0x74, 0x1d, 0x0a, 0x23, 0x8f, 0x4c, 0x8e, 0xcf, 0x27, 0x0c, 0xa4, 0x68, + 0xce, 0xd3, 0xe6, 0xee, 0x04, 0xbd, 0x09, 0x15, 0xfb, 0xd4, 0x71, 0x3d, 0x72, 0xcc, 0x65, 0xcd, + 0xb1, 0xde, 0x32, 0xa7, 0x31, 0xbd, 0x15, 0x16, 0x2e, 0x78, 0x5e, 0x65, 0xd9, 0xa3, 0x24, 0xec, + 0x40, 0x99, 0xa9, 0x3a, 0x95, 0xf9, 0xde, 0x89, 0x74, 0xcc, 0xb1, 0x61, 0x49, 0x13, 0x0a, 0xad, + 0xf1, 0x0f, 0x00, 0x6d, 0x93, 0x01, 0x09, 0xc8, 0x34, 0xde, 0x43, 0xb1, 0x49, 0x5e, 0xb5, 0x09, + 0xfe, 0xb9, 0x01, 0x4b, 0x9a, 0xf8, 0xa9, 0xa6, 0xd5, 0x80, 0x42, 0x9f, 0x09, 0xe3, 0x1a, 0xe4, + 0x4d, 0xd9, 0x44, 0x0f, 0xa0, 0x28, 0x14, 0xf0, 0x1b, 0xf9, 0x8c, 0x4d, 0x53, 0xe0, 0x3a, 0xf9, + 
0xf8, 0x57, 0x39, 0x28, 0x89, 0x89, 0x1e, 0x8c, 0x50, 0x1b, 0xaa, 0x1e, 0x6f, 0x1c, 0xb3, 0xf9, + 0x08, 0x8d, 0x9a, 0xd9, 0x4e, 0xe8, 0xc9, 0x8c, 0x59, 0x11, 0x43, 0x18, 0x19, 0xfd, 0x21, 0x94, + 0xa5, 0x88, 0xd1, 0x38, 0x10, 0x26, 0x6f, 0xe8, 0x02, 0xa2, 0xfd, 0xf7, 0x64, 0xc6, 0x04, 0xc1, + 0x7e, 0x38, 0x0e, 0x50, 0x17, 0x96, 0xe5, 0x60, 0x3e, 0x1b, 0xa1, 0x46, 0x9e, 0x49, 0x59, 0xd3, + 0xa5, 0x24, 0x97, 0xea, 0xc9, 0x8c, 0x89, 0xc4, 0x78, 0xa5, 0x53, 0x55, 0x29, 0xb8, 0xe0, 0xce, + 0x3b, 0xa1, 0x52, 0xf7, 0xc2, 0x49, 0xaa, 0xd4, 0xbd, 0x70, 0x1e, 0x95, 0xa0, 0x20, 0x5a, 0xf8, + 0x5f, 0x73, 0x00, 0x72, 0x35, 0x0e, 0x46, 0x68, 0x1b, 0x6a, 0x9e, 0x68, 0x69, 0xd6, 0x7a, 0x23, + 0xd5, 0x5a, 0x62, 0x11, 0x67, 0xcc, 0xaa, 0x1c, 0xc4, 0x95, 0xfb, 0x1e, 0x54, 0x42, 0x29, 0x91, + 0xc1, 0x6e, 0xa4, 0x18, 0x2c, 0x94, 0x50, 0x96, 0x03, 0xa8, 0xc9, 0x3e, 0x81, 0x6b, 0xe1, 0xf8, + 0x14, 0x9b, 0xbd, 0x79, 0x85, 0xcd, 0x42, 0x81, 0x4b, 0x52, 0x82, 0x6a, 0x35, 0x55, 0xb1, 0xc8, + 0x6c, 0x37, 0x52, 0xcc, 0x96, 0x54, 0x8c, 0x1a, 0x0e, 0x68, 0xbc, 0xe4, 0x4d, 0xfc, 0x7f, 0x79, + 0x28, 0x6c, 0xb9, 0xc3, 0x91, 0xe5, 0xd1, 0xd5, 0x98, 0xf7, 0x88, 0x3f, 0x1e, 0x04, 0xcc, 0x5c, + 0xb5, 0xcd, 0x3b, 0xba, 0x44, 0xc1, 0x26, 0xff, 0x9a, 0x8c, 0xd5, 0x14, 0x43, 0xe8, 0x60, 0x11, + 0x1e, 0x73, 0xaf, 0x31, 0x58, 0x04, 0x47, 0x31, 0x44, 0x1e, 0xe4, 0x7c, 0x74, 0x90, 0x9b, 0x50, + 0x98, 0x10, 0x2f, 0x0a, 0xe9, 0x4f, 0x66, 0x4c, 0x49, 0x40, 0xef, 0xc0, 0x42, 0x3c, 0xbc, 0xcc, + 0x09, 0x9e, 0x5a, 0x4f, 0x8f, 0x46, 0x77, 0xa0, 0xa2, 0xc5, 0xb8, 0x79, 0xc1, 0x57, 0x1e, 0x2a, + 0x21, 0x6e, 0x45, 0xfa, 0x55, 0x1a, 0x8f, 0x2b, 0x4f, 0x66, 0xa4, 0x67, 0x5d, 0x91, 0x9e, 0xb5, + 0x28, 0x46, 0x09, 0xdf, 0xaa, 0x39, 0x99, 0xef, 0xeb, 0x4e, 0x06, 0x7f, 0x1f, 0xaa, 0x9a, 0x81, + 0x68, 0xdc, 0xe9, 0x7c, 0xfc, 0xac, 0xbd, 0xc7, 0x83, 0xd4, 0x63, 0x16, 0x97, 0xcc, 0xba, 0x41, + 0x63, 0xdd, 0x5e, 0xe7, 0xe8, 0xa8, 0x9e, 0x43, 0x55, 0x28, 0xed, 0x1f, 0x74, 0x8f, 0x39, 0x57, + 0x1e, 0x3f, 0x0e, 0x25, 0x88, 0x20, 0xa7, 0xc4, 0xb6, 0x19, 0x25, 0xb6, 0x19, 0x32, 0xb6, 0xe5, + 0xa2, 0xd8, 0xc6, 0xc2, 0xdc, 0x5e, 0xa7, 0x7d, 0xd4, 0xa9, 0xcf, 0x3e, 0xaa, 0x41, 0x85, 0xdb, + 0xf7, 0x78, 0xec, 0xd0, 0x50, 0xfb, 0xcf, 0x06, 0x40, 0x74, 0x9a, 0x50, 0x0b, 0x0a, 0x3d, 0x8e, + 0xd3, 0x30, 0x98, 0x33, 0xba, 0x96, 0xba, 0x64, 0xa6, 0xe4, 0x42, 0xdf, 0x82, 0x82, 0x3f, 0xee, + 0xf5, 0x88, 0x2f, 0x43, 0xde, 0xf5, 0xb8, 0x3f, 0x14, 0xde, 0xca, 0x94, 0x7c, 0x74, 0xc8, 0x0b, + 0xcb, 0x1e, 0x8c, 0x59, 0x00, 0xbc, 0x7a, 0x88, 0xe0, 0xc3, 0xff, 0x60, 0x40, 0x59, 0xd9, 0xbc, + 0xbf, 0xa7, 0x13, 0xbe, 0x09, 0x25, 0xa6, 0x03, 0xe9, 0x0b, 0x37, 0x5c, 0x34, 0x23, 0x02, 0xfa, + 0x03, 0x28, 0xc9, 0x13, 0x20, 0x3d, 0x71, 0x23, 0x5d, 0xec, 0xc1, 0xc8, 0x8c, 0x58, 0xf1, 0x2e, + 0x2c, 0x32, 0xab, 0xf4, 0x68, 0x72, 0x2d, 0xed, 0xa8, 0xa6, 0x9f, 0x46, 0x2c, 0xfd, 0x6c, 0x42, + 0x71, 0x74, 0x76, 0xe9, 0xdb, 0x3d, 0x6b, 0x20, 0xb4, 0x08, 0xdb, 0xf8, 0x23, 0x40, 0xaa, 0xb0, + 0x69, 0xa6, 0x8b, 0xab, 0x50, 0x7e, 0x62, 0xf9, 0x67, 0x42, 0x25, 0xfc, 0x00, 0xaa, 0xb4, 0xb9, + 0xfb, 0xfc, 0x35, 0x74, 0x64, 0x97, 0x03, 0xc9, 0x3d, 0x95, 0xcd, 0x11, 0xcc, 0x9e, 0x59, 0xfe, + 0x19, 0x9b, 0x68, 0xd5, 0x64, 0xbf, 0xd1, 0x3b, 0x50, 0xef, 0xf1, 0x49, 0x1e, 0xc7, 0xae, 0x0c, + 0x0b, 0x82, 0x1e, 0x66, 0x82, 0x9f, 0x42, 0x85, 0xcf, 0xe1, 0xab, 0x56, 0x02, 0x2f, 0xc2, 0xc2, + 0x91, 0x63, 0x8d, 0xfc, 0x33, 0x57, 0x46, 0x37, 0x3a, 0xe9, 0x7a, 0x44, 0x9b, 0x0a, 0xf1, 0x6d, + 0x58, 0xf0, 0xc8, 0xd0, 0xb2, 0x1d, 0xdb, 0x39, 0x3d, 0x3e, 0xb9, 0x0c, 0x88, 0x2f, 0x2e, 0x4c, + 0xb5, 0x90, 0xfc, 0x88, 
0x52, 0xa9, 0x6a, 0x27, 0x03, 0xf7, 0x44, 0xb8, 0x39, 0xf6, 0x1b, 0xff, + 0x2c, 0x07, 0x95, 0x4f, 0xac, 0xa0, 0x27, 0x97, 0x0e, 0xed, 0x40, 0x2d, 0x74, 0x6e, 0x8c, 0x22, + 0x74, 0x89, 0x85, 0x58, 0x36, 0x46, 0xa6, 0xd2, 0x32, 0x3a, 0x56, 0x7b, 0x2a, 0x81, 0x89, 0xb2, + 0x9c, 0x1e, 0x19, 0x84, 0xa2, 0x72, 0xd9, 0xa2, 0x18, 0xa3, 0x2a, 0x4a, 0x25, 0xa0, 0x03, 0xa8, + 0x8f, 0x3c, 0xf7, 0xd4, 0x23, 0xbe, 0x1f, 0x0a, 0xe3, 0x61, 0x0c, 0xa7, 0x08, 0x3b, 0x14, 0xac, + 0x91, 0xb8, 0x85, 0x91, 0x4e, 0x7a, 0xb4, 0x10, 0xe5, 0x33, 0xdc, 0x39, 0xfd, 0x57, 0x0e, 0x50, + 0x72, 0x52, 0xbf, 0x6b, 0x8a, 0x77, 0x17, 0x6a, 0x7e, 0x60, 0x79, 0x89, 0xcd, 0x56, 0x65, 0xd4, + 0xd0, 0xe3, 0xbf, 0x0d, 0xa1, 0x42, 0xc7, 0x8e, 0x1b, 0xd8, 0x2f, 0x2e, 0x45, 0x96, 0x5c, 0x93, + 0xe4, 0x7d, 0x46, 0x45, 0x1d, 0x28, 0xbc, 0xb0, 0x07, 0x01, 0xf1, 0xfc, 0xc6, 0xdc, 0x5a, 0x7e, + 0xbd, 0xb6, 0xf9, 0xe0, 0x55, 0xcb, 0xb0, 0xf1, 0x21, 0xe3, 0xef, 0x5e, 0x8e, 0x88, 0x29, 0xc7, + 0xaa, 0x99, 0xe7, 0xbc, 0x96, 0x8d, 0xdf, 0x80, 0xe2, 0x4b, 0x2a, 0x82, 0xde, 0xb2, 0x0b, 0x3c, + 0x59, 0x64, 0x6d, 0x7e, 0xc9, 0x7e, 0xe1, 0x59, 0xa7, 0x43, 0xe2, 0x04, 0xf2, 0x1e, 0x28, 0xdb, + 0xf8, 0x2e, 0x40, 0x04, 0x43, 0x5d, 0xfe, 0xfe, 0xc1, 0xe1, 0xb3, 0x6e, 0x7d, 0x06, 0x55, 0xa0, + 0xb8, 0x7f, 0xb0, 0xdd, 0xd9, 0xeb, 0xd0, 0xf8, 0x80, 0x5b, 0xd2, 0xa4, 0xda, 0x5a, 0xaa, 0x98, + 0x86, 0x86, 0x89, 0x57, 0x60, 0x39, 0x6d, 0x01, 0x69, 0x2e, 0x5a, 0x15, 0xbb, 0x74, 0xaa, 0xa3, + 0xa2, 0x42, 0xe7, 0xf4, 0xe9, 0x36, 0xa0, 0xc0, 0x77, 0x6f, 0x5f, 0x24, 0xe7, 0xb2, 0x49, 0x0d, + 0xc1, 0x37, 0x23, 0xe9, 0x8b, 0x55, 0x0a, 0xdb, 0xa9, 0xee, 0x65, 0x2e, 0xd5, 0xbd, 0xa0, 0x3b, + 0x50, 0x0d, 0x4f, 0x83, 0xe5, 0x8b, 0x5c, 0xa0, 0x64, 0x56, 0xe4, 0x46, 0xa7, 0x34, 0xcd, 0xe8, + 0x05, 0xdd, 0xe8, 0xe8, 0x2e, 0xcc, 0x93, 0x09, 0x71, 0x02, 0xbf, 0x51, 0x66, 0x11, 0xa3, 0x2a, + 0x73, 0xf7, 0x0e, 0xa5, 0x9a, 0xa2, 0x13, 0x7f, 0x17, 0x16, 0xd9, 0x1d, 0xe9, 0xb1, 0x67, 0x39, + 0xea, 0x65, 0xae, 0xdb, 0xdd, 0x13, 0xe6, 0xa6, 0x3f, 0x51, 0x0d, 0x72, 0x3b, 0xdb, 0xc2, 0x08, + 0xb9, 0x9d, 0x6d, 0xfc, 0x13, 0x03, 0x90, 0x3a, 0x6e, 0x2a, 0x3b, 0xc7, 0x84, 0x4b, 0xf8, 0x7c, + 0x04, 0xbf, 0x0c, 0x73, 0xc4, 0xf3, 0x5c, 0x8f, 0x59, 0xb4, 0x64, 0xf2, 0x06, 0x7e, 0x4b, 0xe8, + 0x60, 0x92, 0x89, 0x7b, 0x1e, 0x9e, 0x41, 0x2e, 0xcd, 0x08, 0x55, 0xdd, 0x85, 0x25, 0x8d, 0x6b, + 0xaa, 0xc8, 0xf5, 0x21, 0x2c, 0x30, 0x61, 0x5b, 0x67, 0xa4, 0x77, 0x3e, 0x72, 0x6d, 0x27, 0x81, + 0x47, 0x57, 0x2e, 0x72, 0xb0, 0x74, 0x1e, 0x7c, 0x62, 0x95, 0x90, 0xd8, 0xed, 0xee, 0xe1, 0xcf, + 0x60, 0x25, 0x26, 0x47, 0xaa, 0xff, 0x27, 0x50, 0xee, 0x85, 0x44, 0x5f, 0xe4, 0x3a, 0xb7, 0x74, + 0xe5, 0xe2, 0x43, 0xd5, 0x11, 0xf8, 0x00, 0xae, 0x27, 0x44, 0x4f, 0x35, 0xe7, 0xb7, 0xe1, 0x1a, + 0x13, 0xb8, 0x4b, 0xc8, 0xa8, 0x3d, 0xb0, 0x27, 0x99, 0x96, 0x1e, 0x89, 0x49, 0x29, 0x8c, 0x5f, + 0xef, 0xbe, 0xc0, 0x7f, 0x24, 0x10, 0xbb, 0xf6, 0x90, 0x74, 0xdd, 0xbd, 0x6c, 0xdd, 0x68, 0x34, + 0x3b, 0x27, 0x97, 0xbe, 0x48, 0x6b, 0xd8, 0x6f, 0xfc, 0x2f, 0x86, 0x30, 0x95, 0x3a, 0xfc, 0x6b, + 0xde, 0xc9, 0xab, 0x00, 0xa7, 0xf4, 0xc8, 0x90, 0x3e, 0xed, 0xe0, 0x15, 0x15, 0x85, 0x12, 0xea, + 0x49, 0xfd, 0x77, 0x45, 0xe8, 0xb9, 0x2c, 0xf6, 0x39, 0xfb, 0x27, 0xf4, 0x72, 0xb7, 0xa0, 0xcc, + 0x08, 0x47, 0x81, 0x15, 0x8c, 0xfd, 0xc4, 0x62, 0xfc, 0x95, 0xd8, 0xf6, 0x72, 0xd0, 0x54, 0xf3, + 0xfa, 0x16, 0xcc, 0xb3, 0xcb, 0x84, 0x4c, 0xa5, 0x6f, 0xa4, 0xec, 0x47, 0xae, 0x87, 0x29, 0x18, + 0xf1, 0xcf, 0x0c, 0x98, 0x7f, 0xca, 0x4a, 0xb0, 0x8a, 0x6a, 0xb3, 0x72, 0x2d, 0x1c, 0x6b, 0xc8, + 0x0b, 0x43, 0x25, 0x93, 0xfd, 0x66, 0xa9, 0x27, 
0x21, 0xde, 0x33, 0x73, 0x8f, 0xa7, 0xb8, 0x25, + 0x33, 0x6c, 0x53, 0x9b, 0xf5, 0x06, 0x36, 0x71, 0x02, 0xd6, 0x3b, 0xcb, 0x7a, 0x15, 0x0a, 0xcd, + 0x9e, 0x6d, 0x7f, 0x8f, 0x58, 0x9e, 0x23, 0x8a, 0xa6, 0x45, 0x33, 0x22, 0xe0, 0x3d, 0xa8, 0x73, + 0x3d, 0xda, 0xfd, 0xbe, 0x92, 0x60, 0x86, 0x68, 0x46, 0x0c, 0x4d, 0x93, 0x96, 0x8b, 0x4b, 0xfb, + 0xa5, 0x01, 0x8b, 0x8a, 0xb8, 0xa9, 0xac, 0xfa, 0x2e, 0xcc, 0xf3, 0x22, 0xb5, 0xc8, 0x74, 0x96, + 0xf5, 0x51, 0x1c, 0xc6, 0x14, 0x3c, 0x68, 0x03, 0x0a, 0xfc, 0x97, 0xbc, 0x03, 0xa4, 0xb3, 0x4b, + 0x26, 0x7c, 0x17, 0x96, 0x04, 0x89, 0x0c, 0xdd, 0xb4, 0x83, 0xc1, 0x16, 0x03, 0xff, 0x05, 0x2c, + 0xeb, 0x6c, 0x53, 0x4d, 0x49, 0x51, 0x32, 0xf7, 0x3a, 0x4a, 0xb6, 0xa5, 0x92, 0xcf, 0x46, 0x7d, + 0x25, 0x8f, 0x8a, 0xef, 0x18, 0x75, 0xbd, 0x72, 0xfa, 0x7a, 0x45, 0x13, 0x90, 0x22, 0xbe, 0xd1, + 0x09, 0x7c, 0x20, 0xb7, 0xc3, 0x9e, 0xed, 0x87, 0x3e, 0x1c, 0x43, 0x65, 0x60, 0x3b, 0xc4, 0xf2, + 0x44, 0xe5, 0xdc, 0xe0, 0x95, 0x73, 0x95, 0x86, 0xbf, 0x00, 0xa4, 0x0e, 0xfc, 0x46, 0x95, 0xbe, + 0x27, 0x4d, 0x76, 0xe8, 0xb9, 0x43, 0x37, 0xd3, 0xec, 0xf8, 0x2f, 0xe1, 0x5a, 0x8c, 0xef, 0x1b, + 0x55, 0x73, 0x09, 0x16, 0xb7, 0x89, 0x4c, 0x68, 0xa4, 0xdb, 0xfb, 0x08, 0x90, 0x4a, 0x9c, 0x2a, + 0xb2, 0xb5, 0x60, 0xf1, 0xa9, 0x3b, 0xa1, 0x2e, 0x92, 0x52, 0x23, 0xdf, 0xc0, 0xeb, 0x10, 0xa1, + 0x29, 0xc2, 0x36, 0x05, 0x57, 0x07, 0x4c, 0x05, 0xfe, 0x1f, 0x06, 0x54, 0xda, 0x03, 0xcb, 0x1b, + 0x4a, 0xe0, 0xef, 0xc1, 0x3c, 0xbf, 0x5d, 0x8b, 0x82, 0xd6, 0x3d, 0x5d, 0x8c, 0xca, 0xcb, 0x1b, + 0x6d, 0x7e, 0x17, 0x17, 0xa3, 0xa8, 0xe2, 0xe2, 0xcd, 0x6b, 0x3b, 0xf6, 0x06, 0xb6, 0x8d, 0xde, + 0x83, 0x39, 0x8b, 0x0e, 0x61, 0xa1, 0xa8, 0x16, 0xaf, 0x6b, 0x30, 0x69, 0xec, 0x0e, 0xc0, 0xb9, + 0xf0, 0x77, 0xa0, 0xac, 0x20, 0xa0, 0x02, 0xe4, 0x1f, 0x77, 0x44, 0xc2, 0xde, 0xde, 0xea, 0xee, + 0x3c, 0xe7, 0x05, 0x9d, 0x1a, 0xc0, 0x76, 0x27, 0x6c, 0xe7, 0xf0, 0xa7, 0x62, 0x94, 0x70, 0xfb, + 0xaa, 0x3e, 0x46, 0x96, 0x3e, 0xb9, 0xd7, 0xd2, 0xe7, 0x02, 0xaa, 0x62, 0xfa, 0xd3, 0x86, 0x31, + 0x26, 0x2f, 0x23, 0x8c, 0x29, 0xca, 0x9b, 0x82, 0x11, 0xff, 0xda, 0x80, 0xfa, 0xb6, 0xfb, 0xd2, + 0x39, 0xf5, 0xac, 0x7e, 0x78, 0x4e, 0x3e, 0x8c, 0xad, 0xd4, 0x46, 0xac, 0x38, 0x1a, 0xe3, 0x8f, + 0x08, 0xb1, 0x15, 0x6b, 0x44, 0x65, 0x43, 0x1e, 0x0b, 0x65, 0x13, 0x7f, 0x00, 0x0b, 0xb1, 0x41, + 0xd4, 0xf6, 0xcf, 0xdb, 0x7b, 0x3b, 0xdb, 0xd4, 0xd6, 0xac, 0xb0, 0xd6, 0xd9, 0x6f, 0x3f, 0xda, + 0xeb, 0x88, 0x07, 0xa4, 0xf6, 0xfe, 0x56, 0x67, 0xaf, 0x9e, 0xc3, 0x3d, 0x58, 0x54, 0xe0, 0xa7, + 0x7d, 0x19, 0xc8, 0xd0, 0x6e, 0x01, 0xaa, 0x22, 0xda, 0x8b, 0x43, 0xf9, 0xef, 0x39, 0xa8, 0x49, + 0xca, 0xd7, 0x83, 0x89, 0x56, 0x60, 0xbe, 0x7f, 0x72, 0x64, 0x7f, 0x21, 0x5f, 0x8e, 0x44, 0x8b, + 0xd2, 0x07, 0x1c, 0x87, 0x3f, 0xdf, 0x8a, 0x16, 0x0d, 0xe3, 0x9e, 0xf5, 0x22, 0xd8, 0x71, 0xfa, + 0xe4, 0x82, 0x25, 0x05, 0xb3, 0x66, 0x44, 0x60, 0x15, 0x26, 0xf1, 0xcc, 0xcb, 0x6e, 0x56, 0xca, + 0xb3, 0x2f, 0xba, 0x0f, 0x75, 0xfa, 0xbb, 0x3d, 0x1a, 0x0d, 0x6c, 0xd2, 0xe7, 0x02, 0x0a, 0x8c, + 0x27, 0x41, 0xa7, 0xe8, 0xec, 0x2e, 0xe2, 0x37, 0x8a, 0x2c, 0x2c, 0x89, 0x16, 0x5a, 0x83, 0x32, + 0xd7, 0x6f, 0xc7, 0x79, 0xe6, 0x13, 0xf6, 0xf6, 0x99, 0x37, 0x55, 0x92, 0x9e, 0x66, 0x40, 0x3c, + 0xcd, 0x58, 0x82, 0xc5, 0xf6, 0x38, 0x38, 0xeb, 0x38, 0x34, 0x56, 0x48, 0x2b, 0x2f, 0x03, 0xa2, + 0xc4, 0x6d, 0xdb, 0x57, 0xa9, 0x82, 0x55, 0x5f, 0x90, 0x0e, 0x2c, 0x51, 0x22, 0x71, 0x02, 0xbb, + 0xa7, 0xc4, 0x55, 0x99, 0x79, 0x19, 0xb1, 0xcc, 0xcb, 0xf2, 0xfd, 0x97, 0xae, 0xd7, 0x17, 0x36, + 0x0f, 0xdb, 0xf8, 0x9f, 0x0c, 0x0e, 0xf9, 0xcc, 0xd7, 0xd2, 0xa7, 0xdf, 
0x51, 0x0c, 0x7a, 0x1f, + 0x0a, 0xee, 0x88, 0xbd, 0xf0, 0x8b, 0x32, 0xcc, 0xca, 0x06, 0xff, 0x26, 0x60, 0x43, 0x08, 0x3e, + 0xe0, 0xbd, 0xa6, 0x64, 0x43, 0xf7, 0xa0, 0x76, 0x66, 0xf9, 0x67, 0xa4, 0x7f, 0x28, 0x65, 0xf2, + 0x9b, 0x5f, 0x8c, 0x8a, 0xd7, 0x23, 0xfd, 0x1e, 0x93, 0xe0, 0x0a, 0xfd, 0xf0, 0x03, 0xb8, 0x26, + 0x39, 0xc5, 0xeb, 0xc4, 0x15, 0xcc, 0x2f, 0xe1, 0x96, 0x64, 0xde, 0x3a, 0xb3, 0x9c, 0x53, 0x22, + 0x01, 0x7f, 0x5f, 0x0b, 0x24, 0xe7, 0x93, 0x4f, 0x9d, 0xcf, 0x23, 0x68, 0x84, 0xf3, 0x61, 0x37, + 0x6b, 0x77, 0xa0, 0x2a, 0x3a, 0xf6, 0xc5, 0x79, 0x2a, 0x99, 0xec, 0x37, 0xa5, 0x79, 0xee, 0x20, + 0x4c, 0xa5, 0xe9, 0x6f, 0xbc, 0x05, 0x37, 0xa4, 0x0c, 0x71, 0xe7, 0xd5, 0x85, 0x24, 0x14, 0x4f, + 0x13, 0x22, 0x0c, 0x4b, 0x87, 0x5e, 0xbd, 0xf0, 0x2a, 0xa7, 0xbe, 0x04, 0x4c, 0xa6, 0xa1, 0xc8, + 0xbc, 0xc6, 0x37, 0x25, 0x55, 0x4c, 0xc9, 0x96, 0x24, 0x99, 0x0a, 0x50, 0xc9, 0x62, 0xc1, 0x28, + 0x39, 0xb1, 0x60, 0x09, 0xd1, 0x3f, 0x80, 0xd5, 0x50, 0x09, 0x6a, 0xb7, 0x43, 0xe2, 0x0d, 0x6d, + 0xdf, 0x57, 0xea, 0xde, 0x69, 0x13, 0xbf, 0x07, 0xb3, 0x23, 0x22, 0x82, 0x50, 0x79, 0x13, 0xc9, + 0x4d, 0xa9, 0x0c, 0x66, 0xfd, 0xb8, 0x0f, 0xb7, 0xa5, 0x74, 0x6e, 0xd1, 0x54, 0xf1, 0x71, 0xa5, + 0x64, 0x35, 0x30, 0x97, 0x51, 0x0d, 0xcc, 0xc7, 0xde, 0x62, 0x3e, 0xe2, 0x86, 0x94, 0x67, 0x7e, + 0xaa, 0xe4, 0x62, 0x97, 0xdb, 0x34, 0x74, 0x15, 0x53, 0x09, 0xfb, 0x1b, 0xe1, 0x05, 0xbe, 0x2a, + 0x0f, 0x4f, 0xd8, 0x0c, 0xe5, 0x43, 0x87, 0x6c, 0xd2, 0xac, 0x99, 0x2e, 0x80, 0xa9, 0xd6, 0x42, + 0x67, 0x4d, 0x8d, 0x86, 0x4f, 0x60, 0x59, 0xf7, 0x6b, 0x53, 0xe9, 0xb2, 0x0c, 0x73, 0x81, 0x7b, + 0x4e, 0x64, 0xac, 0xe1, 0x0d, 0x69, 0xbb, 0xd0, 0xe7, 0x4d, 0x65, 0x3b, 0x2b, 0x12, 0xc6, 0x4e, + 0xc7, 0xb4, 0xfa, 0xd2, 0x8d, 0x25, 0xef, 0x40, 0xbc, 0x81, 0xf7, 0x61, 0x25, 0xee, 0xd9, 0xa6, + 0x52, 0xf9, 0x39, 0x3f, 0x4b, 0x69, 0xce, 0x6f, 0x2a, 0xb9, 0x1f, 0x47, 0x7e, 0x49, 0xf1, 0x6d, + 0x53, 0x89, 0x34, 0xa1, 0x99, 0xe6, 0xea, 0xbe, 0x8a, 0xa3, 0x13, 0x7a, 0xbe, 0xa9, 0x84, 0xf9, + 0x91, 0xb0, 0xe9, 0x97, 0x3f, 0x72, 0x57, 0xf9, 0x2b, 0xdd, 0x95, 0x38, 0x24, 0x91, 0x43, 0xfd, + 0x1a, 0x36, 0x9d, 0xc0, 0x88, 0x7c, 0xf9, 0xb4, 0x18, 0x34, 0x9c, 0x85, 0x18, 0xac, 0x21, 0x37, + 0xb6, 0x1a, 0x01, 0xa6, 0x5a, 0x8c, 0x4f, 0x22, 0x37, 0x9e, 0x08, 0x12, 0x53, 0x09, 0xfe, 0x14, + 0xd6, 0xb2, 0xe3, 0xc3, 0x34, 0x92, 0xef, 0xb7, 0xa0, 0x14, 0x5e, 0x86, 0x94, 0xef, 0xcd, 0xca, + 0x50, 0xd8, 0x3f, 0x38, 0x3a, 0x6c, 0x6f, 0x75, 0xf8, 0x07, 0x67, 0x5b, 0x07, 0xa6, 0xf9, 0xec, + 0xb0, 0x5b, 0xcf, 0x6d, 0xfe, 0x26, 0x0f, 0xb9, 0xdd, 0xe7, 0xe8, 0x33, 0x98, 0xe3, 0x5f, 0x5f, + 0x5c, 0xf1, 0xc9, 0x4d, 0xf3, 0xaa, 0x0f, 0x4c, 0xf0, 0xf5, 0x9f, 0xfc, 0xf7, 0x6f, 0x7e, 0x91, + 0x5b, 0xc4, 0x95, 0xd6, 0xe4, 0xdb, 0xad, 0xf3, 0x49, 0x8b, 0x85, 0xa9, 0x87, 0xc6, 0x7d, 0xf4, + 0x31, 0xe4, 0x0f, 0xc7, 0x01, 0xca, 0xfc, 0x14, 0xa7, 0x99, 0xfd, 0xcd, 0x09, 0xbe, 0xc6, 0x84, + 0x2e, 0x60, 0x10, 0x42, 0x47, 0xe3, 0x80, 0x8a, 0xfc, 0x21, 0x94, 0xd5, 0x2f, 0x46, 0x5e, 0xf9, + 0x7d, 0x4e, 0xf3, 0xd5, 0x5f, 0xa3, 0xe0, 0x5b, 0x0c, 0xea, 0x3a, 0x46, 0x02, 0x8a, 0x7f, 0xd3, + 0xa2, 0xce, 0xa2, 0x7b, 0xe1, 0xa0, 0xcc, 0xaf, 0x77, 0x9a, 0xd9, 0x1f, 0xa8, 0x24, 0x66, 0x11, + 0x5c, 0x38, 0x54, 0xe4, 0x9f, 0x89, 0x6f, 0x53, 0x7a, 0x01, 0xba, 0x9d, 0xf2, 0x6d, 0x82, 0xfa, + 0x0a, 0xdf, 0x5c, 0xcb, 0x66, 0x10, 0x20, 0x37, 0x19, 0xc8, 0x0a, 0x5e, 0x14, 0x20, 0xbd, 0x90, + 0xe5, 0xa1, 0x71, 0x7f, 0xb3, 0x07, 0x73, 0xec, 0x85, 0x0b, 0x7d, 0x2e, 0x7f, 0x34, 0x53, 0x9e, + 0xfa, 0x32, 0x16, 0x5a, 0x7b, 0x1b, 0xc3, 0xcb, 0x0c, 0xa8, 0x86, 0x4b, 0x14, 0x88, 0xbd, 0x6f, + 
0x3d, 0x34, 0xee, 0xaf, 0x1b, 0xef, 0x1b, 0x9b, 0xbf, 0x9e, 0x83, 0x39, 0x56, 0xda, 0x45, 0xe7, + 0x00, 0xd1, 0x6b, 0x4f, 0x7c, 0x76, 0x89, 0xf7, 0xa3, 0xf8, 0xec, 0x92, 0x0f, 0x45, 0xb8, 0xc9, + 0x40, 0x97, 0xf1, 0x02, 0x05, 0x65, 0x15, 0xe3, 0x16, 0x2b, 0x82, 0x53, 0x3b, 0xfe, 0xad, 0x21, + 0x2a, 0xdb, 0xfc, 0x2c, 0xa1, 0x34, 0x69, 0xda, 0x93, 0x4f, 0x7c, 0x3b, 0xa4, 0x3c, 0xf7, 0xe0, + 0xef, 0x32, 0xc0, 0x16, 0xae, 0x47, 0x80, 0x1e, 0xe3, 0x78, 0x68, 0xdc, 0xff, 0xbc, 0x81, 0x97, + 0x84, 0x95, 0x63, 0x3d, 0xe8, 0x47, 0x50, 0xd3, 0x9f, 0x34, 0xd0, 0x9d, 0x14, 0xac, 0xf8, 0xcb, + 0x48, 0xf3, 0xad, 0xab, 0x99, 0x84, 0x4e, 0xab, 0x4c, 0x27, 0x01, 0xce, 0x91, 0xcf, 0x09, 0x19, + 0x59, 0x94, 0x49, 0xac, 0x01, 0xfa, 0x47, 0x43, 0xbc, 0x38, 0x45, 0x6f, 0x14, 0x28, 0x4d, 0x7a, + 0xe2, 0x05, 0xa4, 0x79, 0xf7, 0x15, 0x5c, 0x42, 0x89, 0x3f, 0x66, 0x4a, 0x7c, 0x80, 0x97, 0x23, + 0x25, 0x02, 0x7b, 0x48, 0x02, 0x57, 0x68, 0xf1, 0xf9, 0x4d, 0x7c, 0x5d, 0x33, 0x8e, 0xd6, 0x1b, + 0x2d, 0x16, 0x7f, 0x67, 0x48, 0x5d, 0x2c, 0xed, 0xdd, 0x22, 0x75, 0xb1, 0xf4, 0x47, 0x8a, 0xb4, + 0xc5, 0xe2, 0xaf, 0x0a, 0x69, 0x8b, 0x15, 0xf6, 0x6c, 0xfe, 0xff, 0x2c, 0x14, 0xb6, 0xf8, 0x37, + 0xe1, 0xc8, 0x85, 0x52, 0x58, 0xa6, 0x47, 0xab, 0x69, 0x75, 0xc6, 0xe8, 0x5a, 0xd3, 0xbc, 0x9d, + 0xd9, 0x2f, 0x14, 0x7a, 0x93, 0x29, 0xf4, 0x06, 0x5e, 0xa1, 0xc8, 0xe2, 0xb3, 0xf3, 0x16, 0x2f, + 0x66, 0xb5, 0xac, 0x7e, 0x9f, 0x1a, 0xe2, 0xcf, 0xa1, 0xa2, 0xd6, 0xd1, 0xd1, 0x9b, 0xa9, 0xb5, + 0x4d, 0xb5, 0x14, 0xdf, 0xc4, 0x57, 0xb1, 0x08, 0xe4, 0xb7, 0x18, 0xf2, 0x2a, 0xbe, 0x91, 0x82, + 0xec, 0x31, 0x56, 0x0d, 0x9c, 0xd7, 0xc0, 0xd3, 0xc1, 0xb5, 0x12, 0x7b, 0x3a, 0xb8, 0x5e, 0x42, + 0xbf, 0x12, 0x7c, 0xcc, 0x58, 0x29, 0xb8, 0x0f, 0x10, 0x55, 0xb2, 0x51, 0xaa, 0x2d, 0x95, 0x7b, + 0x5d, 0xdc, 0x39, 0x24, 0x8b, 0xe0, 0x18, 0x33, 0x58, 0xb1, 0xef, 0x62, 0xb0, 0x03, 0xdb, 0x0f, + 0xf8, 0xc1, 0xac, 0x6a, 0xa5, 0x69, 0x94, 0x3a, 0x1f, 0xbd, 0xbe, 0xdd, 0xbc, 0x73, 0x25, 0x8f, + 0x40, 0xbf, 0xcb, 0xd0, 0x6f, 0xe3, 0x66, 0x0a, 0xfa, 0x88, 0xf3, 0xd2, 0xcd, 0xf6, 0xe3, 0x02, + 0x94, 0x9f, 0x5a, 0xb6, 0x13, 0x10, 0xc7, 0x72, 0x7a, 0x04, 0x9d, 0xc0, 0x1c, 0x8b, 0xd4, 0x71, + 0x47, 0xac, 0x96, 0x6d, 0xe3, 0x8e, 0x58, 0xab, 0x69, 0xe2, 0x35, 0x06, 0xdc, 0xc4, 0xd7, 0x28, + 0xf0, 0x30, 0x12, 0xdd, 0x62, 0xa5, 0x48, 0x3a, 0xe9, 0x17, 0x30, 0x2f, 0x5e, 0xfb, 0x62, 0x82, + 0xb4, 0xe2, 0x4f, 0xf3, 0x66, 0x7a, 0x67, 0xda, 0x5e, 0x56, 0x61, 0x7c, 0xc6, 0x47, 0x71, 0x26, + 0x00, 0x51, 0x8d, 0x3d, 0xbe, 0xa2, 0x89, 0x92, 0x7c, 0x73, 0x2d, 0x9b, 0x21, 0xcd, 0xa6, 0x2a, + 0x66, 0x3f, 0xe4, 0xa5, 0xb8, 0x7f, 0x0a, 0xb3, 0x4f, 0x2c, 0xff, 0x0c, 0xc5, 0x62, 0xaf, 0xf2, + 0xad, 0x58, 0xb3, 0x99, 0xd6, 0x25, 0x50, 0x6e, 0x33, 0x94, 0x1b, 0xdc, 0x95, 0xa9, 0x28, 0x67, + 0x96, 0x7f, 0x26, 0xec, 0xc7, 0x3f, 0x1d, 0x8b, 0xdb, 0x4f, 0xfb, 0xfc, 0x2c, 0x6e, 0x3f, 0xfd, + 0x6b, 0xb3, 0x6c, 0xfb, 0x51, 0x94, 0xf3, 0x09, 0xc5, 0x19, 0x41, 0x51, 0x7e, 0xad, 0x85, 0x62, + 0x4f, 0xf7, 0xb1, 0x2f, 0xbb, 0x9a, 0xab, 0x59, 0xdd, 0x02, 0xed, 0x0e, 0x43, 0xbb, 0x85, 0x1b, + 0x89, 0xd5, 0x12, 0x9c, 0x0f, 0x8d, 0xfb, 0xef, 0x1b, 0xe8, 0x47, 0x00, 0xd1, 0xc3, 0x44, 0xe2, + 0x0c, 0xc6, 0xdf, 0x38, 0x12, 0x67, 0x30, 0xf1, 0xa6, 0x81, 0x37, 0x18, 0xee, 0x3a, 0xbe, 0x13, + 0xc7, 0x0d, 0x3c, 0xcb, 0xf1, 0x5f, 0x10, 0xef, 0x3d, 0x5e, 0x67, 0xf5, 0xcf, 0xec, 0x11, 0x9d, + 0xb2, 0x07, 0xa5, 0xb0, 0xee, 0x1c, 0xf7, 0xb7, 0xf1, 0x7a, 0x78, 0xdc, 0xdf, 0x26, 0x0a, 0xd6, + 0xba, 0xe3, 0xd1, 0xf6, 0x8b, 0x64, 0xa5, 0x47, 0xf0, 0x97, 0x75, 0x98, 0xa5, 0x79, 0x37, 0x4d, + 0x4f, 0xa2, 0xca, 0x49, 
0x7c, 0xf6, 0x89, 0x3a, 0x6a, 0x7c, 0xf6, 0xc9, 0xa2, 0x8b, 0x9e, 0x9e, + 0xd0, 0x6b, 0x56, 0x8b, 0x17, 0x29, 0xe8, 0x4c, 0x5d, 0x28, 0x2b, 0xa5, 0x15, 0x94, 0x22, 0x4c, + 0x2f, 0xd0, 0xc6, 0x03, 0x5e, 0x4a, 0x5d, 0x06, 0xbf, 0xc1, 0xf0, 0xae, 0xf1, 0x80, 0xc7, 0xf0, + 0xfa, 0x9c, 0x83, 0x02, 0x8a, 0xd9, 0x89, 0x93, 0x9f, 0x32, 0x3b, 0xfd, 0xf4, 0xaf, 0x65, 0x33, + 0x64, 0xce, 0x2e, 0x3a, 0xfa, 0x2f, 0xa1, 0xa2, 0x16, 0x58, 0x50, 0x8a, 0xf2, 0xb1, 0xa2, 0x72, + 0x3c, 0x92, 0xa4, 0xd5, 0x67, 0x74, 0xdf, 0xc6, 0x20, 0x2d, 0x85, 0x8d, 0x02, 0x0f, 0xa0, 0x20, + 0x2a, 0x2e, 0x69, 0x26, 0xd5, 0x0b, 0xd0, 0x69, 0x26, 0x8d, 0x95, 0x6b, 0xf4, 0xfc, 0x99, 0x21, + 0xd2, 0x4b, 0xa5, 0x8c, 0xd6, 0x02, 0xed, 0x31, 0x09, 0xb2, 0xd0, 0xa2, 0x5a, 0x66, 0x16, 0x9a, + 0x72, 0xa1, 0xcf, 0x42, 0x3b, 0x25, 0x81, 0xf0, 0x07, 0xf2, 0xa2, 0x8c, 0x32, 0x84, 0xa9, 0x11, + 0x12, 0x5f, 0xc5, 0x92, 0x76, 0xbd, 0x89, 0x00, 0x65, 0x78, 0xbc, 0x00, 0x88, 0xea, 0x41, 0xf1, + 0x9c, 0x35, 0xb5, 0x0e, 0x1e, 0xcf, 0x59, 0xd3, 0x4b, 0x4a, 0xba, 0x8f, 0x8d, 0x70, 0xf9, 0xed, + 0x8a, 0x22, 0xff, 0xdc, 0x00, 0x94, 0x2c, 0x1d, 0xa1, 0x07, 0xe9, 0xd2, 0x53, 0xab, 0xeb, 0xcd, + 0x77, 0x5f, 0x8f, 0x39, 0xcd, 0x21, 0x47, 0x2a, 0xf5, 0x18, 0xf7, 0xe8, 0x25, 0x55, 0xea, 0xc7, + 0x06, 0x54, 0xb5, 0xba, 0x13, 0xba, 0x97, 0xb1, 0xa6, 0xb1, 0xa2, 0x7b, 0xf3, 0xed, 0x57, 0xf2, + 0xa5, 0x25, 0xf3, 0xca, 0x0e, 0x90, 0xb7, 0x9a, 0x9f, 0x1a, 0x50, 0xd3, 0xeb, 0x54, 0x28, 0x43, + 0x76, 0xa2, 0x68, 0xdf, 0x5c, 0x7f, 0x35, 0xe3, 0xd5, 0xcb, 0x13, 0x5d, 0x68, 0x06, 0x50, 0x10, + 0x95, 0xad, 0xb4, 0x8d, 0xaf, 0x97, 0xfb, 0xd3, 0x36, 0x7e, 0xac, 0x2c, 0x96, 0xb2, 0xf1, 0x3d, + 0x77, 0x40, 0x94, 0x63, 0x26, 0x4a, 0x5f, 0x59, 0x68, 0x57, 0x1f, 0xb3, 0x58, 0xdd, 0x2c, 0x0b, + 0x2d, 0x3a, 0x66, 0xb2, 0xe6, 0x85, 0x32, 0x84, 0xbd, 0xe2, 0x98, 0xc5, 0x4b, 0x66, 0x29, 0xc7, + 0x8c, 0x01, 0x2a, 0xc7, 0x2c, 0xaa, 0x4e, 0xa5, 0x1d, 0xb3, 0xc4, 0xeb, 0x45, 0xda, 0x31, 0x4b, + 0x16, 0xb8, 0x52, 0xd6, 0x91, 0xe1, 0x6a, 0xc7, 0x6c, 0x29, 0xa5, 0x90, 0x85, 0xde, 0xcd, 0x30, + 0x62, 0xea, 0xa3, 0x48, 0xf3, 0xbd, 0xd7, 0xe4, 0xce, 0xdc, 0xe3, 0xdc, 0xfc, 0x72, 0x8f, 0xff, + 0x9d, 0x01, 0xcb, 0x69, 0x45, 0x30, 0x94, 0x81, 0x93, 0xf1, 0x98, 0xd2, 0xdc, 0x78, 0x5d, 0xf6, + 0xab, 0xad, 0x15, 0xee, 0xfa, 0x47, 0xf5, 0x7f, 0xfb, 0x72, 0xd5, 0xf8, 0xcf, 0x2f, 0x57, 0x8d, + 0xff, 0xf9, 0x72, 0xd5, 0xf8, 0xfb, 0xff, 0x5d, 0x9d, 0x39, 0x99, 0x67, 0xff, 0xd5, 0xf8, 0xdb, + 0xbf, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x8b, 0x54, 0x11, 0xdf, 0xef, 0x3c, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.proto b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.proto index 14391378a..4ccc23474 100644 --- a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.proto +++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.proto @@ -217,7 +217,7 @@ service Maintenance { // It only iterates "key" bucket in backend storage. 
rpc HashKV(HashKVRequest) returns (HashKVResponse) { option (google.api.http) = { - post: "/v3/maintenance/hash" + post: "/v3/maintenance/hashkv" body: "*" }; } diff --git a/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go index ae112ae13..23201302e 100644 --- a/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go +++ b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go @@ -45,6 +45,7 @@ var ( ErrGRPCMemberNotLearner = status.New(codes.FailedPrecondition, "etcdserver: can only promote a learner member").Err() ErrGRPCLearnerNotReady = status.New(codes.FailedPrecondition, "etcdserver: can only promote a learner member which is in sync with leader").Err() ErrGRPCTooManyLearners = status.New(codes.FailedPrecondition, "etcdserver: too many learner members in cluster").Err() + ErrGRPCClusterIdMismatch = status.New(codes.FailedPrecondition, "etcdserver: cluster ID mismatch").Err() ErrGRPCRequestTooLarge = status.New(codes.InvalidArgument, "etcdserver: request is too large").Err() ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err() @@ -114,6 +115,7 @@ var ( ErrorDesc(ErrGRPCMemberNotLearner): ErrGRPCMemberNotLearner, ErrorDesc(ErrGRPCLearnerNotReady): ErrGRPCLearnerNotReady, ErrorDesc(ErrGRPCTooManyLearners): ErrGRPCTooManyLearners, + ErrorDesc(ErrGRPCClusterIdMismatch): ErrGRPCClusterIdMismatch, ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge, ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests, @@ -200,6 +202,7 @@ var ( ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken) ErrAuthOldRevision = Error(ErrGRPCAuthOldRevision) ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt) + ErrClusterIdMismatch = Error(ErrGRPCClusterIdMismatch) ErrNoLeader = Error(ErrGRPCNoLeader) ErrNotLeader = Error(ErrGRPCNotLeader) diff --git a/vendor/go.etcd.io/etcd/api/v3/version/version.go b/vendor/go.etcd.io/etcd/api/v3/version/version.go index d62f6474d..1819ead72 100644 --- a/vendor/go.etcd.io/etcd/api/v3/version/version.go +++ b/vendor/go.etcd.io/etcd/api/v3/version/version.go @@ -26,7 +26,7 @@ import ( var ( // MinClusterVersion is the min cluster version this etcd binary is compatible with. MinClusterVersion = "3.0.0" - Version = "3.5.9" + Version = "3.5.10" APIVersion = "unknown" // Git SHA Value will be set during build diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt.go index 38548ddd7..fe028c613 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt.go @@ -21,12 +21,12 @@ type SocketOpts struct { // in which case lock on data file could result in unexpected // condition. User should take caution to protect against lock race. // [1] https://man7.org/linux/man-pages/man7/socket.7.html - ReusePort bool + ReusePort bool `json:"reuse-port"` // ReuseAddress enables a socket option SO_REUSEADDR which allows // binding to an address in `TIME_WAIT` state. Useful to improve MTTR // in cases where etcd slow to restart due to excessive `TIME_WAIT`. 
// [1] https://man7.org/linux/man-pages/man7/socket.7.html - ReuseAddress bool + ReuseAddress bool `json:"reuse-address"` } func getControls(sopts *SocketOpts) Controls { diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/tls.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/tls.go index 62fe0d385..8c3a35b14 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/tls.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/tls.go @@ -15,6 +15,7 @@ package transport import ( + "context" "fmt" "strings" "time" @@ -27,6 +28,8 @@ func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) { if err != nil { return nil, err } + defer t.CloseIdleConnections() + var errs []string var endpoints []string for _, ep := range eps { @@ -34,7 +37,7 @@ func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) { errs = append(errs, fmt.Sprintf("%q is insecure", ep)) continue } - conn, cerr := t.Dial("tcp", ep[len("https://"):]) + conn, cerr := t.DialContext(context.Background(), "tcp", ep[len("https://"):]) if cerr != nil { errs = append(errs, fmt.Sprintf("%q failed to dial (%v)", ep, cerr)) continue diff --git a/vendor/go.etcd.io/etcd/client/v3/client.go b/vendor/go.etcd.io/etcd/client/v3/client.go index 4dfae89c6..efa44e890 100644 --- a/vendor/go.etcd.io/etcd/client/v3/client.go +++ b/vendor/go.etcd.io/etcd/client/v3/client.go @@ -264,6 +264,7 @@ func (c *Client) getToken(ctx context.Context) error { resp, err := c.Auth.Authenticate(ctx, c.Username, c.Password) if err != nil { if err == rpctypes.ErrAuthNotEnabled { + c.authTokenBundle.UpdateAuthToken("") return nil } return err @@ -501,7 +502,7 @@ func (c *Client) checkVersion() (err error) { return } } - if maj < 3 || (maj == 3 && min < 2) { + if maj < 3 || (maj == 3 && min < 4) { rerr = ErrOldCluster } errc <- rerr @@ -509,7 +510,7 @@ func (c *Client) checkVersion() (err error) { } // wait for success for range eps { - if err = <-errc; err == nil { + if err = <-errc; err != nil { break } } diff --git a/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go b/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go index f6674235c..35a3fe8c3 100644 --- a/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go +++ b/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go @@ -41,10 +41,6 @@ func extractHostFromHostPort(ep string) string { return host } -func extractHostFromPath(pathStr string) string { - return extractHostFromHostPort(path.Base(pathStr)) -} - // mustSplit2 returns the values from strings.SplitN(s, sep, 2). // If sep is not found, it returns ("", "", false) instead. 
func mustSplit2(s, sep string) (string, string) { @@ -96,29 +92,29 @@ func translateEndpoint(ep string) (addr string, serverName string, requireCreds if strings.HasPrefix(ep, "unix:///") || strings.HasPrefix(ep, "unixs:///") { // absolute path case schema, absolutePath := mustSplit2(ep, "://") - return "unix://" + absolutePath, extractHostFromPath(absolutePath), schemeToCredsRequirement(schema) + return "unix://" + absolutePath, path.Base(absolutePath), schemeToCredsRequirement(schema) } if strings.HasPrefix(ep, "unix://") || strings.HasPrefix(ep, "unixs://") { // legacy etcd local path schema, localPath := mustSplit2(ep, "://") - return "unix:" + localPath, extractHostFromPath(localPath), schemeToCredsRequirement(schema) + return "unix:" + localPath, path.Base(localPath), schemeToCredsRequirement(schema) } schema, localPath := mustSplit2(ep, ":") - return "unix:" + localPath, extractHostFromPath(localPath), schemeToCredsRequirement(schema) + return "unix:" + localPath, path.Base(localPath), schemeToCredsRequirement(schema) } if strings.Contains(ep, "://") { url, err := url.Parse(ep) if err != nil { - return ep, extractHostFromHostPort(ep), CREDS_OPTIONAL + return ep, ep, CREDS_OPTIONAL } if url.Scheme == "http" || url.Scheme == "https" { - return url.Host, url.Hostname(), schemeToCredsRequirement(url.Scheme) + return url.Host, url.Host, schemeToCredsRequirement(url.Scheme) } - return ep, url.Hostname(), schemeToCredsRequirement(url.Scheme) + return ep, url.Host, schemeToCredsRequirement(url.Scheme) } // Handles plain addresses like 10.0.0.44:437. - return ep, extractHostFromHostPort(ep), CREDS_OPTIONAL + return ep, ep, CREDS_OPTIONAL } // RequiresCredentials returns whether given endpoint requires diff --git a/vendor/go.etcd.io/etcd/client/v3/lease.go b/vendor/go.etcd.io/etcd/client/v3/lease.go index 9e1b70464..19af9c093 100644 --- a/vendor/go.etcd.io/etcd/client/v3/lease.go +++ b/vendor/go.etcd.io/etcd/client/v3/lease.go @@ -294,7 +294,9 @@ func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAl } l.mu.Unlock() - go l.keepAliveCtxCloser(ctx, id, ka.donec) + if ctx.Done() != nil { + go l.keepAliveCtxCloser(ctx, id, ka.donec) + } l.firstKeepAliveOnce.Do(func() { go l.recvKeepAliveLoop() go l.deadlineLoop() diff --git a/vendor/github.com/golang/mock/AUTHORS b/vendor/go.uber.org/mock/AUTHORS similarity index 100% rename from vendor/github.com/golang/mock/AUTHORS rename to vendor/go.uber.org/mock/AUTHORS diff --git a/vendor/github.com/golang/mock/LICENSE b/vendor/go.uber.org/mock/LICENSE similarity index 100% rename from vendor/github.com/golang/mock/LICENSE rename to vendor/go.uber.org/mock/LICENSE diff --git a/vendor/github.com/golang/mock/gomock/call.go b/vendor/go.uber.org/mock/gomock/call.go similarity index 78% rename from vendor/github.com/golang/mock/gomock/call.go rename to vendor/go.uber.org/mock/gomock/call.go index 13c9f44b1..e1ea82637 100644 --- a/vendor/github.com/golang/mock/gomock/call.go +++ b/vendor/go.uber.org/mock/gomock/call.go @@ -25,7 +25,7 @@ import ( type Call struct { t TestHelper // for triggering test failures on invalid call setup - receiver interface{} // the receiver of the method call + receiver any // the receiver of the method call method string // the name of the method methodType reflect.Type // the type of the method args []Matcher // the args @@ -41,12 +41,12 @@ type Call struct { // actions are called when this Call is called. Each action gets the args and // can set the return values by returning a non-nil slice. 
Actions run in the // order they are created. - actions []func([]interface{}) []interface{} + actions []func([]any) []any } // newCall creates a *Call. It requires the method type in order to support // unexported methods. -func newCall(t TestHelper, receiver interface{}, method string, methodType reflect.Type, args ...interface{}) *Call { +func newCall(t TestHelper, receiver any, method string, methodType reflect.Type, args ...any) *Call { t.Helper() // TODO: check arity, types. @@ -67,9 +67,9 @@ func newCall(t TestHelper, receiver interface{}, method string, methodType refle // and this line changes, i.e. this code is wrapped in another anonymous function. // 0 is us, 1 is RecordCallWithMethodType(), 2 is the generated recorder, and 3 is the user's test. origin := callerInfo(3) - actions := []func([]interface{}) []interface{}{func([]interface{}) []interface{} { + actions := []func([]any) []any{func([]any) []any { // Synthesize the zero value for each of the return args' types. - rets := make([]interface{}, methodType.NumOut()) + rets := make([]any, methodType.NumOut()) for i := 0; i < methodType.NumOut(); i++ { rets[i] = reflect.Zero(methodType.Out(i)).Interface() } @@ -107,20 +107,26 @@ func (c *Call) MaxTimes(n int) *Call { // DoAndReturn declares the action to run when the call is matched. // The return values from this function are returned by the mocked function. -// It takes an interface{} argument to support n-arity functions. -func (c *Call) DoAndReturn(f interface{}) *Call { +// It takes an any argument to support n-arity functions. +// The anonymous function must match the function signature mocked method. +func (c *Call) DoAndReturn(f any) *Call { // TODO: Check arity and types here, rather than dying badly elsewhere. v := reflect.ValueOf(f) - c.addAction(func(args []interface{}) []interface{} { + c.addAction(func(args []any) []any { c.t.Helper() - vArgs := make([]reflect.Value, len(args)) ft := v.Type() if c.methodType.NumIn() != ft.NumIn() { - c.t.Fatalf("wrong number of arguments in DoAndReturn func for %T.%v: got %d, want %d [%s]", - c.receiver, c.method, ft.NumIn(), c.methodType.NumIn(), c.origin) + if ft.IsVariadic() { + c.t.Fatalf("wrong number of arguments in DoAndReturn func for %T.%v The function signature must match the mocked method, a variadic function cannot be used.", + c.receiver, c.method) + } else { + c.t.Fatalf("wrong number of arguments in DoAndReturn func for %T.%v: got %d, want %d [%s]", + c.receiver, c.method, ft.NumIn(), c.methodType.NumIn(), c.origin) + } return nil } + vArgs := make([]reflect.Value, len(args)) for i := 0; i < len(args); i++ { if args[i] != nil { vArgs[i] = reflect.ValueOf(args[i]) @@ -130,7 +136,7 @@ func (c *Call) DoAndReturn(f interface{}) *Call { } } vRets := v.Call(vArgs) - rets := make([]interface{}, len(vRets)) + rets := make([]any, len(vRets)) for i, ret := range vRets { rets[i] = ret.Interface() } @@ -142,20 +148,26 @@ func (c *Call) DoAndReturn(f interface{}) *Call { // Do declares the action to run when the call is matched. The function's // return values are ignored to retain backward compatibility. To use the // return values call DoAndReturn. -// It takes an interface{} argument to support n-arity functions. -func (c *Call) Do(f interface{}) *Call { +// It takes an any argument to support n-arity functions. +// The anonymous function must match the function signature mocked method. +func (c *Call) Do(f any) *Call { // TODO: Check arity and types here, rather than dying badly elsewhere. 
v := reflect.ValueOf(f) - c.addAction(func(args []interface{}) []interface{} { + c.addAction(func(args []any) []any { c.t.Helper() - if c.methodType.NumIn() != v.Type().NumIn() { - c.t.Fatalf("wrong number of arguments in Do func for %T.%v: got %d, want %d [%s]", - c.receiver, c.method, v.Type().NumIn(), c.methodType.NumIn(), c.origin) + ft := v.Type() + if c.methodType.NumIn() != ft.NumIn() { + if ft.IsVariadic() { + c.t.Fatalf("wrong number of arguments in Do func for %T.%v The function signature must match the mocked method, a variadic function cannot be used.", + c.receiver, c.method) + } else { + c.t.Fatalf("wrong number of arguments in Do func for %T.%v: got %d, want %d [%s]", + c.receiver, c.method, ft.NumIn(), c.methodType.NumIn(), c.origin) + } return nil } vArgs := make([]reflect.Value, len(args)) - ft := v.Type() for i := 0; i < len(args); i++ { if args[i] != nil { vArgs[i] = reflect.ValueOf(args[i]) @@ -171,7 +183,7 @@ func (c *Call) Do(f interface{}) *Call { } // Return declares the values to be returned by the mocked function call. -func (c *Call) Return(rets ...interface{}) *Call { +func (c *Call) Return(rets ...any) *Call { c.t.Helper() mt := c.methodType @@ -203,7 +215,7 @@ func (c *Call) Return(rets ...interface{}) *Call { } } - c.addAction(func([]interface{}) []interface{} { + c.addAction(func([]any) []any { return rets }) @@ -217,9 +229,9 @@ func (c *Call) Times(n int) *Call { } // SetArg declares an action that will set the nth argument's value, -// indirected through a pointer. Or, in the case of a slice, SetArg -// will copy value's elements into the nth argument. -func (c *Call) SetArg(n int, value interface{}) *Call { +// indirected through a pointer. Or, in the case of a slice and map, SetArg +// will copy value's elements/key-value pairs into the nth argument. +func (c *Call) SetArg(n int, value any) *Call { c.t.Helper() mt := c.methodType @@ -243,16 +255,20 @@ func (c *Call) SetArg(n int, value interface{}) *Call { // nothing to do case reflect.Slice: // nothing to do + case reflect.Map: + // nothing to do default: - c.t.Fatalf("SetArg(%d, ...) referring to argument of non-pointer non-interface non-slice type %v [%s]", + c.t.Fatalf("SetArg(%d, ...) referring to argument of non-pointer non-interface non-slice non-map type %v [%s]", n, at, c.origin) } - c.addAction(func(args []interface{}) []interface{} { + c.addAction(func(args []any) []any { v := reflect.ValueOf(value) switch reflect.TypeOf(args[n]).Kind() { case reflect.Slice: setSlice(args[n], v) + case reflect.Map: + setMap(args[n], v) default: reflect.ValueOf(args[n]).Elem().Set(v) } @@ -307,7 +323,7 @@ func (c *Call) String() string { // Tests if the given call matches the expected call. // If yes, returns nil. If no, returns error with message explaining why it does not match. -func (c *Call) matches(args []interface{}) error { +func (c *Call) matches(args []any) error { if !c.methodType.IsVariadic() { if len(args) != len(c.args) { return fmt.Errorf("expected call at %s has the wrong number of arguments. Got: %d, want: %d", @@ -413,30 +429,77 @@ func (c *Call) dropPrereqs() (preReqs []*Call) { return } -func (c *Call) call() []func([]interface{}) []interface{} { +func (c *Call) call() []func([]any) []any { c.numCalls++ return c.actions } // InOrder declares that the given calls should occur in order. -func InOrder(calls ...*Call) { +// It panics if the type of any of the arguments isn't *Call or a generated +// mock with an embedded *Call. 
+func InOrder(args ...any) { + calls := make([]*Call, 0, len(args)) + for i := 0; i < len(args); i++ { + if call := getCall(args[i]); call != nil { + calls = append(calls, call) + continue + } + panic(fmt.Sprintf( + "invalid argument at position %d of type %T, InOrder expects *gomock.Call or generated mock types with an embedded *gomock.Call", + i, + args[i], + )) + } for i := 1; i < len(calls); i++ { calls[i].After(calls[i-1]) } } -func setSlice(arg interface{}, v reflect.Value) { +// getCall checks if the parameter is a *Call or a generated struct +// that wraps a *Call and returns the *Call pointer - if neither, it returns nil. +func getCall(arg any) *Call { + if call, ok := arg.(*Call); ok { + return call + } + t := reflect.ValueOf(arg) + if t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface { + return nil + } + t = t.Elem() + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if !f.CanInterface() { + continue + } + if call, ok := f.Interface().(*Call); ok { + return call + } + } + return nil +} + +func setSlice(arg any, v reflect.Value) { va := reflect.ValueOf(arg) for i := 0; i < v.Len(); i++ { va.Index(i).Set(v.Index(i)) } } -func (c *Call) addAction(action func([]interface{}) []interface{}) { +func setMap(arg any, v reflect.Value) { + va := reflect.ValueOf(arg) + for _, e := range va.MapKeys() { + va.SetMapIndex(e, reflect.Value{}) + } + for _, e := range v.MapKeys() { + va.SetMapIndex(e, v.MapIndex(e)) + } +} + +func (c *Call) addAction(action func([]any) []any) { c.actions = append(c.actions, action) } -func formatGottenArg(m Matcher, arg interface{}) string { +func formatGottenArg(m Matcher, arg any) string { got := fmt.Sprintf("%v (%T)", arg, arg) if gs, ok := m.(GotFormatter); ok { got = gs.Got(arg) diff --git a/vendor/github.com/golang/mock/gomock/callset.go b/vendor/go.uber.org/mock/gomock/callset.go similarity index 70% rename from vendor/github.com/golang/mock/gomock/callset.go rename to vendor/go.uber.org/mock/gomock/callset.go index 49dba787a..f5cc592d6 100644 --- a/vendor/github.com/golang/mock/gomock/callset.go +++ b/vendor/go.uber.org/mock/gomock/callset.go @@ -18,40 +18,69 @@ import ( "bytes" "errors" "fmt" + "sync" ) // callSet represents a set of expected calls, indexed by receiver and method // name. type callSet struct { // Calls that are still expected. - expected map[callSetKey][]*Call + expected map[callSetKey][]*Call + expectedMu *sync.Mutex // Calls that have been exhausted. exhausted map[callSetKey][]*Call + // when set to true, existing call expectations are overridden when new call expectations are made + allowOverride bool } // callSetKey is the key in the maps in callSet type callSetKey struct { - receiver interface{} + receiver any fname string } func newCallSet() *callSet { - return &callSet{make(map[callSetKey][]*Call), make(map[callSetKey][]*Call)} + return &callSet{ + expected: make(map[callSetKey][]*Call), + expectedMu: &sync.Mutex{}, + exhausted: make(map[callSetKey][]*Call), + } +} + +func newOverridableCallSet() *callSet { + return &callSet{ + expected: make(map[callSetKey][]*Call), + expectedMu: &sync.Mutex{}, + exhausted: make(map[callSetKey][]*Call), + allowOverride: true, + } } // Add adds a new expected call. func (cs callSet) Add(call *Call) { key := callSetKey{call.receiver, call.method} + + cs.expectedMu.Lock() + defer cs.expectedMu.Unlock() + m := cs.expected if call.exhausted() { m = cs.exhausted } + if cs.allowOverride { + m[key] = make([]*Call, 0) + } + m[key] = append(m[key], call) } // Remove removes an expected call. 
func (cs callSet) Remove(call *Call) { key := callSetKey{call.receiver, call.method} + + cs.expectedMu.Lock() + defer cs.expectedMu.Unlock() + calls := cs.expected[key] for i, c := range calls { if c == call { @@ -64,9 +93,12 @@ func (cs callSet) Remove(call *Call) { } // FindMatch searches for a matching call. Returns error with explanation message if no call matched. -func (cs callSet) FindMatch(receiver interface{}, method string, args []interface{}) (*Call, error) { +func (cs callSet) FindMatch(receiver any, method string, args []any) (*Call, error) { key := callSetKey{receiver, method} + cs.expectedMu.Lock() + defer cs.expectedMu.Unlock() + // Search through the expected calls. expected := cs.expected[key] var callsErrors bytes.Buffer @@ -101,6 +133,9 @@ func (cs callSet) FindMatch(receiver interface{}, method string, args []interfac // Failures returns the calls that are not satisfied. func (cs callSet) Failures() []*Call { + cs.expectedMu.Lock() + defer cs.expectedMu.Unlock() + failures := make([]*Call, 0, len(cs.expected)) for _, calls := range cs.expected { for _, call := range calls { @@ -111,3 +146,19 @@ func (cs callSet) Failures() []*Call { } return failures } + +// Satisfied returns true in case all expected calls in this callSet are satisfied. +func (cs callSet) Satisfied() bool { + cs.expectedMu.Lock() + defer cs.expectedMu.Unlock() + + for _, calls := range cs.expected { + for _, call := range calls { + if !call.satisfied() { + return false + } + } + } + + return true +} diff --git a/vendor/github.com/golang/mock/gomock/controller.go b/vendor/go.uber.org/mock/gomock/controller.go similarity index 67% rename from vendor/github.com/golang/mock/gomock/controller.go rename to vendor/go.uber.org/mock/gomock/controller.go index f054200d5..9d17a2f0c 100644 --- a/vendor/github.com/golang/mock/gomock/controller.go +++ b/vendor/go.uber.org/mock/gomock/controller.go @@ -12,44 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package gomock is a mock framework for Go. -// -// Standard usage: -// (1) Define an interface that you wish to mock. -// type MyInterface interface { -// SomeMethod(x int64, y string) -// } -// (2) Use mockgen to generate a mock from the interface. -// (3) Use the mock in a test: -// func TestMyThing(t *testing.T) { -// mockCtrl := gomock.NewController(t) -// defer mockCtrl.Finish() -// -// mockObj := something.NewMockMyInterface(mockCtrl) -// mockObj.EXPECT().SomeMethod(4, "blah") -// // pass mockObj to a real object and play with it. -// } -// -// By default, expected calls are not enforced to run in any particular order. -// Call order dependency can be enforced by use of InOrder and/or Call.After. -// Call.After can create more varied call order dependencies, but InOrder is -// often more convenient. -// -// The following examples create equivalent call order dependencies. -// -// Example of using Call.After to chain expected call order: -// -// firstCall := mockObj.EXPECT().SomeMethod(1, "first") -// secondCall := mockObj.EXPECT().SomeMethod(2, "second").After(firstCall) -// mockObj.EXPECT().SomeMethod(3, "third").After(secondCall) -// -// Example of using InOrder to declare expected call order: -// -// gomock.InOrder( -// mockObj.EXPECT().SomeMethod(1, "first"), -// mockObj.EXPECT().SomeMethod(2, "second"), -// mockObj.EXPECT().SomeMethod(3, "third"), -// ) package gomock import ( @@ -63,8 +25,8 @@ import ( // A TestReporter is something that can be used to report test failures. 
It // is satisfied by the standard library's *testing.T. type TestReporter interface { - Errorf(format string, args ...interface{}) - Fatalf(format string, args ...interface{}) + Errorf(format string, args ...any) + Fatalf(format string, args ...any) } // TestHelper is a TestReporter that has the Helper method. It is satisfied @@ -89,24 +51,21 @@ type cleanuper interface { // goroutines. Each test should create a new Controller and invoke Finish via // defer. // -// func TestFoo(t *testing.T) { -// ctrl := gomock.NewController(t) -// defer ctrl.Finish() -// // .. -// } +// func TestFoo(t *testing.T) { +// ctrl := gomock.NewController(t) +// // .. +// } // -// func TestBar(t *testing.T) { -// t.Run("Sub-Test-1", st) { -// ctrl := gomock.NewController(st) -// defer ctrl.Finish() -// // .. -// }) -// t.Run("Sub-Test-2", st) { -// ctrl := gomock.NewController(st) -// defer ctrl.Finish() -// // .. -// }) -// }) +// func TestBar(t *testing.T) { +// t.Run("Sub-Test-1", st) { +// ctrl := gomock.NewController(st) +// // .. +// }) +// t.Run("Sub-Test-2", st) { +// ctrl := gomock.NewController(st) +// // .. +// }) +// }) type Controller struct { // T should only be called within a generated mock. It is not intended to // be used in user code and may be changed in future versions. T is the @@ -119,12 +78,11 @@ type Controller struct { finished bool } -// NewController returns a new Controller. It is the preferred way to create a -// Controller. +// NewController returns a new Controller. It is the preferred way to create a Controller. // -// New in go1.14+, if you are passing a *testing.T into this function you no -// longer need to call ctrl.Finish() in your test methods. -func NewController(t TestReporter) *Controller { +// Passing [*testing.T] registers cleanup function to automatically call [Controller.Finish] +// when the test and all its subtests complete. +func NewController(t TestReporter, opts ...ControllerOption) *Controller { h, ok := t.(TestHelper) if !ok { h = &nopTestHelper{t} @@ -133,6 +91,9 @@ func NewController(t TestReporter) *Controller { T: h, expectedCalls: newCallSet(), } + for _, opt := range opts { + opt.apply(ctrl) + } if c, ok := isCleanuper(ctrl.T); ok { c.Cleanup(func() { ctrl.T.Helper() @@ -143,15 +104,32 @@ func NewController(t TestReporter) *Controller { return ctrl } +// ControllerOption configures how a Controller should behave. +type ControllerOption interface { + apply(*Controller) +} + +type overridableExpectationsOption struct{} + +// WithOverridableExpectations allows for overridable call expectations +// i.e., subsequent call expectations override existing call expectations +func WithOverridableExpectations() overridableExpectationsOption { + return overridableExpectationsOption{} +} + +func (o overridableExpectationsOption) apply(ctrl *Controller) { + ctrl.expectedCalls = newOverridableCallSet() +} + type cancelReporter struct { t TestHelper cancel func() } -func (r *cancelReporter) Errorf(format string, args ...interface{}) { +func (r *cancelReporter) Errorf(format string, args ...any) { r.t.Errorf(format, args...) } -func (r *cancelReporter) Fatalf(format string, args ...interface{}) { +func (r *cancelReporter) Fatalf(format string, args ...any) { defer r.cancel() r.t.Fatalf(format, args...) } @@ -176,17 +154,17 @@ type nopTestHelper struct { t TestReporter } -func (h *nopTestHelper) Errorf(format string, args ...interface{}) { +func (h *nopTestHelper) Errorf(format string, args ...any) { h.t.Errorf(format, args...) 
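WithOverridableExpectations is easiest to see with a generated mock. In the sketch below, MockStore, NewMockStore and Get are hypothetical stand-ins for mockgen output and are not defined in this patch; with the option set, a later EXPECT for the same method replaces the earlier expectations instead of queueing behind them.

import (
	"testing"

	"go.uber.org/mock/gomock"
)

func TestOverride(t *testing.T) {
	ctrl := gomock.NewController(t, gomock.WithOverridableExpectations())
	store := NewMockStore(ctrl) // hypothetical mockgen-generated mock

	store.EXPECT().Get("key").Return("old", nil)
	store.EXPECT().Get("key").Return("new", nil) // overrides the expectation above

	v, _ := store.Get("key")
	if v != "new" {
		t.Fatalf("expected the overriding expectation to win, got %q", v)
	}
}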
} -func (h *nopTestHelper) Fatalf(format string, args ...interface{}) { +func (h *nopTestHelper) Fatalf(format string, args ...any) { h.t.Fatalf(format, args...) } func (h nopTestHelper) Helper() {} // RecordCall is called by a mock. It should not be called by user code. -func (ctrl *Controller) RecordCall(receiver interface{}, method string, args ...interface{}) *Call { +func (ctrl *Controller) RecordCall(receiver any, method string, args ...any) *Call { ctrl.T.Helper() recv := reflect.ValueOf(receiver) @@ -200,7 +178,7 @@ func (ctrl *Controller) RecordCall(receiver interface{}, method string, args ... } // RecordCallWithMethodType is called by a mock. It should not be called by user code. -func (ctrl *Controller) RecordCallWithMethodType(receiver interface{}, method string, methodType reflect.Type, args ...interface{}) *Call { +func (ctrl *Controller) RecordCallWithMethodType(receiver any, method string, methodType reflect.Type, args ...any) *Call { ctrl.T.Helper() call := newCall(ctrl.T, receiver, method, methodType, args...) @@ -213,11 +191,11 @@ func (ctrl *Controller) RecordCallWithMethodType(receiver interface{}, method st } // Call is called by a mock. It should not be called by user code. -func (ctrl *Controller) Call(receiver interface{}, method string, args ...interface{}) []interface{} { +func (ctrl *Controller) Call(receiver any, method string, args ...any) []any { ctrl.T.Helper() // Nest this code so we can use defer to make sure the lock is released. - actions := func() []func([]interface{}) []interface{} { + actions := func() []func([]any) []any { ctrl.T.Helper() ctrl.mu.Lock() defer ctrl.mu.Unlock() @@ -246,7 +224,7 @@ func (ctrl *Controller) Call(receiver interface{}, method string, args ...interf return actions }() - var rets []interface{} + var rets []any for _, action := range actions { if r := action(args); r != nil { rets = r @@ -256,12 +234,8 @@ func (ctrl *Controller) Call(receiver interface{}, method string, args ...interf return rets } -// Finish checks to see if all the methods that were expected to be called -// were called. It should be invoked for each Controller. It is not idempotent -// and therefore can only be invoked once. -// -// New in go1.14+, if you are passing a *testing.T into NewController function you no -// longer need to call ctrl.Finish() in your test methods. +// Finish checks to see if all the methods that were expected to be called were called. +// It is not idempotent and therefore can only be invoked once. func (ctrl *Controller) Finish() { // If we're currently panicking, probably because this is a deferred call. // This must be recovered in the deferred function. @@ -269,7 +243,15 @@ func (ctrl *Controller) Finish() { ctrl.finish(false, err) } -func (ctrl *Controller) finish(cleanup bool, panicErr interface{}) { +// Satisfied returns whether all expected calls bound to this Controller have been satisfied. +// Calling Finish is then guaranteed to not fail due to missing calls. 
+func (ctrl *Controller) Satisfied() bool { + ctrl.mu.Lock() + defer ctrl.mu.Unlock() + return ctrl.expectedCalls.Satisfied() +} + +func (ctrl *Controller) finish(cleanup bool, panicErr any) { ctrl.T.Helper() ctrl.mu.Lock() diff --git a/vendor/go.uber.org/mock/gomock/doc.go b/vendor/go.uber.org/mock/gomock/doc.go new file mode 100644 index 000000000..696dda388 --- /dev/null +++ b/vendor/go.uber.org/mock/gomock/doc.go @@ -0,0 +1,60 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package gomock is a mock framework for Go. +// +// Standard usage: +// +// (1) Define an interface that you wish to mock. +// type MyInterface interface { +// SomeMethod(x int64, y string) +// } +// (2) Use mockgen to generate a mock from the interface. +// (3) Use the mock in a test: +// func TestMyThing(t *testing.T) { +// mockCtrl := gomock.NewController(t) +// mockObj := something.NewMockMyInterface(mockCtrl) +// mockObj.EXPECT().SomeMethod(4, "blah") +// // pass mockObj to a real object and play with it. +// } +// +// By default, expected calls are not enforced to run in any particular order. +// Call order dependency can be enforced by use of InOrder and/or Call.After. +// Call.After can create more varied call order dependencies, but InOrder is +// often more convenient. +// +// The following examples create equivalent call order dependencies. +// +// Example of using Call.After to chain expected call order: +// +// firstCall := mockObj.EXPECT().SomeMethod(1, "first") +// secondCall := mockObj.EXPECT().SomeMethod(2, "second").After(firstCall) +// mockObj.EXPECT().SomeMethod(3, "third").After(secondCall) +// +// Example of using InOrder to declare expected call order: +// +// gomock.InOrder( +// mockObj.EXPECT().SomeMethod(1, "first"), +// mockObj.EXPECT().SomeMethod(2, "second"), +// mockObj.EXPECT().SomeMethod(3, "third"), +// ) +// +// The standard TestReporter most users will pass to `NewController` is a +// `*testing.T` from the context of the test. Note that this will use the +// standard `t.Error` and `t.Fatal` methods to report what happened in the test. +// In some cases this can leave your testing package in a weird state if global +// state is used since `t.Fatal` is like calling panic in the middle of a +// function. In these cases it is recommended that you pass in your own +// `TestReporter`. +package gomock diff --git a/vendor/github.com/golang/mock/gomock/matchers.go b/vendor/go.uber.org/mock/gomock/matchers.go similarity index 63% rename from vendor/github.com/golang/mock/gomock/matchers.go rename to vendor/go.uber.org/mock/gomock/matchers.go index 2822fb2c8..c17255096 100644 --- a/vendor/github.com/golang/mock/gomock/matchers.go +++ b/vendor/go.uber.org/mock/gomock/matchers.go @@ -17,6 +17,7 @@ package gomock import ( "fmt" "reflect" + "regexp" "strings" ) @@ -24,7 +25,7 @@ import ( // It is used to represent the valid or expected arguments to a mocked method. type Matcher interface { // Matches returns whether x is a match. 
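Controller.Satisfied pairs with the callSet.Satisfied helper above and lets a test assert, mid-flight, that every expectation registered so far has been met. The sketch assumes a hypothetical mockgen-generated MockStore with a Get method; neither is defined in this patch.

import (
	"testing"

	"go.uber.org/mock/gomock"
)

func TestSatisfiedCheckpoint(t *testing.T) {
	ctrl := gomock.NewController(t)
	store := NewMockStore(ctrl) // hypothetical mockgen-generated mock

	store.EXPECT().Get("a").Return("1", nil)

	if ctrl.Satisfied() {
		t.Fatal("Get has not been called yet, so the controller should not be satisfied")
	}
	store.Get("a")
	if !ctrl.Satisfied() {
		t.Fatal("all expectations were met, so the controller should be satisfied")
	}
	// Finish, run automatically through t.Cleanup, can no longer fail here.
}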
- Matches(x interface{}) bool + Matches(x any) bool // String describes what the matcher matches. String() string @@ -35,7 +36,7 @@ type Matcher interface { // printing . func WantFormatter(s fmt.Stringer, m Matcher) Matcher { type matcher interface { - Matches(x interface{}) bool + Matches(x any) bool } return struct { @@ -63,16 +64,16 @@ func (f StringerFunc) String() string { type GotFormatter interface { // Got is invoked with the received value. The result is used when // printing the failure message. - Got(got interface{}) string + Got(got any) string } // GotFormatterFunc type is an adapter to allow the use of ordinary // functions as a GotFormatter. If f is a function with the appropriate // signature, GotFormatterFunc(f) is a GotFormatter that calls f. -type GotFormatterFunc func(got interface{}) string +type GotFormatterFunc func(got any) string // Got implements GotFormatter. -func (f GotFormatterFunc) Got(got interface{}) string { +func (f GotFormatterFunc) Got(got any) string { return f(got) } @@ -89,7 +90,7 @@ func GotFormatterAdapter(s GotFormatter, m Matcher) Matcher { type anyMatcher struct{} -func (anyMatcher) Matches(interface{}) bool { +func (anyMatcher) Matches(any) bool { return true } @@ -97,11 +98,23 @@ func (anyMatcher) String() string { return "is anything" } +type condMatcher struct { + fn func(x any) bool +} + +func (c condMatcher) Matches(x any) bool { + return c.fn(x) +} + +func (condMatcher) String() string { + return "adheres to a custom condition" +} + type eqMatcher struct { - x interface{} + x any } -func (e eqMatcher) Matches(x interface{}) bool { +func (e eqMatcher) Matches(x any) bool { // In case, some value is nil if e.x == nil || x == nil { return reflect.DeepEqual(e.x, x) @@ -125,7 +138,7 @@ func (e eqMatcher) String() string { type nilMatcher struct{} -func (nilMatcher) Matches(x interface{}) bool { +func (nilMatcher) Matches(x any) bool { if x == nil { return true } @@ -148,7 +161,7 @@ type notMatcher struct { m Matcher } -func (n notMatcher) Matches(x interface{}) bool { +func (n notMatcher) Matches(x any) bool { return !n.m.Matches(x) } @@ -156,11 +169,30 @@ func (n notMatcher) String() string { return "not(" + n.m.String() + ")" } +type regexMatcher struct { + regex *regexp.Regexp +} + +func (m regexMatcher) Matches(x any) bool { + switch t := x.(type) { + case string: + return m.regex.MatchString(t) + case []byte: + return m.regex.Match(t) + default: + return false + } +} + +func (m regexMatcher) String() string { + return "matches regex " + m.regex.String() +} + type assignableToTypeOfMatcher struct { targetType reflect.Type } -func (m assignableToTypeOfMatcher) Matches(x interface{}) bool { +func (m assignableToTypeOfMatcher) Matches(x any) bool { return reflect.TypeOf(x).AssignableTo(m.targetType) } @@ -168,11 +200,32 @@ func (m assignableToTypeOfMatcher) String() string { return "is assignable to " + m.targetType.Name() } +type anyOfMatcher struct { + matchers []Matcher +} + +func (am anyOfMatcher) Matches(x any) bool { + for _, m := range am.matchers { + if m.Matches(x) { + return true + } + } + return false +} + +func (am anyOfMatcher) String() string { + ss := make([]string, 0, len(am.matchers)) + for _, matcher := range am.matchers { + ss = append(ss, matcher.String()) + } + return strings.Join(ss, " | ") +} + type allMatcher struct { matchers []Matcher } -func (am allMatcher) Matches(x interface{}) bool { +func (am allMatcher) Matches(x any) bool { for _, m := range am.matchers { if !m.Matches(x) { return false @@ -193,7 +246,7 @@ 
type lenMatcher struct { i int } -func (m lenMatcher) Matches(x interface{}) bool { +func (m lenMatcher) Matches(x any) bool { v := reflect.ValueOf(x) switch v.Kind() { case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: @@ -208,10 +261,10 @@ func (m lenMatcher) String() string { } type inAnyOrderMatcher struct { - x interface{} + x any } -func (m inAnyOrderMatcher) Matches(x interface{}) bool { +func (m inAnyOrderMatcher) Matches(x any) bool { given, ok := m.prepareValue(x) if !ok { return false @@ -257,7 +310,7 @@ func (m inAnyOrderMatcher) Matches(x interface{}) bool { return extraInGiven == 0 && missingFromWanted == 0 } -func (m inAnyOrderMatcher) prepareValue(x interface{}) (reflect.Value, bool) { +func (m inAnyOrderMatcher) prepareValue(x any) (reflect.Value, bool) { xValue := reflect.ValueOf(x) switch xValue.Kind() { case reflect.Slice, reflect.Array: @@ -280,12 +333,45 @@ func All(ms ...Matcher) Matcher { return allMatcher{ms} } // Any returns a matcher that always matches. func Any() Matcher { return anyMatcher{} } +// Cond returns a matcher that matches when the given function returns true +// after passing it the parameter to the mock function. +// This is particularly useful in case you want to match over a field of a custom struct, or dynamic logic. +// +// Example usage: +// +// Cond(func(x any){return x.(int) == 1}).Matches(1) // returns true +// Cond(func(x any){return x.(int) == 2}).Matches(1) // returns false +func Cond(fn func(x any) bool) Matcher { return condMatcher{fn} } + +// AnyOf returns a composite Matcher that returns true if at least one of the +// matchers returns true. +// +// Example usage: +// +// AnyOf(1, 2, 3).Matches(2) // returns true +// AnyOf(1, 2, 3).Matches(10) // returns false +// AnyOf(Nil(), Len(2)).Matches(nil) // returns true +// AnyOf(Nil(), Len(2)).Matches("hi") // returns true +// AnyOf(Nil(), Len(2)).Matches("hello") // returns false +func AnyOf(xs ...any) Matcher { + ms := make([]Matcher, 0, len(xs)) + for _, x := range xs { + if m, ok := x.(Matcher); ok { + ms = append(ms, m) + } else { + ms = append(ms, Eq(x)) + } + } + return anyOfMatcher{ms} +} + // Eq returns a matcher that matches on equality. // // Example usage: -// Eq(5).Matches(5) // returns true -// Eq(5).Matches(4) // returns false -func Eq(x interface{}) Matcher { return eqMatcher{x} } +// +// Eq(5).Matches(5) // returns true +// Eq(5).Matches(4) // returns false +func Eq(x any) Matcher { return eqMatcher{x} } // Len returns a matcher that matches on length. This matcher returns false if // is compared to a type that is not an array, chan, map, slice, or string. @@ -296,35 +382,50 @@ func Len(i int) Matcher { // Nil returns a matcher that matches if the received value is nil. // // Example usage: -// var x *bytes.Buffer -// Nil().Matches(x) // returns true -// x = &bytes.Buffer{} -// Nil().Matches(x) // returns false +// +// var x *bytes.Buffer +// Nil().Matches(x) // returns true +// x = &bytes.Buffer{} +// Nil().Matches(x) // returns false func Nil() Matcher { return nilMatcher{} } // Not reverses the results of its given child matcher. // // Example usage: -// Not(Eq(5)).Matches(4) // returns true -// Not(Eq(5)).Matches(5) // returns false -func Not(x interface{}) Matcher { +// +// Not(Eq(5)).Matches(4) // returns true +// Not(Eq(5)).Matches(5) // returns false +func Not(x any) Matcher { if m, ok := x.(Matcher); ok { return notMatcher{m} } return notMatcher{Eq(x)} } +// Regex checks whether parameter matches the associated regex. 
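Because every matcher implements Matches(x any) bool, the new Cond and AnyOf helpers can be tried out directly, outside of any mock; a small self-contained example:

package main

import (
	"fmt"

	"go.uber.org/mock/gomock"
)

func main() {
	// Cond delegates the decision to an arbitrary predicate.
	even := gomock.Cond(func(x any) bool { return x.(int)%2 == 0 })
	fmt.Println(even.Matches(4), even.Matches(5)) // true false

	// AnyOf matches when at least one operand matches; plain values are
	// wrapped with Eq, Matchers are used as-is.
	state := gomock.AnyOf("created", "updated", gomock.Nil())
	fmt.Println(state.Matches("updated"), state.Matches(nil), state.Matches("deleted")) // true true false
}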
+// +// Example usage: +// +// Regex("[0-9]{2}:[0-9]{2}").Matches("23:02") // returns true +// Regex("[0-9]{2}:[0-9]{2}").Matches([]byte{'2', '3', ':', '0', '2'}) // returns true +// Regex("[0-9]{2}:[0-9]{2}").Matches("hello world") // returns false +// Regex("[0-9]{2}").Matches(21) // returns false as it's not a valid type +func Regex(regexStr string) Matcher { + return regexMatcher{regex: regexp.MustCompile(regexStr)} +} + // AssignableToTypeOf is a Matcher that matches if the parameter to the mock // function is assignable to the type of the parameter to this function. // // Example usage: -// var s fmt.Stringer = &bytes.Buffer{} -// AssignableToTypeOf(s).Matches(time.Second) // returns true -// AssignableToTypeOf(s).Matches(99) // returns false // -// var ctx = reflect.TypeOf((*context.Context)(nil)).Elem() -// AssignableToTypeOf(ctx).Matches(context.Background()) // returns true -func AssignableToTypeOf(x interface{}) Matcher { +// var s fmt.Stringer = &bytes.Buffer{} +// AssignableToTypeOf(s).Matches(time.Second) // returns true +// AssignableToTypeOf(s).Matches(99) // returns false +// +// var ctx = reflect.TypeOf((*context.Context)(nil)).Elem() +// AssignableToTypeOf(ctx).Matches(context.Background()) // returns true +func AssignableToTypeOf(x any) Matcher { if xt, ok := x.(reflect.Type); ok { return assignableToTypeOfMatcher{xt} } @@ -334,8 +435,9 @@ func AssignableToTypeOf(x interface{}) Matcher { // InAnyOrder is a Matcher that returns true for collections of the same elements ignoring the order. // // Example usage: -// InAnyOrder([]int{1, 2, 3}).Matches([]int{1, 3, 2}) // returns true -// InAnyOrder([]int{1, 2, 3}).Matches([]int{1, 2}) // returns false -func InAnyOrder(x interface{}) Matcher { +// +// InAnyOrder([]int{1, 2, 3}).Matches([]int{1, 3, 2}) // returns true +// InAnyOrder([]int{1, 2, 3}).Matches([]int{1, 2}) // returns false +func InAnyOrder(x any) Matcher { return inAnyOrderMatcher{x} } diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index b18efb743..948a3ee63 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -4,6 +4,9 @@ // Package errgroup provides synchronization, error propagation, and Context // cancelation for groups of goroutines working on subtasks of a common task. +// +// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks +// returning errors. 
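The Regex and InAnyOrder matchers added above can be exercised the same way: Regex accepts strings and byte slices and rejects any other type, while InAnyOrder compares collections ignoring element order. A self-contained example:

package main

import (
	"fmt"

	"go.uber.org/mock/gomock"
)

func main() {
	ts := gomock.Regex(`^[0-9]{2}:[0-9]{2}$`)
	fmt.Println(ts.Matches("23:02"))         // true
	fmt.Println(ts.Matches([]byte("23:02"))) // true
	fmt.Println(ts.Matches(21))              // false: not a string or []byte

	set := gomock.InAnyOrder([]int{1, 2, 3})
	fmt.Println(set.Matches([]int{3, 1, 2})) // true
	fmt.Println(set.Matches([]int{1, 2}))    // false: missing an element
}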
package errgroup import ( diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go index 575456c83..bd6b17e15 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go @@ -226,7 +226,7 @@ type ValidatingAdmissionPolicySpec struct { // +listType=map // +listMapKey=name // +optional - Variables []Variable `json:"variables" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"` + Variables []Variable `json:"variables,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"` } type MatchCondition v1.MatchCondition diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go index c199702fb..12c680dc9 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -242,7 +242,7 @@ type ValidatingAdmissionPolicySpec struct { // +listType=map // +listMapKey=name // +optional - Variables []Variable `json:"variables" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"` + Variables []Variable `json:"variables,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"` } // ParamKind is a tuple of Group Kind and Version. diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto index 4f0822440..f89977988 100644 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -229,8 +229,8 @@ message JobSpec { // batch.kubernetes.io/job-index-failure-count annotation. It can only // be set when Job's completionMode=Indexed, and the Pod's restart // policy is Never. The field is immutable. - // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (disabled by default). + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). // +optional optional int32 backoffLimitPerIndex = 12; @@ -242,8 +242,8 @@ message JobSpec { // It can only be specified when backoffLimitPerIndex is set. // It can be null or up to completions. It is required and must be // less than or equal to 10^4 when is completions greater than 10^5. - // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (disabled by default). + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). // +optional optional int32 maxFailedIndexes = 13; @@ -326,7 +326,8 @@ message JobSpec { // // When using podFailurePolicy, Failed is the the only allowed value. // TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. - // This is an alpha field. Enable JobPodReplacementPolicy to be able to use this field. + // This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. + // This is on by default. // +optional optional string podReplacementPolicy = 14; } @@ -375,8 +376,8 @@ message JobStatus { // The number of pods which are terminating (in phase Pending or Running // and have a deletionTimestamp). // - // This field is alpha-level. The job controller populates the field when - // the feature gate JobPodReplacementPolicy is enabled (disabled by default). 
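The doc comment added to errgroup a few hunks above frames Group as a sync.WaitGroup that also collects errors; a minimal usage sketch (the URLs are placeholders):

package main

import (
	"fmt"
	"net/http"

	"golang.org/x/sync/errgroup"
)

func main() {
	var g errgroup.Group
	urls := []string{"https://example.com", "https://example.org"} // placeholder URLs

	for _, url := range urls {
		url := url // copy the loop variable so each goroutine gets its own
		g.Go(func() error {
			resp, err := http.Get(url)
			if err != nil {
				return err
			}
			return resp.Body.Close()
		})
	}
	// Wait blocks until every goroutine has returned and yields the first
	// non-nil error, if any.
	if err := g.Wait(); err != nil {
		fmt.Println("fetch failed:", err)
	}
}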
+ // This field is beta-level. The job controller populates the field when + // the feature gate JobPodReplacementPolicy is enabled (enabled by default). // +optional optional int32 terminating = 11; @@ -398,8 +399,8 @@ message JobStatus { // last element of the series, separated by a hyphen. // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are // represented as "1,3-5,7". - // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (disabled by default). + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). // +optional optional string failedIndexes = 10; @@ -421,9 +422,6 @@ message JobStatus { optional UncountedTerminatedPods uncountedTerminatedPods = 8; // The number of pods which have a Ready condition. - // - // This field is beta-level. The job controller populates the field when - // the feature gate JobReadyPods is enabled (enabled by default). // +optional optional int32 ready = 9; } @@ -512,8 +510,8 @@ message PodFailurePolicyRule { // running pods are terminated. // - FailIndex: indicates that the pod's index is marked as Failed and will // not be restarted. - // This value is alpha-level. It can be used when the - // `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). + // This value is beta-level. It can be used when the + // `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default). // - Ignore: indicates that the counter towards the .backoffLimit is not // incremented and a replacement pod is created. // - Count: indicates that the pod is handled in the default way - the diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go index 8a28614c0..53fdf3c8d 100644 --- a/vendor/k8s.io/api/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -124,6 +124,7 @@ const ( // This is an action which might be taken on a pod failure - mark the // Job's index as failed to avoid restarts within this index. This action // can only be used when backoffLimitPerIndex is set. + // This value is beta-level. PodFailurePolicyActionFailIndex PodFailurePolicyAction = "FailIndex" // This is an action which might be taken on a pod failure - the counter towards @@ -218,8 +219,8 @@ type PodFailurePolicyRule struct { // running pods are terminated. // - FailIndex: indicates that the pod's index is marked as Failed and will // not be restarted. - // This value is alpha-level. It can be used when the - // `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). + // This value is beta-level. It can be used when the + // `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default). // - Ignore: indicates that the counter towards the .backoffLimit is not // incremented and a replacement pod is created. // - Count: indicates that the pod is handled in the default way - the @@ -303,8 +304,8 @@ type JobSpec struct { // batch.kubernetes.io/job-index-failure-count annotation. It can only // be set when Job's completionMode=Indexed, and the Pod's restart // policy is Never. The field is immutable. - // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (disabled by default). + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). 
// +optional BackoffLimitPerIndex *int32 `json:"backoffLimitPerIndex,omitempty" protobuf:"varint,12,opt,name=backoffLimitPerIndex"` @@ -316,8 +317,8 @@ type JobSpec struct { // It can only be specified when backoffLimitPerIndex is set. // It can be null or up to completions. It is required and must be // less than or equal to 10^4 when is completions greater than 10^5. - // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (disabled by default). + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). // +optional MaxFailedIndexes *int32 `json:"maxFailedIndexes,omitempty" protobuf:"varint,13,opt,name=maxFailedIndexes"` @@ -405,7 +406,8 @@ type JobSpec struct { // // When using podFailurePolicy, Failed is the the only allowed value. // TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. - // This is an alpha field. Enable JobPodReplacementPolicy to be able to use this field. + // This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. + // This is on by default. // +optional PodReplacementPolicy *PodReplacementPolicy `json:"podReplacementPolicy,omitempty" protobuf:"bytes,14,opt,name=podReplacementPolicy,casttype=podReplacementPolicy"` } @@ -454,8 +456,8 @@ type JobStatus struct { // The number of pods which are terminating (in phase Pending or Running // and have a deletionTimestamp). // - // This field is alpha-level. The job controller populates the field when - // the feature gate JobPodReplacementPolicy is enabled (disabled by default). + // This field is beta-level. The job controller populates the field when + // the feature gate JobPodReplacementPolicy is enabled (enabled by default). // +optional Terminating *int32 `json:"terminating,omitempty" protobuf:"varint,11,opt,name=terminating"` @@ -477,8 +479,8 @@ type JobStatus struct { // last element of the series, separated by a hyphen. // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are // represented as "1,3-5,7". - // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (disabled by default). + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). // +optional FailedIndexes *string `json:"failedIndexes,omitempty" protobuf:"bytes,10,opt,name=failedIndexes"` @@ -500,9 +502,6 @@ type JobStatus struct { UncountedTerminatedPods *UncountedTerminatedPods `json:"uncountedTerminatedPods,omitempty" protobuf:"bytes,8,opt,name=uncountedTerminatedPods"` // The number of pods which have a Ready condition. - // - // This field is beta-level. The job controller populates the field when - // the feature gate JobReadyPods is enabled (enabled by default). // +optional Ready *int32 `json:"ready,omitempty" protobuf:"varint,9,opt,name=ready"` } @@ -535,6 +534,25 @@ const ( JobFailureTarget JobConditionType = "FailureTarget" ) +const ( + // JobReasonPodFailurePolicy reason indicates a job failure condition is added due to + // a failed pod matching a pod failure policy rule + // https://kep.k8s.io/3329 + // This is currently a beta field. + JobReasonPodFailurePolicy string = "PodFailurePolicy" + // JobReasonBackOffLimitExceeded reason indicates that pods within a job have failed a number of + // times higher than backOffLimit times. 
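To show how the fields promoted to beta in this hunk fit together, here is a sketch that builds an Indexed Job with per-index retries and the Failed pod replacement policy; the Job and container names are illustrative, and the "Failed" value is taken from the field documentation above.

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func int32Ptr(i int32) *int32 { return &i }

func main() {
	completionMode := batchv1.IndexedCompletion
	replacement := batchv1.PodReplacementPolicy("Failed")

	job := batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "per-index-retries"}, // illustrative name
		Spec: batchv1.JobSpec{
			Completions:          int32Ptr(4),
			Parallelism:          int32Ptr(2),
			CompletionMode:       &completionMode,
			BackoffLimitPerIndex: int32Ptr(2),   // beta: JobBackoffLimitPerIndex
			MaxFailedIndexes:     int32Ptr(1),   // beta: JobBackoffLimitPerIndex
			PodReplacementPolicy: &replacement,  // beta: JobPodReplacementPolicy
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					RestartPolicy: corev1.RestartPolicyNever,
					Containers: []corev1.Container{{
						Name:  "worker",  // illustrative
						Image: "busybox", // illustrative
					}},
				},
			},
		},
	}
	fmt.Println(job.Name, *job.Spec.BackoffLimitPerIndex)
}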
+ JobReasonBackoffLimitExceeded string = "BackoffLimitExceeded" + // JobReasponDeadlineExceeded means job duration is past ActiveDeadline + JobReasonDeadlineExceeded string = "DeadlineExceeded" + // JobReasonMaxFailedIndexesExceeded indicates that an indexed of a job failed + // This const is used in beta-level feature: https://kep.k8s.io/3850. + JobReasonMaxFailedIndexesExceeded string = "MaxFailedIndexesExceeded" + // JobReasonFailedIndexes means Job has failed indexes. + // This const is used in beta-level feature: https://kep.k8s.io/3850. + JobReasonFailedIndexes string = "FailedIndexes" +) + // JobCondition describes current state of a job. type JobCondition struct { // Type of job condition, Complete or Failed. diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index 43b4e1e7d..980f1e475 100644 --- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -117,15 +117,15 @@ var map_JobSpec = map[string]string{ "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.", "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default).", "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6", - "backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", - "maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + "backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. 
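The new JobReason* constants give a stable vocabulary for the Reason of a Job's Failed condition; a small sketch of checking for the backoff-limit case (in real code the Job would come from a client-go read, which is assumed away here):

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
)

// failedDueToBackoff reports whether the Job carries a Failed condition whose
// Reason is the BackoffLimitExceeded constant introduced above.
func failedDueToBackoff(job *batchv1.Job) bool {
	for _, cond := range job.Status.Conditions {
		if cond.Type == batchv1.JobFailed &&
			cond.Status == corev1.ConditionTrue &&
			cond.Reason == batchv1.JobReasonBackoffLimitExceeded {
			return true
		}
	}
	return false
}

func main() {
	job := &batchv1.Job{
		Status: batchv1.JobStatus{
			Conditions: []batchv1.JobCondition{{
				Type:   batchv1.JobFailed,
				Status: corev1.ConditionTrue,
				Reason: batchv1.JobReasonBackoffLimitExceeded,
			}},
		},
	}
	fmt.Println(failedDueToBackoff(job)) // true
}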
It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", + "maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", "selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", "template": "Describes the pod that will be created when executing a job. The only allowed template.spec.restartPolicy values are \"Never\" or \"OnFailure\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.", "completionMode": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. 
If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.", "suspend": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.", - "podReplacementPolicy": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an alpha field. Enable JobPodReplacementPolicy to be able to use this field.", + "podReplacementPolicy": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default.", } func (JobSpec) SwaggerDoc() map[string]string { @@ -140,11 +140,11 @@ var map_JobStatus = map[string]string{ "active": "The number of pending and running pods.", "succeeded": "The number of pods which reached phase Succeeded.", "failed": "The number of pods which reached phase Failed.", - "terminating": "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp).\n\nThis field is alpha-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (disabled by default).", + "terminating": "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp).\n\nThis field is beta-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (enabled by default).", "completedIndexes": "completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", - "failedIndexes": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. 
Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + "failedIndexes": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", "uncountedTerminatedPods": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null.", - "ready": "The number of pods which have a Ready condition.\n\nThis field is beta-level. The job controller populates the field when the feature gate JobReadyPods is enabled (enabled by default).", + "ready": "The number of pods which have a Ready condition.", } func (JobStatus) SwaggerDoc() map[string]string { @@ -193,7 +193,7 @@ func (PodFailurePolicyOnPodConditionsPattern) SwaggerDoc() map[string]string { var map_PodFailurePolicyRule = map[string]string{ "": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of onExitCodes and onPodConditions, but not both, can be used in each rule.", - "action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is alpha-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", + "action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is beta-level. 
It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", "onExitCodes": "Represents the requirement on the container exit codes.", "onPodConditions": "Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. At most 20 elements are allowed.", } diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index c267a5feb..04c7939e0 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -525,10 +525,38 @@ func (m *ClientIPConfig) XXX_DiscardUnknown() { var xxx_messageInfo_ClientIPConfig proto.InternalMessageInfo +func (m *ClusterTrustBundleProjection) Reset() { *m = ClusterTrustBundleProjection{} } +func (*ClusterTrustBundleProjection) ProtoMessage() {} +func (*ClusterTrustBundleProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{17} +} +func (m *ClusterTrustBundleProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterTrustBundleProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterTrustBundleProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterTrustBundleProjection.Merge(m, src) +} +func (m *ClusterTrustBundleProjection) XXX_Size() int { + return m.Size() +} +func (m *ClusterTrustBundleProjection) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterTrustBundleProjection.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterTrustBundleProjection proto.InternalMessageInfo + func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } func (*ComponentCondition) ProtoMessage() {} func (*ComponentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{17} + return fileDescriptor_83c10c24ec417dc9, []int{18} } func (m *ComponentCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -556,7 +584,7 @@ var xxx_messageInfo_ComponentCondition proto.InternalMessageInfo func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } func (*ComponentStatus) ProtoMessage() {} func (*ComponentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{18} + return fileDescriptor_83c10c24ec417dc9, []int{19} } func (m *ComponentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -584,7 +612,7 @@ var xxx_messageInfo_ComponentStatus proto.InternalMessageInfo func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } func (*ComponentStatusList) ProtoMessage() {} func (*ComponentStatusList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{19} + return fileDescriptor_83c10c24ec417dc9, []int{20} } func (m *ComponentStatusList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -612,7 +640,7 @@ var xxx_messageInfo_ComponentStatusList proto.InternalMessageInfo func (m *ConfigMap) Reset() { *m = ConfigMap{} } func 
(*ConfigMap) ProtoMessage() {} func (*ConfigMap) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{20} + return fileDescriptor_83c10c24ec417dc9, []int{21} } func (m *ConfigMap) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -640,7 +668,7 @@ var xxx_messageInfo_ConfigMap proto.InternalMessageInfo func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } func (*ConfigMapEnvSource) ProtoMessage() {} func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{21} + return fileDescriptor_83c10c24ec417dc9, []int{22} } func (m *ConfigMapEnvSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -668,7 +696,7 @@ var xxx_messageInfo_ConfigMapEnvSource proto.InternalMessageInfo func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } func (*ConfigMapKeySelector) ProtoMessage() {} func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{22} + return fileDescriptor_83c10c24ec417dc9, []int{23} } func (m *ConfigMapKeySelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -696,7 +724,7 @@ var xxx_messageInfo_ConfigMapKeySelector proto.InternalMessageInfo func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } func (*ConfigMapList) ProtoMessage() {} func (*ConfigMapList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{23} + return fileDescriptor_83c10c24ec417dc9, []int{24} } func (m *ConfigMapList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +752,7 @@ var xxx_messageInfo_ConfigMapList proto.InternalMessageInfo func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} } func (*ConfigMapNodeConfigSource) ProtoMessage() {} func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{24} + return fileDescriptor_83c10c24ec417dc9, []int{25} } func (m *ConfigMapNodeConfigSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -752,7 +780,7 @@ var xxx_messageInfo_ConfigMapNodeConfigSource proto.InternalMessageInfo func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } func (*ConfigMapProjection) ProtoMessage() {} func (*ConfigMapProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{25} + return fileDescriptor_83c10c24ec417dc9, []int{26} } func (m *ConfigMapProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -780,7 +808,7 @@ var xxx_messageInfo_ConfigMapProjection proto.InternalMessageInfo func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } func (*ConfigMapVolumeSource) ProtoMessage() {} func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{26} + return fileDescriptor_83c10c24ec417dc9, []int{27} } func (m *ConfigMapVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -808,7 +836,7 @@ var xxx_messageInfo_ConfigMapVolumeSource proto.InternalMessageInfo func (m *Container) Reset() { *m = Container{} } func (*Container) ProtoMessage() {} func (*Container) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{27} + return fileDescriptor_83c10c24ec417dc9, []int{28} } func (m *Container) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -836,7 +864,7 @@ var xxx_messageInfo_Container proto.InternalMessageInfo func (m *ContainerImage) Reset() { *m = ContainerImage{} } func (*ContainerImage) ProtoMessage() {} func 
(*ContainerImage) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{28} + return fileDescriptor_83c10c24ec417dc9, []int{29} } func (m *ContainerImage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -864,7 +892,7 @@ var xxx_messageInfo_ContainerImage proto.InternalMessageInfo func (m *ContainerPort) Reset() { *m = ContainerPort{} } func (*ContainerPort) ProtoMessage() {} func (*ContainerPort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{29} + return fileDescriptor_83c10c24ec417dc9, []int{30} } func (m *ContainerPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -892,7 +920,7 @@ var xxx_messageInfo_ContainerPort proto.InternalMessageInfo func (m *ContainerResizePolicy) Reset() { *m = ContainerResizePolicy{} } func (*ContainerResizePolicy) ProtoMessage() {} func (*ContainerResizePolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{30} + return fileDescriptor_83c10c24ec417dc9, []int{31} } func (m *ContainerResizePolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -920,7 +948,7 @@ var xxx_messageInfo_ContainerResizePolicy proto.InternalMessageInfo func (m *ContainerState) Reset() { *m = ContainerState{} } func (*ContainerState) ProtoMessage() {} func (*ContainerState) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{31} + return fileDescriptor_83c10c24ec417dc9, []int{32} } func (m *ContainerState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -948,7 +976,7 @@ var xxx_messageInfo_ContainerState proto.InternalMessageInfo func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } func (*ContainerStateRunning) ProtoMessage() {} func (*ContainerStateRunning) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{32} + return fileDescriptor_83c10c24ec417dc9, []int{33} } func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -976,7 +1004,7 @@ var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } func (*ContainerStateTerminated) ProtoMessage() {} func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{33} + return fileDescriptor_83c10c24ec417dc9, []int{34} } func (m *ContainerStateTerminated) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1004,7 +1032,7 @@ var xxx_messageInfo_ContainerStateTerminated proto.InternalMessageInfo func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } func (*ContainerStateWaiting) ProtoMessage() {} func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{34} + return fileDescriptor_83c10c24ec417dc9, []int{35} } func (m *ContainerStateWaiting) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1032,7 +1060,7 @@ var xxx_messageInfo_ContainerStateWaiting proto.InternalMessageInfo func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } func (*ContainerStatus) ProtoMessage() {} func (*ContainerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{35} + return fileDescriptor_83c10c24ec417dc9, []int{36} } func (m *ContainerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1060,7 +1088,7 @@ var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } func (*DaemonEndpoint) ProtoMessage() {} func 
(*DaemonEndpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{36} + return fileDescriptor_83c10c24ec417dc9, []int{37} } func (m *DaemonEndpoint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1088,7 +1116,7 @@ var xxx_messageInfo_DaemonEndpoint proto.InternalMessageInfo func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } func (*DownwardAPIProjection) ProtoMessage() {} func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{37} + return fileDescriptor_83c10c24ec417dc9, []int{38} } func (m *DownwardAPIProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1116,7 +1144,7 @@ var xxx_messageInfo_DownwardAPIProjection proto.InternalMessageInfo func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } func (*DownwardAPIVolumeFile) ProtoMessage() {} func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{38} + return fileDescriptor_83c10c24ec417dc9, []int{39} } func (m *DownwardAPIVolumeFile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1144,7 +1172,7 @@ var xxx_messageInfo_DownwardAPIVolumeFile proto.InternalMessageInfo func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } func (*DownwardAPIVolumeSource) ProtoMessage() {} func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{39} + return fileDescriptor_83c10c24ec417dc9, []int{40} } func (m *DownwardAPIVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1172,7 +1200,7 @@ var xxx_messageInfo_DownwardAPIVolumeSource proto.InternalMessageInfo func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } func (*EmptyDirVolumeSource) ProtoMessage() {} func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{40} + return fileDescriptor_83c10c24ec417dc9, []int{41} } func (m *EmptyDirVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1200,7 +1228,7 @@ var xxx_messageInfo_EmptyDirVolumeSource proto.InternalMessageInfo func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } func (*EndpointAddress) ProtoMessage() {} func (*EndpointAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{41} + return fileDescriptor_83c10c24ec417dc9, []int{42} } func (m *EndpointAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1228,7 +1256,7 @@ var xxx_messageInfo_EndpointAddress proto.InternalMessageInfo func (m *EndpointPort) Reset() { *m = EndpointPort{} } func (*EndpointPort) ProtoMessage() {} func (*EndpointPort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{42} + return fileDescriptor_83c10c24ec417dc9, []int{43} } func (m *EndpointPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1256,7 +1284,7 @@ var xxx_messageInfo_EndpointPort proto.InternalMessageInfo func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } func (*EndpointSubset) ProtoMessage() {} func (*EndpointSubset) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{43} + return fileDescriptor_83c10c24ec417dc9, []int{44} } func (m *EndpointSubset) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1284,7 +1312,7 @@ var xxx_messageInfo_EndpointSubset proto.InternalMessageInfo func (m *Endpoints) Reset() { *m = Endpoints{} } func (*Endpoints) ProtoMessage() {} func (*Endpoints) Descriptor() 
([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{44} + return fileDescriptor_83c10c24ec417dc9, []int{45} } func (m *Endpoints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1312,7 +1340,7 @@ var xxx_messageInfo_Endpoints proto.InternalMessageInfo func (m *EndpointsList) Reset() { *m = EndpointsList{} } func (*EndpointsList) ProtoMessage() {} func (*EndpointsList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{45} + return fileDescriptor_83c10c24ec417dc9, []int{46} } func (m *EndpointsList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1340,7 +1368,7 @@ var xxx_messageInfo_EndpointsList proto.InternalMessageInfo func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } func (*EnvFromSource) ProtoMessage() {} func (*EnvFromSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{46} + return fileDescriptor_83c10c24ec417dc9, []int{47} } func (m *EnvFromSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1368,7 +1396,7 @@ var xxx_messageInfo_EnvFromSource proto.InternalMessageInfo func (m *EnvVar) Reset() { *m = EnvVar{} } func (*EnvVar) ProtoMessage() {} func (*EnvVar) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{47} + return fileDescriptor_83c10c24ec417dc9, []int{48} } func (m *EnvVar) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1396,7 +1424,7 @@ var xxx_messageInfo_EnvVar proto.InternalMessageInfo func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } func (*EnvVarSource) ProtoMessage() {} func (*EnvVarSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{48} + return fileDescriptor_83c10c24ec417dc9, []int{49} } func (m *EnvVarSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1424,7 +1452,7 @@ var xxx_messageInfo_EnvVarSource proto.InternalMessageInfo func (m *EphemeralContainer) Reset() { *m = EphemeralContainer{} } func (*EphemeralContainer) ProtoMessage() {} func (*EphemeralContainer) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{49} + return fileDescriptor_83c10c24ec417dc9, []int{50} } func (m *EphemeralContainer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1452,7 +1480,7 @@ var xxx_messageInfo_EphemeralContainer proto.InternalMessageInfo func (m *EphemeralContainerCommon) Reset() { *m = EphemeralContainerCommon{} } func (*EphemeralContainerCommon) ProtoMessage() {} func (*EphemeralContainerCommon) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{50} + return fileDescriptor_83c10c24ec417dc9, []int{51} } func (m *EphemeralContainerCommon) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1480,7 +1508,7 @@ var xxx_messageInfo_EphemeralContainerCommon proto.InternalMessageInfo func (m *EphemeralVolumeSource) Reset() { *m = EphemeralVolumeSource{} } func (*EphemeralVolumeSource) ProtoMessage() {} func (*EphemeralVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{51} + return fileDescriptor_83c10c24ec417dc9, []int{52} } func (m *EphemeralVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1508,7 +1536,7 @@ var xxx_messageInfo_EphemeralVolumeSource proto.InternalMessageInfo func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{52} + return fileDescriptor_83c10c24ec417dc9, []int{53} } func (m *Event) XXX_Unmarshal(b []byte) error 
{ return m.Unmarshal(b) @@ -1536,7 +1564,7 @@ var xxx_messageInfo_Event proto.InternalMessageInfo func (m *EventList) Reset() { *m = EventList{} } func (*EventList) ProtoMessage() {} func (*EventList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{53} + return fileDescriptor_83c10c24ec417dc9, []int{54} } func (m *EventList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1564,7 +1592,7 @@ var xxx_messageInfo_EventList proto.InternalMessageInfo func (m *EventSeries) Reset() { *m = EventSeries{} } func (*EventSeries) ProtoMessage() {} func (*EventSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{54} + return fileDescriptor_83c10c24ec417dc9, []int{55} } func (m *EventSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1592,7 +1620,7 @@ var xxx_messageInfo_EventSeries proto.InternalMessageInfo func (m *EventSource) Reset() { *m = EventSource{} } func (*EventSource) ProtoMessage() {} func (*EventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{55} + return fileDescriptor_83c10c24ec417dc9, []int{56} } func (m *EventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1620,7 +1648,7 @@ var xxx_messageInfo_EventSource proto.InternalMessageInfo func (m *ExecAction) Reset() { *m = ExecAction{} } func (*ExecAction) ProtoMessage() {} func (*ExecAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{56} + return fileDescriptor_83c10c24ec417dc9, []int{57} } func (m *ExecAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1648,7 +1676,7 @@ var xxx_messageInfo_ExecAction proto.InternalMessageInfo func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } func (*FCVolumeSource) ProtoMessage() {} func (*FCVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{57} + return fileDescriptor_83c10c24ec417dc9, []int{58} } func (m *FCVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1676,7 +1704,7 @@ var xxx_messageInfo_FCVolumeSource proto.InternalMessageInfo func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } func (*FlexPersistentVolumeSource) ProtoMessage() {} func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{58} + return fileDescriptor_83c10c24ec417dc9, []int{59} } func (m *FlexPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1704,7 +1732,7 @@ var xxx_messageInfo_FlexPersistentVolumeSource proto.InternalMessageInfo func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } func (*FlexVolumeSource) ProtoMessage() {} func (*FlexVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{59} + return fileDescriptor_83c10c24ec417dc9, []int{60} } func (m *FlexVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1732,7 +1760,7 @@ var xxx_messageInfo_FlexVolumeSource proto.InternalMessageInfo func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } func (*FlockerVolumeSource) ProtoMessage() {} func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{60} + return fileDescriptor_83c10c24ec417dc9, []int{61} } func (m *FlockerVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1760,7 +1788,7 @@ var xxx_messageInfo_FlockerVolumeSource proto.InternalMessageInfo func (m *GCEPersistentDiskVolumeSource) Reset() { *m = 
GCEPersistentDiskVolumeSource{} } func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{61} + return fileDescriptor_83c10c24ec417dc9, []int{62} } func (m *GCEPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1788,7 +1816,7 @@ var xxx_messageInfo_GCEPersistentDiskVolumeSource proto.InternalMessageInfo func (m *GRPCAction) Reset() { *m = GRPCAction{} } func (*GRPCAction) ProtoMessage() {} func (*GRPCAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{62} + return fileDescriptor_83c10c24ec417dc9, []int{63} } func (m *GRPCAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1816,7 +1844,7 @@ var xxx_messageInfo_GRPCAction proto.InternalMessageInfo func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } func (*GitRepoVolumeSource) ProtoMessage() {} func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{63} + return fileDescriptor_83c10c24ec417dc9, []int{64} } func (m *GitRepoVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1844,7 +1872,7 @@ var xxx_messageInfo_GitRepoVolumeSource proto.InternalMessageInfo func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} } func (*GlusterfsPersistentVolumeSource) ProtoMessage() {} func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{64} + return fileDescriptor_83c10c24ec417dc9, []int{65} } func (m *GlusterfsPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1872,7 +1900,7 @@ var xxx_messageInfo_GlusterfsPersistentVolumeSource proto.InternalMessageInfo func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } func (*GlusterfsVolumeSource) ProtoMessage() {} func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{65} + return fileDescriptor_83c10c24ec417dc9, []int{66} } func (m *GlusterfsVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1900,7 +1928,7 @@ var xxx_messageInfo_GlusterfsVolumeSource proto.InternalMessageInfo func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } func (*HTTPGetAction) ProtoMessage() {} func (*HTTPGetAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{66} + return fileDescriptor_83c10c24ec417dc9, []int{67} } func (m *HTTPGetAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1928,7 +1956,7 @@ var xxx_messageInfo_HTTPGetAction proto.InternalMessageInfo func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } func (*HTTPHeader) ProtoMessage() {} func (*HTTPHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{67} + return fileDescriptor_83c10c24ec417dc9, []int{68} } func (m *HTTPHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1956,7 +1984,7 @@ var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo func (m *HostAlias) Reset() { *m = HostAlias{} } func (*HostAlias) ProtoMessage() {} func (*HostAlias) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{68} + return fileDescriptor_83c10c24ec417dc9, []int{69} } func (m *HostAlias) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1984,7 +2012,7 @@ var xxx_messageInfo_HostAlias proto.InternalMessageInfo func (m *HostIP) Reset() { *m = HostIP{} } func 
(*HostIP) ProtoMessage() {} func (*HostIP) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{69} + return fileDescriptor_83c10c24ec417dc9, []int{70} } func (m *HostIP) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2012,7 +2040,7 @@ var xxx_messageInfo_HostIP proto.InternalMessageInfo func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } func (*HostPathVolumeSource) ProtoMessage() {} func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{70} + return fileDescriptor_83c10c24ec417dc9, []int{71} } func (m *HostPathVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2040,7 +2068,7 @@ var xxx_messageInfo_HostPathVolumeSource proto.InternalMessageInfo func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } func (*ISCSIPersistentVolumeSource) ProtoMessage() {} func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{71} + return fileDescriptor_83c10c24ec417dc9, []int{72} } func (m *ISCSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2068,7 +2096,7 @@ var xxx_messageInfo_ISCSIPersistentVolumeSource proto.InternalMessageInfo func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } func (*ISCSIVolumeSource) ProtoMessage() {} func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{72} + return fileDescriptor_83c10c24ec417dc9, []int{73} } func (m *ISCSIVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2096,7 +2124,7 @@ var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo func (m *KeyToPath) Reset() { *m = KeyToPath{} } func (*KeyToPath) ProtoMessage() {} func (*KeyToPath) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{73} + return fileDescriptor_83c10c24ec417dc9, []int{74} } func (m *KeyToPath) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2124,7 +2152,7 @@ var xxx_messageInfo_KeyToPath proto.InternalMessageInfo func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} func (*Lifecycle) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{74} + return fileDescriptor_83c10c24ec417dc9, []int{75} } func (m *Lifecycle) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2152,7 +2180,7 @@ var xxx_messageInfo_Lifecycle proto.InternalMessageInfo func (m *LifecycleHandler) Reset() { *m = LifecycleHandler{} } func (*LifecycleHandler) ProtoMessage() {} func (*LifecycleHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{75} + return fileDescriptor_83c10c24ec417dc9, []int{76} } func (m *LifecycleHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2180,7 +2208,7 @@ var xxx_messageInfo_LifecycleHandler proto.InternalMessageInfo func (m *LimitRange) Reset() { *m = LimitRange{} } func (*LimitRange) ProtoMessage() {} func (*LimitRange) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{76} + return fileDescriptor_83c10c24ec417dc9, []int{77} } func (m *LimitRange) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2208,7 +2236,7 @@ var xxx_messageInfo_LimitRange proto.InternalMessageInfo func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } func (*LimitRangeItem) ProtoMessage() {} func (*LimitRangeItem) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{77} + return 
fileDescriptor_83c10c24ec417dc9, []int{78} } func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2236,7 +2264,7 @@ var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } func (*LimitRangeList) ProtoMessage() {} func (*LimitRangeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{78} + return fileDescriptor_83c10c24ec417dc9, []int{79} } func (m *LimitRangeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2264,7 +2292,7 @@ var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } func (*LimitRangeSpec) ProtoMessage() {} func (*LimitRangeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{79} + return fileDescriptor_83c10c24ec417dc9, []int{80} } func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2292,7 +2320,7 @@ var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} func (*List) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{80} + return fileDescriptor_83c10c24ec417dc9, []int{81} } func (m *List) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2320,7 +2348,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } func (*LoadBalancerIngress) ProtoMessage() {} func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{81} + return fileDescriptor_83c10c24ec417dc9, []int{82} } func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2348,7 +2376,7 @@ var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } func (*LoadBalancerStatus) ProtoMessage() {} func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{82} + return fileDescriptor_83c10c24ec417dc9, []int{83} } func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2376,7 +2404,7 @@ var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } func (*LocalObjectReference) ProtoMessage() {} func (*LocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{83} + return fileDescriptor_83c10c24ec417dc9, []int{84} } func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2404,7 +2432,7 @@ var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } func (*LocalVolumeSource) ProtoMessage() {} func (*LocalVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{84} + return fileDescriptor_83c10c24ec417dc9, []int{85} } func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2429,10 +2457,38 @@ func (m *LocalVolumeSource) XXX_DiscardUnknown() { var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo +func (m *ModifyVolumeStatus) Reset() { *m = ModifyVolumeStatus{} } +func (*ModifyVolumeStatus) ProtoMessage() {} +func (*ModifyVolumeStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{86} +} +func (m *ModifyVolumeStatus) XXX_Unmarshal(b []byte) error { + 
return m.Unmarshal(b) +} +func (m *ModifyVolumeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ModifyVolumeStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ModifyVolumeStatus.Merge(m, src) +} +func (m *ModifyVolumeStatus) XXX_Size() int { + return m.Size() +} +func (m *ModifyVolumeStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ModifyVolumeStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ModifyVolumeStatus proto.InternalMessageInfo + func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } func (*NFSVolumeSource) ProtoMessage() {} func (*NFSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{85} + return fileDescriptor_83c10c24ec417dc9, []int{87} } func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2460,7 +2516,7 @@ var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo func (m *Namespace) Reset() { *m = Namespace{} } func (*Namespace) ProtoMessage() {} func (*Namespace) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{86} + return fileDescriptor_83c10c24ec417dc9, []int{88} } func (m *Namespace) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2488,7 +2544,7 @@ var xxx_messageInfo_Namespace proto.InternalMessageInfo func (m *NamespaceCondition) Reset() { *m = NamespaceCondition{} } func (*NamespaceCondition) ProtoMessage() {} func (*NamespaceCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{87} + return fileDescriptor_83c10c24ec417dc9, []int{89} } func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2516,7 +2572,7 @@ var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo func (m *NamespaceList) Reset() { *m = NamespaceList{} } func (*NamespaceList) ProtoMessage() {} func (*NamespaceList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{88} + return fileDescriptor_83c10c24ec417dc9, []int{90} } func (m *NamespaceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2544,7 +2600,7 @@ var xxx_messageInfo_NamespaceList proto.InternalMessageInfo func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } func (*NamespaceSpec) ProtoMessage() {} func (*NamespaceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{89} + return fileDescriptor_83c10c24ec417dc9, []int{91} } func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2572,7 +2628,7 @@ var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } func (*NamespaceStatus) ProtoMessage() {} func (*NamespaceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{90} + return fileDescriptor_83c10c24ec417dc9, []int{92} } func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2600,7 +2656,7 @@ var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo func (m *Node) Reset() { *m = Node{} } func (*Node) ProtoMessage() {} func (*Node) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{91} + return fileDescriptor_83c10c24ec417dc9, []int{93} } func (m *Node) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2628,7 +2684,7 @@ var xxx_messageInfo_Node proto.InternalMessageInfo func (m *NodeAddress) Reset() { *m = NodeAddress{} } func 
(*NodeAddress) ProtoMessage() {} func (*NodeAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{92} + return fileDescriptor_83c10c24ec417dc9, []int{94} } func (m *NodeAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2656,7 +2712,7 @@ var xxx_messageInfo_NodeAddress proto.InternalMessageInfo func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } func (*NodeAffinity) ProtoMessage() {} func (*NodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{93} + return fileDescriptor_83c10c24ec417dc9, []int{95} } func (m *NodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2684,7 +2740,7 @@ var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo func (m *NodeCondition) Reset() { *m = NodeCondition{} } func (*NodeCondition) ProtoMessage() {} func (*NodeCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{94} + return fileDescriptor_83c10c24ec417dc9, []int{96} } func (m *NodeCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2712,7 +2768,7 @@ var xxx_messageInfo_NodeCondition proto.InternalMessageInfo func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } func (*NodeConfigSource) ProtoMessage() {} func (*NodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{95} + return fileDescriptor_83c10c24ec417dc9, []int{97} } func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2740,7 +2796,7 @@ var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } func (*NodeConfigStatus) ProtoMessage() {} func (*NodeConfigStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{96} + return fileDescriptor_83c10c24ec417dc9, []int{98} } func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2768,7 +2824,7 @@ var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } func (*NodeDaemonEndpoints) ProtoMessage() {} func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{97} + return fileDescriptor_83c10c24ec417dc9, []int{99} } func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2796,7 +2852,7 @@ var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo func (m *NodeList) Reset() { *m = NodeList{} } func (*NodeList) ProtoMessage() {} func (*NodeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{98} + return fileDescriptor_83c10c24ec417dc9, []int{100} } func (m *NodeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2824,7 +2880,7 @@ var xxx_messageInfo_NodeList proto.InternalMessageInfo func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } func (*NodeProxyOptions) ProtoMessage() {} func (*NodeProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{99} + return fileDescriptor_83c10c24ec417dc9, []int{101} } func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2852,7 +2908,7 @@ var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo func (m *NodeResources) Reset() { *m = NodeResources{} } func (*NodeResources) ProtoMessage() {} func (*NodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{100} + return 
fileDescriptor_83c10c24ec417dc9, []int{102} } func (m *NodeResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2880,7 +2936,7 @@ var xxx_messageInfo_NodeResources proto.InternalMessageInfo func (m *NodeSelector) Reset() { *m = NodeSelector{} } func (*NodeSelector) ProtoMessage() {} func (*NodeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{101} + return fileDescriptor_83c10c24ec417dc9, []int{103} } func (m *NodeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2908,7 +2964,7 @@ var xxx_messageInfo_NodeSelector proto.InternalMessageInfo func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } func (*NodeSelectorRequirement) ProtoMessage() {} func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{102} + return fileDescriptor_83c10c24ec417dc9, []int{104} } func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2936,7 +2992,7 @@ var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } func (*NodeSelectorTerm) ProtoMessage() {} func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{103} + return fileDescriptor_83c10c24ec417dc9, []int{105} } func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2964,7 +3020,7 @@ var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo func (m *NodeSpec) Reset() { *m = NodeSpec{} } func (*NodeSpec) ProtoMessage() {} func (*NodeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{104} + return fileDescriptor_83c10c24ec417dc9, []int{106} } func (m *NodeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2992,7 +3048,7 @@ var xxx_messageInfo_NodeSpec proto.InternalMessageInfo func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (*NodeStatus) ProtoMessage() {} func (*NodeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{105} + return fileDescriptor_83c10c24ec417dc9, []int{107} } func (m *NodeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3020,7 +3076,7 @@ var xxx_messageInfo_NodeStatus proto.InternalMessageInfo func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (*NodeSystemInfo) ProtoMessage() {} func (*NodeSystemInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{106} + return fileDescriptor_83c10c24ec417dc9, []int{108} } func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3048,7 +3104,7 @@ var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } func (*ObjectFieldSelector) ProtoMessage() {} func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{107} + return fileDescriptor_83c10c24ec417dc9, []int{109} } func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3076,7 +3132,7 @@ var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo func (m *ObjectReference) Reset() { *m = ObjectReference{} } func (*ObjectReference) ProtoMessage() {} func (*ObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{108} + return fileDescriptor_83c10c24ec417dc9, []int{110} } func (m *ObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3104,7 
+3160,7 @@ var xxx_messageInfo_ObjectReference proto.InternalMessageInfo func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (*PersistentVolume) ProtoMessage() {} func (*PersistentVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{109} + return fileDescriptor_83c10c24ec417dc9, []int{111} } func (m *PersistentVolume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3132,7 +3188,7 @@ var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{110} + return fileDescriptor_83c10c24ec417dc9, []int{112} } func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3160,7 +3216,7 @@ var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } func (*PersistentVolumeClaimCondition) ProtoMessage() {} func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{111} + return fileDescriptor_83c10c24ec417dc9, []int{113} } func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3188,7 +3244,7 @@ var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } func (*PersistentVolumeClaimList) ProtoMessage() {} func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{112} + return fileDescriptor_83c10c24ec417dc9, []int{114} } func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3216,7 +3272,7 @@ var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{113} + return fileDescriptor_83c10c24ec417dc9, []int{115} } func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3244,7 +3300,7 @@ var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{114} + return fileDescriptor_83c10c24ec417dc9, []int{116} } func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3272,7 +3328,7 @@ var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo func (m *PersistentVolumeClaimTemplate) Reset() { *m = PersistentVolumeClaimTemplate{} } func (*PersistentVolumeClaimTemplate) ProtoMessage() {} func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{115} + return fileDescriptor_83c10c24ec417dc9, []int{117} } func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3300,7 +3356,7 @@ var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = 
PersistentVolumeClaimVolumeSource{} } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{116} + return fileDescriptor_83c10c24ec417dc9, []int{118} } func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3328,7 +3384,7 @@ var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func (*PersistentVolumeList) ProtoMessage() {} func (*PersistentVolumeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{117} + return fileDescriptor_83c10c24ec417dc9, []int{119} } func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3356,7 +3412,7 @@ var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } func (*PersistentVolumeSource) ProtoMessage() {} func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{118} + return fileDescriptor_83c10c24ec417dc9, []int{120} } func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3384,7 +3440,7 @@ var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } func (*PersistentVolumeSpec) ProtoMessage() {} func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{119} + return fileDescriptor_83c10c24ec417dc9, []int{121} } func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3412,7 +3468,7 @@ var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } func (*PersistentVolumeStatus) ProtoMessage() {} func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{120} + return fileDescriptor_83c10c24ec417dc9, []int{122} } func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3440,7 +3496,7 @@ var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{121} + return fileDescriptor_83c10c24ec417dc9, []int{123} } func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3468,7 +3524,7 @@ var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo func (m *Pod) Reset() { *m = Pod{} } func (*Pod) ProtoMessage() {} func (*Pod) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{122} + return fileDescriptor_83c10c24ec417dc9, []int{124} } func (m *Pod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3496,7 +3552,7 @@ var xxx_messageInfo_Pod proto.InternalMessageInfo func (m *PodAffinity) Reset() { *m = PodAffinity{} } func (*PodAffinity) ProtoMessage() {} func (*PodAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{123} + return fileDescriptor_83c10c24ec417dc9, []int{125} } func (m *PodAffinity) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -3524,7 +3580,7 @@ var xxx_messageInfo_PodAffinity proto.InternalMessageInfo func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (*PodAffinityTerm) ProtoMessage() {} func (*PodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{124} + return fileDescriptor_83c10c24ec417dc9, []int{126} } func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3552,7 +3608,7 @@ var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } func (*PodAntiAffinity) ProtoMessage() {} func (*PodAntiAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{125} + return fileDescriptor_83c10c24ec417dc9, []int{127} } func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3580,7 +3636,7 @@ var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } func (*PodAttachOptions) ProtoMessage() {} func (*PodAttachOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{126} + return fileDescriptor_83c10c24ec417dc9, []int{128} } func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3608,7 +3664,7 @@ var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo func (m *PodCondition) Reset() { *m = PodCondition{} } func (*PodCondition) ProtoMessage() {} func (*PodCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{127} + return fileDescriptor_83c10c24ec417dc9, []int{129} } func (m *PodCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3636,7 +3692,7 @@ var xxx_messageInfo_PodCondition proto.InternalMessageInfo func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } func (*PodDNSConfig) ProtoMessage() {} func (*PodDNSConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{128} + return fileDescriptor_83c10c24ec417dc9, []int{130} } func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3664,7 +3720,7 @@ var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } func (*PodDNSConfigOption) ProtoMessage() {} func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{129} + return fileDescriptor_83c10c24ec417dc9, []int{131} } func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3692,7 +3748,7 @@ var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } func (*PodExecOptions) ProtoMessage() {} func (*PodExecOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{130} + return fileDescriptor_83c10c24ec417dc9, []int{132} } func (m *PodExecOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3720,7 +3776,7 @@ var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo func (m *PodIP) Reset() { *m = PodIP{} } func (*PodIP) ProtoMessage() {} func (*PodIP) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{131} + return fileDescriptor_83c10c24ec417dc9, []int{133} } func (m *PodIP) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3748,7 +3804,7 @@ var xxx_messageInfo_PodIP proto.InternalMessageInfo func (m *PodList) Reset() { *m = PodList{} } func (*PodList) ProtoMessage() {} func (*PodList) 
Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{132} + return fileDescriptor_83c10c24ec417dc9, []int{134} } func (m *PodList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3776,7 +3832,7 @@ var xxx_messageInfo_PodList proto.InternalMessageInfo func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } func (*PodLogOptions) ProtoMessage() {} func (*PodLogOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{133} + return fileDescriptor_83c10c24ec417dc9, []int{135} } func (m *PodLogOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3804,7 +3860,7 @@ var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo func (m *PodOS) Reset() { *m = PodOS{} } func (*PodOS) ProtoMessage() {} func (*PodOS) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{134} + return fileDescriptor_83c10c24ec417dc9, []int{136} } func (m *PodOS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3832,7 +3888,7 @@ var xxx_messageInfo_PodOS proto.InternalMessageInfo func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } func (*PodPortForwardOptions) ProtoMessage() {} func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{135} + return fileDescriptor_83c10c24ec417dc9, []int{137} } func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3860,7 +3916,7 @@ var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } func (*PodProxyOptions) ProtoMessage() {} func (*PodProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{136} + return fileDescriptor_83c10c24ec417dc9, []int{138} } func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3888,7 +3944,7 @@ var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } func (*PodReadinessGate) ProtoMessage() {} func (*PodReadinessGate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{137} + return fileDescriptor_83c10c24ec417dc9, []int{139} } func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3916,7 +3972,7 @@ var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo func (m *PodResourceClaim) Reset() { *m = PodResourceClaim{} } func (*PodResourceClaim) ProtoMessage() {} func (*PodResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{138} + return fileDescriptor_83c10c24ec417dc9, []int{140} } func (m *PodResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3944,7 +4000,7 @@ var xxx_messageInfo_PodResourceClaim proto.InternalMessageInfo func (m *PodResourceClaimStatus) Reset() { *m = PodResourceClaimStatus{} } func (*PodResourceClaimStatus) ProtoMessage() {} func (*PodResourceClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{139} + return fileDescriptor_83c10c24ec417dc9, []int{141} } func (m *PodResourceClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3972,7 +4028,7 @@ var xxx_messageInfo_PodResourceClaimStatus proto.InternalMessageInfo func (m *PodSchedulingGate) Reset() { *m = PodSchedulingGate{} } func (*PodSchedulingGate) ProtoMessage() {} func (*PodSchedulingGate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{140} + return 
fileDescriptor_83c10c24ec417dc9, []int{142} } func (m *PodSchedulingGate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4000,7 +4056,7 @@ var xxx_messageInfo_PodSchedulingGate proto.InternalMessageInfo func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (*PodSecurityContext) ProtoMessage() {} func (*PodSecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{141} + return fileDescriptor_83c10c24ec417dc9, []int{143} } func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4028,7 +4084,7 @@ var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo func (m *PodSignature) Reset() { *m = PodSignature{} } func (*PodSignature) ProtoMessage() {} func (*PodSignature) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{142} + return fileDescriptor_83c10c24ec417dc9, []int{144} } func (m *PodSignature) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4056,7 +4112,7 @@ var xxx_messageInfo_PodSignature proto.InternalMessageInfo func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} func (*PodSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{143} + return fileDescriptor_83c10c24ec417dc9, []int{145} } func (m *PodSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4084,7 +4140,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo func (m *PodStatus) Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} func (*PodStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{144} + return fileDescriptor_83c10c24ec417dc9, []int{146} } func (m *PodStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4112,7 +4168,7 @@ var xxx_messageInfo_PodStatus proto.InternalMessageInfo func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (*PodStatusResult) ProtoMessage() {} func (*PodStatusResult) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{145} + return fileDescriptor_83c10c24ec417dc9, []int{147} } func (m *PodStatusResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4140,7 +4196,7 @@ var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo func (m *PodTemplate) Reset() { *m = PodTemplate{} } func (*PodTemplate) ProtoMessage() {} func (*PodTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{146} + return fileDescriptor_83c10c24ec417dc9, []int{148} } func (m *PodTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4168,7 +4224,7 @@ var xxx_messageInfo_PodTemplate proto.InternalMessageInfo func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } func (*PodTemplateList) ProtoMessage() {} func (*PodTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{147} + return fileDescriptor_83c10c24ec417dc9, []int{149} } func (m *PodTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4196,7 +4252,7 @@ var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} func (*PodTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{148} + return fileDescriptor_83c10c24ec417dc9, []int{150} } func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4224,7 +4280,7 @@ var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo func (m 
*PortStatus) Reset() { *m = PortStatus{} } func (*PortStatus) ProtoMessage() {} func (*PortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{149} + return fileDescriptor_83c10c24ec417dc9, []int{151} } func (m *PortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4252,7 +4308,7 @@ var xxx_messageInfo_PortStatus proto.InternalMessageInfo func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{150} + return fileDescriptor_83c10c24ec417dc9, []int{152} } func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4280,7 +4336,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} func (*Preconditions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{151} + return fileDescriptor_83c10c24ec417dc9, []int{153} } func (m *Preconditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4308,7 +4364,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{152} + return fileDescriptor_83c10c24ec417dc9, []int{154} } func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4336,7 +4392,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{153} + return fileDescriptor_83c10c24ec417dc9, []int{155} } func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4364,7 +4420,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} func (*Probe) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{154} + return fileDescriptor_83c10c24ec417dc9, []int{156} } func (m *Probe) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4392,7 +4448,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo func (m *ProbeHandler) Reset() { *m = ProbeHandler{} } func (*ProbeHandler) ProtoMessage() {} func (*ProbeHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{155} + return fileDescriptor_83c10c24ec417dc9, []int{157} } func (m *ProbeHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4420,7 +4476,7 @@ var xxx_messageInfo_ProbeHandler proto.InternalMessageInfo func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{156} + return fileDescriptor_83c10c24ec417dc9, []int{158} } func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4448,7 +4504,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) 
ProtoMessage() {} func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{157} + return fileDescriptor_83c10c24ec417dc9, []int{159} } func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4476,7 +4532,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } func (*RBDPersistentVolumeSource) ProtoMessage() {} func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{158} + return fileDescriptor_83c10c24ec417dc9, []int{160} } func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4504,7 +4560,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} func (*RBDVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{159} + return fileDescriptor_83c10c24ec417dc9, []int{161} } func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4532,7 +4588,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} func (*RangeAllocation) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{160} + return fileDescriptor_83c10c24ec417dc9, []int{162} } func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4560,7 +4616,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} func (*ReplicationController) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{161} + return fileDescriptor_83c10c24ec417dc9, []int{163} } func (m *ReplicationController) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4588,7 +4644,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{162} + return fileDescriptor_83c10c24ec417dc9, []int{164} } func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4616,7 +4672,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{163} + return fileDescriptor_83c10c24ec417dc9, []int{165} } func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4644,7 +4700,7 @@ var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{164} + return fileDescriptor_83c10c24ec417dc9, []int{166} } func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error 
{ return m.Unmarshal(b) @@ -4672,7 +4728,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{165} + return fileDescriptor_83c10c24ec417dc9, []int{167} } func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4700,7 +4756,7 @@ var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } func (*ResourceClaim) ProtoMessage() {} func (*ResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{166} + return fileDescriptor_83c10c24ec417dc9, []int{168} } func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4728,7 +4784,7 @@ var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{167} + return fileDescriptor_83c10c24ec417dc9, []int{169} } func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4756,7 +4812,7 @@ var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} func (*ResourceQuota) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{168} + return fileDescriptor_83c10c24ec417dc9, []int{170} } func (m *ResourceQuota) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4784,7 +4840,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} func (*ResourceQuotaList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{169} + return fileDescriptor_83c10c24ec417dc9, []int{171} } func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4812,7 +4868,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{170} + return fileDescriptor_83c10c24ec417dc9, []int{172} } func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4840,7 +4896,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{171} + return fileDescriptor_83c10c24ec417dc9, []int{173} } func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4868,7 +4924,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} func (*ResourceRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{172} + return fileDescriptor_83c10c24ec417dc9, []int{174} } func (m 
*ResourceRequirements) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4896,7 +4952,7 @@ var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} func (*SELinuxOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{173} + return fileDescriptor_83c10c24ec417dc9, []int{175} } func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4924,7 +4980,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{174} + return fileDescriptor_83c10c24ec417dc9, []int{176} } func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4952,7 +5008,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{175} + return fileDescriptor_83c10c24ec417dc9, []int{177} } func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4980,7 +5036,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } func (*ScopeSelector) ProtoMessage() {} func (*ScopeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{176} + return fileDescriptor_83c10c24ec417dc9, []int{178} } func (m *ScopeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5008,7 +5064,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } func (*ScopedResourceSelectorRequirement) ProtoMessage() {} func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{177} + return fileDescriptor_83c10c24ec417dc9, []int{179} } func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5036,7 +5092,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo func (m *SeccompProfile) Reset() { *m = SeccompProfile{} } func (*SeccompProfile) ProtoMessage() {} func (*SeccompProfile) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{178} + return fileDescriptor_83c10c24ec417dc9, []int{180} } func (m *SeccompProfile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5064,7 +5120,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} func (*Secret) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{179} + return fileDescriptor_83c10c24ec417dc9, []int{181} } func (m *Secret) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5092,7 +5148,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} func (*SecretEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{180} + return 
fileDescriptor_83c10c24ec417dc9, []int{182} } func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5120,7 +5176,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} func (*SecretKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{181} + return fileDescriptor_83c10c24ec417dc9, []int{183} } func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5148,7 +5204,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} func (*SecretList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{182} + return fileDescriptor_83c10c24ec417dc9, []int{184} } func (m *SecretList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5176,7 +5232,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} func (*SecretProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{183} + return fileDescriptor_83c10c24ec417dc9, []int{185} } func (m *SecretProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5204,7 +5260,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo func (m *SecretReference) Reset() { *m = SecretReference{} } func (*SecretReference) ProtoMessage() {} func (*SecretReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{184} + return fileDescriptor_83c10c24ec417dc9, []int{186} } func (m *SecretReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5232,7 +5288,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} func (*SecretVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{185} + return fileDescriptor_83c10c24ec417dc9, []int{187} } func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5260,7 +5316,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} func (*SecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{186} + return fileDescriptor_83c10c24ec417dc9, []int{188} } func (m *SecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5288,7 +5344,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} func (*SerializedReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{187} + return fileDescriptor_83c10c24ec417dc9, []int{189} } func (m *SerializedReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5316,7 +5372,7 @@ var xxx_messageInfo_SerializedReference proto.InternalMessageInfo func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} func (*Service) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{188} + return fileDescriptor_83c10c24ec417dc9, []int{190} } func (m *Service) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -5344,7 +5400,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} func (*ServiceAccount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{189} + return fileDescriptor_83c10c24ec417dc9, []int{191} } func (m *ServiceAccount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5372,7 +5428,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} func (*ServiceAccountList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{190} + return fileDescriptor_83c10c24ec417dc9, []int{192} } func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5400,7 +5456,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } func (*ServiceAccountTokenProjection) ProtoMessage() {} func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{191} + return fileDescriptor_83c10c24ec417dc9, []int{193} } func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5428,7 +5484,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} func (*ServiceList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{192} + return fileDescriptor_83c10c24ec417dc9, []int{194} } func (m *ServiceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5456,7 +5512,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} func (*ServicePort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{193} + return fileDescriptor_83c10c24ec417dc9, []int{195} } func (m *ServicePort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5484,7 +5540,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{194} + return fileDescriptor_83c10c24ec417dc9, []int{196} } func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5512,7 +5568,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} func (*ServiceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{195} + return fileDescriptor_83c10c24ec417dc9, []int{197} } func (m *ServiceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5540,7 +5596,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} func (*ServiceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{196} + return fileDescriptor_83c10c24ec417dc9, []int{198} } func (m *ServiceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5568,7 +5624,7 @@ var xxx_messageInfo_ServiceStatus 
proto.InternalMessageInfo
 func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} }
 func (*SessionAffinityConfig) ProtoMessage() {}
 func (*SessionAffinityConfig) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{197}
+	return fileDescriptor_83c10c24ec417dc9, []int{199}
 }
 func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5593,10 +5649,38 @@ func (m *SessionAffinityConfig) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo
 
+func (m *SleepAction) Reset() { *m = SleepAction{} }
+func (*SleepAction) ProtoMessage() {}
+func (*SleepAction) Descriptor() ([]byte, []int) {
+	return fileDescriptor_83c10c24ec417dc9, []int{200}
+}
+func (m *SleepAction) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SleepAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SleepAction) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SleepAction.Merge(m, src)
+}
+func (m *SleepAction) XXX_Size() int {
+	return m.Size()
+}
+func (m *SleepAction) XXX_DiscardUnknown() {
+	xxx_messageInfo_SleepAction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SleepAction proto.InternalMessageInfo
+
 func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} }
 func (*StorageOSPersistentVolumeSource) ProtoMessage() {}
 func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{198}
+	return fileDescriptor_83c10c24ec417dc9, []int{201}
 }
 func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5624,7 +5708,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo
 func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} }
 func (*StorageOSVolumeSource) ProtoMessage() {}
 func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{199}
+	return fileDescriptor_83c10c24ec417dc9, []int{202}
 }
 func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5652,7 +5736,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo
 func (m *Sysctl) Reset() { *m = Sysctl{} }
 func (*Sysctl) ProtoMessage() {}
 func (*Sysctl) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{200}
+	return fileDescriptor_83c10c24ec417dc9, []int{203}
 }
 func (m *Sysctl) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5680,7 +5764,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo
 func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} }
 func (*TCPSocketAction) ProtoMessage() {}
 func (*TCPSocketAction) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{201}
+	return fileDescriptor_83c10c24ec417dc9, []int{204}
 }
 func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5708,7 +5792,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo
 func (m *Taint) Reset() { *m = Taint{} }
 func (*Taint) ProtoMessage() {}
 func (*Taint) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{202}
+	return fileDescriptor_83c10c24ec417dc9, []int{205}
 }
 func (m *Taint) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5736,7 +5820,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo
 func (m *Toleration) Reset() { *m = Toleration{} }
 func (*Toleration) ProtoMessage() {}
 func (*Toleration) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{203}
+	return fileDescriptor_83c10c24ec417dc9, []int{206}
 }
 func (m *Toleration) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5764,7 +5848,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo
 func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} }
 func (*TopologySelectorLabelRequirement) ProtoMessage() {}
 func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{204}
+	return fileDescriptor_83c10c24ec417dc9, []int{207}
 }
 func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5792,7 +5876,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo
 func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} }
 func (*TopologySelectorTerm) ProtoMessage() {}
 func (*TopologySelectorTerm) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{205}
+	return fileDescriptor_83c10c24ec417dc9, []int{208}
 }
 func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5820,7 +5904,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo
 func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} }
 func (*TopologySpreadConstraint) ProtoMessage() {}
 func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{206}
+	return fileDescriptor_83c10c24ec417dc9, []int{209}
 }
 func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5848,7 +5932,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo
 func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} }
 func (*TypedLocalObjectReference) ProtoMessage() {}
 func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{207}
+	return fileDescriptor_83c10c24ec417dc9, []int{210}
 }
 func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5876,7 +5960,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo
 func (m *TypedObjectReference) Reset() { *m = TypedObjectReference{} }
 func (*TypedObjectReference) ProtoMessage() {}
 func (*TypedObjectReference) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{208}
+	return fileDescriptor_83c10c24ec417dc9, []int{211}
 }
 func (m *TypedObjectReference) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5904,7 +5988,7 @@ var xxx_messageInfo_TypedObjectReference proto.InternalMessageInfo
 func (m *Volume) Reset() { *m = Volume{} }
 func (*Volume) ProtoMessage() {}
 func (*Volume) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{209}
+	return fileDescriptor_83c10c24ec417dc9, []int{212}
 }
 func (m *Volume) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5932,7 +6016,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo
 func (m *VolumeDevice) Reset() { *m = VolumeDevice{} }
 func (*VolumeDevice) ProtoMessage() {}
 func (*VolumeDevice) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{210}
+	return fileDescriptor_83c10c24ec417dc9, []int{213}
 }
 func (m *VolumeDevice) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5960,7 +6044,7 @@ var
xxx_messageInfo_VolumeDevice proto.InternalMessageInfo
 func (m *VolumeMount) Reset() { *m = VolumeMount{} }
 func (*VolumeMount) ProtoMessage() {}
 func (*VolumeMount) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{211}
+	return fileDescriptor_83c10c24ec417dc9, []int{214}
 }
 func (m *VolumeMount) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5988,7 +6072,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo
 func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} }
 func (*VolumeNodeAffinity) ProtoMessage() {}
 func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{212}
+	return fileDescriptor_83c10c24ec417dc9, []int{215}
 }
 func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6016,7 +6100,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo
 func (m *VolumeProjection) Reset() { *m = VolumeProjection{} }
 func (*VolumeProjection) ProtoMessage() {}
 func (*VolumeProjection) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{213}
+	return fileDescriptor_83c10c24ec417dc9, []int{216}
 }
 func (m *VolumeProjection) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6041,10 +6125,38 @@ func (m *VolumeProjection) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo
 
+func (m *VolumeResourceRequirements) Reset() { *m = VolumeResourceRequirements{} }
+func (*VolumeResourceRequirements) ProtoMessage() {}
+func (*VolumeResourceRequirements) Descriptor() ([]byte, []int) {
+	return fileDescriptor_83c10c24ec417dc9, []int{217}
+}
+func (m *VolumeResourceRequirements) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *VolumeResourceRequirements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *VolumeResourceRequirements) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeResourceRequirements.Merge(m, src)
+}
+func (m *VolumeResourceRequirements) XXX_Size() int {
+	return m.Size()
+}
+func (m *VolumeResourceRequirements) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeResourceRequirements.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeResourceRequirements proto.InternalMessageInfo
+
 func (m *VolumeSource) Reset() { *m = VolumeSource{} }
 func (*VolumeSource) ProtoMessage() {}
 func (*VolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{214}
+	return fileDescriptor_83c10c24ec417dc9, []int{218}
 }
 func (m *VolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6072,7 +6184,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo
 func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} }
 func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {}
 func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{215}
+	return fileDescriptor_83c10c24ec417dc9, []int{219}
 }
 func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6100,7 +6212,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo
 func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} }
 func (*WeightedPodAffinityTerm) ProtoMessage() {}
 func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{216}
+	return fileDescriptor_83c10c24ec417dc9, []int{220}
 }
 func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6128,7 +6240,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo
 func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} }
 func (*WindowsSecurityContextOptions) ProtoMessage() {}
 func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{217}
+	return fileDescriptor_83c10c24ec417dc9, []int{221}
 }
 func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6173,6 +6285,7 @@ func init() {
 	proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource")
 	proto.RegisterType((*ClaimSource)(nil), "k8s.io.api.core.v1.ClaimSource")
 	proto.RegisterType((*ClientIPConfig)(nil), "k8s.io.api.core.v1.ClientIPConfig")
+	proto.RegisterType((*ClusterTrustBundleProjection)(nil), "k8s.io.api.core.v1.ClusterTrustBundleProjection")
 	proto.RegisterType((*ComponentCondition)(nil), "k8s.io.api.core.v1.ComponentCondition")
 	proto.RegisterType((*ComponentStatus)(nil), "k8s.io.api.core.v1.ComponentStatus")
 	proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.api.core.v1.ComponentStatusList")
@@ -6251,6 +6364,7 @@ func init() {
 	proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.api.core.v1.LoadBalancerStatus")
 	proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.api.core.v1.LocalObjectReference")
 	proto.RegisterType((*LocalVolumeSource)(nil), "k8s.io.api.core.v1.LocalVolumeSource")
+	proto.RegisterType((*ModifyVolumeStatus)(nil), "k8s.io.api.core.v1.ModifyVolumeStatus")
 	proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.api.core.v1.NFSVolumeSource")
 	proto.RegisterType((*Namespace)(nil), "k8s.io.api.core.v1.Namespace")
 	proto.RegisterType((*NamespaceCondition)(nil), "k8s.io.api.core.v1.NamespaceCondition")
@@ -6382,6 +6496,7 @@ func init() {
 	proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ServiceSpec.SelectorEntry")
 	proto.RegisterType((*ServiceStatus)(nil), "k8s.io.api.core.v1.ServiceStatus")
 	proto.RegisterType((*SessionAffinityConfig)(nil), "k8s.io.api.core.v1.SessionAffinityConfig")
+	proto.RegisterType((*SleepAction)(nil), "k8s.io.api.core.v1.SleepAction")
 	proto.RegisterType((*StorageOSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSPersistentVolumeSource")
 	proto.RegisterType((*StorageOSVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSVolumeSource")
 	proto.RegisterType((*Sysctl)(nil), "k8s.io.api.core.v1.Sysctl")
@@ -6398,6 +6513,9 @@ func init() {
 	proto.RegisterType((*VolumeMount)(nil), "k8s.io.api.core.v1.VolumeMount")
 	proto.RegisterType((*VolumeNodeAffinity)(nil), "k8s.io.api.core.v1.VolumeNodeAffinity")
 	proto.RegisterType((*VolumeProjection)(nil), "k8s.io.api.core.v1.VolumeProjection")
+	proto.RegisterType((*VolumeResourceRequirements)(nil), "k8s.io.api.core.v1.VolumeResourceRequirements")
+	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.VolumeResourceRequirements.LimitsEntry")
+	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.VolumeResourceRequirements.RequestsEntry")
 	proto.RegisterType((*VolumeSource)(nil), "k8s.io.api.core.v1.VolumeSource")
 	proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.api.core.v1.VsphereVirtualDiskVolumeSource")
 	proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.api.core.v1.WeightedPodAffinityTerm")
@@ -6409,934 +6527,974 @@ func init() {
 }
 
 var
fileDescriptor_83c10c24ec417dc9 = []byte{ - // 14822 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x70, 0x24, 0xc9, - 0x75, 0x18, 0xcc, 0xea, 0xc6, 0xd5, 0x0f, 0x77, 0x62, 0x0e, 0x0c, 0x76, 0x66, 0x7a, 0xb6, 0x76, - 0x77, 0x76, 0xf6, 0xc2, 0x70, 0xf6, 0x20, 0x97, 0xbb, 0xe4, 0x8a, 0x38, 0x67, 0xb0, 0x03, 0x60, - 0x7a, 0xb3, 0x31, 0x33, 0xe4, 0x72, 0xc9, 0x60, 0xa1, 0x3b, 0x01, 0x14, 0xd1, 0xa8, 0xea, 0xad, - 0xaa, 0xc6, 0x0c, 0xe6, 0x23, 0x43, 0x12, 0xf5, 0xe9, 0xa0, 0xa4, 0xef, 0x0b, 0xc6, 0x17, 0xfa, - 0x8e, 0xa0, 0x14, 0x8a, 0x2f, 0x24, 0x59, 0x87, 0x69, 0xd9, 0xa6, 0x29, 0x4b, 0xb2, 0xa8, 0xcb, - 0x57, 0x58, 0x72, 0x38, 0x64, 0x59, 0x11, 0x16, 0x15, 0xa1, 0x30, 0x24, 0x8e, 0x1c, 0x21, 0x2b, - 0xc2, 0x96, 0xe4, 0xe3, 0x87, 0x0d, 0xcb, 0x96, 0x23, 0xcf, 0xca, 0xac, 0xa3, 0xbb, 0x31, 0x8b, - 0x01, 0x97, 0x8c, 0xfd, 0xd7, 0xfd, 0xde, 0xcb, 0x97, 0x59, 0x79, 0xbe, 0x7c, 0xef, 0xe5, 0x7b, - 0xf0, 0xea, 0xf6, 0xcb, 0xe1, 0xb4, 0xeb, 0x5f, 0xde, 0x6e, 0xad, 0x93, 0xc0, 0x23, 0x11, 0x09, - 0x2f, 0xef, 0x12, 0xaf, 0xee, 0x07, 0x97, 0x05, 0xc2, 0x69, 0xba, 0x97, 0x6b, 0x7e, 0x40, 0x2e, - 0xef, 0x5e, 0xb9, 0xbc, 0x49, 0x3c, 0x12, 0x38, 0x11, 0xa9, 0x4f, 0x37, 0x03, 0x3f, 0xf2, 0x11, - 0xe2, 0x34, 0xd3, 0x4e, 0xd3, 0x9d, 0xa6, 0x34, 0xd3, 0xbb, 0x57, 0xa6, 0x9e, 0xdb, 0x74, 0xa3, - 0xad, 0xd6, 0xfa, 0x74, 0xcd, 0xdf, 0xb9, 0xbc, 0xe9, 0x6f, 0xfa, 0x97, 0x19, 0xe9, 0x7a, 0x6b, - 0x83, 0xfd, 0x63, 0x7f, 0xd8, 0x2f, 0xce, 0x62, 0xea, 0xc5, 0xb8, 0x9a, 0x1d, 0xa7, 0xb6, 0xe5, - 0x7a, 0x24, 0xd8, 0xbb, 0xdc, 0xdc, 0xde, 0x64, 0xf5, 0x06, 0x24, 0xf4, 0x5b, 0x41, 0x8d, 0x24, - 0x2b, 0x6e, 0x5b, 0x2a, 0xbc, 0xbc, 0x43, 0x22, 0x27, 0xa3, 0xb9, 0x53, 0x97, 0xf3, 0x4a, 0x05, - 0x2d, 0x2f, 0x72, 0x77, 0xd2, 0xd5, 0x7c, 0xa0, 0x53, 0x81, 0xb0, 0xb6, 0x45, 0x76, 0x9c, 0x54, - 0xb9, 0x17, 0xf2, 0xca, 0xb5, 0x22, 0xb7, 0x71, 0xd9, 0xf5, 0xa2, 0x30, 0x0a, 0x92, 0x85, 0xec, - 0xaf, 0x5b, 0x70, 0x61, 0xe6, 0x76, 0x75, 0xa1, 0xe1, 0x84, 0x91, 0x5b, 0x9b, 0x6d, 0xf8, 0xb5, - 0xed, 0x6a, 0xe4, 0x07, 0xe4, 0x96, 0xdf, 0x68, 0xed, 0x90, 0x2a, 0xeb, 0x08, 0xf4, 0x2c, 0x0c, - 0xec, 0xb2, 0xff, 0x4b, 0xf3, 0x93, 0xd6, 0x05, 0xeb, 0x52, 0x69, 0x76, 0xec, 0xb7, 0xf6, 0xcb, - 0xef, 0xbb, 0xbf, 0x5f, 0x1e, 0xb8, 0x25, 0xe0, 0x58, 0x51, 0xa0, 0x8b, 0xd0, 0xb7, 0x11, 0xae, - 0xed, 0x35, 0xc9, 0x64, 0x81, 0xd1, 0x8e, 0x08, 0xda, 0xbe, 0xc5, 0x2a, 0x85, 0x62, 0x81, 0x45, - 0x97, 0xa1, 0xd4, 0x74, 0x82, 0xc8, 0x8d, 0x5c, 0xdf, 0x9b, 0x2c, 0x5e, 0xb0, 0x2e, 0xf5, 0xce, - 0x8e, 0x0b, 0xd2, 0x52, 0x45, 0x22, 0x70, 0x4c, 0x43, 0x9b, 0x11, 0x10, 0xa7, 0x7e, 0xc3, 0x6b, - 0xec, 0x4d, 0xf6, 0x5c, 0xb0, 0x2e, 0x0d, 0xc4, 0xcd, 0xc0, 0x02, 0x8e, 0x15, 0x85, 0xfd, 0xa5, - 0x02, 0x0c, 0xcc, 0x6c, 0x6c, 0xb8, 0x9e, 0x1b, 0xed, 0xa1, 0x5b, 0x30, 0xe4, 0xf9, 0x75, 0x22, - 0xff, 0xb3, 0xaf, 0x18, 0x7c, 0xfe, 0xc2, 0x74, 0x7a, 0x2a, 0x4d, 0xaf, 0x6a, 0x74, 0xb3, 0x63, - 0xf7, 0xf7, 0xcb, 0x43, 0x3a, 0x04, 0x1b, 0x7c, 0x10, 0x86, 0xc1, 0xa6, 0x5f, 0x57, 0x6c, 0x0b, - 0x8c, 0x6d, 0x39, 0x8b, 0x6d, 0x25, 0x26, 0x9b, 0x1d, 0xbd, 0xbf, 0x5f, 0x1e, 0xd4, 0x00, 0x58, - 0x67, 0x82, 0xd6, 0x61, 0x94, 0xfe, 0xf5, 0x22, 0x57, 0xf1, 0x2d, 0x32, 0xbe, 0x8f, 0xe5, 0xf1, - 0xd5, 0x48, 0x67, 0x27, 0xee, 0xef, 0x97, 0x47, 0x13, 0x40, 0x9c, 0x64, 0x68, 0xdf, 0x83, 0x91, - 0x99, 0x28, 0x72, 0x6a, 0x5b, 0xa4, 0xce, 0x47, 0x10, 0xbd, 0x08, 0x3d, 0x9e, 0xb3, 0x43, 0xc4, - 0xf8, 0x5e, 0x10, 0x1d, 0xdb, 0xb3, 0xea, 0xec, 0x90, 0x83, 0xfd, 0xf2, 0xd8, 0x4d, 0xcf, 0x7d, - 0xbb, 0x25, 0x66, 0x05, 0x85, 
0x61, 0x46, 0x8d, 0x9e, 0x07, 0xa8, 0x93, 0x5d, 0xb7, 0x46, 0x2a, - 0x4e, 0xb4, 0x25, 0xc6, 0x1b, 0x89, 0xb2, 0x30, 0xaf, 0x30, 0x58, 0xa3, 0xb2, 0xef, 0x42, 0x69, - 0x66, 0xd7, 0x77, 0xeb, 0x15, 0xbf, 0x1e, 0xa2, 0x6d, 0x18, 0x6d, 0x06, 0x64, 0x83, 0x04, 0x0a, - 0x34, 0x69, 0x5d, 0x28, 0x5e, 0x1a, 0x7c, 0xfe, 0x52, 0xe6, 0xc7, 0x9a, 0xa4, 0x0b, 0x5e, 0x14, - 0xec, 0xcd, 0x9e, 0x16, 0xf5, 0x8d, 0x26, 0xb0, 0x38, 0xc9, 0xd9, 0xfe, 0x27, 0x05, 0x38, 0x39, - 0x73, 0xaf, 0x15, 0x90, 0x79, 0x37, 0xdc, 0x4e, 0xce, 0xf0, 0xba, 0x1b, 0x6e, 0xaf, 0xc6, 0x3d, - 0xa0, 0xa6, 0xd6, 0xbc, 0x80, 0x63, 0x45, 0x81, 0x9e, 0x83, 0x7e, 0xfa, 0xfb, 0x26, 0x5e, 0x12, - 0x9f, 0x3c, 0x21, 0x88, 0x07, 0xe7, 0x9d, 0xc8, 0x99, 0xe7, 0x28, 0x2c, 0x69, 0xd0, 0x0a, 0x0c, - 0xd6, 0xd8, 0x82, 0xdc, 0x5c, 0xf1, 0xeb, 0x84, 0x0d, 0x66, 0x69, 0xf6, 0x19, 0x4a, 0x3e, 0x17, - 0x83, 0x0f, 0xf6, 0xcb, 0x93, 0xbc, 0x6d, 0x82, 0x85, 0x86, 0xc3, 0x7a, 0x79, 0x64, 0xab, 0xf5, - 0xd5, 0xc3, 0x38, 0x41, 0xc6, 0xda, 0xba, 0xa4, 0x2d, 0x95, 0x5e, 0xb6, 0x54, 0x86, 0xb2, 0x97, - 0x09, 0xba, 0x02, 0x3d, 0xdb, 0xae, 0x57, 0x9f, 0xec, 0x63, 0xbc, 0xce, 0xd1, 0x31, 0xbf, 0xee, - 0x7a, 0xf5, 0x83, 0xfd, 0xf2, 0xb8, 0xd1, 0x1c, 0x0a, 0xc4, 0x8c, 0xd4, 0xfe, 0xcf, 0x16, 0x94, - 0x19, 0x6e, 0xd1, 0x6d, 0x90, 0x0a, 0x09, 0x42, 0x37, 0x8c, 0x88, 0x17, 0x19, 0x1d, 0xfa, 0x3c, - 0x40, 0x48, 0x6a, 0x01, 0x89, 0xb4, 0x2e, 0x55, 0x13, 0xa3, 0xaa, 0x30, 0x58, 0xa3, 0xa2, 0x1b, - 0x42, 0xb8, 0xe5, 0x04, 0x6c, 0x7e, 0x89, 0x8e, 0x55, 0x1b, 0x42, 0x55, 0x22, 0x70, 0x4c, 0x63, - 0x6c, 0x08, 0xc5, 0x4e, 0x1b, 0x02, 0xfa, 0x08, 0x8c, 0xc6, 0x95, 0x85, 0x4d, 0xa7, 0x26, 0x3b, - 0x90, 0x2d, 0x99, 0xaa, 0x89, 0xc2, 0x49, 0x5a, 0xfb, 0x6f, 0x5a, 0x62, 0xf2, 0xd0, 0xaf, 0x7e, - 0x97, 0x7f, 0xab, 0xfd, 0xcb, 0x16, 0xf4, 0xcf, 0xba, 0x5e, 0xdd, 0xf5, 0x36, 0xd1, 0xa7, 0x61, - 0x80, 0x9e, 0x4d, 0x75, 0x27, 0x72, 0xc4, 0xbe, 0xf7, 0x7e, 0x6d, 0x6d, 0xa9, 0xa3, 0x62, 0xba, - 0xb9, 0xbd, 0x49, 0x01, 0xe1, 0x34, 0xa5, 0xa6, 0xab, 0xed, 0xc6, 0xfa, 0x67, 0x48, 0x2d, 0x5a, - 0x21, 0x91, 0x13, 0x7f, 0x4e, 0x0c, 0xc3, 0x8a, 0x2b, 0xba, 0x0e, 0x7d, 0x91, 0x13, 0x6c, 0x92, - 0x48, 0x6c, 0x80, 0x99, 0x1b, 0x15, 0x2f, 0x89, 0xe9, 0x8a, 0x24, 0x5e, 0x8d, 0xc4, 0xc7, 0xc2, - 0x1a, 0x2b, 0x8a, 0x05, 0x0b, 0xfb, 0x7f, 0xf4, 0xc3, 0x99, 0xb9, 0xea, 0x52, 0xce, 0xbc, 0xba, - 0x08, 0x7d, 0xf5, 0xc0, 0xdd, 0x25, 0x81, 0xe8, 0x67, 0xc5, 0x65, 0x9e, 0x41, 0xb1, 0xc0, 0xa2, - 0x97, 0x61, 0x88, 0x1f, 0x48, 0xd7, 0x1c, 0xaf, 0xde, 0x90, 0x5d, 0x7c, 0x42, 0x50, 0x0f, 0xdd, - 0xd2, 0x70, 0xd8, 0xa0, 0x3c, 0xe4, 0xa4, 0xba, 0x98, 0x58, 0x8c, 0x79, 0x87, 0xdd, 0x17, 0x2c, - 0x18, 0xe3, 0xd5, 0xcc, 0x44, 0x51, 0xe0, 0xae, 0xb7, 0x22, 0x12, 0x4e, 0xf6, 0xb2, 0x9d, 0x6e, - 0x2e, 0xab, 0xb7, 0x72, 0x7b, 0x60, 0xfa, 0x56, 0x82, 0x0b, 0xdf, 0x04, 0x27, 0x45, 0xbd, 0x63, - 0x49, 0x34, 0x4e, 0x55, 0x8b, 0xbe, 0xc7, 0x82, 0xa9, 0x9a, 0xef, 0x45, 0x81, 0xdf, 0x68, 0x90, - 0xa0, 0xd2, 0x5a, 0x6f, 0xb8, 0xe1, 0x16, 0x9f, 0xa7, 0x98, 0x6c, 0xb0, 0x9d, 0x20, 0x67, 0x0c, - 0x15, 0x91, 0x18, 0xc3, 0xf3, 0xf7, 0xf7, 0xcb, 0x53, 0x73, 0xb9, 0xac, 0x70, 0x9b, 0x6a, 0xd0, - 0x36, 0x20, 0x7a, 0x94, 0x56, 0x23, 0x67, 0x93, 0xc4, 0x95, 0xf7, 0x77, 0x5f, 0xf9, 0xa9, 0xfb, - 0xfb, 0x65, 0xb4, 0x9a, 0x62, 0x81, 0x33, 0xd8, 0xa2, 0xb7, 0xe1, 0x04, 0x85, 0xa6, 0xbe, 0x75, - 0xa0, 0xfb, 0xea, 0x26, 0xef, 0xef, 0x97, 0x4f, 0xac, 0x66, 0x30, 0xc1, 0x99, 0xac, 0xd1, 0x77, - 0x59, 0x70, 0x26, 0xfe, 0xfc, 0x85, 0xbb, 0x4d, 0xc7, 0xab, 0xc7, 0x15, 0x97, 0xba, 0xaf, 0x98, - 0xee, 0xc9, 0x67, 0xe6, 0xf2, 0x38, 0xe1, 0xfc, 0x4a, 
0x90, 0x07, 0x13, 0xb4, 0x69, 0xc9, 0xba, - 0xa1, 0xfb, 0xba, 0x4f, 0xdf, 0xdf, 0x2f, 0x4f, 0xac, 0xa6, 0x79, 0xe0, 0x2c, 0xc6, 0x53, 0x73, - 0x70, 0x32, 0x73, 0x76, 0xa2, 0x31, 0x28, 0x6e, 0x13, 0x2e, 0x75, 0x95, 0x30, 0xfd, 0x89, 0x4e, - 0x40, 0xef, 0xae, 0xd3, 0x68, 0x89, 0x85, 0x89, 0xf9, 0x9f, 0x57, 0x0a, 0x2f, 0x5b, 0xf6, 0x3f, - 0x2d, 0xc2, 0xe8, 0x5c, 0x75, 0xe9, 0x81, 0x56, 0xbd, 0x7e, 0xec, 0x15, 0xda, 0x1e, 0x7b, 0xf1, - 0x21, 0x5a, 0xcc, 0x3d, 0x44, 0xbf, 0x33, 0x63, 0xc9, 0xf6, 0xb0, 0x25, 0xfb, 0xa1, 0x9c, 0x25, - 0x7b, 0xc4, 0x0b, 0x75, 0x37, 0x67, 0xd6, 0xf6, 0xb2, 0x01, 0xcc, 0x94, 0x90, 0x96, 0xfd, 0x9a, - 0xd3, 0x48, 0x6e, 0xb5, 0x87, 0x9c, 0xba, 0x47, 0x33, 0x8e, 0x35, 0x18, 0x9a, 0x73, 0x9a, 0xce, - 0xba, 0xdb, 0x70, 0x23, 0x97, 0x84, 0xe8, 0x49, 0x28, 0x3a, 0xf5, 0x3a, 0x93, 0xee, 0x4a, 0xb3, - 0x27, 0xef, 0xef, 0x97, 0x8b, 0x33, 0x75, 0x2a, 0x66, 0x80, 0xa2, 0xda, 0xc3, 0x94, 0x02, 0x3d, - 0x0d, 0x3d, 0xf5, 0xc0, 0x6f, 0x4e, 0x16, 0x18, 0x25, 0x5d, 0xe5, 0x3d, 0xf3, 0x81, 0xdf, 0x4c, - 0x90, 0x32, 0x1a, 0xfb, 0x37, 0x0b, 0x70, 0x76, 0x8e, 0x34, 0xb7, 0x16, 0xab, 0x39, 0xe7, 0xc5, - 0x25, 0x18, 0xd8, 0xf1, 0x3d, 0x37, 0xf2, 0x83, 0x50, 0x54, 0xcd, 0x66, 0xc4, 0x8a, 0x80, 0x61, - 0x85, 0x45, 0x17, 0xa0, 0xa7, 0x19, 0x0b, 0xb1, 0x43, 0x52, 0x00, 0x66, 0xe2, 0x2b, 0xc3, 0x50, - 0x8a, 0x56, 0x48, 0x02, 0x31, 0x63, 0x14, 0xc5, 0xcd, 0x90, 0x04, 0x98, 0x61, 0x62, 0x49, 0x80, - 0xca, 0x08, 0xe2, 0x44, 0x48, 0x48, 0x02, 0x14, 0x83, 0x35, 0x2a, 0x54, 0x81, 0x52, 0x98, 0x18, - 0xd9, 0xae, 0x96, 0xe6, 0x30, 0x13, 0x15, 0xd4, 0x48, 0xc6, 0x4c, 0x8c, 0x13, 0xac, 0xaf, 0xa3, - 0xa8, 0xf0, 0xb5, 0x02, 0x20, 0xde, 0x85, 0xdf, 0x62, 0x1d, 0x77, 0x33, 0xdd, 0x71, 0xdd, 0x2f, - 0x89, 0xa3, 0xea, 0xbd, 0xff, 0x62, 0xc1, 0xd9, 0x39, 0xd7, 0xab, 0x93, 0x20, 0x67, 0x02, 0x3e, - 0x9c, 0xbb, 0xf3, 0xe1, 0x84, 0x14, 0x63, 0x8a, 0xf5, 0x1c, 0xc1, 0x14, 0xb3, 0xff, 0xc2, 0x02, - 0xc4, 0x3f, 0xfb, 0x5d, 0xf7, 0xb1, 0x37, 0xd3, 0x1f, 0x7b, 0x04, 0xd3, 0xc2, 0xfe, 0x3b, 0x16, - 0x0c, 0xce, 0x35, 0x1c, 0x77, 0x47, 0x7c, 0xea, 0x1c, 0x8c, 0x4b, 0x45, 0x11, 0x03, 0x6b, 0xb2, - 0x3f, 0xdd, 0xdc, 0xc6, 0x71, 0x12, 0x89, 0xd3, 0xf4, 0xe8, 0x13, 0x70, 0xc6, 0x00, 0xae, 0x91, - 0x9d, 0x66, 0xc3, 0x89, 0xf4, 0x5b, 0x01, 0x3b, 0xfd, 0x71, 0x1e, 0x11, 0xce, 0x2f, 0x6f, 0x2f, - 0xc3, 0xc8, 0x5c, 0xc3, 0x25, 0x5e, 0xb4, 0x54, 0x99, 0xf3, 0xbd, 0x0d, 0x77, 0x13, 0xbd, 0x02, - 0x23, 0x91, 0xbb, 0x43, 0xfc, 0x56, 0x54, 0x25, 0x35, 0xdf, 0x63, 0x77, 0x6d, 0xeb, 0x52, 0xef, - 0x2c, 0xba, 0xbf, 0x5f, 0x1e, 0x59, 0x33, 0x30, 0x38, 0x41, 0x69, 0xff, 0x21, 0x1d, 0x71, 0x7f, - 0xa7, 0xe9, 0x7b, 0xc4, 0x8b, 0xe6, 0x7c, 0xaf, 0xce, 0x75, 0x32, 0xaf, 0x40, 0x4f, 0x44, 0x47, - 0x90, 0x7f, 0xf9, 0x45, 0xb9, 0xb4, 0xe9, 0xb8, 0x1d, 0xec, 0x97, 0x4f, 0xa5, 0x4b, 0xb0, 0x91, - 0x65, 0x65, 0xd0, 0x87, 0xa0, 0x2f, 0x8c, 0x9c, 0xa8, 0x15, 0x8a, 0x4f, 0x7d, 0x54, 0x8e, 0x7f, - 0x95, 0x41, 0x0f, 0xf6, 0xcb, 0xa3, 0xaa, 0x18, 0x07, 0x61, 0x51, 0x00, 0x3d, 0x05, 0xfd, 0x3b, - 0x24, 0x0c, 0x9d, 0x4d, 0x79, 0x7e, 0x8f, 0x8a, 0xb2, 0xfd, 0x2b, 0x1c, 0x8c, 0x25, 0x1e, 0x3d, - 0x06, 0xbd, 0x24, 0x08, 0xfc, 0x40, 0xec, 0x2a, 0xc3, 0x82, 0xb0, 0x77, 0x81, 0x02, 0x31, 0xc7, - 0xd9, 0xff, 0xd2, 0x82, 0x51, 0xd5, 0x56, 0x5e, 0xd7, 0x31, 0xdc, 0x9b, 0xde, 0x04, 0xa8, 0xc9, - 0x0f, 0x0c, 0xd9, 0x79, 0x37, 0xf8, 0xfc, 0xc5, 0x4c, 0xd1, 0x22, 0xd5, 0x8d, 0x31, 0x67, 0x05, - 0x0a, 0xb1, 0xc6, 0xcd, 0xfe, 0x35, 0x0b, 0x26, 0x12, 0x5f, 0xb4, 0xec, 0x86, 0x11, 0x7a, 0x2b, - 0xf5, 0x55, 0xd3, 0xdd, 0x7d, 0x15, 0x2d, 0xcd, 0xbe, 0x49, 0x2d, 0x3e, 0x09, 
0xd1, 0xbe, 0xe8, - 0x1a, 0xf4, 0xba, 0x11, 0xd9, 0x91, 0x1f, 0xf3, 0x58, 0xdb, 0x8f, 0xe1, 0xad, 0x8a, 0x47, 0x64, - 0x89, 0x96, 0xc4, 0x9c, 0x81, 0xfd, 0x9b, 0x45, 0x28, 0xf1, 0x69, 0xbb, 0xe2, 0x34, 0x8f, 0x61, - 0x2c, 0x9e, 0x81, 0x92, 0xbb, 0xb3, 0xd3, 0x8a, 0x9c, 0x75, 0x71, 0x00, 0x0d, 0xf0, 0xcd, 0x60, - 0x49, 0x02, 0x71, 0x8c, 0x47, 0x4b, 0xd0, 0xc3, 0x9a, 0xc2, 0xbf, 0xf2, 0xc9, 0xec, 0xaf, 0x14, - 0x6d, 0x9f, 0x9e, 0x77, 0x22, 0x87, 0xcb, 0x7e, 0xea, 0xe4, 0xa3, 0x20, 0xcc, 0x58, 0x20, 0x07, - 0x60, 0xdd, 0xf5, 0x9c, 0x60, 0x8f, 0xc2, 0x26, 0x8b, 0x8c, 0xe1, 0x73, 0xed, 0x19, 0xce, 0x2a, - 0x7a, 0xce, 0x56, 0x7d, 0x58, 0x8c, 0xc0, 0x1a, 0xd3, 0xa9, 0x0f, 0x42, 0x49, 0x11, 0x1f, 0x46, - 0x84, 0x9b, 0xfa, 0x08, 0x8c, 0x26, 0xea, 0xea, 0x54, 0x7c, 0x48, 0x97, 0x00, 0x7f, 0x85, 0x6d, - 0x19, 0xa2, 0xd5, 0x0b, 0xde, 0xae, 0xd8, 0x39, 0xef, 0xc1, 0x89, 0x46, 0xc6, 0xde, 0x2b, 0xc6, - 0xb5, 0xfb, 0xbd, 0xfa, 0xac, 0xf8, 0xec, 0x13, 0x59, 0x58, 0x9c, 0x59, 0x07, 0x95, 0x6a, 0xfc, - 0x26, 0x5d, 0x20, 0x4e, 0x43, 0xbf, 0x20, 0xdc, 0x10, 0x30, 0xac, 0xb0, 0x74, 0xbf, 0x3b, 0xa1, - 0x1a, 0x7f, 0x9d, 0xec, 0x55, 0x49, 0x83, 0xd4, 0x22, 0x3f, 0xf8, 0xa6, 0x36, 0xff, 0x1c, 0xef, - 0x7d, 0xbe, 0x5d, 0x0e, 0x0a, 0x06, 0xc5, 0xeb, 0x64, 0x8f, 0x0f, 0x85, 0xfe, 0x75, 0xc5, 0xb6, - 0x5f, 0xf7, 0x15, 0x0b, 0x86, 0xd5, 0xd7, 0x1d, 0xc3, 0xbe, 0x30, 0x6b, 0xee, 0x0b, 0xe7, 0xda, - 0x4e, 0xf0, 0x9c, 0x1d, 0xe1, 0x6b, 0x05, 0x38, 0xa3, 0x68, 0xe8, 0x6d, 0x86, 0xff, 0x11, 0xb3, - 0xea, 0x32, 0x94, 0x3c, 0xa5, 0xd7, 0xb3, 0x4c, 0x85, 0x5a, 0xac, 0xd5, 0x8b, 0x69, 0xa8, 0x50, - 0xea, 0xc5, 0xc7, 0xec, 0x90, 0xae, 0xf0, 0x16, 0xca, 0xed, 0x59, 0x28, 0xb6, 0xdc, 0xba, 0x38, - 0x60, 0xde, 0x2f, 0x7b, 0xfb, 0xe6, 0xd2, 0xfc, 0xc1, 0x7e, 0xf9, 0xd1, 0x3c, 0x63, 0x0b, 0x3d, - 0xd9, 0xc2, 0xe9, 0x9b, 0x4b, 0xf3, 0x98, 0x16, 0x46, 0x33, 0x30, 0x2a, 0x4f, 0xe8, 0x5b, 0x54, - 0x40, 0xf4, 0x3d, 0x71, 0x0e, 0x29, 0xad, 0x35, 0x36, 0xd1, 0x38, 0x49, 0x8f, 0xe6, 0x61, 0x6c, - 0xbb, 0xb5, 0x4e, 0x1a, 0x24, 0xe2, 0x1f, 0x7c, 0x9d, 0x70, 0x9d, 0x6e, 0x29, 0xbe, 0x4b, 0x5e, - 0x4f, 0xe0, 0x71, 0xaa, 0x84, 0xfd, 0xd7, 0xec, 0x3c, 0x10, 0xbd, 0x57, 0x09, 0x7c, 0x3a, 0xb1, - 0x28, 0xf7, 0x6f, 0xe6, 0x74, 0xee, 0x66, 0x56, 0x5c, 0x27, 0x7b, 0x6b, 0x3e, 0xbd, 0x4b, 0x64, - 0xcf, 0x0a, 0x63, 0xce, 0xf7, 0xb4, 0x9d, 0xf3, 0xbf, 0x50, 0x80, 0x93, 0xaa, 0x07, 0x0c, 0xb1, - 0xf5, 0x5b, 0xbd, 0x0f, 0xae, 0xc0, 0x60, 0x9d, 0x6c, 0x38, 0xad, 0x46, 0xa4, 0x0c, 0x0c, 0xbd, - 0xdc, 0xc8, 0x34, 0x1f, 0x83, 0xb1, 0x4e, 0x73, 0x88, 0x6e, 0xfb, 0xf9, 0x61, 0x76, 0x10, 0x47, - 0x0e, 0x9d, 0xe3, 0x6a, 0xd5, 0x58, 0xb9, 0xab, 0xe6, 0x31, 0xe8, 0x75, 0x77, 0xa8, 0x60, 0x56, - 0x30, 0xe5, 0xad, 0x25, 0x0a, 0xc4, 0x1c, 0x87, 0x9e, 0x80, 0xfe, 0x9a, 0xbf, 0xb3, 0xe3, 0x78, - 0x75, 0x76, 0xe4, 0x95, 0x66, 0x07, 0xa9, 0xec, 0x36, 0xc7, 0x41, 0x58, 0xe2, 0xd0, 0x59, 0xe8, - 0x71, 0x82, 0x4d, 0xae, 0x75, 0x29, 0xcd, 0x0e, 0xd0, 0x9a, 0x66, 0x82, 0xcd, 0x10, 0x33, 0x28, - 0xbd, 0x34, 0xde, 0xf1, 0x83, 0x6d, 0xd7, 0xdb, 0x9c, 0x77, 0x03, 0xb1, 0x24, 0xd4, 0x59, 0x78, - 0x5b, 0x61, 0xb0, 0x46, 0x85, 0x16, 0xa1, 0xb7, 0xe9, 0x07, 0x51, 0x38, 0xd9, 0xc7, 0xba, 0xfb, - 0xd1, 0x9c, 0x8d, 0x88, 0x7f, 0x6d, 0xc5, 0x0f, 0xa2, 0xf8, 0x03, 0xe8, 0xbf, 0x10, 0xf3, 0xe2, - 0x68, 0x19, 0xfa, 0x89, 0xb7, 0xbb, 0x18, 0xf8, 0x3b, 0x93, 0x13, 0xf9, 0x9c, 0x16, 0x38, 0x09, - 0x9f, 0x66, 0xb1, 0x8c, 0x2a, 0xc0, 0x58, 0xb2, 0x40, 0x1f, 0x82, 0x22, 0xf1, 0x76, 0x27, 0xfb, - 0x19, 0xa7, 0xa9, 0x1c, 0x4e, 0xb7, 0x9c, 0x20, 0xde, 0xf3, 0x17, 0xbc, 0x5d, 0x4c, 0xcb, 0xa0, - 0x8f, 
0x43, 0x49, 0x6e, 0x18, 0xa1, 0x50, 0x67, 0x66, 0x4e, 0x58, 0xb9, 0xcd, 0x60, 0xf2, 0x76, - 0xcb, 0x0d, 0xc8, 0x0e, 0xf1, 0xa2, 0x30, 0xde, 0x21, 0x25, 0x36, 0xc4, 0x31, 0x37, 0x54, 0x83, - 0xa1, 0x80, 0x84, 0xee, 0x3d, 0x52, 0xf1, 0x1b, 0x6e, 0x6d, 0x6f, 0xf2, 0x34, 0x6b, 0xde, 0x53, - 0x6d, 0xbb, 0x0c, 0x6b, 0x05, 0x62, 0x75, 0xbb, 0x0e, 0xc5, 0x06, 0x53, 0xf4, 0x06, 0x0c, 0x07, - 0x24, 0x8c, 0x9c, 0x20, 0x12, 0xb5, 0x4c, 0x2a, 0xf3, 0xd8, 0x30, 0xd6, 0x11, 0xfc, 0x3a, 0x11, - 0x57, 0x13, 0x63, 0xb0, 0xc9, 0x01, 0x7d, 0x5c, 0xea, 0xfe, 0x57, 0xfc, 0x96, 0x17, 0x85, 0x93, - 0x25, 0xd6, 0xee, 0x4c, 0xab, 0xec, 0xad, 0x98, 0x2e, 0x69, 0x1c, 0xe0, 0x85, 0xb1, 0xc1, 0x0a, - 0x7d, 0x12, 0x86, 0xf9, 0x7f, 0x6e, 0xdb, 0x0c, 0x27, 0x4f, 0x32, 0xde, 0x17, 0xf2, 0x79, 0x73, - 0xc2, 0xd9, 0x93, 0x82, 0xf9, 0xb0, 0x0e, 0x0d, 0xb1, 0xc9, 0x0d, 0x61, 0x18, 0x6e, 0xb8, 0xbb, - 0xc4, 0x23, 0x61, 0x58, 0x09, 0xfc, 0x75, 0x22, 0x54, 0xb5, 0x67, 0xb2, 0x6d, 0xa1, 0xfe, 0x3a, - 0x99, 0x1d, 0xa7, 0x3c, 0x97, 0xf5, 0x32, 0xd8, 0x64, 0x81, 0x6e, 0xc2, 0x08, 0xbd, 0x1b, 0xbb, - 0x31, 0xd3, 0xc1, 0x4e, 0x4c, 0xd9, 0x7d, 0x10, 0x1b, 0x85, 0x70, 0x82, 0x09, 0xba, 0x01, 0x43, - 0xac, 0xcf, 0x5b, 0x4d, 0xce, 0xf4, 0x54, 0x27, 0xa6, 0xcc, 0x94, 0x5e, 0xd5, 0x8a, 0x60, 0x83, - 0x01, 0x7a, 0x1d, 0x4a, 0x0d, 0x77, 0x83, 0xd4, 0xf6, 0x6a, 0x0d, 0x32, 0x39, 0xc4, 0xb8, 0x65, - 0x6e, 0x86, 0xcb, 0x92, 0x88, 0xcb, 0xe7, 0xea, 0x2f, 0x8e, 0x8b, 0xa3, 0x5b, 0x70, 0x2a, 0x22, - 0xc1, 0x8e, 0xeb, 0x39, 0x74, 0x13, 0x13, 0x57, 0x42, 0x66, 0xa2, 0x1e, 0x66, 0xb3, 0xeb, 0xbc, - 0x18, 0x8d, 0x53, 0x6b, 0x99, 0x54, 0x38, 0xa7, 0x34, 0xba, 0x0b, 0x93, 0x19, 0x18, 0x3e, 0x6f, - 0x4f, 0x30, 0xce, 0x1f, 0x16, 0x9c, 0x27, 0xd7, 0x72, 0xe8, 0x0e, 0xda, 0xe0, 0x70, 0x2e, 0x77, - 0x74, 0x03, 0x46, 0xd9, 0xce, 0x59, 0x69, 0x35, 0x1a, 0xa2, 0xc2, 0x11, 0x56, 0xe1, 0x13, 0x52, - 0x8e, 0x58, 0x32, 0xd1, 0x07, 0xfb, 0x65, 0x88, 0xff, 0xe1, 0x64, 0x69, 0xb4, 0xce, 0xac, 0xa1, - 0xad, 0xc0, 0x8d, 0xf6, 0xe8, 0xaa, 0x22, 0x77, 0xa3, 0xc9, 0xd1, 0xb6, 0x9a, 0x21, 0x9d, 0x54, - 0x99, 0x4c, 0x75, 0x20, 0x4e, 0x32, 0xa4, 0x47, 0x41, 0x18, 0xd5, 0x5d, 0x6f, 0x72, 0x8c, 0xdf, - 0xa7, 0xe4, 0x4e, 0x5a, 0xa5, 0x40, 0xcc, 0x71, 0xcc, 0x12, 0x4a, 0x7f, 0xdc, 0xa0, 0x27, 0xee, - 0x38, 0x23, 0x8c, 0x2d, 0xa1, 0x12, 0x81, 0x63, 0x1a, 0x2a, 0x04, 0x47, 0xd1, 0xde, 0x24, 0x62, - 0xa4, 0x6a, 0x43, 0x5c, 0x5b, 0xfb, 0x38, 0xa6, 0x70, 0x7b, 0x1d, 0x46, 0xd4, 0x36, 0xc1, 0xfa, - 0x04, 0x95, 0xa1, 0x97, 0x89, 0x7d, 0x42, 0x8f, 0x59, 0xa2, 0x4d, 0x60, 0x22, 0x21, 0xe6, 0x70, - 0xd6, 0x04, 0xf7, 0x1e, 0x99, 0xdd, 0x8b, 0x08, 0xd7, 0x45, 0x14, 0xb5, 0x26, 0x48, 0x04, 0x8e, - 0x69, 0xec, 0xff, 0xc9, 0xc5, 0xe7, 0xf8, 0x94, 0xe8, 0xe2, 0x5c, 0x7c, 0x16, 0x06, 0xb6, 0xfc, - 0x30, 0xa2, 0xd4, 0xac, 0x8e, 0xde, 0x58, 0x60, 0xbe, 0x26, 0xe0, 0x58, 0x51, 0xa0, 0x57, 0x61, - 0xb8, 0xa6, 0x57, 0x20, 0x0e, 0x75, 0xb5, 0x8d, 0x18, 0xb5, 0x63, 0x93, 0x16, 0xbd, 0x0c, 0x03, - 0xcc, 0xbb, 0xa7, 0xe6, 0x37, 0x84, 0xb4, 0x29, 0x25, 0x93, 0x81, 0x8a, 0x80, 0x1f, 0x68, 0xbf, - 0xb1, 0xa2, 0x46, 0x17, 0xa1, 0x8f, 0x36, 0x61, 0xa9, 0x22, 0x8e, 0x53, 0xa5, 0x92, 0xbb, 0xc6, - 0xa0, 0x58, 0x60, 0xed, 0x5f, 0xb3, 0x98, 0x2c, 0x95, 0xde, 0xf3, 0xd1, 0x35, 0x76, 0x68, 0xb0, - 0x13, 0x44, 0x53, 0x89, 0x3d, 0xae, 0x9d, 0x04, 0x0a, 0x77, 0x90, 0xf8, 0x8f, 0x8d, 0x92, 0xe8, - 0xcd, 0xe4, 0xc9, 0xc0, 0x05, 0x8a, 0x17, 0x65, 0x17, 0x24, 0x4f, 0x87, 0x47, 0xe2, 0x23, 0x8e, - 0xb6, 0xa7, 0xdd, 0x11, 0x61, 0xff, 0x5f, 0x05, 0x6d, 0x96, 0x54, 0x23, 0x27, 0x22, 0xa8, 0x02, - 0xfd, 0x77, 0x1c, 0x37, 0x72, 
0xbd, 0x4d, 0x21, 0xf7, 0xb5, 0x3f, 0xe8, 0x58, 0xa1, 0xdb, 0xbc, - 0x00, 0x97, 0x5e, 0xc4, 0x1f, 0x2c, 0xd9, 0x50, 0x8e, 0x41, 0xcb, 0xf3, 0x28, 0xc7, 0x42, 0xb7, - 0x1c, 0x31, 0x2f, 0xc0, 0x39, 0x8a, 0x3f, 0x58, 0xb2, 0x41, 0x6f, 0x01, 0xc8, 0x1d, 0x82, 0xd4, - 0x85, 0x57, 0xd0, 0xb3, 0x9d, 0x99, 0xae, 0xa9, 0x32, 0xb3, 0x23, 0x54, 0x36, 0x8a, 0xff, 0x63, - 0x8d, 0x9f, 0x1d, 0x69, 0x63, 0xaa, 0x37, 0x06, 0x7d, 0x82, 0x2e, 0x51, 0x27, 0x88, 0x48, 0x7d, - 0x26, 0x12, 0x9d, 0xf3, 0x74, 0x77, 0x97, 0xc3, 0x35, 0x77, 0x87, 0xe8, 0xcb, 0x59, 0x30, 0xc1, - 0x31, 0x3f, 0xfb, 0x97, 0x8a, 0x30, 0x99, 0xd7, 0x5c, 0xba, 0x68, 0xc8, 0x5d, 0x37, 0x9a, 0xa3, - 0x62, 0xad, 0x65, 0x2e, 0x9a, 0x05, 0x01, 0xc7, 0x8a, 0x82, 0xce, 0xde, 0xd0, 0xdd, 0x94, 0x77, - 0xfb, 0xde, 0x78, 0xf6, 0x56, 0x19, 0x14, 0x0b, 0x2c, 0xa5, 0x0b, 0x88, 0x13, 0x0a, 0xb7, 0x33, - 0x6d, 0x96, 0x63, 0x06, 0xc5, 0x02, 0xab, 0x6b, 0x19, 0x7b, 0x3a, 0x68, 0x19, 0x8d, 0x2e, 0xea, - 0x3d, 0xda, 0x2e, 0x42, 0x9f, 0x02, 0xd8, 0x70, 0x3d, 0x37, 0xdc, 0x62, 0xdc, 0xfb, 0x0e, 0xcd, - 0x5d, 0x09, 0xc5, 0x8b, 0x8a, 0x0b, 0xd6, 0x38, 0xa2, 0x97, 0x60, 0x50, 0x6d, 0x20, 0x4b, 0xf3, - 0xcc, 0x06, 0xaf, 0xf9, 0x34, 0xc5, 0xbb, 0xe9, 0x3c, 0xd6, 0xe9, 0xec, 0xcf, 0x24, 0xe7, 0x8b, - 0x58, 0x01, 0x5a, 0xff, 0x5a, 0xdd, 0xf6, 0x6f, 0xa1, 0x7d, 0xff, 0xda, 0xdf, 0xe8, 0x83, 0x51, - 0xa3, 0xb2, 0x56, 0xd8, 0xc5, 0x9e, 0x7b, 0x95, 0x1e, 0x40, 0x4e, 0x44, 0xc4, 0xfa, 0xb3, 0x3b, - 0x2f, 0x15, 0xfd, 0x90, 0xa2, 0x2b, 0x80, 0x97, 0x47, 0x9f, 0x82, 0x52, 0xc3, 0x09, 0x99, 0xc6, - 0x92, 0x88, 0x75, 0xd7, 0x0d, 0xb3, 0xf8, 0x42, 0xe8, 0x84, 0x91, 0x76, 0xea, 0x73, 0xde, 0x31, - 0x4b, 0x7a, 0x52, 0x52, 0xf9, 0x4a, 0xfa, 0x35, 0xaa, 0x46, 0x50, 0x21, 0x6c, 0x0f, 0x73, 0x1c, - 0x7a, 0x99, 0x6d, 0xad, 0x74, 0x56, 0xcc, 0x51, 0x69, 0x94, 0x4d, 0xb3, 0x5e, 0x43, 0xc8, 0x56, - 0x38, 0x6c, 0x50, 0xc6, 0x77, 0xb2, 0xbe, 0x36, 0x77, 0xb2, 0xa7, 0xa0, 0x9f, 0xfd, 0x50, 0x33, - 0x40, 0x8d, 0xc6, 0x12, 0x07, 0x63, 0x89, 0x4f, 0x4e, 0x98, 0x81, 0xee, 0x26, 0x0c, 0xbd, 0xf5, - 0x89, 0x49, 0xcd, 0xfc, 0x1f, 0x06, 0xf8, 0x2e, 0x27, 0xa6, 0x3c, 0x96, 0x38, 0xf4, 0x33, 0x16, - 0x20, 0xa7, 0x41, 0x6f, 0xcb, 0x14, 0xac, 0x2e, 0x37, 0xc0, 0x44, 0xed, 0x57, 0x3b, 0x76, 0x7b, - 0x2b, 0x9c, 0x9e, 0x49, 0x95, 0xe6, 0x9a, 0xd2, 0x57, 0x44, 0x13, 0x51, 0x9a, 0x40, 0x3f, 0x8c, - 0x96, 0xdd, 0x30, 0xfa, 0xfc, 0x1f, 0x25, 0x0e, 0xa7, 0x8c, 0x26, 0xa1, 0x9b, 0xfa, 0xe5, 0x6b, - 0xf0, 0x90, 0x97, 0xaf, 0xe1, 0xbc, 0x8b, 0xd7, 0x54, 0x0b, 0x4e, 0xe7, 0x7c, 0x41, 0x86, 0xfe, - 0x75, 0x5e, 0xd7, 0xbf, 0x76, 0xd0, 0xda, 0x4d, 0xcb, 0x3a, 0xa6, 0xdf, 0x68, 0x39, 0x5e, 0xe4, - 0x46, 0x7b, 0xba, 0xbe, 0xf6, 0x69, 0x18, 0x99, 0x77, 0xc8, 0x8e, 0xef, 0x2d, 0x78, 0xf5, 0xa6, - 0xef, 0x7a, 0x11, 0x9a, 0x84, 0x1e, 0x26, 0x7c, 0xf0, 0xad, 0xb7, 0x87, 0xf6, 0x1e, 0x66, 0x10, - 0x7b, 0x13, 0x4e, 0xce, 0xfb, 0x77, 0xbc, 0x3b, 0x4e, 0x50, 0x9f, 0xa9, 0x2c, 0x69, 0xfa, 0xa4, - 0x55, 0xa9, 0xcf, 0xb0, 0xf2, 0x6f, 0x8b, 0x5a, 0x49, 0x7e, 0x1d, 0x5a, 0x74, 0x1b, 0x24, 0x47, - 0xeb, 0xf7, 0xff, 0x16, 0x8c, 0x9a, 0x62, 0x7a, 0x65, 0x77, 0xb6, 0x72, 0xed, 0xce, 0x6f, 0xc0, - 0xc0, 0x86, 0x4b, 0x1a, 0x75, 0x4c, 0x36, 0x44, 0xef, 0x3c, 0x99, 0xef, 0x99, 0xb6, 0x48, 0x29, - 0xa5, 0x96, 0x97, 0x6b, 0x43, 0x16, 0x45, 0x61, 0xac, 0xd8, 0xa0, 0x6d, 0x18, 0x93, 0x7d, 0x28, - 0xb1, 0x62, 0x3f, 0x78, 0xaa, 0xdd, 0xc0, 0x9b, 0xcc, 0x4f, 0xdc, 0xdf, 0x2f, 0x8f, 0xe1, 0x04, - 0x1b, 0x9c, 0x62, 0x8c, 0xce, 0x42, 0xcf, 0x0e, 0x3d, 0xf9, 0x7a, 0x58, 0xf7, 0x33, 0xf5, 0x07, - 0xd3, 0xe4, 0x30, 0xa8, 0xfd, 0x63, 0x16, 0x9c, 0x4e, 
0xf5, 0x8c, 0xd0, 0x68, 0x1d, 0xf1, 0x28, - 0x24, 0x35, 0x4c, 0x85, 0xce, 0x1a, 0x26, 0xfb, 0x6f, 0x59, 0x70, 0x62, 0x61, 0xa7, 0x19, 0xed, - 0xcd, 0xbb, 0xa6, 0x91, 0xf8, 0x83, 0xd0, 0xb7, 0x43, 0xea, 0x6e, 0x6b, 0x47, 0x8c, 0x5c, 0x59, - 0x9e, 0x0e, 0x2b, 0x0c, 0x7a, 0xb0, 0x5f, 0x1e, 0xae, 0x46, 0x7e, 0xe0, 0x6c, 0x12, 0x0e, 0xc0, - 0x82, 0x9c, 0x9d, 0xb1, 0xee, 0x3d, 0xb2, 0xec, 0xee, 0xb8, 0xd1, 0x83, 0xcd, 0x76, 0x61, 0xdf, - 0x95, 0x4c, 0x70, 0xcc, 0xcf, 0xfe, 0xba, 0x05, 0xa3, 0x72, 0xde, 0xcf, 0xd4, 0xeb, 0x01, 0x09, - 0x43, 0x34, 0x05, 0x05, 0xb7, 0x29, 0x5a, 0x09, 0xa2, 0x95, 0x85, 0xa5, 0x0a, 0x2e, 0xb8, 0x4d, - 0x29, 0xce, 0xb3, 0x03, 0xa8, 0x68, 0x9a, 0xba, 0xaf, 0x09, 0x38, 0x56, 0x14, 0xe8, 0x12, 0x0c, - 0x78, 0x7e, 0x9d, 0x4b, 0xc4, 0x5c, 0x94, 0x60, 0x13, 0x6c, 0x55, 0xc0, 0xb0, 0xc2, 0xa2, 0x0a, - 0x94, 0xb8, 0x23, 0x64, 0x3c, 0x69, 0xbb, 0x72, 0xa7, 0x64, 0x5f, 0xb6, 0x26, 0x4b, 0xe2, 0x98, - 0x89, 0xfd, 0x1b, 0x16, 0x0c, 0xc9, 0x2f, 0xeb, 0xf2, 0xae, 0x42, 0x97, 0x56, 0x7c, 0x4f, 0x89, - 0x97, 0x16, 0xbd, 0x6b, 0x30, 0x8c, 0x71, 0xc5, 0x28, 0x1e, 0xea, 0x8a, 0x71, 0x05, 0x06, 0x9d, - 0x66, 0xb3, 0x62, 0xde, 0x4f, 0xd8, 0x54, 0x9a, 0x89, 0xc1, 0x58, 0xa7, 0xb1, 0x7f, 0xb4, 0x00, - 0x23, 0xf2, 0x0b, 0xaa, 0xad, 0xf5, 0x90, 0x44, 0x68, 0x0d, 0x4a, 0x0e, 0x1f, 0x25, 0x22, 0x27, - 0xf9, 0x63, 0xd9, 0x7a, 0x33, 0x63, 0x48, 0x63, 0x41, 0x6b, 0x46, 0x96, 0xc6, 0x31, 0x23, 0xd4, - 0x80, 0x71, 0xcf, 0x8f, 0xd8, 0xa1, 0xab, 0xf0, 0xed, 0x4c, 0x99, 0x49, 0xee, 0x67, 0x04, 0xf7, - 0xf1, 0xd5, 0x24, 0x17, 0x9c, 0x66, 0x8c, 0x16, 0xa4, 0x2e, 0xb2, 0x98, 0xaf, 0x44, 0xd2, 0x07, - 0x2e, 0x5b, 0x15, 0x69, 0xff, 0xaa, 0x05, 0x25, 0x49, 0x76, 0x1c, 0x56, 0xeb, 0x15, 0xe8, 0x0f, - 0xd9, 0x20, 0xc8, 0xae, 0xb1, 0xdb, 0x35, 0x9c, 0x8f, 0x57, 0x2c, 0x4b, 0xf0, 0xff, 0x21, 0x96, - 0x3c, 0x98, 0x29, 0x4a, 0x35, 0xff, 0x5d, 0x62, 0x8a, 0x52, 0xed, 0xc9, 0x39, 0x94, 0xfe, 0x94, - 0xb5, 0x59, 0xd3, 0xed, 0x52, 0x91, 0xb7, 0x19, 0x90, 0x0d, 0xf7, 0x6e, 0x52, 0xe4, 0xad, 0x30, - 0x28, 0x16, 0x58, 0xf4, 0x16, 0x0c, 0xd5, 0xa4, 0x0d, 0x22, 0x5e, 0xe1, 0x17, 0xdb, 0xda, 0xc3, - 0x94, 0xe9, 0x94, 0xeb, 0xd0, 0xe6, 0xb4, 0xf2, 0xd8, 0xe0, 0x66, 0x3a, 0xfa, 0x14, 0x3b, 0x39, - 0xfa, 0xc4, 0x7c, 0xf3, 0xdd, 0x5e, 0x7e, 0xdc, 0x82, 0x3e, 0xae, 0x7b, 0xee, 0x4e, 0xf5, 0xaf, - 0x59, 0x92, 0xe3, 0xbe, 0xbb, 0x45, 0x81, 0x42, 0xd2, 0x40, 0x2b, 0x50, 0x62, 0x3f, 0x98, 0xee, - 0xbc, 0x98, 0xff, 0x0e, 0x87, 0xd7, 0xaa, 0x37, 0xf0, 0x96, 0x2c, 0x86, 0x63, 0x0e, 0xf6, 0x8f, - 0x14, 0xe9, 0xee, 0x16, 0x93, 0x1a, 0x87, 0xbe, 0xf5, 0xf0, 0x0e, 0xfd, 0xc2, 0xc3, 0x3a, 0xf4, - 0x37, 0x61, 0xb4, 0xa6, 0xd9, 0x9d, 0xe3, 0x91, 0xbc, 0xd4, 0x76, 0x92, 0x68, 0x26, 0x6a, 0xae, - 0x9d, 0x9b, 0x33, 0x99, 0xe0, 0x24, 0x57, 0xf4, 0x09, 0x18, 0xe2, 0xe3, 0x2c, 0x6a, 0xe1, 0xbe, - 0x52, 0x4f, 0xe4, 0xcf, 0x17, 0xbd, 0x0a, 0xae, 0xcd, 0xd5, 0x8a, 0x63, 0x83, 0x99, 0xfd, 0x97, - 0x16, 0xa0, 0x85, 0xe6, 0x16, 0xd9, 0x21, 0x81, 0xd3, 0x88, 0xcd, 0x47, 0x3f, 0x68, 0xc1, 0x24, - 0x49, 0x81, 0xe7, 0xfc, 0x9d, 0x1d, 0x71, 0x59, 0xcc, 0xd1, 0x67, 0x2c, 0xe4, 0x94, 0x51, 0x0f, - 0x95, 0x26, 0xf3, 0x28, 0x70, 0x6e, 0x7d, 0x68, 0x05, 0x26, 0xf8, 0x29, 0xa9, 0x10, 0x9a, 0xdf, - 0xd5, 0x23, 0x82, 0xf1, 0xc4, 0x5a, 0x9a, 0x04, 0x67, 0x95, 0xb3, 0x7f, 0x75, 0x18, 0x72, 0x5b, - 0xf1, 0x9e, 0xdd, 0xec, 0x3d, 0xbb, 0xd9, 0x7b, 0x76, 0xb3, 0xf7, 0xec, 0x66, 0xef, 0xd9, 0xcd, - 0xde, 0xb3, 0x9b, 0xbd, 0x4b, 0xed, 0x66, 0xff, 0xb7, 0x05, 0x27, 0xd5, 0xf1, 0x65, 0x5c, 0xd8, - 0x3f, 0x0b, 0x13, 0x7c, 0xb9, 0x19, 0x3e, 0xc6, 0xe2, 0xb8, 0xbe, 0x92, 0x39, 
0x73, 0x13, 0xbe, - 0xf0, 0x46, 0x41, 0xfe, 0xa8, 0x28, 0x03, 0x81, 0xb3, 0xaa, 0xb1, 0x7f, 0x69, 0x00, 0x7a, 0x17, - 0x76, 0x89, 0x17, 0x1d, 0xc3, 0xd5, 0xa6, 0x06, 0x23, 0xae, 0xb7, 0xeb, 0x37, 0x76, 0x49, 0x9d, - 0xe3, 0x0f, 0x73, 0x03, 0x3f, 0x25, 0x58, 0x8f, 0x2c, 0x19, 0x2c, 0x70, 0x82, 0xe5, 0xc3, 0xb0, - 0x3e, 0x5c, 0x85, 0x3e, 0x7e, 0xf8, 0x08, 0xd3, 0x43, 0xe6, 0x9e, 0xcd, 0x3a, 0x51, 0x1c, 0xa9, - 0xb1, 0x65, 0x84, 0x1f, 0x6e, 0xa2, 0x38, 0xfa, 0x0c, 0x8c, 0x6c, 0xb8, 0x41, 0x18, 0xad, 0xb9, - 0x3b, 0xf4, 0x68, 0xd8, 0x69, 0x3e, 0x80, 0xb5, 0x41, 0xf5, 0xc3, 0xa2, 0xc1, 0x09, 0x27, 0x38, - 0xa3, 0x4d, 0x18, 0x6e, 0x38, 0x7a, 0x55, 0xfd, 0x87, 0xae, 0x4a, 0x9d, 0x0e, 0xcb, 0x3a, 0x23, - 0x6c, 0xf2, 0xa5, 0xcb, 0xa9, 0xc6, 0x14, 0xe6, 0x03, 0x4c, 0x9d, 0xa1, 0x96, 0x13, 0xd7, 0x94, - 0x73, 0x1c, 0x15, 0xd0, 0x98, 0x23, 0x7b, 0xc9, 0x14, 0xd0, 0x34, 0x77, 0xf5, 0x4f, 0x43, 0x89, - 0xd0, 0x2e, 0xa4, 0x8c, 0xc5, 0x01, 0x73, 0xb9, 0xbb, 0xb6, 0xae, 0xb8, 0xb5, 0xc0, 0x37, 0xed, - 0x3c, 0x0b, 0x92, 0x13, 0x8e, 0x99, 0xa2, 0x39, 0xe8, 0x0b, 0x49, 0xe0, 0x2a, 0x5d, 0x72, 0x9b, - 0x61, 0x64, 0x64, 0xfc, 0xd5, 0x1a, 0xff, 0x8d, 0x45, 0x51, 0x3a, 0xbd, 0x1c, 0xa6, 0x8a, 0x65, - 0x87, 0x81, 0x36, 0xbd, 0x66, 0x18, 0x14, 0x0b, 0x2c, 0x7a, 0x1d, 0xfa, 0x03, 0xd2, 0x60, 0x86, - 0xc4, 0xe1, 0xee, 0x27, 0x39, 0xb7, 0x4b, 0xf2, 0x72, 0x58, 0x32, 0x40, 0xd7, 0x01, 0x05, 0x84, - 0x0a, 0x78, 0xae, 0xb7, 0xa9, 0xdc, 0xbb, 0xc5, 0x46, 0xab, 0x04, 0x69, 0x1c, 0x53, 0xc8, 0x07, - 0x8b, 0x38, 0xa3, 0x18, 0xba, 0x0a, 0xe3, 0x0a, 0xba, 0xe4, 0x85, 0x91, 0x43, 0x37, 0xb8, 0x51, - 0xc6, 0x4b, 0xe9, 0x57, 0x70, 0x92, 0x00, 0xa7, 0xcb, 0xd8, 0x3f, 0x67, 0x01, 0xef, 0xe7, 0x63, - 0xd0, 0x2a, 0xbc, 0x66, 0x6a, 0x15, 0xce, 0xe4, 0x8e, 0x5c, 0x8e, 0x46, 0xe1, 0xe7, 0x2c, 0x18, - 0xd4, 0x46, 0x36, 0x9e, 0xb3, 0x56, 0x9b, 0x39, 0xdb, 0x82, 0x31, 0x3a, 0xd3, 0x6f, 0xac, 0x87, - 0x24, 0xd8, 0x25, 0x75, 0x36, 0x31, 0x0b, 0x0f, 0x36, 0x31, 0x95, 0x2b, 0xe9, 0x72, 0x82, 0x21, - 0x4e, 0x55, 0x61, 0x7f, 0x5a, 0x36, 0x55, 0x79, 0xde, 0xd6, 0xd4, 0x98, 0x27, 0x3c, 0x6f, 0xd5, - 0xa8, 0xe2, 0x98, 0x86, 0x2e, 0xb5, 0x2d, 0x3f, 0x8c, 0x92, 0x9e, 0xb7, 0xd7, 0xfc, 0x30, 0xc2, - 0x0c, 0x63, 0xbf, 0x00, 0xb0, 0x70, 0x97, 0xd4, 0xf8, 0x8c, 0xd5, 0x2f, 0x3d, 0x56, 0xfe, 0xa5, - 0xc7, 0xfe, 0x3d, 0x0b, 0x46, 0x16, 0xe7, 0x8c, 0x93, 0x6b, 0x1a, 0x80, 0xdf, 0xd4, 0x6e, 0xdf, - 0x5e, 0x95, 0xee, 0x1f, 0xdc, 0x02, 0xae, 0xa0, 0x58, 0xa3, 0x40, 0x67, 0xa0, 0xd8, 0x68, 0x79, - 0x42, 0xed, 0xd9, 0x4f, 0x8f, 0xc7, 0xe5, 0x96, 0x87, 0x29, 0x4c, 0x7b, 0xac, 0x54, 0xec, 0xfa, - 0xb1, 0x52, 0xc7, 0x20, 0x25, 0xa8, 0x0c, 0xbd, 0x77, 0xee, 0xb8, 0x75, 0xfe, 0x14, 0x5c, 0xb8, - 0xa6, 0xdc, 0xbe, 0xbd, 0x34, 0x1f, 0x62, 0x0e, 0xb7, 0xbf, 0x58, 0x84, 0xa9, 0xc5, 0x06, 0xb9, - 0xfb, 0x0e, 0x9f, 0xc3, 0x77, 0xfb, 0xd4, 0xea, 0x70, 0x0a, 0xa4, 0xc3, 0x3e, 0xa7, 0xeb, 0xdc, - 0x1f, 0x1b, 0xd0, 0xcf, 0x1d, 0x4f, 0xe5, 0xe3, 0xf8, 0x4c, 0x73, 0x5f, 0x7e, 0x87, 0x4c, 0x73, - 0x07, 0x56, 0x61, 0xee, 0x53, 0x07, 0xa6, 0x80, 0x62, 0xc9, 0x7c, 0xea, 0x15, 0x18, 0xd2, 0x29, - 0x0f, 0xf5, 0xb0, 0xf5, 0xbb, 0x8b, 0x30, 0x46, 0x5b, 0xf0, 0x50, 0x07, 0xe2, 0x66, 0x7a, 0x20, - 0x8e, 0xfa, 0x71, 0x63, 0xe7, 0xd1, 0x78, 0x2b, 0x39, 0x1a, 0x57, 0xf2, 0x46, 0xe3, 0xb8, 0xc7, - 0xe0, 0x7b, 0x2c, 0x98, 0x58, 0x6c, 0xf8, 0xb5, 0xed, 0xc4, 0x03, 0xc4, 0x97, 0x60, 0x90, 0x6e, - 0xc7, 0xa1, 0x11, 0x8b, 0xc3, 0x88, 0xce, 0x22, 0x50, 0x58, 0xa7, 0xd3, 0x8a, 0xdd, 0xbc, 0xb9, - 0x34, 0x9f, 0x15, 0xd4, 0x45, 0xa0, 0xb0, 0x4e, 0x67, 0xff, 0x8e, 0x05, 0xe7, 0xae, 0xce, 0x2d, - 0xc4, 
0x53, 0x31, 0x15, 0x57, 0xe6, 0x22, 0xf4, 0x35, 0xeb, 0x5a, 0x53, 0x62, 0xb5, 0xf0, 0x3c, - 0x6b, 0x85, 0xc0, 0xbe, 0x5b, 0x62, 0x26, 0xdd, 0x04, 0xb8, 0x8a, 0x2b, 0x73, 0x62, 0xdf, 0x95, - 0x56, 0x20, 0x2b, 0xd7, 0x0a, 0xf4, 0x04, 0xf4, 0xd3, 0x73, 0xc1, 0xad, 0xc9, 0x76, 0x73, 0x83, - 0x3e, 0x07, 0x61, 0x89, 0xb3, 0x7f, 0xd6, 0x82, 0x89, 0xab, 0x6e, 0x44, 0x0f, 0xed, 0x64, 0xe0, - 0x14, 0x7a, 0x6a, 0x87, 0x6e, 0xe4, 0x07, 0x7b, 0xc9, 0xc0, 0x29, 0x58, 0x61, 0xb0, 0x46, 0xc5, - 0x3f, 0x68, 0xd7, 0x65, 0x2f, 0x29, 0x0a, 0xa6, 0xdd, 0x0d, 0x0b, 0x38, 0x56, 0x14, 0xb4, 0xbf, - 0xea, 0x6e, 0xc0, 0x54, 0x96, 0x7b, 0x62, 0xe3, 0x56, 0xfd, 0x35, 0x2f, 0x11, 0x38, 0xa6, 0xb1, - 0xff, 0xdc, 0x82, 0xf2, 0xd5, 0x46, 0x2b, 0x8c, 0x48, 0xb0, 0x11, 0xe6, 0x6c, 0xba, 0x2f, 0x40, - 0x89, 0x48, 0x03, 0x81, 0x7c, 0xf2, 0x29, 0x05, 0x51, 0x65, 0x39, 0xe0, 0xf1, 0x5b, 0x14, 0x5d, - 0x17, 0xaf, 0xa4, 0x0f, 0xf7, 0xcc, 0x75, 0x11, 0x10, 0xd1, 0xeb, 0xd2, 0x03, 0xda, 0xb0, 0xc8, - 0x18, 0x0b, 0x29, 0x2c, 0xce, 0x28, 0x61, 0xff, 0x98, 0x05, 0x27, 0xd5, 0x07, 0xbf, 0xeb, 0x3e, - 0xd3, 0xfe, 0x6a, 0x01, 0x86, 0xaf, 0xad, 0xad, 0x55, 0xae, 0x92, 0x48, 0x9b, 0x95, 0xed, 0xcd, - 0xfe, 0x58, 0xb3, 0x5e, 0xb6, 0xbb, 0x23, 0xb6, 0x22, 0xb7, 0x31, 0xcd, 0xe3, 0xa2, 0x4d, 0x2f, - 0x79, 0xd1, 0x8d, 0xa0, 0x1a, 0x05, 0xae, 0xb7, 0x99, 0x39, 0xd3, 0xa5, 0xcc, 0x52, 0xcc, 0x93, - 0x59, 0xd0, 0x0b, 0xd0, 0xc7, 0x02, 0xb3, 0xc9, 0x41, 0x78, 0x44, 0x5d, 0xb1, 0x18, 0xf4, 0x60, - 0xbf, 0x5c, 0xba, 0x89, 0x97, 0xf8, 0x1f, 0x2c, 0x48, 0xd1, 0x4d, 0x18, 0xdc, 0x8a, 0xa2, 0xe6, - 0x35, 0xe2, 0xd4, 0x49, 0x20, 0x77, 0xd9, 0xf3, 0x59, 0xbb, 0x2c, 0xed, 0x04, 0x4e, 0x16, 0x6f, - 0x4c, 0x31, 0x2c, 0xc4, 0x3a, 0x1f, 0xbb, 0x0a, 0x10, 0xe3, 0x8e, 0xc8, 0x70, 0x63, 0xaf, 0x41, - 0x89, 0x7e, 0xee, 0x4c, 0xc3, 0x75, 0xda, 0x9b, 0xc6, 0x9f, 0x81, 0x92, 0x34, 0x7c, 0x87, 0x22, - 0x8a, 0x03, 0x3b, 0x91, 0xa4, 0x5d, 0x3c, 0xc4, 0x31, 0xde, 0x7e, 0x1c, 0x84, 0x6f, 0x69, 0x3b, - 0x96, 0xf6, 0x06, 0x9c, 0x60, 0x4e, 0xb2, 0x4e, 0xb4, 0x65, 0xcc, 0xd1, 0xce, 0x93, 0xe1, 0x59, - 0x71, 0xaf, 0xe3, 0x5f, 0x36, 0xa9, 0x3d, 0x4e, 0x1e, 0x92, 0x1c, 0xe3, 0x3b, 0x9e, 0xfd, 0x67, - 0x3d, 0xf0, 0xc8, 0x52, 0x35, 0x3f, 0xfc, 0xd0, 0xcb, 0x30, 0xc4, 0xc5, 0x45, 0x3a, 0x35, 0x9c, - 0x86, 0xa8, 0x57, 0x69, 0x40, 0xd7, 0x34, 0x1c, 0x36, 0x28, 0xd1, 0x39, 0x28, 0xba, 0x6f, 0x7b, - 0xc9, 0xa7, 0x7b, 0x4b, 0x6f, 0xac, 0x62, 0x0a, 0xa7, 0x68, 0x2a, 0x79, 0xf2, 0x2d, 0x5d, 0xa1, - 0x95, 0xf4, 0xf9, 0x1a, 0x8c, 0xb8, 0x61, 0x2d, 0x74, 0x97, 0x3c, 0xba, 0x4e, 0xb5, 0x95, 0xae, - 0x74, 0x0e, 0xb4, 0xd1, 0x0a, 0x8b, 0x13, 0xd4, 0xda, 0xf9, 0xd2, 0xdb, 0xb5, 0xf4, 0xda, 0x31, - 0xf8, 0x01, 0xdd, 0xfe, 0x9b, 0xec, 0xeb, 0x42, 0xa6, 0x82, 0x17, 0xdb, 0x3f, 0xff, 0xe0, 0x10, - 0x4b, 0x1c, 0xbd, 0xd0, 0xd5, 0xb6, 0x9c, 0xe6, 0x4c, 0x2b, 0xda, 0x9a, 0x77, 0xc3, 0x9a, 0xbf, - 0x4b, 0x82, 0x3d, 0x76, 0x17, 0x1f, 0x88, 0x2f, 0x74, 0x0a, 0x31, 0x77, 0x6d, 0xa6, 0x42, 0x29, - 0x71, 0xba, 0x0c, 0x9a, 0x81, 0x51, 0x09, 0xac, 0x92, 0x90, 0x1d, 0x01, 0x83, 0x8c, 0x8d, 0x7a, - 0x4c, 0x27, 0xc0, 0x8a, 0x49, 0x92, 0xde, 0x14, 0x70, 0xe1, 0x28, 0x04, 0xdc, 0x0f, 0xc2, 0xb0, - 0xeb, 0xb9, 0x91, 0xeb, 0x44, 0x3e, 0xb7, 0x1f, 0xf1, 0x6b, 0x37, 0x53, 0x30, 0x2f, 0xe9, 0x08, - 0x6c, 0xd2, 0xd9, 0xff, 0xb6, 0x07, 0xc6, 0xd9, 0xb0, 0xbd, 0x37, 0xc3, 0xbe, 0x9d, 0x66, 0xd8, - 0xcd, 0xf4, 0x0c, 0x3b, 0x0a, 0xc9, 0xfd, 0x81, 0xa7, 0xd9, 0x67, 0xa0, 0xa4, 0xde, 0x0f, 0xca, - 0x07, 0xc4, 0x56, 0xce, 0x03, 0xe2, 0xce, 0xa7, 0xb7, 0x74, 0x49, 0x2b, 0x66, 0xba, 0xa4, 0x7d, - 0xd9, 0x82, 0xd8, 0xb0, 0x80, 
0xde, 0x80, 0x52, 0xd3, 0x67, 0x1e, 0xae, 0x81, 0x74, 0x1b, 0x7f, - 0xbc, 0xad, 0x65, 0x82, 0x47, 0x60, 0x0b, 0x78, 0x2f, 0x54, 0x64, 0x51, 0x1c, 0x73, 0x41, 0xd7, - 0xa1, 0xbf, 0x19, 0x90, 0x6a, 0xc4, 0xc2, 0x03, 0x75, 0xcf, 0x90, 0xcf, 0x1a, 0x5e, 0x10, 0x4b, - 0x0e, 0xf6, 0xbf, 0xb7, 0x60, 0x2c, 0x49, 0x8a, 0x3e, 0x0c, 0x3d, 0xe4, 0x2e, 0xa9, 0x89, 0xf6, - 0x66, 0x1e, 0xc5, 0xb1, 0x6a, 0x82, 0x77, 0x00, 0xfd, 0x8f, 0x59, 0x29, 0x74, 0x0d, 0xfa, 0xe9, - 0x39, 0x7c, 0x55, 0x85, 0xc2, 0x7b, 0x34, 0xef, 0x2c, 0x57, 0x02, 0x0d, 0x6f, 0x9c, 0x00, 0x61, - 0x59, 0x9c, 0xf9, 0x81, 0xd5, 0x9a, 0x55, 0x7a, 0xc5, 0x89, 0xda, 0xdd, 0xc4, 0xd7, 0xe6, 0x2a, - 0x9c, 0x48, 0x70, 0xe3, 0x7e, 0x60, 0x12, 0x88, 0x63, 0x26, 0xf6, 0x2f, 0x58, 0x00, 0xdc, 0xed, - 0xcd, 0xf1, 0x36, 0xc9, 0x31, 0x68, 0xd3, 0xe7, 0xa1, 0x27, 0x6c, 0x92, 0x5a, 0x3b, 0xe7, 0xeb, - 0xb8, 0x3d, 0xd5, 0x26, 0xa9, 0xc5, 0x33, 0x8e, 0xfe, 0xc3, 0xac, 0xb4, 0xfd, 0xbd, 0x00, 0x23, - 0x31, 0xd9, 0x52, 0x44, 0x76, 0xd0, 0x73, 0x46, 0xd0, 0x91, 0x33, 0x89, 0xa0, 0x23, 0x25, 0x46, - 0xad, 0x29, 0x6e, 0x3f, 0x03, 0xc5, 0x1d, 0xe7, 0xae, 0xd0, 0xcc, 0x3d, 0xd3, 0xbe, 0x19, 0x94, - 0xff, 0xf4, 0x8a, 0x73, 0x97, 0x5f, 0x5e, 0x9f, 0x91, 0x2b, 0x64, 0xc5, 0xb9, 0xdb, 0xd1, 0x41, - 0x98, 0x56, 0xc2, 0xea, 0x72, 0x3d, 0xe1, 0xd1, 0xd5, 0x55, 0x5d, 0xae, 0x97, 0xac, 0xcb, 0xf5, - 0xba, 0xa8, 0xcb, 0xf5, 0xd0, 0x3d, 0xe8, 0x17, 0x0e, 0x97, 0x22, 0x2c, 0xd9, 0xe5, 0x2e, 0xea, - 0x13, 0xfe, 0x9a, 0xbc, 0xce, 0xcb, 0xf2, 0x72, 0x2e, 0xa0, 0x1d, 0xeb, 0x95, 0x15, 0xa2, 0xff, - 0xc7, 0x82, 0x11, 0xf1, 0x1b, 0x93, 0xb7, 0x5b, 0x24, 0x8c, 0x84, 0xf0, 0xfa, 0x81, 0xee, 0xdb, - 0x20, 0x0a, 0xf2, 0xa6, 0x7c, 0x40, 0x9e, 0x33, 0x26, 0xb2, 0x63, 0x8b, 0x12, 0xad, 0x40, 0x7f, - 0xdb, 0x82, 0x13, 0x3b, 0xce, 0x5d, 0x5e, 0x23, 0x87, 0x61, 0x27, 0x72, 0x7d, 0xe1, 0xb8, 0xf0, - 0xe1, 0xee, 0x86, 0x3f, 0x55, 0x9c, 0x37, 0x52, 0x5a, 0x29, 0x4f, 0x64, 0x91, 0x74, 0x6c, 0x6a, - 0x66, 0xbb, 0xa6, 0x36, 0x60, 0x40, 0xce, 0xb7, 0x87, 0xe9, 0xdd, 0xcd, 0xea, 0x11, 0x73, 0xed, - 0xa1, 0xd6, 0xf3, 0x19, 0x18, 0xd2, 0xe7, 0xd8, 0x43, 0xad, 0xeb, 0x6d, 0x98, 0xc8, 0x98, 0x4b, - 0x0f, 0xb5, 0xca, 0x3b, 0x70, 0x26, 0x77, 0x7e, 0x3c, 0x54, 0xef, 0xfc, 0xaf, 0x5a, 0xfa, 0x3e, - 0x78, 0x0c, 0x26, 0x8d, 0x39, 0xd3, 0xa4, 0x71, 0xbe, 0xfd, 0xca, 0xc9, 0xb1, 0x6b, 0xbc, 0xa5, - 0x37, 0x9a, 0xee, 0xea, 0xe8, 0x75, 0xe8, 0x6b, 0x50, 0x88, 0x74, 0xdb, 0xb5, 0x3b, 0xaf, 0xc8, - 0x58, 0x98, 0x64, 0xf0, 0x10, 0x0b, 0x0e, 0xf6, 0x2f, 0x5b, 0xd0, 0x73, 0x0c, 0x3d, 0x81, 0xcd, - 0x9e, 0x78, 0x2e, 0x97, 0xb5, 0x88, 0xd0, 0x3e, 0x8d, 0x9d, 0x3b, 0x0b, 0x77, 0x23, 0xe2, 0x85, - 0xec, 0x44, 0xce, 0xec, 0x98, 0x9f, 0xb2, 0x60, 0x62, 0xd9, 0x77, 0xea, 0xb3, 0x4e, 0xc3, 0xf1, - 0x6a, 0x24, 0x58, 0xf2, 0x36, 0x0f, 0xe5, 0x73, 0x5e, 0xe8, 0xe8, 0x73, 0x3e, 0x27, 0x5d, 0xb6, - 0x7a, 0xf2, 0xc7, 0x8f, 0x4a, 0xd2, 0xc9, 0x30, 0x4c, 0x86, 0x73, 0xf1, 0x16, 0x20, 0xbd, 0x95, - 0xe2, 0xe5, 0x15, 0x86, 0x7e, 0x97, 0xb7, 0x57, 0x0c, 0xe2, 0x93, 0xd9, 0x12, 0x6e, 0xea, 0xf3, - 0xb4, 0x37, 0x45, 0x1c, 0x80, 0x25, 0x23, 0xfb, 0x65, 0xc8, 0x0c, 0x9b, 0xd1, 0x59, 0x7b, 0x61, - 0x7f, 0x1c, 0xc6, 0x59, 0xc9, 0x43, 0x6a, 0x06, 0xec, 0x84, 0xce, 0x35, 0x23, 0x04, 0xa8, 0xfd, - 0x05, 0x0b, 0x46, 0x57, 0x13, 0x91, 0x11, 0x2f, 0x32, 0x2b, 0x6d, 0x86, 0xaa, 0xbf, 0xca, 0xa0, - 0x58, 0x60, 0x8f, 0x5c, 0x15, 0xf6, 0xd7, 0x16, 0xc4, 0x91, 0x6c, 0x8e, 0x41, 0x7c, 0x9b, 0x33, - 0xc4, 0xb7, 0x4c, 0x41, 0x56, 0x35, 0x27, 0x4f, 0x7a, 0x43, 0xd7, 0x55, 0x8c, 0xb7, 0x36, 0x32, - 0x6c, 0xcc, 0x86, 0x4f, 0xc5, 0x11, 0x33, 0x10, 0x9c, 
0x8c, 0xfa, 0x66, 0xff, 0x7e, 0x01, 0x90, - 0xa2, 0xed, 0x3a, 0x06, 0x5d, 0xba, 0xc4, 0xd1, 0xc4, 0xa0, 0xdb, 0x05, 0xc4, 0xfc, 0x0c, 0x02, - 0xc7, 0x0b, 0x39, 0x5b, 0x57, 0x28, 0xff, 0x0e, 0xe7, 0xc4, 0x30, 0x25, 0x1f, 0xa5, 0x2d, 0xa7, - 0xb8, 0xe1, 0x8c, 0x1a, 0x34, 0xff, 0x91, 0xde, 0x6e, 0xfd, 0x47, 0xfa, 0x3a, 0xbc, 0xae, 0xfc, - 0x8a, 0x05, 0xc3, 0xaa, 0x9b, 0xde, 0x25, 0x3e, 0xf8, 0xaa, 0x3d, 0x39, 0x1b, 0x68, 0x45, 0x6b, - 0x32, 0x3b, 0x58, 0xbe, 0x83, 0xbd, 0x92, 0x75, 0x1a, 0xee, 0x3d, 0xa2, 0x62, 0x96, 0x96, 0xc5, - 0xab, 0x57, 0x01, 0x3d, 0xd8, 0x2f, 0x0f, 0xab, 0x7f, 0x3c, 0x26, 0x7b, 0x5c, 0x84, 0x6e, 0xc9, - 0xa3, 0x89, 0xa9, 0x88, 0x5e, 0x82, 0xde, 0xe6, 0x96, 0x13, 0x92, 0xc4, 0x5b, 0xa5, 0xde, 0x0a, - 0x05, 0x1e, 0xec, 0x97, 0x47, 0x54, 0x01, 0x06, 0xc1, 0x9c, 0xba, 0xfb, 0xc8, 0x7e, 0xe9, 0xc9, - 0xd9, 0x31, 0xb2, 0xdf, 0x5f, 0x5a, 0xd0, 0xb3, 0xea, 0xd7, 0x8f, 0x63, 0x0b, 0x78, 0xcd, 0xd8, - 0x02, 0xce, 0xe6, 0xa5, 0xcb, 0xc8, 0x5d, 0xfd, 0x8b, 0x89, 0xd5, 0x7f, 0x3e, 0x97, 0x43, 0xfb, - 0x85, 0xbf, 0x03, 0x83, 0x2c, 0x09, 0x87, 0x78, 0x97, 0xf5, 0x82, 0xb1, 0xe0, 0xcb, 0x89, 0x05, - 0x3f, 0xaa, 0x91, 0x6a, 0x2b, 0xfd, 0x29, 0xe8, 0x17, 0x0f, 0x7d, 0x92, 0x8f, 0x8d, 0x05, 0x2d, - 0x96, 0x78, 0xfb, 0xc7, 0x8b, 0x60, 0x24, 0xfd, 0x40, 0xbf, 0x6a, 0xc1, 0x74, 0xc0, 0x1d, 0x80, - 0xeb, 0xf3, 0xad, 0xc0, 0xf5, 0x36, 0xab, 0xb5, 0x2d, 0x52, 0x6f, 0x35, 0x5c, 0x6f, 0x73, 0x69, - 0xd3, 0xf3, 0x15, 0x78, 0xe1, 0x2e, 0xa9, 0xb5, 0x98, 0x71, 0xae, 0x43, 0x86, 0x11, 0xe5, 0x48, - 0xff, 0xfc, 0xfd, 0xfd, 0xf2, 0x34, 0x3e, 0x14, 0x6f, 0x7c, 0xc8, 0xb6, 0xa0, 0xdf, 0xb1, 0xe0, - 0x32, 0xcf, 0x85, 0xd1, 0x7d, 0xfb, 0xdb, 0xdc, 0x96, 0x2b, 0x92, 0x55, 0xcc, 0x64, 0x8d, 0x04, - 0x3b, 0xb3, 0x1f, 0x14, 0x1d, 0x7a, 0xb9, 0x72, 0xb8, 0xba, 0xf0, 0x61, 0x1b, 0x67, 0xff, 0xc3, - 0x22, 0x0c, 0x8b, 0x08, 0x70, 0xe2, 0x0c, 0x78, 0xc9, 0x98, 0x12, 0x8f, 0x26, 0xa6, 0xc4, 0xb8, - 0x41, 0x7c, 0x34, 0xdb, 0x7f, 0x08, 0xe3, 0x74, 0x73, 0xbe, 0x46, 0x9c, 0x20, 0x5a, 0x27, 0x0e, - 0x77, 0x0b, 0x2b, 0x1e, 0x7a, 0xf7, 0x57, 0xfa, 0xc9, 0xe5, 0x24, 0x33, 0x9c, 0xe6, 0xff, 0xed, - 0x74, 0xe6, 0x78, 0x30, 0x96, 0x0a, 0xe2, 0xf7, 0x26, 0x94, 0xd4, 0x2b, 0x15, 0xb1, 0xe9, 0xb4, - 0x8f, 0x85, 0x99, 0xe4, 0xc0, 0xd5, 0x5f, 0xf1, 0x0b, 0xa9, 0x98, 0x9d, 0xfd, 0x77, 0x0b, 0x46, - 0x85, 0x7c, 0x10, 0x57, 0x61, 0xc0, 0x09, 0x43, 0x77, 0xd3, 0x23, 0xf5, 0x76, 0x1a, 0xca, 0x54, - 0x35, 0xec, 0xa5, 0xd0, 0x8c, 0x28, 0x89, 0x15, 0x0f, 0x74, 0x8d, 0x3b, 0xdf, 0xed, 0x92, 0x76, - 0xea, 0xc9, 0x14, 0x37, 0x90, 0xee, 0x79, 0xbb, 0x04, 0x8b, 0xf2, 0xe8, 0x93, 0xdc, 0x3b, 0xf2, - 0xba, 0xe7, 0xdf, 0xf1, 0xae, 0xfa, 0xbe, 0x8c, 0xf6, 0xd1, 0x1d, 0xc3, 0x71, 0xe9, 0x13, 0xa9, - 0x8a, 0x63, 0x93, 0x5b, 0x77, 0x51, 0x71, 0x3f, 0x0b, 0x2c, 0xf6, 0xbf, 0xf9, 0x28, 0x3c, 0x44, - 0x04, 0x46, 0x45, 0x78, 0x41, 0x09, 0x13, 0x7d, 0x97, 0x79, 0x95, 0x33, 0x4b, 0xc7, 0x8a, 0xf4, - 0xeb, 0x26, 0x0b, 0x9c, 0xe4, 0x69, 0xff, 0x8c, 0x05, 0xec, 0x81, 0xec, 0x31, 0xc8, 0x23, 0x1f, - 0x31, 0xe5, 0x91, 0xc9, 0xbc, 0x4e, 0xce, 0x11, 0x45, 0x5e, 0xe4, 0x33, 0xab, 0x12, 0xf8, 0x77, - 0xf7, 0x84, 0x4b, 0x4b, 0xe7, 0xfb, 0x87, 0xfd, 0xdf, 0x2d, 0xbe, 0x89, 0xc5, 0xe1, 0x04, 0x3e, - 0x07, 0x03, 0x35, 0xa7, 0xe9, 0xd4, 0x78, 0x86, 0xaa, 0x5c, 0x8d, 0x9e, 0x51, 0x68, 0x7a, 0x4e, - 0x94, 0xe0, 0x1a, 0x2a, 0x19, 0xa6, 0x72, 0x40, 0x82, 0x3b, 0x6a, 0xa5, 0x54, 0x95, 0x53, 0xdb, - 0x30, 0x6c, 0x30, 0x7b, 0xa8, 0xea, 0x8c, 0xcf, 0xf1, 0x23, 0x56, 0x85, 0x55, 0xdd, 0x81, 0x71, - 0x4f, 0xfb, 0x4f, 0x0f, 0x14, 0x79, 0xb9, 0x7c, 0xbc, 0xd3, 0x21, 0xca, 0x4e, 
0x1f, 0xed, 0xed, - 0x6d, 0x82, 0x0d, 0x4e, 0x73, 0xb6, 0x7f, 0xc2, 0x82, 0xd3, 0x3a, 0xa1, 0xf6, 0xbc, 0xa7, 0x93, - 0x91, 0x64, 0x1e, 0x06, 0xfc, 0x26, 0x09, 0x9c, 0xc8, 0x0f, 0xc4, 0xa9, 0x71, 0x49, 0x76, 0xfa, - 0x0d, 0x01, 0x3f, 0x10, 0xf9, 0x16, 0x24, 0x77, 0x09, 0xc7, 0xaa, 0x24, 0xbd, 0x7d, 0xb2, 0xce, - 0x08, 0xc5, 0x43, 0x2e, 0xb6, 0x07, 0x30, 0x7b, 0x7b, 0x88, 0x05, 0xc6, 0xfe, 0x33, 0x8b, 0x4f, - 0x2c, 0xbd, 0xe9, 0xe8, 0x6d, 0x18, 0xdb, 0x71, 0xa2, 0xda, 0xd6, 0xc2, 0xdd, 0x66, 0xc0, 0x4d, - 0x4e, 0xb2, 0x9f, 0x9e, 0xe9, 0xd4, 0x4f, 0xda, 0x47, 0xc6, 0x0e, 0x9f, 0x2b, 0x09, 0x66, 0x38, - 0xc5, 0x1e, 0xad, 0xc3, 0x20, 0x83, 0xb1, 0x37, 0x8a, 0x61, 0x3b, 0xd1, 0x20, 0xaf, 0x36, 0xe5, - 0xb2, 0xb0, 0x12, 0xf3, 0xc1, 0x3a, 0x53, 0xfb, 0xcb, 0x45, 0xbe, 0xda, 0x99, 0x28, 0xff, 0x14, - 0xf4, 0x37, 0xfd, 0xfa, 0xdc, 0xd2, 0x3c, 0x16, 0xa3, 0xa0, 0x8e, 0x91, 0x0a, 0x07, 0x63, 0x89, - 0x47, 0x97, 0x60, 0x40, 0xfc, 0x94, 0x26, 0x42, 0xb6, 0x37, 0x0b, 0xba, 0x10, 0x2b, 0x2c, 0x7a, - 0x1e, 0xa0, 0x19, 0xf8, 0xbb, 0x6e, 0x9d, 0xc5, 0x2c, 0x29, 0x9a, 0xde, 0x46, 0x15, 0x85, 0xc1, - 0x1a, 0x15, 0x7a, 0x15, 0x86, 0x5b, 0x5e, 0xc8, 0xc5, 0x11, 0x2d, 0x32, 0xb4, 0xf2, 0x83, 0xb9, - 0xa9, 0x23, 0xb1, 0x49, 0x8b, 0x66, 0xa0, 0x2f, 0x72, 0x98, 0xf7, 0x4c, 0x6f, 0xbe, 0x53, 0xf0, - 0x1a, 0xa5, 0xd0, 0x93, 0x21, 0xd1, 0x02, 0x58, 0x14, 0x44, 0x6f, 0xca, 0xe7, 0xc2, 0x7c, 0x63, - 0x17, 0xde, 0xf8, 0xdd, 0x1d, 0x02, 0xda, 0x63, 0x61, 0xe1, 0xe5, 0x6f, 0xf0, 0x42, 0xaf, 0x00, - 0x90, 0xbb, 0x11, 0x09, 0x3c, 0xa7, 0xa1, 0x7c, 0xde, 0x94, 0x5c, 0x30, 0xef, 0xaf, 0xfa, 0xd1, - 0xcd, 0x90, 0x2c, 0x28, 0x0a, 0xac, 0x51, 0xdb, 0xbf, 0x53, 0x02, 0x88, 0xe5, 0x76, 0x74, 0x2f, - 0xb5, 0x71, 0x3d, 0xdb, 0x5e, 0xd2, 0x3f, 0xba, 0x5d, 0x0b, 0x7d, 0x9f, 0x05, 0x83, 0x22, 0x34, - 0x0b, 0x1b, 0xa1, 0x42, 0xfb, 0x8d, 0xd3, 0x8c, 0x10, 0x43, 0x4b, 0xf0, 0x26, 0xbc, 0x20, 0x67, - 0xa8, 0x86, 0xe9, 0xd8, 0x0a, 0xbd, 0x62, 0xf4, 0x7e, 0x79, 0x55, 0x2c, 0x1a, 0x5d, 0xa9, 0xae, - 0x8a, 0x25, 0x76, 0x46, 0xe8, 0xb7, 0xc4, 0x9b, 0xc6, 0x2d, 0xb1, 0x27, 0xff, 0x3d, 0xa4, 0x21, - 0xbe, 0x76, 0xba, 0x20, 0xa2, 0x8a, 0x1e, 0x1b, 0xa1, 0x37, 0xff, 0x11, 0x9f, 0x76, 0x4f, 0xea, - 0x10, 0x17, 0xe1, 0x33, 0x30, 0x5a, 0x37, 0x85, 0x00, 0x31, 0x13, 0x9f, 0xcc, 0xe3, 0x9b, 0x90, - 0x19, 0xe2, 0x63, 0x3f, 0x81, 0xc0, 0x49, 0xc6, 0xa8, 0xc2, 0x43, 0x65, 0x2c, 0x79, 0x1b, 0xbe, - 0x78, 0x11, 0x62, 0xe7, 0x8e, 0xe5, 0x5e, 0x18, 0x91, 0x1d, 0x4a, 0x19, 0x9f, 0xee, 0xab, 0xa2, - 0x2c, 0x56, 0x5c, 0xd0, 0xeb, 0xd0, 0xc7, 0x5e, 0x71, 0x85, 0x93, 0x03, 0xf9, 0x1a, 0x67, 0x33, - 0x66, 0x60, 0xbc, 0x20, 0xd9, 0xdf, 0x10, 0x0b, 0x0e, 0xe8, 0x9a, 0x7c, 0x23, 0x19, 0x2e, 0x79, - 0x37, 0x43, 0xc2, 0xde, 0x48, 0x96, 0x66, 0x1f, 0x8f, 0x9f, 0x3f, 0x72, 0x78, 0x66, 0xca, 0x44, - 0xa3, 0x24, 0x95, 0xa2, 0xc4, 0x7f, 0x99, 0x89, 0x51, 0x44, 0x38, 0xca, 0x6c, 0x9e, 0x99, 0xad, - 0x31, 0xee, 0xce, 0x5b, 0x26, 0x0b, 0x9c, 0xe4, 0x49, 0x25, 0x52, 0xbe, 0xea, 0xc5, 0x9b, 0x92, - 0x4e, 0x7b, 0x07, 0xbf, 0x88, 0xb3, 0xd3, 0x88, 0x43, 0xb0, 0x28, 0x7f, 0xac, 0xe2, 0xc1, 0x94, - 0x07, 0x63, 0xc9, 0x25, 0xfa, 0x50, 0xc5, 0x91, 0x3f, 0xe9, 0x81, 0x11, 0x73, 0x4a, 0xa1, 0xcb, - 0x50, 0x12, 0x4c, 0x54, 0x36, 0x13, 0xb5, 0x4a, 0x56, 0x24, 0x02, 0xc7, 0x34, 0x2c, 0x89, 0x0d, - 0x2b, 0xae, 0x39, 0x11, 0xc7, 0x49, 0x6c, 0x14, 0x06, 0x6b, 0x54, 0xf4, 0x62, 0xb5, 0xee, 0xfb, - 0x91, 0x3a, 0x90, 0xd4, 0xbc, 0x9b, 0x65, 0x50, 0x2c, 0xb0, 0xf4, 0x20, 0xda, 0x26, 0x81, 0x47, - 0x1a, 0x66, 0x14, 0x71, 0x75, 0x10, 0x5d, 0xd7, 0x91, 0xd8, 0xa4, 0xa5, 0xc7, 0xa9, 0x1f, 0xb2, - 0x89, 
0x2c, 0xae, 0x6f, 0xb1, 0x53, 0x76, 0x95, 0x3f, 0x2f, 0x97, 0x78, 0xf4, 0x71, 0x38, 0xad, - 0x22, 0x76, 0x61, 0x6e, 0xcd, 0x90, 0x35, 0xf6, 0x19, 0xda, 0x96, 0xd3, 0x73, 0xd9, 0x64, 0x38, - 0xaf, 0x3c, 0x7a, 0x0d, 0x46, 0x84, 0x88, 0x2f, 0x39, 0xf6, 0x9b, 0x1e, 0x46, 0xd7, 0x0d, 0x2c, - 0x4e, 0x50, 0xcb, 0x38, 0xe8, 0x4c, 0xca, 0x96, 0x1c, 0x06, 0xd2, 0x71, 0xd0, 0x75, 0x3c, 0x4e, - 0x95, 0x40, 0x33, 0x30, 0xca, 0x65, 0x30, 0xd7, 0xdb, 0xe4, 0x63, 0x22, 0x9e, 0x7c, 0xa9, 0x25, - 0x75, 0xc3, 0x44, 0xe3, 0x24, 0x3d, 0x7a, 0x19, 0x86, 0x9c, 0xa0, 0xb6, 0xe5, 0x46, 0xa4, 0x16, - 0xb5, 0x02, 0xfe, 0x16, 0x4c, 0x73, 0xd1, 0x9a, 0xd1, 0x70, 0xd8, 0xa0, 0xb4, 0xef, 0xc1, 0x44, - 0x46, 0xdc, 0x09, 0x3a, 0x71, 0x9c, 0xa6, 0x2b, 0xbf, 0x29, 0xe1, 0x07, 0x3d, 0x53, 0x59, 0x92, - 0x5f, 0xa3, 0x51, 0xd1, 0xd9, 0xc9, 0xe2, 0x53, 0x68, 0x89, 0x57, 0xd5, 0xec, 0x5c, 0x94, 0x08, - 0x1c, 0xd3, 0xd8, 0xff, 0xa9, 0x00, 0xa3, 0x19, 0xb6, 0x15, 0x96, 0xfc, 0x33, 0x71, 0x49, 0x89, - 0x73, 0x7d, 0x9a, 0x61, 0xf5, 0x0b, 0x87, 0x08, 0xab, 0x5f, 0xec, 0x14, 0x56, 0xbf, 0xe7, 0x9d, - 0x84, 0xd5, 0x37, 0x7b, 0xac, 0xb7, 0xab, 0x1e, 0xcb, 0x08, 0xc5, 0xdf, 0x77, 0xc8, 0x50, 0xfc, - 0x46, 0xa7, 0xf7, 0x77, 0xd1, 0xe9, 0x3f, 0x52, 0x80, 0xb1, 0xa4, 0x2b, 0xe9, 0x31, 0xe8, 0x6d, - 0x5f, 0x37, 0xf4, 0xb6, 0x97, 0xba, 0x79, 0xa2, 0x9b, 0xab, 0xc3, 0xc5, 0x09, 0x1d, 0xee, 0xd3, - 0x5d, 0x71, 0x6b, 0xaf, 0xcf, 0xfd, 0xc9, 0x02, 0x9c, 0xcc, 0x7c, 0x23, 0x7c, 0x0c, 0x7d, 0x73, - 0xc3, 0xe8, 0x9b, 0xe7, 0xba, 0x7e, 0xbe, 0x9c, 0xdb, 0x41, 0xb7, 0x13, 0x1d, 0x74, 0xb9, 0x7b, - 0x96, 0xed, 0x7b, 0xe9, 0xeb, 0x45, 0x38, 0x9f, 0x59, 0x2e, 0x56, 0x7b, 0x2e, 0x1a, 0x6a, 0xcf, - 0xe7, 0x13, 0x6a, 0x4f, 0xbb, 0x7d, 0xe9, 0xa3, 0xd1, 0x83, 0x8a, 0x67, 0xbc, 0x2c, 0x18, 0xc1, - 0x03, 0xea, 0x40, 0x8d, 0x67, 0xbc, 0x8a, 0x11, 0x36, 0xf9, 0x7e, 0x3b, 0xe9, 0x3e, 0x7f, 0xdb, - 0x82, 0x33, 0x99, 0x63, 0x73, 0x0c, 0xba, 0xae, 0x55, 0x53, 0xd7, 0xf5, 0x54, 0xd7, 0xb3, 0x35, - 0x47, 0xf9, 0xf5, 0xd3, 0xbd, 0x39, 0xdf, 0xc2, 0x6e, 0xf2, 0x37, 0x60, 0xd0, 0xa9, 0xd5, 0x48, - 0x18, 0xae, 0xf8, 0x75, 0x15, 0x81, 0xfb, 0x39, 0x76, 0xcf, 0x8a, 0xc1, 0x07, 0xfb, 0xe5, 0xa9, - 0x24, 0x8b, 0x18, 0x8d, 0x75, 0x0e, 0xe8, 0x93, 0x30, 0x10, 0x8a, 0x73, 0x53, 0x8c, 0xfd, 0x0b, - 0x5d, 0x76, 0x8e, 0xb3, 0x4e, 0x1a, 0x66, 0xa8, 0x27, 0xa5, 0xa9, 0x50, 0x2c, 0xcd, 0xb0, 0x30, - 0x85, 0x23, 0x0d, 0x0b, 0xf3, 0x3c, 0xc0, 0xae, 0xba, 0x0c, 0x24, 0xf5, 0x0f, 0xda, 0x35, 0x41, - 0xa3, 0x42, 0x1f, 0x85, 0xb1, 0x90, 0xc7, 0x42, 0x9c, 0x6b, 0x38, 0x21, 0x7b, 0x6d, 0x23, 0x66, - 0x21, 0x0b, 0x27, 0x55, 0x4d, 0xe0, 0x70, 0x8a, 0x1a, 0x2d, 0xca, 0x5a, 0x59, 0xe0, 0x46, 0x3e, - 0x31, 0x2f, 0xc6, 0x35, 0x8a, 0xd4, 0xe3, 0x27, 0x92, 0xdd, 0xcf, 0x3a, 0x5e, 0x2b, 0x89, 0x3e, - 0x09, 0x40, 0xa7, 0x8f, 0xd0, 0x43, 0xf4, 0xe7, 0x6f, 0x9e, 0x74, 0x57, 0xa9, 0x67, 0x3a, 0x37, - 0xb3, 0x97, 0xb7, 0xf3, 0x8a, 0x09, 0xd6, 0x18, 0x22, 0x07, 0x86, 0xe3, 0x7f, 0x71, 0x66, 0xde, - 0x4b, 0xb9, 0x35, 0x24, 0x99, 0x33, 0x95, 0xf7, 0xbc, 0xce, 0x02, 0x9b, 0x1c, 0xed, 0x7f, 0x37, - 0x00, 0x8f, 0xb4, 0xd9, 0x86, 0xd1, 0x8c, 0x69, 0xea, 0x7d, 0x26, 0x79, 0x7f, 0x9f, 0xca, 0x2c, - 0x6c, 0x5c, 0xe8, 0x13, 0xb3, 0xbd, 0xf0, 0x8e, 0x67, 0xfb, 0x0f, 0x59, 0x9a, 0x66, 0x85, 0x3b, - 0x95, 0x7e, 0xe4, 0x90, 0xc7, 0xcb, 0x11, 0xaa, 0x5a, 0x36, 0x32, 0xf4, 0x15, 0xcf, 0x77, 0xdd, - 0x9c, 0xee, 0x15, 0x18, 0x5f, 0xcd, 0x0e, 0x00, 0xcc, 0x55, 0x19, 0x57, 0x0f, 0xfb, 0xfd, 0xc7, - 0x15, 0x0c, 0xf8, 0xf7, 0x2d, 0x38, 0x93, 0x02, 0xf3, 0x36, 0x90, 0x50, 0xc4, 0xa8, 0x5a, 0x7d, - 0xc7, 0x8d, 0x97, 0x0c, 0xf9, 
0x37, 0x5c, 0x13, 0xdf, 0x70, 0x26, 0x97, 0x2e, 0xd9, 0xf4, 0x1f, - 0xfc, 0xa3, 0xf2, 0x04, 0xab, 0xc0, 0x24, 0xc4, 0xf9, 0x4d, 0x3f, 0xde, 0x8b, 0xff, 0x37, 0x27, - 0xf6, 0xf1, 0xd4, 0x32, 0x9c, 0x6f, 0xdf, 0xd5, 0x87, 0x7a, 0x9e, 0xfc, 0x7b, 0x16, 0x9c, 0x6b, - 0x1b, 0x03, 0xe7, 0x5b, 0x50, 0xce, 0xb5, 0x3f, 0x6f, 0xc1, 0xa3, 0x99, 0x25, 0x0c, 0xef, 0xb8, - 0xcb, 0x50, 0xaa, 0x25, 0xf2, 0xa1, 0xc6, 0xd1, 0x20, 0x54, 0x2e, 0xd4, 0x98, 0xc6, 0x70, 0x82, - 0x2b, 0x74, 0x74, 0x82, 0xfb, 0x0d, 0x0b, 0x52, 0x67, 0xd5, 0x31, 0x08, 0x4d, 0x4b, 0xa6, 0xd0, - 0xf4, 0x78, 0x37, 0xbd, 0x99, 0x23, 0x2f, 0xfd, 0xc5, 0x28, 0x9c, 0xca, 0x79, 0x5d, 0xb8, 0x0b, - 0xe3, 0x9b, 0x35, 0x62, 0x3e, 0x27, 0x6f, 0x17, 0x66, 0xa9, 0xed, 0xdb, 0x73, 0x9e, 0x86, 0x36, - 0x45, 0x82, 0xd3, 0x55, 0xa0, 0xcf, 0x5b, 0x70, 0xc2, 0xb9, 0x13, 0x2e, 0x50, 0xe1, 0xd7, 0xad, - 0xcd, 0x36, 0xfc, 0xda, 0x36, 0x95, 0x2c, 0xe4, 0xb2, 0x7a, 0x31, 0x53, 0x21, 0x79, 0xbb, 0x9a, - 0xa2, 0x37, 0xaa, 0x67, 0x49, 0xc7, 0xb3, 0xa8, 0x70, 0x66, 0x5d, 0x08, 0x8b, 0xfc, 0x28, 0xf4, - 0x6a, 0xdd, 0x26, 0xe0, 0x41, 0xd6, 0x33, 0x50, 0x2e, 0xcd, 0x49, 0x0c, 0x56, 0x7c, 0xd0, 0xa7, - 0xa1, 0xb4, 0x29, 0xdf, 0x36, 0x67, 0x48, 0x8b, 0x71, 0x47, 0xb6, 0x7f, 0xf1, 0xcd, 0xbd, 0x0a, - 0x14, 0x11, 0x8e, 0x99, 0xa2, 0xd7, 0xa0, 0xe8, 0x6d, 0x84, 0xed, 0xf2, 0x76, 0x27, 0xdc, 0x47, - 0x79, 0x58, 0x91, 0xd5, 0xc5, 0x2a, 0xa6, 0x05, 0xd1, 0x35, 0x28, 0x06, 0xeb, 0x75, 0xa1, 0x4d, - 0xcf, 0x5c, 0xa4, 0x78, 0x76, 0x3e, 0xa7, 0x55, 0x8c, 0x13, 0x9e, 0x9d, 0xc7, 0x94, 0x05, 0xaa, - 0x40, 0x2f, 0x7b, 0x92, 0x27, 0x64, 0xb3, 0xcc, 0x5b, 0x68, 0x9b, 0xa7, 0xad, 0x3c, 0xf6, 0x08, - 0x23, 0xc0, 0x9c, 0x11, 0x5a, 0x83, 0xbe, 0x1a, 0xcb, 0xf1, 0x2c, 0x84, 0xb1, 0xf7, 0x67, 0xea, - 0xcd, 0xdb, 0x24, 0xbf, 0x16, 0x6a, 0x64, 0x46, 0x81, 0x05, 0x2f, 0xc6, 0x95, 0x34, 0xb7, 0x36, - 0x42, 0xa6, 0x77, 0xcb, 0xe3, 0xda, 0x26, 0xa7, 0xbb, 0xe0, 0xca, 0x28, 0xb0, 0xe0, 0x85, 0x5e, - 0x81, 0xc2, 0x46, 0x4d, 0x3c, 0xb7, 0xcb, 0x54, 0xa0, 0x9b, 0x91, 0x61, 0x66, 0xfb, 0xee, 0xef, - 0x97, 0x0b, 0x8b, 0x73, 0xb8, 0xb0, 0x51, 0x43, 0xab, 0xd0, 0xbf, 0xc1, 0x63, 0x49, 0x08, 0x1d, - 0xf9, 0x93, 0xd9, 0x61, 0x2e, 0x52, 0xe1, 0x26, 0xf8, 0xd3, 0x2d, 0x81, 0xc0, 0x92, 0x09, 0x4b, - 0xd7, 0xa1, 0x62, 0x62, 0x88, 0x90, 0x7c, 0xd3, 0x87, 0x8b, 0x63, 0xc2, 0x65, 0xe5, 0x38, 0xb2, - 0x06, 0xd6, 0x38, 0xd2, 0x59, 0xed, 0xdc, 0x6b, 0x05, 0x2c, 0x5e, 0xbb, 0x88, 0xdd, 0x94, 0x39, - 0xab, 0x67, 0x24, 0x51, 0xbb, 0x59, 0xad, 0x88, 0x70, 0xcc, 0x14, 0x6d, 0xc3, 0xf0, 0x6e, 0xd8, - 0xdc, 0x22, 0x72, 0x49, 0xb3, 0x50, 0x4e, 0x39, 0xb2, 0xde, 0x2d, 0x41, 0xe8, 0x06, 0x51, 0xcb, - 0x69, 0xa4, 0x76, 0x21, 0x26, 0x97, 0xdf, 0xd2, 0x99, 0x61, 0x93, 0x37, 0xed, 0xfe, 0xb7, 0x5b, - 0xfe, 0xfa, 0x5e, 0x44, 0x44, 0x24, 0xbd, 0xcc, 0xee, 0x7f, 0x83, 0x93, 0xa4, 0xbb, 0x5f, 0x20, - 0xb0, 0x64, 0x82, 0x6e, 0x89, 0xee, 0x61, 0xbb, 0xe7, 0x58, 0x7e, 0x98, 0xde, 0x19, 0x49, 0x94, - 0xd3, 0x29, 0x6c, 0xb7, 0x8c, 0x59, 0xb1, 0x5d, 0xb2, 0xb9, 0xe5, 0x47, 0xbe, 0x97, 0xd8, 0xa1, - 0xc7, 0xf3, 0x77, 0xc9, 0x4a, 0x06, 0x7d, 0x7a, 0x97, 0xcc, 0xa2, 0xc2, 0x99, 0x75, 0xa1, 0x3a, - 0x8c, 0x34, 0xfd, 0x20, 0xba, 0xe3, 0x07, 0x72, 0x7e, 0xa1, 0x36, 0x3a, 0x3e, 0x83, 0x52, 0xd4, - 0xc8, 0x82, 0x54, 0x9a, 0x18, 0x9c, 0xe0, 0x89, 0x3e, 0x06, 0xfd, 0x61, 0xcd, 0x69, 0x90, 0xa5, - 0x1b, 0x93, 0x13, 0xf9, 0xc7, 0x4f, 0x95, 0x93, 0xe4, 0xcc, 0x2e, 0x1e, 0x0a, 0x84, 0x93, 0x60, - 0xc9, 0x0e, 0x2d, 0x42, 0x2f, 0x4b, 0x83, 0xc9, 0xc2, 0x3e, 0xe6, 0x44, 0x1b, 0x4e, 0x39, 0xf3, - 0xf3, 0xbd, 0x89, 0x81, 0x31, 0x2f, 0x4e, 0xd7, 0x80, 
0xb8, 0xea, 0xfa, 0xe1, 0xe4, 0xc9, 0xfc, - 0x35, 0x20, 0x6e, 0xc8, 0x37, 0xaa, 0xed, 0xd6, 0x80, 0x22, 0xc2, 0x31, 0x53, 0xba, 0x33, 0xd3, - 0xdd, 0xf4, 0x54, 0x1b, 0x2f, 0xb4, 0xdc, 0xbd, 0x94, 0xed, 0xcc, 0x74, 0x27, 0xa5, 0x2c, 0xec, - 0x3f, 0xee, 0x4f, 0xcb, 0x2c, 0x4c, 0x39, 0xf2, 0xbf, 0x5b, 0x29, 0xbb, 0xf9, 0x07, 0xba, 0xd5, - 0xd5, 0x1e, 0xe1, 0xb5, 0xee, 0xf3, 0x16, 0x9c, 0x6a, 0x66, 0x7e, 0x88, 0x10, 0x00, 0xba, 0x53, - 0xf9, 0xf2, 0x4f, 0x57, 0x21, 0x42, 0xb3, 0xf1, 0x38, 0xa7, 0xa6, 0xe4, 0xd5, 0xb9, 0xf8, 0x8e, - 0xaf, 0xce, 0x2b, 0x30, 0x50, 0xe3, 0xf7, 0x1c, 0x19, 0xda, 0xba, 0xab, 0x00, 0x77, 0x4c, 0x94, - 0x10, 0x17, 0xa4, 0x0d, 0xac, 0x58, 0xa0, 0x1f, 0xb6, 0xe0, 0x5c, 0xb2, 0xe9, 0x98, 0x30, 0xb4, - 0x88, 0x2b, 0xca, 0xf5, 0x32, 0x8b, 0xe2, 0xfb, 0x53, 0xf2, 0xbf, 0x41, 0x7c, 0xd0, 0x89, 0x00, - 0xb7, 0xaf, 0x0c, 0xcd, 0x67, 0x28, 0x86, 0xfa, 0x4c, 0x63, 0x58, 0x17, 0xca, 0xa1, 0x17, 0x61, - 0x68, 0xc7, 0x6f, 0x79, 0x91, 0x70, 0x5a, 0x13, 0x0e, 0x34, 0xcc, 0x71, 0x64, 0x45, 0x83, 0x63, - 0x83, 0x2a, 0xa1, 0x52, 0x1a, 0x78, 0x60, 0x95, 0xd2, 0x5b, 0x30, 0xe4, 0x69, 0x5e, 0xd6, 0x42, - 0x1e, 0xb8, 0x98, 0x1f, 0x13, 0x58, 0xf7, 0xc9, 0xe6, 0xad, 0xd4, 0x21, 0xd8, 0xe0, 0x76, 0xbc, - 0xde, 0x6c, 0x3f, 0x5f, 0xc8, 0x10, 0xea, 0xb9, 0x5a, 0xe9, 0xc3, 0xa6, 0x5a, 0xe9, 0x62, 0x52, - 0xad, 0x94, 0x32, 0x84, 0x18, 0x1a, 0xa5, 0xee, 0x53, 0x64, 0x75, 0x1d, 0x57, 0xf4, 0xbb, 0x2d, - 0x38, 0xcd, 0x34, 0xeb, 0xb4, 0x82, 0x77, 0xac, 0x4d, 0x7f, 0xe4, 0xfe, 0x7e, 0xf9, 0xf4, 0x72, - 0x36, 0x3b, 0x9c, 0x57, 0x8f, 0xdd, 0x80, 0x0b, 0x9d, 0x8e, 0x46, 0xe6, 0x41, 0x59, 0x57, 0xa6, - 0xf7, 0xd8, 0x83, 0xb2, 0xbe, 0x34, 0x8f, 0x19, 0xa6, 0xdb, 0xa8, 0x59, 0xf6, 0x7f, 0xb0, 0xa0, - 0x58, 0xf1, 0xeb, 0xc7, 0x70, 0xe9, 0xfe, 0x88, 0x71, 0xe9, 0x7e, 0x24, 0xfb, 0x50, 0xae, 0xe7, - 0x9a, 0x92, 0x16, 0x12, 0xa6, 0xa4, 0x73, 0x79, 0x0c, 0xda, 0x1b, 0x8e, 0x7e, 0xaa, 0x08, 0x83, - 0x15, 0xbf, 0xae, 0x9e, 0x2f, 0xfc, 0xe3, 0x07, 0x79, 0xbe, 0x90, 0x9b, 0xf4, 0x44, 0xe3, 0xcc, - 0x1c, 0x2f, 0xe5, 0xcb, 0xed, 0x6f, 0xb1, 0x57, 0x0c, 0xb7, 0x89, 0xbb, 0xb9, 0x15, 0x91, 0x7a, - 0xf2, 0x73, 0x8e, 0xef, 0x15, 0xc3, 0x1f, 0x17, 0x60, 0x34, 0x51, 0x3b, 0x6a, 0xc0, 0x70, 0x43, - 0x37, 0x54, 0x88, 0x79, 0xfa, 0x40, 0x36, 0x0e, 0xe1, 0x05, 0xae, 0x81, 0xb0, 0xc9, 0x1c, 0x4d, - 0x03, 0x28, 0xcb, 0xbd, 0x54, 0x57, 0xb3, 0x9b, 0x87, 0x32, 0xed, 0x87, 0x58, 0xa3, 0x40, 0x2f, - 0xc1, 0x60, 0xe4, 0x37, 0xfd, 0x86, 0xbf, 0xb9, 0x77, 0x9d, 0xc8, 0x80, 0x6a, 0xca, 0xb7, 0x73, - 0x2d, 0x46, 0x61, 0x9d, 0x0e, 0xdd, 0x85, 0x71, 0xc5, 0xa4, 0x7a, 0x04, 0xc6, 0x1b, 0xa6, 0xd9, - 0x58, 0x4d, 0x72, 0xc4, 0xe9, 0x4a, 0xec, 0x9f, 0x2d, 0xf2, 0x2e, 0xf6, 0x22, 0xf7, 0xbd, 0xd5, - 0xf0, 0xee, 0x5e, 0x0d, 0x5f, 0xb7, 0x60, 0x8c, 0xd6, 0xce, 0x1c, 0xd7, 0xa4, 0xa8, 0xa1, 0x22, - 0xa1, 0x5b, 0x6d, 0x22, 0xa1, 0x5f, 0xa4, 0xbb, 0x66, 0xdd, 0x6f, 0x45, 0x42, 0x7f, 0xa8, 0x6d, - 0x8b, 0x14, 0x8a, 0x05, 0x56, 0xd0, 0x91, 0x20, 0x10, 0x8f, 0x6d, 0x75, 0x3a, 0x12, 0x04, 0x58, - 0x60, 0x65, 0xa0, 0xf4, 0x9e, 0xec, 0x40, 0xe9, 0x3c, 0xde, 0xad, 0x70, 0x71, 0x12, 0x42, 0x9f, - 0x16, 0xef, 0x56, 0xfa, 0x3e, 0xc5, 0x34, 0xf6, 0x57, 0x8b, 0x30, 0x54, 0xf1, 0xeb, 0xb1, 0xd5, - 0xfe, 0x45, 0xc3, 0x6a, 0x7f, 0x21, 0x61, 0xb5, 0x1f, 0xd3, 0x69, 0xdf, 0xb3, 0xd1, 0x7f, 0xb3, - 0x6c, 0xf4, 0xbf, 0x6e, 0xb1, 0x51, 0x9b, 0x5f, 0xad, 0x72, 0x3f, 0x48, 0x74, 0x05, 0x06, 0xd9, - 0x06, 0xc3, 0x5e, 0x77, 0x4b, 0x53, 0x36, 0x4b, 0x5c, 0xb6, 0x1a, 0x83, 0xb1, 0x4e, 0x83, 0x2e, - 0xc1, 0x40, 0x48, 0x9c, 0xa0, 0xb6, 0xa5, 0x76, 0x57, 0x61, 0x77, 0xe6, 0x30, 
0xac, 0xb0, 0xe8, - 0x8d, 0x38, 0xd4, 0x6a, 0x31, 0xff, 0xb5, 0xa8, 0xde, 0x1e, 0xbe, 0x44, 0xf2, 0xe3, 0xab, 0xda, - 0xb7, 0x01, 0xa5, 0xe9, 0xbb, 0x08, 0x06, 0x58, 0x36, 0x83, 0x01, 0x96, 0x52, 0x81, 0x00, 0xff, - 0xca, 0x82, 0x91, 0x8a, 0x5f, 0xa7, 0x4b, 0xf7, 0xdb, 0x69, 0x9d, 0xea, 0x71, 0xa6, 0xfb, 0xda, - 0xc4, 0x99, 0x7e, 0x0c, 0x7a, 0x2b, 0x7e, 0xbd, 0x43, 0xc0, 0xc2, 0xbf, 0x61, 0x41, 0x7f, 0xc5, - 0xaf, 0x1f, 0x83, 0x69, 0xe2, 0xc3, 0xa6, 0x69, 0xe2, 0x74, 0xce, 0xbc, 0xc9, 0xb1, 0x46, 0xfc, - 0xff, 0x3d, 0x30, 0x4c, 0xdb, 0xe9, 0x6f, 0xca, 0xa1, 0x34, 0xba, 0xcd, 0xea, 0xa2, 0xdb, 0xa8, - 0x14, 0xee, 0x37, 0x1a, 0xfe, 0x9d, 0xe4, 0xb0, 0x2e, 0x32, 0x28, 0x16, 0x58, 0xf4, 0x2c, 0x0c, - 0x34, 0x03, 0xb2, 0xeb, 0xfa, 0x42, 0xbc, 0xd5, 0x0c, 0x3d, 0x15, 0x01, 0xc7, 0x8a, 0x82, 0x5e, - 0x4d, 0x43, 0xd7, 0xa3, 0x47, 0x79, 0xcd, 0xf7, 0xea, 0x5c, 0x7b, 0x5f, 0x14, 0xc9, 0x50, 0x34, - 0x38, 0x36, 0xa8, 0xd0, 0x6d, 0x28, 0xb1, 0xff, 0x6c, 0xdb, 0x39, 0x7c, 0x1a, 0x66, 0x91, 0x1e, - 0x52, 0x30, 0xc0, 0x31, 0x2f, 0xf4, 0x3c, 0x40, 0x24, 0x13, 0x0a, 0x84, 0x22, 0x70, 0x9d, 0xba, - 0x0a, 0xa8, 0x54, 0x03, 0x21, 0xd6, 0xa8, 0xd0, 0x33, 0x50, 0x8a, 0x1c, 0xb7, 0xb1, 0xec, 0x7a, - 0xcc, 0xfe, 0x4b, 0xdb, 0x2f, 0xb2, 0x34, 0x0a, 0x20, 0x8e, 0xf1, 0x54, 0x14, 0x63, 0x41, 0x4d, - 0x78, 0x12, 0xfa, 0x01, 0x46, 0xcd, 0x44, 0xb1, 0x65, 0x05, 0xc5, 0x1a, 0x05, 0xda, 0x82, 0xb3, - 0xae, 0xc7, 0x12, 0x87, 0x90, 0xea, 0xb6, 0xdb, 0x5c, 0x5b, 0xae, 0xde, 0x22, 0x81, 0xbb, 0xb1, - 0x37, 0xeb, 0xd4, 0xb6, 0x89, 0x27, 0x13, 0xec, 0xca, 0xbc, 0xeb, 0x67, 0x97, 0xda, 0xd0, 0xe2, - 0xb6, 0x9c, 0xec, 0x17, 0xd8, 0x7c, 0xbf, 0x51, 0x45, 0x4f, 0x1b, 0x5b, 0xc7, 0x29, 0x7d, 0xeb, - 0x38, 0xd8, 0x2f, 0xf7, 0xdd, 0xa8, 0x6a, 0x31, 0x39, 0x5e, 0x86, 0x93, 0x15, 0xbf, 0x5e, 0xf1, - 0x83, 0x68, 0xd1, 0x0f, 0xee, 0x38, 0x41, 0x5d, 0x4e, 0xaf, 0xb2, 0x8c, 0x4a, 0x42, 0xf7, 0xcf, - 0x5e, 0xbe, 0xbb, 0x18, 0x11, 0x47, 0x5e, 0x60, 0x12, 0xdb, 0x21, 0xdf, 0xd2, 0xd5, 0x98, 0xec, - 0xa0, 0x52, 0xef, 0x5c, 0x75, 0x22, 0x82, 0x6e, 0xb0, 0x14, 0xfa, 0xf1, 0x31, 0x2a, 0x8a, 0x3f, - 0xa5, 0xa5, 0xd0, 0x8f, 0x91, 0x99, 0xe7, 0xae, 0x59, 0xde, 0xfe, 0x9c, 0xa8, 0x84, 0xeb, 0x01, - 0xb8, 0xbf, 0x62, 0x37, 0x39, 0xa8, 0x65, 0x6e, 0x8e, 0x42, 0x7e, 0x52, 0x07, 0x6e, 0x79, 0x6d, - 0x9b, 0x9b, 0xc3, 0xfe, 0x4e, 0x38, 0x95, 0xac, 0xbe, 0xeb, 0x44, 0xd8, 0x73, 0x30, 0x1e, 0xe8, - 0x05, 0xb5, 0x44, 0x67, 0x27, 0x79, 0x3e, 0x85, 0x04, 0x12, 0xa7, 0xe9, 0xed, 0x97, 0x60, 0x9c, - 0xde, 0x3d, 0x95, 0x20, 0xc7, 0x7a, 0xb9, 0x73, 0x78, 0x96, 0xff, 0xd8, 0xcb, 0x0e, 0xa2, 0x44, - 0xd6, 0x1b, 0xf4, 0x29, 0x18, 0x09, 0xc9, 0xb2, 0xeb, 0xb5, 0xee, 0x4a, 0xed, 0x53, 0x9b, 0x47, - 0xa4, 0xd5, 0x05, 0x9d, 0x92, 0xeb, 0xb0, 0x4d, 0x18, 0x4e, 0x70, 0x43, 0x3b, 0x30, 0x72, 0xc7, - 0xf5, 0xea, 0xfe, 0x9d, 0x50, 0xf2, 0x1f, 0xc8, 0x57, 0x65, 0xdf, 0xe6, 0x94, 0x89, 0x36, 0x1a, - 0xd5, 0xdd, 0x36, 0x98, 0xe1, 0x04, 0x73, 0xba, 0xd8, 0x83, 0x96, 0x37, 0x13, 0xde, 0x0c, 0x09, - 0x7f, 0x16, 0x28, 0x16, 0x3b, 0x96, 0x40, 0x1c, 0xe3, 0xe9, 0x62, 0x67, 0x7f, 0xae, 0x06, 0x7e, - 0x8b, 0xa7, 0x58, 0x11, 0x8b, 0x1d, 0x2b, 0x28, 0xd6, 0x28, 0xe8, 0x66, 0xc8, 0xfe, 0xad, 0xfa, - 0x1e, 0xf6, 0xfd, 0x48, 0x6e, 0x9f, 0x2c, 0x45, 0x98, 0x06, 0xc7, 0x06, 0x15, 0x5a, 0x04, 0x14, - 0xb6, 0x9a, 0xcd, 0x06, 0xf3, 0x4e, 0x73, 0x1a, 0x8c, 0x15, 0x77, 0xdb, 0x29, 0xf2, 0x10, 0xd1, - 0xd5, 0x14, 0x16, 0x67, 0x94, 0xa0, 0xe7, 0xe2, 0x86, 0x68, 0x6a, 0x2f, 0x6b, 0x2a, 0x37, 0x7b, - 0x55, 0x79, 0x3b, 0x25, 0x0e, 0x2d, 0x40, 0x7f, 0xb8, 0x17, 0xd6, 0xa2, 0x46, 0xd8, 0x2e, 0x21, - 0x5b, 
0x95, 0x91, 0x68, 0xf9, 0x40, 0x79, 0x11, 0x2c, 0xcb, 0xa2, 0x1a, 0x4c, 0x08, 0x8e, 0x73, - 0x5b, 0x8e, 0xa7, 0xd2, 0x44, 0x71, 0x27, 0xfd, 0x2b, 0xf7, 0xf7, 0xcb, 0x13, 0xa2, 0x66, 0x1d, - 0x7d, 0xb0, 0x5f, 0xa6, 0x8b, 0x23, 0x03, 0x83, 0xb3, 0xb8, 0xf1, 0xc9, 0x57, 0xab, 0xf9, 0x3b, - 0xcd, 0x4a, 0xe0, 0x6f, 0xb8, 0x0d, 0xd2, 0xce, 0x74, 0x58, 0x35, 0x28, 0xc5, 0xe4, 0x33, 0x60, - 0x38, 0xc1, 0xcd, 0xfe, 0x1c, 0x93, 0x1d, 0xab, 0xee, 0xa6, 0xe7, 0x44, 0xad, 0x80, 0xa0, 0x1d, - 0x18, 0x6e, 0xb2, 0xdd, 0x45, 0x24, 0x3e, 0x11, 0x73, 0xfd, 0xc5, 0x2e, 0xd5, 0x4f, 0x77, 0x58, - 0xea, 0x36, 0xc3, 0xd5, 0xad, 0xa2, 0xb3, 0xc3, 0x26, 0x77, 0xfb, 0x5f, 0x9c, 0x61, 0xd2, 0x47, - 0x95, 0xeb, 0x94, 0xfa, 0xc5, 0x9b, 0x20, 0x71, 0x8d, 0x9d, 0xca, 0x57, 0xb0, 0xc6, 0xc3, 0x22, - 0xde, 0x15, 0x61, 0x59, 0x16, 0x7d, 0x12, 0x46, 0xe8, 0xad, 0x50, 0x49, 0x00, 0xe1, 0xe4, 0x89, - 0xfc, 0xd8, 0x2d, 0x8a, 0x4a, 0x4f, 0x8a, 0xa4, 0x17, 0xc6, 0x09, 0x66, 0xe8, 0x0d, 0xe6, 0x5a, - 0x26, 0x59, 0x17, 0xba, 0x61, 0xad, 0x7b, 0x91, 0x49, 0xb6, 0x1a, 0x13, 0xd4, 0x82, 0x89, 0x74, - 0xea, 0xc7, 0x70, 0xd2, 0xce, 0x17, 0xaf, 0xd3, 0xd9, 0x1b, 0xe3, 0xec, 0x35, 0x69, 0x5c, 0x88, - 0xb3, 0xf8, 0xa3, 0xe5, 0x64, 0x62, 0xbe, 0xa2, 0xa1, 0xf7, 0x4d, 0x25, 0xe7, 0x1b, 0x6e, 0x9b, - 0x93, 0x6f, 0x13, 0xce, 0x69, 0xb9, 0xcd, 0xae, 0x06, 0x0e, 0x73, 0xde, 0x70, 0xd9, 0x76, 0xaa, - 0xc9, 0x45, 0x8f, 0xde, 0xdf, 0x2f, 0x9f, 0x5b, 0x6b, 0x47, 0x88, 0xdb, 0xf3, 0x41, 0x37, 0xe0, - 0x24, 0x8f, 0x3c, 0x30, 0x4f, 0x9c, 0x7a, 0xc3, 0xf5, 0x94, 0xe0, 0xc5, 0x97, 0xfc, 0x99, 0xfb, - 0xfb, 0xe5, 0x93, 0x33, 0x59, 0x04, 0x38, 0xbb, 0x1c, 0xfa, 0x30, 0x94, 0xea, 0x5e, 0x28, 0xfa, - 0xa0, 0xcf, 0x48, 0x1f, 0x57, 0x9a, 0x5f, 0xad, 0xaa, 0xef, 0x8f, 0xff, 0xe0, 0xb8, 0x00, 0xda, - 0xe4, 0xb6, 0x01, 0xa5, 0x2d, 0xea, 0x4f, 0x45, 0x5e, 0x4b, 0x2a, 0x54, 0x8d, 0xb7, 0xc7, 0xdc, - 0x28, 0xa6, 0x9e, 0xe4, 0x18, 0xcf, 0x92, 0x0d, 0xc6, 0xe8, 0x75, 0x40, 0x22, 0x4d, 0xc1, 0x4c, - 0x8d, 0x65, 0xd5, 0x61, 0x47, 0xe3, 0x80, 0xf9, 0x1a, 0xb6, 0x9a, 0xa2, 0xc0, 0x19, 0xa5, 0xd0, - 0x35, 0xba, 0xab, 0xe8, 0x50, 0xb1, 0x6b, 0xa9, 0x24, 0xa5, 0xf3, 0xa4, 0x19, 0x10, 0xe6, 0x63, - 0x66, 0x72, 0xc4, 0x89, 0x72, 0xa8, 0x0e, 0x67, 0x9d, 0x56, 0xe4, 0x33, 0xb3, 0x8b, 0x49, 0xba, - 0xe6, 0x6f, 0x13, 0x8f, 0x59, 0x3c, 0x07, 0x66, 0x2f, 0x50, 0xc9, 0x6e, 0xa6, 0x0d, 0x1d, 0x6e, - 0xcb, 0x85, 0x4a, 0xe4, 0x2a, 0x2b, 0x39, 0x98, 0xf1, 0xe4, 0x32, 0x32, 0x93, 0xbf, 0x04, 0x83, - 0x5b, 0x7e, 0x18, 0xad, 0x92, 0xe8, 0x8e, 0x1f, 0x6c, 0x8b, 0xb8, 0xc8, 0x71, 0x2c, 0xfa, 0x18, - 0x85, 0x75, 0x3a, 0x7a, 0xe5, 0x66, 0xfe, 0x38, 0x4b, 0xf3, 0xcc, 0x15, 0x62, 0x20, 0xde, 0x63, - 0xae, 0x71, 0x30, 0x96, 0x78, 0x49, 0xba, 0x54, 0x99, 0x63, 0x6e, 0x0d, 0x09, 0xd2, 0xa5, 0xca, - 0x1c, 0x96, 0x78, 0x3a, 0x5d, 0xc3, 0x2d, 0x27, 0x20, 0x95, 0xc0, 0xaf, 0x91, 0x50, 0xcb, 0x80, - 0xf0, 0x08, 0x8f, 0xfa, 0x4c, 0xa7, 0x6b, 0x35, 0x8b, 0x00, 0x67, 0x97, 0x43, 0x24, 0x9d, 0xd7, - 0x6f, 0x24, 0xdf, 0x1e, 0x95, 0x96, 0x67, 0xba, 0x4c, 0xed, 0xe7, 0xc1, 0x98, 0xca, 0x28, 0xc8, - 0xe3, 0x3c, 0x87, 0x93, 0xa3, 0x6c, 0x6e, 0x77, 0x1f, 0x24, 0x5a, 0x59, 0xf8, 0x96, 0x12, 0x9c, - 0x70, 0x8a, 0xb7, 0x11, 0x32, 0x70, 0xac, 0x63, 0xc8, 0xc0, 0xcb, 0x50, 0x0a, 0x5b, 0xeb, 0x75, - 0x7f, 0xc7, 0x71, 0x3d, 0xe6, 0xd6, 0xa0, 0xdd, 0xfd, 0xaa, 0x12, 0x81, 0x63, 0x1a, 0xb4, 0x08, - 0x03, 0x8e, 0x34, 0xdf, 0xa1, 0xfc, 0x20, 0x51, 0xca, 0x68, 0xc7, 0xe3, 0xa6, 0x48, 0x83, 0x9d, - 0x2a, 0x8b, 0x5e, 0x85, 0x61, 0xf1, 0x72, 0x5e, 0x24, 0xe1, 0x9d, 0x30, 0x9f, 0x37, 0x56, 0x75, - 0x24, 0x36, 0x69, 0xd1, 0x4d, 
0x18, 0x8c, 0xfc, 0x06, 0x7b, 0xa3, 0x47, 0xc5, 0xbc, 0x53, 0xf9, - 0xe1, 0x0e, 0xd7, 0x14, 0x99, 0xae, 0xb5, 0x56, 0x45, 0xb1, 0xce, 0x07, 0xad, 0xf1, 0xf9, 0xce, - 0xf2, 0x1d, 0x90, 0x50, 0x64, 0x71, 0x3d, 0x97, 0xe7, 0x93, 0xc6, 0xc8, 0xcc, 0xe5, 0x20, 0x4a, - 0x62, 0x9d, 0x0d, 0xba, 0x0a, 0xe3, 0xcd, 0xc0, 0xf5, 0xd9, 0x9c, 0x50, 0x96, 0xdb, 0x49, 0x33, - 0xbb, 0x59, 0x25, 0x49, 0x80, 0xd3, 0x65, 0x58, 0xe0, 0x03, 0x01, 0x9c, 0x3c, 0xc3, 0x33, 0xb4, - 0xf0, 0xab, 0x34, 0x87, 0x61, 0x85, 0x45, 0x2b, 0x6c, 0x27, 0xe6, 0x5a, 0xa0, 0xc9, 0xa9, 0xfc, - 0xb8, 0x54, 0xba, 0xb6, 0x88, 0x0b, 0xaf, 0xea, 0x2f, 0x8e, 0x39, 0xa0, 0xba, 0x96, 0x18, 0x95, - 0x5e, 0x01, 0xc2, 0xc9, 0xb3, 0x6d, 0x9c, 0x22, 0x13, 0xb7, 0xb2, 0x58, 0x20, 0x30, 0xc0, 0x21, - 0x4e, 0xf0, 0x44, 0x1f, 0x85, 0x31, 0x11, 0x4d, 0x33, 0xee, 0xa6, 0x73, 0xf1, 0xcb, 0x07, 0x9c, - 0xc0, 0xe1, 0x14, 0x35, 0xcf, 0x90, 0xe2, 0xac, 0x37, 0x88, 0xd8, 0xfa, 0x96, 0x5d, 0x6f, 0x3b, - 0x9c, 0x3c, 0xcf, 0xf6, 0x07, 0x91, 0x21, 0x25, 0x89, 0xc5, 0x19, 0x25, 0xd0, 0x1a, 0x8c, 0x35, - 0x03, 0x42, 0x76, 0x98, 0xa0, 0x2f, 0xce, 0xb3, 0x32, 0x8f, 0xfb, 0x41, 0x5b, 0x52, 0x49, 0xe0, - 0x0e, 0x32, 0x60, 0x38, 0xc5, 0x01, 0xdd, 0x81, 0x01, 0x7f, 0x97, 0x04, 0x5b, 0xc4, 0xa9, 0x4f, - 0x5e, 0x68, 0xf3, 0x12, 0x47, 0x1c, 0x6e, 0x37, 0x04, 0x6d, 0xc2, 0xdb, 0x43, 0x82, 0x3b, 0x7b, - 0x7b, 0xc8, 0xca, 0xd0, 0xff, 0x61, 0xc1, 0x19, 0x69, 0x9c, 0xa9, 0x36, 0x69, 0xaf, 0xcf, 0xf9, - 0x5e, 0x18, 0x05, 0x3c, 0x52, 0xc5, 0xa3, 0xf9, 0xd1, 0x1b, 0xd6, 0x72, 0x0a, 0x29, 0x45, 0xf4, - 0x99, 0x3c, 0x8a, 0x10, 0xe7, 0xd7, 0x48, 0xaf, 0xa6, 0x21, 0x89, 0xe4, 0x66, 0x34, 0x13, 0x2e, - 0xbe, 0x31, 0xbf, 0x3a, 0xf9, 0x18, 0x0f, 0xb3, 0x41, 0x17, 0x43, 0x35, 0x89, 0xc4, 0x69, 0x7a, - 0x74, 0x05, 0x0a, 0x7e, 0x38, 0xf9, 0x78, 0x9b, 0x5c, 0xba, 0x7e, 0xfd, 0x46, 0x95, 0x7b, 0xfd, - 0xdd, 0xa8, 0xe2, 0x82, 0x1f, 0xca, 0x2c, 0x25, 0xf4, 0x3e, 0x16, 0x4e, 0x3e, 0xc1, 0xd5, 0x96, - 0x32, 0x4b, 0x09, 0x03, 0xe2, 0x18, 0x8f, 0xb6, 0x60, 0x34, 0x34, 0xee, 0xbd, 0xe1, 0xe4, 0x45, - 0xd6, 0x53, 0x4f, 0xe4, 0x0d, 0x9a, 0x41, 0xad, 0xa5, 0x0f, 0x30, 0xb9, 0xe0, 0x24, 0x5b, 0xbe, - 0xba, 0xb4, 0x9b, 0x77, 0x38, 0xf9, 0x64, 0x87, 0xd5, 0xa5, 0x11, 0xeb, 0xab, 0x4b, 0xe7, 0x81, - 0x13, 0x3c, 0xa7, 0xbe, 0x03, 0xc6, 0x53, 0xe2, 0xd2, 0x61, 0x3c, 0xdc, 0xa7, 0xb6, 0x61, 0xd8, - 0x98, 0x92, 0x0f, 0xd5, 0xbb, 0xe2, 0xb7, 0x4b, 0x50, 0x52, 0x56, 0x6f, 0x74, 0xd9, 0x74, 0xa8, - 0x38, 0x93, 0x74, 0xa8, 0x18, 0xa8, 0xf8, 0x75, 0xc3, 0x87, 0x62, 0x2d, 0x23, 0x18, 0x63, 0xde, - 0x06, 0xd8, 0xfd, 0x23, 0x15, 0xcd, 0x94, 0x50, 0xec, 0xda, 0x33, 0xa3, 0xa7, 0xad, 0x75, 0xe2, - 0x2a, 0x8c, 0x7b, 0x3e, 0x93, 0xd1, 0x49, 0x5d, 0x0a, 0x60, 0x4c, 0xce, 0x2a, 0xe9, 0xd1, 0x8d, - 0x12, 0x04, 0x38, 0x5d, 0x86, 0x56, 0xc8, 0x05, 0xa5, 0xa4, 0x39, 0x84, 0xcb, 0x51, 0x58, 0x60, - 0xe9, 0xdd, 0x90, 0xff, 0x0a, 0x27, 0xc7, 0xf2, 0xef, 0x86, 0xbc, 0x50, 0x52, 0x18, 0x0b, 0xa5, - 0x30, 0xc6, 0xb4, 0xff, 0x4d, 0xbf, 0xbe, 0x54, 0x11, 0x62, 0xbe, 0x16, 0x49, 0xb8, 0xbe, 0x54, - 0xc1, 0x1c, 0x87, 0x66, 0xa0, 0x8f, 0xfd, 0x08, 0x27, 0x87, 0xf2, 0xa3, 0xe1, 0xb0, 0x12, 0x5a, - 0x96, 0x34, 0x56, 0x00, 0x8b, 0x82, 0x4c, 0xbb, 0x4b, 0xef, 0x46, 0x4c, 0xbb, 0xdb, 0xff, 0x80, - 0xda, 0x5d, 0xc9, 0x00, 0xc7, 0xbc, 0xd0, 0x5d, 0x38, 0x69, 0xdc, 0x47, 0xd5, 0xab, 0x1d, 0xc8, - 0x37, 0xfc, 0x26, 0x88, 0x67, 0xcf, 0x89, 0x46, 0x9f, 0x5c, 0xca, 0xe2, 0x84, 0xb3, 0x2b, 0x40, - 0x0d, 0x18, 0xaf, 0xa5, 0x6a, 0x1d, 0xe8, 0xbe, 0x56, 0x35, 0x2f, 0xd2, 0x35, 0xa6, 0x19, 0xa3, - 0x57, 0x61, 0xe0, 0x6d, 0x3f, 0x64, 0x47, 0xa4, 0xb8, 
0x9a, 0xc8, 0x70, 0x0e, 0x03, 0x6f, 0xdc, - 0xa8, 0x32, 0xf8, 0xc1, 0x7e, 0x79, 0xb0, 0xe2, 0xd7, 0xe5, 0x5f, 0xac, 0x0a, 0xa0, 0xef, 0xb7, - 0x60, 0x2a, 0x7d, 0xe1, 0x55, 0x8d, 0x1e, 0xee, 0xbe, 0xd1, 0xb6, 0xa8, 0x74, 0x6a, 0x21, 0x97, - 0x1d, 0x6e, 0x53, 0x15, 0xfa, 0x10, 0x5d, 0x4f, 0xa1, 0x7b, 0x8f, 0x88, 0x14, 0xb3, 0x8f, 0xc6, - 0xeb, 0x89, 0x42, 0x0f, 0xf6, 0xcb, 0xa3, 0x7c, 0x67, 0x74, 0xef, 0xc9, 0xe7, 0x4d, 0xa2, 0x00, - 0xfa, 0x4e, 0x38, 0x19, 0xa4, 0x35, 0xa8, 0x44, 0x0a, 0xe1, 0x4f, 0x77, 0xb3, 0xcb, 0x26, 0x07, - 0x1c, 0x67, 0x31, 0xc4, 0xd9, 0xf5, 0xd8, 0xbf, 0x62, 0x31, 0xfd, 0xb6, 0x68, 0x16, 0x09, 0x5b, - 0x8d, 0xe3, 0x48, 0x6c, 0xbd, 0x60, 0xd8, 0x8e, 0x1f, 0xd8, 0xb1, 0xe8, 0x1f, 0x59, 0xcc, 0xb1, - 0xe8, 0x18, 0x5f, 0x31, 0xbd, 0x01, 0x03, 0x91, 0x4c, 0x38, 0xde, 0x26, 0x17, 0xb7, 0xd6, 0x28, - 0xe6, 0x5c, 0xa5, 0x2e, 0x39, 0x2a, 0xb7, 0xb8, 0x62, 0x63, 0xff, 0x7d, 0x3e, 0x02, 0x12, 0x73, - 0x0c, 0x26, 0xba, 0x79, 0xd3, 0x44, 0x57, 0xee, 0xf0, 0x05, 0x39, 0xa6, 0xba, 0xbf, 0x67, 0xb6, - 0x9b, 0x29, 0xf7, 0xde, 0xed, 0x1e, 0x6d, 0xf6, 0x17, 0x2c, 0x80, 0x38, 0xc8, 0x7c, 0x17, 0x29, - 0x25, 0x5f, 0xa6, 0xd7, 0x1a, 0x3f, 0xf2, 0x6b, 0x7e, 0x43, 0x18, 0x28, 0xce, 0xc6, 0x56, 0x42, - 0x0e, 0x3f, 0xd0, 0x7e, 0x63, 0x45, 0x8d, 0xca, 0x32, 0xa4, 0x65, 0x31, 0xb6, 0x5b, 0x1b, 0xe1, - 0x2c, 0xbf, 0x64, 0xc1, 0x89, 0x2c, 0x97, 0x78, 0x7a, 0x49, 0xe6, 0x6a, 0x4e, 0xe5, 0x6d, 0xa8, - 0x46, 0xf3, 0x96, 0x80, 0x63, 0x45, 0xd1, 0x75, 0xae, 0xce, 0xc3, 0x45, 0x77, 0xbf, 0x01, 0xc3, - 0x95, 0x80, 0x68, 0xf2, 0xc5, 0x6b, 0x3c, 0x4c, 0x0a, 0x6f, 0xcf, 0xb3, 0x87, 0x0e, 0x91, 0x62, - 0x7f, 0xb9, 0x00, 0x27, 0xb8, 0xd3, 0xce, 0xcc, 0xae, 0xef, 0xd6, 0x2b, 0x7e, 0x5d, 0x3c, 0x64, - 0x7c, 0x13, 0x86, 0x9a, 0x9a, 0x6e, 0xba, 0x5d, 0xa4, 0x62, 0x5d, 0x87, 0x1d, 0x6b, 0xd3, 0x74, - 0x28, 0x36, 0x78, 0xa1, 0x3a, 0x0c, 0x91, 0x5d, 0xb7, 0xa6, 0x3c, 0x3f, 0x0a, 0x87, 0x3e, 0xa4, - 0x55, 0x2d, 0x0b, 0x1a, 0x1f, 0x6c, 0x70, 0x7d, 0x08, 0x19, 0xf4, 0xed, 0x1f, 0xb5, 0xe0, 0x74, - 0x4e, 0x5c, 0x63, 0x5a, 0xdd, 0x1d, 0xe6, 0x1e, 0x25, 0xa6, 0xad, 0xaa, 0x8e, 0x3b, 0x4d, 0x61, - 0x81, 0x45, 0x1f, 0x03, 0xe0, 0x4e, 0x4f, 0xc4, 0xab, 0x75, 0x0c, 0x00, 0x6b, 0xc4, 0xae, 0xd4, - 0xc2, 0x10, 0xca, 0xf2, 0x58, 0xe3, 0x65, 0x7f, 0xa9, 0x07, 0x7a, 0x99, 0x93, 0x0d, 0xaa, 0x40, - 0xff, 0x16, 0xcf, 0x54, 0xd5, 0x76, 0xdc, 0x28, 0xad, 0x4c, 0x7e, 0x15, 0x8f, 0x9b, 0x06, 0xc5, - 0x92, 0x0d, 0x5a, 0x81, 0x09, 0x9e, 0x30, 0xac, 0x31, 0x4f, 0x1a, 0xce, 0x9e, 0x54, 0xfb, 0xf2, - 0x1c, 0xd8, 0x4a, 0xfd, 0xbd, 0x94, 0x26, 0xc1, 0x59, 0xe5, 0xd0, 0x6b, 0x30, 0x42, 0xaf, 0xe1, - 0x7e, 0x2b, 0x92, 0x9c, 0x78, 0xaa, 0x30, 0x75, 0x33, 0x59, 0x33, 0xb0, 0x38, 0x41, 0x8d, 0x5e, - 0x85, 0xe1, 0x66, 0x4a, 0xc1, 0xdd, 0x1b, 0x6b, 0x82, 0x4c, 0xa5, 0xb6, 0x49, 0xcb, 0xbc, 0xe2, - 0x5b, 0xec, 0x0d, 0xc0, 0xda, 0x56, 0x40, 0xc2, 0x2d, 0xbf, 0x51, 0x67, 0x12, 0x70, 0xaf, 0xe6, - 0x15, 0x9f, 0xc0, 0xe3, 0x54, 0x09, 0xca, 0x65, 0xc3, 0x71, 0x1b, 0xad, 0x80, 0xc4, 0x5c, 0xfa, - 0x4c, 0x2e, 0x8b, 0x09, 0x3c, 0x4e, 0x95, 0xe8, 0xac, 0xb9, 0xef, 0x3f, 0x1a, 0xcd, 0xbd, 0xfd, - 0xd3, 0x05, 0x30, 0x86, 0xf6, 0xdb, 0x37, 0x85, 0x19, 0xfd, 0xb2, 0xcd, 0xa0, 0x59, 0x13, 0x0e, - 0x65, 0x99, 0x5f, 0x16, 0xe7, 0x2f, 0xe6, 0x5f, 0x46, 0xff, 0x63, 0x56, 0x8a, 0xae, 0xf1, 0x93, - 0x95, 0xc0, 0xa7, 0x87, 0x9c, 0x0c, 0xa4, 0xa7, 0x1e, 0x9f, 0xf4, 0xcb, 0x20, 0x03, 0x6d, 0x42, - 0xce, 0x0a, 0xf7, 0x7c, 0xce, 0xc1, 0xf0, 0xbd, 0xaa, 0x8a, 0x68, 0x1f, 0x92, 0x0b, 0xba, 0x02, - 0x83, 0x22, 0x2f, 0x15, 0x7b, 0x23, 0xc1, 0x17, 0x13, 0xf3, 0x15, 0x9b, 0x8f, 
0xc1, 0x58, 0xa7, - 0xb1, 0x7f, 0xa0, 0x00, 0x13, 0x19, 0x8f, 0xdc, 0xf8, 0x31, 0xb2, 0xe9, 0x86, 0x91, 0x4a, 0x91, - 0xac, 0x1d, 0x23, 0x1c, 0x8e, 0x15, 0x05, 0xdd, 0xab, 0xf8, 0x41, 0x95, 0x3c, 0x9c, 0xc4, 0x23, - 0x12, 0x81, 0x3d, 0x64, 0xb2, 0xe1, 0x0b, 0xd0, 0xd3, 0x0a, 0x89, 0x0c, 0x16, 0xad, 0x8e, 0x6d, - 0x66, 0xd6, 0x66, 0x18, 0x7a, 0x05, 0xdc, 0x54, 0x16, 0x62, 0xed, 0x0a, 0xc8, 0x6d, 0xc4, 0x1c, - 0x47, 0x1b, 0x17, 0x11, 0xcf, 0xf1, 0x22, 0x71, 0x51, 0x8c, 0xa3, 0x9e, 0x32, 0x28, 0x16, 0x58, - 0xfb, 0x8b, 0x45, 0x38, 0x93, 0xfb, 0xec, 0x95, 0x36, 0x7d, 0xc7, 0xf7, 0xdc, 0xc8, 0x57, 0x4e, - 0x78, 0x3c, 0xd2, 0x29, 0x69, 0x6e, 0xad, 0x08, 0x38, 0x56, 0x14, 0xe8, 0x22, 0xf4, 0x32, 0xa5, - 0x78, 0x2a, 0x59, 0xf4, 0xec, 0x3c, 0x0f, 0x7d, 0xc7, 0xd1, 0x5d, 0xe7, 0xf7, 0x7f, 0x8c, 0x4a, - 0x30, 0x7e, 0x23, 0x79, 0xa0, 0xd0, 0xe6, 0xfa, 0x7e, 0x03, 0x33, 0x24, 0x7a, 0x42, 0xf4, 0x57, - 0xc2, 0xeb, 0x0c, 0x3b, 0x75, 0x3f, 0xd4, 0x3a, 0xed, 0x29, 0xe8, 0xdf, 0x26, 0x7b, 0x81, 0xeb, - 0x6d, 0x26, 0xbd, 0x11, 0xaf, 0x73, 0x30, 0x96, 0x78, 0x33, 0x6f, 0x69, 0xff, 0x51, 0x27, 0xe6, - 0x1f, 0xe8, 0x28, 0x9e, 0xfc, 0x50, 0x11, 0x46, 0xf1, 0xec, 0xfc, 0x7b, 0x03, 0x71, 0x33, 0x3d, - 0x10, 0x47, 0x9d, 0x98, 0xbf, 0xf3, 0x68, 0xfc, 0xa2, 0x05, 0xa3, 0x2c, 0x3b, 0x96, 0x88, 0x59, - 0xe1, 0xfa, 0xde, 0x31, 0x5c, 0x05, 0x1e, 0x83, 0xde, 0x80, 0x56, 0x9a, 0xcc, 0x12, 0xcd, 0x5a, - 0x82, 0x39, 0x0e, 0x9d, 0x85, 0x1e, 0xd6, 0x04, 0x3a, 0x78, 0x43, 0x7c, 0x0b, 0x9e, 0x77, 0x22, - 0x07, 0x33, 0x28, 0x0b, 0xfc, 0x86, 0x49, 0xb3, 0xe1, 0xf2, 0x46, 0xc7, 0x2e, 0x0b, 0xef, 0x8e, - 0x80, 0x18, 0x99, 0x4d, 0x7b, 0x67, 0x81, 0xdf, 0xb2, 0x59, 0xb6, 0xbf, 0x66, 0xff, 0x79, 0x01, - 0xce, 0x67, 0x96, 0xeb, 0x3a, 0xf0, 0x5b, 0xfb, 0xd2, 0x0f, 0x33, 0xff, 0x51, 0xf1, 0x18, 0x7d, - 0xbd, 0x7b, 0xba, 0x95, 0xfe, 0x7b, 0xbb, 0x88, 0xc7, 0x96, 0xd9, 0x65, 0xef, 0x92, 0x78, 0x6c, - 0x99, 0x6d, 0xcb, 0x51, 0x13, 0xfc, 0x75, 0x21, 0xe7, 0x5b, 0x98, 0xc2, 0xe0, 0x12, 0xdd, 0x67, - 0x18, 0x32, 0x94, 0x97, 0x70, 0xbe, 0xc7, 0x70, 0x18, 0x56, 0x58, 0x34, 0x03, 0xa3, 0x3b, 0xae, - 0x47, 0x37, 0x9f, 0x3d, 0x53, 0x14, 0x57, 0xb6, 0x8c, 0x15, 0x13, 0x8d, 0x93, 0xf4, 0xc8, 0xd5, - 0x62, 0xb5, 0xf1, 0xaf, 0x7b, 0xf5, 0x50, 0xab, 0x6e, 0xda, 0x74, 0xe7, 0x50, 0xbd, 0x98, 0x11, - 0xb7, 0x6d, 0x45, 0xd3, 0x13, 0x15, 0xbb, 0xd7, 0x13, 0x0d, 0x65, 0xeb, 0x88, 0xa6, 0x5e, 0x85, - 0xe1, 0x07, 0xb6, 0x8d, 0xd8, 0x5f, 0x2f, 0xc2, 0x23, 0x6d, 0x96, 0x3d, 0xdf, 0xeb, 0x8d, 0x31, - 0xd0, 0xf6, 0xfa, 0xd4, 0x38, 0x54, 0xe0, 0xc4, 0x46, 0xab, 0xd1, 0xd8, 0x63, 0x4f, 0xa0, 0x48, - 0x5d, 0x52, 0x08, 0x99, 0x52, 0x2a, 0x47, 0x4e, 0x2c, 0x66, 0xd0, 0xe0, 0xcc, 0x92, 0xf4, 0x8a, - 0x45, 0x4f, 0x92, 0x3d, 0xc5, 0x2a, 0x71, 0xc5, 0xc2, 0x3a, 0x12, 0x9b, 0xb4, 0xe8, 0x2a, 0x8c, - 0x3b, 0xbb, 0x8e, 0xcb, 0x03, 0xde, 0x4b, 0x06, 0xfc, 0x8e, 0xa5, 0x74, 0xd1, 0x33, 0x49, 0x02, - 0x9c, 0x2e, 0x83, 0x5e, 0x07, 0xe4, 0xaf, 0xb3, 0x87, 0x12, 0xf5, 0xab, 0xc4, 0x13, 0x56, 0x77, - 0x36, 0x76, 0xc5, 0x78, 0x4b, 0xb8, 0x91, 0xa2, 0xc0, 0x19, 0xa5, 0x12, 0x81, 0xc9, 0xfa, 0xf2, - 0x03, 0x93, 0xb5, 0xdf, 0x17, 0x3b, 0xa6, 0xde, 0xba, 0x02, 0xc3, 0x87, 0x74, 0xff, 0xb5, 0xff, - 0x8d, 0x05, 0x4a, 0x41, 0x6c, 0x46, 0xfd, 0x7d, 0x95, 0xf9, 0x27, 0x73, 0xd5, 0xb6, 0x16, 0x2d, - 0xe9, 0xa4, 0xe6, 0x9f, 0x1c, 0x23, 0xb1, 0x49, 0xcb, 0xe7, 0x90, 0xe6, 0x57, 0x6c, 0xdc, 0x0a, - 0x44, 0x68, 0x42, 0x45, 0x81, 0x3e, 0x0e, 0xfd, 0x75, 0x77, 0xd7, 0x0d, 0x85, 0x72, 0xec, 0xd0, - 0xc6, 0xb8, 0x78, 0xeb, 0x9c, 0xe7, 0x6c, 0xb0, 0xe4, 0x67, 0xff, 0x50, 0x21, 0xee, 0x93, 0x37, - 0x5a, 
0x7e, 0xe4, 0x1c, 0xc3, 0x49, 0x7e, 0xd5, 0x38, 0xc9, 0x9f, 0x68, 0x17, 0x9f, 0x91, 0x35, - 0x29, 0xf7, 0x04, 0xbf, 0x91, 0x38, 0xc1, 0x9f, 0xec, 0xcc, 0xaa, 0xfd, 0xc9, 0xfd, 0x0f, 0x2c, - 0x18, 0x37, 0xe8, 0x8f, 0xe1, 0x00, 0x59, 0x34, 0x0f, 0x90, 0x47, 0x3b, 0x7e, 0x43, 0xce, 0xc1, - 0xf1, 0xbd, 0xc5, 0x44, 0xdb, 0xd9, 0x81, 0xf1, 0x36, 0xf4, 0x6c, 0x39, 0x41, 0xbd, 0x5d, 0x3e, - 0x9a, 0x54, 0xa1, 0xe9, 0x6b, 0x4e, 0x20, 0x3c, 0x15, 0x9e, 0x95, 0xbd, 0x4e, 0x41, 0x1d, 0xbd, - 0x14, 0x58, 0x55, 0xe8, 0x65, 0xe8, 0x0b, 0x6b, 0x7e, 0x53, 0xbd, 0x99, 0xba, 0xc0, 0x3a, 0x9a, - 0x41, 0x0e, 0xf6, 0xcb, 0xc8, 0xac, 0x8e, 0x82, 0xb1, 0xa0, 0x47, 0x6f, 0xc2, 0x30, 0xfb, 0xa5, - 0xdc, 0x06, 0x8b, 0xf9, 0x1a, 0x8c, 0xaa, 0x4e, 0xc8, 0x7d, 0x6a, 0x0d, 0x10, 0x36, 0x59, 0x4d, - 0x6d, 0x42, 0x49, 0x7d, 0xd6, 0x43, 0xb5, 0x76, 0xff, 0xab, 0x22, 0x4c, 0x64, 0xcc, 0x39, 0x14, - 0x1a, 0x23, 0x71, 0xa5, 0xcb, 0xa9, 0xfa, 0x0e, 0xc7, 0x22, 0x64, 0x17, 0xa8, 0xba, 0x98, 0x5b, - 0x5d, 0x57, 0x7a, 0x33, 0x24, 0xc9, 0x4a, 0x29, 0xa8, 0x73, 0xa5, 0xb4, 0xb2, 0x63, 0xeb, 0x6a, - 0x5a, 0x91, 0x6a, 0xe9, 0x43, 0x1d, 0xd3, 0x5f, 0xef, 0x81, 0x13, 0x59, 0x21, 0x63, 0xd1, 0x67, - 0x13, 0xd9, 0x90, 0x5f, 0xec, 0x36, 0xd8, 0x2c, 0x4f, 0x91, 0x2c, 0xc2, 0x40, 0x4e, 0x9b, 0xf9, - 0x91, 0x3b, 0x76, 0xb3, 0xa8, 0x93, 0x05, 0xa0, 0x09, 0x78, 0x16, 0x6b, 0xb9, 0x7d, 0x7c, 0xa0, - 0xeb, 0x06, 0x88, 0xf4, 0xd7, 0x61, 0xc2, 0x25, 0x49, 0x82, 0x3b, 0xbb, 0x24, 0xc9, 0x9a, 0xd1, - 0x12, 0xf4, 0xd5, 0xb8, 0xaf, 0x4b, 0xb1, 0xf3, 0x16, 0xc6, 0x1d, 0x5d, 0xd4, 0x06, 0x2c, 0x1c, - 0x5c, 0x04, 0x83, 0x29, 0x17, 0x06, 0xb5, 0x8e, 0x79, 0xa8, 0x93, 0x67, 0x9b, 0x1e, 0x7c, 0x5a, - 0x17, 0x3c, 0xd4, 0x09, 0xf4, 0xa3, 0x16, 0x24, 0x1e, 0xbc, 0x28, 0xa5, 0x9c, 0x95, 0xab, 0x94, - 0xbb, 0x00, 0x3d, 0x81, 0xdf, 0x20, 0xc9, 0x0c, 0xc4, 0xd8, 0x6f, 0x10, 0xcc, 0x30, 0x94, 0x22, - 0x8a, 0x55, 0x2d, 0x43, 0xfa, 0x35, 0x52, 0x5c, 0x10, 0x1f, 0x83, 0xde, 0x06, 0xd9, 0x25, 0x8d, - 0x64, 0xa2, 0xb8, 0x65, 0x0a, 0xc4, 0x1c, 0x67, 0xff, 0x62, 0x0f, 0x9c, 0x6b, 0x1b, 0x0d, 0x8a, - 0x5e, 0xc6, 0x36, 0x9d, 0x88, 0xdc, 0x71, 0xf6, 0x92, 0x19, 0x9d, 0xae, 0x72, 0x30, 0x96, 0x78, - 0xf6, 0xfc, 0x93, 0x27, 0x66, 0x48, 0xa8, 0x30, 0x45, 0x3e, 0x06, 0x81, 0x35, 0x55, 0x62, 0xc5, - 0xa3, 0x50, 0x89, 0x3d, 0x0f, 0x10, 0x86, 0x0d, 0xee, 0x16, 0x58, 0x17, 0xef, 0x4a, 0xe3, 0x04, - 0x1e, 0xd5, 0x65, 0x81, 0xc1, 0x1a, 0x15, 0x9a, 0x87, 0xb1, 0x66, 0xe0, 0x47, 0x5c, 0x23, 0x3c, - 0xcf, 0x3d, 0x67, 0x7b, 0xcd, 0x40, 0x3c, 0x95, 0x04, 0x1e, 0xa7, 0x4a, 0xa0, 0x97, 0x60, 0x50, - 0x04, 0xe7, 0xa9, 0xf8, 0x7e, 0x43, 0x28, 0xa1, 0x94, 0x33, 0x69, 0x35, 0x46, 0x61, 0x9d, 0x4e, - 0x2b, 0xc6, 0xd4, 0xcc, 0xfd, 0x99, 0xc5, 0xb8, 0xaa, 0x59, 0xa3, 0x4b, 0x44, 0xa2, 0x1e, 0xe8, - 0x2a, 0x12, 0x75, 0xac, 0x96, 0x2b, 0x75, 0x6d, 0xf5, 0x84, 0x8e, 0x8a, 0xac, 0xaf, 0xf4, 0xc0, - 0x84, 0x98, 0x38, 0x0f, 0x7b, 0xba, 0xdc, 0x4c, 0x4f, 0x97, 0xa3, 0x50, 0xdc, 0xbd, 0x37, 0x67, - 0x8e, 0x7b, 0xce, 0xfc, 0xb0, 0x05, 0xa6, 0xa4, 0x86, 0xfe, 0xb7, 0xdc, 0x94, 0x78, 0x2f, 0xe5, - 0x4a, 0x7e, 0x71, 0x94, 0xdf, 0x77, 0x96, 0x1c, 0xcf, 0xfe, 0xd7, 0x16, 0x3c, 0xda, 0x91, 0x23, - 0x5a, 0x80, 0x12, 0x13, 0x27, 0xb5, 0x8b, 0xde, 0x93, 0xca, 0xb3, 0x5e, 0x22, 0x72, 0xa4, 0xdb, - 0xb8, 0x24, 0x5a, 0x48, 0xe5, 0x1e, 0x7c, 0x2a, 0x23, 0xf7, 0xe0, 0x49, 0xa3, 0x7b, 0x1e, 0x30, - 0xf9, 0xe0, 0x0f, 0xd2, 0x13, 0xc7, 0x78, 0xd5, 0x86, 0x3e, 0x60, 0x28, 0x1d, 0xed, 0x84, 0xd2, - 0x11, 0x99, 0xd4, 0xda, 0x19, 0xf2, 0x51, 0x18, 0x63, 0x51, 0xfb, 0xd8, 0x3b, 0x0f, 0xf1, 0xde, - 0xae, 0x10, 0xfb, 0x72, 0x2f, 
0x27, 0x70, 0x38, 0x45, 0x6d, 0xff, 0x69, 0x11, 0xfa, 0xf8, 0xf2, - 0x3b, 0x86, 0xeb, 0xe5, 0x33, 0x50, 0x72, 0x77, 0x76, 0x5a, 0x3c, 0x9d, 0x5c, 0x6f, 0xec, 0x19, - 0xbc, 0x24, 0x81, 0x38, 0xc6, 0xa3, 0x45, 0xa1, 0xef, 0x6e, 0x13, 0x18, 0x98, 0x37, 0x7c, 0x7a, - 0xde, 0x89, 0x1c, 0x2e, 0x2b, 0xa9, 0x73, 0x36, 0xd6, 0x8c, 0xa3, 0x4f, 0x01, 0x84, 0x51, 0xe0, - 0x7a, 0x9b, 0x14, 0x26, 0x62, 0xab, 0x3f, 0xdd, 0x86, 0x5b, 0x55, 0x11, 0x73, 0x9e, 0xf1, 0x9e, - 0xa3, 0x10, 0x58, 0xe3, 0x88, 0xa6, 0x8d, 0x93, 0x7e, 0x2a, 0x31, 0x76, 0xc0, 0xb9, 0xc6, 0x63, - 0x36, 0xf5, 0x41, 0x28, 0x29, 0xe6, 0x9d, 0xb4, 0x5f, 0x43, 0xba, 0x58, 0xf4, 0x11, 0x18, 0x4d, - 0xb4, 0xed, 0x50, 0xca, 0xb3, 0x5f, 0xb2, 0x60, 0x94, 0x37, 0x66, 0xc1, 0xdb, 0x15, 0xa7, 0xc1, - 0x3d, 0x38, 0xd1, 0xc8, 0xd8, 0x95, 0xc5, 0xf0, 0x77, 0xbf, 0x8b, 0x2b, 0x65, 0x59, 0x16, 0x16, - 0x67, 0xd6, 0x81, 0x2e, 0xd1, 0x15, 0x47, 0x77, 0x5d, 0xa7, 0x21, 0xe2, 0x1b, 0x0c, 0xf1, 0xd5, - 0xc6, 0x61, 0x58, 0x61, 0xed, 0x3f, 0xb0, 0x60, 0x9c, 0xb7, 0xfc, 0x3a, 0xd9, 0x53, 0x7b, 0xd3, - 0x37, 0xb3, 0xed, 0x22, 0x91, 0x69, 0x21, 0x27, 0x91, 0xa9, 0xfe, 0x69, 0xc5, 0xb6, 0x9f, 0xf6, - 0x65, 0x0b, 0xc4, 0x0c, 0x39, 0x06, 0x7d, 0xc6, 0x77, 0x98, 0xfa, 0x8c, 0xa9, 0xfc, 0x45, 0x90, - 0xa3, 0xc8, 0xf8, 0x2b, 0x0b, 0xc6, 0x38, 0x41, 0x6c, 0xab, 0xff, 0xa6, 0x8e, 0xc3, 0xac, 0xf9, - 0x45, 0x99, 0xce, 0x97, 0xd7, 0xc9, 0xde, 0x9a, 0x5f, 0x71, 0xa2, 0xad, 0xec, 0x8f, 0x32, 0x06, - 0xab, 0xa7, 0xed, 0x60, 0xd5, 0xe5, 0x02, 0x32, 0xf2, 0x7c, 0x75, 0x08, 0x10, 0x70, 0xd8, 0x3c, - 0x5f, 0xf6, 0x9f, 0x59, 0x80, 0x78, 0x35, 0x86, 0xe0, 0x46, 0xc5, 0x21, 0x06, 0xd5, 0x0e, 0xba, - 0x78, 0x6b, 0x52, 0x18, 0xac, 0x51, 0x1d, 0x49, 0xf7, 0x24, 0x1c, 0x2e, 0x8a, 0x9d, 0x1d, 0x2e, - 0x0e, 0xd1, 0xa3, 0xff, 0xac, 0x0f, 0x92, 0x2f, 0xfb, 0xd0, 0x2d, 0x18, 0xaa, 0x39, 0x4d, 0x67, - 0xdd, 0x6d, 0xb8, 0x91, 0x4b, 0xc2, 0x76, 0xde, 0x58, 0x73, 0x1a, 0x9d, 0x30, 0x91, 0x6b, 0x10, - 0x6c, 0xf0, 0x41, 0xd3, 0x00, 0xcd, 0xc0, 0xdd, 0x75, 0x1b, 0x64, 0x93, 0xa9, 0x5d, 0x58, 0x44, - 0x15, 0xee, 0x1a, 0x26, 0xa1, 0x58, 0xa3, 0xc8, 0x08, 0xa3, 0x50, 0x7c, 0xc8, 0x61, 0x14, 0xe0, - 0xd8, 0xc2, 0x28, 0xf4, 0x1c, 0x2a, 0x8c, 0xc2, 0xc0, 0xa1, 0xc3, 0x28, 0xf4, 0x76, 0x15, 0x46, - 0x01, 0xc3, 0x29, 0x29, 0x7b, 0xd2, 0xff, 0x8b, 0x6e, 0x83, 0x88, 0x0b, 0x07, 0x0f, 0x03, 0x33, - 0x75, 0x7f, 0xbf, 0x7c, 0x0a, 0x67, 0x52, 0xe0, 0x9c, 0x92, 0xe8, 0x63, 0x30, 0xe9, 0x34, 0x1a, - 0xfe, 0x1d, 0x35, 0xa8, 0x0b, 0x61, 0xcd, 0x69, 0x70, 0x13, 0x48, 0x3f, 0xe3, 0x7a, 0xf6, 0xfe, - 0x7e, 0x79, 0x72, 0x26, 0x87, 0x06, 0xe7, 0x96, 0x46, 0x1f, 0x86, 0x52, 0x33, 0xf0, 0x6b, 0x2b, - 0xda, 0xf3, 0xe3, 0xf3, 0xb4, 0x03, 0x2b, 0x12, 0x78, 0xb0, 0x5f, 0x1e, 0x56, 0x7f, 0xd8, 0x81, - 0x1f, 0x17, 0xc8, 0x88, 0x8b, 0x30, 0x78, 0xa4, 0x71, 0x11, 0xb6, 0x61, 0xa2, 0x4a, 0x02, 0xd7, - 0x69, 0xb8, 0xf7, 0xa8, 0xbc, 0x2c, 0xf7, 0xa7, 0x35, 0x28, 0x05, 0x89, 0x1d, 0xb9, 0xab, 0x60, - 0xbd, 0x5a, 0xc2, 0x25, 0xb9, 0x03, 0xc7, 0x8c, 0xec, 0xff, 0x66, 0x41, 0xbf, 0x78, 0xc9, 0x77, - 0x0c, 0x52, 0xe3, 0x8c, 0x61, 0x94, 0x28, 0x67, 0x77, 0x18, 0x6b, 0x4c, 0xae, 0x39, 0x62, 0x29, - 0x61, 0x8e, 0x78, 0xb4, 0x1d, 0x93, 0xf6, 0x86, 0x88, 0xff, 0xaf, 0x48, 0xa5, 0x77, 0xe3, 0x4d, - 0xf9, 0xc3, 0xef, 0x82, 0x55, 0xe8, 0x0f, 0xc5, 0x9b, 0xe6, 0x42, 0xfe, 0x6b, 0x90, 0xe4, 0x20, - 0xc6, 0x5e, 0x74, 0xe2, 0x15, 0xb3, 0x64, 0x92, 0xf9, 0x58, 0xba, 0xf8, 0x10, 0x1f, 0x4b, 0x77, - 0x7a, 0x75, 0xdf, 0x73, 0x14, 0xaf, 0xee, 0xed, 0xaf, 0xb1, 0x93, 0x53, 0x87, 0x1f, 0x83, 0x50, - 0x75, 0xd5, 0x3c, 0x63, 0xed, 0x36, 0x33, 0x4b, 0x34, 
0x2a, 0x47, 0xb8, 0xfa, 0x05, 0x0b, 0xce, - 0x65, 0x7c, 0x95, 0x26, 0x69, 0x3d, 0x0b, 0x03, 0x4e, 0xab, 0xee, 0xaa, 0xb5, 0xac, 0x99, 0x26, - 0x67, 0x04, 0x1c, 0x2b, 0x0a, 0x34, 0x07, 0xe3, 0xe4, 0x6e, 0xd3, 0xe5, 0x86, 0x5c, 0xdd, 0xf9, - 0xb8, 0xc8, 0x9f, 0x7f, 0x2e, 0x24, 0x91, 0x38, 0x4d, 0xaf, 0x02, 0x44, 0x15, 0x73, 0x03, 0x44, - 0xfd, 0xbc, 0x05, 0x83, 0xea, 0x55, 0xef, 0x43, 0xef, 0xed, 0x8f, 0x9a, 0xbd, 0xfd, 0x48, 0x9b, - 0xde, 0xce, 0xe9, 0xe6, 0xdf, 0x2b, 0xa8, 0xf6, 0x56, 0xfc, 0x20, 0xea, 0x42, 0x82, 0x7b, 0xf0, - 0x87, 0x13, 0x57, 0x60, 0xd0, 0x69, 0x36, 0x25, 0x42, 0x7a, 0xc0, 0xb1, 0xd0, 0xeb, 0x31, 0x18, - 0xeb, 0x34, 0xea, 0x1d, 0x47, 0x31, 0xf7, 0x1d, 0x47, 0x1d, 0x20, 0x72, 0x82, 0x4d, 0x12, 0x51, - 0x98, 0x70, 0xd8, 0xcd, 0xdf, 0x6f, 0x5a, 0x91, 0xdb, 0x98, 0x76, 0xbd, 0x28, 0x8c, 0x82, 0xe9, - 0x25, 0x2f, 0xba, 0x11, 0xf0, 0x2b, 0xa4, 0x16, 0x62, 0x4d, 0xf1, 0xc2, 0x1a, 0x5f, 0x19, 0xc1, - 0x82, 0xd5, 0xd1, 0x6b, 0xba, 0x52, 0xac, 0x0a, 0x38, 0x56, 0x14, 0xf6, 0x07, 0xd9, 0xe9, 0xc3, - 0xfa, 0xf4, 0x70, 0xe1, 0xc5, 0x7e, 0x72, 0x48, 0x8d, 0x06, 0x33, 0x8a, 0xce, 0xeb, 0x41, 0xcc, - 0xda, 0x6f, 0xf6, 0xb4, 0x62, 0xfd, 0x45, 0x64, 0x1c, 0xe9, 0x0c, 0x7d, 0x22, 0xe5, 0x1e, 0xf3, - 0x5c, 0x87, 0x53, 0xe3, 0x10, 0x0e, 0x31, 0x2c, 0x0f, 0x13, 0xcb, 0x52, 0xb3, 0x54, 0x11, 0xeb, - 0x42, 0xcb, 0xc3, 0x24, 0x10, 0x38, 0xa6, 0xa1, 0xc2, 0x94, 0xfa, 0x13, 0x4e, 0xa2, 0x38, 0x16, - 0xb0, 0xa2, 0x0e, 0xb1, 0x46, 0x81, 0x2e, 0x0b, 0x85, 0x02, 0xb7, 0x0b, 0x3c, 0x92, 0x50, 0x28, - 0xc8, 0xee, 0xd2, 0xb4, 0x40, 0x57, 0x60, 0x90, 0xdc, 0x8d, 0x48, 0xe0, 0x39, 0x0d, 0x5a, 0x43, - 0x6f, 0x1c, 0x3f, 0x73, 0x21, 0x06, 0x63, 0x9d, 0x06, 0xad, 0xc1, 0x68, 0xc8, 0xf5, 0x6c, 0x2a, - 0x48, 0x3c, 0xd7, 0x57, 0x3e, 0xad, 0xde, 0x53, 0x9b, 0xe8, 0x03, 0x06, 0xe2, 0xbb, 0x93, 0x8c, - 0x32, 0x91, 0x64, 0x81, 0x5e, 0x83, 0x91, 0x86, 0xef, 0xd4, 0x67, 0x9d, 0x86, 0xe3, 0xd5, 0x58, - 0xff, 0x0c, 0x98, 0x89, 0xa8, 0x97, 0x0d, 0x2c, 0x4e, 0x50, 0x53, 0xe1, 0x4d, 0x87, 0x88, 0x30, - 0x6d, 0x8e, 0xb7, 0x49, 0x42, 0x91, 0x0f, 0x9e, 0x09, 0x6f, 0xcb, 0x39, 0x34, 0x38, 0xb7, 0x34, - 0x7a, 0x19, 0x86, 0xe4, 0xe7, 0x6b, 0x41, 0x59, 0xe2, 0x27, 0x31, 0x1a, 0x0e, 0x1b, 0x94, 0x28, - 0x84, 0x93, 0xf2, 0xff, 0x5a, 0xe0, 0x6c, 0x6c, 0xb8, 0x35, 0x11, 0xa9, 0x80, 0x3f, 0x1f, 0xfe, - 0x88, 0x7c, 0xab, 0xb8, 0x90, 0x45, 0x74, 0xb0, 0x5f, 0x3e, 0x2b, 0x7a, 0x2d, 0x13, 0x8f, 0xb3, - 0x79, 0xa3, 0x15, 0x98, 0xd8, 0x22, 0x4e, 0x23, 0xda, 0x9a, 0xdb, 0x22, 0xb5, 0x6d, 0xb9, 0xe0, - 0x58, 0x98, 0x17, 0xed, 0xe9, 0xc8, 0xb5, 0x34, 0x09, 0xce, 0x2a, 0x87, 0xde, 0x82, 0xc9, 0x66, - 0x6b, 0xbd, 0xe1, 0x86, 0x5b, 0xab, 0x7e, 0xc4, 0x9c, 0x90, 0x66, 0xea, 0xf5, 0x80, 0x84, 0xfc, - 0x75, 0x29, 0x3b, 0x7a, 0x65, 0x20, 0x9d, 0x4a, 0x0e, 0x1d, 0xce, 0xe5, 0x80, 0xee, 0xc1, 0xc9, - 0xc4, 0x44, 0x10, 0x11, 0x31, 0x46, 0xf2, 0x53, 0xc4, 0x54, 0xb3, 0x0a, 0x88, 0xe0, 0x32, 0x59, - 0x28, 0x9c, 0x5d, 0x05, 0x7a, 0x05, 0xc0, 0x6d, 0x2e, 0x3a, 0x3b, 0x6e, 0x83, 0x5e, 0x15, 0x27, - 0xd8, 0x1c, 0xa1, 0xd7, 0x06, 0x58, 0xaa, 0x48, 0x28, 0xdd, 0x9b, 0xc5, 0xbf, 0x3d, 0xac, 0x51, - 0xa3, 0x65, 0x18, 0x11, 0xff, 0xf6, 0xc4, 0x90, 0xf2, 0xc0, 0x2c, 0x8f, 0xb3, 0xa8, 0x5a, 0x15, - 0x1d, 0x73, 0x90, 0x82, 0xe0, 0x44, 0x59, 0xb4, 0x09, 0xe7, 0x64, 0xa2, 0x3f, 0x7d, 0x7e, 0xca, - 0x31, 0x08, 0x59, 0x5e, 0x96, 0x01, 0xfe, 0x2a, 0x65, 0xa6, 0x1d, 0x21, 0x6e, 0xcf, 0x87, 0x9e, - 0xeb, 0xfa, 0x34, 0xe7, 0x6f, 0x8e, 0x4f, 0xc6, 0x11, 0x07, 0x97, 0x93, 0x48, 0x9c, 0xa6, 0x47, - 0x3e, 0x9c, 0x74, 0xbd, 0xac, 0x59, 0x7d, 0x8a, 0x31, 0xfa, 0x10, 0x7f, 0x6e, 
0xdd, 0x7e, 0x46, - 0x67, 0xe2, 0x71, 0x36, 0xdf, 0x77, 0xe6, 0xf7, 0xf7, 0xfb, 0x16, 0x2d, 0xad, 0x49, 0xe7, 0xe8, - 0xd3, 0x30, 0xa4, 0x7f, 0x94, 0x90, 0x34, 0x2e, 0x66, 0x0b, 0xaf, 0xda, 0x9e, 0xc0, 0x65, 0x7b, - 0xb5, 0xee, 0x75, 0x1c, 0x36, 0x38, 0xa2, 0x5a, 0x46, 0x6c, 0x83, 0xcb, 0xdd, 0x49, 0x32, 0xdd, - 0xbb, 0xbd, 0x11, 0xc8, 0x9e, 0xee, 0x68, 0x19, 0x06, 0x6a, 0x0d, 0x97, 0x78, 0xd1, 0x52, 0xa5, - 0x5d, 0xf4, 0xc6, 0x39, 0x41, 0x23, 0xd6, 0x8f, 0x48, 0xb1, 0xc2, 0x61, 0x58, 0x71, 0xb0, 0x7f, - 0xb3, 0x00, 0xe5, 0x0e, 0xf9, 0x7a, 0x12, 0x66, 0x28, 0xab, 0x2b, 0x33, 0xd4, 0x0c, 0x8c, 0xc6, - 0xff, 0x74, 0x0d, 0x97, 0xf2, 0x64, 0xbd, 0x65, 0xa2, 0x71, 0x92, 0xbe, 0xeb, 0x47, 0x09, 0xba, - 0x25, 0xab, 0xa7, 0xe3, 0xb3, 0x1a, 0xc3, 0x82, 0xdd, 0xdb, 0xfd, 0xb5, 0x37, 0xd7, 0x1a, 0x69, - 0x7f, 0xad, 0x00, 0x27, 0x55, 0x17, 0x7e, 0xfb, 0x76, 0xdc, 0xcd, 0x74, 0xc7, 0x1d, 0x81, 0x2d, - 0xd7, 0xbe, 0x01, 0x7d, 0x3c, 0x1c, 0x65, 0x17, 0xe2, 0xf6, 0x63, 0x66, 0x94, 0x6c, 0x25, 0xe1, - 0x19, 0x91, 0xb2, 0xbf, 0xdf, 0x82, 0xd1, 0xc4, 0xeb, 0x36, 0x84, 0xb5, 0x27, 0xd0, 0x0f, 0x22, - 0x12, 0x67, 0x09, 0xdb, 0x17, 0xa0, 0x67, 0xcb, 0x0f, 0xa3, 0xa4, 0xa3, 0xc7, 0x35, 0x3f, 0x8c, - 0x30, 0xc3, 0xd8, 0x7f, 0x68, 0x41, 0xef, 0x9a, 0xe3, 0x7a, 0x91, 0x34, 0x0a, 0x58, 0x39, 0x46, - 0x81, 0x6e, 0xbe, 0x0b, 0xbd, 0x04, 0x7d, 0x64, 0x63, 0x83, 0xd4, 0x22, 0x31, 0xaa, 0x32, 0x14, - 0x42, 0xdf, 0x02, 0x83, 0x52, 0xf9, 0x8f, 0x55, 0xc6, 0xff, 0x62, 0x41, 0x8c, 0x6e, 0x43, 0x29, - 0x72, 0x77, 0xc8, 0x4c, 0xbd, 0x2e, 0x4c, 0xe5, 0x0f, 0x10, 0xbf, 0x63, 0x4d, 0x32, 0xc0, 0x31, - 0x2f, 0xfb, 0x8b, 0x05, 0x80, 0x38, 0x8e, 0x57, 0xa7, 0x4f, 0x9c, 0x4d, 0x19, 0x51, 0x2f, 0x66, - 0x18, 0x51, 0x51, 0xcc, 0x30, 0xc3, 0x82, 0xaa, 0xba, 0xa9, 0xd8, 0x55, 0x37, 0xf5, 0x1c, 0xa6, - 0x9b, 0xe6, 0x60, 0x3c, 0x8e, 0x43, 0x66, 0x86, 0x61, 0x64, 0x47, 0xe7, 0x5a, 0x12, 0x89, 0xd3, - 0xf4, 0x36, 0x81, 0x0b, 0x2a, 0x1c, 0x93, 0x38, 0xd1, 0x98, 0x1f, 0xb8, 0x6e, 0x94, 0xee, 0xd0, - 0x4f, 0xb1, 0x95, 0xb8, 0x90, 0x6b, 0x25, 0xfe, 0x09, 0x0b, 0x4e, 0x24, 0xeb, 0x61, 0x8f, 0xa6, - 0xbf, 0x60, 0xc1, 0x49, 0x66, 0x2b, 0x67, 0xb5, 0xa6, 0x2d, 0xf3, 0x2f, 0xb6, 0x0d, 0x31, 0x95, - 0xd3, 0xe2, 0x38, 0xe6, 0xc6, 0x4a, 0x16, 0x6b, 0x9c, 0x5d, 0xa3, 0xfd, 0x5f, 0x7b, 0x60, 0x32, - 0x2f, 0x36, 0x15, 0x7b, 0x26, 0xe2, 0xdc, 0xad, 0x6e, 0x93, 0x3b, 0xc2, 0x19, 0x3f, 0x7e, 0x26, - 0xc2, 0xc1, 0x58, 0xe2, 0x93, 0xe9, 0x4f, 0x0a, 0x5d, 0xa6, 0x3f, 0xd9, 0x82, 0xf1, 0x3b, 0x5b, - 0xc4, 0xbb, 0xe9, 0x85, 0x4e, 0xe4, 0x86, 0x1b, 0x2e, 0xb3, 0x2b, 0xf3, 0x79, 0x23, 0x73, 0x50, - 0x8f, 0xdf, 0x4e, 0x12, 0x1c, 0xec, 0x97, 0xcf, 0x19, 0x80, 0xb8, 0xc9, 0x7c, 0x23, 0xc1, 0x69, - 0xa6, 0xe9, 0xec, 0x31, 0x3d, 0x0f, 0x39, 0x7b, 0xcc, 0x8e, 0x2b, 0xbc, 0x51, 0xe4, 0x1b, 0x00, - 0x76, 0x63, 0x5c, 0x51, 0x50, 0xac, 0x51, 0xa0, 0x4f, 0x02, 0xd2, 0x33, 0x74, 0x19, 0xa1, 0x41, - 0x9f, 0xbb, 0xbf, 0x5f, 0x46, 0xab, 0x29, 0xec, 0xc1, 0x7e, 0x79, 0x82, 0x42, 0x97, 0x3c, 0x7a, - 0xf3, 0x8c, 0xe3, 0xa9, 0x65, 0x30, 0x42, 0xb7, 0x61, 0x8c, 0x42, 0xd9, 0x8a, 0x92, 0x71, 0x47, - 0xf9, 0x6d, 0xf1, 0x99, 0xfb, 0xfb, 0xe5, 0xb1, 0xd5, 0x04, 0x2e, 0x8f, 0x75, 0x8a, 0x09, 0x7a, - 0x05, 0x46, 0xe2, 0x79, 0x75, 0x9d, 0xec, 0xf1, 0x00, 0x3d, 0x25, 0xae, 0xf0, 0x5e, 0x31, 0x30, - 0x38, 0x41, 0x69, 0x7f, 0xc1, 0x82, 0x33, 0xb9, 0x19, 0xf1, 0xd1, 0x25, 0x18, 0x70, 0x9a, 0x2e, - 0x37, 0x5f, 0x88, 0xa3, 0x86, 0xa9, 0xc9, 0x2a, 0x4b, 0xdc, 0x78, 0xa1, 0xb0, 0x74, 0x87, 0xdf, - 0x76, 0xbd, 0x7a, 0x72, 0x87, 0xbf, 0xee, 0x7a, 0x75, 0xcc, 0x30, 0xea, 0xc8, 0x2a, 0xe6, 0x3e, - 0x45, 
0xf8, 0x0a, 0x5d, 0xab, 0x19, 0xb9, 0xf3, 0x8f, 0xb7, 0x19, 0xe8, 0x19, 0xdd, 0xd4, 0x28, - 0xbc, 0x0a, 0x73, 0xcd, 0x8c, 0xdf, 0x67, 0x81, 0x78, 0xba, 0xdc, 0xc5, 0x99, 0xfc, 0x26, 0x0c, - 0xed, 0xa6, 0xb3, 0x17, 0x5e, 0xc8, 0x7f, 0xcb, 0x2d, 0x22, 0xae, 0x2b, 0x41, 0xdb, 0xc8, 0x54, - 0x68, 0xf0, 0xb2, 0xeb, 0x20, 0xb0, 0xf3, 0x84, 0x19, 0x14, 0x3a, 0xb7, 0xe6, 0x79, 0x80, 0x3a, - 0xa3, 0x65, 0x29, 0x8d, 0x0b, 0xa6, 0xc4, 0x35, 0xaf, 0x30, 0x58, 0xa3, 0xb2, 0xff, 0x79, 0x01, - 0x06, 0x65, 0xb6, 0xbc, 0x96, 0xd7, 0x8d, 0xda, 0xef, 0x50, 0xe9, 0xb3, 0xd1, 0x65, 0x28, 0x31, - 0xbd, 0x74, 0x25, 0xd6, 0x96, 0x2a, 0xad, 0xd0, 0x8a, 0x44, 0xe0, 0x98, 0x86, 0xee, 0x8e, 0x61, - 0x6b, 0x9d, 0x91, 0x27, 0x1e, 0xda, 0x56, 0x39, 0x18, 0x4b, 0x3c, 0xfa, 0x18, 0x8c, 0xf1, 0x72, - 0x81, 0xdf, 0x74, 0x36, 0xb9, 0x2d, 0xab, 0x57, 0x45, 0x2f, 0x19, 0x5b, 0x49, 0xe0, 0x0e, 0xf6, - 0xcb, 0x27, 0x92, 0x30, 0x66, 0xa4, 0x4d, 0x71, 0x61, 0x2e, 0x6b, 0xbc, 0x12, 0xba, 0xab, 0xa7, - 0x3c, 0xdd, 0x62, 0x14, 0xd6, 0xe9, 0xec, 0x4f, 0x03, 0x4a, 0xe7, 0x0d, 0x44, 0xaf, 0x73, 0x97, - 0x67, 0x37, 0x20, 0xf5, 0x76, 0x46, 0x5b, 0x3d, 0x46, 0x87, 0x7c, 0x23, 0xc7, 0x4b, 0x61, 0x55, - 0xde, 0xfe, 0x3f, 0x8b, 0x30, 0x96, 0x8c, 0x0a, 0x80, 0xae, 0x41, 0x1f, 0x17, 0x29, 0x05, 0xfb, - 0x36, 0x3e, 0x41, 0x5a, 0x2c, 0x01, 0x76, 0xb8, 0x0a, 0xa9, 0x54, 0x94, 0x47, 0x6f, 0xc1, 0x60, - 0xdd, 0xbf, 0xe3, 0xdd, 0x71, 0x82, 0xfa, 0x4c, 0x65, 0x49, 0x4c, 0xe7, 0x4c, 0x45, 0xc5, 0x7c, - 0x4c, 0xa6, 0xc7, 0x27, 0x60, 0xf6, 0xef, 0x18, 0x85, 0x75, 0x76, 0x68, 0x8d, 0x25, 0xfa, 0xd8, - 0x70, 0x37, 0x57, 0x9c, 0x66, 0xbb, 0xf7, 0x2f, 0x73, 0x92, 0x48, 0xe3, 0x3c, 0x2c, 0xb2, 0x81, - 0x70, 0x04, 0x8e, 0x19, 0xa1, 0xcf, 0xc2, 0x44, 0x98, 0x63, 0x3a, 0xc9, 0x4b, 0x23, 0xdb, 0xce, - 0x9a, 0x30, 0x7b, 0xfa, 0xfe, 0x7e, 0x79, 0x22, 0xcb, 0xc8, 0x92, 0x55, 0x8d, 0xfd, 0xa5, 0x13, - 0x60, 0x2c, 0x62, 0x23, 0xab, 0xb8, 0x75, 0x44, 0x59, 0xc5, 0x31, 0x0c, 0x90, 0x9d, 0x66, 0xb4, - 0x37, 0xef, 0x06, 0x62, 0x4c, 0x32, 0x79, 0x2e, 0x08, 0x9a, 0x34, 0x4f, 0x89, 0xc1, 0x8a, 0x4f, - 0x76, 0xea, 0xf7, 0xe2, 0x37, 0x31, 0xf5, 0x7b, 0xcf, 0x31, 0xa6, 0x7e, 0x5f, 0x85, 0xfe, 0x4d, - 0x37, 0xc2, 0xa4, 0xe9, 0x8b, 0xcb, 0x5c, 0xe6, 0x3c, 0xbc, 0xca, 0x49, 0xd2, 0x49, 0x86, 0x05, - 0x02, 0x4b, 0x26, 0xe8, 0x75, 0xb5, 0x02, 0xfb, 0xf2, 0x15, 0x2e, 0x69, 0xe7, 0x95, 0xcc, 0x35, - 0x28, 0x12, 0xbc, 0xf7, 0x3f, 0x68, 0x82, 0xf7, 0x45, 0x99, 0x96, 0x7d, 0x20, 0xff, 0xb1, 0x1a, - 0xcb, 0xba, 0xde, 0x21, 0x19, 0xfb, 0x2d, 0x3d, 0x95, 0x7d, 0x29, 0x7f, 0x27, 0x50, 0x59, 0xea, - 0xbb, 0x4c, 0x60, 0xff, 0x7d, 0x16, 0x9c, 0x4c, 0xa6, 0x9a, 0x65, 0x6f, 0x2a, 0x84, 0x9f, 0xc7, - 0x4b, 0xdd, 0xe4, 0xfe, 0x65, 0x05, 0x8c, 0x0a, 0x99, 0x8e, 0x34, 0x93, 0x0c, 0x67, 0x57, 0x47, - 0x3b, 0x3a, 0x58, 0xaf, 0x0b, 0x7f, 0x83, 0xc7, 0x72, 0x32, 0xe1, 0xb7, 0xc9, 0x7f, 0xbf, 0x96, - 0x91, 0x75, 0xfd, 0xf1, 0xbc, 0xac, 0xeb, 0x5d, 0xe7, 0x5a, 0x7f, 0x5d, 0xe5, 0xc0, 0x1f, 0xce, - 0x9f, 0x4a, 0x3c, 0xc3, 0x7d, 0xc7, 0xcc, 0xf7, 0xaf, 0xab, 0xcc, 0xf7, 0x6d, 0x22, 0x8b, 0xf3, - 0xbc, 0xf6, 0x1d, 0xf3, 0xdd, 0x6b, 0x39, 0xeb, 0x47, 0x8f, 0x26, 0x67, 0xbd, 0x71, 0xd4, 0xf0, - 0xb4, 0xe9, 0xcf, 0x74, 0x38, 0x6a, 0x0c, 0xbe, 0xed, 0x0f, 0x1b, 0x9e, 0x9f, 0x7f, 0xfc, 0x81, - 0xf2, 0xf3, 0xdf, 0xd2, 0xf3, 0xdd, 0xa3, 0x0e, 0x09, 0xdd, 0x29, 0x51, 0x97, 0x59, 0xee, 0x6f, - 0xe9, 0x07, 0xe0, 0x44, 0x3e, 0x5f, 0x75, 0xce, 0xa5, 0xf9, 0x66, 0x1e, 0x81, 0xa9, 0xec, 0xf9, - 0x27, 0x8e, 0x27, 0x7b, 0xfe, 0xc9, 0x23, 0xcf, 0x9e, 0x7f, 0xea, 0x18, 0xb2, 0xe7, 0x9f, 0x3e, - 0xc6, 0xec, 0xf9, 0xb7, 0x98, 
0x73, 0x14, 0x0f, 0x00, 0x25, 0x22, 0xa1, 0x3f, 0x95, 0x13, 0x3f, - 0x2d, 0x1d, 0x25, 0x8a, 0x7f, 0x9c, 0x42, 0xe1, 0x98, 0x55, 0x46, 0x56, 0xfe, 0xc9, 0x87, 0x90, - 0x95, 0x7f, 0x35, 0xce, 0xca, 0x7f, 0x26, 0x7f, 0xa8, 0x33, 0x9e, 0xd3, 0xe4, 0xe4, 0xe2, 0xbf, - 0xa5, 0xe7, 0xd0, 0x7f, 0xa4, 0x8d, 0x15, 0x2c, 0x4b, 0xa1, 0xdc, 0x26, 0x73, 0xfe, 0x6b, 0x3c, - 0x73, 0xfe, 0xd9, 0xfc, 0x9d, 0x3c, 0x79, 0xdc, 0x19, 0xf9, 0xf2, 0x69, 0xbb, 0x54, 0xf0, 0x57, - 0x16, 0xf3, 0x3d, 0xa7, 0x5d, 0x2a, 0x7a, 0x6c, 0xba, 0x5d, 0x0a, 0x85, 0x63, 0x56, 0xf6, 0x0f, - 0x14, 0xe0, 0x7c, 0xfb, 0xf5, 0x16, 0x6b, 0xc9, 0x2b, 0xb1, 0x43, 0x40, 0x42, 0x4b, 0xce, 0xef, - 0x6c, 0x31, 0x55, 0xd7, 0xf1, 0x20, 0xaf, 0xc2, 0xb8, 0x7a, 0x87, 0xd3, 0x70, 0x6b, 0x7b, 0xab, - 0xf1, 0x35, 0x59, 0x45, 0x4e, 0xa8, 0x26, 0x09, 0x70, 0xba, 0x0c, 0x9a, 0x81, 0x51, 0x03, 0xb8, - 0x34, 0x2f, 0xee, 0x66, 0x71, 0x94, 0x71, 0x13, 0x8d, 0x93, 0xf4, 0xf6, 0xcf, 0x59, 0x70, 0x3a, - 0x27, 0xe5, 0x6b, 0xd7, 0xe1, 0x0e, 0x37, 0x60, 0xb4, 0x69, 0x16, 0xed, 0x10, 0xa1, 0xd5, 0x48, - 0x2c, 0xab, 0xda, 0x9a, 0x40, 0xe0, 0x24, 0x53, 0xfb, 0x67, 0x0a, 0x70, 0xae, 0xad, 0x63, 0x29, - 0xc2, 0x70, 0x6a, 0x73, 0x27, 0x74, 0xe6, 0x02, 0x52, 0x27, 0x5e, 0xe4, 0x3a, 0x8d, 0x6a, 0x93, - 0xd4, 0x34, 0x3b, 0x07, 0xf3, 0xd0, 0xbc, 0xba, 0x52, 0x9d, 0x49, 0x53, 0xe0, 0x9c, 0x92, 0x68, - 0x11, 0x50, 0x1a, 0x23, 0x46, 0x98, 0x65, 0x0f, 0x48, 0xf3, 0xc3, 0x19, 0x25, 0xd0, 0x07, 0x61, - 0x58, 0x39, 0xac, 0x6a, 0x23, 0xce, 0x36, 0x76, 0xac, 0x23, 0xb0, 0x49, 0x87, 0xae, 0xf0, 0xf4, - 0x13, 0x22, 0x51, 0x89, 0x30, 0x8a, 0x8c, 0xca, 0xdc, 0x12, 0x02, 0x8c, 0x75, 0x9a, 0xd9, 0x97, - 0x7f, 0xeb, 0x1b, 0xe7, 0xdf, 0xf7, 0xbb, 0xdf, 0x38, 0xff, 0xbe, 0x3f, 0xf8, 0xc6, 0xf9, 0xf7, - 0x7d, 0xd7, 0xfd, 0xf3, 0xd6, 0x6f, 0xdd, 0x3f, 0x6f, 0xfd, 0xee, 0xfd, 0xf3, 0xd6, 0x1f, 0xdc, - 0x3f, 0x6f, 0xfd, 0xf1, 0xfd, 0xf3, 0xd6, 0x17, 0xff, 0xe4, 0xfc, 0xfb, 0xde, 0x44, 0x71, 0x00, - 0xd1, 0xcb, 0x74, 0x74, 0x2e, 0xef, 0x5e, 0xf9, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x0b, - 0x0a, 0x3d, 0x91, 0x13, 0x01, 0x00, + // 15465 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x90, 0x1c, 0xc9, + 0x75, 0x18, 0xcc, 0xea, 0x9e, 0xab, 0xdf, 0xdc, 0x39, 0x00, 0x76, 0x30, 0x0b, 0xa0, 0xb1, 0xb5, + 0xbb, 0x58, 0xec, 0x35, 0x20, 0xf6, 0x20, 0x97, 0xbb, 0xcb, 0x15, 0xe7, 0x04, 0x66, 0x31, 0x33, + 0xe8, 0xcd, 0x1e, 0x00, 0xe4, 0x72, 0xc9, 0x8f, 0x85, 0xee, 0x9c, 0x99, 0xe2, 0x74, 0x57, 0xf5, + 0x56, 0x55, 0x0f, 0x30, 0xf8, 0xc8, 0x90, 0x44, 0x7d, 0xa2, 0x44, 0x4a, 0x5f, 0x04, 0xe3, 0x0b, + 0x7d, 0x47, 0x50, 0x0a, 0xc5, 0x17, 0x92, 0xac, 0xc3, 0xb4, 0x64, 0xd3, 0x94, 0x25, 0x59, 0xd4, + 0xe5, 0x2b, 0x2c, 0x29, 0x1c, 0xb2, 0xac, 0x08, 0x8b, 0x0a, 0x2b, 0x3c, 0x32, 0x21, 0x47, 0x28, + 0xf4, 0xc3, 0x92, 0x7c, 0xfc, 0xb0, 0x61, 0xd9, 0x72, 0xe4, 0x59, 0x99, 0x75, 0x74, 0xf7, 0x60, + 0x07, 0xc3, 0x25, 0x63, 0xff, 0x75, 0xbf, 0xf7, 0xf2, 0x65, 0x56, 0x9e, 0x2f, 0xdf, 0x7b, 0xf9, + 0x1e, 0xbc, 0xb2, 0xf3, 0x52, 0x38, 0xeb, 0xfa, 0x17, 0x76, 0xda, 0x37, 0x49, 0xe0, 0x91, 0x88, + 0x84, 0x17, 0x76, 0x89, 0x57, 0xf7, 0x83, 0x0b, 0x02, 0xe1, 0xb4, 0xdc, 0x0b, 0x35, 0x3f, 0x20, + 0x17, 0x76, 0x2f, 0x5e, 0xd8, 0x22, 0x1e, 0x09, 0x9c, 0x88, 0xd4, 0x67, 0x5b, 0x81, 0x1f, 0xf9, + 0x08, 0x71, 0x9a, 0x59, 0xa7, 0xe5, 0xce, 0x52, 0x9a, 0xd9, 0xdd, 0x8b, 0x33, 0xcf, 0x6e, 0xb9, + 0xd1, 0x76, 0xfb, 0xe6, 0x6c, 0xcd, 0x6f, 0x5e, 0xd8, 0xf2, 0xb7, 0xfc, 0x0b, 0x8c, 0xf4, 0x66, + 0x7b, 0x93, 0xfd, 0x63, 0x7f, 0xd8, 0x2f, 0xce, 0x62, 0xe6, 0x85, 
0xb8, 0x9a, 0xa6, 0x53, 0xdb, + 0x76, 0x3d, 0x12, 0xec, 0x5d, 0x68, 0xed, 0x6c, 0xb1, 0x7a, 0x03, 0x12, 0xfa, 0xed, 0xa0, 0x46, + 0x92, 0x15, 0x77, 0x2c, 0x15, 0x5e, 0x68, 0x92, 0xc8, 0xc9, 0x68, 0xee, 0xcc, 0x85, 0xbc, 0x52, + 0x41, 0xdb, 0x8b, 0xdc, 0x66, 0xba, 0x9a, 0x0f, 0x74, 0x2b, 0x10, 0xd6, 0xb6, 0x49, 0xd3, 0x49, + 0x95, 0x7b, 0x3e, 0xaf, 0x5c, 0x3b, 0x72, 0x1b, 0x17, 0x5c, 0x2f, 0x0a, 0xa3, 0x20, 0x59, 0xc8, + 0xfe, 0x86, 0x05, 0x67, 0xe7, 0x6e, 0x54, 0x97, 0x1a, 0x4e, 0x18, 0xb9, 0xb5, 0xf9, 0x86, 0x5f, + 0xdb, 0xa9, 0x46, 0x7e, 0x40, 0xae, 0xfb, 0x8d, 0x76, 0x93, 0x54, 0x59, 0x47, 0xa0, 0x67, 0x60, + 0x68, 0x97, 0xfd, 0x5f, 0x59, 0x9c, 0xb6, 0xce, 0x5a, 0xe7, 0x4b, 0xf3, 0x13, 0xbf, 0xbd, 0x5f, + 0x7e, 0xdf, 0xdd, 0xfd, 0xf2, 0xd0, 0x75, 0x01, 0xc7, 0x8a, 0x02, 0x9d, 0x83, 0x81, 0xcd, 0x70, + 0x63, 0xaf, 0x45, 0xa6, 0x0b, 0x8c, 0x76, 0x4c, 0xd0, 0x0e, 0x2c, 0x57, 0x29, 0x14, 0x0b, 0x2c, + 0xba, 0x00, 0xa5, 0x96, 0x13, 0x44, 0x6e, 0xe4, 0xfa, 0xde, 0x74, 0xf1, 0xac, 0x75, 0xbe, 0x7f, + 0x7e, 0x52, 0x90, 0x96, 0x2a, 0x12, 0x81, 0x63, 0x1a, 0xda, 0x8c, 0x80, 0x38, 0xf5, 0xab, 0x5e, + 0x63, 0x6f, 0xba, 0xef, 0xac, 0x75, 0x7e, 0x28, 0x6e, 0x06, 0x16, 0x70, 0xac, 0x28, 0xec, 0x2f, + 0x17, 0x60, 0x68, 0x6e, 0x73, 0xd3, 0xf5, 0xdc, 0x68, 0x0f, 0x5d, 0x87, 0x11, 0xcf, 0xaf, 0x13, + 0xf9, 0x9f, 0x7d, 0xc5, 0xf0, 0x73, 0x67, 0x67, 0xd3, 0x53, 0x69, 0x76, 0x5d, 0xa3, 0x9b, 0x9f, + 0xb8, 0xbb, 0x5f, 0x1e, 0xd1, 0x21, 0xd8, 0xe0, 0x83, 0x30, 0x0c, 0xb7, 0xfc, 0xba, 0x62, 0x5b, + 0x60, 0x6c, 0xcb, 0x59, 0x6c, 0x2b, 0x31, 0xd9, 0xfc, 0xf8, 0xdd, 0xfd, 0xf2, 0xb0, 0x06, 0xc0, + 0x3a, 0x13, 0x74, 0x13, 0xc6, 0xe9, 0x5f, 0x2f, 0x72, 0x15, 0xdf, 0x22, 0xe3, 0xfb, 0x68, 0x1e, + 0x5f, 0x8d, 0x74, 0x7e, 0xea, 0xee, 0x7e, 0x79, 0x3c, 0x01, 0xc4, 0x49, 0x86, 0xf6, 0x1d, 0x18, + 0x9b, 0x8b, 0x22, 0xa7, 0xb6, 0x4d, 0xea, 0x7c, 0x04, 0xd1, 0x0b, 0xd0, 0xe7, 0x39, 0x4d, 0x22, + 0xc6, 0xf7, 0xac, 0xe8, 0xd8, 0xbe, 0x75, 0xa7, 0x49, 0xee, 0xed, 0x97, 0x27, 0xae, 0x79, 0xee, + 0xdb, 0x6d, 0x31, 0x2b, 0x28, 0x0c, 0x33, 0x6a, 0xf4, 0x1c, 0x40, 0x9d, 0xec, 0xba, 0x35, 0x52, + 0x71, 0xa2, 0x6d, 0x31, 0xde, 0x48, 0x94, 0x85, 0x45, 0x85, 0xc1, 0x1a, 0x95, 0x7d, 0x1b, 0x4a, + 0x73, 0xbb, 0xbe, 0x5b, 0xaf, 0xf8, 0xf5, 0x10, 0xed, 0xc0, 0x78, 0x2b, 0x20, 0x9b, 0x24, 0x50, + 0xa0, 0x69, 0xeb, 0x6c, 0xf1, 0xfc, 0xf0, 0x73, 0xe7, 0x33, 0x3f, 0xd6, 0x24, 0x5d, 0xf2, 0xa2, + 0x60, 0x6f, 0xfe, 0x21, 0x51, 0xdf, 0x78, 0x02, 0x8b, 0x93, 0x9c, 0xed, 0x7f, 0x5a, 0x80, 0xe3, + 0x73, 0x77, 0xda, 0x01, 0x59, 0x74, 0xc3, 0x9d, 0xe4, 0x0c, 0xaf, 0xbb, 0xe1, 0xce, 0x7a, 0xdc, + 0x03, 0x6a, 0x6a, 0x2d, 0x0a, 0x38, 0x56, 0x14, 0xe8, 0x59, 0x18, 0xa4, 0xbf, 0xaf, 0xe1, 0x15, + 0xf1, 0xc9, 0x53, 0x82, 0x78, 0x78, 0xd1, 0x89, 0x9c, 0x45, 0x8e, 0xc2, 0x92, 0x06, 0xad, 0xc1, + 0x70, 0x8d, 0x2d, 0xc8, 0xad, 0x35, 0xbf, 0x4e, 0xd8, 0x60, 0x96, 0xe6, 0x9f, 0xa6, 0xe4, 0x0b, + 0x31, 0xf8, 0xde, 0x7e, 0x79, 0x9a, 0xb7, 0x4d, 0xb0, 0xd0, 0x70, 0x58, 0x2f, 0x8f, 0x6c, 0xb5, + 0xbe, 0xfa, 0x18, 0x27, 0xc8, 0x58, 0x5b, 0xe7, 0xb5, 0xa5, 0xd2, 0xcf, 0x96, 0xca, 0x48, 0xf6, + 0x32, 0x41, 0x17, 0xa1, 0x6f, 0xc7, 0xf5, 0xea, 0xd3, 0x03, 0x8c, 0xd7, 0x69, 0x3a, 0xe6, 0x57, + 0x5c, 0xaf, 0x7e, 0x6f, 0xbf, 0x3c, 0x69, 0x34, 0x87, 0x02, 0x31, 0x23, 0xb5, 0xff, 0xb3, 0x05, + 0x65, 0x86, 0x5b, 0x76, 0x1b, 0xa4, 0x42, 0x82, 0xd0, 0x0d, 0x23, 0xe2, 0x45, 0x46, 0x87, 0x3e, + 0x07, 0x10, 0x92, 0x5a, 0x40, 0x22, 0xad, 0x4b, 0xd5, 0xc4, 0xa8, 0x2a, 0x0c, 0xd6, 0xa8, 0xe8, + 0x86, 0x10, 0x6e, 0x3b, 0x01, 0x9b, 0x5f, 0xa2, 0x63, 0xd5, 0x86, 0x50, 0x95, 0x08, 0x1c, 
0xd3, + 0x18, 0x1b, 0x42, 0xb1, 0xdb, 0x86, 0x80, 0x3e, 0x0c, 0xe3, 0x71, 0x65, 0x61, 0xcb, 0xa9, 0xc9, + 0x0e, 0x64, 0x4b, 0xa6, 0x6a, 0xa2, 0x70, 0x92, 0xd6, 0xfe, 0xdb, 0x96, 0x98, 0x3c, 0xf4, 0xab, + 0xdf, 0xe5, 0xdf, 0x6a, 0xff, 0x8a, 0x05, 0x83, 0xf3, 0xae, 0x57, 0x77, 0xbd, 0x2d, 0xf4, 0x29, + 0x18, 0xa2, 0x67, 0x53, 0xdd, 0x89, 0x1c, 0xb1, 0xef, 0xbd, 0x5f, 0x5b, 0x5b, 0xea, 0xa8, 0x98, + 0x6d, 0xed, 0x6c, 0x51, 0x40, 0x38, 0x4b, 0xa9, 0xe9, 0x6a, 0xbb, 0x7a, 0xf3, 0xd3, 0xa4, 0x16, + 0xad, 0x91, 0xc8, 0x89, 0x3f, 0x27, 0x86, 0x61, 0xc5, 0x15, 0x5d, 0x81, 0x81, 0xc8, 0x09, 0xb6, + 0x48, 0x24, 0x36, 0xc0, 0xcc, 0x8d, 0x8a, 0x97, 0xc4, 0x74, 0x45, 0x12, 0xaf, 0x46, 0xe2, 0x63, + 0x61, 0x83, 0x15, 0xc5, 0x82, 0x85, 0xfd, 0x3f, 0x06, 0xe1, 0xe4, 0x42, 0x75, 0x25, 0x67, 0x5e, + 0x9d, 0x83, 0x81, 0x7a, 0xe0, 0xee, 0x92, 0x40, 0xf4, 0xb3, 0xe2, 0xb2, 0xc8, 0xa0, 0x58, 0x60, + 0xd1, 0x4b, 0x30, 0xc2, 0x0f, 0xa4, 0xcb, 0x8e, 0x57, 0x6f, 0xc8, 0x2e, 0x3e, 0x26, 0xa8, 0x47, + 0xae, 0x6b, 0x38, 0x6c, 0x50, 0x1e, 0x70, 0x52, 0x9d, 0x4b, 0x2c, 0xc6, 0xbc, 0xc3, 0xee, 0x0b, + 0x16, 0x4c, 0xf0, 0x6a, 0xe6, 0xa2, 0x28, 0x70, 0x6f, 0xb6, 0x23, 0x12, 0x4e, 0xf7, 0xb3, 0x9d, + 0x6e, 0x21, 0xab, 0xb7, 0x72, 0x7b, 0x60, 0xf6, 0x7a, 0x82, 0x0b, 0xdf, 0x04, 0xa7, 0x45, 0xbd, + 0x13, 0x49, 0x34, 0x4e, 0x55, 0x8b, 0xbe, 0xcf, 0x82, 0x99, 0x9a, 0xef, 0x45, 0x81, 0xdf, 0x68, + 0x90, 0xa0, 0xd2, 0xbe, 0xd9, 0x70, 0xc3, 0x6d, 0x3e, 0x4f, 0x31, 0xd9, 0x64, 0x3b, 0x41, 0xce, + 0x18, 0x2a, 0x22, 0x31, 0x86, 0x67, 0xee, 0xee, 0x97, 0x67, 0x16, 0x72, 0x59, 0xe1, 0x0e, 0xd5, + 0xa0, 0x1d, 0x40, 0xf4, 0x28, 0xad, 0x46, 0xce, 0x16, 0x89, 0x2b, 0x1f, 0xec, 0xbd, 0xf2, 0x13, + 0x77, 0xf7, 0xcb, 0x68, 0x3d, 0xc5, 0x02, 0x67, 0xb0, 0x45, 0x6f, 0xc3, 0x31, 0x0a, 0x4d, 0x7d, + 0xeb, 0x50, 0xef, 0xd5, 0x4d, 0xdf, 0xdd, 0x2f, 0x1f, 0x5b, 0xcf, 0x60, 0x82, 0x33, 0x59, 0xa3, + 0xef, 0xb1, 0xe0, 0x64, 0xfc, 0xf9, 0x4b, 0xb7, 0x5b, 0x8e, 0x57, 0x8f, 0x2b, 0x2e, 0xf5, 0x5e, + 0x31, 0xdd, 0x93, 0x4f, 0x2e, 0xe4, 0x71, 0xc2, 0xf9, 0x95, 0x20, 0x0f, 0xa6, 0x68, 0xd3, 0x92, + 0x75, 0x43, 0xef, 0x75, 0x3f, 0x74, 0x77, 0xbf, 0x3c, 0xb5, 0x9e, 0xe6, 0x81, 0xb3, 0x18, 0xcf, + 0x2c, 0xc0, 0xf1, 0xcc, 0xd9, 0x89, 0x26, 0xa0, 0xb8, 0x43, 0xb8, 0xd4, 0x55, 0xc2, 0xf4, 0x27, + 0x3a, 0x06, 0xfd, 0xbb, 0x4e, 0xa3, 0x2d, 0x16, 0x26, 0xe6, 0x7f, 0x5e, 0x2e, 0xbc, 0x64, 0xd9, + 0xff, 0xac, 0x08, 0xe3, 0x0b, 0xd5, 0x95, 0xfb, 0x5a, 0xf5, 0xfa, 0xb1, 0x57, 0xe8, 0x78, 0xec, + 0xc5, 0x87, 0x68, 0x31, 0xf7, 0x10, 0xfd, 0xee, 0x8c, 0x25, 0xdb, 0xc7, 0x96, 0xec, 0x87, 0x72, + 0x96, 0xec, 0x21, 0x2f, 0xd4, 0xdd, 0x9c, 0x59, 0xdb, 0xcf, 0x06, 0x30, 0x53, 0x42, 0x5a, 0xf5, + 0x6b, 0x4e, 0x23, 0xb9, 0xd5, 0x1e, 0x70, 0xea, 0x1e, 0xce, 0x38, 0xd6, 0x60, 0x64, 0xc1, 0x69, + 0x39, 0x37, 0xdd, 0x86, 0x1b, 0xb9, 0x24, 0x44, 0x4f, 0x40, 0xd1, 0xa9, 0xd7, 0x99, 0x74, 0x57, + 0x9a, 0x3f, 0x7e, 0x77, 0xbf, 0x5c, 0x9c, 0xab, 0x53, 0x31, 0x03, 0x14, 0xd5, 0x1e, 0xa6, 0x14, + 0xe8, 0x29, 0xe8, 0xab, 0x07, 0x7e, 0x6b, 0xba, 0xc0, 0x28, 0xe9, 0x2a, 0xef, 0x5b, 0x0c, 0xfc, + 0x56, 0x82, 0x94, 0xd1, 0xd8, 0xbf, 0x55, 0x80, 0x53, 0x0b, 0xa4, 0xb5, 0xbd, 0x5c, 0xcd, 0x39, + 0x2f, 0xce, 0xc3, 0x50, 0xd3, 0xf7, 0xdc, 0xc8, 0x0f, 0x42, 0x51, 0x35, 0x9b, 0x11, 0x6b, 0x02, + 0x86, 0x15, 0x16, 0x9d, 0x85, 0xbe, 0x56, 0x2c, 0xc4, 0x8e, 0x48, 0x01, 0x98, 0x89, 0xaf, 0x0c, + 0x43, 0x29, 0xda, 0x21, 0x09, 0xc4, 0x8c, 0x51, 0x14, 0xd7, 0x42, 0x12, 0x60, 0x86, 0x89, 0x25, + 0x01, 0x2a, 0x23, 0x88, 0x13, 0x21, 0x21, 0x09, 0x50, 0x0c, 0xd6, 0xa8, 0x50, 0x05, 0x4a, 0x61, + 0x62, 0x64, 0x7b, 
0x5a, 0x9a, 0xa3, 0x4c, 0x54, 0x50, 0x23, 0x19, 0x33, 0x31, 0x4e, 0xb0, 0x81, + 0xae, 0xa2, 0xc2, 0xd7, 0x0b, 0x80, 0x78, 0x17, 0x7e, 0x9b, 0x75, 0xdc, 0xb5, 0x74, 0xc7, 0xf5, + 0xbe, 0x24, 0x0e, 0xab, 0xf7, 0xfe, 0x8b, 0x05, 0xa7, 0x16, 0x5c, 0xaf, 0x4e, 0x82, 0x9c, 0x09, + 0xf8, 0x60, 0xee, 0xce, 0x07, 0x13, 0x52, 0x8c, 0x29, 0xd6, 0x77, 0x08, 0x53, 0xcc, 0xfe, 0x4b, + 0x0b, 0x10, 0xff, 0xec, 0x77, 0xdd, 0xc7, 0x5e, 0x4b, 0x7f, 0xec, 0x21, 0x4c, 0x0b, 0xfb, 0xef, + 0x5a, 0x30, 0xbc, 0xd0, 0x70, 0xdc, 0xa6, 0xf8, 0xd4, 0x05, 0x98, 0x94, 0x8a, 0x22, 0x06, 0xd6, + 0x64, 0x7f, 0xba, 0xb9, 0x4d, 0xe2, 0x24, 0x12, 0xa7, 0xe9, 0xd1, 0xc7, 0xe1, 0xa4, 0x01, 0xdc, + 0x20, 0xcd, 0x56, 0xc3, 0x89, 0xf4, 0x5b, 0x01, 0x3b, 0xfd, 0x71, 0x1e, 0x11, 0xce, 0x2f, 0x6f, + 0xaf, 0xc2, 0xd8, 0x42, 0xc3, 0x25, 0x5e, 0xb4, 0x52, 0x59, 0xf0, 0xbd, 0x4d, 0x77, 0x0b, 0xbd, + 0x0c, 0x63, 0x91, 0xdb, 0x24, 0x7e, 0x3b, 0xaa, 0x92, 0x9a, 0xef, 0xb1, 0xbb, 0xb6, 0x75, 0xbe, + 0x7f, 0x1e, 0xdd, 0xdd, 0x2f, 0x8f, 0x6d, 0x18, 0x18, 0x9c, 0xa0, 0xb4, 0x7f, 0x9a, 0xee, 0xb4, + 0x8d, 0x76, 0x18, 0x91, 0x60, 0x23, 0x68, 0x87, 0xd1, 0x7c, 0x9b, 0x4a, 0xcb, 0x95, 0xc0, 0xa7, + 0x1d, 0xe8, 0xfa, 0x1e, 0x3a, 0x65, 0x28, 0x10, 0x86, 0xa4, 0xf2, 0x40, 0x28, 0x0a, 0x66, 0x01, + 0x42, 0x77, 0xcb, 0x23, 0x81, 0xf6, 0x69, 0x63, 0x6c, 0x71, 0x2b, 0x28, 0xd6, 0x28, 0x50, 0x03, + 0x46, 0x1b, 0xce, 0x4d, 0xd2, 0xa8, 0x92, 0x06, 0xa9, 0x45, 0x7e, 0x20, 0x54, 0x20, 0xcf, 0xf7, + 0x76, 0x73, 0x59, 0xd5, 0x8b, 0xce, 0x4f, 0xde, 0xdd, 0x2f, 0x8f, 0x1a, 0x20, 0x6c, 0x32, 0xa7, + 0x9b, 0x9d, 0xdf, 0xa2, 0x5f, 0xe1, 0x34, 0xf4, 0xeb, 0xf2, 0x55, 0x01, 0xc3, 0x0a, 0xab, 0x36, + 0xbb, 0xbe, 0xbc, 0xcd, 0xce, 0xfe, 0x63, 0xba, 0x34, 0xfc, 0x66, 0xcb, 0xf7, 0x88, 0x17, 0x2d, + 0xf8, 0x5e, 0x9d, 0x2b, 0xaf, 0x5e, 0x86, 0xbe, 0x88, 0x4e, 0x75, 0xde, 0x3d, 0xe7, 0x64, 0x41, + 0x3a, 0xc1, 0xef, 0xed, 0x97, 0x4f, 0xa4, 0x4b, 0xb0, 0x25, 0xc0, 0xca, 0xa0, 0x0f, 0xc1, 0x40, + 0x18, 0x39, 0x51, 0x3b, 0x14, 0x1d, 0xf7, 0x88, 0x5c, 0x28, 0x55, 0x06, 0xbd, 0xb7, 0x5f, 0x1e, + 0x57, 0xc5, 0x38, 0x08, 0x8b, 0x02, 0xe8, 0x49, 0x18, 0x6c, 0x92, 0x30, 0x74, 0xb6, 0xa4, 0xa0, + 0x33, 0x2e, 0xca, 0x0e, 0xae, 0x71, 0x30, 0x96, 0x78, 0xf4, 0x28, 0xf4, 0x93, 0x20, 0xf0, 0x03, + 0xf1, 0x6d, 0xa3, 0x82, 0xb0, 0x7f, 0x89, 0x02, 0x31, 0xc7, 0xd9, 0xff, 0xd2, 0x82, 0x71, 0xd5, + 0x56, 0x5e, 0xd7, 0x11, 0x5c, 0x30, 0xdf, 0x04, 0xa8, 0xc9, 0x0f, 0x0c, 0x99, 0x60, 0x30, 0xfc, + 0xdc, 0xb9, 0x4c, 0x19, 0x2c, 0xd5, 0x8d, 0x31, 0x67, 0x05, 0x0a, 0xb1, 0xc6, 0xcd, 0xfe, 0x75, + 0x0b, 0xa6, 0x12, 0x5f, 0xb4, 0xea, 0x86, 0x11, 0x7a, 0x2b, 0xf5, 0x55, 0xb3, 0x3d, 0x4e, 0x3e, + 0x37, 0xe4, 0xdf, 0xa4, 0x76, 0x29, 0x09, 0xd1, 0xbe, 0xe8, 0x32, 0xf4, 0xbb, 0x11, 0x69, 0xca, + 0x8f, 0x79, 0xb4, 0xe3, 0xc7, 0xf0, 0x56, 0xc5, 0x23, 0xb2, 0x42, 0x4b, 0x62, 0xce, 0xc0, 0xfe, + 0xad, 0x22, 0x94, 0xf8, 0xfa, 0x5e, 0x73, 0x5a, 0x47, 0x30, 0x16, 0x4f, 0x43, 0xc9, 0x6d, 0x36, + 0xdb, 0x91, 0x73, 0x53, 0x9c, 0xd4, 0x43, 0x7c, 0xd7, 0x5c, 0x91, 0x40, 0x1c, 0xe3, 0xd1, 0x0a, + 0xf4, 0xb1, 0xa6, 0xf0, 0xaf, 0x7c, 0x22, 0xfb, 0x2b, 0x45, 0xdb, 0x67, 0x17, 0x9d, 0xc8, 0xe1, + 0x42, 0xb2, 0x5a, 0x57, 0x14, 0x84, 0x19, 0x0b, 0xe4, 0x00, 0xdc, 0x74, 0x3d, 0x27, 0xd8, 0xa3, + 0xb0, 0xe9, 0x22, 0x63, 0xf8, 0x6c, 0x67, 0x86, 0xf3, 0x8a, 0x9e, 0xb3, 0x55, 0x1f, 0x16, 0x23, + 0xb0, 0xc6, 0x74, 0xe6, 0x83, 0x50, 0x52, 0xc4, 0x07, 0x91, 0x75, 0x67, 0x3e, 0x0c, 0xe3, 0x89, + 0xba, 0xba, 0x15, 0x1f, 0xd1, 0x45, 0xe5, 0x5f, 0x65, 0x5b, 0x86, 0x68, 0xf5, 0x92, 0xb7, 0x2b, + 0x8e, 0x98, 0x3b, 0x70, 0xac, 0x91, 0x71, 
0x48, 0x89, 0x71, 0xed, 0xfd, 0x50, 0x3b, 0x25, 0x3e, + 0xfb, 0x58, 0x16, 0x16, 0x67, 0xd6, 0x61, 0xec, 0x88, 0x85, 0x4e, 0x3b, 0x22, 0xdd, 0xef, 0x8e, + 0xa9, 0xc6, 0x5f, 0x21, 0x7b, 0x6a, 0x53, 0xfd, 0x56, 0x36, 0xff, 0x34, 0xef, 0x7d, 0xbe, 0x5d, + 0x0e, 0x0b, 0x06, 0xc5, 0x2b, 0x64, 0x8f, 0x0f, 0x85, 0xfe, 0x75, 0xc5, 0x8e, 0x5f, 0xf7, 0x55, + 0x0b, 0x46, 0xd5, 0xd7, 0x1d, 0xc1, 0xbe, 0x30, 0x6f, 0xee, 0x0b, 0xa7, 0x3b, 0x4e, 0xf0, 0x9c, + 0x1d, 0xe1, 0xeb, 0x05, 0x38, 0xa9, 0x68, 0xe8, 0xb5, 0x8f, 0xff, 0x11, 0xb3, 0xea, 0x02, 0x94, + 0x3c, 0xa5, 0x00, 0xb5, 0x4c, 0xcd, 0x63, 0xac, 0xfe, 0x8c, 0x69, 0xe8, 0x91, 0xe7, 0xc5, 0x87, + 0xf6, 0x88, 0x6e, 0x19, 0x10, 0x87, 0xfb, 0x3c, 0x14, 0xdb, 0x6e, 0x5d, 0x1c, 0x30, 0xef, 0x97, + 0xbd, 0x7d, 0x6d, 0x65, 0xf1, 0xde, 0x7e, 0xf9, 0x91, 0x3c, 0xab, 0x14, 0x3d, 0xd9, 0xc2, 0xd9, + 0x6b, 0x2b, 0x8b, 0x98, 0x16, 0x46, 0x73, 0x30, 0x2e, 0x45, 0x99, 0xeb, 0x54, 0x92, 0xf6, 0x3d, + 0x71, 0x0e, 0x29, 0xf5, 0x3e, 0x36, 0xd1, 0x38, 0x49, 0x8f, 0x16, 0x61, 0x62, 0xa7, 0x7d, 0x93, + 0x34, 0x48, 0xc4, 0x3f, 0xf8, 0x0a, 0xe1, 0xca, 0xef, 0x52, 0x7c, 0xe9, 0xbe, 0x92, 0xc0, 0xe3, + 0x54, 0x09, 0xfb, 0x6f, 0xd8, 0x79, 0x20, 0x7a, 0x4f, 0x93, 0x6f, 0xbe, 0x95, 0xd3, 0xb9, 0x97, + 0x59, 0x71, 0x85, 0xec, 0x6d, 0xf8, 0x54, 0x0e, 0xc9, 0x9e, 0x15, 0xc6, 0x9c, 0xef, 0xeb, 0x38, + 0xe7, 0x7f, 0xb1, 0x00, 0xc7, 0x55, 0x0f, 0x18, 0xf2, 0xfd, 0xb7, 0x7b, 0x1f, 0x5c, 0x84, 0xe1, + 0x3a, 0xd9, 0x74, 0xda, 0x8d, 0x48, 0x59, 0x62, 0xfa, 0xb9, 0x35, 0x6e, 0x31, 0x06, 0x63, 0x9d, + 0xe6, 0x00, 0xdd, 0xf6, 0xf3, 0xa3, 0xec, 0x20, 0x8e, 0x1c, 0x3a, 0xc7, 0xd5, 0xaa, 0xb1, 0x72, + 0x57, 0xcd, 0xa3, 0xd0, 0xef, 0x36, 0xa9, 0x60, 0x56, 0x30, 0xe5, 0xad, 0x15, 0x0a, 0xc4, 0x1c, + 0x87, 0x1e, 0x87, 0xc1, 0x9a, 0xdf, 0x6c, 0x3a, 0x5e, 0x9d, 0x1d, 0x79, 0xa5, 0xf9, 0x61, 0x2a, + 0xbb, 0x2d, 0x70, 0x10, 0x96, 0x38, 0x2a, 0x7c, 0x3b, 0xc1, 0x16, 0x57, 0x4f, 0x09, 0xe1, 0x7b, + 0x2e, 0xd8, 0x0a, 0x31, 0x83, 0xd2, 0xdb, 0xf5, 0x2d, 0x3f, 0xd8, 0x71, 0xbd, 0xad, 0x45, 0x37, + 0x10, 0x4b, 0x42, 0x9d, 0x85, 0x37, 0x14, 0x06, 0x6b, 0x54, 0x68, 0x19, 0xfa, 0x5b, 0x7e, 0x10, + 0x85, 0xd3, 0x03, 0xac, 0xbb, 0x1f, 0xc9, 0xd9, 0x88, 0xf8, 0xd7, 0x56, 0xfc, 0x20, 0x8a, 0x3f, + 0x80, 0xfe, 0x0b, 0x31, 0x2f, 0x8e, 0x56, 0x61, 0x90, 0x78, 0xbb, 0xcb, 0x81, 0xdf, 0x9c, 0x9e, + 0xca, 0xe7, 0xb4, 0xc4, 0x49, 0xf8, 0x34, 0x8b, 0x65, 0x54, 0x01, 0xc6, 0x92, 0x05, 0xfa, 0x10, + 0x14, 0x89, 0xb7, 0x3b, 0x3d, 0xc8, 0x38, 0xcd, 0xe4, 0x70, 0xba, 0xee, 0x04, 0xf1, 0x9e, 0xbf, + 0xe4, 0xed, 0x62, 0x5a, 0x06, 0x7d, 0x0c, 0x4a, 0x72, 0xc3, 0x08, 0x85, 0xde, 0x37, 0x73, 0xc2, + 0xca, 0x6d, 0x06, 0x93, 0xb7, 0xdb, 0x6e, 0x40, 0x9a, 0xc4, 0x8b, 0xc2, 0x78, 0x87, 0x94, 0xd8, + 0x10, 0xc7, 0xdc, 0x50, 0x0d, 0x46, 0x02, 0x12, 0xba, 0x77, 0x48, 0xc5, 0x6f, 0xb8, 0xb5, 0xbd, + 0xe9, 0x87, 0x58, 0xf3, 0x9e, 0xec, 0xd8, 0x65, 0x58, 0x2b, 0x10, 0xdb, 0x25, 0x74, 0x28, 0x36, + 0x98, 0xa2, 0x37, 0x60, 0x34, 0x20, 0x61, 0xe4, 0x04, 0x91, 0xa8, 0x65, 0x5a, 0xd9, 0x11, 0x47, + 0xb1, 0x8e, 0xe0, 0xd7, 0x89, 0xb8, 0x9a, 0x18, 0x83, 0x4d, 0x0e, 0xe8, 0x63, 0xd2, 0x48, 0xb2, + 0xe6, 0xb7, 0xbd, 0x28, 0x9c, 0x2e, 0xb1, 0x76, 0x67, 0x9a, 0xaf, 0xaf, 0xc7, 0x74, 0x49, 0x2b, + 0x0a, 0x2f, 0x8c, 0x0d, 0x56, 0xe8, 0x13, 0x30, 0xca, 0xff, 0x73, 0x23, 0x70, 0x38, 0x7d, 0x9c, + 0xf1, 0x3e, 0x9b, 0xcf, 0x9b, 0x13, 0xce, 0x1f, 0x17, 0xcc, 0x47, 0x75, 0x68, 0x88, 0x4d, 0x6e, + 0x08, 0xc3, 0x68, 0xc3, 0xdd, 0x25, 0x1e, 0x09, 0xc3, 0x4a, 0xe0, 0xdf, 0x24, 0x42, 0xa7, 0x7d, + 0x32, 0xdb, 0x68, 0xec, 0xdf, 0x24, 0xe2, 0x12, 0xa8, 0x97, 0xc1, 
0x26, 0x0b, 0x74, 0x0d, 0xc6, + 0x02, 0xe2, 0xd4, 0xdd, 0x98, 0xe9, 0x70, 0x37, 0xa6, 0xec, 0xe2, 0x8c, 0x8d, 0x42, 0x38, 0xc1, + 0x04, 0x5d, 0x85, 0x11, 0xd6, 0xe7, 0xed, 0x16, 0x67, 0x7a, 0xa2, 0x1b, 0x53, 0xe6, 0x73, 0x50, + 0xd5, 0x8a, 0x60, 0x83, 0x01, 0x7a, 0x1d, 0x4a, 0x0d, 0x77, 0x93, 0xd4, 0xf6, 0x6a, 0x0d, 0x32, + 0x3d, 0xc2, 0xb8, 0x65, 0x6e, 0x86, 0xab, 0x92, 0x88, 0xcb, 0xe7, 0xea, 0x2f, 0x8e, 0x8b, 0xa3, + 0xeb, 0x70, 0x22, 0x22, 0x41, 0xd3, 0xf5, 0x1c, 0xba, 0x89, 0x89, 0x2b, 0x21, 0xb3, 0xe5, 0x8f, + 0xb2, 0xd9, 0x75, 0x46, 0x8c, 0xc6, 0x89, 0x8d, 0x4c, 0x2a, 0x9c, 0x53, 0x1a, 0xdd, 0x86, 0xe9, + 0x0c, 0x0c, 0x9f, 0xb7, 0xc7, 0x18, 0xe7, 0x57, 0x05, 0xe7, 0xe9, 0x8d, 0x1c, 0xba, 0x7b, 0x1d, + 0x70, 0x38, 0x97, 0x3b, 0xba, 0x0a, 0xe3, 0x6c, 0xe7, 0xac, 0xb4, 0x1b, 0x0d, 0x51, 0xe1, 0x18, + 0xab, 0xf0, 0x71, 0x29, 0x47, 0xac, 0x98, 0xe8, 0x7b, 0xfb, 0x65, 0x88, 0xff, 0xe1, 0x64, 0x69, + 0x74, 0x93, 0x99, 0x8d, 0xdb, 0x81, 0x1b, 0xed, 0xd1, 0x55, 0x45, 0x6e, 0x47, 0xd3, 0xe3, 0x1d, + 0x55, 0x68, 0x3a, 0xa9, 0xb2, 0x2d, 0xeb, 0x40, 0x9c, 0x64, 0x48, 0x8f, 0x82, 0x30, 0xaa, 0xbb, + 0xde, 0xf4, 0x04, 0xbf, 0x4f, 0xc9, 0x9d, 0xb4, 0x4a, 0x81, 0x98, 0xe3, 0x98, 0xc9, 0x98, 0xfe, + 0xb8, 0x4a, 0x4f, 0xdc, 0x49, 0x46, 0x18, 0x9b, 0x8c, 0x25, 0x02, 0xc7, 0x34, 0x54, 0x08, 0x8e, + 0xa2, 0xbd, 0x69, 0xc4, 0x48, 0xd5, 0x86, 0xb8, 0xb1, 0xf1, 0x31, 0x4c, 0xe1, 0xf6, 0x4d, 0x18, + 0x53, 0xdb, 0x04, 0xeb, 0x13, 0x54, 0x86, 0x7e, 0x26, 0xf6, 0x09, 0x85, 0x6f, 0x89, 0x36, 0x81, + 0x89, 0x84, 0x98, 0xc3, 0x59, 0x13, 0xdc, 0x3b, 0x64, 0x7e, 0x2f, 0x22, 0x5c, 0x17, 0x51, 0xd4, + 0x9a, 0x20, 0x11, 0x38, 0xa6, 0xb1, 0xff, 0x27, 0x17, 0x9f, 0xe3, 0x53, 0xa2, 0x87, 0x73, 0xf1, + 0x19, 0x18, 0xda, 0xf6, 0xc3, 0x88, 0x52, 0xb3, 0x3a, 0xfa, 0x63, 0x81, 0xf9, 0xb2, 0x80, 0x63, + 0x45, 0x81, 0x5e, 0x81, 0xd1, 0x9a, 0x5e, 0x81, 0x38, 0xd4, 0xd5, 0x36, 0x62, 0xd4, 0x8e, 0x4d, + 0x5a, 0xf4, 0x12, 0x0c, 0x31, 0x37, 0xa8, 0x9a, 0xdf, 0x10, 0xd2, 0xa6, 0x94, 0x4c, 0x86, 0x2a, + 0x02, 0x7e, 0x4f, 0xfb, 0x8d, 0x15, 0x35, 0x3a, 0x07, 0x03, 0xb4, 0x09, 0x2b, 0x15, 0x71, 0x9c, + 0x2a, 0xdd, 0xe5, 0x65, 0x06, 0xc5, 0x02, 0x6b, 0xff, 0xba, 0xc5, 0x64, 0xa9, 0xf4, 0x9e, 0x8f, + 0x2e, 0xb3, 0x43, 0x83, 0x9d, 0x20, 0x9a, 0xee, 0xf0, 0x31, 0xed, 0x24, 0x50, 0xb8, 0x7b, 0x89, + 0xff, 0xd8, 0x28, 0x89, 0xde, 0x4c, 0x9e, 0x0c, 0x5c, 0xa0, 0x78, 0x41, 0x76, 0x41, 0xf2, 0x74, + 0x78, 0x38, 0x3e, 0xe2, 0x68, 0x7b, 0x3a, 0x1d, 0x11, 0xf6, 0xff, 0x55, 0xd0, 0x66, 0x49, 0x35, + 0x72, 0x22, 0x82, 0x2a, 0x30, 0x78, 0xcb, 0x71, 0x23, 0xd7, 0xdb, 0x12, 0x72, 0x5f, 0xe7, 0x83, + 0x8e, 0x15, 0xba, 0xc1, 0x0b, 0x70, 0xe9, 0x45, 0xfc, 0xc1, 0x92, 0x0d, 0xe5, 0x18, 0xb4, 0x3d, + 0x8f, 0x72, 0x2c, 0xf4, 0xca, 0x11, 0xf3, 0x02, 0x9c, 0xa3, 0xf8, 0x83, 0x25, 0x1b, 0xf4, 0x16, + 0x80, 0xdc, 0x21, 0x48, 0x5d, 0xe8, 0x0e, 0x9f, 0xe9, 0xce, 0x74, 0x43, 0x95, 0xe1, 0xca, 0xc9, + 0xf8, 0x3f, 0xd6, 0xf8, 0xd9, 0x91, 0x36, 0xa6, 0x7a, 0x63, 0xd0, 0xc7, 0xe9, 0x12, 0x75, 0x82, + 0x88, 0xd4, 0xe7, 0x22, 0xd1, 0x39, 0x4f, 0xf5, 0x76, 0x39, 0xdc, 0x70, 0x9b, 0x44, 0x5f, 0xce, + 0x82, 0x09, 0x8e, 0xf9, 0xd9, 0xbf, 0x5c, 0x84, 0xe9, 0xbc, 0xe6, 0xd2, 0x45, 0x43, 0x6e, 0xbb, + 0xd1, 0x02, 0x15, 0x6b, 0x2d, 0x73, 0xd1, 0x2c, 0x09, 0x38, 0x56, 0x14, 0x74, 0xf6, 0x86, 0xee, + 0x96, 0xbc, 0xdb, 0xf7, 0xc7, 0xb3, 0xb7, 0xca, 0xa0, 0x58, 0x60, 0x29, 0x5d, 0x40, 0x9c, 0x50, + 0xf8, 0xe7, 0x69, 0xb3, 0x1c, 0x33, 0x28, 0x16, 0x58, 0x5d, 0xcb, 0xd8, 0xd7, 0x45, 0xcb, 0x68, + 0x74, 0x51, 0xff, 0xe1, 0x76, 0x11, 0xfa, 0x24, 0xc0, 0xa6, 0xeb, 0xb9, 0xe1, 0x36, 0xe3, 
0x3e, + 0x70, 0x60, 0xee, 0x4a, 0x28, 0x5e, 0x56, 0x5c, 0xb0, 0xc6, 0x11, 0xbd, 0x08, 0xc3, 0x6a, 0x03, + 0x59, 0x59, 0x64, 0xce, 0x0a, 0x9a, 0xf3, 0x57, 0xbc, 0x9b, 0x2e, 0x62, 0x9d, 0xce, 0xfe, 0x74, + 0x72, 0xbe, 0x88, 0x15, 0xa0, 0xf5, 0xaf, 0xd5, 0x6b, 0xff, 0x16, 0x3a, 0xf7, 0xaf, 0xfd, 0xcd, + 0x01, 0x18, 0x37, 0x2a, 0x6b, 0x87, 0x3d, 0xec, 0xb9, 0x97, 0xe8, 0x01, 0xe4, 0x44, 0x44, 0xac, + 0x3f, 0xbb, 0xfb, 0x52, 0xd1, 0x0f, 0x29, 0xba, 0x02, 0x78, 0x79, 0xf4, 0x49, 0x28, 0x35, 0x9c, + 0x90, 0x69, 0x2c, 0x89, 0x58, 0x77, 0xbd, 0x30, 0x8b, 0x2f, 0x84, 0x4e, 0x18, 0x69, 0xa7, 0x3e, + 0xe7, 0x1d, 0xb3, 0xa4, 0x27, 0x25, 0x95, 0xaf, 0xa4, 0x03, 0xa8, 0x6a, 0x04, 0x15, 0xc2, 0xf6, + 0x30, 0xc7, 0xa1, 0x97, 0xd8, 0xd6, 0x4a, 0x67, 0xc5, 0x02, 0x95, 0x46, 0xd9, 0x34, 0xeb, 0x37, + 0x84, 0x6c, 0x85, 0xc3, 0x06, 0x65, 0x7c, 0x27, 0x1b, 0xe8, 0x70, 0x27, 0x7b, 0x12, 0x06, 0xd9, + 0x0f, 0x35, 0x03, 0xd4, 0x68, 0xac, 0x70, 0x30, 0x96, 0xf8, 0xe4, 0x84, 0x19, 0xea, 0x6d, 0xc2, + 0xd0, 0x5b, 0x9f, 0x98, 0xd4, 0xcc, 0x51, 0x64, 0x88, 0xef, 0x72, 0x62, 0xca, 0x63, 0x89, 0x43, + 0x3f, 0x63, 0x01, 0x72, 0x1a, 0xf4, 0xb6, 0x4c, 0xc1, 0xea, 0x72, 0x03, 0x4c, 0xd4, 0x7e, 0xa5, + 0x6b, 0xb7, 0xb7, 0xc3, 0xd9, 0xb9, 0x54, 0x69, 0xae, 0x29, 0x7d, 0x59, 0x34, 0x11, 0xa5, 0x09, + 0xf4, 0xc3, 0x68, 0xd5, 0x0d, 0xa3, 0xcf, 0xfd, 0x49, 0xe2, 0x70, 0xca, 0x68, 0x12, 0xba, 0xa6, + 0x5f, 0xbe, 0x86, 0x0f, 0x78, 0xf9, 0x1a, 0xcd, 0xbb, 0x78, 0xcd, 0xb4, 0xe1, 0xa1, 0x9c, 0x2f, + 0xc8, 0xd0, 0xbf, 0x2e, 0xea, 0xfa, 0xd7, 0x2e, 0x5a, 0xbb, 0x59, 0x59, 0xc7, 0xec, 0x1b, 0x6d, + 0xc7, 0x8b, 0xdc, 0x68, 0x4f, 0xd7, 0xd7, 0x3e, 0x05, 0x63, 0x8b, 0x0e, 0x69, 0xfa, 0xde, 0x92, + 0x57, 0x6f, 0xf9, 0xae, 0x17, 0xa1, 0x69, 0xe8, 0x63, 0xc2, 0x07, 0xdf, 0x7a, 0xfb, 0x68, 0xef, + 0x61, 0x06, 0xb1, 0xb7, 0xe0, 0xf8, 0xa2, 0x7f, 0xcb, 0xbb, 0xe5, 0x04, 0xf5, 0xb9, 0xca, 0x8a, + 0xa6, 0x4f, 0x5a, 0x97, 0xfa, 0x0c, 0x2b, 0xff, 0xb6, 0xa8, 0x95, 0xe4, 0xd7, 0xa1, 0x65, 0xb7, + 0x41, 0x72, 0xb4, 0x7e, 0xff, 0x6f, 0xc1, 0xa8, 0x29, 0xa6, 0x57, 0x36, 0x2b, 0x2b, 0xd7, 0x40, + 0xff, 0x06, 0x0c, 0x6d, 0xba, 0xa4, 0x51, 0xc7, 0x64, 0x53, 0xf4, 0xce, 0x13, 0xf9, 0x2e, 0x7c, + 0xcb, 0x94, 0x52, 0x19, 0xd7, 0x98, 0x36, 0x64, 0x59, 0x14, 0xc6, 0x8a, 0x0d, 0xda, 0x81, 0x09, + 0xd9, 0x87, 0x12, 0x2b, 0xf6, 0x83, 0x27, 0x3b, 0x0d, 0xbc, 0xc9, 0xfc, 0xd8, 0xdd, 0xfd, 0xf2, + 0x04, 0x4e, 0xb0, 0xc1, 0x29, 0xc6, 0xe8, 0x14, 0xf4, 0x35, 0xe9, 0xc9, 0xd7, 0xc7, 0xba, 0x9f, + 0xa9, 0x3f, 0x98, 0x26, 0x87, 0x41, 0xed, 0x1f, 0xb3, 0xe0, 0xa1, 0x54, 0xcf, 0x08, 0x8d, 0xd6, + 0x21, 0x8f, 0x42, 0x52, 0xc3, 0x54, 0xe8, 0xae, 0x61, 0xb2, 0xff, 0x8e, 0x05, 0xc7, 0x96, 0x9a, + 0xad, 0x68, 0x6f, 0xd1, 0x35, 0xad, 0xe9, 0x1f, 0x84, 0x81, 0x26, 0xa9, 0xbb, 0xed, 0xa6, 0x18, + 0xb9, 0xb2, 0x3c, 0x1d, 0xd6, 0x18, 0xf4, 0xde, 0x7e, 0x79, 0xb4, 0x1a, 0xf9, 0x81, 0xb3, 0x45, + 0x38, 0x00, 0x0b, 0x72, 0x76, 0xc6, 0xba, 0x77, 0xc8, 0xaa, 0xdb, 0x74, 0xa3, 0xfb, 0x9b, 0xed, + 0xc2, 0x10, 0x2e, 0x99, 0xe0, 0x98, 0x9f, 0xfd, 0x0d, 0x0b, 0xc6, 0xe5, 0xbc, 0x9f, 0xab, 0xd7, + 0x03, 0x12, 0x86, 0x68, 0x06, 0x0a, 0x6e, 0x4b, 0xb4, 0x12, 0x44, 0x2b, 0x0b, 0x2b, 0x15, 0x5c, + 0x70, 0x5b, 0x52, 0x9c, 0x67, 0x07, 0x50, 0xd1, 0xf4, 0x09, 0xb8, 0x2c, 0xe0, 0x58, 0x51, 0xa0, + 0xf3, 0x30, 0xe4, 0xf9, 0x75, 0x2e, 0x11, 0x0b, 0x1b, 0x2b, 0xa5, 0x5c, 0x17, 0x30, 0xac, 0xb0, + 0xa8, 0x02, 0x25, 0xee, 0x31, 0x1a, 0x4f, 0xda, 0x9e, 0xfc, 0x4e, 0xd9, 0x97, 0x6d, 0xc8, 0x92, + 0x38, 0x66, 0x62, 0xff, 0xa6, 0x05, 0x23, 0xf2, 0xcb, 0x7a, 0xbc, 0xab, 0xd0, 0xa5, 0x15, 0xdf, + 0x53, 0xe2, 0xa5, 
0x45, 0xef, 0x1a, 0x0c, 0x63, 0x5c, 0x31, 0x8a, 0x07, 0xba, 0x62, 0x5c, 0x84, + 0x61, 0xa7, 0xd5, 0xaa, 0x98, 0xf7, 0x13, 0x36, 0x95, 0xe6, 0x62, 0x30, 0xd6, 0x69, 0xec, 0x1f, + 0x2d, 0xc0, 0x98, 0xfc, 0x82, 0x6a, 0xfb, 0x66, 0x48, 0x22, 0xb4, 0x01, 0x25, 0x87, 0x8f, 0x12, + 0x91, 0x93, 0xfc, 0xd1, 0x6c, 0xbd, 0x99, 0x31, 0xa4, 0xb1, 0xa0, 0x35, 0x27, 0x4b, 0xe3, 0x98, + 0x11, 0x6a, 0xc0, 0xa4, 0xe7, 0x47, 0xec, 0xd0, 0x55, 0xf8, 0x4e, 0xa6, 0xcc, 0x24, 0xf7, 0x93, + 0x82, 0xfb, 0xe4, 0x7a, 0x92, 0x0b, 0x4e, 0x33, 0x46, 0x4b, 0x52, 0x17, 0x59, 0xcc, 0x57, 0x22, + 0xe9, 0x03, 0x97, 0xad, 0x8a, 0xb4, 0x7f, 0xcd, 0x82, 0x92, 0x24, 0x3b, 0x0a, 0xab, 0xf5, 0x1a, + 0x0c, 0x86, 0x6c, 0x10, 0x64, 0xd7, 0xd8, 0x9d, 0x1a, 0xce, 0xc7, 0x2b, 0x96, 0x25, 0xf8, 0xff, + 0x10, 0x4b, 0x1e, 0xcc, 0x14, 0xa5, 0x9a, 0xff, 0x2e, 0x31, 0x45, 0xa9, 0xf6, 0xe4, 0x1c, 0x4a, + 0x7f, 0xc6, 0xda, 0xac, 0xe9, 0x76, 0xa9, 0xc8, 0xdb, 0x0a, 0xc8, 0xa6, 0x7b, 0x3b, 0x29, 0xf2, + 0x56, 0x18, 0x14, 0x0b, 0x2c, 0x7a, 0x0b, 0x46, 0x6a, 0xd2, 0x06, 0x11, 0xaf, 0xf0, 0x73, 0x1d, + 0xed, 0x61, 0xca, 0x74, 0xca, 0x75, 0x68, 0x0b, 0x5a, 0x79, 0x6c, 0x70, 0x33, 0x3d, 0xa2, 0x8a, + 0xdd, 0x3c, 0xa2, 0x62, 0xbe, 0xf9, 0xfe, 0x41, 0x3f, 0x6e, 0xc1, 0x00, 0xd7, 0x3d, 0xf7, 0xa6, + 0xfa, 0xd7, 0x2c, 0xc9, 0x71, 0xdf, 0x5d, 0xa7, 0x40, 0x21, 0x69, 0xa0, 0x35, 0x28, 0xb1, 0x1f, + 0x4c, 0x77, 0x5e, 0xcc, 0x7f, 0xb0, 0xc4, 0x6b, 0xd5, 0x1b, 0x78, 0x5d, 0x16, 0xc3, 0x31, 0x07, + 0xfb, 0x47, 0x8a, 0x74, 0x77, 0x8b, 0x49, 0x8d, 0x43, 0xdf, 0x7a, 0x70, 0x87, 0x7e, 0xe1, 0x41, + 0x1d, 0xfa, 0x5b, 0x30, 0x5e, 0xd3, 0xec, 0xce, 0xf1, 0x48, 0x9e, 0xef, 0x38, 0x49, 0x34, 0x13, + 0x35, 0xd7, 0xce, 0x2d, 0x98, 0x4c, 0x70, 0x92, 0x2b, 0xfa, 0x38, 0x8c, 0xf0, 0x71, 0x16, 0xb5, + 0x70, 0xa7, 0xb2, 0xc7, 0xf3, 0xe7, 0x8b, 0x5e, 0x05, 0xd7, 0xe6, 0x6a, 0xc5, 0xb1, 0xc1, 0xcc, + 0xfe, 0x2b, 0x0b, 0xd0, 0x52, 0x6b, 0x9b, 0x34, 0x49, 0xe0, 0x34, 0x62, 0xf3, 0xd1, 0x17, 0x2d, + 0x98, 0x26, 0x29, 0xf0, 0x82, 0xdf, 0x6c, 0x8a, 0xcb, 0x62, 0x8e, 0x3e, 0x63, 0x29, 0xa7, 0x8c, + 0x7a, 0xd1, 0x35, 0x9d, 0x47, 0x81, 0x73, 0xeb, 0x43, 0x6b, 0x30, 0xc5, 0x4f, 0x49, 0x85, 0xd0, + 0xbc, 0xb8, 0x1e, 0x16, 0x8c, 0xa7, 0x36, 0xd2, 0x24, 0x38, 0xab, 0x9c, 0xfd, 0x6b, 0xa3, 0x90, + 0xdb, 0x8a, 0xf7, 0xec, 0x66, 0xef, 0xd9, 0xcd, 0xde, 0xb3, 0x9b, 0xbd, 0x67, 0x37, 0x7b, 0xcf, + 0x6e, 0xf6, 0x9e, 0xdd, 0xec, 0x5d, 0x6a, 0x37, 0xfb, 0xbf, 0x2d, 0x38, 0xae, 0x8e, 0x2f, 0xe3, + 0xc2, 0xfe, 0x19, 0x98, 0xe2, 0xcb, 0xcd, 0x70, 0xc6, 0x16, 0xc7, 0xf5, 0xc5, 0xcc, 0x99, 0x9b, + 0x78, 0x34, 0x60, 0x14, 0xe4, 0xaf, 0xaf, 0x32, 0x10, 0x38, 0xab, 0x1a, 0xfb, 0x97, 0x87, 0xa0, + 0x7f, 0x69, 0x97, 0x78, 0xd1, 0x11, 0x5c, 0x6d, 0x6a, 0x30, 0xe6, 0x7a, 0xbb, 0x7e, 0x63, 0x97, + 0xd4, 0x39, 0xfe, 0x20, 0x37, 0xf0, 0x13, 0x82, 0xf5, 0xd8, 0x8a, 0xc1, 0x02, 0x27, 0x58, 0x3e, + 0x08, 0xeb, 0xc3, 0x25, 0x18, 0xe0, 0x87, 0x8f, 0x30, 0x3d, 0x64, 0xee, 0xd9, 0xac, 0x13, 0xc5, + 0x91, 0x1a, 0x5b, 0x46, 0xf8, 0xe1, 0x26, 0x8a, 0xa3, 0x4f, 0xc3, 0xd8, 0xa6, 0x1b, 0x84, 0xd1, + 0x86, 0xdb, 0xa4, 0x47, 0x43, 0xb3, 0x75, 0x1f, 0xd6, 0x06, 0xd5, 0x0f, 0xcb, 0x06, 0x27, 0x9c, + 0xe0, 0x8c, 0xb6, 0x60, 0xb4, 0xe1, 0xe8, 0x55, 0x0d, 0x1e, 0xb8, 0x2a, 0x75, 0x3a, 0xac, 0xea, + 0x8c, 0xb0, 0xc9, 0x97, 0x2e, 0xa7, 0x1a, 0x53, 0x98, 0x0f, 0x31, 0x75, 0x86, 0x5a, 0x4e, 0x5c, + 0x53, 0xce, 0x71, 0x54, 0x40, 0x63, 0x8e, 0xec, 0x25, 0x53, 0x40, 0xd3, 0xdc, 0xd5, 0x3f, 0x05, + 0x25, 0x42, 0xbb, 0x90, 0x32, 0x16, 0x07, 0xcc, 0x85, 0xde, 0xda, 0xba, 0xe6, 0xd6, 0x02, 0xdf, + 0xb4, 0xf3, 0x2c, 0x49, 0x4e, 0x38, 0x66, 
0x8a, 0x16, 0x60, 0x20, 0x24, 0x81, 0xab, 0x74, 0xc9, + 0x1d, 0x86, 0x91, 0x91, 0xf1, 0xe7, 0x7d, 0xfc, 0x37, 0x16, 0x45, 0xe9, 0xf4, 0x72, 0x98, 0x2a, + 0x96, 0x1d, 0x06, 0xda, 0xf4, 0x9a, 0x63, 0x50, 0x2c, 0xb0, 0xe8, 0x75, 0x18, 0x0c, 0x48, 0x83, + 0x19, 0x12, 0x47, 0x7b, 0x9f, 0xe4, 0xdc, 0x2e, 0xc9, 0xcb, 0x61, 0xc9, 0x00, 0x5d, 0x01, 0x14, + 0x10, 0x2a, 0xe0, 0xb9, 0xde, 0x96, 0x72, 0xef, 0x16, 0x1b, 0xad, 0x12, 0xa4, 0x71, 0x4c, 0x21, + 0x5f, 0x76, 0xe2, 0x8c, 0x62, 0xe8, 0x12, 0x4c, 0x2a, 0xe8, 0x8a, 0x17, 0x46, 0x0e, 0xdd, 0xe0, + 0xc6, 0x19, 0x2f, 0xa5, 0x5f, 0xc1, 0x49, 0x02, 0x9c, 0x2e, 0x63, 0xff, 0x9c, 0x05, 0xbc, 0x9f, + 0x8f, 0x40, 0xab, 0xf0, 0x9a, 0xa9, 0x55, 0x38, 0x99, 0x3b, 0x72, 0x39, 0x1a, 0x85, 0x9f, 0xb3, + 0x60, 0x58, 0x1b, 0xd9, 0x78, 0xce, 0x5a, 0x1d, 0xe6, 0x6c, 0x1b, 0x26, 0xe8, 0x4c, 0xbf, 0x7a, + 0x33, 0x24, 0xc1, 0x2e, 0xa9, 0xb3, 0x89, 0x59, 0xb8, 0xbf, 0x89, 0xa9, 0x5c, 0x49, 0x57, 0x13, + 0x0c, 0x71, 0xaa, 0x0a, 0xfb, 0x53, 0xb2, 0xa9, 0xca, 0xf3, 0xb6, 0xa6, 0xc6, 0x3c, 0xe1, 0x79, + 0xab, 0x46, 0x15, 0xc7, 0x34, 0x74, 0xa9, 0x6d, 0xfb, 0x61, 0x94, 0xf4, 0xbc, 0xbd, 0xec, 0x87, + 0x11, 0x66, 0x18, 0xfb, 0x79, 0x80, 0xa5, 0xdb, 0xa4, 0xc6, 0x67, 0xac, 0x7e, 0xe9, 0xb1, 0xf2, + 0x2f, 0x3d, 0xf6, 0x1f, 0x58, 0x30, 0xb6, 0xbc, 0x60, 0x9c, 0x5c, 0xb3, 0x00, 0xfc, 0xa6, 0x76, + 0xe3, 0xc6, 0xba, 0x74, 0xff, 0xe0, 0x16, 0x70, 0x05, 0xc5, 0x1a, 0x05, 0x3a, 0x09, 0xc5, 0x46, + 0xdb, 0x13, 0x6a, 0xcf, 0x41, 0x7a, 0x3c, 0xae, 0xb6, 0x3d, 0x4c, 0x61, 0xda, 0xab, 0xae, 0x62, + 0xcf, 0xaf, 0xba, 0xba, 0x46, 0x73, 0x41, 0x65, 0xe8, 0xbf, 0x75, 0xcb, 0xad, 0xf3, 0x37, 0xf3, + 0xc2, 0x35, 0xe5, 0xc6, 0x8d, 0x95, 0xc5, 0x10, 0x73, 0xb8, 0xfd, 0xa5, 0x22, 0xcc, 0x2c, 0x37, + 0xc8, 0xed, 0x77, 0x18, 0x37, 0xa0, 0xd7, 0x37, 0x69, 0x07, 0x53, 0x20, 0x1d, 0xf4, 0xdd, 0x61, + 0xf7, 0xfe, 0xd8, 0x84, 0x41, 0xee, 0x78, 0x2a, 0xa3, 0x08, 0x64, 0x9a, 0xfb, 0xf2, 0x3b, 0x64, + 0x96, 0x3b, 0xb0, 0x0a, 0x73, 0x9f, 0x3a, 0x30, 0x05, 0x14, 0x4b, 0xe6, 0x33, 0x2f, 0xc3, 0x88, + 0x4e, 0x79, 0xa0, 0x17, 0xc0, 0xdf, 0x5b, 0x84, 0x09, 0xda, 0x82, 0x07, 0x3a, 0x10, 0xd7, 0xd2, + 0x03, 0x71, 0xd8, 0xaf, 0x40, 0xbb, 0x8f, 0xc6, 0x5b, 0xc9, 0xd1, 0xb8, 0x98, 0x37, 0x1a, 0x47, + 0x3d, 0x06, 0xdf, 0x67, 0xc1, 0xd4, 0x72, 0xc3, 0xaf, 0xed, 0x24, 0x5e, 0x6a, 0xbe, 0x08, 0xc3, + 0x74, 0x3b, 0x0e, 0x8d, 0xa0, 0x25, 0x46, 0x18, 0x1b, 0x81, 0xc2, 0x3a, 0x9d, 0x56, 0xec, 0xda, + 0xb5, 0x95, 0xc5, 0xac, 0xe8, 0x37, 0x02, 0x85, 0x75, 0x3a, 0xfb, 0xf7, 0x2c, 0x38, 0x7d, 0x69, + 0x61, 0x29, 0x9e, 0x8a, 0xa9, 0x00, 0x3c, 0xe7, 0x60, 0xa0, 0x55, 0xd7, 0x9a, 0x12, 0xab, 0x85, + 0x17, 0x59, 0x2b, 0x04, 0xf6, 0xdd, 0x12, 0x5c, 0xea, 0x1a, 0xc0, 0x25, 0x5c, 0x59, 0x10, 0xfb, + 0xae, 0xb4, 0x02, 0x59, 0xb9, 0x56, 0xa0, 0xc7, 0x61, 0x90, 0x9e, 0x0b, 0x6e, 0x4d, 0xb6, 0x9b, + 0x1b, 0xf4, 0x39, 0x08, 0x4b, 0x9c, 0xfd, 0xb3, 0x16, 0x4c, 0x5d, 0x72, 0x23, 0x7a, 0x68, 0x27, + 0x23, 0xcc, 0xd0, 0x53, 0x3b, 0x74, 0x23, 0x3f, 0xd8, 0x4b, 0x46, 0x98, 0xc1, 0x0a, 0x83, 0x35, + 0x2a, 0xfe, 0x41, 0xbb, 0x2e, 0x7b, 0x49, 0x51, 0x30, 0xed, 0x6e, 0x58, 0xc0, 0xb1, 0xa2, 0xa0, + 0xfd, 0x55, 0x77, 0x03, 0xa6, 0xb2, 0xdc, 0x13, 0x1b, 0xb7, 0xea, 0xaf, 0x45, 0x89, 0xc0, 0x31, + 0x8d, 0xfd, 0x17, 0x16, 0x94, 0x2f, 0xf1, 0xf7, 0xa0, 0x9b, 0x61, 0xce, 0xa6, 0xfb, 0x3c, 0x94, + 0x88, 0x34, 0x10, 0xc8, 0xb7, 0xb1, 0x52, 0x10, 0x55, 0x96, 0x03, 0x1e, 0xe8, 0x46, 0xd1, 0xf5, + 0xf0, 0x9c, 0xfc, 0x60, 0xef, 0x81, 0x97, 0x01, 0x11, 0xbd, 0x2e, 0x3d, 0xf2, 0x0f, 0x0b, 0x21, + 0xb2, 0x94, 0xc2, 0xe2, 0x8c, 0x12, 0xf6, 0x8f, 0x59, 0x70, 0x5c, 
0x7d, 0xf0, 0xbb, 0xee, 0x33, + 0xed, 0xaf, 0x15, 0x60, 0xf4, 0xf2, 0xc6, 0x46, 0xe5, 0x12, 0x89, 0xb4, 0x59, 0xd9, 0xd9, 0xec, + 0x8f, 0x35, 0xeb, 0x65, 0xa7, 0x3b, 0x62, 0x3b, 0x72, 0x1b, 0xb3, 0x3c, 0x80, 0xdc, 0xec, 0x8a, + 0x17, 0x5d, 0x0d, 0xaa, 0x51, 0xe0, 0x7a, 0x5b, 0x99, 0x33, 0x5d, 0xca, 0x2c, 0xc5, 0x3c, 0x99, + 0x05, 0x3d, 0x0f, 0x03, 0x2c, 0x82, 0x9d, 0x1c, 0x84, 0x87, 0xd5, 0x15, 0x8b, 0x41, 0xef, 0xed, + 0x97, 0x4b, 0xd7, 0xf0, 0x0a, 0xff, 0x83, 0x05, 0x29, 0xba, 0x06, 0xc3, 0xdb, 0x51, 0xd4, 0xba, + 0x4c, 0x9c, 0x3a, 0x09, 0xe4, 0x2e, 0x7b, 0x26, 0x6b, 0x97, 0xa5, 0x9d, 0xc0, 0xc9, 0xe2, 0x8d, + 0x29, 0x86, 0x85, 0x58, 0xe7, 0x63, 0x57, 0x01, 0x62, 0xdc, 0x21, 0x19, 0x6e, 0xec, 0x0d, 0x28, + 0xd1, 0xcf, 0x9d, 0x6b, 0xb8, 0x4e, 0x67, 0xd3, 0xf8, 0xd3, 0x50, 0x92, 0x86, 0xef, 0x50, 0x84, + 0xbb, 0x60, 0x27, 0x92, 0xb4, 0x8b, 0x87, 0x38, 0xc6, 0xdb, 0x8f, 0x81, 0xf0, 0x2d, 0xed, 0xc4, + 0xd2, 0xde, 0x84, 0x63, 0xcc, 0x49, 0xd6, 0x89, 0xb6, 0x8d, 0x39, 0xda, 0x7d, 0x32, 0x3c, 0x23, + 0xee, 0x75, 0xfc, 0xcb, 0xa6, 0xb5, 0xc7, 0xc9, 0x23, 0x92, 0x63, 0x7c, 0xc7, 0xb3, 0xff, 0xbc, + 0x0f, 0x1e, 0x5e, 0xa9, 0xe6, 0xc7, 0x69, 0x7a, 0x09, 0x46, 0xb8, 0xb8, 0x48, 0xa7, 0x86, 0xd3, + 0x10, 0xf5, 0x2a, 0x0d, 0xe8, 0x86, 0x86, 0xc3, 0x06, 0x25, 0x3a, 0x0d, 0x45, 0xf7, 0x6d, 0x2f, + 0xf9, 0x74, 0x6f, 0xe5, 0x8d, 0x75, 0x4c, 0xe1, 0x14, 0x4d, 0x25, 0x4f, 0xbe, 0xa5, 0x2b, 0xb4, + 0x92, 0x3e, 0x5f, 0x83, 0x31, 0x37, 0xac, 0x85, 0xee, 0x8a, 0x47, 0xd7, 0xa9, 0xb6, 0xd2, 0x95, + 0xce, 0x81, 0x36, 0x5a, 0x61, 0x71, 0x82, 0x5a, 0x3b, 0x5f, 0xfa, 0x7b, 0x96, 0x5e, 0xbb, 0x46, + 0x89, 0xa0, 0xdb, 0x7f, 0x8b, 0x7d, 0x5d, 0xc8, 0x54, 0xf0, 0x62, 0xfb, 0xe7, 0x1f, 0x1c, 0x62, + 0x89, 0xa3, 0x17, 0xba, 0xda, 0xb6, 0xd3, 0x9a, 0x6b, 0x47, 0xdb, 0x8b, 0x6e, 0x58, 0xf3, 0x77, + 0x49, 0xb0, 0xc7, 0xee, 0xe2, 0x43, 0xf1, 0x85, 0x4e, 0x21, 0x16, 0x2e, 0xcf, 0x55, 0x28, 0x25, + 0x4e, 0x97, 0x41, 0x73, 0x30, 0x2e, 0x81, 0x55, 0x12, 0xb2, 0x23, 0x60, 0x98, 0xb1, 0x51, 0x8f, + 0xe9, 0x04, 0x58, 0x31, 0x49, 0xd2, 0x9b, 0x02, 0x2e, 0x1c, 0x86, 0x80, 0xfb, 0x41, 0x18, 0x75, + 0x3d, 0x37, 0x72, 0x9d, 0xc8, 0xe7, 0xf6, 0x23, 0x7e, 0xed, 0x66, 0x0a, 0xe6, 0x15, 0x1d, 0x81, + 0x4d, 0x3a, 0xfb, 0xdf, 0xf7, 0xc1, 0x24, 0x1b, 0xb6, 0xf7, 0x66, 0xd8, 0x77, 0xd2, 0x0c, 0xbb, + 0x96, 0x9e, 0x61, 0x87, 0x21, 0xb9, 0xdf, 0xf7, 0x34, 0xfb, 0x34, 0x94, 0xd4, 0xfb, 0x41, 0xf9, + 0x80, 0xd8, 0xca, 0x79, 0x40, 0xdc, 0xfd, 0xf4, 0x96, 0x2e, 0x69, 0xc5, 0x4c, 0x97, 0xb4, 0xaf, + 0x58, 0x10, 0x1b, 0x16, 0xd0, 0x1b, 0x50, 0x6a, 0xf9, 0xcc, 0xc3, 0x35, 0x90, 0x6e, 0xe3, 0x8f, + 0x75, 0xb4, 0x4c, 0xf0, 0x50, 0x75, 0x01, 0xef, 0x85, 0x8a, 0x2c, 0x8a, 0x63, 0x2e, 0xe8, 0x0a, + 0x0c, 0xb6, 0x02, 0x52, 0x8d, 0x58, 0x1c, 0xa5, 0xde, 0x19, 0xf2, 0x59, 0xc3, 0x0b, 0x62, 0xc9, + 0xc1, 0xfe, 0x85, 0x02, 0x4c, 0x24, 0x49, 0xd1, 0xab, 0xd0, 0x47, 0x6e, 0x93, 0x9a, 0x68, 0x6f, + 0xe6, 0x51, 0x1c, 0xab, 0x26, 0x78, 0x07, 0xd0, 0xff, 0x98, 0x95, 0x42, 0x97, 0x61, 0x90, 0x9e, + 0xc3, 0x97, 0x54, 0xcc, 0xc0, 0x47, 0xf2, 0xce, 0x72, 0x25, 0xd0, 0xf0, 0xc6, 0x09, 0x10, 0x96, + 0xc5, 0x99, 0x1f, 0x58, 0xad, 0x55, 0xa5, 0x57, 0x9c, 0xa8, 0xd3, 0x4d, 0x7c, 0x63, 0xa1, 0xc2, + 0x89, 0x04, 0x37, 0xee, 0x07, 0x26, 0x81, 0x38, 0x66, 0x82, 0x3e, 0x02, 0xfd, 0x61, 0x83, 0x90, + 0x96, 0x30, 0xf4, 0x67, 0x2a, 0x17, 0xab, 0x94, 0x40, 0x70, 0x62, 0xca, 0x08, 0x06, 0xc0, 0xbc, + 0xa0, 0xfd, 0x8b, 0x16, 0x00, 0x77, 0x9c, 0x73, 0xbc, 0x2d, 0x72, 0x04, 0xfa, 0xf8, 0x45, 0xe8, + 0x0b, 0x5b, 0xa4, 0xd6, 0xc9, 0x7d, 0x3b, 0x6e, 0x4f, 0xb5, 0x45, 0x6a, 0xf1, 0x9c, 0xa5, 
0xff, + 0x30, 0x2b, 0x6d, 0x7f, 0x3f, 0xc0, 0x58, 0x4c, 0xb6, 0x12, 0x91, 0x26, 0x7a, 0xd6, 0x08, 0x5b, + 0x72, 0x32, 0x11, 0xb6, 0xa4, 0xc4, 0xa8, 0x35, 0xd5, 0xef, 0xa7, 0xa1, 0xd8, 0x74, 0x6e, 0x0b, + 0xdd, 0xde, 0xd3, 0x9d, 0x9b, 0x41, 0xf9, 0xcf, 0xae, 0x39, 0xb7, 0xf9, 0xf5, 0xf7, 0x69, 0xb9, + 0xc6, 0xd6, 0x9c, 0xdb, 0x5d, 0x5d, 0x8c, 0x69, 0x25, 0xac, 0x2e, 0xd7, 0x13, 0x3e, 0x61, 0x3d, + 0xd5, 0xe5, 0x7a, 0xc9, 0xba, 0x5c, 0xaf, 0x87, 0xba, 0x5c, 0x0f, 0xdd, 0x81, 0x41, 0xe1, 0xb2, + 0x29, 0x22, 0xc0, 0x5d, 0xe8, 0xa1, 0x3e, 0xe1, 0xf1, 0xc9, 0xeb, 0xbc, 0x20, 0xaf, 0xf7, 0x02, + 0xda, 0xb5, 0x5e, 0x59, 0x21, 0xfa, 0x7f, 0x2c, 0x18, 0x13, 0xbf, 0x31, 0x79, 0xbb, 0x4d, 0xc2, + 0x48, 0x88, 0xbf, 0x1f, 0xe8, 0xbd, 0x0d, 0xa2, 0x20, 0x6f, 0xca, 0x07, 0xe4, 0x49, 0x65, 0x22, + 0xbb, 0xb6, 0x28, 0xd1, 0x0a, 0xf4, 0x0b, 0x16, 0x1c, 0x6b, 0x3a, 0xb7, 0x79, 0x8d, 0x1c, 0x86, + 0x9d, 0xc8, 0xf5, 0x85, 0xeb, 0xc3, 0xab, 0xbd, 0x0d, 0x7f, 0xaa, 0x38, 0x6f, 0xa4, 0xb4, 0x73, + 0x1e, 0xcb, 0x22, 0xe9, 0xda, 0xd4, 0xcc, 0x76, 0xcd, 0x6c, 0xc2, 0x90, 0x9c, 0x6f, 0x0f, 0xd2, + 0x3f, 0x9c, 0xd5, 0x23, 0xe6, 0xda, 0x03, 0xad, 0xe7, 0xd3, 0x30, 0xa2, 0xcf, 0xb1, 0x07, 0x5a, + 0xd7, 0xdb, 0x30, 0x95, 0x31, 0x97, 0x1e, 0x68, 0x95, 0xb7, 0xe0, 0x64, 0xee, 0xfc, 0x78, 0xa0, + 0xfe, 0xfd, 0x5f, 0xb3, 0xf4, 0x7d, 0xf0, 0x08, 0x8c, 0x22, 0x0b, 0xa6, 0x51, 0xe4, 0x4c, 0xe7, + 0x95, 0x93, 0x63, 0x19, 0x79, 0x4b, 0x6f, 0x34, 0xdd, 0xd5, 0xd1, 0xeb, 0x30, 0xd0, 0xa0, 0x10, + 0xe9, 0xf8, 0x6b, 0x77, 0x5f, 0x91, 0xb1, 0x38, 0xca, 0xe0, 0x21, 0x16, 0x1c, 0xec, 0x5f, 0xb1, + 0xa0, 0xef, 0x08, 0x7a, 0x02, 0x9b, 0x3d, 0xf1, 0x6c, 0x2e, 0x6b, 0x11, 0x0c, 0x7f, 0x16, 0x3b, + 0xb7, 0x96, 0x6e, 0x47, 0xc4, 0x0b, 0xd9, 0x99, 0x9e, 0xd9, 0x31, 0xfb, 0x16, 0x4c, 0xad, 0xfa, + 0x4e, 0x7d, 0xde, 0x69, 0x38, 0x5e, 0x8d, 0x04, 0x2b, 0xde, 0xd6, 0x81, 0xbc, 0xd6, 0x0b, 0x5d, + 0xbd, 0xd6, 0x5f, 0x82, 0x01, 0xb7, 0xa5, 0x05, 0xf7, 0x3e, 0x4b, 0x3b, 0x70, 0xa5, 0x22, 0xe2, + 0x7a, 0x23, 0xa3, 0x72, 0x06, 0xc5, 0x82, 0x9e, 0x8e, 0x3c, 0x77, 0x17, 0xeb, 0xcb, 0x1f, 0x79, + 0x2a, 0xc5, 0x27, 0x43, 0x40, 0x19, 0x8e, 0xcd, 0xdb, 0x60, 0x54, 0x21, 0x5e, 0x7d, 0x61, 0x18, + 0x74, 0xf9, 0x97, 0x8a, 0xe1, 0x7f, 0x22, 0x5b, 0xba, 0x4e, 0x75, 0x8c, 0xf6, 0x9e, 0x89, 0x03, + 0xb0, 0x64, 0x64, 0xbf, 0x04, 0x99, 0x21, 0x3b, 0xba, 0x6b, 0x4e, 0xec, 0x8f, 0xc1, 0x24, 0x2b, + 0x79, 0x40, 0xad, 0x84, 0x9d, 0xd0, 0xf7, 0x66, 0xc4, 0x69, 0xb5, 0xff, 0x8d, 0x05, 0x68, 0xcd, + 0xaf, 0xbb, 0x9b, 0x7b, 0x82, 0x39, 0xff, 0xfe, 0xb7, 0xa1, 0xcc, 0xaf, 0x7d, 0xc9, 0x58, 0xa6, + 0x0b, 0x0d, 0x27, 0x0c, 0x35, 0x5d, 0xf3, 0x13, 0xa2, 0xde, 0xf2, 0x46, 0x67, 0x72, 0xdc, 0x8d, + 0x1f, 0x7a, 0x23, 0x11, 0xa8, 0xed, 0x43, 0xa9, 0x40, 0x6d, 0x4f, 0x64, 0x7a, 0x7c, 0xa4, 0x5b, + 0x2f, 0x03, 0xb8, 0xd9, 0x5f, 0xb0, 0x60, 0x7c, 0x3d, 0x11, 0x9b, 0xf3, 0x1c, 0x33, 0x7f, 0x67, + 0xd8, 0x50, 0xaa, 0x0c, 0x8a, 0x05, 0xf6, 0xd0, 0x75, 0x8c, 0x7f, 0x63, 0x41, 0x1c, 0x22, 0xe8, + 0x08, 0xa4, 0xda, 0x05, 0x43, 0xaa, 0xcd, 0xbc, 0x21, 0xa8, 0xe6, 0xe4, 0x09, 0xb5, 0xe8, 0x8a, + 0x1a, 0x93, 0x0e, 0x97, 0x83, 0x98, 0x0d, 0x5f, 0x67, 0x63, 0xe6, 0xc0, 0xa9, 0xd1, 0xf8, 0xc3, + 0x02, 0x20, 0x45, 0xdb, 0x73, 0x70, 0xbf, 0x74, 0x89, 0xc3, 0x09, 0xee, 0xb7, 0x0b, 0x88, 0x39, + 0x70, 0x04, 0x8e, 0x17, 0x72, 0xb6, 0xae, 0xd0, 0xaa, 0x1e, 0xcc, 0x3b, 0x64, 0x46, 0xbe, 0xf6, + 0x5b, 0x4d, 0x71, 0xc3, 0x19, 0x35, 0x68, 0x8e, 0x39, 0xfd, 0xbd, 0x3a, 0xe6, 0x0c, 0x74, 0x79, + 0xb6, 0xfa, 0x55, 0x0b, 0x46, 0x55, 0x37, 0xbd, 0x4b, 0x1e, 0x37, 0xa8, 0xf6, 0xe4, 0x9c, 0x2b, + 0x15, 0xad, 0xc9, 
0xec, 0xbc, 0xfd, 0x2e, 0xf6, 0xfc, 0xd8, 0x69, 0xb8, 0x77, 0x88, 0x8a, 0x9a, + 0x5b, 0x16, 0xcf, 0x89, 0x05, 0xf4, 0xde, 0x7e, 0x79, 0x54, 0xfd, 0xe3, 0x51, 0x2f, 0xe3, 0x22, + 0xf6, 0x4f, 0xd1, 0xc5, 0x6e, 0x4e, 0x45, 0xf4, 0x22, 0xf4, 0xb7, 0xb6, 0x9d, 0x90, 0x24, 0x1e, + 0x81, 0xf5, 0x57, 0x28, 0xf0, 0xde, 0x7e, 0x79, 0x4c, 0x15, 0x60, 0x10, 0xcc, 0xa9, 0x7b, 0x0f, + 0x99, 0x98, 0x9e, 0x9c, 0x5d, 0x43, 0x26, 0xfe, 0x95, 0x05, 0x7d, 0xeb, 0xf4, 0xf4, 0x7a, 0xf0, + 0x5b, 0xc0, 0x6b, 0xc6, 0x16, 0x70, 0x2a, 0x2f, 0x61, 0x4b, 0xee, 0xea, 0x5f, 0x4e, 0xac, 0xfe, + 0x33, 0xb9, 0x1c, 0x3a, 0x2f, 0xfc, 0x26, 0x0c, 0xb3, 0x34, 0x30, 0xe2, 0xc1, 0xdb, 0xf3, 0xc6, + 0x82, 0x2f, 0x27, 0x16, 0xfc, 0xb8, 0x46, 0xaa, 0xad, 0xf4, 0x27, 0x61, 0x50, 0xbc, 0xa0, 0x4a, + 0xbe, 0xe2, 0x16, 0xb4, 0x58, 0xe2, 0xed, 0x1f, 0x2f, 0x82, 0x91, 0x76, 0x06, 0xfd, 0x9a, 0x05, + 0xb3, 0x01, 0xf7, 0xac, 0xae, 0x2f, 0xb6, 0x03, 0xd7, 0xdb, 0xaa, 0xd6, 0xb6, 0x49, 0xbd, 0xdd, + 0x70, 0xbd, 0xad, 0x95, 0x2d, 0xcf, 0x57, 0xe0, 0xa5, 0xdb, 0xa4, 0xd6, 0x66, 0x56, 0xcf, 0x2e, + 0x39, 0x6e, 0xd4, 0x0b, 0x85, 0xe7, 0xee, 0xee, 0x97, 0x67, 0xf1, 0x81, 0x78, 0xe3, 0x03, 0xb6, + 0x05, 0xfd, 0x9e, 0x05, 0x17, 0x78, 0x36, 0x96, 0xde, 0xdb, 0xdf, 0x41, 0x89, 0x50, 0x91, 0xac, + 0x62, 0x26, 0x1b, 0x24, 0x68, 0xce, 0x7f, 0x50, 0x74, 0xe8, 0x85, 0xca, 0xc1, 0xea, 0xc2, 0x07, + 0x6d, 0x9c, 0xfd, 0x8f, 0x8a, 0x30, 0x2a, 0x42, 0xeb, 0x89, 0x33, 0xe0, 0x45, 0x63, 0x4a, 0x3c, + 0x92, 0x98, 0x12, 0x93, 0x06, 0xf1, 0xe1, 0x6c, 0xff, 0x21, 0x4c, 0xd2, 0xcd, 0xf9, 0x32, 0x71, + 0x82, 0xe8, 0x26, 0x71, 0xb8, 0xbf, 0x5d, 0xf1, 0xc0, 0xbb, 0xbf, 0x52, 0xfc, 0xae, 0x26, 0x99, + 0xe1, 0x34, 0xff, 0xef, 0xa4, 0x33, 0xc7, 0x83, 0x89, 0x54, 0x74, 0xc4, 0x37, 0xa1, 0xa4, 0x9e, + 0xff, 0x88, 0x4d, 0xa7, 0x73, 0x90, 0xd1, 0x24, 0x07, 0xae, 0x57, 0x8c, 0x9f, 0x9e, 0xc5, 0xec, + 0xec, 0xbf, 0x57, 0x30, 0x2a, 0xe4, 0x83, 0xb8, 0x0e, 0x43, 0x4e, 0xc8, 0x02, 0x1f, 0xd7, 0x3b, + 0xa9, 0x7e, 0x53, 0xd5, 0xb0, 0x27, 0x58, 0x73, 0xa2, 0x24, 0x56, 0x3c, 0xd0, 0x65, 0xee, 0xd5, + 0xb8, 0x4b, 0x3a, 0xe9, 0x7d, 0x53, 0xdc, 0x40, 0xfa, 0x3d, 0xee, 0x12, 0x2c, 0xca, 0xa3, 0x4f, + 0x70, 0xb7, 0xd3, 0x2b, 0x9e, 0x7f, 0xcb, 0xbb, 0xe4, 0xfb, 0x32, 0x8c, 0x4a, 0x6f, 0x0c, 0x27, + 0xa5, 0xb3, 0xa9, 0x2a, 0x8e, 0x4d, 0x6e, 0xbd, 0x85, 0x1b, 0xfe, 0x0c, 0xb0, 0xec, 0x13, 0xe6, + 0x6b, 0xfb, 0x10, 0x11, 0x18, 0x17, 0x71, 0x1b, 0x25, 0x4c, 0xf4, 0x5d, 0xe6, 0x0d, 0xd7, 0x2c, + 0x1d, 0x5b, 0x28, 0xae, 0x98, 0x2c, 0x70, 0x92, 0xa7, 0xfd, 0x33, 0x16, 0xb0, 0x97, 0xc7, 0x47, + 0x20, 0x8f, 0x7c, 0xd8, 0x94, 0x47, 0xa6, 0xf3, 0x3a, 0x39, 0x47, 0x14, 0x79, 0x81, 0xcf, 0xac, + 0x4a, 0xe0, 0xdf, 0xde, 0x13, 0xbe, 0x42, 0xdd, 0x2f, 0x57, 0xf6, 0x7f, 0xb7, 0xf8, 0x26, 0x16, + 0xc7, 0x69, 0xf8, 0x2c, 0x0c, 0xd5, 0x9c, 0x96, 0x53, 0xe3, 0x39, 0xd2, 0x72, 0x15, 0x9d, 0x46, + 0xa1, 0xd9, 0x05, 0x51, 0x82, 0x2b, 0xee, 0x64, 0xfc, 0xcf, 0x21, 0x09, 0xee, 0xaa, 0xac, 0x53, + 0x55, 0xce, 0xec, 0xc0, 0xa8, 0xc1, 0xec, 0x81, 0x6a, 0x79, 0x3e, 0xcb, 0x8f, 0x58, 0x15, 0xaf, + 0xb6, 0x09, 0x93, 0x9e, 0xf6, 0x9f, 0x1e, 0x28, 0xf2, 0xe6, 0xfc, 0x58, 0xb7, 0x43, 0x94, 0x9d, + 0x3e, 0xda, 0xa3, 0xe6, 0x04, 0x1b, 0x9c, 0xe6, 0x6c, 0xff, 0x84, 0x05, 0x0f, 0xe9, 0x84, 0xda, + 0xbb, 0xa9, 0x6e, 0xd6, 0xa7, 0x45, 0x18, 0xf2, 0x5b, 0x24, 0x70, 0x22, 0x3f, 0x10, 0xa7, 0xc6, + 0x79, 0xd9, 0xe9, 0x57, 0x05, 0xfc, 0x9e, 0xc8, 0xf8, 0x21, 0xb9, 0x4b, 0x38, 0x56, 0x25, 0xe9, + 0xd5, 0x9a, 0x75, 0x46, 0x28, 0x5e, 0xc8, 0xb1, 0x3d, 0x80, 0x39, 0x32, 0x84, 0x58, 0x60, 0xec, + 0x3f, 0xb7, 0xf8, 0xc4, 0xd2, 0x9b, 0x8e, 
0xde, 0x86, 0x89, 0xa6, 0x13, 0xd5, 0xb6, 0x97, 0x6e, + 0xb7, 0x02, 0x6e, 0xcb, 0x93, 0xfd, 0xf4, 0x74, 0xb7, 0x7e, 0xd2, 0x3e, 0x32, 0xf6, 0xa4, 0x5d, + 0x4b, 0x30, 0xc3, 0x29, 0xf6, 0xe8, 0x26, 0x0c, 0x33, 0x18, 0x7b, 0xfc, 0x19, 0x76, 0x12, 0x0d, + 0xf2, 0x6a, 0x53, 0xbe, 0x20, 0x6b, 0x31, 0x1f, 0xac, 0x33, 0xb5, 0xbf, 0x52, 0xe4, 0xab, 0x9d, + 0x89, 0xf2, 0x4f, 0xc2, 0x60, 0xcb, 0xaf, 0x2f, 0xac, 0x2c, 0x62, 0x31, 0x0a, 0xea, 0x18, 0xa9, + 0x70, 0x30, 0x96, 0x78, 0x74, 0x1e, 0x86, 0xc4, 0x4f, 0x69, 0x7b, 0x65, 0x7b, 0xb3, 0xa0, 0x0b, + 0xb1, 0xc2, 0xa2, 0xe7, 0x00, 0x5a, 0x81, 0xbf, 0xeb, 0xd6, 0x59, 0x30, 0x98, 0xa2, 0xe9, 0xc6, + 0x55, 0x51, 0x18, 0xac, 0x51, 0xa1, 0x57, 0x60, 0xb4, 0xed, 0x85, 0x5c, 0x1c, 0xd1, 0x42, 0x6e, + 0x2b, 0x07, 0xa3, 0x6b, 0x3a, 0x12, 0x9b, 0xb4, 0x68, 0x0e, 0x06, 0x22, 0x87, 0xb9, 0x25, 0xf5, + 0xe7, 0x7b, 0x5b, 0x6f, 0x50, 0x0a, 0x3d, 0x1d, 0x17, 0x2d, 0x80, 0x45, 0x41, 0xf4, 0xa6, 0x7c, + 0x87, 0xcd, 0x37, 0x76, 0xf1, 0xcc, 0xa1, 0xb7, 0x43, 0x40, 0x7b, 0x85, 0x2d, 0x9e, 0x4f, 0x18, + 0xbc, 0xd0, 0xcb, 0x00, 0xe4, 0x76, 0x44, 0x02, 0xcf, 0x69, 0x28, 0x67, 0x42, 0x25, 0x17, 0x2c, + 0xfa, 0xeb, 0x7e, 0x74, 0x2d, 0x24, 0x4b, 0x8a, 0x02, 0x6b, 0xd4, 0xf6, 0xef, 0x95, 0x00, 0x62, + 0xb9, 0x1d, 0xdd, 0x49, 0x6d, 0x5c, 0xcf, 0x74, 0x96, 0xf4, 0x0f, 0x6f, 0xd7, 0x42, 0x9f, 0xb7, + 0x60, 0x58, 0xc4, 0xbc, 0x61, 0x23, 0x54, 0xe8, 0xbc, 0x71, 0x9a, 0xa1, 0x77, 0x68, 0x09, 0xde, + 0x84, 0xe7, 0xe5, 0x0c, 0xd5, 0x30, 0x5d, 0x5b, 0xa1, 0x57, 0x8c, 0xde, 0x2f, 0xaf, 0x8a, 0x45, + 0xa3, 0x2b, 0xd5, 0x55, 0xb1, 0xc4, 0xce, 0x08, 0xfd, 0x96, 0x78, 0xcd, 0xb8, 0x25, 0xf6, 0xe5, + 0x3f, 0x34, 0x35, 0xc4, 0xd7, 0x6e, 0x17, 0x44, 0x54, 0xd1, 0x83, 0x4e, 0xf4, 0xe7, 0xbf, 0x8e, + 0xd4, 0xee, 0x49, 0x5d, 0x02, 0x4e, 0x7c, 0x1a, 0xc6, 0xeb, 0xa6, 0x10, 0x20, 0x66, 0xe2, 0x13, + 0x79, 0x7c, 0x13, 0x32, 0x43, 0x7c, 0xec, 0x27, 0x10, 0x38, 0xc9, 0x18, 0x55, 0x78, 0x0c, 0x92, + 0x15, 0x6f, 0xd3, 0x17, 0x4f, 0x6d, 0xec, 0xdc, 0xb1, 0xdc, 0x0b, 0x23, 0xd2, 0xa4, 0x94, 0xf1, + 0xe9, 0xbe, 0x2e, 0xca, 0x62, 0xc5, 0x05, 0xbd, 0x0e, 0x03, 0xec, 0x79, 0x5c, 0x38, 0x3d, 0x94, + 0xaf, 0x88, 0x37, 0x83, 0x31, 0xc6, 0x0b, 0x92, 0xfd, 0x0d, 0xb1, 0xe0, 0x80, 0x2e, 0xcb, 0xc7, + 0xa7, 0xe1, 0x8a, 0x77, 0x2d, 0x24, 0xec, 0xf1, 0x69, 0x69, 0xfe, 0xb1, 0xf8, 0x5d, 0x29, 0x87, + 0x67, 0x26, 0xed, 0x34, 0x4a, 0x52, 0x29, 0x4a, 0xfc, 0x97, 0xb9, 0x40, 0x45, 0xe8, 0xa8, 0xcc, + 0xe6, 0x99, 0xf9, 0x42, 0xe3, 0xee, 0xbc, 0x6e, 0xb2, 0xc0, 0x49, 0x9e, 0x54, 0x22, 0xe5, 0xab, + 0x5e, 0x3c, 0xd6, 0xe9, 0xb6, 0x77, 0xf0, 0x8b, 0x38, 0x3b, 0x8d, 0x38, 0x04, 0x8b, 0xf2, 0x47, + 0x2a, 0x1e, 0xcc, 0x78, 0x30, 0x91, 0x5c, 0xa2, 0x0f, 0x54, 0x1c, 0xf9, 0xd3, 0x3e, 0x18, 0x33, + 0xa7, 0x14, 0xba, 0x00, 0x25, 0xc1, 0x44, 0xe5, 0xd3, 0x51, 0xab, 0x64, 0x4d, 0x22, 0x70, 0x4c, + 0xc3, 0xd2, 0x28, 0xb1, 0xe2, 0x9a, 0x77, 0x76, 0x9c, 0x46, 0x49, 0x61, 0xb0, 0x46, 0x45, 0x2f, + 0x56, 0x37, 0x7d, 0x3f, 0x52, 0x07, 0x92, 0x9a, 0x77, 0xf3, 0x0c, 0x8a, 0x05, 0x96, 0x1e, 0x44, + 0x3b, 0x24, 0xf0, 0x48, 0xc3, 0x0c, 0xcf, 0xae, 0x0e, 0xa2, 0x2b, 0x3a, 0x12, 0x9b, 0xb4, 0xf4, + 0x38, 0xf5, 0x43, 0x36, 0x91, 0xc5, 0xf5, 0x2d, 0xf6, 0x76, 0xaf, 0xf2, 0x77, 0xfb, 0x12, 0x8f, + 0x3e, 0x06, 0x0f, 0xa9, 0x50, 0x68, 0x98, 0x1b, 0x79, 0x64, 0x8d, 0x03, 0x86, 0xb6, 0xe5, 0xa1, + 0x85, 0x6c, 0x32, 0x9c, 0x57, 0x1e, 0xbd, 0x06, 0x63, 0x42, 0xc4, 0x97, 0x1c, 0x07, 0x4d, 0xd7, + 0xad, 0x2b, 0x06, 0x16, 0x27, 0xa8, 0x65, 0x80, 0x79, 0x26, 0x65, 0x4b, 0x0e, 0x43, 0xe9, 0x00, + 0xf3, 0x3a, 0x1e, 0xa7, 0x4a, 0xa0, 0x39, 0x18, 0xe7, 0x32, 0x98, 
0xeb, 0x6d, 0xf1, 0x31, 0x11, + 0x6f, 0xe9, 0xd4, 0x92, 0xba, 0x6a, 0xa2, 0x71, 0x92, 0x1e, 0xbd, 0x04, 0x23, 0x4e, 0x50, 0xdb, + 0x76, 0x23, 0x52, 0x8b, 0xda, 0x01, 0x7f, 0x64, 0xa7, 0xf9, 0xbe, 0xcd, 0x69, 0x38, 0x6c, 0x50, + 0xda, 0x77, 0x60, 0x2a, 0x23, 0xa0, 0x07, 0x9d, 0x38, 0x4e, 0xcb, 0x95, 0xdf, 0x94, 0x70, 0x30, + 0x9f, 0xab, 0xac, 0xc8, 0xaf, 0xd1, 0xa8, 0xe8, 0xec, 0x64, 0x81, 0x3f, 0xb4, 0xd4, 0xbf, 0x6a, + 0x76, 0x2e, 0x4b, 0x04, 0x8e, 0x69, 0xec, 0xff, 0x54, 0x80, 0xf1, 0x0c, 0xc3, 0x11, 0x4b, 0x3f, + 0x9b, 0xb8, 0xa4, 0xc4, 0xd9, 0x66, 0xcd, 0x7c, 0x05, 0x85, 0x03, 0xe4, 0x2b, 0x28, 0x76, 0xcb, + 0x57, 0xd0, 0xf7, 0x4e, 0xf2, 0x15, 0x98, 0x3d, 0xd6, 0xdf, 0x53, 0x8f, 0x65, 0xe4, 0x38, 0x18, + 0x38, 0x60, 0x8e, 0x03, 0xa3, 0xd3, 0x07, 0x7b, 0xe8, 0xf4, 0x1f, 0x29, 0xc0, 0x44, 0xd2, 0xe6, + 0x74, 0x04, 0x7a, 0xdb, 0xd7, 0x0d, 0xbd, 0xed, 0xf9, 0x5e, 0xde, 0x3e, 0xe7, 0xea, 0x70, 0x71, + 0x42, 0x87, 0xfb, 0x54, 0x4f, 0xdc, 0x3a, 0xeb, 0x73, 0x7f, 0xb2, 0x00, 0xc7, 0x33, 0x4d, 0x71, + 0x47, 0xd0, 0x37, 0x57, 0x8d, 0xbe, 0x79, 0xb6, 0xe7, 0x77, 0xe1, 0xb9, 0x1d, 0x74, 0x23, 0xd1, + 0x41, 0x17, 0x7a, 0x67, 0xd9, 0xb9, 0x97, 0xbe, 0x51, 0x84, 0x33, 0x99, 0xe5, 0x62, 0xb5, 0xe7, + 0xb2, 0xa1, 0xf6, 0x7c, 0x2e, 0xa1, 0xf6, 0xb4, 0x3b, 0x97, 0x3e, 0x1c, 0x3d, 0xa8, 0x78, 0x1f, + 0xcd, 0xa2, 0x3c, 0xdc, 0xa7, 0x0e, 0xd4, 0x78, 0x1f, 0xad, 0x18, 0x61, 0x93, 0xef, 0x77, 0x92, + 0xee, 0xf3, 0x77, 0x2c, 0x38, 0x99, 0x39, 0x36, 0x47, 0xa0, 0xeb, 0x5a, 0x37, 0x75, 0x5d, 0x4f, + 0xf6, 0x3c, 0x5b, 0x73, 0x94, 0x5f, 0x5f, 0x18, 0xc8, 0xf9, 0x16, 0x76, 0x93, 0xbf, 0x0a, 0xc3, + 0x4e, 0xad, 0x46, 0xc2, 0x70, 0xcd, 0xaf, 0xab, 0xd0, 0xe6, 0xcf, 0xb2, 0x7b, 0x56, 0x0c, 0xbe, + 0xb7, 0x5f, 0x9e, 0x49, 0xb2, 0x88, 0xd1, 0x58, 0xe7, 0x80, 0x3e, 0x01, 0x43, 0xa1, 0xcc, 0x4a, + 0xd7, 0x77, 0xff, 0x59, 0xe9, 0x98, 0x92, 0x40, 0x69, 0x2a, 0x14, 0x4b, 0xf4, 0xbf, 0xe9, 0xf1, + 0x76, 0xd2, 0x52, 0x65, 0x22, 0xfa, 0xcb, 0x7d, 0x44, 0xdd, 0x79, 0x0e, 0x60, 0x57, 0x5d, 0x09, + 0x92, 0x5a, 0x08, 0xed, 0xb2, 0xa0, 0x51, 0xa1, 0x8f, 0xc0, 0x44, 0xc8, 0x43, 0x4d, 0xc6, 0xce, + 0x13, 0x7c, 0x2e, 0xb2, 0x68, 0x5d, 0xd5, 0x04, 0x0e, 0xa7, 0xa8, 0xd1, 0xb2, 0xac, 0x95, 0xb9, + 0xc9, 0xf0, 0xe9, 0x79, 0x2e, 0xae, 0x51, 0xb8, 0xca, 0x1c, 0x4b, 0x0e, 0x02, 0xeb, 0x7e, 0xad, + 0x24, 0xfa, 0x04, 0x00, 0x9d, 0x44, 0x42, 0x1b, 0x31, 0x98, 0xbf, 0x85, 0xd2, 0xbd, 0xa5, 0x9e, + 0xe9, 0x3b, 0xce, 0x1e, 0x36, 0x2f, 0x2a, 0x26, 0x58, 0x63, 0x88, 0x1c, 0x18, 0x8d, 0xff, 0xc5, + 0x19, 0xa2, 0xcf, 0xe7, 0xd6, 0x90, 0x64, 0xce, 0x14, 0xdf, 0x8b, 0x3a, 0x0b, 0x6c, 0x72, 0x44, + 0x1f, 0x87, 0x93, 0xbb, 0xb9, 0x1e, 0x29, 0xa5, 0x38, 0xe9, 0x63, 0xbe, 0x1f, 0x4a, 0x7e, 0x79, + 0xfb, 0x77, 0x01, 0x1e, 0xee, 0xb0, 0xd3, 0xa3, 0x39, 0xd3, 0x9a, 0xfc, 0x74, 0x52, 0x45, 0x30, + 0x93, 0x59, 0xd8, 0xd0, 0x19, 0x24, 0x16, 0x54, 0xe1, 0x1d, 0x2f, 0xa8, 0x1f, 0xb2, 0x34, 0xe5, + 0x0d, 0x77, 0xe7, 0xfd, 0xf0, 0x01, 0x4f, 0xb0, 0x43, 0xd4, 0xe6, 0x6c, 0x66, 0xa8, 0x44, 0x9e, + 0xeb, 0xb9, 0x39, 0xbd, 0xeb, 0x48, 0xbe, 0x96, 0x1d, 0xbc, 0x99, 0x6b, 0x4b, 0x2e, 0x1d, 0xf4, + 0xfb, 0x8f, 0x2a, 0x90, 0xf3, 0x1f, 0x5a, 0x70, 0x32, 0x05, 0xe6, 0x6d, 0x20, 0xa1, 0x88, 0x2f, + 0xb6, 0xfe, 0x8e, 0x1b, 0x2f, 0x19, 0xf2, 0x6f, 0xb8, 0x2c, 0xbe, 0xe1, 0x64, 0x2e, 0x5d, 0xb2, + 0xe9, 0x5f, 0xfc, 0x93, 0xf2, 0x14, 0xab, 0xc0, 0x24, 0xc4, 0xf9, 0x4d, 0x47, 0x2d, 0x38, 0x5b, + 0x6b, 0x07, 0x41, 0x3c, 0x59, 0x33, 0x16, 0x27, 0xbf, 0xeb, 0x3d, 0x76, 0x77, 0xbf, 0x7c, 0x76, + 0xa1, 0x0b, 0x2d, 0xee, 0xca, 0x0d, 0x79, 0x80, 0x9a, 0x29, 0xbf, 0x2f, 0x91, 0x18, 0x3e, 
0xd3, + 0x53, 0x23, 0xed, 0x25, 0xc6, 0x1f, 0xb0, 0x66, 0x78, 0x8f, 0x65, 0x70, 0x3e, 0x5a, 0xed, 0xc9, + 0xb7, 0x26, 0x32, 0xf7, 0xcc, 0x2a, 0x9c, 0xe9, 0x3c, 0x99, 0x0e, 0xf4, 0x78, 0xfe, 0x0f, 0x2c, + 0x38, 0xdd, 0x31, 0x42, 0xd3, 0xb7, 0xe1, 0x65, 0xc1, 0xfe, 0x9c, 0x05, 0x8f, 0x64, 0x96, 0x30, + 0x5c, 0x0c, 0x2f, 0x40, 0xa9, 0x96, 0x48, 0x6b, 0x1c, 0xc7, 0x2a, 0x51, 0x29, 0x8d, 0x63, 0x1a, + 0xc3, 0x93, 0xb0, 0xd0, 0xd5, 0x93, 0xf0, 0x37, 0x2d, 0x48, 0x1d, 0xf5, 0x47, 0x20, 0x79, 0xae, + 0x98, 0x92, 0xe7, 0x63, 0xbd, 0xf4, 0x66, 0x8e, 0xd0, 0xf9, 0x97, 0xe3, 0x70, 0x22, 0xe7, 0xed, + 0xeb, 0x2e, 0x4c, 0x6e, 0xd5, 0x88, 0x19, 0xec, 0xa0, 0x53, 0x10, 0xb0, 0x8e, 0x91, 0x11, 0x78, + 0x36, 0xe9, 0x14, 0x09, 0x4e, 0x57, 0x81, 0x3e, 0x67, 0xc1, 0x31, 0xe7, 0x56, 0xb8, 0x44, 0x6f, + 0x10, 0x6e, 0x6d, 0xbe, 0xe1, 0xd7, 0x76, 0xa8, 0x60, 0x26, 0x97, 0xd5, 0x0b, 0x99, 0x5a, 0xdd, + 0x1b, 0xd5, 0x14, 0xbd, 0x51, 0xfd, 0xf4, 0xdd, 0xfd, 0xf2, 0xb1, 0x2c, 0x2a, 0x9c, 0x59, 0x17, + 0xc2, 0x22, 0x7b, 0x8f, 0x13, 0x6d, 0x77, 0x0a, 0xc7, 0x91, 0xf5, 0x48, 0x99, 0x8b, 0xc4, 0x12, + 0x83, 0x15, 0x1f, 0xf4, 0x29, 0x28, 0x6d, 0xc9, 0x97, 0xf7, 0x19, 0x22, 0x77, 0xdc, 0x91, 0x9d, + 0xe3, 0x11, 0x70, 0xd7, 0x0c, 0x45, 0x84, 0x63, 0xa6, 0xe8, 0x35, 0x28, 0x7a, 0x9b, 0x61, 0xa7, + 0xf4, 0xfb, 0x09, 0x1f, 0x5c, 0x1e, 0xf4, 0x66, 0x7d, 0xb9, 0x8a, 0x69, 0x41, 0x74, 0x19, 0x8a, + 0xc1, 0xcd, 0xba, 0x30, 0x49, 0x64, 0x2e, 0x52, 0x3c, 0xbf, 0x98, 0xd3, 0x2a, 0xc6, 0x09, 0xcf, + 0x2f, 0x62, 0xca, 0x02, 0x55, 0xa0, 0x9f, 0x3d, 0x18, 0x15, 0xa2, 0x6d, 0xe6, 0x55, 0xbe, 0xc3, + 0xc3, 0x6b, 0xfe, 0x18, 0x8d, 0x11, 0x60, 0xce, 0x08, 0x6d, 0xc0, 0x40, 0x8d, 0xa5, 0x6a, 0x17, + 0xb2, 0xec, 0xfb, 0x33, 0x8d, 0x0f, 0x1d, 0x72, 0xd8, 0x0b, 0x5d, 0x3c, 0xa3, 0xc0, 0x82, 0x17, + 0xe3, 0x4a, 0x5a, 0xdb, 0x9b, 0xf2, 0xc4, 0xca, 0xe6, 0x4a, 0x5a, 0xdb, 0xcb, 0xd5, 0x8e, 0x5c, + 0x19, 0x05, 0x16, 0xbc, 0xd0, 0xcb, 0x50, 0xd8, 0xac, 0x89, 0xc7, 0xa0, 0x99, 0x56, 0x08, 0x33, + 0x6e, 0xd1, 0xfc, 0xc0, 0xdd, 0xfd, 0x72, 0x61, 0x79, 0x01, 0x17, 0x36, 0x6b, 0x68, 0x1d, 0x06, + 0x37, 0x79, 0xa4, 0x13, 0x61, 0x68, 0x78, 0x22, 0x3b, 0x08, 0x4b, 0x2a, 0x18, 0x0a, 0x7f, 0x58, + 0x28, 0x10, 0x58, 0x32, 0x61, 0xc9, 0x64, 0x54, 0xc4, 0x16, 0x11, 0x30, 0x72, 0xf6, 0x60, 0x51, + 0x76, 0xf8, 0x55, 0x23, 0x8e, 0xfb, 0x82, 0x35, 0x8e, 0x74, 0x56, 0x3b, 0x77, 0xda, 0x01, 0xcb, + 0x26, 0x20, 0x22, 0x8b, 0x65, 0xce, 0xea, 0x39, 0x49, 0xd4, 0x69, 0x56, 0x2b, 0x22, 0x1c, 0x33, + 0x45, 0x3b, 0x30, 0xba, 0x1b, 0xb6, 0xb6, 0x89, 0x5c, 0xd2, 0x2c, 0xd0, 0x58, 0x8e, 0x34, 0x7b, + 0x5d, 0x10, 0xba, 0x41, 0xd4, 0x76, 0x1a, 0xa9, 0x5d, 0x88, 0x5d, 0x6b, 0xae, 0xeb, 0xcc, 0xb0, + 0xc9, 0x9b, 0x76, 0xff, 0xdb, 0x6d, 0xff, 0xe6, 0x5e, 0x44, 0x44, 0x9c, 0xc7, 0xcc, 0xee, 0x7f, + 0x83, 0x93, 0xa4, 0xbb, 0x5f, 0x20, 0xb0, 0x64, 0x82, 0xae, 0x8b, 0xee, 0x61, 0xbb, 0xe7, 0x44, + 0x7e, 0x10, 0xe9, 0x39, 0x49, 0x94, 0xd3, 0x29, 0x6c, 0xb7, 0x8c, 0x59, 0xb1, 0x5d, 0xb2, 0xb5, + 0xed, 0x47, 0xbe, 0x97, 0xd8, 0xa1, 0x27, 0xf3, 0x77, 0xc9, 0x4a, 0x06, 0x7d, 0x7a, 0x97, 0xcc, + 0xa2, 0xc2, 0x99, 0x75, 0xa1, 0x3a, 0x8c, 0xb5, 0xfc, 0x20, 0xba, 0xe5, 0x07, 0x72, 0x7e, 0xa1, + 0x0e, 0x8a, 0x52, 0x83, 0x52, 0xd4, 0xc8, 0x42, 0xa8, 0x9a, 0x18, 0x9c, 0xe0, 0x89, 0x3e, 0x0a, + 0x83, 0x61, 0xcd, 0x69, 0x90, 0x95, 0xab, 0xd3, 0x53, 0xf9, 0xc7, 0x4f, 0x95, 0x93, 0xe4, 0xcc, + 0x2e, 0x1e, 0xa8, 0x86, 0x93, 0x60, 0xc9, 0x0e, 0x2d, 0x43, 0x3f, 0x4b, 0xd2, 0xca, 0x82, 0x92, + 0xe6, 0xc4, 0xc2, 0x4e, 0x3d, 0xf7, 0xe0, 0x7b, 0x13, 0x03, 0x63, 0x5e, 0x9c, 0xae, 0x01, 0xa1, + 0x29, 0xf0, 0xc3, 
0xe9, 0xe3, 0xf9, 0x6b, 0x40, 0x28, 0x18, 0xae, 0x56, 0x3b, 0xad, 0x01, 0x45, + 0x84, 0x63, 0xa6, 0x74, 0x67, 0xa6, 0xbb, 0xe9, 0x89, 0x0e, 0xae, 0x7c, 0xb9, 0x7b, 0x29, 0xdb, + 0x99, 0xe9, 0x4e, 0x4a, 0x59, 0xd8, 0xbf, 0x3e, 0x94, 0x96, 0x59, 0x98, 0x86, 0xe9, 0xff, 0xb0, + 0x52, 0xce, 0x07, 0x1f, 0xe8, 0x55, 0xe1, 0x7d, 0x88, 0x17, 0xd7, 0xcf, 0x59, 0x70, 0xa2, 0x95, + 0xf9, 0x21, 0x42, 0x00, 0xe8, 0x4d, 0x6f, 0xce, 0x3f, 0x5d, 0x05, 0xb0, 0xcd, 0xc6, 0xe3, 0x9c, + 0x9a, 0x92, 0xca, 0x81, 0xe2, 0x3b, 0x56, 0x0e, 0xac, 0xc1, 0x50, 0x8d, 0xdf, 0xe4, 0x64, 0xe0, + 0xf5, 0x9e, 0xc2, 0x2f, 0x32, 0x51, 0x42, 0x5c, 0x01, 0x37, 0xb1, 0x62, 0x81, 0x7e, 0xd8, 0x82, + 0xd3, 0xc9, 0xa6, 0x63, 0xc2, 0xd0, 0x22, 0xea, 0x2d, 0x57, 0x6b, 0x2d, 0x8b, 0xef, 0x4f, 0xc9, + 0xff, 0x06, 0xf1, 0xbd, 0x6e, 0x04, 0xb8, 0x73, 0x65, 0x68, 0x31, 0x43, 0xaf, 0x36, 0x60, 0x5a, + 0x14, 0x7b, 0xd0, 0xad, 0xbd, 0x00, 0x23, 0x4d, 0xbf, 0xed, 0x45, 0xc2, 0xf3, 0x4f, 0x78, 0x21, + 0x31, 0xef, 0x9b, 0x35, 0x0d, 0x8e, 0x0d, 0xaa, 0x84, 0x46, 0x6e, 0xe8, 0xbe, 0x35, 0x72, 0x6f, + 0xc1, 0x88, 0xa7, 0xb9, 0xaa, 0x77, 0xba, 0xc1, 0x0a, 0xed, 0xa2, 0x46, 0xcd, 0x5b, 0xa9, 0x43, + 0xb0, 0xc1, 0xad, 0xb3, 0xb6, 0x0c, 0xde, 0x99, 0xb6, 0xec, 0x68, 0xfd, 0x0d, 0x7f, 0xbe, 0x90, + 0x71, 0x63, 0xe0, 0x5a, 0xb9, 0x57, 0x4d, 0xad, 0xdc, 0xb9, 0xa4, 0x56, 0x2e, 0x65, 0xaa, 0x32, + 0x14, 0x72, 0xbd, 0x67, 0x87, 0xeb, 0x39, 0xa4, 0xee, 0xf7, 0x5a, 0xf0, 0x10, 0xb3, 0x7d, 0xd0, + 0x0a, 0xde, 0xb1, 0xbd, 0xe3, 0xe1, 0xbb, 0xfb, 0xe5, 0x87, 0x56, 0xb3, 0xd9, 0xe1, 0xbc, 0x7a, + 0xec, 0x06, 0x9c, 0xed, 0x76, 0xee, 0x32, 0x1f, 0xd7, 0xba, 0x72, 0x8e, 0x88, 0x7d, 0x5c, 0xeb, + 0x2b, 0x8b, 0x98, 0x61, 0x7a, 0x0d, 0x18, 0x67, 0xff, 0x07, 0x0b, 0x8a, 0x15, 0xbf, 0x7e, 0x04, + 0x37, 0xfa, 0x0f, 0x1b, 0x37, 0xfa, 0x87, 0xb3, 0x4f, 0xfc, 0x7a, 0xae, 0xb1, 0x6f, 0x29, 0x61, + 0xec, 0x3b, 0x9d, 0xc7, 0xa0, 0xb3, 0x69, 0xef, 0xa7, 0x8a, 0x30, 0x5c, 0xf1, 0xeb, 0x6a, 0x9d, + 0xfd, 0x93, 0xfb, 0x79, 0x60, 0x92, 0x9b, 0xef, 0x47, 0xe3, 0xcc, 0x5c, 0x63, 0x65, 0xc8, 0x81, + 0x6f, 0xb3, 0x77, 0x26, 0x37, 0x88, 0xbb, 0xb5, 0x1d, 0x91, 0x7a, 0xf2, 0x73, 0x8e, 0xee, 0x9d, + 0xc9, 0x37, 0x8b, 0x30, 0x9e, 0xa8, 0x1d, 0x35, 0x60, 0xb4, 0xa1, 0x9b, 0x92, 0xc4, 0x3c, 0xbd, + 0x2f, 0x2b, 0x94, 0xf0, 0xd3, 0xd7, 0x40, 0xd8, 0x64, 0x8e, 0x66, 0x01, 0x94, 0x6f, 0x85, 0xd4, + 0xf6, 0xb3, 0x6b, 0x8d, 0x72, 0xbe, 0x08, 0xb1, 0x46, 0x81, 0x5e, 0x84, 0xe1, 0xc8, 0x6f, 0xf9, + 0x0d, 0x7f, 0x6b, 0xef, 0x0a, 0x91, 0xb1, 0x04, 0x95, 0xf7, 0xed, 0x46, 0x8c, 0xc2, 0x3a, 0x1d, + 0xba, 0x0d, 0x93, 0x8a, 0x49, 0xf5, 0x10, 0xcc, 0x6b, 0x4c, 0x6d, 0xb2, 0x9e, 0xe4, 0x88, 0xd3, + 0x95, 0xa0, 0x97, 0x61, 0x8c, 0xb9, 0x01, 0xb3, 0xf2, 0x57, 0xc8, 0x9e, 0x8c, 0x31, 0xcb, 0x24, + 0xec, 0x35, 0x03, 0x83, 0x13, 0x94, 0x68, 0x01, 0x26, 0x9b, 0x6e, 0x98, 0x28, 0x3e, 0xc0, 0x8a, + 0xb3, 0x06, 0xac, 0x25, 0x91, 0x38, 0x4d, 0x6f, 0xff, 0xac, 0x18, 0x63, 0x2f, 0x72, 0xdf, 0x5b, + 0x8e, 0xef, 0xee, 0xe5, 0xf8, 0x0d, 0x0b, 0x26, 0x68, 0xed, 0xcc, 0xb7, 0x51, 0x0a, 0x52, 0x2a, + 0x0b, 0x81, 0xd5, 0x21, 0x0b, 0xc1, 0x39, 0xba, 0x6d, 0xd7, 0xfd, 0x76, 0x24, 0xb4, 0xa3, 0xda, + 0xbe, 0x4c, 0xa1, 0x58, 0x60, 0x05, 0x1d, 0x09, 0x02, 0xf1, 0x1e, 0x5b, 0xa7, 0x23, 0x41, 0x80, + 0x05, 0x56, 0x26, 0x29, 0xe8, 0xcb, 0x4e, 0x52, 0xc0, 0x63, 0x4d, 0x0b, 0x2f, 0x38, 0x21, 0xd2, + 0x6a, 0xb1, 0xa6, 0xa5, 0x7b, 0x5c, 0x4c, 0x63, 0x7f, 0xad, 0x08, 0x23, 0x15, 0xbf, 0x1e, 0x3b, + 0x76, 0xbc, 0x60, 0x38, 0x76, 0x9c, 0x4d, 0x38, 0x76, 0x4c, 0xe8, 0xb4, 0xef, 0xb9, 0x71, 0x7c, + 0xab, 0xdc, 0x38, 0x7e, 0xc3, 0x62, 0xa3, 
0xb6, 0xb8, 0x5e, 0xe5, 0xae, 0xb2, 0xe8, 0x22, 0x0c, + 0xb3, 0x1d, 0x8e, 0x05, 0x00, 0x90, 0xde, 0x0e, 0x2c, 0x69, 0xe0, 0x7a, 0x0c, 0xc6, 0x3a, 0x0d, + 0x3a, 0x0f, 0x43, 0x21, 0x71, 0x82, 0xda, 0xb6, 0xda, 0xde, 0x85, 0x6b, 0x02, 0x87, 0x61, 0x85, + 0x45, 0x6f, 0xc4, 0x61, 0x8e, 0x8b, 0xf9, 0x0f, 0x8a, 0xf5, 0xf6, 0xf0, 0x25, 0x92, 0x1f, 0xdb, + 0xd8, 0xbe, 0x01, 0x28, 0x4d, 0xdf, 0x43, 0x20, 0xce, 0xb2, 0x19, 0x88, 0xb3, 0x94, 0x0a, 0xc2, + 0xf9, 0xd7, 0x16, 0x8c, 0x55, 0xfc, 0x3a, 0x5d, 0xba, 0xdf, 0x49, 0xeb, 0x54, 0x8f, 0xf1, 0x3e, + 0xd0, 0x21, 0xc6, 0xfb, 0xa3, 0xd0, 0x5f, 0xf1, 0xeb, 0x5d, 0x82, 0x85, 0xfe, 0x2d, 0x0b, 0x06, + 0x2b, 0x7e, 0xfd, 0x08, 0x0c, 0x2f, 0xaf, 0x9a, 0x86, 0x97, 0x87, 0x72, 0xe6, 0x4d, 0x8e, 0xad, + 0xe5, 0xff, 0xef, 0x83, 0x51, 0xda, 0x4e, 0x7f, 0x4b, 0x0e, 0xa5, 0xd1, 0x6d, 0x56, 0x0f, 0xdd, + 0x46, 0xaf, 0x01, 0x7e, 0xa3, 0xe1, 0xdf, 0x4a, 0x0e, 0xeb, 0x32, 0x83, 0x62, 0x81, 0x45, 0xcf, + 0xc0, 0x50, 0x2b, 0x20, 0xbb, 0xae, 0x2f, 0xe4, 0x6b, 0xcd, 0x8c, 0x55, 0x11, 0x70, 0xac, 0x28, + 0xe8, 0xc5, 0x3b, 0x74, 0x3d, 0x2a, 0x4b, 0xd4, 0x7c, 0xaf, 0xce, 0x6d, 0x13, 0x45, 0x91, 0x88, + 0x48, 0x83, 0x63, 0x83, 0x0a, 0xdd, 0x80, 0x12, 0xfb, 0xcf, 0xb6, 0x9d, 0x83, 0xa7, 0x40, 0x17, + 0xa9, 0x59, 0x05, 0x03, 0x1c, 0xf3, 0x42, 0xcf, 0x01, 0x44, 0x32, 0x99, 0x47, 0x28, 0x82, 0x46, + 0xaa, 0xbb, 0x88, 0x4a, 0xf3, 0x11, 0x62, 0x8d, 0x0a, 0x3d, 0x0d, 0xa5, 0xc8, 0x71, 0x1b, 0xab, + 0xae, 0xc7, 0xec, 0xf7, 0xb4, 0xfd, 0x22, 0x43, 0xaa, 0x00, 0xe2, 0x18, 0x4f, 0x65, 0x41, 0x16, + 0x0e, 0x68, 0x7e, 0x2f, 0x12, 0xc9, 0xc0, 0x8a, 0x5c, 0x16, 0x5c, 0x55, 0x50, 0xac, 0x51, 0xa0, + 0x6d, 0x38, 0xe5, 0x7a, 0x2c, 0x69, 0x0f, 0xa9, 0xee, 0xb8, 0xad, 0x8d, 0xd5, 0xea, 0x75, 0x12, + 0xb8, 0x9b, 0x7b, 0xf3, 0x4e, 0x6d, 0x87, 0x78, 0x32, 0xb9, 0xf5, 0x63, 0xa2, 0x89, 0xa7, 0x56, + 0x3a, 0xd0, 0xe2, 0x8e, 0x9c, 0xec, 0xe7, 0xd9, 0x7c, 0xbf, 0x5a, 0x45, 0x4f, 0x19, 0x5b, 0xc7, + 0x09, 0x7d, 0xeb, 0xb8, 0xb7, 0x5f, 0x1e, 0xb8, 0x5a, 0xd5, 0x62, 0xd2, 0xbc, 0x04, 0xc7, 0x2b, + 0x7e, 0xbd, 0xe2, 0x07, 0xd1, 0xb2, 0x1f, 0xdc, 0x72, 0x82, 0xba, 0x9c, 0x5e, 0x65, 0x19, 0x95, + 0x87, 0xee, 0x9f, 0xfd, 0x7c, 0x77, 0x31, 0x22, 0xee, 0x3c, 0xcf, 0x24, 0xb6, 0x03, 0x3e, 0xb7, + 0xac, 0x31, 0xd9, 0x41, 0xa5, 0xbd, 0xba, 0xe4, 0x44, 0x04, 0x5d, 0x85, 0xd1, 0x9a, 0x7e, 0x8c, + 0x8a, 0xe2, 0x4f, 0xca, 0x83, 0xcc, 0x38, 0x63, 0x33, 0xcf, 0x5d, 0xb3, 0xbc, 0xfd, 0x59, 0x51, + 0x09, 0x57, 0x44, 0x70, 0x97, 0xd6, 0x5e, 0xf2, 0xbf, 0xcb, 0xbc, 0x38, 0x85, 0xfc, 0x98, 0x87, + 0xdc, 0xae, 0xdc, 0x31, 0x2f, 0x8e, 0xfd, 0xdd, 0x70, 0x22, 0x59, 0x7d, 0xcf, 0x49, 0xe8, 0x17, + 0x60, 0x32, 0xd0, 0x0b, 0x6a, 0x49, 0x06, 0x8f, 0xf3, 0x5c, 0x26, 0x09, 0x24, 0x4e, 0xd3, 0xdb, + 0x2f, 0xc2, 0x24, 0xbd, 0xfc, 0x2a, 0x41, 0x8e, 0xf5, 0x72, 0xf7, 0xf0, 0x44, 0xff, 0xb1, 0x9f, + 0x1d, 0x44, 0x89, 0x8c, 0x53, 0xe8, 0x93, 0x30, 0x16, 0x92, 0x55, 0xd7, 0x6b, 0xdf, 0x96, 0xba, + 0xb5, 0x0e, 0xef, 0x8c, 0xab, 0x4b, 0x3a, 0x25, 0xbf, 0x3f, 0x98, 0x30, 0x9c, 0xe0, 0x86, 0x9a, + 0x30, 0x76, 0xcb, 0xf5, 0xea, 0xfe, 0xad, 0x50, 0xf2, 0x1f, 0xca, 0x57, 0xd4, 0xdf, 0xe0, 0x94, + 0x89, 0x36, 0x1a, 0xd5, 0xdd, 0x30, 0x98, 0xe1, 0x04, 0x73, 0xba, 0xd8, 0x83, 0xb6, 0x37, 0x17, + 0x5e, 0x0b, 0x09, 0x7f, 0x39, 0x2a, 0x16, 0x3b, 0x96, 0x40, 0x1c, 0xe3, 0xe9, 0x62, 0x67, 0x7f, + 0x2e, 0x05, 0x7e, 0x9b, 0xa7, 0x37, 0x12, 0x8b, 0x1d, 0x2b, 0x28, 0xd6, 0x28, 0xe8, 0x66, 0xc8, + 0xfe, 0xad, 0xfb, 0x1e, 0xf6, 0xfd, 0x48, 0x6e, 0x9f, 0x2c, 0x3d, 0x9f, 0x06, 0xc7, 0x06, 0x15, + 0x5a, 0x06, 0x14, 0xb6, 0x5b, 0xad, 0x06, 0x73, 0x5d, 0x74, 0x1a, 
0x8c, 0x15, 0x77, 0xbb, 0x2a, + 0x72, 0xef, 0x96, 0x6a, 0x0a, 0x8b, 0x33, 0x4a, 0xd0, 0x73, 0x71, 0x53, 0x34, 0xb5, 0x9f, 0x35, + 0x95, 0x1b, 0xf5, 0xaa, 0xbc, 0x9d, 0x12, 0x87, 0x96, 0x60, 0x30, 0xdc, 0x0b, 0x6b, 0x51, 0x23, + 0xec, 0x94, 0x0c, 0xb1, 0xca, 0x48, 0xb4, 0x5c, 0xbc, 0xbc, 0x08, 0x96, 0x65, 0x51, 0x0d, 0xa6, + 0x04, 0xc7, 0x85, 0x6d, 0xc7, 0x53, 0x29, 0xda, 0xb8, 0xf7, 0xde, 0xc5, 0xbb, 0xfb, 0xe5, 0x29, + 0x51, 0xb3, 0x8e, 0xbe, 0xb7, 0x5f, 0xa6, 0x8b, 0x23, 0x03, 0x83, 0xb3, 0xb8, 0xf1, 0xc9, 0x57, + 0xab, 0xf9, 0xcd, 0x56, 0x25, 0xf0, 0x37, 0xdd, 0x06, 0xe9, 0x64, 0x18, 0xad, 0x1a, 0x94, 0x62, + 0xf2, 0x19, 0x30, 0x9c, 0xe0, 0x66, 0x7f, 0x96, 0xc9, 0x8e, 0x55, 0x77, 0xcb, 0x73, 0xa2, 0x76, + 0x40, 0x50, 0x13, 0x46, 0x5b, 0x6c, 0x77, 0x11, 0x49, 0x87, 0xc4, 0x5c, 0x7f, 0xa1, 0x47, 0xfd, + 0xd7, 0x2d, 0x96, 0x36, 0xd1, 0xf0, 0x83, 0xac, 0xe8, 0xec, 0xb0, 0xc9, 0xdd, 0xfe, 0x17, 0x27, + 0x99, 0xf4, 0x51, 0xe5, 0x4a, 0xad, 0x41, 0xf1, 0x6c, 0x4c, 0x5c, 0x63, 0x67, 0xf2, 0xd5, 0xc7, + 0xf1, 0xb0, 0x88, 0xa7, 0x67, 0x58, 0x96, 0x45, 0x9f, 0x80, 0x31, 0x7a, 0x2b, 0x54, 0x12, 0x40, + 0x38, 0x7d, 0x2c, 0x3f, 0xbc, 0x8f, 0xa2, 0xd2, 0x13, 0x92, 0xe9, 0x85, 0x71, 0x82, 0x19, 0x7a, + 0x83, 0xb9, 0x06, 0x4a, 0xd6, 0x85, 0x5e, 0x58, 0xeb, 0x5e, 0x80, 0x92, 0xad, 0xc6, 0x04, 0xb5, + 0x61, 0x2a, 0x9d, 0x76, 0x35, 0x9c, 0xb6, 0xf3, 0xc5, 0xeb, 0x74, 0xe6, 0xd4, 0x38, 0x73, 0x54, + 0x1a, 0x17, 0xe2, 0x2c, 0xfe, 0x68, 0x35, 0x99, 0x14, 0xb3, 0x68, 0x28, 0x9e, 0x53, 0x89, 0x31, + 0x47, 0x3b, 0xe6, 0xc3, 0xdc, 0x82, 0xd3, 0x5a, 0x5e, 0xc1, 0x4b, 0x81, 0xc3, 0x5c, 0x53, 0x5c, + 0xb6, 0x9d, 0x6a, 0x72, 0xd1, 0x23, 0x77, 0xf7, 0xcb, 0xa7, 0x37, 0x3a, 0x11, 0xe2, 0xce, 0x7c, + 0xd0, 0x55, 0x38, 0xce, 0x83, 0x53, 0x2c, 0x12, 0xa7, 0xde, 0x70, 0x3d, 0x25, 0x78, 0xf1, 0x25, + 0x7f, 0xf2, 0xee, 0x7e, 0xf9, 0xf8, 0x5c, 0x16, 0x01, 0xce, 0x2e, 0x87, 0x5e, 0x85, 0x52, 0xdd, + 0x0b, 0x45, 0x1f, 0x0c, 0x18, 0xa9, 0x1b, 0x4b, 0x8b, 0xeb, 0x55, 0xf5, 0xfd, 0xf1, 0x1f, 0x1c, + 0x17, 0x40, 0x5b, 0xdc, 0xf2, 0xa1, 0xd4, 0x55, 0x83, 0xa9, 0x98, 0x85, 0x49, 0x8d, 0xae, 0xf1, + 0x3c, 0x9d, 0x9b, 0xfc, 0xd4, 0xab, 0x2d, 0xe3, 0xe5, 0xba, 0xc1, 0x18, 0xbd, 0x0e, 0x48, 0xa4, + 0x08, 0x99, 0xab, 0xb1, 0x8c, 0x56, 0x9a, 0x3b, 0xa2, 0xba, 0x85, 0x56, 0x53, 0x14, 0x38, 0xa3, + 0x14, 0xba, 0x4c, 0x77, 0x15, 0x1d, 0x2a, 0x76, 0x2d, 0x95, 0x20, 0x78, 0x91, 0xb4, 0x02, 0xc2, + 0x3c, 0xe8, 0x4c, 0x8e, 0x38, 0x51, 0x0e, 0xd5, 0xe1, 0x94, 0xd3, 0x8e, 0x7c, 0x66, 0x54, 0x32, + 0x49, 0x37, 0xfc, 0x1d, 0xe2, 0x31, 0x7b, 0xee, 0x10, 0x8b, 0x85, 0x78, 0x6a, 0xae, 0x03, 0x1d, + 0xee, 0xc8, 0x85, 0x4a, 0xe4, 0x32, 0xe7, 0xbf, 0xb0, 0xf7, 0x18, 0x2f, 0x6d, 0xb9, 0x11, 0x54, + 0x52, 0xa0, 0x17, 0x61, 0x78, 0xdb, 0x0f, 0xa3, 0x75, 0x12, 0xdd, 0xf2, 0x83, 0x1d, 0x11, 0x93, + 0x3c, 0xce, 0x03, 0x11, 0xa3, 0xb0, 0x4e, 0x47, 0xaf, 0xdc, 0xcc, 0xdb, 0x68, 0x65, 0x91, 0x39, + 0x7a, 0x0c, 0xc5, 0x7b, 0xcc, 0x65, 0x0e, 0xc6, 0x12, 0x2f, 0x49, 0x57, 0x2a, 0x0b, 0xcc, 0x69, + 0x23, 0x41, 0xba, 0x52, 0x59, 0xc0, 0x12, 0x4f, 0xa7, 0x6b, 0xb8, 0xed, 0x04, 0xa4, 0x12, 0xf8, + 0x35, 0x12, 0x6a, 0xd9, 0x47, 0x1e, 0xe6, 0x11, 0xd7, 0xe9, 0x74, 0xad, 0x66, 0x11, 0xe0, 0xec, + 0x72, 0x88, 0xa4, 0x73, 0x6a, 0x8e, 0xe5, 0x5b, 0xdb, 0xd2, 0xf2, 0x4c, 0x8f, 0x69, 0x35, 0x3d, + 0x98, 0x50, 0xd9, 0x3c, 0x79, 0x8c, 0xf5, 0x70, 0x7a, 0x9c, 0xcd, 0xed, 0xde, 0x03, 0xb4, 0x2b, + 0xfb, 0xe5, 0x4a, 0x82, 0x13, 0x4e, 0xf1, 0x36, 0x82, 0x6d, 0x4e, 0x74, 0x0d, 0xb6, 0x79, 0x01, + 0x4a, 0x61, 0xfb, 0x66, 0xdd, 0x6f, 0x3a, 0xae, 0xc7, 0x9c, 0x36, 0xb4, 0xbb, 0x5f, 0x55, 
0x22, + 0x70, 0x4c, 0x83, 0x96, 0x61, 0xc8, 0x91, 0xc6, 0x49, 0x94, 0x1f, 0x47, 0x4c, 0x99, 0x24, 0x79, + 0x68, 0x1d, 0x69, 0x8e, 0x54, 0x65, 0xd1, 0x2b, 0x30, 0x2a, 0x82, 0x2b, 0x88, 0x04, 0xd8, 0x53, + 0xe6, 0x0b, 0xd8, 0xaa, 0x8e, 0xc4, 0x26, 0x2d, 0xba, 0x06, 0xc3, 0x91, 0xdf, 0x60, 0xcf, 0x38, + 0xa9, 0x98, 0x77, 0x22, 0x3f, 0xdc, 0xe7, 0x86, 0x22, 0xd3, 0xd5, 0xe6, 0xaa, 0x28, 0xd6, 0xf9, + 0xa0, 0x0d, 0x3e, 0xdf, 0x59, 0xae, 0x11, 0x12, 0x8a, 0x0c, 0xca, 0xa7, 0xf3, 0x3c, 0xee, 0x18, + 0x99, 0xb9, 0x1c, 0x44, 0x49, 0xac, 0xb3, 0x41, 0x97, 0x60, 0xb2, 0x15, 0xb8, 0x3e, 0x9b, 0x13, + 0xca, 0xd8, 0x3a, 0x6d, 0x66, 0x16, 0xac, 0x24, 0x09, 0x70, 0xba, 0x0c, 0x8b, 0x8d, 0x21, 0x80, + 0xd3, 0x27, 0x79, 0x76, 0x24, 0x7e, 0x95, 0xe6, 0x30, 0xac, 0xb0, 0x68, 0x8d, 0xed, 0xc4, 0x5c, + 0x0b, 0x34, 0x3d, 0x93, 0x1f, 0xba, 0x4c, 0xd7, 0x16, 0x71, 0xe1, 0x55, 0xfd, 0xc5, 0x31, 0x07, + 0x54, 0xd7, 0x92, 0x12, 0xd3, 0x2b, 0x40, 0x38, 0x7d, 0xaa, 0x83, 0xcb, 0x67, 0xe2, 0x56, 0x16, + 0x0b, 0x04, 0x06, 0x38, 0xc4, 0x09, 0x9e, 0xe8, 0x23, 0x30, 0x21, 0xe2, 0xd0, 0xc6, 0xdd, 0x74, + 0x3a, 0x7e, 0x16, 0x83, 0x13, 0x38, 0x9c, 0xa2, 0xe6, 0xd9, 0x89, 0x9c, 0x9b, 0x0d, 0x22, 0xb6, + 0xbe, 0x55, 0xd7, 0xdb, 0x09, 0xa7, 0xcf, 0xb0, 0xfd, 0x41, 0x64, 0x27, 0x4a, 0x62, 0x71, 0x46, + 0x09, 0xb4, 0x01, 0x13, 0xad, 0x80, 0x90, 0x26, 0x13, 0xf4, 0xc5, 0x79, 0x56, 0xe6, 0xa1, 0x61, + 0x68, 0x4b, 0x2a, 0x09, 0xdc, 0xbd, 0x0c, 0x18, 0x4e, 0x71, 0x40, 0xb7, 0x60, 0xc8, 0xdf, 0x25, + 0xc1, 0x36, 0x71, 0xea, 0xd3, 0x67, 0x3b, 0x3c, 0xd6, 0x12, 0x87, 0xdb, 0x55, 0x41, 0x9b, 0xf0, + 0x65, 0x91, 0xe0, 0xee, 0xbe, 0x2c, 0xb2, 0x32, 0xf4, 0x7f, 0x5a, 0x70, 0x52, 0x5a, 0x87, 0xaa, + 0x2d, 0xda, 0xeb, 0x0b, 0xbe, 0x17, 0x46, 0x01, 0x0f, 0x66, 0xf2, 0x48, 0x7e, 0x80, 0x8f, 0x8d, + 0x9c, 0x42, 0x4a, 0x11, 0x7d, 0x32, 0x8f, 0x22, 0xc4, 0xf9, 0x35, 0xd2, 0xab, 0x69, 0x48, 0x22, + 0xb9, 0x19, 0xcd, 0x85, 0xcb, 0x6f, 0x2c, 0xae, 0x4f, 0x3f, 0xca, 0x23, 0xb1, 0xd0, 0xc5, 0x50, + 0x4d, 0x22, 0x71, 0x9a, 0x1e, 0x5d, 0x84, 0x82, 0x1f, 0x4e, 0x3f, 0xd6, 0x21, 0x8f, 0xb5, 0x5f, + 0xbf, 0x5a, 0xe5, 0x3e, 0x8d, 0x57, 0xab, 0xb8, 0xe0, 0x87, 0x32, 0x43, 0x10, 0xbd, 0x8f, 0x85, + 0xd3, 0x8f, 0x73, 0xb5, 0xa5, 0xcc, 0x10, 0xc4, 0x80, 0x38, 0xc6, 0xa3, 0x6d, 0x18, 0x0f, 0x8d, + 0x7b, 0x6f, 0x38, 0x7d, 0x8e, 0xf5, 0xd4, 0xe3, 0x79, 0x83, 0x66, 0x50, 0x6b, 0xa9, 0x3b, 0x4c, + 0x2e, 0x38, 0xc9, 0x96, 0xaf, 0x2e, 0xed, 0xe6, 0x1d, 0x4e, 0x3f, 0xd1, 0x65, 0x75, 0x69, 0xc4, + 0xfa, 0xea, 0xd2, 0x79, 0xe0, 0x04, 0xcf, 0x99, 0xef, 0x82, 0xc9, 0x94, 0xb8, 0x74, 0x10, 0xff, + 0xfd, 0x99, 0x1d, 0x18, 0x35, 0xa6, 0xe4, 0x03, 0x75, 0xef, 0xf8, 0x9d, 0x12, 0x94, 0x94, 0xd9, + 0x1d, 0x5d, 0x30, 0x3d, 0x3a, 0x4e, 0x26, 0x3d, 0x3a, 0x86, 0x2a, 0x7e, 0xdd, 0x70, 0xe2, 0xd8, + 0xc8, 0x88, 0xd7, 0x99, 0xb7, 0x01, 0xf6, 0xfe, 0xc8, 0x48, 0x33, 0x25, 0x14, 0x7b, 0x76, 0x0d, + 0xe9, 0xeb, 0x68, 0x9d, 0xb8, 0x04, 0x93, 0x9e, 0xcf, 0x64, 0x74, 0x52, 0x97, 0x02, 0x18, 0x93, + 0xb3, 0x4a, 0x7a, 0x00, 0xac, 0x04, 0x01, 0x4e, 0x97, 0xa1, 0x15, 0x72, 0x41, 0x29, 0x69, 0x0e, + 0xe1, 0x72, 0x14, 0x16, 0x58, 0x7a, 0x37, 0xe4, 0xbf, 0xc2, 0xe9, 0x89, 0xfc, 0xbb, 0x21, 0x2f, + 0x94, 0x14, 0xc6, 0x42, 0x29, 0x8c, 0x31, 0xed, 0x7f, 0xcb, 0xaf, 0xaf, 0x54, 0x84, 0x98, 0xaf, + 0x45, 0xd2, 0xae, 0xaf, 0x54, 0x30, 0xc7, 0xa1, 0x39, 0x18, 0x60, 0x3f, 0xc2, 0xe9, 0x91, 0xfc, + 0x80, 0x49, 0xac, 0x84, 0x96, 0xa1, 0x90, 0x15, 0xc0, 0xa2, 0x20, 0xd3, 0xee, 0xd2, 0xbb, 0x11, + 0xd3, 0xee, 0x0e, 0xde, 0xa7, 0x76, 0x57, 0x32, 0xc0, 0x31, 0x2f, 0x74, 0x1b, 0x8e, 0x1b, 0xf7, + 0x51, 0xf5, 0xea, 
0x0a, 0xf2, 0x0d, 0xbf, 0x09, 0xe2, 0xf9, 0xd3, 0xa2, 0xd1, 0xc7, 0x57, 0xb2, + 0x38, 0xe1, 0xec, 0x0a, 0x50, 0x03, 0x26, 0x6b, 0xa9, 0x5a, 0x87, 0x7a, 0xaf, 0x55, 0xcd, 0x8b, + 0x74, 0x8d, 0x69, 0xc6, 0xe8, 0x15, 0x18, 0x7a, 0xdb, 0xe7, 0x4e, 0x5a, 0xe2, 0x6a, 0x22, 0x23, + 0x7e, 0x0c, 0xbd, 0x71, 0xb5, 0xca, 0xe0, 0xf7, 0xf6, 0xcb, 0xc3, 0x15, 0xbf, 0x2e, 0xff, 0x62, + 0x55, 0x00, 0xfd, 0x80, 0x05, 0x33, 0xe9, 0x0b, 0xaf, 0x6a, 0xf4, 0x68, 0xef, 0x8d, 0xb6, 0x45, + 0xa5, 0x33, 0x4b, 0xb9, 0xec, 0x70, 0x87, 0xaa, 0xd0, 0x87, 0xe8, 0x7a, 0x0a, 0xdd, 0x3b, 0x44, + 0xa4, 0x77, 0x7e, 0x24, 0x5e, 0x4f, 0x14, 0x7a, 0x6f, 0xbf, 0x3c, 0xce, 0x77, 0x46, 0xf7, 0x8e, + 0x8a, 0xf9, 0xcd, 0x0b, 0xa0, 0xef, 0x86, 0xe3, 0x41, 0x5a, 0x83, 0x4a, 0xa4, 0x10, 0xfe, 0x54, + 0x2f, 0xbb, 0x6c, 0x72, 0xc0, 0x71, 0x16, 0x43, 0x9c, 0x5d, 0x8f, 0xfd, 0xab, 0x16, 0xd3, 0x6f, + 0x8b, 0x66, 0x91, 0xb0, 0xdd, 0x38, 0x8a, 0xa4, 0xf2, 0x4b, 0x86, 0xed, 0xf8, 0xbe, 0x3d, 0x9b, + 0xfe, 0xb1, 0xc5, 0x3c, 0x9b, 0x8e, 0xf0, 0x8d, 0xd6, 0x1b, 0x30, 0x14, 0xc9, 0x64, 0xff, 0x1d, + 0xf2, 0xe0, 0x6b, 0x8d, 0x62, 0xde, 0x5d, 0xea, 0x92, 0xa3, 0xf2, 0xfa, 0x2b, 0x36, 0xf6, 0x3f, + 0xe0, 0x23, 0x20, 0x31, 0x47, 0x60, 0xa2, 0x5b, 0x34, 0x4d, 0x74, 0xe5, 0x2e, 0x5f, 0x90, 0x63, + 0xaa, 0xfb, 0xfb, 0x66, 0xbb, 0x99, 0x72, 0xef, 0xdd, 0xee, 0x52, 0x67, 0x7f, 0xc1, 0x02, 0x88, + 0x93, 0x2c, 0xf4, 0x90, 0xce, 0xf5, 0x25, 0x7a, 0xad, 0xf1, 0x23, 0xbf, 0xe6, 0x37, 0x84, 0x81, + 0xe2, 0x54, 0x6c, 0x25, 0xe4, 0xf0, 0x7b, 0xda, 0x6f, 0xac, 0xa8, 0x51, 0x59, 0x46, 0x3d, 0x2d, + 0xc6, 0x76, 0x6b, 0x23, 0xe2, 0xe9, 0x97, 0x2d, 0x38, 0x96, 0xe5, 0xf0, 0x4f, 0x2f, 0xc9, 0x5c, + 0xcd, 0xa9, 0xdc, 0x1d, 0xd5, 0x68, 0x5e, 0x17, 0x70, 0xac, 0x28, 0x7a, 0xce, 0x93, 0x7b, 0xb0, + 0x04, 0x00, 0x57, 0x61, 0xb4, 0x12, 0x10, 0x4d, 0xbe, 0x78, 0x8d, 0x47, 0xd2, 0xe1, 0xed, 0x79, + 0xe6, 0xc0, 0x51, 0x74, 0xec, 0xaf, 0x14, 0xe0, 0x18, 0x77, 0xda, 0x99, 0xdb, 0xf5, 0xdd, 0x7a, + 0xc5, 0xaf, 0x8b, 0x67, 0x9a, 0x6f, 0xc2, 0x48, 0x4b, 0xd3, 0x4d, 0x77, 0x0a, 0x66, 0xad, 0xeb, + 0xb0, 0x63, 0x6d, 0x9a, 0x0e, 0xc5, 0x06, 0x2f, 0x54, 0x87, 0x11, 0xb2, 0xeb, 0xd6, 0x94, 0xe7, + 0x47, 0xe1, 0xc0, 0x87, 0xb4, 0xaa, 0x65, 0x49, 0xe3, 0x83, 0x0d, 0xae, 0x3d, 0xbb, 0xda, 0x6a, + 0x22, 0x5a, 0x5f, 0x17, 0x6f, 0x8f, 0x1f, 0xb5, 0xe0, 0xa1, 0x9c, 0xd0, 0xd7, 0xb4, 0xba, 0x5b, + 0xcc, 0x3d, 0x4a, 0x4c, 0x5b, 0x55, 0x1d, 0x77, 0x9a, 0xc2, 0x02, 0x8b, 0x3e, 0x0a, 0xc0, 0x9d, + 0x9e, 0x88, 0x57, 0xeb, 0x1a, 0x23, 0xd8, 0x08, 0x6f, 0xaa, 0x45, 0xaa, 0x94, 0xe5, 0xb1, 0xc6, + 0xcb, 0xfe, 0x72, 0x1f, 0xf4, 0x33, 0x27, 0x1b, 0x54, 0x81, 0xc1, 0x6d, 0x9e, 0x25, 0xae, 0xe3, + 0xb8, 0x51, 0x5a, 0x99, 0x78, 0x2e, 0x1e, 0x37, 0x0d, 0x8a, 0x25, 0x1b, 0xb4, 0x06, 0x53, 0x3c, + 0x59, 0x5f, 0x63, 0x91, 0x34, 0x9c, 0x3d, 0xa9, 0xf6, 0xe5, 0xf9, 0xe7, 0x95, 0xfa, 0x7b, 0x25, + 0x4d, 0x82, 0xb3, 0xca, 0xa1, 0xd7, 0x60, 0x8c, 0x5e, 0xc3, 0xfd, 0x76, 0x24, 0x39, 0xf1, 0x34, + 0x7d, 0xea, 0x66, 0xb2, 0x61, 0x60, 0x71, 0x82, 0x1a, 0xbd, 0x02, 0xa3, 0xad, 0x94, 0x82, 0xbb, + 0x3f, 0xd6, 0x04, 0x99, 0x4a, 0x6d, 0x93, 0x96, 0xf9, 0xfc, 0xb7, 0xd9, 0x0b, 0x87, 0x8d, 0xed, + 0x80, 0x84, 0xdb, 0x7e, 0xa3, 0xce, 0x24, 0xe0, 0x7e, 0xcd, 0xe7, 0x3f, 0x81, 0xc7, 0xa9, 0x12, + 0x94, 0xcb, 0xa6, 0xe3, 0x36, 0xda, 0x01, 0x89, 0xb9, 0x0c, 0x98, 0x5c, 0x96, 0x13, 0x78, 0x9c, + 0x2a, 0xd1, 0x5d, 0x73, 0x3f, 0x78, 0x38, 0x9a, 0x7b, 0xfb, 0xa7, 0x0b, 0x60, 0x0c, 0xed, 0x77, + 0x70, 0xfa, 0xc0, 0x57, 0xa1, 0x6f, 0x2b, 0x68, 0xd5, 0x84, 0x43, 0x59, 0xe6, 0x97, 0xc5, 0xb9, + 0xc3, 0xf9, 0x97, 0xd1, 0xff, 0x98, 0x95, 
0xa2, 0x6b, 0xfc, 0x78, 0x25, 0xf0, 0xe9, 0x21, 0x27, + 0x63, 0x2d, 0xaa, 0xa7, 0x35, 0x83, 0x32, 0x48, 0x44, 0x87, 0xa8, 0xc4, 0xe2, 0x7d, 0x00, 0xe7, + 0x60, 0xf8, 0x5e, 0x55, 0x45, 0x28, 0x18, 0xc9, 0x05, 0x5d, 0x84, 0x61, 0x91, 0xd1, 0x8d, 0xbd, + 0x00, 0xe1, 0x8b, 0x89, 0xf9, 0x8a, 0x2d, 0xc6, 0x60, 0xac, 0xd3, 0xd8, 0x3f, 0x58, 0x80, 0xa9, + 0x8c, 0x27, 0x7c, 0xfc, 0x18, 0xd9, 0x72, 0xc3, 0x48, 0xa5, 0x27, 0xd7, 0x8e, 0x11, 0x0e, 0xc7, + 0x8a, 0x82, 0xee, 0x55, 0xfc, 0xa0, 0x4a, 0x1e, 0x4e, 0xe2, 0x89, 0x8c, 0xc0, 0x1e, 0x30, 0xd1, + 0xf7, 0x59, 0xe8, 0x6b, 0x87, 0x44, 0xc6, 0x13, 0x57, 0xc7, 0x36, 0x33, 0x6b, 0x33, 0x0c, 0xbd, + 0x02, 0x6e, 0x29, 0x0b, 0xb1, 0x76, 0x05, 0xe4, 0x36, 0x62, 0x8e, 0xa3, 0x8d, 0x8b, 0x88, 0xe7, + 0x78, 0x91, 0xb8, 0x28, 0xc6, 0x81, 0x71, 0x19, 0x14, 0x0b, 0xac, 0xfd, 0xa5, 0x22, 0x9c, 0xcc, + 0x7d, 0xd4, 0x4b, 0x9b, 0xde, 0xf4, 0x3d, 0x37, 0xf2, 0x95, 0x13, 0x1e, 0x0f, 0x86, 0x4b, 0x5a, + 0xdb, 0x6b, 0x02, 0x8e, 0x15, 0x05, 0x3a, 0x07, 0xfd, 0x4c, 0x29, 0x9e, 0x4a, 0xd4, 0x3e, 0xbf, + 0xc8, 0xa3, 0x23, 0x72, 0xb4, 0x76, 0xaa, 0x17, 0x3b, 0x9e, 0xea, 0x8f, 0x52, 0x09, 0xc6, 0x6f, + 0x24, 0x0f, 0x14, 0xda, 0x5c, 0xdf, 0x6f, 0x60, 0x86, 0x44, 0x8f, 0x8b, 0xfe, 0x4a, 0x78, 0x9d, + 0x61, 0xa7, 0xee, 0x87, 0x5a, 0xa7, 0x3d, 0x09, 0x83, 0x3b, 0x64, 0x2f, 0x70, 0xbd, 0xad, 0xa4, + 0x37, 0xe2, 0x15, 0x0e, 0xc6, 0x12, 0x6f, 0xe6, 0x0c, 0x1e, 0x3c, 0x8c, 0x9c, 0xc1, 0xfa, 0x0c, + 0x18, 0xea, 0x2a, 0x9e, 0xfc, 0x50, 0x11, 0xc6, 0xf1, 0xfc, 0xe2, 0x7b, 0x03, 0x71, 0x2d, 0x3d, + 0x10, 0x87, 0x91, 0x5a, 0xf7, 0x60, 0xa3, 0xf1, 0x4b, 0x16, 0x8c, 0xb3, 0xbc, 0x72, 0x22, 0x22, + 0x87, 0xeb, 0x7b, 0x47, 0x70, 0x15, 0x78, 0x14, 0xfa, 0x03, 0x5a, 0x69, 0x32, 0x43, 0x3b, 0x6b, + 0x09, 0xe6, 0x38, 0x74, 0x0a, 0xfa, 0x58, 0x13, 0xe8, 0xe0, 0x8d, 0xf0, 0x2d, 0x78, 0xd1, 0x89, + 0x1c, 0xcc, 0xa0, 0x2c, 0x36, 0x20, 0x26, 0xad, 0x86, 0xcb, 0x1b, 0x1d, 0xbb, 0x2c, 0xbc, 0x3b, + 0xc2, 0x7d, 0x64, 0x36, 0xed, 0x9d, 0xc5, 0x06, 0xcc, 0x66, 0xd9, 0xf9, 0x9a, 0xfd, 0x17, 0x05, + 0x38, 0x93, 0x59, 0xae, 0xe7, 0xd8, 0x80, 0x9d, 0x4b, 0x3f, 0xc8, 0x14, 0x59, 0xc5, 0x23, 0xf4, + 0xf5, 0xee, 0xeb, 0x55, 0xfa, 0xef, 0xef, 0x21, 0x64, 0x5f, 0x66, 0x97, 0xbd, 0x4b, 0x42, 0xf6, + 0x65, 0xb6, 0x2d, 0x47, 0x4d, 0xf0, 0x37, 0x85, 0x9c, 0x6f, 0x61, 0x0a, 0x83, 0xf3, 0x74, 0x9f, + 0x61, 0xc8, 0x50, 0x5e, 0xc2, 0xf9, 0x1e, 0xc3, 0x61, 0x58, 0x61, 0xd1, 0x1c, 0x8c, 0x37, 0x5d, + 0x8f, 0x6e, 0x3e, 0x7b, 0xa6, 0x28, 0xae, 0x6c, 0x19, 0x6b, 0x26, 0x1a, 0x27, 0xe9, 0x91, 0xab, + 0x85, 0xf3, 0xe3, 0x5f, 0xf7, 0xca, 0x81, 0x56, 0xdd, 0xac, 0xe9, 0xce, 0xa1, 0x7a, 0x31, 0x23, + 0xb4, 0xdf, 0x9a, 0xa6, 0x27, 0x2a, 0xf6, 0xae, 0x27, 0x1a, 0xc9, 0xd6, 0x11, 0xcd, 0xbc, 0x02, + 0xa3, 0xf7, 0x6d, 0x1b, 0xb1, 0xbf, 0x51, 0x84, 0x87, 0x3b, 0x2c, 0x7b, 0xbe, 0xd7, 0x1b, 0x63, + 0xa0, 0xed, 0xf5, 0xa9, 0x71, 0xa8, 0xc0, 0xb1, 0xcd, 0x76, 0xa3, 0xb1, 0xc7, 0x1e, 0x35, 0x91, + 0xba, 0xa4, 0x10, 0x32, 0xa5, 0x54, 0x8e, 0x1c, 0x5b, 0xce, 0xa0, 0xc1, 0x99, 0x25, 0xe9, 0x15, + 0x8b, 0x9e, 0x24, 0x7b, 0x8a, 0x55, 0xe2, 0x8a, 0x85, 0x75, 0x24, 0x36, 0x69, 0xd1, 0x25, 0x98, + 0x74, 0x76, 0x1d, 0x97, 0xe7, 0x44, 0x90, 0x0c, 0xf8, 0x1d, 0x4b, 0xe9, 0xa2, 0xe7, 0x92, 0x04, + 0x38, 0x5d, 0x06, 0xbd, 0x0e, 0xc8, 0xbf, 0xc9, 0x1e, 0x4a, 0xd4, 0x2f, 0x11, 0x4f, 0x58, 0xdd, + 0xd9, 0xd8, 0x15, 0xe3, 0x2d, 0xe1, 0x6a, 0x8a, 0x02, 0x67, 0x94, 0x4a, 0x04, 0x96, 0x1b, 0xc8, + 0x0f, 0x2c, 0xd7, 0x79, 0x5f, 0xec, 0x9a, 0x9d, 0xed, 0x22, 0x8c, 0x1e, 0xd0, 0xfd, 0xd7, 0xfe, + 0xb7, 0x16, 0x28, 0x05, 0xb1, 0x19, 0x18, 0xfa, 0x15, 0xe6, 0x9f, 
0xcc, 0x55, 0xdb, 0x5a, 0x2c, + 0xa8, 0xe3, 0x9a, 0x7f, 0x72, 0x8c, 0xc4, 0x26, 0x2d, 0x9f, 0x43, 0x9a, 0x5f, 0xb1, 0x71, 0x2b, + 0x10, 0x71, 0x2b, 0x15, 0x05, 0xfa, 0x18, 0x0c, 0xd6, 0xdd, 0x5d, 0x37, 0x14, 0xca, 0xb1, 0x03, + 0x1b, 0xe3, 0xe2, 0xad, 0x73, 0x91, 0xb3, 0xc1, 0x92, 0x9f, 0xfd, 0x43, 0x85, 0xb8, 0x4f, 0xde, + 0x68, 0xfb, 0x91, 0x73, 0x04, 0x27, 0xf9, 0x25, 0xe3, 0x24, 0x7f, 0x3c, 0x7b, 0xa0, 0xb5, 0x26, + 0xe5, 0x9e, 0xe0, 0x57, 0x13, 0x27, 0xf8, 0x13, 0xdd, 0x59, 0x75, 0x3e, 0xb9, 0xff, 0xa1, 0x05, + 0x93, 0x06, 0xfd, 0x11, 0x1c, 0x20, 0xcb, 0xe6, 0x01, 0xf2, 0x48, 0xd7, 0x6f, 0xc8, 0x39, 0x38, + 0xbe, 0xbf, 0x98, 0x68, 0x3b, 0x3b, 0x30, 0xde, 0x86, 0xbe, 0x6d, 0x27, 0xa8, 0x77, 0x4a, 0x59, + 0x94, 0x2a, 0x34, 0x7b, 0xd9, 0x09, 0x84, 0xa7, 0xc2, 0x33, 0xb2, 0xd7, 0x29, 0xa8, 0xab, 0x97, + 0x02, 0xab, 0x0a, 0xbd, 0x04, 0x03, 0x61, 0xcd, 0x6f, 0xa9, 0x37, 0x53, 0x2c, 0xe5, 0x6f, 0x95, + 0x41, 0xee, 0xed, 0x97, 0x91, 0x59, 0x1d, 0x05, 0x63, 0x41, 0x8f, 0xde, 0x84, 0x51, 0xf6, 0x4b, + 0xb9, 0x0d, 0x16, 0xf3, 0x35, 0x18, 0x55, 0x9d, 0x90, 0xfb, 0xd4, 0x1a, 0x20, 0x6c, 0xb2, 0x9a, + 0xd9, 0x82, 0x92, 0xfa, 0xac, 0x07, 0x6a, 0xed, 0xfe, 0x57, 0x45, 0x98, 0xca, 0x98, 0x73, 0x28, + 0x34, 0x46, 0xe2, 0x62, 0x8f, 0x53, 0xf5, 0x1d, 0x8e, 0x45, 0xc8, 0x2e, 0x50, 0x75, 0x31, 0xb7, + 0x7a, 0xae, 0xf4, 0x5a, 0x48, 0x92, 0x95, 0x52, 0x50, 0xf7, 0x4a, 0x69, 0x65, 0x47, 0xd6, 0xd5, + 0xb4, 0x22, 0xd5, 0xd2, 0x07, 0x3a, 0xa6, 0xbf, 0xd1, 0x07, 0xc7, 0xb2, 0xe2, 0x09, 0xa3, 0xcf, + 0x24, 0xf2, 0x88, 0xbf, 0xd0, 0xa9, 0x87, 0xf5, 0x92, 0x3c, 0xb9, 0xb8, 0x08, 0xe3, 0x39, 0x6b, + 0x66, 0x16, 0xef, 0xda, 0xcd, 0xa2, 0x4e, 0x16, 0x5e, 0x27, 0xe0, 0xf9, 0xdf, 0xe5, 0xf6, 0xf1, + 0x81, 0x9e, 0x1b, 0x20, 0x12, 0xc7, 0x87, 0x09, 0x97, 0x24, 0x09, 0xee, 0xee, 0x92, 0x24, 0x6b, + 0x46, 0x2b, 0x30, 0x50, 0xe3, 0xbe, 0x2e, 0xc5, 0xee, 0x5b, 0x18, 0x77, 0x74, 0x51, 0x1b, 0xb0, + 0x70, 0x70, 0x11, 0x0c, 0x66, 0x5c, 0x18, 0xd6, 0x3a, 0xe6, 0x81, 0x4e, 0x9e, 0x1d, 0x7a, 0xf0, + 0x69, 0x5d, 0xf0, 0x40, 0x27, 0xd0, 0x8f, 0x5a, 0x90, 0x78, 0xf0, 0xa2, 0x94, 0x72, 0x56, 0xae, + 0x52, 0xee, 0x2c, 0xf4, 0x05, 0x7e, 0x83, 0x24, 0x93, 0x54, 0x63, 0xbf, 0x41, 0x30, 0xc3, 0x50, + 0x8a, 0x28, 0x56, 0xb5, 0x8c, 0xe8, 0xd7, 0x48, 0x71, 0x41, 0x7c, 0x14, 0xfa, 0x1b, 0x64, 0x97, + 0x34, 0x92, 0xb9, 0x04, 0x57, 0x29, 0x10, 0x73, 0x9c, 0xfd, 0x4b, 0x7d, 0x70, 0xba, 0x63, 0xac, + 0x2b, 0x7a, 0x19, 0xdb, 0x72, 0x22, 0x72, 0xcb, 0xd9, 0x4b, 0x26, 0xfd, 0xba, 0xc4, 0xc1, 0x58, + 0xe2, 0xd9, 0xf3, 0x4f, 0x9e, 0xbb, 0x23, 0xa1, 0xc2, 0x14, 0x29, 0x3b, 0x04, 0xd6, 0x54, 0x89, + 0x15, 0x0f, 0x43, 0x25, 0xf6, 0x1c, 0x40, 0x18, 0x36, 0xb8, 0x5b, 0x60, 0x5d, 0xbc, 0x2b, 0x8d, + 0x73, 0xbc, 0x54, 0x57, 0x05, 0x06, 0x6b, 0x54, 0x68, 0x11, 0x26, 0x5a, 0x81, 0x1f, 0x71, 0x8d, + 0xf0, 0x22, 0xf7, 0x9c, 0xed, 0x37, 0xc3, 0x0c, 0x55, 0x12, 0x78, 0x9c, 0x2a, 0x81, 0x5e, 0x84, + 0x61, 0x11, 0x7a, 0xa8, 0xe2, 0xfb, 0x0d, 0xa1, 0x84, 0x52, 0xce, 0xa4, 0xd5, 0x18, 0x85, 0x75, + 0x3a, 0xad, 0x18, 0x53, 0x33, 0x0f, 0x66, 0x16, 0xe3, 0xaa, 0x66, 0x8d, 0x2e, 0x11, 0xa6, 0x7c, + 0xa8, 0xa7, 0x30, 0xe5, 0xb1, 0x5a, 0xae, 0xd4, 0xb3, 0xd5, 0x13, 0xba, 0x2a, 0xb2, 0xbe, 0xda, + 0x07, 0x53, 0x62, 0xe2, 0x3c, 0xe8, 0xe9, 0x72, 0x2d, 0x3d, 0x5d, 0x0e, 0x43, 0x71, 0xf7, 0xde, + 0x9c, 0x39, 0xea, 0x39, 0xf3, 0xc3, 0x16, 0x98, 0x92, 0x1a, 0xfa, 0xdf, 0x73, 0xb3, 0x26, 0xbe, + 0x98, 0x2b, 0xf9, 0xc5, 0x31, 0x8c, 0xdf, 0x59, 0xfe, 0x44, 0xfb, 0x5f, 0x5b, 0xf0, 0x48, 0x57, + 0x8e, 0x68, 0x09, 0x4a, 0x4c, 0x9c, 0xd4, 0x2e, 0x7a, 0x4f, 0x28, 0xcf, 0x7a, 0x89, 0xc8, 
0x91, + 0x6e, 0xe3, 0x92, 0x68, 0x29, 0x95, 0x9e, 0xf2, 0xc9, 0x8c, 0xf4, 0x94, 0xc7, 0x8d, 0xee, 0xb9, + 0xcf, 0xfc, 0x94, 0x5f, 0xa4, 0x27, 0x8e, 0xf1, 0xaa, 0x0d, 0x7d, 0xc0, 0x50, 0x3a, 0xda, 0x09, + 0xa5, 0x23, 0x32, 0xa9, 0xb5, 0x33, 0xe4, 0x23, 0x30, 0xc1, 0x62, 0x12, 0xb2, 0x77, 0x1e, 0xe2, + 0xbd, 0x5d, 0x21, 0xf6, 0xe5, 0x5e, 0x4d, 0xe0, 0x70, 0x8a, 0xda, 0xfe, 0xb3, 0x22, 0x0c, 0xf0, + 0xe5, 0x77, 0x04, 0xd7, 0xcb, 0xa7, 0xa1, 0xe4, 0x36, 0x9b, 0x6d, 0x9e, 0x71, 0xb0, 0x3f, 0xf6, + 0x0c, 0x5e, 0x91, 0x40, 0x1c, 0xe3, 0xd1, 0xb2, 0xd0, 0x77, 0x77, 0x08, 0x7b, 0xcc, 0x1b, 0x3e, + 0xbb, 0xe8, 0x44, 0x0e, 0x97, 0x95, 0xd4, 0x39, 0x1b, 0x6b, 0xc6, 0xd1, 0x27, 0x01, 0xc2, 0x28, + 0x70, 0xbd, 0x2d, 0x0a, 0x13, 0xb1, 0xf1, 0x9f, 0xea, 0xc0, 0xad, 0xaa, 0x88, 0x39, 0xcf, 0x78, + 0xcf, 0x51, 0x08, 0xac, 0x71, 0x44, 0xb3, 0xc6, 0x49, 0x3f, 0x93, 0x18, 0x3b, 0xe0, 0x5c, 0xe3, + 0x31, 0x9b, 0xf9, 0x20, 0x94, 0x14, 0xf3, 0x6e, 0xda, 0xaf, 0x11, 0x5d, 0x2c, 0xfa, 0x30, 0x8c, + 0x27, 0xda, 0x76, 0x20, 0xe5, 0xd9, 0x2f, 0x5b, 0x30, 0xce, 0x1b, 0xb3, 0xe4, 0xed, 0x8a, 0xd3, + 0xe0, 0x0e, 0x1c, 0x6b, 0x64, 0xec, 0xca, 0x62, 0xf8, 0x7b, 0xdf, 0xc5, 0x95, 0xb2, 0x2c, 0x0b, + 0x8b, 0x33, 0xeb, 0x40, 0xe7, 0xe9, 0x8a, 0xa3, 0xbb, 0xae, 0xd3, 0x10, 0xf1, 0x0d, 0x46, 0xf8, + 0x6a, 0xe3, 0x30, 0xac, 0xb0, 0xf6, 0x1f, 0x59, 0x30, 0xc9, 0x5b, 0x7e, 0x85, 0xec, 0xa9, 0xbd, + 0xe9, 0x5b, 0xd9, 0x76, 0x91, 0xeb, 0xb6, 0x90, 0x93, 0xeb, 0x56, 0xff, 0xb4, 0x62, 0xc7, 0x4f, + 0xfb, 0x8a, 0x05, 0x62, 0x86, 0x1c, 0x81, 0x3e, 0xe3, 0xbb, 0x4c, 0x7d, 0xc6, 0x4c, 0xfe, 0x22, + 0xc8, 0x51, 0x64, 0xfc, 0xb5, 0x05, 0x13, 0x9c, 0x20, 0xb6, 0xd5, 0x7f, 0x4b, 0xc7, 0x61, 0xde, + 0xfc, 0xa2, 0x4c, 0xe7, 0xcb, 0x2b, 0x64, 0x6f, 0xc3, 0xaf, 0x38, 0xd1, 0x76, 0xf6, 0x47, 0x19, + 0x83, 0xd5, 0xd7, 0x71, 0xb0, 0xea, 0x72, 0x01, 0x19, 0xa9, 0xe0, 0xba, 0x04, 0x08, 0x38, 0x68, + 0x2a, 0x38, 0xfb, 0xcf, 0x2d, 0x40, 0xbc, 0x1a, 0x43, 0x70, 0xa3, 0xe2, 0x10, 0x83, 0x6a, 0x07, + 0x5d, 0xbc, 0x35, 0x29, 0x0c, 0xd6, 0xa8, 0x0e, 0xa5, 0x7b, 0x12, 0x0e, 0x17, 0xc5, 0xee, 0x0e, + 0x17, 0x07, 0xe8, 0xd1, 0x7f, 0x3e, 0x00, 0xc9, 0x97, 0x7d, 0xe8, 0x3a, 0x8c, 0xd4, 0x9c, 0x96, + 0x73, 0xd3, 0x6d, 0xb8, 0x91, 0x4b, 0xc2, 0x4e, 0xde, 0x58, 0x0b, 0x1a, 0x9d, 0x30, 0x91, 0x6b, + 0x10, 0x6c, 0xf0, 0x41, 0xb3, 0x00, 0xad, 0xc0, 0xdd, 0x75, 0x1b, 0x64, 0x8b, 0xa9, 0x5d, 0x58, + 0x44, 0x15, 0xee, 0x1a, 0x26, 0xa1, 0x58, 0xa3, 0xc8, 0x08, 0xa3, 0x50, 0x7c, 0xc0, 0x61, 0x14, + 0xe0, 0xc8, 0xc2, 0x28, 0xf4, 0x1d, 0x28, 0x8c, 0xc2, 0xd0, 0x81, 0xc3, 0x28, 0xf4, 0xf7, 0x14, + 0x46, 0x01, 0xc3, 0x09, 0x29, 0x7b, 0xd2, 0xff, 0xcb, 0x6e, 0x83, 0x88, 0x0b, 0x07, 0x0f, 0x03, + 0x33, 0x73, 0x77, 0xbf, 0x7c, 0x02, 0x67, 0x52, 0xe0, 0x9c, 0x92, 0xe8, 0xa3, 0x30, 0xed, 0x34, + 0x1a, 0xfe, 0x2d, 0x35, 0xa8, 0x4b, 0x61, 0xcd, 0x69, 0x70, 0x13, 0xc8, 0x20, 0xe3, 0x7a, 0xea, + 0xee, 0x7e, 0x79, 0x7a, 0x2e, 0x87, 0x06, 0xe7, 0x96, 0x46, 0xaf, 0x42, 0xa9, 0x15, 0xf8, 0xb5, + 0x35, 0xed, 0xf9, 0xf1, 0x19, 0xda, 0x81, 0x15, 0x09, 0xbc, 0xb7, 0x5f, 0x1e, 0x55, 0x7f, 0xd8, + 0x81, 0x1f, 0x17, 0xc8, 0x88, 0x8b, 0x30, 0x7c, 0xa8, 0x71, 0x11, 0x76, 0x60, 0xaa, 0x4a, 0x02, + 0xd7, 0x69, 0xb8, 0x77, 0xa8, 0xbc, 0x2c, 0xf7, 0xa7, 0x0d, 0x28, 0x05, 0x89, 0x1d, 0xb9, 0xa7, + 0x50, 0xc4, 0x5a, 0x36, 0x2e, 0xb9, 0x03, 0xc7, 0x8c, 0xec, 0xff, 0x66, 0xc1, 0xa0, 0x78, 0xc9, + 0x77, 0x04, 0x52, 0xe3, 0x9c, 0x61, 0x94, 0x28, 0x67, 0x77, 0x18, 0x6b, 0x4c, 0xae, 0x39, 0x62, + 0x25, 0x61, 0x8e, 0x78, 0xa4, 0x13, 0x93, 0xce, 0x86, 0x88, 0xff, 0xaf, 0x48, 0xa5, 0x77, 0xe3, + 0x4d, 0xf9, 0x83, 
0xef, 0x82, 0x75, 0x18, 0x0c, 0xc5, 0x9b, 0xe6, 0x42, 0xfe, 0x6b, 0x90, 0xe4, + 0x20, 0xc6, 0x5e, 0x74, 0xe2, 0x15, 0xb3, 0x64, 0x92, 0xf9, 0x58, 0xba, 0xf8, 0x00, 0x1f, 0x4b, + 0x77, 0x7b, 0x75, 0xdf, 0x77, 0x18, 0xaf, 0xee, 0xed, 0xaf, 0xb3, 0x93, 0x53, 0x87, 0x1f, 0x81, + 0x50, 0x75, 0xc9, 0x3c, 0x63, 0xed, 0x0e, 0x33, 0x4b, 0x34, 0x2a, 0x47, 0xb8, 0xfa, 0x45, 0x0b, + 0x4e, 0x67, 0x7c, 0x95, 0x26, 0x69, 0x3d, 0x03, 0x43, 0x4e, 0xbb, 0xee, 0xaa, 0xb5, 0xac, 0x99, + 0x26, 0xe7, 0x04, 0x1c, 0x2b, 0x0a, 0xb4, 0x00, 0x93, 0xe4, 0x76, 0xcb, 0xe5, 0x86, 0x5c, 0xdd, + 0xf9, 0xb8, 0xc8, 0x9f, 0x7f, 0x2e, 0x25, 0x91, 0x38, 0x4d, 0xaf, 0x02, 0x44, 0x15, 0x73, 0x03, + 0x44, 0xfd, 0xbc, 0x05, 0xc3, 0xea, 0x55, 0xef, 0x03, 0xef, 0xed, 0x8f, 0x98, 0xbd, 0xfd, 0x70, + 0x87, 0xde, 0xce, 0xe9, 0xe6, 0x3f, 0x28, 0xa8, 0xf6, 0x56, 0xfc, 0x20, 0xea, 0x41, 0x82, 0xbb, + 0xff, 0x87, 0x13, 0x17, 0x61, 0xd8, 0x69, 0xb5, 0x24, 0x42, 0x7a, 0xc0, 0xb1, 0xc0, 0xf2, 0x31, + 0x18, 0xeb, 0x34, 0xea, 0x1d, 0x47, 0x31, 0xf7, 0x1d, 0x47, 0x1d, 0x20, 0x72, 0x82, 0x2d, 0x12, + 0x51, 0x98, 0x70, 0xd8, 0xcd, 0xdf, 0x6f, 0xda, 0x91, 0xdb, 0x98, 0x75, 0xbd, 0x28, 0x8c, 0x82, + 0xd9, 0x15, 0x2f, 0xba, 0x1a, 0xf0, 0x2b, 0xa4, 0x16, 0x62, 0x4d, 0xf1, 0xc2, 0x1a, 0x5f, 0x19, + 0xc1, 0x82, 0xd5, 0xd1, 0x6f, 0xba, 0x52, 0xac, 0x0b, 0x38, 0x56, 0x14, 0xf6, 0x07, 0xd9, 0xe9, + 0xc3, 0xfa, 0xf4, 0x60, 0xe1, 0xc5, 0x7e, 0x72, 0x44, 0x8d, 0x06, 0x33, 0x8a, 0x2e, 0xea, 0x41, + 0xcc, 0x3a, 0x6f, 0xf6, 0xb4, 0x62, 0xfd, 0x45, 0x64, 0x1c, 0xe9, 0x0c, 0x7d, 0x3c, 0xe5, 0x1e, + 0xf3, 0x6c, 0x97, 0x53, 0xe3, 0x00, 0x0e, 0x31, 0x2c, 0xcb, 0x14, 0xcb, 0xc1, 0xb3, 0x52, 0x11, + 0xeb, 0x42, 0xcb, 0x32, 0x25, 0x10, 0x38, 0xa6, 0xa1, 0xc2, 0x94, 0xfa, 0x13, 0x4e, 0xa3, 0x38, + 0x18, 0xb1, 0xa2, 0x0e, 0xb1, 0x46, 0x81, 0x2e, 0x08, 0x85, 0x02, 0xb7, 0x0b, 0x3c, 0x9c, 0x50, + 0x28, 0xc8, 0xee, 0xd2, 0xb4, 0x40, 0x17, 0x61, 0x98, 0xdc, 0x8e, 0x48, 0xe0, 0x39, 0x0d, 0x5a, + 0x43, 0x7f, 0x1c, 0x3f, 0x73, 0x29, 0x06, 0x63, 0x9d, 0x06, 0x6d, 0xc0, 0x78, 0xc8, 0xf5, 0x6c, + 0x2a, 0x04, 0x3e, 0xd7, 0x57, 0x3e, 0xa5, 0xde, 0x53, 0x9b, 0xe8, 0x7b, 0x0c, 0xc4, 0x77, 0x27, + 0x19, 0x65, 0x22, 0xc9, 0x02, 0xbd, 0x06, 0x63, 0x0d, 0xdf, 0xa9, 0xcf, 0x3b, 0x0d, 0xc7, 0xab, + 0xb1, 0xfe, 0x19, 0x32, 0x73, 0x95, 0xaf, 0x1a, 0x58, 0x9c, 0xa0, 0xa6, 0xc2, 0x9b, 0x0e, 0x11, + 0x61, 0xda, 0x1c, 0x6f, 0x8b, 0x84, 0xd3, 0x25, 0xf6, 0x55, 0x4c, 0x78, 0x5b, 0xcd, 0xa1, 0xc1, + 0xb9, 0xa5, 0xd1, 0x4b, 0x30, 0x22, 0x3f, 0x5f, 0x0b, 0xca, 0x12, 0x3f, 0x89, 0xd1, 0x70, 0xd8, + 0xa0, 0x44, 0x21, 0x1c, 0x97, 0xff, 0x37, 0x02, 0x67, 0x73, 0xd3, 0xad, 0x89, 0x48, 0x05, 0xfc, + 0xf9, 0xf0, 0x87, 0xe5, 0x5b, 0xc5, 0xa5, 0x2c, 0xa2, 0x7b, 0xfb, 0xe5, 0x53, 0xa2, 0xd7, 0x32, + 0xf1, 0x38, 0x9b, 0x37, 0x5a, 0x83, 0xa9, 0x6d, 0xe2, 0x34, 0xa2, 0xed, 0x85, 0x6d, 0x52, 0xdb, + 0x91, 0x0b, 0x8e, 0x85, 0x79, 0xd1, 0x9e, 0x8e, 0x5c, 0x4e, 0x93, 0xe0, 0xac, 0x72, 0xe8, 0x2d, + 0x98, 0x6e, 0xb5, 0x6f, 0x36, 0xdc, 0x70, 0x7b, 0xdd, 0x8f, 0x98, 0x13, 0xd2, 0x5c, 0xbd, 0x1e, + 0x90, 0x90, 0xbf, 0x2e, 0x65, 0x47, 0xaf, 0x0c, 0xa4, 0x53, 0xc9, 0xa1, 0xc3, 0xb9, 0x1c, 0xd0, + 0x1d, 0x38, 0x9e, 0x98, 0x08, 0x22, 0x22, 0xc6, 0x58, 0x7e, 0x02, 0x9c, 0x6a, 0x56, 0x01, 0x11, + 0x5c, 0x26, 0x0b, 0x85, 0xb3, 0xab, 0x40, 0x2f, 0x03, 0xb8, 0xad, 0x65, 0xa7, 0xe9, 0x36, 0xe8, + 0x55, 0x71, 0x8a, 0xcd, 0x11, 0x7a, 0x6d, 0x80, 0x95, 0x8a, 0x84, 0xd2, 0xbd, 0x59, 0xfc, 0xdb, + 0xc3, 0x1a, 0x35, 0x5a, 0x85, 0x31, 0xf1, 0x6f, 0x4f, 0x0c, 0xe9, 0xa4, 0xca, 0x95, 0x38, 0x26, + 0x4b, 0xa8, 0x71, 0x4c, 0x40, 0x70, 0xa2, 
0x2c, 0xda, 0x82, 0xd3, 0x32, 0x51, 0xa3, 0x3e, 0x3f, + 0xe5, 0x18, 0x84, 0x2c, 0xeb, 0xcc, 0x10, 0x7f, 0x95, 0x32, 0xd7, 0x89, 0x10, 0x77, 0xe6, 0x43, + 0xcf, 0x75, 0x7d, 0x9a, 0xf3, 0x37, 0xc7, 0xc7, 0xe3, 0x88, 0x83, 0xab, 0x49, 0x24, 0x4e, 0xd3, + 0x23, 0x1f, 0x8e, 0xbb, 0x5e, 0xd6, 0xac, 0x3e, 0xc1, 0x18, 0x7d, 0x88, 0x3f, 0xb7, 0xee, 0x3c, + 0xa3, 0x33, 0xf1, 0x38, 0x9b, 0xef, 0x3b, 0xf3, 0xfb, 0xfb, 0x43, 0x8b, 0x96, 0xd6, 0xa4, 0x73, + 0xf4, 0x29, 0x18, 0xd1, 0x3f, 0x4a, 0x48, 0x1a, 0xe7, 0xb2, 0x85, 0x57, 0x6d, 0x4f, 0xe0, 0xb2, + 0xbd, 0x5a, 0xf7, 0x3a, 0x0e, 0x1b, 0x1c, 0x51, 0x2d, 0x23, 0xb6, 0xc1, 0x85, 0xde, 0x24, 0x99, + 0xde, 0xdd, 0xde, 0x08, 0x64, 0x4f, 0x77, 0xb4, 0x0a, 0x43, 0xb5, 0x86, 0x4b, 0xbc, 0x68, 0xa5, + 0xd2, 0x29, 0x7a, 0xe3, 0x82, 0xa0, 0x11, 0xeb, 0x47, 0x24, 0x90, 0xe1, 0x30, 0xac, 0x38, 0xd8, + 0x2f, 0xc1, 0x70, 0xb5, 0x41, 0x48, 0x8b, 0x3f, 0xdf, 0x41, 0x4f, 0xb2, 0xdb, 0x04, 0x93, 0x07, + 0x2d, 0x26, 0x0f, 0xea, 0x17, 0x05, 0x26, 0x09, 0x4a, 0xbc, 0xfd, 0x5b, 0x05, 0x28, 0x77, 0xc9, + 0x63, 0x94, 0x30, 0x60, 0x59, 0x3d, 0x19, 0xb0, 0xe6, 0x60, 0x3c, 0xfe, 0xa7, 0xeb, 0xc6, 0x94, + 0x0f, 0xec, 0x75, 0x13, 0x8d, 0x93, 0xf4, 0x3d, 0x3f, 0x67, 0xd0, 0x6d, 0x60, 0x7d, 0x5d, 0x1f, + 0xe4, 0x18, 0xb6, 0xef, 0xfe, 0xde, 0x2f, 0xcc, 0xb9, 0x76, 0x4c, 0xfb, 0xeb, 0x05, 0x38, 0xae, + 0xba, 0xf0, 0x3b, 0xb7, 0xe3, 0xae, 0xa5, 0x3b, 0xee, 0x10, 0xac, 0xc0, 0xf6, 0x55, 0x18, 0xe0, + 0x81, 0x2c, 0x7b, 0x10, 0xd4, 0x1f, 0x35, 0xe3, 0x6b, 0x2b, 0xd9, 0xd0, 0x88, 0xb1, 0xfd, 0x03, + 0x16, 0x8c, 0x27, 0xde, 0xc5, 0x21, 0xac, 0x3d, 0x9e, 0xbe, 0x1f, 0x61, 0x3a, 0x4b, 0x4c, 0x3f, + 0x0b, 0x7d, 0xdb, 0x7e, 0x18, 0x25, 0x5d, 0x44, 0x2e, 0xfb, 0x61, 0x84, 0x19, 0xc6, 0xfe, 0x63, + 0x0b, 0xfa, 0x37, 0x1c, 0xd7, 0x8b, 0xa4, 0x39, 0xc1, 0xca, 0x31, 0x27, 0xf4, 0xf2, 0x5d, 0xe8, + 0x45, 0x18, 0x20, 0x9b, 0x9b, 0xa4, 0x16, 0x89, 0x51, 0x95, 0x41, 0x14, 0x06, 0x96, 0x18, 0x94, + 0x4a, 0x8e, 0xac, 0x32, 0xfe, 0x17, 0x0b, 0x62, 0x74, 0x03, 0x4a, 0x91, 0xdb, 0x24, 0x73, 0xf5, + 0xba, 0x30, 0xb2, 0xdf, 0x47, 0xe4, 0x8f, 0x0d, 0xc9, 0x00, 0xc7, 0xbc, 0xec, 0x2f, 0x15, 0x00, + 0xe2, 0x08, 0x60, 0xdd, 0x3e, 0x71, 0x3e, 0x65, 0x7e, 0x3d, 0x97, 0x61, 0x7e, 0x45, 0x31, 0xc3, + 0x0c, 0xdb, 0xab, 0xea, 0xa6, 0x62, 0x4f, 0xdd, 0xd4, 0x77, 0x90, 0x6e, 0x5a, 0x80, 0xc9, 0x38, + 0x82, 0x99, 0x19, 0xc0, 0x91, 0x1d, 0xba, 0x1b, 0x49, 0x24, 0x4e, 0xd3, 0xdb, 0x04, 0xce, 0xaa, + 0x40, 0x4e, 0xe2, 0x2c, 0x64, 0x1e, 0xe4, 0xba, 0x39, 0xbb, 0x4b, 0x3f, 0xc5, 0xf6, 0xe5, 0x42, + 0xae, 0x7d, 0xf9, 0x27, 0x2c, 0x38, 0x96, 0xac, 0x87, 0x3d, 0xb7, 0xfe, 0x82, 0x05, 0xc7, 0xe3, + 0x34, 0x1e, 0x69, 0x9b, 0xfe, 0x0b, 0x1d, 0x83, 0x53, 0xe5, 0xb4, 0x38, 0x8e, 0xd6, 0xb1, 0x96, + 0xc5, 0x1a, 0x67, 0xd7, 0x68, 0xff, 0xd7, 0x3e, 0x98, 0xce, 0x8b, 0x6a, 0xc5, 0x1e, 0x98, 0x38, + 0xb7, 0xab, 0x3b, 0xe4, 0x96, 0x70, 0xe3, 0x8f, 0x1f, 0x98, 0x70, 0x30, 0x96, 0xf8, 0x64, 0xe6, + 0x96, 0x42, 0x8f, 0x99, 0x5b, 0xb6, 0x61, 0xf2, 0xd6, 0x36, 0xf1, 0xae, 0x79, 0xa1, 0x13, 0xb9, + 0xe1, 0xa6, 0xcb, 0x2c, 0xd2, 0x7c, 0xde, 0xc8, 0xec, 0xe3, 0x93, 0x37, 0x92, 0x04, 0xf7, 0xf6, + 0xcb, 0xa7, 0x0d, 0x40, 0xdc, 0x64, 0xbe, 0x91, 0xe0, 0x34, 0xd3, 0x74, 0xe2, 0x9b, 0xbe, 0x07, + 0x9c, 0xf8, 0xa6, 0xe9, 0x0a, 0x3f, 0x16, 0xf9, 0x7a, 0x80, 0xdd, 0x35, 0xd7, 0x14, 0x14, 0x6b, + 0x14, 0xe8, 0x13, 0x80, 0xf4, 0xcc, 0x65, 0x46, 0x50, 0xd1, 0x67, 0xef, 0xee, 0x97, 0xd1, 0x7a, + 0x0a, 0x7b, 0x6f, 0xbf, 0x3c, 0x45, 0xa1, 0x2b, 0x1e, 0xbd, 0xb3, 0xc6, 0x91, 0xd8, 0x32, 0x18, + 0xa1, 0x1b, 0x30, 0x41, 0xa1, 0x6c, 0x45, 0xc9, 0x88, 0xa5, 0xfc, 
0x9e, 0xf9, 0xf4, 0xdd, 0xfd, + 0xf2, 0xc4, 0x7a, 0x02, 0x97, 0xc7, 0x3a, 0xc5, 0x24, 0x23, 0xff, 0xcd, 0x50, 0xaf, 0xf9, 0x6f, + 0xec, 0x2f, 0x58, 0x70, 0x92, 0x1e, 0x70, 0xf5, 0xd5, 0x1c, 0xb3, 0xb4, 0xd3, 0x72, 0xb9, 0xe1, + 0x43, 0x1c, 0x35, 0x4c, 0xc1, 0x56, 0x59, 0xe1, 0x66, 0x0f, 0x85, 0xa5, 0x3b, 0xfc, 0x8e, 0xeb, + 0xd5, 0x93, 0x3b, 0xfc, 0x15, 0xd7, 0xab, 0x63, 0x86, 0x51, 0x47, 0x56, 0x31, 0xf7, 0x11, 0xc3, + 0x57, 0xe9, 0x5a, 0xa5, 0x6d, 0xf9, 0x96, 0x36, 0x03, 0x3d, 0xad, 0x1b, 0x29, 0x85, 0x3f, 0x62, + 0xae, 0x81, 0xf2, 0xf3, 0x16, 0x88, 0x47, 0xcf, 0x3d, 0x9c, 0xc9, 0x6f, 0xc2, 0xc8, 0x6e, 0x3a, + 0xab, 0xe3, 0xd9, 0xfc, 0x57, 0xe0, 0x22, 0x56, 0xbb, 0x12, 0xd1, 0x8d, 0x0c, 0x8e, 0x06, 0x2f, + 0xbb, 0x0e, 0x02, 0xbb, 0x48, 0x98, 0x29, 0xa2, 0x7b, 0x6b, 0x9e, 0x03, 0xa8, 0x33, 0x5a, 0x96, + 0xea, 0xb9, 0x60, 0x4a, 0x5c, 0x8b, 0x0a, 0x83, 0x35, 0x2a, 0xfb, 0x77, 0x0b, 0x30, 0x2c, 0xb3, + 0x08, 0xb6, 0xbd, 0x5e, 0x14, 0x86, 0x07, 0x4a, 0x2b, 0x8e, 0x2e, 0x40, 0x89, 0x69, 0xb4, 0x2b, + 0xb1, 0x9e, 0x55, 0xe9, 0x93, 0xd6, 0x24, 0x02, 0xc7, 0x34, 0x4c, 0x7c, 0x6f, 0xdf, 0x64, 0xe4, + 0x89, 0x27, 0xba, 0x55, 0x0e, 0xc6, 0x12, 0x8f, 0x3e, 0x0a, 0x13, 0xbc, 0x5c, 0xe0, 0xb7, 0x9c, + 0x2d, 0x6e, 0x05, 0xeb, 0x57, 0x71, 0x4f, 0x26, 0xd6, 0x12, 0xb8, 0x7b, 0xfb, 0xe5, 0x63, 0x49, + 0x18, 0x33, 0xef, 0xa6, 0xb8, 0x30, 0x67, 0x37, 0x5e, 0x09, 0xdd, 0xd5, 0x53, 0x3e, 0x72, 0x31, + 0x0a, 0xeb, 0x74, 0xf6, 0xa7, 0x00, 0xa5, 0xf3, 0x29, 0xa2, 0xd7, 0xb9, 0xb3, 0xb4, 0x1b, 0x90, + 0x7a, 0x27, 0x73, 0xaf, 0x1e, 0xdd, 0x43, 0xbe, 0xae, 0xe3, 0xa5, 0xb0, 0x2a, 0x6f, 0xff, 0x60, + 0x1f, 0x4c, 0x24, 0xe3, 0x09, 0xa0, 0xcb, 0x30, 0xc0, 0x45, 0x4a, 0xc1, 0xbe, 0x83, 0x37, 0x91, + 0x16, 0x85, 0x80, 0x1d, 0xae, 0x42, 0x2a, 0x15, 0xe5, 0xd1, 0x5b, 0x30, 0x5c, 0xf7, 0x6f, 0x79, + 0xb7, 0x9c, 0xa0, 0x3e, 0x57, 0x59, 0x11, 0xd3, 0x39, 0x53, 0xc5, 0xb1, 0x18, 0x93, 0xe9, 0x91, + 0x0d, 0x98, 0xe5, 0x3c, 0x46, 0x61, 0x9d, 0x1d, 0xda, 0x60, 0x29, 0x42, 0x36, 0xdd, 0xad, 0x35, + 0xa7, 0xd5, 0xe9, 0xe5, 0xcc, 0x82, 0x24, 0xd2, 0x38, 0x8f, 0x8a, 0x3c, 0x22, 0x1c, 0x81, 0x63, + 0x46, 0xe8, 0x33, 0x30, 0x15, 0xe6, 0x18, 0x5d, 0xf2, 0xd2, 0xeb, 0x76, 0xb2, 0x43, 0xcc, 0x3f, + 0x74, 0x77, 0xbf, 0x3c, 0x95, 0x65, 0x9e, 0xc9, 0xaa, 0x06, 0xdd, 0x06, 0x24, 0x94, 0x9b, 0x1b, + 0x41, 0x3b, 0x8c, 0xe6, 0xdb, 0x5e, 0xbd, 0x21, 0x53, 0x88, 0x64, 0x27, 0xe0, 0x4e, 0x51, 0x6b, + 0x75, 0xb3, 0xf8, 0xa2, 0x69, 0x0a, 0x9c, 0x51, 0x87, 0xfd, 0xf9, 0x3e, 0x98, 0x91, 0x09, 0x4c, + 0x33, 0x5e, 0x08, 0x7c, 0xce, 0x4a, 0x3c, 0x11, 0x78, 0x39, 0x7f, 0x57, 0x7a, 0x60, 0x0f, 0x05, + 0xbe, 0x98, 0x7e, 0x28, 0xf0, 0xea, 0x01, 0x9b, 0x71, 0x68, 0xcf, 0x05, 0xbe, 0x63, 0x7d, 0xfc, + 0xbf, 0x7c, 0x0c, 0x8c, 0x73, 0xc4, 0x48, 0xf8, 0x6f, 0x1d, 0x52, 0xc2, 0x7f, 0x0c, 0x43, 0xa4, + 0xd9, 0x8a, 0xf6, 0x16, 0xdd, 0x40, 0xb4, 0x38, 0x93, 0xe7, 0x92, 0xa0, 0x49, 0xf3, 0x94, 0x18, + 0xac, 0xf8, 0xa0, 0x5d, 0x98, 0xdc, 0xaa, 0x91, 0x44, 0xce, 0xef, 0x62, 0xfe, 0xba, 0xbd, 0xb4, + 0xb0, 0xd4, 0x21, 0xe1, 0x37, 0xbb, 0xa9, 0xa4, 0x48, 0x70, 0xba, 0x0a, 0x96, 0x6f, 0xdc, 0xb9, + 0x15, 0x2e, 0x35, 0x9c, 0x30, 0x72, 0x6b, 0xf3, 0x0d, 0xbf, 0xb6, 0x53, 0x8d, 0xfc, 0x40, 0x26, + 0x1c, 0xcb, 0xbc, 0x28, 0xcc, 0xdd, 0xa8, 0xa6, 0xe8, 0xd3, 0xf9, 0xc6, 0xb3, 0xa8, 0x70, 0x66, + 0x5d, 0x68, 0x1d, 0x06, 0xb7, 0xdc, 0x08, 0x93, 0x96, 0x2f, 0x76, 0x8b, 0xcc, 0xad, 0xf0, 0x12, + 0x27, 0x49, 0xe7, 0xff, 0x16, 0x08, 0x2c, 0x99, 0xa0, 0xd7, 0xd5, 0x21, 0x30, 0x90, 0xaf, 0x2d, + 0x4c, 0x7b, 0x5e, 0x65, 0x1e, 0x03, 0xaf, 0x41, 0xd1, 0xdb, 0x0c, 0x3b, 0xc5, 0x0b, 0x59, 
0x5f, + 0xae, 0xa6, 0xf3, 0x72, 0xaf, 0x2f, 0x57, 0x31, 0x2d, 0xc8, 0x9e, 0x16, 0x86, 0xb5, 0xd0, 0x15, + 0xa9, 0x53, 0x32, 0x5f, 0x5a, 0xae, 0x54, 0x17, 0xaa, 0x2b, 0xe9, 0x5c, 0xe4, 0x0c, 0x8c, 0x79, + 0x71, 0x74, 0x1d, 0x4a, 0x5b, 0x7c, 0xe3, 0xdb, 0x0c, 0x45, 0x12, 0xe3, 0xcc, 0xc3, 0xe8, 0x92, + 0x24, 0x4a, 0x67, 0x20, 0x57, 0x28, 0x1c, 0xb3, 0x42, 0x9f, 0xb7, 0xe0, 0x78, 0x32, 0x0b, 0x34, + 0x7b, 0x10, 0x24, 0x9c, 0x94, 0x5e, 0xec, 0x25, 0x2d, 0x37, 0x2b, 0x60, 0x54, 0xc8, 0x14, 0xfc, + 0x99, 0x64, 0x38, 0xbb, 0x3a, 0xda, 0xd1, 0xc1, 0xcd, 0xba, 0x70, 0x96, 0xc9, 0xec, 0xe8, 0x44, + 0xf0, 0x14, 0xde, 0xd1, 0x78, 0x7e, 0x11, 0xd3, 0x82, 0x68, 0x03, 0x60, 0xb3, 0x41, 0x64, 0xc2, + 0xfa, 0x91, 0xfc, 0xd3, 0x7f, 0x59, 0x51, 0xc9, 0x6c, 0x41, 0x54, 0x26, 0x8c, 0xa1, 0x58, 0xe3, + 0x43, 0xa7, 0x52, 0xcd, 0xf5, 0xea, 0x24, 0x60, 0xe6, 0x93, 0x9c, 0xa9, 0xb4, 0xc0, 0x28, 0xd2, + 0x53, 0x89, 0xc3, 0xb1, 0xe0, 0xc0, 0x78, 0x91, 0xd6, 0xf6, 0x66, 0xd8, 0x29, 0x2c, 0xfe, 0x02, + 0x69, 0x6d, 0x27, 0x26, 0x14, 0xe7, 0xc5, 0xe0, 0x58, 0x70, 0xa0, 0x4b, 0x66, 0x93, 0x2e, 0x20, + 0x12, 0x4c, 0x8f, 0xe7, 0x2f, 0x99, 0x65, 0x4e, 0x92, 0x5e, 0x32, 0x02, 0x81, 0x25, 0x13, 0xf4, + 0x49, 0x53, 0xda, 0x99, 0x60, 0x3c, 0x9f, 0xee, 0x22, 0xed, 0x18, 0x7c, 0x3b, 0xcb, 0x3b, 0x2f, + 0x43, 0x61, 0xb3, 0xc6, 0xcc, 0x2e, 0x39, 0x0a, 0xee, 0xe5, 0x05, 0x83, 0x1b, 0x0b, 0x33, 0xbd, + 0xbc, 0x80, 0x0b, 0x9b, 0x35, 0x3a, 0xf5, 0x9d, 0x3b, 0xed, 0x80, 0x2c, 0xbb, 0x0d, 0x22, 0x42, + 0xe4, 0x67, 0x4e, 0xfd, 0x39, 0x49, 0x94, 0x9e, 0xfa, 0x0a, 0x85, 0x63, 0x56, 0x94, 0x6f, 0x2c, + 0x83, 0x4d, 0xe5, 0xf3, 0x55, 0xa2, 0x56, 0x9a, 0x6f, 0xa6, 0x14, 0xb6, 0x03, 0xa3, 0xbb, 0x61, + 0x6b, 0x9b, 0xc8, 0x5d, 0x91, 0x19, 0x84, 0x72, 0x5e, 0xd3, 0x5f, 0x17, 0x84, 0x6e, 0x10, 0xb5, + 0x9d, 0x46, 0x6a, 0x23, 0x67, 0x7a, 0x80, 0xeb, 0x3a, 0x33, 0x6c, 0xf2, 0xa6, 0x13, 0xe1, 0x6d, + 0x1e, 0xf2, 0x8a, 0x99, 0x86, 0x72, 0x26, 0x42, 0x46, 0x54, 0x2c, 0x3e, 0x11, 0x04, 0x02, 0x4b, + 0x26, 0xaa, 0xb3, 0xd9, 0x01, 0x74, 0xa2, 0x4b, 0x67, 0xa7, 0xda, 0x1b, 0x77, 0x36, 0x3b, 0x70, + 0x62, 0x56, 0xec, 0xa0, 0x69, 0x65, 0x24, 0xcc, 0x9e, 0x7e, 0x28, 0xff, 0xa0, 0xe9, 0x96, 0x60, + 0x9b, 0x1f, 0x34, 0x59, 0x54, 0x38, 0xb3, 0x2e, 0xfa, 0x71, 0x2d, 0x19, 0xbd, 0x4c, 0x84, 0xf1, + 0x7f, 0x32, 0x27, 0xf8, 0x5f, 0x3a, 0xc4, 0x19, 0xff, 0x38, 0x85, 0xc2, 0x31, 0x2b, 0x54, 0x87, + 0xb1, 0x96, 0x11, 0x15, 0x93, 0xa5, 0x23, 0xc8, 0x91, 0x0b, 0xb2, 0xe2, 0x67, 0x72, 0x75, 0x86, + 0x89, 0xc1, 0x09, 0x9e, 0xcc, 0x37, 0x8c, 0x3f, 0xf4, 0x62, 0xd9, 0x0a, 0x72, 0x86, 0x3a, 0xe3, + 0x2d, 0x18, 0x1f, 0x6a, 0x81, 0xc0, 0x92, 0x09, 0xed, 0x0d, 0xf1, 0x3c, 0xc9, 0x0f, 0x59, 0xd2, + 0x8f, 0x3c, 0x13, 0x6e, 0x96, 0x4d, 0x43, 0x86, 0x82, 0x16, 0x28, 0x1c, 0xb3, 0xa2, 0x3b, 0x39, + 0x3d, 0xf0, 0x4e, 0xe5, 0xef, 0xe4, 0xc9, 0xe3, 0x8e, 0xed, 0xe4, 0xf4, 0xb0, 0x2b, 0x8a, 0xa3, + 0x4e, 0x45, 0x2e, 0x66, 0x09, 0x0b, 0x72, 0xda, 0xa5, 0x42, 0x1f, 0xa7, 0xdb, 0xa5, 0x50, 0x38, + 0x66, 0x65, 0xff, 0x60, 0x01, 0xce, 0x74, 0x5e, 0x6f, 0xb1, 0xa1, 0xa6, 0x12, 0x7b, 0xb3, 0x24, + 0x0c, 0x35, 0x5c, 0x6d, 0x10, 0x53, 0xf5, 0x1c, 0xcc, 0xf4, 0x12, 0x4c, 0xaa, 0x47, 0x64, 0x0d, + 0xb7, 0xb6, 0xb7, 0x1e, 0x6b, 0x6a, 0x54, 0xd8, 0x8f, 0x6a, 0x92, 0x00, 0xa7, 0xcb, 0xa0, 0x39, + 0x18, 0x37, 0x80, 0x2b, 0x8b, 0x42, 0x3d, 0x10, 0x87, 0xc8, 0x37, 0xd1, 0x38, 0x49, 0x6f, 0xff, + 0x9c, 0x05, 0x0f, 0xe5, 0xe4, 0x2b, 0xee, 0x39, 0x56, 0xe7, 0x26, 0x8c, 0xb7, 0xcc, 0xa2, 0x5d, + 0xc2, 0x0b, 0x1b, 0x59, 0x91, 0x55, 0x5b, 0x13, 0x08, 0x9c, 0x64, 0x6a, 0xff, 0x4c, 0x01, 0x4e, + 0x77, 0xf4, 0x8a, 
0x46, 0x18, 0x4e, 0x6c, 0x35, 0x43, 0x67, 0x21, 0x20, 0x75, 0xe2, 0x45, 0xae, + 0xd3, 0xa8, 0xb6, 0x48, 0x4d, 0x33, 0xb5, 0x31, 0xf7, 0xe2, 0x4b, 0x6b, 0xd5, 0xb9, 0x34, 0x05, + 0xce, 0x29, 0x89, 0x96, 0x01, 0xa5, 0x31, 0x62, 0x84, 0xd9, 0xd5, 0x34, 0xcd, 0x0f, 0x67, 0x94, + 0x40, 0x1f, 0x84, 0x51, 0xe5, 0x6d, 0xad, 0x8d, 0x38, 0xdb, 0xd8, 0xb1, 0x8e, 0xc0, 0x26, 0x1d, + 0xba, 0xc8, 0x73, 0xa7, 0x88, 0x2c, 0x3b, 0xc2, 0x2e, 0x37, 0x2e, 0x13, 0xa3, 0x08, 0x30, 0xd6, + 0x69, 0xe6, 0x5f, 0xfa, 0xed, 0x6f, 0x9e, 0x79, 0xdf, 0xef, 0x7f, 0xf3, 0xcc, 0xfb, 0xfe, 0xe8, + 0x9b, 0x67, 0xde, 0xf7, 0x3d, 0x77, 0xcf, 0x58, 0xbf, 0x7d, 0xf7, 0x8c, 0xf5, 0xfb, 0x77, 0xcf, + 0x58, 0x7f, 0x74, 0xf7, 0x8c, 0xf5, 0xef, 0xee, 0x9e, 0xb1, 0xbe, 0xf4, 0xa7, 0x67, 0xde, 0xf7, + 0x26, 0x8a, 0xa3, 0xdf, 0x5e, 0xa0, 0xa3, 0x73, 0x61, 0xf7, 0xe2, 0xff, 0x0a, 0x00, 0x00, 0xff, + 0xff, 0x31, 0xd1, 0xcb, 0x48, 0xf3, 0x1a, 0x01, 0x00, } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { @@ -8265,6 +8423,70 @@ func (m *ClientIPConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ClusterTrustBundleProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterTrustBundleProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterTrustBundleProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x22 + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.SignerName != nil { + i -= len(*m.SignerName) + copy(dAtA[i:], *m.SignerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SignerName))) + i-- + dAtA[i] = 0x12 + } + if m.Name != nil { + i -= len(*m.Name) + copy(dAtA[i:], *m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *ComponentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -11705,6 +11927,18 @@ func (m *LifecycleHandler) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Sleep != nil { + { + size, err := m.Sleep.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } if m.TCPSocket != nil { { size, err := m.TCPSocket.MarshalToSizedBuffer(dAtA[:i]) @@ -12125,6 +12359,13 @@ func (m *LoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x22 } } + if m.IPMode != nil { + i -= len(*m.IPMode) + copy(dAtA[i:], *m.IPMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IPMode))) + i-- + dAtA[i] = 0x1a + } i -= len(m.Hostname) copy(dAtA[i:], m.Hostname) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) @@ -12238,6 +12479,39 @@ func (m *LocalVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m 
*ModifyVolumeStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ModifyVolumeStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ModifyVolumeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.TargetVolumeAttributesClassName) + copy(dAtA[i:], m.TargetVolumeAttributesClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetVolumeAttributesClassName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *NFSVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -13757,6 +14031,13 @@ func (m *PersistentVolumeClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, erro _ = i var l int _ = l + if m.VolumeAttributesClassName != nil { + i -= len(*m.VolumeAttributesClassName) + copy(dAtA[i:], *m.VolumeAttributesClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeAttributesClassName))) + i-- + dAtA[i] = 0x4a + } if m.DataSourceRef != nil { { size, err := m.DataSourceRef.MarshalToSizedBuffer(dAtA[:i]) @@ -13854,6 +14135,25 @@ func (m *PersistentVolumeClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, er _ = i var l int _ = l + if m.ModifyVolumeStatus != nil { + { + size, err := m.ModifyVolumeStatus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.CurrentVolumeAttributesClassName != nil { + i -= len(*m.CurrentVolumeAttributesClassName) + copy(dAtA[i:], *m.CurrentVolumeAttributesClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CurrentVolumeAttributesClassName))) + i-- + dAtA[i] = 0x42 + } if len(m.AllocatedResourceStatuses) > 0 { keysForAllocatedResourceStatuses := make([]string, 0, len(m.AllocatedResourceStatuses)) for k := range m.AllocatedResourceStatuses { @@ -14414,6 +14714,13 @@ func (m *PersistentVolumeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.VolumeAttributesClassName != nil { + i -= len(*m.VolumeAttributesClassName) + copy(dAtA[i:], *m.VolumeAttributesClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeAttributesClassName))) + i-- + dAtA[i] = 0x52 + } if m.NodeAffinity != nil { { size, err := m.NodeAffinity.MarshalToSizedBuffer(dAtA[:i]) @@ -14722,6 +15029,24 @@ func (m *PodAffinityTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MismatchLabelKeys) > 0 { + for iNdEx := len(m.MismatchLabelKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MismatchLabelKeys[iNdEx]) + copy(dAtA[i:], m.MismatchLabelKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MismatchLabelKeys[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.MatchLabelKeys) > 0 { + for iNdEx := len(m.MatchLabelKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MatchLabelKeys[iNdEx]) + copy(dAtA[i:], m.MatchLabelKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MatchLabelKeys[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } if m.NamespaceSelector != nil { { size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) @@ -19244,6 +19569,32 @@ func (m *SessionAffinityConfig) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { return len(dAtA) - i, nil } +func (m *SleepAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SleepAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SleepAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *StorageOSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -19950,6 +20301,18 @@ func (m *VolumeProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ClusterTrustBundle != nil { + { + size, err := m.ClusterTrustBundle.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } if m.ServiceAccountToken != nil { { size, err := m.ServiceAccountToken.MarshalToSizedBuffer(dAtA[:i]) @@ -20001,6 +20364,87 @@ func (m *VolumeProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *VolumeResourceRequirements) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeResourceRequirements) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeResourceRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Requests) > 0 { + keysForRequests := make([]string, 0, len(m.Requests)) + for k := range m.Requests { + keysForRequests = append(keysForRequests, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + for iNdEx := len(keysForRequests) - 1; iNdEx >= 0; iNdEx-- { + v := m.Requests[ResourceName(keysForRequests[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForRequests[iNdEx]) + copy(dAtA[i:], keysForRequests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForRequests[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Limits) > 0 { + keysForLimits := make([]string, 0, len(m.Limits)) + for k := range m.Limits { + keysForLimits = append(keysForLimits, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) + for iNdEx := len(keysForLimits) - 1; iNdEx >= 0; iNdEx-- { + v := m.Limits[ResourceName(keysForLimits[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForLimits[iNdEx]) + copy(dAtA[i:], keysForLimits[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLimits[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *VolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() 
dAtA = make([]byte, size) @@ -20893,6 +21337,32 @@ func (m *ClientIPConfig) Size() (n int) { return n } +func (m *ClusterTrustBundleProjection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SignerName != nil { + l = len(*m.SignerName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + func (m *ComponentCondition) Size() (n int) { if m == nil { return 0 @@ -22168,6 +22638,10 @@ func (m *LifecycleHandler) Size() (n int) { l = m.TCPSocket.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Sleep != nil { + l = m.Sleep.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -22299,6 +22773,10 @@ func (m *LoadBalancerIngress) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Hostname) n += 1 + l + sovGenerated(uint64(l)) + if m.IPMode != nil { + l = len(*m.IPMode) + n += 1 + l + sovGenerated(uint64(l)) + } if len(m.Ports) > 0 { for _, e := range m.Ports { l = e.Size() @@ -22349,6 +22827,19 @@ func (m *LocalVolumeSource) Size() (n int) { return n } +func (m *ModifyVolumeStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TargetVolumeAttributesClassName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *NFSVolumeSource) Size() (n int) { if m == nil { return 0 @@ -22930,6 +23421,10 @@ func (m *PersistentVolumeClaimSpec) Size() (n int) { l = m.DataSourceRef.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.VolumeAttributesClassName != nil { + l = len(*m.VolumeAttributesClassName) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -22979,6 +23474,14 @@ func (m *PersistentVolumeClaimStatus) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } + if m.CurrentVolumeAttributesClassName != nil { + l = len(*m.CurrentVolumeAttributesClassName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ModifyVolumeStatus != nil { + l = m.ModifyVolumeStatus.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -23166,6 +23669,10 @@ func (m *PersistentVolumeSpec) Size() (n int) { l = m.NodeAffinity.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.VolumeAttributesClassName != nil { + l = len(*m.VolumeAttributesClassName) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -23259,6 +23766,18 @@ func (m *PodAffinityTerm) Size() (n int) { l = m.NamespaceSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.MatchLabelKeys) > 0 { + for _, s := range m.MatchLabelKeys { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.MismatchLabelKeys) > 0 { + for _, s := range m.MismatchLabelKeys { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -24900,6 +25419,16 @@ func (m *SessionAffinityConfig) Size() (n int) { return n } +func (m *SleepAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Seconds)) + return n +} + func (m *StorageOSPersistentVolumeSource) Size() (n int) { if m == nil { return 0 @@ -25193,6 +25722,37 @@ func (m *VolumeProjection) Size() (n int) { l = m.ServiceAccountToken.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.ClusterTrustBundle != nil { + l = m.ClusterTrustBundle.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeResourceRequirements) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Limits) > 0 { + for k, v := range m.Limits { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Requests) > 0 { + for k, v := range m.Requests { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } return n } @@ -25623,6 +26183,20 @@ func (this *ClientIPConfig) String() string { }, "") return s } +func (this *ClusterTrustBundleProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterTrustBundleProjection{`, + `Name:` + valueToStringGenerated(this.Name) + `,`, + `SignerName:` + valueToStringGenerated(this.SignerName) + `,`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} func (this *ComponentCondition) String() string { if this == nil { return "nil" @@ -26577,6 +27151,7 @@ func (this *LifecycleHandler) String() string { `Exec:` + strings.Replace(this.Exec.String(), "ExecAction", "ExecAction", 1) + `,`, `HTTPGet:` + strings.Replace(this.HTTPGet.String(), "HTTPGetAction", "HTTPGetAction", 1) + `,`, `TCPSocket:` + strings.Replace(this.TCPSocket.String(), "TCPSocketAction", "TCPSocketAction", 1) + `,`, + `Sleep:` + strings.Replace(this.Sleep.String(), "SleepAction", "SleepAction", 1) + `,`, `}`, }, "") return s @@ -26716,6 +27291,7 @@ func (this *LoadBalancerIngress) String() string { s := strings.Join([]string{`&LoadBalancerIngress{`, `IP:` + fmt.Sprintf("%v", this.IP) + `,`, `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `IPMode:` + valueToStringGenerated(this.IPMode) + `,`, `Ports:` + repeatedStringForPorts + `,`, `}`, }, "") @@ -26757,6 +27333,17 @@ func (this *LocalVolumeSource) String() string { }, "") return s } +func (this *ModifyVolumeStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ModifyVolumeStatus{`, + `TargetVolumeAttributesClassName:` + fmt.Sprintf("%v", this.TargetVolumeAttributesClassName) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `}`, + }, "") + return s +} func (this *NFSVolumeSource) String() string { if this == nil { return "nil" @@ -27206,13 +27793,14 @@ func (this *PersistentVolumeClaimSpec) String() string { } s := strings.Join([]string{`&PersistentVolumeClaimSpec{`, `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, - `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "VolumeResourceRequirements", "VolumeResourceRequirements", 1), `&`, ``, 1) + `,`, `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, `DataSource:` + strings.Replace(this.DataSource.String(), 
"TypedLocalObjectReference", "TypedLocalObjectReference", 1) + `,`, `DataSourceRef:` + strings.Replace(this.DataSourceRef.String(), "TypedObjectReference", "TypedObjectReference", 1) + `,`, + `VolumeAttributesClassName:` + valueToStringGenerated(this.VolumeAttributesClassName) + `,`, `}`, }, "") return s @@ -27263,6 +27851,8 @@ func (this *PersistentVolumeClaimStatus) String() string { `Conditions:` + repeatedStringForConditions + `,`, `AllocatedResources:` + mapStringForAllocatedResources + `,`, `AllocatedResourceStatuses:` + mapStringForAllocatedResourceStatuses + `,`, + `CurrentVolumeAttributesClassName:` + valueToStringGenerated(this.CurrentVolumeAttributesClassName) + `,`, + `ModifyVolumeStatus:` + strings.Replace(this.ModifyVolumeStatus.String(), "ModifyVolumeStatus", "ModifyVolumeStatus", 1) + `,`, `}`, }, "") return s @@ -27360,6 +27950,7 @@ func (this *PersistentVolumeSpec) String() string { `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, `NodeAffinity:` + strings.Replace(this.NodeAffinity.String(), "VolumeNodeAffinity", "VolumeNodeAffinity", 1) + `,`, + `VolumeAttributesClassName:` + valueToStringGenerated(this.VolumeAttributesClassName) + `,`, `}`, }, "") return s @@ -27430,6 +28021,8 @@ func (this *PodAffinityTerm) String() string { `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MatchLabelKeys:` + fmt.Sprintf("%v", this.MatchLabelKeys) + `,`, + `MismatchLabelKeys:` + fmt.Sprintf("%v", this.MismatchLabelKeys) + `,`, `}`, }, "") return s @@ -28709,6 +29302,16 @@ func (this *SessionAffinityConfig) String() string { }, "") return s } +func (this *SleepAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SleepAction{`, + `Seconds:` + fmt.Sprintf("%v", this.Seconds) + `,`, + `}`, + }, "") + return s +} func (this *StorageOSPersistentVolumeSource) String() string { if this == nil { return "nil" @@ -28910,6 +29513,38 @@ func (this *VolumeProjection) String() string { `DownwardAPI:` + strings.Replace(this.DownwardAPI.String(), "DownwardAPIProjection", "DownwardAPIProjection", 1) + `,`, `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`, `ServiceAccountToken:` + strings.Replace(this.ServiceAccountToken.String(), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`, + `ClusterTrustBundle:` + strings.Replace(this.ClusterTrustBundle.String(), "ClusterTrustBundleProjection", "ClusterTrustBundleProjection", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeResourceRequirements) String() string { + if this == nil { + return "nil" + } + keysForLimits := make([]string, 0, len(this.Limits)) + for k := range this.Limits { + keysForLimits = append(keysForLimits, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) + mapStringForLimits := "ResourceList{" + for _, k := range keysForLimits { + mapStringForLimits += fmt.Sprintf("%v: %v,", k, this.Limits[ResourceName(k)]) + } + mapStringForLimits += "}" + keysForRequests := make([]string, 0, len(this.Requests)) + for k := range this.Requests { + keysForRequests = append(keysForRequests, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + mapStringForRequests := "ResourceList{" + for _, k := range 
keysForRequests { + mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[ResourceName(k)]) + } + mapStringForRequests += "}" + s := strings.Join([]string{`&VolumeResourceRequirements{`, + `Limits:` + mapStringForLimits + `,`, + `Requests:` + mapStringForRequests + `,`, `}`, }, "") return s @@ -32037,7 +32672,7 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *ComponentCondition) Unmarshal(dAtA []byte) error { +func (m *ClusterTrustBundleProjection) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -32060,15 +32695,15 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") + return fmt.Errorf("proto: ClusterTrustBundleProjection: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ClusterTrustBundleProjection: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -32096,11 +32731,12 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -32128,13 +32764,14 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.SignerName = &s iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32144,27 +32781,31 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + if m.LabelSelector == nil { + m.LabelSelector = &v1.LabelSelector{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -32192,8 +32833,29 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF 
} - m.Error = string(dAtA[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -32215,7 +32877,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } return nil } -func (m *ComponentStatus) Unmarshal(dAtA []byte) error { +func (m *ComponentCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -32238,17 +32900,17 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") + return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32258,28 +32920,206 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 
{ + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ComponentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -43048,6 +43888,42 @@ func (m *LifecycleHandler) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sleep", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Sleep == nil { + m.Sleep = 
&SleepAction{} + } + if err := m.Sleep.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -44323,6 +45199,39 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := LoadBalancerIPMode(dAtA[iNdEx:postIndex]) + m.IPMode = &s + iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) @@ -44659,7 +45568,7 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ModifyVolumeStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -44682,15 +45591,15 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NFSVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ModifyVolumeStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ModifyVolumeStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetVolumeAttributesClassName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -44718,11 +45627,11 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Server = string(dAtA[iNdEx:postIndex]) + m.TargetVolumeAttributesClassName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -44750,28 +45659,8 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Status = PersistentVolumeClaimModifyVolumeStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -44793,7 +45682,7 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func 
(m *Namespace) Unmarshal(dAtA []byte) error { +func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -44816,17 +45705,17 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Namespace: wiretype end group for non-group") + return fmt.Errorf("proto: NFSVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -44836,144 +45725,278 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Server = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamespaceCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamespaceCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamespaceCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Namespace) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Namespace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamespaceCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamespaceCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamespaceCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -49775,6 +50798,39 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributesClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.VolumeAttributesClassName = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -50308,6 +51364,75 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } m.AllocatedResourceStatuses[ResourceName(mapkey)] = ((ClaimResourceStatus)(mapvalue)) iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
CurrentVolumeAttributesClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.CurrentVolumeAttributesClassName = &s + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ModifyVolumeStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ModifyVolumeStatus == nil { + m.ModifyVolumeStatus = &ModifyVolumeStatus{} + } + if err := m.ModifyVolumeStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -51930,6 +53055,39 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributesClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.VolumeAttributesClassName = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -52679,6 +53837,70 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchLabelKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchLabelKeys = append(m.MatchLabelKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MismatchLabelKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MismatchLabelKeys = append(m.MismatchLabelKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -66874,6 +68096,75 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { } return nil } +func (m *SleepAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SleepAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SleepAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -69418,6 +70709,350 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterTrustBundle", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClusterTrustBundle == nil { + m.ClusterTrustBundle = &ClusterTrustBundleProjection{} + } + if err := m.ClusterTrustBundle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeResourceRequirements) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeResourceRequirements: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeResourceRequirements: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Limits == nil { + m.Limits = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Limits[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Requests == nil { + m.Requests = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Requests[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index 901e83731..cf9b6e6eb 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -228,10 +228,8 @@ message CSIPersistentVolumeSource { // nodeExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodeExpandVolume call. - // This is a beta field which is enabled default by CSINodeExpandSecret feature gate. // This field is optional, may be omitted if no secret is required. If the // secret object contains more than one secret, all secrets are passed. 
- // +featureGate=CSINodeExpandSecret // +optional optional SecretReference nodeExpandSecretRef = 10; } @@ -433,6 +431,40 @@ message ClientIPConfig { optional int32 timeoutSeconds = 1; } +// ClusterTrustBundleProjection describes how to select a set of +// ClusterTrustBundle objects and project their contents into the pod +// filesystem. +message ClusterTrustBundleProjection { + // Select a single ClusterTrustBundle by object name. Mutually-exclusive + // with signerName and labelSelector. + // +optional + optional string name = 1; + + // Select all ClusterTrustBundles that match this signer name. + // Mutually-exclusive with name. The contents of all selected + // ClusterTrustBundles will be unified and deduplicated. + // +optional + optional string signerName = 2; + + // Select all ClusterTrustBundles that match this label selector. Only has + // effect if signerName is set. Mutually-exclusive with name. If unset, + // interpreted as "match nothing". If set but empty, interpreted as "match + // everything". + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 3; + + // If true, don't block pod startup if the referenced ClusterTrustBundle(s) + // aren't available. If using name, then the named ClusterTrustBundle is + // allowed not to exist. If using signerName, then the combination of + // signerName and labelSelector is allowed to match zero + // ClusterTrustBundles. + // +optional + optional bool optional = 5; + + // Relative path from the volume root to write the bundle. + optional string path = 4; +} + // Information about the condition of a component. message ComponentCondition { // Type of condition for a component. @@ -1159,7 +1191,7 @@ message EndpointPort { // RFC-6335 and https://www.iana.org/assignments/service-names). // // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // @@ -2088,6 +2120,11 @@ message LifecycleHandler { // lifecycle hooks will fail in runtime when tcp handler is specified. // +optional optional TCPSocketAction tcpSocket = 3; + + // Sleep represents the duration that the container should sleep before being terminated. + // +featureGate=PodLifecycleSleepAction + // +optional + optional SleepAction sleep = 4; } // LimitRange sets resource usage limits for each kind of resource in a Namespace. @@ -2171,6 +2208,15 @@ message LoadBalancerIngress { // +optional optional string hostname = 2; + // IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. + // Setting this to "VIP" indicates that traffic is delivered to the node with + // the destination set to the load-balancer's IP and port. + // Setting this to "Proxy" indicates that traffic is delivered to the node or pod with + // the destination set to the node's IP and node port or the pod's IP and port. + // Service implementations may use this information to adjust traffic routing. 
+ // +optional + optional string ipMode = 3; + // Ports is a list of records of service ports // If used, every port defined in the service should have an entry in it // +listType=atomic @@ -2211,6 +2257,24 @@ message LocalVolumeSource { optional string fsType = 2; } +// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation +message ModifyVolumeStatus { + // targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled + optional string targetVolumeAttributesClassName = 1; + + // status is the status of the ControllerModifyVolume operation. It can be in any of following states: + // - Pending + // Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as + // the specified VolumeAttributesClass not existing. + // - InProgress + // InProgress indicates that the volume is being modified. + // - Infeasible + // Infeasible indicates that the request has been rejected as invalid by the CSI driver. To + // resolve the error, a valid VolumeAttributesClass needs to be specified. + // Note: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately. + optional string status = 2; +} + // Represents an NFS mount that lasts the lifetime of a pod. // NFS volumes do not support ownership management or SELinux relabeling. message NFSVolumeSource { @@ -2816,7 +2880,7 @@ message PersistentVolumeClaimSpec { // status field of the claim. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources // +optional - optional ResourceRequirements resources = 2; + optional VolumeResourceRequirements resources = 2; // volumeName is the binding reference to the PersistentVolume backing this claim. // +optional @@ -2868,6 +2932,22 @@ message PersistentVolumeClaimSpec { // (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. // +optional optional TypedObjectReference dataSourceRef = 8; + + // volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + // If specified, the CSI driver will create or update the volume with the attributes defined + // in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + // it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + // will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + // If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + // will be set by the persistentvolume controller if it exists. + // If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + // set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + // exists. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + // (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + // +featureGate=VolumeAttributesClass + // +optional + optional string volumeAttributesClassName = 9; } // PersistentVolumeClaimStatus is the current status of a persistent volume claim. 
@@ -2957,6 +3037,20 @@ message PersistentVolumeClaimStatus { // +mapType=granular // +optional map allocatedResourceStatuses = 7; + + // currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. + // When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim + // This is an alpha field and requires enabling VolumeAttributesClass feature. + // +featureGate=VolumeAttributesClass + // +optional + optional string currentVolumeAttributesClassName = 8; + + // ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. + // When this is unset, there is no ModifyVolume operation being attempted. + // This is an alpha field and requires enabling VolumeAttributesClass feature. + // +featureGate=VolumeAttributesClass + // +optional + optional ModifyVolumeStatus modifyVolumeStatus = 9; } // PersistentVolumeClaimTemplate is used to produce @@ -3161,6 +3255,17 @@ message PersistentVolumeSpec { // This field influences the scheduling of pods that use this volume. // +optional optional VolumeNodeAffinity nodeAffinity = 9; + + // Name of VolumeAttributesClass to which this persistent volume belongs. Empty value + // is not allowed. When this field is not set, it indicates that this volume does not belong to any + // VolumeAttributesClass. This field is mutable and can be changed by the CSI driver + // after a volume has been updated successfully to a new class. + // For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound + // PersistentVolumeClaims during the binding process. + // This is an alpha field and requires enabling VolumeAttributesClass feature. + // +featureGate=VolumeAttributesClass + // +optional + optional string volumeAttributesClassName = 10; } // PersistentVolumeStatus is the current status of a persistent volume. @@ -3253,6 +3358,7 @@ message PodAffinity { // a pod of the set of pods is running message PodAffinityTerm { // A label query over a set of resources, in this case pods. + // If it's null, this PodAffinityTerm matches with no Pods. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1; @@ -3277,6 +3383,32 @@ message PodAffinityTerm { // An empty selector ({}) matches all namespaces. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 4; + + // MatchLabelKeys is a set of pod label keys to select which pods will + // be taken into consideration. The keys are used to lookup values from the + // incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + // to select the group of existing pods which pods will be taken into consideration + // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + // pod labels will be ignored. The default value is empty. + // The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + // Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + // +listType=atomic + // +optional + repeated string matchLabelKeys = 5; + + // MismatchLabelKeys is a set of pod label keys to select which pods will + // be taken into consideration. 
The keys are used to lookup values from the + // incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + // to select the group of existing pods which pods will be taken into consideration + // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + // pod labels will be ignored. The default value is empty. + // The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + // Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + // +listType=atomic + // +optional + repeated string mismatchLabelKeys = 6; } // Pod anti affinity is a group of inter pod anti affinity scheduling rules. @@ -5250,7 +5382,7 @@ message ServicePort { // RFC-6335 and https://www.iana.org/assignments/service-names). // // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // @@ -5553,6 +5685,12 @@ message SessionAffinityConfig { optional ClientIPConfig clientIP = 1; } +// SleepAction describes a "sleep" action. +message SleepAction { + // Seconds is the number of seconds to sleep. + optional int64 seconds = 1; +} + // Represents a StorageOS persistent volume resource. message StorageOSPersistentVolumeSource { // volumeName is the human-readable name of the StorageOS volume. Volume @@ -5960,6 +6098,39 @@ message VolumeProjection { // serviceAccountToken is information about the serviceAccountToken data to project // +optional optional ServiceAccountTokenProjection serviceAccountToken = 4; + + // ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + // of ClusterTrustBundle objects in an auto-updating file. + // + // Alpha, gated by the ClusterTrustBundleProjection feature gate. + // + // ClusterTrustBundle objects can either be selected by name, or by the + // combination of signer name and a label selector. + // + // Kubelet performs aggressive normalization of the PEM contents written + // into the pod filesystem. Esoteric PEM features such as inter-block + // comments and block headers are stripped. Certificates are deduplicated. + // The ordering of certificates within the file is arbitrary, and Kubelet + // may change the order over time. + // + // +featureGate=ClusterTrustBundleProjection + // +optional + optional ClusterTrustBundleProjection clusterTrustBundle = 5; +} + +// VolumeResourceRequirements describes the storage resource requirements for a volume. +message VolumeResourceRequirements { + // Limits describes the maximum amount of compute resources allowed. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + map limits = 1; + + // Requests describes the minimum amount of compute resources required. + // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + // otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + map requests = 2; } // Represents the source of a volume to mount. diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index 9e05c2235..1aade3806 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -363,6 +363,16 @@ type PersistentVolumeSpec struct { // This field influences the scheduling of pods that use this volume. // +optional NodeAffinity *VolumeNodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,9,opt,name=nodeAffinity"` + // Name of VolumeAttributesClass to which this persistent volume belongs. Empty value + // is not allowed. When this field is not set, it indicates that this volume does not belong to any + // VolumeAttributesClass. This field is mutable and can be changed by the CSI driver + // after a volume has been updated successfully to a new class. + // For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound + // PersistentVolumeClaims during the binding process. + // This is an alpha field and requires enabling VolumeAttributesClass feature. + // +featureGate=VolumeAttributesClass + // +optional + VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,10,opt,name=volumeAttributesClassName"` } // VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from. @@ -486,7 +496,7 @@ type PersistentVolumeClaimSpec struct { // status field of the claim. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources // +optional - Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"` + Resources VolumeResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"` // volumeName is the binding reference to the PersistentVolume backing this claim. // +optional VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"` @@ -533,6 +543,21 @@ type PersistentVolumeClaimSpec struct { // (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. // +optional DataSourceRef *TypedObjectReference `json:"dataSourceRef,omitempty" protobuf:"bytes,8,opt,name=dataSourceRef"` + // volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + // If specified, the CSI driver will create or update the volume with the attributes defined + // in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + // it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + // will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + // If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + // will be set by the persistentvolume controller if it exists. + // If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + // set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + // exists. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + // (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. 
+ // +featureGate=VolumeAttributesClass + // +optional + VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,9,opt,name=volumeAttributesClassName"` } type TypedObjectReference struct { @@ -561,6 +586,11 @@ const ( PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing" // PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending" + + // Applying the target VolumeAttributesClass encountered an error + PersistentVolumeClaimVolumeModifyVolumeError PersistentVolumeClaimConditionType = "ModifyVolumeError" + // Volume is being modified + PersistentVolumeClaimVolumeModifyingVolume PersistentVolumeClaimConditionType = "ModifyingVolume" ) // +enum @@ -587,6 +617,38 @@ const ( PersistentVolumeClaimNodeResizeFailed ClaimResourceStatus = "NodeResizeFailed" ) +// +enum +// New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately +type PersistentVolumeClaimModifyVolumeStatus string + +const ( + // Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as + // the specified VolumeAttributesClass not existing + PersistentVolumeClaimModifyVolumePending PersistentVolumeClaimModifyVolumeStatus = "Pending" + // InProgress indicates that the volume is being modified + PersistentVolumeClaimModifyVolumeInProgress PersistentVolumeClaimModifyVolumeStatus = "InProgress" + // Infeasible indicates that the request has been rejected as invalid by the CSI driver. To + // resolve the error, a valid VolumeAttributesClass needs to be specified + PersistentVolumeClaimModifyVolumeInfeasible PersistentVolumeClaimModifyVolumeStatus = "Infeasible" +) + +// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation +type ModifyVolumeStatus struct { + // targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled + TargetVolumeAttributesClassName string `json:"targetVolumeAttributesClassName,omitempty" protobuf:"bytes,1,opt,name=targetVolumeAttributesClassName"` + // status is the status of the ControllerModifyVolume operation. It can be in any of following states: + // - Pending + // Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as + // the specified VolumeAttributesClass not existing. + // - InProgress + // InProgress indicates that the volume is being modified. + // - Infeasible + // Infeasible indicates that the request has been rejected as invalid by the CSI driver. To + // resolve the error, a valid VolumeAttributesClass needs to be specified. + // Note: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately. 
+ Status PersistentVolumeClaimModifyVolumeStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=PersistentVolumeClaimModifyVolumeStatus"` +} + // PersistentVolumeClaimCondition contains details about state of pvc type PersistentVolumeClaimCondition struct { Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"` @@ -693,6 +755,18 @@ type PersistentVolumeClaimStatus struct { // +mapType=granular // +optional AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty" protobuf:"bytes,7,rep,name=allocatedResourceStatuses"` + // currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. + // When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim + // This is an alpha field and requires enabling VolumeAttributesClass feature. + // +featureGate=VolumeAttributesClass + // +optional + CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty" protobuf:"bytes,8,opt,name=currentVolumeAttributesClassName"` + // ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. + // When this is unset, there is no ModifyVolume operation being attempted. + // This is an alpha field and requires enabling VolumeAttributesClass feature. + // +featureGate=VolumeAttributesClass + // +optional + ModifyVolumeStatus *ModifyVolumeStatus `json:"modifyVolumeStatus,omitempty" protobuf:"bytes,9,opt,name=modifyVolumeStatus"` } // +enum @@ -1763,6 +1837,40 @@ type ServiceAccountTokenProjection struct { Path string `json:"path" protobuf:"bytes,3,opt,name=path"` } +// ClusterTrustBundleProjection describes how to select a set of +// ClusterTrustBundle objects and project their contents into the pod +// filesystem. +type ClusterTrustBundleProjection struct { + // Select a single ClusterTrustBundle by object name. Mutually-exclusive + // with signerName and labelSelector. + // +optional + Name *string `json:"name,omitempty" protobuf:"bytes,1,rep,name=name"` + + // Select all ClusterTrustBundles that match this signer name. + // Mutually-exclusive with name. The contents of all selected + // ClusterTrustBundles will be unified and deduplicated. + // +optional + SignerName *string `json:"signerName,omitempty" protobuf:"bytes,2,rep,name=signerName"` + + // Select all ClusterTrustBundles that match this label selector. Only has + // effect if signerName is set. Mutually-exclusive with name. If unset, + // interpreted as "match nothing". If set but empty, interpreted as "match + // everything". + // +optional + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,3,rep,name=labelSelector"` + + // If true, don't block pod startup if the referenced ClusterTrustBundle(s) + // aren't available. If using name, then the named ClusterTrustBundle is + // allowed not to exist. If using signerName, then the combination of + // signerName and labelSelector is allowed to match zero + // ClusterTrustBundles. + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,5,opt,name=optional"` + + // Relative path from the volume root to write the bundle. 
+ Path string `json:"path" protobuf:"bytes,4,rep,name=path"` +} + // Represents a projected volume source type ProjectedVolumeSource struct { // sources is the list of volume projections @@ -1794,6 +1902,24 @@ type VolumeProjection struct { // serviceAccountToken is information about the serviceAccountToken data to project // +optional ServiceAccountToken *ServiceAccountTokenProjection `json:"serviceAccountToken,omitempty" protobuf:"bytes,4,opt,name=serviceAccountToken"` + + // ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + // of ClusterTrustBundle objects in an auto-updating file. + // + // Alpha, gated by the ClusterTrustBundleProjection feature gate. + // + // ClusterTrustBundle objects can either be selected by name, or by the + // combination of signer name and a label selector. + // + // Kubelet performs aggressive normalization of the PEM contents written + // into the pod filesystem. Esoteric PEM features such as inter-block + // comments and block headers are stripped. Certificates are deduplicated. + // The ordering of certificates within the file is arbitrary, and Kubelet + // may change the order over time. + // + // +featureGate=ClusterTrustBundleProjection + // +optional + ClusterTrustBundle *ClusterTrustBundleProjection `json:"clusterTrustBundle,omitempty" protobuf:"bytes,5,opt,name=clusterTrustBundle"` } const ( @@ -1894,10 +2020,8 @@ type CSIPersistentVolumeSource struct { // nodeExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodeExpandVolume call. - // This is a beta field which is enabled default by CSINodeExpandSecret feature gate. // This field is optional, may be omitted if no secret is required. If the // secret object contains more than one secret, all secrets are passed. - // +featureGate=CSINodeExpandSecret // +optional NodeExpandSecretRef *SecretReference `json:"nodeExpandSecretRef,omitempty" protobuf:"bytes,10,opt,name=nodeExpandSecretRef"` } @@ -2272,6 +2396,12 @@ type ExecAction struct { Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"` } +// SleepAction describes a "sleep" action. +type SleepAction struct { + // Seconds is the number of seconds to sleep. + Seconds int64 `json:"seconds" protobuf:"bytes,1,opt,name=seconds"` +} + // Probe describes a health check to be performed against a container to determine whether it is // alive or ready to receive traffic. type Probe struct { @@ -2417,6 +2547,27 @@ type ResourceRequirements struct { Claims []ResourceClaim `json:"claims,omitempty" protobuf:"bytes,3,opt,name=claims"` } +// VolumeResourceRequirements describes the storage resource requirements for a volume. +type VolumeResourceRequirements struct { + // Limits describes the maximum amount of compute resources allowed. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"` + // Requests describes the minimum amount of compute resources required. + // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + // otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"` + + // Claims got added by accident when volumes shared the ResourceRequirements struct + // with containers. Stripping the field got added in 1.27 and was backported to 1.26. + // Starting with Kubernetes 1.28, this field is not part of the volume API anymore. + // + // Future extensions must not use "claims" or field number 3. + // Claims []ResourceClaim `json:"claims,omitempty" protobuf:"bytes,3,opt,name=claims"` +} + // ResourceClaim references one entry in PodSpec.ResourceClaims. type ResourceClaim struct { // Name must match the name of one entry in pod.spec.resourceClaims of @@ -2646,6 +2797,10 @@ type LifecycleHandler struct { // lifecycle hooks will fail in runtime when tcp handler is specified. // +optional TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"` + // Sleep represents the duration that the container should sleep before being terminated. + // +featureGate=PodLifecycleSleepAction + // +optional + Sleep *SleepAction `json:"sleep,omitempty" protobuf:"bytes,4,opt,name=sleep"` } // Lifecycle describes actions that the management system should take in response to container lifecycle @@ -2845,6 +3000,9 @@ const ( // DisruptionTarget indicates the pod is about to be terminated due to a // disruption (such as preemption, eviction API or garbage-collection). DisruptionTarget PodConditionType = "DisruptionTarget" + // PodReadyToStartContainers pod sandbox is successfully configured and + // the pod is ready to launch containers. + PodReadyToStartContainers PodConditionType = "PodReadyToStartContainers" ) // These are reasons for a pod's transition to a condition. @@ -3136,6 +3294,7 @@ type WeightedPodAffinityTerm struct { // a pod of the set of pods is running type PodAffinityTerm struct { // A label query over a set of resources, in this case pods. + // If it's null, this PodAffinityTerm matches with no Pods. // +optional LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` // namespaces specifies a static list of namespace names that the term applies to. @@ -3157,6 +3316,30 @@ type PodAffinityTerm struct { // An empty selector ({}) matches all namespaces. // +optional NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,4,opt,name=namespaceSelector"` + // MatchLabelKeys is a set of pod label keys to select which pods will + // be taken into consideration. The keys are used to lookup values from the + // incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + // to select the group of existing pods which pods will be taken into consideration + // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + // pod labels will be ignored. The default value is empty. + // The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + // Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
+ // +listType=atomic + // +optional + MatchLabelKeys []string `json:"matchLabelKeys,omitempty" protobuf:"bytes,5,opt,name=matchLabelKeys"` + // MismatchLabelKeys is a set of pod label keys to select which pods will + // be taken into consideration. The keys are used to lookup values from the + // incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + // to select the group of existing pods which pods will be taken into consideration + // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + // pod labels will be ignored. The default value is empty. + // The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + // Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + // +listType=atomic + // +optional + MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty" protobuf:"bytes,6,opt,name=mismatchLabelKeys"` } // Node affinity is a group of node affinity scheduling rules. @@ -4692,6 +4875,15 @@ type LoadBalancerIngress struct { // +optional Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` + // IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. + // Setting this to "VIP" indicates that traffic is delivered to the node with + // the destination set to the load-balancer's IP and port. + // Setting this to "Proxy" indicates that traffic is delivered to the node or pod with + // the destination set to the node's IP and node port or the pod's IP and port. + // Service implementations may use this information to adjust traffic routing. + // +optional + IPMode *LoadBalancerIPMode `json:"ipMode,omitempty" protobuf:"bytes,3,opt,name=ipMode"` + // Ports is a list of records of service ports // If used, every port defined in the service should have an entry in it // +listType=atomic @@ -4709,6 +4901,8 @@ const ( IPv4Protocol IPFamily = "IPv4" // IPv6Protocol indicates that this IP is IPv6 protocol IPv6Protocol IPFamily = "IPv6" + // IPFamilyUnknown indicates that this IP is unknown protocol + IPFamilyUnknown IPFamily = "" ) // IPFamilyPolicy represents the dual-stack-ness requested or required by a Service @@ -5003,7 +5197,7 @@ type ServicePort struct { // RFC-6335 and https://www.iana.org/assignments/service-names). // // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // @@ -5247,7 +5441,7 @@ type EndpointPort struct { // RFC-6335 and https://www.iana.org/assignments/service-names). 
// // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // @@ -7054,3 +7248,15 @@ type PortStatus struct { // +kubebuilder:validation:MaxLength=316 Error *string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` } + +// LoadBalancerIPMode represents the mode of the LoadBalancer ingress IP +type LoadBalancerIPMode string + +const ( + // LoadBalancerIPModeVIP indicates that traffic is delivered to the node with + // the destination set to the load-balancer's IP and port. + LoadBalancerIPModeVIP LoadBalancerIPMode = "VIP" + // LoadBalancerIPModeProxy indicates that traffic is delivered to the node or pod with + // the destination set to the node's IP and port or the pod's IP and port. + LoadBalancerIPModeProxy LoadBalancerIPMode = "Proxy" +) diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 9734d8b41..01152a096 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -127,7 +127,7 @@ var map_CSIPersistentVolumeSource = map[string]string{ "nodeStageSecretRef": "nodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", "nodePublishSecretRef": "nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", "controllerExpandSecretRef": "controllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "nodeExpandSecretRef": "nodeExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeExpandVolume call. This is a beta field which is enabled default by CSINodeExpandSecret feature gate. This field is optional, may be omitted if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "nodeExpandSecretRef": "nodeExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeExpandVolume call. This field is optional, may be omitted if no secret is required. 
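The LoadBalancerIPMode constants defined above ("VIP" and "Proxy") pair with the new ipMode field on LoadBalancerIngress. A sketch of producing and consuming that status follows; the documentation-range IP stands in for a real load-balancer address.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Sketch: a cloud-provider-style controller reporting that traffic to
	// 203.0.113.10 is proxied (destination rewritten to the node or pod IP),
	// using the new ipMode field on LoadBalancer ingress status.
	mode := corev1.LoadBalancerIPModeProxy
	status := corev1.LoadBalancerStatus{
		Ingress: []corev1.LoadBalancerIngress{
			{IP: "203.0.113.10", IPMode: &mode},
		},
	}
	for _, ing := range status.Ingress {
		if ing.IPMode != nil && *ing.IPMode == corev1.LoadBalancerIPModeVIP {
			fmt.Println(ing.IP, "preserves the load-balancer destination")
		} else {
			fmt.Println(ing.IP, "is delivered with a rewritten destination")
		}
	}
}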
If the secret object contains more than one secret, all secrets are passed.", } func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -228,6 +228,19 @@ func (ClientIPConfig) SwaggerDoc() map[string]string { return map_ClientIPConfig } +var map_ClusterTrustBundleProjection = map[string]string{ + "": "ClusterTrustBundleProjection describes how to select a set of ClusterTrustBundle objects and project their contents into the pod filesystem.", + "name": "Select a single ClusterTrustBundle by object name. Mutually-exclusive with signerName and labelSelector.", + "signerName": "Select all ClusterTrustBundles that match this signer name. Mutually-exclusive with name. The contents of all selected ClusterTrustBundles will be unified and deduplicated.", + "labelSelector": "Select all ClusterTrustBundles that match this label selector. Only has effect if signerName is set. Mutually-exclusive with name. If unset, interpreted as \"match nothing\". If set but empty, interpreted as \"match everything\".", + "optional": "If true, don't block pod startup if the referenced ClusterTrustBundle(s) aren't available. If using name, then the named ClusterTrustBundle is allowed not to exist. If using signerName, then the combination of signerName and labelSelector is allowed to match zero ClusterTrustBundles.", + "path": "Relative path from the volume root to write the bundle.", +} + +func (ClusterTrustBundleProjection) SwaggerDoc() map[string]string { + return map_ClusterTrustBundleProjection +} + var map_ComponentCondition = map[string]string{ "": "Information about the condition of a component.", "type": "Type of condition for a component. Valid value: \"Healthy\"", @@ -531,7 +544,7 @@ var map_EndpointPort = map[string]string{ "name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.", "port": "The port number of the endpoint.", "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. 
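The ClusterTrustBundleProjection documented above selects bundles either by name or by signerName plus a label selector. A sketch of a projected volume using the signer-name form, assuming the ClusterTrustBundleProjection feature gate; the signer, labels, and path are placeholders.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Sketch: project every ClusterTrustBundle signed by example.com/signer
	// into ca-certs/bundle.pem inside the pod. Optional=true keeps the pod
	// starting even if no matching bundles exist yet.
	signer := "example.com/signer"
	optional := true
	vol := corev1.Volume{
		Name: "ca-certs",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{
					{
						ClusterTrustBundle: &corev1.ClusterTrustBundleProjection{
							SignerName:    &signer,
							LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"env": "prod"}},
							Optional:      &optional,
							Path:          "bundle.pem",
						},
					},
				},
			},
		},
	}
	fmt.Println(vol.Name, vol.Projected.Sources[0].ClusterTrustBundle.Path)
}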
Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -935,6 +948,7 @@ var map_LifecycleHandler = map[string]string{ "exec": "Exec specifies the action to take.", "httpGet": "HTTPGet specifies the http request to perform.", "tcpSocket": "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.", + "sleep": "Sleep represents the duration that the container should sleep before being terminated.", } func (LifecycleHandler) SwaggerDoc() map[string]string { @@ -988,6 +1002,7 @@ var map_LoadBalancerIngress = map[string]string{ "": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.", "ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)", "hostname": "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)", + "ipMode": "IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. Setting this to \"VIP\" indicates that traffic is delivered to the node with the destination set to the load-balancer's IP and port. Setting this to \"Proxy\" indicates that traffic is delivered to the node or pod with the destination set to the node's IP and node port or the pod's IP and port. Service implementations may use this information to adjust traffic routing.", "ports": "Ports is a list of records of service ports If used, every port defined in the service should have an entry in it", } @@ -1023,6 +1038,16 @@ func (LocalVolumeSource) SwaggerDoc() map[string]string { return map_LocalVolumeSource } +var map_ModifyVolumeStatus = map[string]string{ + "": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation", + "targetVolumeAttributesClassName": "targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled", + "status": "status is the status of the ControllerModifyVolume operation. It can be in any of following states:\n - Pending\n Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as\n the specified VolumeAttributesClass not existing.\n - InProgress\n InProgress indicates that the volume is being modified.\n - Infeasible\n Infeasible indicates that the request has been rejected as invalid by the CSI driver. To\n\t resolve the error, a valid VolumeAttributesClass needs to be specified.\nNote: New statuses can be added in the future. 
Consumers should check for unknown statuses and fail appropriately.", +} + +func (ModifyVolumeStatus) SwaggerDoc() map[string]string { + return map_ModifyVolumeStatus +} + var map_NFSVolumeSource = map[string]string{ "": "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.", "server": "server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", @@ -1339,15 +1364,16 @@ func (PersistentVolumeClaimList) SwaggerDoc() map[string]string { } var map_PersistentVolumeClaimSpec = map[string]string{ - "": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", - "accessModes": "accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", - "selector": "selector is a label query over volumes to consider for binding.", - "resources": "resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", - "volumeName": "volumeName is the binding reference to the PersistentVolume backing this claim.", - "storageClassName": "storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", - "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.", - "dataSource": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.", - "dataSourceRef": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. 
There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", + "": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", + "accessModes": "accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + "selector": "selector is a label query over volumes to consider for binding.", + "resources": "resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", + "volumeName": "volumeName is the binding reference to the PersistentVolume backing this claim.", + "storageClassName": "storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", + "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.", + "dataSource": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.", + "dataSourceRef": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. 
There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", + "volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.", } func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { @@ -1355,13 +1381,15 @@ func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { } var map_PersistentVolumeClaimStatus = map[string]string{ - "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", - "phase": "phase represents the current phase of PersistentVolumeClaim.", - "accessModes": "accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", - "capacity": "capacity represents the actual resources of the underlying volume.", - "conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", - "allocatedResources": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. 
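volumeAttributesClassName, described above, is requested on the claim spec. A sketch of a claim that pins a class, assuming the VolumeAttributesClass feature gate; the class name "gold" is a placeholder and most other spec fields are omitted for brevity.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Sketch: a claim asking for a hypothetical "gold" VolumeAttributesClass.
	// Unlike storageClassName, this field can be changed after the claim is
	// created to trigger a ControllerModifyVolume operation.
	class := "gold"
	pvc := corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "data"},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes:               []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			VolumeAttributesClassName: &class,
		},
	}
	fmt.Println(pvc.Name, *pvc.Spec.VolumeAttributesClassName)
}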
If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", - "allocatedResourceStatuses": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", + "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", + "phase": "phase represents the current phase of PersistentVolumeClaim.", + "accessModes": "accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + "capacity": "capacity represents the actual resources of the underlying volume.", + "conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", + "allocatedResources": "allocatedResources tracks the resources allocated to a PVC including its capacity. 
Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", + "allocatedResourceStatuses": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. 
For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", + "currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is an alpha field and requires enabling VolumeAttributesClass feature.", + "modifyVolumeStatus": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is an alpha field and requires enabling VolumeAttributesClass feature.", } func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string { @@ -1438,6 +1466,7 @@ var map_PersistentVolumeSpec = map[string]string{ "mountOptions": "mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options", "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.", "nodeAffinity": "nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.", + "volumeAttributesClassName": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature.", } func (PersistentVolumeSpec) SwaggerDoc() map[string]string { @@ -1489,10 +1518,12 @@ func (PodAffinity) SwaggerDoc() map[string]string { var map_PodAffinityTerm = map[string]string{ "": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running", - "labelSelector": "A label query over a set of resources, in this case pods.", + "labelSelector": "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods.", "namespaces": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".", "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. 
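currentVolumeAttributesClassName and modifyVolumeStatus surface the progress of a ControllerModifyVolume attempt on the claim status. A sketch of a consumer reading them follows; the values are illustrative, and the status is assigned as a plain string literal since only the Pending, InProgress, and Infeasible states are documented here.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// reportModifyVolume is a sketch of surfacing the new status fields added for
// VolumeAttributesClass support. It only reads fields from the claim status.
func reportModifyVolume(pvc *corev1.PersistentVolumeClaim) {
	if cur := pvc.Status.CurrentVolumeAttributesClassName; cur != nil {
		fmt.Println("current class:", *cur)
	}
	if mvs := pvc.Status.ModifyVolumeStatus; mvs != nil {
		// New states may be added in the future, so unknown values should be
		// handled conservatively rather than rejected.
		fmt.Printf("modifying towards %q: %v\n", mvs.TargetVolumeAttributesClassName, mvs.Status)
	}
}

func main() {
	current := "standard" // illustrative class names
	pvc := &corev1.PersistentVolumeClaim{
		Status: corev1.PersistentVolumeClaimStatus{
			CurrentVolumeAttributesClassName: &current,
			ModifyVolumeStatus: &corev1.ModifyVolumeStatus{
				TargetVolumeAttributesClassName: "gold",
				Status:                          "InProgress", // illustrative state
			},
		},
	}
	reportModifyVolume(pvc)
}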
Empty topologyKey is not allowed.", "namespaceSelector": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces.", + "matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", + "mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", } func (PodAffinityTerm) SwaggerDoc() map[string]string { @@ -2325,7 +2356,7 @@ var map_ServicePort = map[string]string{ "": "ServicePort contains information on service's port.", "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.", "protocol": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.", - "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. 
Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", "port": "The port that will be exposed by this service.", "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", "nodePort": "The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", @@ -2390,6 +2421,15 @@ func (SessionAffinityConfig) SwaggerDoc() map[string]string { return map_SessionAffinityConfig } +var map_SleepAction = map[string]string{ + "": "SleepAction describes a \"sleep\" action.", + "seconds": "Seconds is the number of seconds to sleep.", +} + +func (SleepAction) SwaggerDoc() map[string]string { + return map_SleepAction +} + var map_StorageOSPersistentVolumeSource = map[string]string{ "": "Represents a StorageOS persistent volume resource.", "volumeName": "volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", @@ -2566,12 +2606,23 @@ var map_VolumeProjection = map[string]string{ "downwardAPI": "downwardAPI information about the downwardAPI data to project", "configMap": "configMap information about the configMap data to project", "serviceAccountToken": "serviceAccountToken is information about the serviceAccountToken data to project", + "clusterTrustBundle": "ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file.\n\nAlpha, gated by the ClusterTrustBundleProjection feature gate.\n\nClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector.\n\nKubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. 
The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time.", } func (VolumeProjection) SwaggerDoc() map[string]string { return map_VolumeProjection } +var map_VolumeResourceRequirements = map[string]string{ + "": "VolumeResourceRequirements describes the storage resource requirements for a volume.", + "limits": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", +} + +func (VolumeResourceRequirements) SwaggerDoc() map[string]string { + return map_VolumeResourceRequirements +} + var map_VolumeSource = map[string]string{ "": "Represents the source of a volume to mount. Only one of its members may be specified.", "hostPath": "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index d76f0bbbc..45172e0e2 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -466,6 +466,42 @@ func (in *ClientIPConfig) DeepCopy() *ClientIPConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTrustBundleProjection) DeepCopyInto(out *ClusterTrustBundleProjection) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SignerName != nil { + in, out := &in.SignerName, &out.SignerName + *out = new(string) + **out = **in + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleProjection. +func (in *ClusterTrustBundleProjection) DeepCopy() *ClusterTrustBundleProjection { + if in == nil { + return nil + } + out := new(ClusterTrustBundleProjection) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ComponentCondition) DeepCopyInto(out *ComponentCondition) { *out = *in @@ -2045,6 +2081,11 @@ func (in *LifecycleHandler) DeepCopyInto(out *LifecycleHandler) { *out = new(TCPSocketAction) **out = **in } + if in.Sleep != nil { + in, out := &in.Sleep, &out.Sleep + *out = new(SleepAction) + **out = **in + } return } @@ -2228,6 +2269,11 @@ func (in *List) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
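VolumeResourceRequirements, documented just above, carries only limits and requests; the claims field that volumes accidentally inherited from the shared ResourceRequirements struct (see the note near the top of this series of hunks) is not part of it. A sketch that builds one, assuming the usual apimachinery resource package from this vendor tree:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Sketch: storage requests and limits expressed with the new
	// volume-specific requirements type. The 10Gi/20Gi sizes are placeholders.
	req := corev1.VolumeResourceRequirements{
		Requests: corev1.ResourceList{
			corev1.ResourceStorage: resource.MustParse("10Gi"),
		},
		Limits: corev1.ResourceList{
			corev1.ResourceStorage: resource.MustParse("20Gi"),
		},
	}
	fmt.Println("requested storage:", req.Requests.Storage().String())
}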
func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) { *out = *in + if in.IPMode != nil { + in, out := &in.IPMode, &out.IPMode + *out = new(LoadBalancerIPMode) + **out = **in + } if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]PortStatus, len(*in)) @@ -2308,6 +2354,22 @@ func (in *LocalVolumeSource) DeepCopy() *LocalVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModifyVolumeStatus) DeepCopyInto(out *ModifyVolumeStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModifyVolumeStatus. +func (in *ModifyVolumeStatus) DeepCopy() *ModifyVolumeStatus { + if in == nil { + return nil + } + out := new(ModifyVolumeStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NFSVolumeSource) DeepCopyInto(out *NFSVolumeSource) { *out = *in @@ -3056,6 +3118,11 @@ func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec *out = new(TypedObjectReference) (*in).DeepCopyInto(*out) } + if in.VolumeAttributesClassName != nil { + in, out := &in.VolumeAttributesClassName, &out.VolumeAttributesClassName + *out = new(string) + **out = **in + } return } @@ -3105,6 +3172,16 @@ func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimSt (*out)[key] = val } } + if in.CurrentVolumeAttributesClassName != nil { + in, out := &in.CurrentVolumeAttributesClassName, &out.CurrentVolumeAttributesClassName + *out = new(string) + **out = **in + } + if in.ModifyVolumeStatus != nil { + in, out := &in.ModifyVolumeStatus, &out.ModifyVolumeStatus + *out = new(ModifyVolumeStatus) + **out = **in + } return } @@ -3347,6 +3424,11 @@ func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) { *out = new(VolumeNodeAffinity) (*in).DeepCopyInto(*out) } + if in.VolumeAttributesClassName != nil { + in, out := &in.VolumeAttributesClassName, &out.VolumeAttributesClassName + *out = new(string) + **out = **in + } return } @@ -3472,6 +3554,16 @@ func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) { *out = new(metav1.LabelSelector) (*in).DeepCopyInto(*out) } + if in.MatchLabelKeys != nil { + in, out := &in.MatchLabelKeys, &out.MatchLabelKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.MismatchLabelKeys != nil { + in, out := &in.MismatchLabelKeys, &out.MismatchLabelKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -5681,6 +5773,22 @@ func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SleepAction) DeepCopyInto(out *SleepAction) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SleepAction. +func (in *SleepAction) DeepCopy() *SleepAction { + if in == nil { + return nil + } + out := new(SleepAction) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) { *out = *in @@ -6027,6 +6135,11 @@ func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) { *out = new(ServiceAccountTokenProjection) (*in).DeepCopyInto(*out) } + if in.ClusterTrustBundle != nil { + in, out := &in.ClusterTrustBundle, &out.ClusterTrustBundle + *out = new(ClusterTrustBundleProjection) + (*in).DeepCopyInto(*out) + } return } @@ -6040,6 +6153,36 @@ func (in *VolumeProjection) DeepCopy() *VolumeProjection { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeResourceRequirements) DeepCopyInto(out *VolumeResourceRequirements) { + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeResourceRequirements. +func (in *VolumeResourceRequirements) DeepCopy() *VolumeResourceRequirements { + if in == nil { + return nil + } + out := new(VolumeResourceRequirements) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { *out = *in diff --git a/vendor/k8s.io/api/discovery/v1/generated.proto b/vendor/k8s.io/api/discovery/v1/generated.proto index 490ce8922..6d234017b 100644 --- a/vendor/k8s.io/api/discovery/v1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1/generated.proto @@ -118,7 +118,7 @@ message EndpointHints { // +structType=atomic message EndpointPort { // name represents the name of this port. All ports in an EndpointSlice must have a unique name. - // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. + // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. @@ -145,7 +145,7 @@ message EndpointPort { // RFC-6335 and https://www.iana.org/assignments/service-names). // // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // diff --git a/vendor/k8s.io/api/discovery/v1/types.go b/vendor/k8s.io/api/discovery/v1/types.go index efbb09918..7ebb07ca3 100644 --- a/vendor/k8s.io/api/discovery/v1/types.go +++ b/vendor/k8s.io/api/discovery/v1/types.go @@ -168,7 +168,7 @@ type ForZone struct { // +structType=atomic type EndpointPort struct { // name represents the name of this port. All ports in an EndpointSlice must have a unique name. 
- // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. + // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. @@ -195,7 +195,7 @@ type EndpointPort struct { // RFC-6335 and https://www.iana.org/assignments/service-names). // // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // diff --git a/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go index bef774539..41c306056 100644 --- a/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go @@ -65,10 +65,10 @@ func (EndpointHints) SwaggerDoc() map[string]string { var map_EndpointPort = map[string]string{ "": "EndpointPort represents a Port used by an EndpointSlice", - "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", "protocol": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", "port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", - "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. 
Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.proto b/vendor/k8s.io/api/discovery/v1beta1/generated.proto index 8b6c360b0..ec555a40b 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1beta1/generated.proto @@ -119,7 +119,7 @@ message EndpointHints { // EndpointPort represents a Port used by an EndpointSlice message EndpointPort { // name represents the name of this port. All ports in an EndpointSlice must have a unique name. - // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. + // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. diff --git a/vendor/k8s.io/api/discovery/v1beta1/types.go b/vendor/k8s.io/api/discovery/v1beta1/types.go index f09f7f320..defd8e2ce 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/types.go +++ b/vendor/k8s.io/api/discovery/v1beta1/types.go @@ -172,7 +172,7 @@ type ForZone struct { // EndpointPort represents a Port used by an EndpointSlice type EndpointPort struct { // name represents the name of this port. All ports in an EndpointSlice must have a unique name. - // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. + // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. 
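Several hunks in this patch reword the kubernetes.io/h2c appProtocol hint to cite prior-knowledge HTTP/2 per RFC 9113 rather than RFC 7540. A sketch of a ServicePort carrying that hint; the port name and number are placeholders.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Sketch: declaring that a service port speaks HTTP/2 over cleartext with
	// prior knowledge, using the Kubernetes-defined appProtocol value.
	h2c := "kubernetes.io/h2c"
	port := corev1.ServicePort{
		Name:        "grpc",
		Port:        8080,
		AppProtocol: &h2c,
	}
	fmt.Println(port.Name, *port.AppProtocol)
}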
diff --git a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go index b1d4c306c..847d4d58e 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go @@ -64,7 +64,7 @@ func (EndpointHints) SwaggerDoc() map[string]string { var map_EndpointPort = map[string]string{ "": "EndpointPort represents a Port used by an EndpointSlice", - "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", "protocol": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", "port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", "appProtocol": "appProtocol represents the application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go b/vendor/k8s.io/api/flowcontrol/v1/doc.go similarity index 73% rename from vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go rename to vendor/k8s.io/api/flowcontrol/v1/doc.go index a3d4d0c60..1bc51d406 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go +++ b/vendor/k8s.io/api/flowcontrol/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Kubernetes Authors. +Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,9 +17,8 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true -// +k8s:prerelease-lifecycle-gen=true // +groupName=flowcontrol.apiserver.k8s.io -// Package v1alpha1 holds api types of version v1alpha1 for group "flowcontrol.apiserver.k8s.io". -package v1alpha1 // import "k8s.io/api/flowcontrol/v1alpha1" +// Package v1 holds api types of version v1 for group "flowcontrol.apiserver.k8s.io". 
+package v1 // import "k8s.io/api/flowcontrol/v1" diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1/generated.pb.go similarity index 91% rename from vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go rename to vendor/k8s.io/api/flowcontrol/v1/generated.pb.go index b54e1ceef..c235ba10d 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/flowcontrol/v1/generated.pb.go @@ -15,9 +15,9 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto +// source: k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1/generated.proto -package v1alpha1 +package v1 import ( fmt "fmt" @@ -46,7 +46,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} } func (*ExemptPriorityLevelConfiguration) ProtoMessage() {} func (*ExemptPriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{0} + return fileDescriptor_f8a25df358697d27, []int{0} } func (m *ExemptPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -74,7 +74,7 @@ var xxx_messageInfo_ExemptPriorityLevelConfiguration proto.InternalMessageInfo func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} } func (*FlowDistinguisherMethod) ProtoMessage() {} func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{1} + return fileDescriptor_f8a25df358697d27, []int{1} } func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -102,7 +102,7 @@ var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo func (m *FlowSchema) Reset() { *m = FlowSchema{} } func (*FlowSchema) ProtoMessage() {} func (*FlowSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{2} + return fileDescriptor_f8a25df358697d27, []int{2} } func (m *FlowSchema) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -130,7 +130,7 @@ var xxx_messageInfo_FlowSchema proto.InternalMessageInfo func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} } func (*FlowSchemaCondition) ProtoMessage() {} func (*FlowSchemaCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{3} + return fileDescriptor_f8a25df358697d27, []int{3} } func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -158,7 +158,7 @@ var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} } func (*FlowSchemaList) ProtoMessage() {} func (*FlowSchemaList) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{4} + return fileDescriptor_f8a25df358697d27, []int{4} } func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -186,7 +186,7 @@ var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} } func (*FlowSchemaSpec) ProtoMessage() {} func (*FlowSchemaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{5} + return fileDescriptor_f8a25df358697d27, []int{5} } func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -214,7 +214,7 @@ var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo 
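The flowcontrol.apiserver.k8s.io types move from v1alpha1 to v1 in these renames (the generated files are carried over at roughly 91% similarity), so vendored consumers switch import paths. A minimal sketch under that assumption; the FlowSchema name is a placeholder.

package main

import (
	"fmt"

	flowcontrolv1 "k8s.io/api/flowcontrol/v1"
)

func main() {
	// Sketch: code that previously imported k8s.io/api/flowcontrol/v1alpha1
	// now imports .../flowcontrol/v1; the FlowSchema type itself is moved
	// largely unchanged by the rename.
	fs := flowcontrolv1.FlowSchema{}
	fs.Name = "example-flow-schema" // placeholder name
	fmt.Println(fs.Name)
}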
func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} } func (*FlowSchemaStatus) ProtoMessage() {} func (*FlowSchemaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{6} + return fileDescriptor_f8a25df358697d27, []int{6} } func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -242,7 +242,7 @@ var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo func (m *GroupSubject) Reset() { *m = GroupSubject{} } func (*GroupSubject) ProtoMessage() {} func (*GroupSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{7} + return fileDescriptor_f8a25df358697d27, []int{7} } func (m *GroupSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -270,7 +270,7 @@ var xxx_messageInfo_GroupSubject proto.InternalMessageInfo func (m *LimitResponse) Reset() { *m = LimitResponse{} } func (*LimitResponse) ProtoMessage() {} func (*LimitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{8} + return fileDescriptor_f8a25df358697d27, []int{8} } func (m *LimitResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -298,7 +298,7 @@ var xxx_messageInfo_LimitResponse proto.InternalMessageInfo func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} } func (*LimitedPriorityLevelConfiguration) ProtoMessage() {} func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{9} + return fileDescriptor_f8a25df358697d27, []int{9} } func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -326,7 +326,7 @@ var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} } func (*NonResourcePolicyRule) ProtoMessage() {} func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{10} + return fileDescriptor_f8a25df358697d27, []int{10} } func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -354,7 +354,7 @@ var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} } func (*PolicyRulesWithSubjects) ProtoMessage() {} func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{11} + return fileDescriptor_f8a25df358697d27, []int{11} } func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -382,7 +382,7 @@ var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} } func (*PriorityLevelConfiguration) ProtoMessage() {} func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{12} + return fileDescriptor_f8a25df358697d27, []int{12} } func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -410,7 +410,7 @@ var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} } func (*PriorityLevelConfigurationCondition) ProtoMessage() {} func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{13} + return fileDescriptor_f8a25df358697d27, []int{13} } 
func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -438,7 +438,7 @@ var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInf func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} } func (*PriorityLevelConfigurationList) ProtoMessage() {} func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{14} + return fileDescriptor_f8a25df358697d27, []int{14} } func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -466,7 +466,7 @@ var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} } func (*PriorityLevelConfigurationReference) ProtoMessage() {} func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{15} + return fileDescriptor_f8a25df358697d27, []int{15} } func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -494,7 +494,7 @@ var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInf func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} } func (*PriorityLevelConfigurationSpec) ProtoMessage() {} func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{16} + return fileDescriptor_f8a25df358697d27, []int{16} } func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -522,7 +522,7 @@ var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} } func (*PriorityLevelConfigurationStatus) ProtoMessage() {} func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{17} + return fileDescriptor_f8a25df358697d27, []int{17} } func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -550,7 +550,7 @@ var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} } func (*QueuingConfiguration) ProtoMessage() {} func (*QueuingConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{18} + return fileDescriptor_f8a25df358697d27, []int{18} } func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -578,7 +578,7 @@ var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} } func (*ResourcePolicyRule) ProtoMessage() {} func (*ResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{19} + return fileDescriptor_f8a25df358697d27, []int{19} } func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -606,7 +606,7 @@ var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} } func (*ServiceAccountSubject) ProtoMessage() {} func (*ServiceAccountSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{20} + return fileDescriptor_f8a25df358697d27, []int{20} } func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) 
error { return m.Unmarshal(b) @@ -634,7 +634,7 @@ var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} func (*Subject) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{21} + return fileDescriptor_f8a25df358697d27, []int{21} } func (m *Subject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -662,7 +662,7 @@ var xxx_messageInfo_Subject proto.InternalMessageInfo func (m *UserSubject) Reset() { *m = UserSubject{} } func (*UserSubject) ProtoMessage() {} func (*UserSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{22} + return fileDescriptor_f8a25df358697d27, []int{22} } func (m *UserSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -688,139 +688,137 @@ func (m *UserSubject) XXX_DiscardUnknown() { var xxx_messageInfo_UserSubject proto.InternalMessageInfo func init() { - proto.RegisterType((*ExemptPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.ExemptPriorityLevelConfiguration") - proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowDistinguisherMethod") - proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchema") - proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaCondition") - proto.RegisterType((*FlowSchemaList)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaList") - proto.RegisterType((*FlowSchemaSpec)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaSpec") - proto.RegisterType((*FlowSchemaStatus)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaStatus") - proto.RegisterType((*GroupSubject)(nil), "k8s.io.api.flowcontrol.v1alpha1.GroupSubject") - proto.RegisterType((*LimitResponse)(nil), "k8s.io.api.flowcontrol.v1alpha1.LimitResponse") - proto.RegisterType((*LimitedPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration") - proto.RegisterType((*NonResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1alpha1.NonResourcePolicyRule") - proto.RegisterType((*PolicyRulesWithSubjects)(nil), "k8s.io.api.flowcontrol.v1alpha1.PolicyRulesWithSubjects") - proto.RegisterType((*PriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfiguration") - proto.RegisterType((*PriorityLevelConfigurationCondition)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationCondition") - proto.RegisterType((*PriorityLevelConfigurationList)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationList") - proto.RegisterType((*PriorityLevelConfigurationReference)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationReference") - proto.RegisterType((*PriorityLevelConfigurationSpec)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationSpec") - proto.RegisterType((*PriorityLevelConfigurationStatus)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationStatus") - proto.RegisterType((*QueuingConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.QueuingConfiguration") - proto.RegisterType((*ResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1alpha1.ResourcePolicyRule") - proto.RegisterType((*ServiceAccountSubject)(nil), "k8s.io.api.flowcontrol.v1alpha1.ServiceAccountSubject") - proto.RegisterType((*Subject)(nil), "k8s.io.api.flowcontrol.v1alpha1.Subject") - proto.RegisterType((*UserSubject)(nil), "k8s.io.api.flowcontrol.v1alpha1.UserSubject") + proto.RegisterType((*ExemptPriorityLevelConfiguration)(nil), 
"k8s.io.api.flowcontrol.v1.ExemptPriorityLevelConfiguration") + proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1.FlowDistinguisherMethod") + proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1.FlowSchema") + proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1.FlowSchemaCondition") + proto.RegisterType((*FlowSchemaList)(nil), "k8s.io.api.flowcontrol.v1.FlowSchemaList") + proto.RegisterType((*FlowSchemaSpec)(nil), "k8s.io.api.flowcontrol.v1.FlowSchemaSpec") + proto.RegisterType((*FlowSchemaStatus)(nil), "k8s.io.api.flowcontrol.v1.FlowSchemaStatus") + proto.RegisterType((*GroupSubject)(nil), "k8s.io.api.flowcontrol.v1.GroupSubject") + proto.RegisterType((*LimitResponse)(nil), "k8s.io.api.flowcontrol.v1.LimitResponse") + proto.RegisterType((*LimitedPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1.LimitedPriorityLevelConfiguration") + proto.RegisterType((*NonResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1.NonResourcePolicyRule") + proto.RegisterType((*PolicyRulesWithSubjects)(nil), "k8s.io.api.flowcontrol.v1.PolicyRulesWithSubjects") + proto.RegisterType((*PriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfiguration") + proto.RegisterType((*PriorityLevelConfigurationCondition)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationCondition") + proto.RegisterType((*PriorityLevelConfigurationList)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationList") + proto.RegisterType((*PriorityLevelConfigurationReference)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationReference") + proto.RegisterType((*PriorityLevelConfigurationSpec)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationSpec") + proto.RegisterType((*PriorityLevelConfigurationStatus)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationStatus") + proto.RegisterType((*QueuingConfiguration)(nil), "k8s.io.api.flowcontrol.v1.QueuingConfiguration") + proto.RegisterType((*ResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1.ResourcePolicyRule") + proto.RegisterType((*ServiceAccountSubject)(nil), "k8s.io.api.flowcontrol.v1.ServiceAccountSubject") + proto.RegisterType((*Subject)(nil), "k8s.io.api.flowcontrol.v1.Subject") + proto.RegisterType((*UserSubject)(nil), "k8s.io.api.flowcontrol.v1.UserSubject") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto", fileDescriptor_45ba024d525b289b) -} - -var fileDescriptor_45ba024d525b289b = []byte{ - // 1621 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4d, 0x6f, 0xdb, 0x46, - 0x1a, 0x36, 0x65, 0xc9, 0xb6, 0xc6, 0x9f, 0x19, 0xc7, 0xb0, 0xd6, 0x59, 0x48, 0x0e, 0x17, 0xd8, - 0x64, 0x37, 0x09, 0x15, 0x67, 0x93, 0x6c, 0x16, 0xc1, 0x22, 0x30, 0x93, 0x6c, 0xbe, 0x6c, 0xc7, - 0x1e, 0x27, 0xd9, 0x36, 0x48, 0x81, 0xd0, 0xd4, 0x58, 0x9a, 0x58, 0x22, 0xd9, 0x19, 0x52, 0x8e, - 0x8b, 0x1c, 0x0a, 0xf4, 0x0f, 0xf4, 0x07, 0xe4, 0xd8, 0x43, 0x6f, 0x05, 0x7a, 0xed, 0xa5, 0xc7, - 0xa0, 0xe8, 0x21, 0xc7, 0x9c, 0x84, 0x58, 0xbd, 0xf6, 0x07, 0xb4, 0x39, 0x14, 0xc5, 0x0c, 0x87, - 0xa4, 0x28, 0x89, 0xa2, 0x52, 0x03, 0x39, 0xf5, 0x66, 0xbe, 0x1f, 0xcf, 0x3b, 0xf3, 0xce, 0xfb, - 0xf1, 0xc8, 0xe0, 0xf6, 0xde, 0x15, 0xa6, 0x11, 0xbb, 0xbc, 0xe7, 0xed, 0x60, 0x6a, 0x61, 0x17, - 0xb3, 0x72, 0x13, 0x5b, 0x15, 0x9b, 0x96, 0xa5, 0xc2, 0x70, 0x48, 0x79, 0xb7, 0x6e, 0xef, 0x9b, - 0xb6, 0xe5, 0x52, 0xbb, 0x5e, 0x6e, 0xae, 0x18, 0x75, 0xa7, 0x66, 0xac, 0x94, 0xab, 0xd8, 
0xc2, - 0xd4, 0x70, 0x71, 0x45, 0x73, 0xa8, 0xed, 0xda, 0xb0, 0xe4, 0x3b, 0x68, 0x86, 0x43, 0xb4, 0x0e, - 0x07, 0x2d, 0x70, 0x58, 0x3a, 0x57, 0x25, 0x6e, 0xcd, 0xdb, 0xd1, 0x4c, 0xbb, 0x51, 0xae, 0xda, - 0x55, 0xbb, 0x2c, 0xfc, 0x76, 0xbc, 0x5d, 0xf1, 0x25, 0x3e, 0xc4, 0x5f, 0x3e, 0xde, 0xd2, 0xc5, - 0xe8, 0x00, 0x0d, 0xc3, 0xac, 0x11, 0x0b, 0xd3, 0x83, 0xb2, 0xb3, 0x57, 0xe5, 0x02, 0x56, 0x6e, - 0x60, 0xd7, 0x28, 0x37, 0x7b, 0x4e, 0xb1, 0x54, 0x4e, 0xf2, 0xa2, 0x9e, 0xe5, 0x92, 0x06, 0xee, - 0x71, 0xb8, 0x9c, 0xe6, 0xc0, 0xcc, 0x1a, 0x6e, 0x18, 0xdd, 0x7e, 0xea, 0x77, 0x0a, 0x58, 0xbe, - 0xf9, 0x1c, 0x37, 0x1c, 0x77, 0x93, 0x12, 0x9b, 0x12, 0xf7, 0x60, 0x0d, 0x37, 0x71, 0xfd, 0xba, - 0x6d, 0xed, 0x92, 0xaa, 0x47, 0x0d, 0x97, 0xd8, 0x16, 0xfc, 0x08, 0x14, 0x2c, 0xbb, 0x41, 0x2c, - 0x83, 0xcb, 0x4d, 0x8f, 0x52, 0x6c, 0x99, 0x07, 0xdb, 0x35, 0x83, 0x62, 0x56, 0x50, 0x96, 0x95, - 0xd3, 0x39, 0xfd, 0xaf, 0xed, 0x56, 0xa9, 0xb0, 0x91, 0x60, 0x83, 0x12, 0xbd, 0xe1, 0x7f, 0xc1, - 0x6c, 0x1d, 0x5b, 0x15, 0x63, 0xa7, 0x8e, 0x37, 0x31, 0x35, 0xb1, 0xe5, 0x16, 0x32, 0x02, 0x70, - 0xbe, 0xdd, 0x2a, 0xcd, 0xae, 0xc5, 0x55, 0xa8, 0xdb, 0x56, 0x7d, 0x0c, 0x16, 0xff, 0x57, 0xb7, - 0xf7, 0x6f, 0x10, 0xe6, 0x12, 0xab, 0xea, 0x11, 0x56, 0xc3, 0x74, 0x1d, 0xbb, 0x35, 0xbb, 0x02, - 0xaf, 0x81, 0xac, 0x7b, 0xe0, 0x60, 0x71, 0xbe, 0xbc, 0x7e, 0xe6, 0x55, 0xab, 0x34, 0xd2, 0x6e, - 0x95, 0xb2, 0x0f, 0x0e, 0x1c, 0xfc, 0xae, 0x55, 0x3a, 0x91, 0xe0, 0xc6, 0xd5, 0x48, 0x38, 0xaa, - 0x2f, 0x33, 0x00, 0x70, 0xab, 0x6d, 0x91, 0x38, 0xf8, 0x14, 0x4c, 0xf0, 0xc7, 0xaa, 0x18, 0xae, - 0x21, 0x30, 0x27, 0x2f, 0x9c, 0xd7, 0xa2, 0x52, 0x09, 0x73, 0xae, 0x39, 0x7b, 0x55, 0x2e, 0x60, - 0x1a, 0xb7, 0xd6, 0x9a, 0x2b, 0xda, 0xfd, 0x9d, 0x67, 0xd8, 0x74, 0xd7, 0xb1, 0x6b, 0xe8, 0x50, - 0x9e, 0x02, 0x44, 0x32, 0x14, 0xa2, 0xc2, 0x2d, 0x90, 0x65, 0x0e, 0x36, 0x45, 0x02, 0x26, 0x2f, - 0x94, 0xb5, 0x94, 0x42, 0xd4, 0xa2, 0xc3, 0x6d, 0x3b, 0xd8, 0xd4, 0xa7, 0x82, 0x2b, 0xf2, 0x2f, - 0x24, 0xa0, 0xe0, 0xc7, 0x60, 0x8c, 0xb9, 0x86, 0xeb, 0xb1, 0xc2, 0xa8, 0x00, 0x5d, 0x79, 0x1f, - 0x50, 0xe1, 0xa8, 0xcf, 0x48, 0xd8, 0x31, 0xff, 0x1b, 0x49, 0x40, 0xf5, 0x4d, 0x06, 0xcc, 0x47, - 0xc6, 0xd7, 0x6d, 0xab, 0x42, 0x44, 0xad, 0x5c, 0x8d, 0xe5, 0xfd, 0x54, 0x57, 0xde, 0x17, 0xfb, - 0xb8, 0x44, 0x39, 0x87, 0xff, 0x09, 0xcf, 0x9b, 0x11, 0xee, 0x27, 0xe3, 0xc1, 0xdf, 0xb5, 0x4a, - 0xb3, 0xa1, 0x5b, 0xfc, 0x3c, 0xb0, 0x09, 0x60, 0xdd, 0x60, 0xee, 0x03, 0x6a, 0x58, 0xcc, 0x87, - 0x25, 0x0d, 0x2c, 0xaf, 0xfd, 0xcf, 0xe1, 0x5e, 0x8a, 0x7b, 0xe8, 0x4b, 0x32, 0x24, 0x5c, 0xeb, - 0x41, 0x43, 0x7d, 0x22, 0xc0, 0xbf, 0x83, 0x31, 0x8a, 0x0d, 0x66, 0x5b, 0x85, 0xac, 0x38, 0x72, - 0x98, 0x2f, 0x24, 0xa4, 0x48, 0x6a, 0xe1, 0x3f, 0xc0, 0x78, 0x03, 0x33, 0x66, 0x54, 0x71, 0x21, - 0x27, 0x0c, 0x67, 0xa5, 0xe1, 0xf8, 0xba, 0x2f, 0x46, 0x81, 0x5e, 0xfd, 0x5e, 0x01, 0x33, 0x51, - 0x9e, 0xd6, 0x08, 0x73, 0xe1, 0x93, 0x9e, 0xea, 0xd3, 0x86, 0xbb, 0x13, 0xf7, 0x16, 0xb5, 0x37, - 0x27, 0xc3, 0x4d, 0x04, 0x92, 0x8e, 0xca, 0xdb, 0x04, 0x39, 0xe2, 0xe2, 0x06, 0xcf, 0xfa, 0xe8, - 0xe9, 0xc9, 0x0b, 0x67, 0xde, 0xa3, 0x4a, 0xf4, 0x69, 0x89, 0x9b, 0xbb, 0xc3, 0x11, 0x90, 0x0f, - 0xa4, 0xfe, 0x3c, 0xda, 0x79, 0x05, 0x5e, 0x91, 0xf0, 0x6b, 0x05, 0x2c, 0x39, 0x89, 0x33, 0x46, - 0xde, 0xea, 0x46, 0x6a, 0xe8, 0xe4, 0x31, 0x85, 0xf0, 0x2e, 0xe6, 0xb3, 0x05, 0xeb, 0xaa, 0x3c, - 0xd3, 0xd2, 0x00, 0xe3, 0x01, 0x67, 0x81, 0x77, 0x01, 0x6c, 0x18, 0x2e, 0xcf, 0x69, 0x75, 0x93, - 0x62, 0x13, 0x57, 0x38, 0xaa, 0x1c, 0x4c, 0x61, 0x7d, 0xac, 0xf7, 0x58, 0xa0, 0x3e, 0x5e, 0xf0, - 0x0b, 0x05, 0xcc, 
0x57, 0x7a, 0x07, 0x8d, 0xac, 0xcc, 0x2b, 0x43, 0xa5, 0xba, 0xcf, 0xa0, 0xd2, - 0x17, 0xdb, 0xad, 0xd2, 0x7c, 0x1f, 0x05, 0xea, 0x17, 0x0d, 0x7e, 0x02, 0x72, 0xd4, 0xab, 0x63, - 0x56, 0xc8, 0x8a, 0x17, 0x4e, 0x0f, 0xbb, 0x69, 0xd7, 0x89, 0x79, 0x80, 0xb8, 0xcf, 0xff, 0x89, - 0x5b, 0xdb, 0xf6, 0xc4, 0xc4, 0x62, 0xd1, 0x73, 0x0b, 0x15, 0xf2, 0x51, 0xd5, 0x17, 0x60, 0xae, - 0x7b, 0x70, 0xc0, 0x1a, 0x00, 0x66, 0xd0, 0xab, 0x7c, 0x4d, 0xf0, 0xb8, 0x17, 0xdf, 0xa3, 0xb2, - 0xc2, 0x46, 0x8f, 0xc6, 0x66, 0x28, 0x62, 0xa8, 0x03, 0x5b, 0x3d, 0x0f, 0xa6, 0x6e, 0x51, 0xdb, - 0x73, 0xe4, 0x21, 0xe1, 0x32, 0xc8, 0x5a, 0x46, 0x23, 0x18, 0x41, 0xe1, 0x5c, 0xdc, 0x30, 0x1a, - 0x18, 0x09, 0x8d, 0xfa, 0x95, 0x02, 0xa6, 0xd7, 0x48, 0x83, 0xb8, 0x08, 0x33, 0xc7, 0xb6, 0x18, - 0x86, 0x97, 0x62, 0x63, 0xeb, 0x64, 0xd7, 0xd8, 0x3a, 0x16, 0x33, 0xee, 0x18, 0x58, 0x4f, 0xc0, - 0xf8, 0xa7, 0x1e, 0xf6, 0x88, 0x55, 0x95, 0x63, 0xfb, 0x52, 0xea, 0x0d, 0xb7, 0x7c, 0xfb, 0x58, - 0xc5, 0xe9, 0x93, 0x7c, 0x10, 0x48, 0x0d, 0x0a, 0x20, 0xd5, 0xdf, 0x32, 0xe0, 0xa4, 0x88, 0x8c, - 0x2b, 0x03, 0xb6, 0xf3, 0x13, 0x50, 0x30, 0x18, 0xf3, 0x28, 0xae, 0x24, 0x6d, 0xe7, 0x65, 0x79, - 0x9d, 0xc2, 0x6a, 0x82, 0x1d, 0x4a, 0x44, 0x80, 0x7b, 0x60, 0xba, 0xde, 0x79, 0x79, 0x79, 0x4f, - 0x2d, 0xf5, 0x9e, 0xb1, 0x94, 0xe9, 0x0b, 0xf2, 0x08, 0xf1, 0xb4, 0xa3, 0x38, 0x76, 0x3f, 0x3a, - 0x30, 0x3a, 0x3c, 0x1d, 0x80, 0xf7, 0xc1, 0xc2, 0x8e, 0x4d, 0xa9, 0xbd, 0x4f, 0xac, 0xaa, 0x88, - 0x13, 0x80, 0x64, 0x05, 0xc8, 0x5f, 0xda, 0xad, 0xd2, 0x82, 0xde, 0xcf, 0x00, 0xf5, 0xf7, 0x53, - 0xf7, 0xc1, 0xc2, 0x06, 0x1f, 0x2c, 0xcc, 0xf6, 0xa8, 0x89, 0xa3, 0x9e, 0x80, 0x25, 0x90, 0x6b, - 0x62, 0xba, 0xe3, 0xd7, 0x75, 0x5e, 0xcf, 0xf3, 0x8e, 0x78, 0xc4, 0x05, 0xc8, 0x97, 0xf3, 0x9b, - 0x58, 0x91, 0xe7, 0x43, 0xb4, 0xc6, 0x0a, 0x63, 0xc2, 0x54, 0xdc, 0x64, 0x23, 0xae, 0x42, 0xdd, - 0xb6, 0xea, 0x61, 0x06, 0x2c, 0x26, 0xb4, 0x20, 0x7c, 0x04, 0x26, 0x98, 0xfc, 0x5b, 0xb6, 0xd5, - 0xe9, 0xd4, 0xc7, 0x90, 0xce, 0xd1, 0x16, 0x08, 0xd0, 0x50, 0x88, 0x05, 0x1d, 0x30, 0x4d, 0xe5, - 0x19, 0x44, 0x50, 0xb9, 0x0d, 0xfe, 0x95, 0x0a, 0xde, 0x9b, 0x9f, 0xe8, 0xb9, 0x51, 0x27, 0x22, - 0x8a, 0x07, 0x80, 0x2f, 0xc0, 0x5c, 0xc7, 0xc5, 0xfd, 0xa0, 0xa3, 0x22, 0xe8, 0xe5, 0xd4, 0xa0, - 0x7d, 0xdf, 0x45, 0x2f, 0xc8, 0xb8, 0x73, 0x1b, 0x5d, 0xb8, 0xa8, 0x27, 0x92, 0xfa, 0x63, 0x06, - 0x0c, 0x58, 0x10, 0x1f, 0x80, 0xf0, 0x19, 0x31, 0xc2, 0x77, 0xed, 0x08, 0xab, 0x2f, 0x91, 0x00, - 0x92, 0x2e, 0x02, 0xb8, 0x7a, 0x94, 0x20, 0x83, 0x09, 0xe1, 0x2f, 0x19, 0xf0, 0xb7, 0x64, 0xe7, - 0x88, 0x20, 0xde, 0x8b, 0x4d, 0xda, 0x7f, 0x77, 0x4d, 0xda, 0x53, 0x43, 0x40, 0xfc, 0x49, 0x18, - 0xbb, 0x08, 0xe3, 0x5b, 0x05, 0x14, 0x93, 0xf3, 0xf6, 0x01, 0x08, 0xe4, 0xd3, 0x38, 0x81, 0xbc, - 0x7a, 0x84, 0x2a, 0x4b, 0x20, 0x94, 0xb7, 0x06, 0x15, 0x57, 0xc8, 0xfc, 0x86, 0x58, 0xfd, 0xdf, - 0x64, 0x06, 0xe5, 0x4a, 0x30, 0xd5, 0x94, 0x9f, 0x30, 0x31, 0xef, 0x9b, 0x16, 0x5f, 0x40, 0x0d, - 0xbe, 0x43, 0xfc, 0x8a, 0x24, 0x60, 0xbc, 0xee, 0xaf, 0x6c, 0xd9, 0xd7, 0xfa, 0x70, 0x9b, 0x72, - 0xd0, 0x8a, 0xf7, 0xe9, 0x81, 0x34, 0x43, 0x01, 0x3e, 0xc4, 0x60, 0x0c, 0x8b, 0x9f, 0xee, 0x43, - 0x37, 0x77, 0xda, 0x2f, 0x7d, 0x1d, 0xf0, 0x42, 0xf4, 0xad, 0x90, 0x04, 0x57, 0x5f, 0x2a, 0x60, - 0x39, 0x6d, 0x2a, 0xc0, 0xe7, 0x7d, 0xd8, 0xde, 0x51, 0xc8, 0xfc, 0xf0, 0xec, 0xef, 0x5b, 0x05, - 0x1c, 0xef, 0xc7, 0xa9, 0x78, 0xa3, 0x71, 0x22, 0x15, 0xb2, 0xa0, 0xb0, 0xd1, 0xb6, 0x84, 0x14, - 0x49, 0x2d, 0x3c, 0x0b, 0x26, 0x6a, 0x86, 0x55, 0xd9, 0x26, 0x9f, 0x05, 0x1c, 0x3f, 0x2c, 0xf5, - 0xdb, 0x52, 0x8e, 0x42, 0x0b, 0x78, 0x03, 
0xcc, 0x09, 0xbf, 0x35, 0x6c, 0x55, 0xdd, 0x9a, 0x78, - 0x13, 0xc9, 0x51, 0xc2, 0xdd, 0xb3, 0xd5, 0xa5, 0x47, 0x3d, 0x1e, 0xea, 0xaf, 0x0a, 0x80, 0x7f, - 0x84, 0x56, 0x9c, 0x01, 0x79, 0xc3, 0x21, 0x82, 0xed, 0xfa, 0xcd, 0x96, 0xd7, 0xa7, 0xdb, 0xad, - 0x52, 0x7e, 0x75, 0xf3, 0x8e, 0x2f, 0x44, 0x91, 0x9e, 0x1b, 0x07, 0xfb, 0xd6, 0xdf, 0xab, 0xd2, - 0x38, 0x08, 0xcc, 0x50, 0xa4, 0x87, 0x57, 0xc0, 0x94, 0x59, 0xf7, 0x98, 0x8b, 0xe9, 0xb6, 0x69, - 0x3b, 0x58, 0x0c, 0xa7, 0x09, 0xfd, 0xb8, 0xbc, 0xd3, 0xd4, 0xf5, 0x0e, 0x1d, 0x8a, 0x59, 0x42, - 0x0d, 0x00, 0xde, 0x59, 0xcc, 0x31, 0x78, 0x9c, 0x9c, 0x88, 0x33, 0xc3, 0x1f, 0x6c, 0x23, 0x94, - 0xa2, 0x0e, 0x0b, 0xf5, 0x19, 0x58, 0xd8, 0xc6, 0xb4, 0x49, 0x4c, 0xbc, 0x6a, 0x9a, 0xb6, 0x67, - 0xb9, 0x01, 0x6f, 0x2f, 0x83, 0x7c, 0x68, 0x26, 0x9b, 0xef, 0x98, 0x8c, 0x9f, 0x0f, 0xb1, 0x50, - 0x64, 0x13, 0x76, 0x7b, 0x26, 0xb1, 0xdb, 0x7f, 0xc8, 0x80, 0xf1, 0x08, 0x3e, 0xbb, 0x47, 0xac, - 0x8a, 0x44, 0x3e, 0x11, 0x58, 0xdf, 0x23, 0x56, 0xe5, 0x5d, 0xab, 0x34, 0x29, 0xcd, 0xf8, 0x27, - 0x12, 0x86, 0xf0, 0x2e, 0xc8, 0x7a, 0x0c, 0x53, 0xd9, 0xc7, 0x67, 0x53, 0xab, 0xf9, 0x21, 0xc3, - 0x34, 0x20, 0x5a, 0x13, 0x1c, 0x9a, 0x0b, 0x90, 0xc0, 0x80, 0x1b, 0x20, 0x57, 0xe5, 0xaf, 0x22, - 0x5b, 0xf5, 0x5c, 0x2a, 0x58, 0xe7, 0x2f, 0x1a, 0xbf, 0x10, 0x84, 0x04, 0xf9, 0x30, 0x90, 0x82, - 0x19, 0x16, 0x4b, 0xa2, 0x78, 0xb0, 0x61, 0x88, 0x53, 0xdf, 0xdc, 0xeb, 0xb0, 0xdd, 0x2a, 0xcd, - 0xc4, 0x55, 0xa8, 0x2b, 0x82, 0x5a, 0x06, 0x93, 0x1d, 0x57, 0x4c, 0x9f, 0xb5, 0xfa, 0xcd, 0x57, - 0x87, 0xc5, 0x91, 0xd7, 0x87, 0xc5, 0x91, 0x37, 0x87, 0xc5, 0x91, 0xcf, 0xdb, 0x45, 0xe5, 0x55, - 0xbb, 0xa8, 0xbc, 0x6e, 0x17, 0x95, 0x37, 0xed, 0xa2, 0xf2, 0xb6, 0x5d, 0x54, 0xbe, 0xfc, 0xa9, - 0x38, 0xf2, 0xb8, 0x94, 0xf2, 0x2f, 0xda, 0xdf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc1, 0x6c, 0x4e, - 0x4e, 0xdd, 0x15, 0x00, 0x00, + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1/generated.proto", fileDescriptor_f8a25df358697d27) +} + +var fileDescriptor_f8a25df358697d27 = []byte{ + // 1588 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4b, 0x73, 0x1b, 0xc5, + 0x16, 0xb6, 0x64, 0xc9, 0xb6, 0x8e, 0x9f, 0x69, 0xc7, 0x65, 0xc5, 0xb9, 0x25, 0x39, 0x73, 0xeb, + 0xe6, 0x71, 0x43, 0xa4, 0xc4, 0x45, 0x20, 0xa9, 0x00, 0xa9, 0x4c, 0x12, 0xf2, 0xb2, 0x1d, 0xa7, + 0x95, 0x07, 0x15, 0xa8, 0x82, 0xd1, 0xa8, 0x2d, 0x4d, 0x2c, 0xcd, 0x0c, 0xdd, 0x33, 0x32, 0xa6, + 0x8a, 0x2a, 0x7e, 0x42, 0x56, 0x2c, 0x59, 0xc0, 0x3f, 0x60, 0x45, 0xc1, 0x86, 0x65, 0x76, 0x64, + 0x19, 0x58, 0xa8, 0x88, 0xf8, 0x0b, 0x2c, 0x20, 0x2b, 0xaa, 0x7b, 0x7a, 0x66, 0x34, 0x92, 0x66, + 0xac, 0xf2, 0x22, 0x6c, 0xd8, 0x79, 0xce, 0xf9, 0xce, 0x77, 0xba, 0x4f, 0x9f, 0x97, 0x0c, 0xea, + 0xce, 0x05, 0x56, 0x32, 0xac, 0xf2, 0x8e, 0x5b, 0x25, 0xd4, 0x24, 0x0e, 0x61, 0xe5, 0x36, 0x31, + 0x6b, 0x16, 0x2d, 0x4b, 0x85, 0x66, 0x1b, 0xe5, 0xed, 0xa6, 0xb5, 0xab, 0x5b, 0xa6, 0x43, 0xad, + 0x66, 0xb9, 0x7d, 0xae, 0x5c, 0x27, 0x26, 0xa1, 0x9a, 0x43, 0x6a, 0x25, 0x9b, 0x5a, 0x8e, 0x85, + 0x8e, 0x78, 0xd0, 0x92, 0x66, 0x1b, 0xa5, 0x1e, 0x68, 0xa9, 0x7d, 0x6e, 0xe5, 0x4c, 0xdd, 0x70, + 0x1a, 0x6e, 0xb5, 0xa4, 0x5b, 0xad, 0x72, 0xdd, 0xaa, 0x5b, 0x65, 0x61, 0x51, 0x75, 0xb7, 0xc5, + 0x97, 0xf8, 0x10, 0x7f, 0x79, 0x4c, 0x2b, 0x6f, 0x86, 0x4e, 0x5b, 0x9a, 0xde, 0x30, 0x4c, 0x42, + 0xf7, 0xca, 0xf6, 0x4e, 0x9d, 0x0b, 0x58, 0xb9, 0x45, 0x1c, 0x6d, 0x88, 0xff, 0x95, 0x72, 0x9c, + 0x15, 0x75, 0x4d, 0xc7, 0x68, 0x91, 0x01, 0x83, 0xb7, 0xf6, 0x33, 0x60, 0x7a, 0x83, 0xb4, 0xb4, + 0x7e, 
0x3b, 0xe5, 0xc7, 0x14, 0xac, 0x5e, 0xff, 0x8c, 0xb4, 0x6c, 0x67, 0x8b, 0x1a, 0x16, 0x35, + 0x9c, 0xbd, 0x75, 0xd2, 0x26, 0xcd, 0xab, 0x96, 0xb9, 0x6d, 0xd4, 0x5d, 0xaa, 0x39, 0x86, 0x65, + 0xa2, 0x0f, 0x20, 0x6f, 0x5a, 0x2d, 0xc3, 0xd4, 0xb8, 0x5c, 0x77, 0x29, 0x25, 0xa6, 0xbe, 0x57, + 0x69, 0x68, 0x94, 0xb0, 0x7c, 0x6a, 0x35, 0x75, 0x32, 0xab, 0xfe, 0xa7, 0xdb, 0x29, 0xe6, 0x37, + 0x63, 0x30, 0x38, 0xd6, 0x1a, 0xbd, 0x0b, 0xf3, 0x4d, 0x62, 0xd6, 0xb4, 0x6a, 0x93, 0x6c, 0x11, + 0xaa, 0x13, 0xd3, 0xc9, 0xa7, 0x05, 0xe1, 0x62, 0xb7, 0x53, 0x9c, 0x5f, 0x8f, 0xaa, 0x70, 0x3f, + 0x56, 0x79, 0x0c, 0xcb, 0xef, 0x37, 0xad, 0xdd, 0x6b, 0x06, 0x73, 0x0c, 0xb3, 0xee, 0x1a, 0xac, + 0x41, 0xe8, 0x06, 0x71, 0x1a, 0x56, 0x0d, 0x5d, 0x86, 0x8c, 0xb3, 0x67, 0x13, 0x71, 0xbe, 0x9c, + 0x7a, 0xfa, 0x59, 0xa7, 0x38, 0xd6, 0xed, 0x14, 0x33, 0xf7, 0xf7, 0x6c, 0xf2, 0xaa, 0x53, 0x3c, + 0x1a, 0x63, 0xc6, 0xd5, 0x58, 0x18, 0x2a, 0x4f, 0xd3, 0x00, 0x1c, 0x55, 0x11, 0x81, 0x43, 0x9f, + 0xc0, 0x14, 0x7f, 0xac, 0x9a, 0xe6, 0x68, 0x82, 0x73, 0x7a, 0xed, 0x6c, 0x29, 0x4c, 0x92, 0x20, + 0xe6, 0x25, 0x7b, 0xa7, 0xce, 0x05, 0xac, 0xc4, 0xd1, 0xa5, 0xf6, 0xb9, 0xd2, 0xdd, 0xea, 0x13, + 0xa2, 0x3b, 0x1b, 0xc4, 0xd1, 0x54, 0x24, 0x4f, 0x01, 0xa1, 0x0c, 0x07, 0xac, 0xe8, 0x0e, 0x64, + 0x98, 0x4d, 0x74, 0x11, 0x80, 0xe9, 0xb5, 0x53, 0xa5, 0xd8, 0x14, 0x2c, 0x85, 0xc7, 0xaa, 0xd8, + 0x44, 0x57, 0x67, 0xfc, 0xcb, 0xf1, 0x2f, 0x2c, 0x48, 0x50, 0x05, 0x26, 0x98, 0xa3, 0x39, 0x2e, + 0xcb, 0x8f, 0x0b, 0xba, 0xd3, 0xa3, 0xd1, 0x09, 0x13, 0x75, 0x4e, 0x12, 0x4e, 0x78, 0xdf, 0x58, + 0x52, 0x29, 0x2f, 0xd2, 0xb0, 0x18, 0x82, 0xaf, 0x5a, 0x66, 0xcd, 0x10, 0xf9, 0x71, 0x29, 0x12, + 0xeb, 0x13, 0x7d, 0xb1, 0x5e, 0x1e, 0x62, 0x12, 0xc6, 0x19, 0x5d, 0x0c, 0x4e, 0x9a, 0x16, 0xe6, + 0xc7, 0xa2, 0xce, 0x5f, 0x75, 0x8a, 0xf3, 0x81, 0x59, 0xf4, 0x3c, 0xa8, 0x0d, 0xa8, 0xa9, 0x31, + 0xe7, 0x3e, 0xd5, 0x4c, 0xe6, 0xd1, 0x1a, 0x2d, 0x22, 0x2f, 0xfc, 0xff, 0xd1, 0x5e, 0x87, 0x5b, + 0xa8, 0x2b, 0xd2, 0x25, 0x5a, 0x1f, 0x60, 0xc3, 0x43, 0x3c, 0xa0, 0xe3, 0x30, 0x41, 0x89, 0xc6, + 0x2c, 0x33, 0x9f, 0x11, 0x47, 0x0e, 0xe2, 0x85, 0x85, 0x14, 0x4b, 0x2d, 0x3a, 0x05, 0x93, 0x2d, + 0xc2, 0x98, 0x56, 0x27, 0xf9, 0xac, 0x00, 0xce, 0x4b, 0xe0, 0xe4, 0x86, 0x27, 0xc6, 0xbe, 0x5e, + 0xf9, 0x21, 0x05, 0x73, 0x61, 0x9c, 0xd6, 0x0d, 0xe6, 0xa0, 0x8f, 0x06, 0x32, 0xae, 0x34, 0xda, + 0x9d, 0xb8, 0xb5, 0xc8, 0xb7, 0x05, 0xe9, 0x6e, 0xca, 0x97, 0xf4, 0x64, 0xdb, 0x6d, 0xc8, 0x1a, + 0x0e, 0x69, 0xf1, 0xa8, 0x8f, 0x9f, 0x9c, 0x5e, 0xfb, 0xdf, 0x48, 0xf9, 0xa1, 0xce, 0x4a, 0xc6, + 0xec, 0x2d, 0x6e, 0x8b, 0x3d, 0x0a, 0xe5, 0x97, 0xf1, 0xde, 0xc3, 0xf3, 0x2c, 0x44, 0xdf, 0xa4, + 0x60, 0xc5, 0x8e, 0xed, 0x28, 0xf2, 0x3e, 0xef, 0x25, 0x38, 0x8d, 0x6f, 0x47, 0x98, 0x6c, 0x13, + 0xde, 0x43, 0x88, 0xaa, 0xc8, 0xd3, 0xac, 0x24, 0x80, 0x13, 0x4e, 0x81, 0x6e, 0x03, 0x6a, 0x69, + 0x0e, 0x8f, 0x63, 0x7d, 0x8b, 0x12, 0x9d, 0xd4, 0x38, 0xab, 0x6c, 0x40, 0x41, 0x4e, 0x6c, 0x0c, + 0x20, 0xf0, 0x10, 0x2b, 0xf4, 0x05, 0x2c, 0xd6, 0x06, 0xfb, 0x89, 0x4c, 0xc6, 0xb5, 0x7d, 0xa2, + 0x3b, 0xa4, 0x13, 0xa9, 0xcb, 0xdd, 0x4e, 0x71, 0x71, 0x88, 0x02, 0x0f, 0xf3, 0x83, 0x1e, 0x41, + 0x96, 0xba, 0x4d, 0xc2, 0xf2, 0x19, 0xf1, 0x9c, 0x49, 0x0e, 0xb7, 0xac, 0xa6, 0xa1, 0xef, 0x61, + 0x8e, 0x7e, 0x64, 0x38, 0x8d, 0x8a, 0x2b, 0x9a, 0x11, 0x0b, 0xdf, 0x56, 0xa8, 0xb0, 0xc7, 0xa7, + 0xb4, 0x61, 0xa1, 0xbf, 0x3f, 0xa0, 0x2a, 0x80, 0xee, 0x97, 0x24, 0x9f, 0x00, 0xe3, 0x7d, 0xb9, + 0x19, 0x9f, 0x40, 0x41, 0x25, 0x87, 0xbd, 0x30, 0x10, 0x31, 0xdc, 0xc3, 0xaa, 0x9c, 0x85, 0x99, + 0x1b, 0xd4, 0x72, 0x6d, 0x79, 
0x3c, 0xb4, 0x0a, 0x19, 0x53, 0x6b, 0xf9, 0x3d, 0x26, 0x68, 0x79, + 0x9b, 0x5a, 0x8b, 0x60, 0xa1, 0x51, 0xbe, 0x4e, 0xc1, 0xec, 0xba, 0xd1, 0x32, 0x1c, 0x4c, 0x98, + 0x6d, 0x99, 0x8c, 0xa0, 0xf3, 0x91, 0xbe, 0x74, 0xac, 0xaf, 0x2f, 0x1d, 0x8a, 0x80, 0x7b, 0x3a, + 0xd2, 0x43, 0x98, 0xfc, 0xd4, 0x25, 0xae, 0x61, 0xd6, 0x65, 0x2f, 0x2e, 0x27, 0xdc, 0xed, 0x9e, + 0x87, 0x8c, 0x24, 0x96, 0x3a, 0xcd, 0x6b, 0x5c, 0x6a, 0xb0, 0x4f, 0xa6, 0xfc, 0x91, 0x86, 0x63, + 0xc2, 0x27, 0xa9, 0xfd, 0x23, 0xc3, 0x96, 0xc0, 0x6c, 0xb3, 0xf7, 0xca, 0xf2, 0x76, 0x27, 0x13, + 0x6e, 0x17, 0x09, 0x91, 0xba, 0x24, 0x23, 0x18, 0x0d, 0x33, 0x8e, 0xb2, 0x0e, 0x9b, 0xe9, 0xe3, + 0xa3, 0xcf, 0x74, 0x74, 0x17, 0x96, 0xaa, 0x16, 0xa5, 0xd6, 0xae, 0x61, 0xd6, 0x85, 0x1f, 0x9f, + 0x24, 0x23, 0x48, 0x8e, 0x74, 0x3b, 0xc5, 0x25, 0x75, 0x18, 0x00, 0x0f, 0xb7, 0x53, 0x76, 0x61, + 0x69, 0x93, 0x77, 0x0d, 0x66, 0xb9, 0x54, 0x27, 0x61, 0xf6, 0xa3, 0x22, 0x64, 0xdb, 0x84, 0x56, + 0xbd, 0x0c, 0xce, 0xa9, 0x39, 0x9e, 0xfb, 0x0f, 0xb9, 0x00, 0x7b, 0x72, 0x7e, 0x13, 0x33, 0xb4, + 0x7c, 0x80, 0xd7, 0x59, 0x7e, 0x42, 0x40, 0xc5, 0x4d, 0x36, 0xa3, 0x2a, 0xdc, 0x8f, 0x55, 0x7e, + 0x4e, 0xc3, 0x72, 0x4c, 0xb1, 0xa1, 0x2d, 0x98, 0x62, 0xf2, 0x6f, 0x59, 0x40, 0x4a, 0xc2, 0x33, + 0x48, 0xb3, 0xb0, 0xa1, 0xfb, 0x3c, 0x38, 0x60, 0x41, 0x4f, 0x60, 0x96, 0x4a, 0xef, 0xc2, 0x9d, + 0x6c, 0xec, 0x67, 0x12, 0x68, 0x07, 0x63, 0x12, 0x3e, 0x31, 0xee, 0xe5, 0xc2, 0x51, 0x6a, 0xd4, + 0x86, 0x85, 0x9e, 0xcb, 0x7a, 0xee, 0xc6, 0x85, 0xbb, 0xb3, 0x09, 0xee, 0x86, 0xbe, 0x82, 0x9a, + 0x97, 0x1e, 0x17, 0x36, 0xfb, 0x18, 0xf1, 0x80, 0x0f, 0xe5, 0xa7, 0x34, 0x24, 0xf4, 0xfa, 0xd7, + 0xb0, 0xa3, 0x7d, 0x18, 0xd9, 0xd1, 0x2e, 0x1e, 0x68, 0x7e, 0xc5, 0xee, 0x6c, 0x7a, 0xdf, 0xce, + 0x76, 0xe9, 0x60, 0xf4, 0xc9, 0x3b, 0xdc, 0x9f, 0x69, 0xf8, 0x6f, 0xbc, 0x71, 0xb8, 0xd3, 0xdd, + 0x89, 0xf4, 0xce, 0xb7, 0xfb, 0x7a, 0xe7, 0x89, 0x11, 0x28, 0xfe, 0xdd, 0xf1, 0xfa, 0x76, 0xbc, + 0x5f, 0x53, 0x50, 0x88, 0x8f, 0xdb, 0x6b, 0xd8, 0xf9, 0x1e, 0x47, 0x77, 0xbe, 0xf3, 0x07, 0xca, + 0xaf, 0x98, 0x1d, 0xf0, 0x46, 0x52, 0x5a, 0x05, 0x2b, 0xdb, 0x08, 0x63, 0xfc, 0xdb, 0x74, 0x52, + 0x94, 0xc4, 0x72, 0xb9, 0xcf, 0xef, 0x8d, 0x88, 0xf5, 0x75, 0x93, 0x0f, 0x97, 0x16, 0x9f, 0x0f, + 0x5e, 0x2e, 0xea, 0x30, 0xd9, 0xf4, 0x86, 0xb0, 0xac, 0xe2, 0x77, 0xf6, 0x9b, 0x7f, 0x49, 0xe3, + 0xda, 0x1b, 0xf5, 0x12, 0x86, 0x7d, 0x66, 0xf4, 0x31, 0x4c, 0x10, 0xf1, 0xab, 0x7a, 0x84, 0x52, + 0xde, 0xef, 0xe7, 0xb7, 0x0a, 0x3c, 0xed, 0x3c, 0x14, 0x96, 0xb4, 0xca, 0x57, 0x29, 0x58, 0xdd, + 0xaf, 0x07, 0x20, 0x3a, 0x64, 0x4f, 0x3b, 0xd8, 0xce, 0x3d, 0xfa, 0xde, 0xf6, 0x5d, 0x0a, 0x0e, + 0x0f, 0xdb, 0x89, 0x78, 0x41, 0xf1, 0x45, 0x28, 0xd8, 0x62, 0x82, 0x82, 0xba, 0x27, 0xa4, 0x58, + 0x6a, 0xd1, 0x1b, 0x30, 0xd5, 0xd0, 0xcc, 0x5a, 0xc5, 0xf8, 0xdc, 0x5f, 0xc5, 0x83, 0x94, 0xbe, + 0x29, 0xe5, 0x38, 0x40, 0xa0, 0x6b, 0xb0, 0x20, 0xec, 0xd6, 0x89, 0x59, 0x77, 0x1a, 0xe2, 0x1d, + 0xe4, 0xb6, 0x11, 0xcc, 0x95, 0x7b, 0x7d, 0x7a, 0x3c, 0x60, 0xa1, 0xfc, 0x95, 0x02, 0x74, 0x90, + 0x05, 0xe1, 0x34, 0xe4, 0x34, 0xdb, 0x10, 0x7b, 0xaa, 0x57, 0x54, 0x39, 0x75, 0xb6, 0xdb, 0x29, + 0xe6, 0xae, 0x6c, 0xdd, 0xf2, 0x84, 0x38, 0xd4, 0x73, 0xb0, 0x3f, 0x45, 0xbd, 0x69, 0x29, 0xc1, + 0xbe, 0x63, 0x86, 0x43, 0x3d, 0xba, 0x00, 0x33, 0x7a, 0xd3, 0x65, 0x0e, 0xa1, 0x15, 0xdd, 0xb2, + 0x89, 0x68, 0x42, 0x53, 0xea, 0x61, 0x79, 0xa7, 0x99, 0xab, 0x3d, 0x3a, 0x1c, 0x41, 0xa2, 0x12, + 0x00, 0xaf, 0x23, 0x66, 0x6b, 0xdc, 0x4f, 0x56, 0xf8, 0x99, 0xe3, 0x0f, 0xb6, 0x19, 0x48, 0x71, + 0x0f, 0x42, 0x79, 0x02, 0x4b, 0x15, 0x42, 0xdb, 0x86, 
0x4e, 0xae, 0xe8, 0xba, 0xe5, 0x9a, 0x8e, + 0xbf, 0x71, 0x97, 0x21, 0x17, 0xc0, 0x64, 0xa9, 0x1d, 0x92, 0xfe, 0x73, 0x01, 0x17, 0x0e, 0x31, + 0x41, 0x6d, 0xa7, 0x63, 0x6b, 0xfb, 0xfb, 0x34, 0x4c, 0x86, 0xf4, 0x99, 0x1d, 0xc3, 0xac, 0x49, + 0xe6, 0xa3, 0x3e, 0xfa, 0x8e, 0x61, 0xd6, 0x5e, 0x75, 0x8a, 0xd3, 0x12, 0xc6, 0x3f, 0xb1, 0x00, + 0xa2, 0x6b, 0x90, 0x71, 0x19, 0xa1, 0xb2, 0x6a, 0x8f, 0x27, 0xe4, 0xf1, 0x03, 0x46, 0xa8, 0xbf, + 0x32, 0x4d, 0x71, 0x52, 0x2e, 0xc0, 0xc2, 0x1a, 0xdd, 0x84, 0x6c, 0x9d, 0xbf, 0x87, 0x2c, 0xcc, + 0x13, 0x09, 0x34, 0xbd, 0xbf, 0x3f, 0xbc, 0xc7, 0x17, 0x12, 0xec, 0x11, 0xa0, 0x26, 0xcc, 0xb1, + 0x48, 0xe0, 0xc4, 0x23, 0x25, 0xaf, 0x40, 0x43, 0x23, 0xad, 0xa2, 0x6e, 0xa7, 0x38, 0x17, 0x55, + 0xe1, 0x3e, 0x6e, 0xa5, 0x0c, 0xd3, 0x3d, 0xd7, 0xda, 0xbf, 0x8f, 0xaa, 0x97, 0x9f, 0xbd, 0x2c, + 0x8c, 0x3d, 0x7f, 0x59, 0x18, 0x7b, 0xf1, 0xb2, 0x30, 0xf6, 0x65, 0xb7, 0x90, 0x7a, 0xd6, 0x2d, + 0xa4, 0x9e, 0x77, 0x0b, 0xa9, 0x17, 0xdd, 0x42, 0xea, 0xb7, 0x6e, 0x21, 0xf5, 0xf4, 0xf7, 0xc2, + 0xd8, 0xe3, 0x23, 0xb1, 0xff, 0x13, 0xfd, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xd1, 0x0a, 0x3e, 0x83, + 0x48, 0x15, 0x00, 0x00, } func (m *ExemptPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { @@ -1244,9 +1242,11 @@ func (m *LimitedPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (i } i-- dAtA[i] = 0x12 - i = encodeVarintGenerated(dAtA, i, uint64(m.AssuredConcurrencyShares)) - i-- - dAtA[i] = 0x8 + if m.NominalConcurrencyShares != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.NominalConcurrencyShares)) + i-- + dAtA[i] = 0x8 + } return len(dAtA) - i, nil } @@ -2007,7 +2007,9 @@ func (m *LimitedPriorityLevelConfiguration) Size() (n int) { } var l int _ = l - n += 1 + sovGenerated(uint64(m.AssuredConcurrencyShares)) + if m.NominalConcurrencyShares != nil { + n += 1 + sovGenerated(uint64(*m.NominalConcurrencyShares)) + } l = m.LimitResponse.Size() n += 1 + l + sovGenerated(uint64(l)) if m.LendablePercent != nil { @@ -2384,7 +2386,7 @@ func (this *LimitedPriorityLevelConfiguration) String() string { return "nil" } s := strings.Join([]string{`&LimitedPriorityLevelConfiguration{`, - `AssuredConcurrencyShares:` + fmt.Sprintf("%v", this.AssuredConcurrencyShares) + `,`, + `NominalConcurrencyShares:` + valueToStringGenerated(this.NominalConcurrencyShares) + `,`, `LimitResponse:` + strings.Replace(strings.Replace(this.LimitResponse.String(), "LimitResponse", "LimitResponse", 1), `&`, ``, 1) + `,`, `LendablePercent:` + valueToStringGenerated(this.LendablePercent) + `,`, `BorrowingLimitPercent:` + valueToStringGenerated(this.BorrowingLimitPercent) + `,`, @@ -3713,9 +3715,9 @@ func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AssuredConcurrencyShares", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NominalConcurrencyShares", wireType) } - m.AssuredConcurrencyShares = 0 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3725,11 +3727,12 @@ func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AssuredConcurrencyShares |= int32(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } + m.NominalConcurrencyShares = &v case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field LimitResponse", wireType) diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto 
b/vendor/k8s.io/api/flowcontrol/v1/generated.proto similarity index 94% rename from vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto rename to vendor/k8s.io/api/flowcontrol/v1/generated.proto index 6509386f2..a5c6f4fc4 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1/generated.proto @@ -19,14 +19,14 @@ limitations under the License. syntax = "proto2"; -package k8s.io.api.flowcontrol.v1alpha1; +package k8s.io.api.flowcontrol.v1; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "k8s.io/api/flowcontrol/v1alpha1"; +option go_package = "k8s.io/api/flowcontrol/v1"; // ExemptPriorityLevelConfiguration describes the configurable aspects // of the handling of exempt requests. @@ -153,6 +153,8 @@ message FlowSchemaStatus { // `conditions` is a list of the current states of FlowSchema. // +listType=map // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge // +optional repeated FlowSchemaCondition conditions = 1; } @@ -190,23 +192,28 @@ message LimitResponse { // - How are requests for this priority level limited? // - What should be done with requests that exceed the limit? message LimitedPriorityLevelConfiguration { - // `assuredConcurrencyShares` (ACS) configures the execution - // limit, which is a limit on the number of requests of this - // priority level that may be exeucting at a given time. ACS must - // be a positive number. The server's concurrency limit (SCL) is - // divided among the concurrency-controlled priority levels in - // proportion to their assured concurrency shares. This produces - // the assured concurrency value (ACV) --- the number of requests - // that may be executing at a time --- for each such priority - // level: + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. + // This is the number of execution seats available at this priority level. + // This is used both for requests dispatched from this priority level + // as well as requests dispatched from other priority levels + // borrowing seats from this level. + // The server's concurrency limit (ServerCL) is divided among the + // Limited priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) // - // ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ) + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // + // If not specified, this field defaults to a value of 30. + // + // Setting this field to zero supports the construction of a + // "jail" for this priority level that is used to hold some request(s) // - // bigger numbers of ACS mean more reserved concurrent requests (at the - // expense of every other PL). - // This field has a default value of 30. // +optional - optional int32 assuredConcurrencyShares = 1; + optional int32 nominalConcurrencyShares = 1; // `limitResponse` indicates what to do with requests that can not be executed right now optional LimitResponse limitResponse = 2; @@ -381,6 +388,8 @@ message PriorityLevelConfigurationStatus { // `conditions` is the current state of "request-priority". 
// +listType=map // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge // +optional repeated PriorityLevelConfigurationCondition conditions = 1; } diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/register.go b/vendor/k8s.io/api/flowcontrol/v1/register.go similarity index 95% rename from vendor/k8s.io/api/flowcontrol/v1alpha1/register.go rename to vendor/k8s.io/api/flowcontrol/v1/register.go index 0c8a9cc56..02725b514 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/register.go +++ b/vendor/k8s.io/api/flowcontrol/v1/register.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Kubernetes Authors. +Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,7 +26,7 @@ import ( const GroupName = "flowcontrol.apiserver.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} // Kind takes an unqualified kind and returns a Group qualified GroupKind func Kind(kind string) schema.GroupKind { diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go b/vendor/k8s.io/api/flowcontrol/v1/types.go similarity index 88% rename from vendor/k8s.io/api/flowcontrol/v1alpha1/types.go rename to vendor/k8s.io/api/flowcontrol/v1/types.go index 161411ff3..e62d23280 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1/types.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Kubernetes Authors. +Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -57,13 +57,55 @@ const ( ResponseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID" ) +const ( + // AutoUpdateAnnotationKey is the name of an annotation that enables + // automatic update of the spec of the bootstrap configuration + // object(s), if set to 'true'. + // + // On a fresh install, all bootstrap configuration objects will have auto + // update enabled with the following annotation key: + // apf.kubernetes.io/autoupdate-spec: 'true' + // + // The kube-apiserver periodically checks the bootstrap configuration + // objects on the cluster and applies updates if necessary. + // + // kube-apiserver enforces an 'always auto-update' policy for the + // mandatory configuration object(s). This implies: + // - the auto-update annotation key is added with a value of 'true' + // if it is missing. + // - the auto-update annotation key is set to 'true' if its current value + // is a boolean false or has an invalid boolean representation + // (if the cluster operator sets it to 'false' it will be stomped) + // - any changes to the spec made by the cluster operator will be + // stomped, except for changes to the `nominalConcurrencyShares` + // and `lendablePercent` fields of the PriorityLevelConfiguration + // named "exempt". 
+ // + // The kube-apiserver will apply updates on the suggested configuration if: + // - the cluster operator has enabled auto-update by setting the annotation + // (apf.kubernetes.io/autoupdate-spec: 'true') or + // - the annotation key is missing but the generation is 1 + // + // If the suggested configuration object is missing the annotation key, + // kube-apiserver will update the annotation appropriately: + // - it is set to 'true' if generation of the object is '1' which usually + // indicates that the spec of the object has not been changed. + // - it is set to 'false' if generation of the object is greater than 1. + // + // The goal is to enable the kube-apiserver to apply update on suggested + // configuration objects installed by previous releases but not overwrite + // changes made by the cluster operators. + // Note that this distinction is imperfectly detected: in the case where an + // operator deletes a suggested configuration object and later creates it + // but with a variant spec and then does no updates of the object + // (generation is 1), the technique outlined above will incorrectly + // determine that the object should be auto-updated. + AutoUpdateAnnotationKey = "apf.kubernetes.io/autoupdate-spec" +) + // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.18 -// +k8s:prerelease-lifecycle-gen:deprecated=1.20 -// +k8s:prerelease-lifecycle-gen:removed=1.21 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,FlowSchema // FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with // similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". @@ -84,10 +126,6 @@ type FlowSchema struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.18 -// +k8s:prerelease-lifecycle-gen:deprecated=1.20 -// +k8s:prerelease-lifecycle-gen:removed=1.21 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,FlowSchemaList // FlowSchemaList is a list of FlowSchema objects. type FlowSchemaList struct { @@ -314,8 +352,10 @@ type FlowSchemaStatus struct { // `conditions` is a list of the current states of FlowSchema. // +listType=map // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge // +optional - Conditions []FlowSchemaCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` + Conditions []FlowSchemaCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` } // FlowSchemaCondition describes conditions for a FlowSchema. @@ -341,10 +381,6 @@ type FlowSchemaConditionType string // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.18 -// +k8s:prerelease-lifecycle-gen:deprecated=1.20 -// +k8s:prerelease-lifecycle-gen:removed=1.21 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,PriorityLevelConfiguration // PriorityLevelConfiguration represents the configuration of a priority level. 
type PriorityLevelConfiguration struct { @@ -364,10 +400,6 @@ type PriorityLevelConfiguration struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.18 -// +k8s:prerelease-lifecycle-gen:deprecated=1.20 -// +k8s:prerelease-lifecycle-gen:removed=1.21 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,PriorityLevelConfigurationList // PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. type PriorityLevelConfigurationList struct { @@ -426,23 +458,28 @@ const ( // - How are requests for this priority level limited? // - What should be done with requests that exceed the limit? type LimitedPriorityLevelConfiguration struct { - // `assuredConcurrencyShares` (ACS) configures the execution - // limit, which is a limit on the number of requests of this - // priority level that may be exeucting at a given time. ACS must - // be a positive number. The server's concurrency limit (SCL) is - // divided among the concurrency-controlled priority levels in - // proportion to their assured concurrency shares. This produces - // the assured concurrency value (ACV) --- the number of requests - // that may be executing at a time --- for each such priority - // level: + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. + // This is the number of execution seats available at this priority level. + // This is used both for requests dispatched from this priority level + // as well as requests dispatched from other priority levels + // borrowing seats from this level. + // The server's concurrency limit (ServerCL) is divided among the + // Limited priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) + // + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // + // If not specified, this field defaults to a value of 30. // - // ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ) + // Setting this field to zero supports the construction of a + // "jail" for this priority level that is used to hold some request(s) // - // bigger numbers of ACS mean more reserved concurrent requests (at the - // expense of every other PL). - // This field has a default value of 30. // +optional - AssuredConcurrencyShares int32 `json:"assuredConcurrencyShares" protobuf:"varint,1,opt,name=assuredConcurrencyShares"` + NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares" protobuf:"varint,1,opt,name=nominalConcurrencyShares"` // `limitResponse` indicates what to do with requests that can not be executed right now LimitResponse LimitResponse `json:"limitResponse,omitempty" protobuf:"bytes,2,opt,name=limitResponse"` @@ -586,8 +623,10 @@ type PriorityLevelConfigurationStatus struct { // `conditions` is the current state of "request-priority". // +listType=map // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge // +optional - Conditions []PriorityLevelConfigurationCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` + Conditions []PriorityLevelConfigurationCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` } // PriorityLevelConfigurationCondition defines the condition of priority level. 
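
Note (editorial, not part of the upstream patch): the types.go hunk above replaces the old integer `assuredConcurrencyShares` with a pointer-valued `nominalConcurrencyShares` and documents the formula NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ). Below is a minimal standalone sketch of that arithmetic, assuming, as the field comment states, that a nil pointer takes the default of 30 and that zero is allowed to build a "jail" level. The helper names (`ncsOrDefault`, `nominalLimits`) are illustrative only and do not exist in the vendored API.

package main

import (
	"fmt"
	"math"
)

// Documented default applied when nominalConcurrencyShares is left unset (nil).
const defaultNominalConcurrencyShares = int32(30)

// ncsOrDefault returns the effective NCS value for one Limited priority level.
func ncsOrDefault(ncs *int32) int32 {
	if ncs == nil {
		return defaultNominalConcurrencyShares
	}
	return *ncs
}

// nominalLimits computes NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
// across a set of Limited priority levels.
func nominalLimits(serverCL int32, shares []*int32) []int32 {
	var sum int32
	for _, s := range shares {
		sum += ncsOrDefault(s)
	}
	out := make([]int32, len(shares))
	if sum == 0 {
		return out // no Limited level gets any nominal seats
	}
	for i, s := range shares {
		out[i] = int32(math.Ceil(float64(serverCL) * float64(ncsOrDefault(s)) / float64(sum)))
	}
	return out
}

func main() {
	thirty, hundred, zero := int32(30), int32(100), int32(0)
	// nil falls back to the default of 30; zero models the documented "jail" level.
	fmt.Println(nominalLimits(600, []*int32{nil, &thirty, &hundred, &zero}))
	// [113 113 375 0]
}

Because of the ceiling, the per-level nominal limits can add up to slightly more than ServerCL; the borrowing-related fields (`lendablePercent`, `borrowingLimitPercent`) then govern how unused seats move between levels.
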
diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1/types_swagger_doc_generated.go similarity index 95% rename from vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go rename to vendor/k8s.io/api/flowcontrol/v1/types_swagger_doc_generated.go index 1d0680c10..b8cb43636 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1/types_swagger_doc_generated.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1 // This file contains a collection of methods that can be used from go-restful to // generate Swagger API documentation for its models. Please read this PR for more @@ -122,7 +122,7 @@ func (LimitResponse) SwaggerDoc() map[string]string { var map_LimitedPriorityLevelConfiguration = map[string]string{ "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", - "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", + "nominalConcurrencyShares": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level.\n\nIf not specified, this field defaults to a value of 30.\n\nSetting this field to zero supports the construction of a \"jail\" for this priority level that is used to hold some request(s)", "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", "borrowingLimitPercent": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. 
When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.", diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1/zz_generated.deepcopy.go similarity index 99% rename from vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go rename to vendor/k8s.io/api/flowcontrol/v1/zz_generated.deepcopy.go index a5c9737aa..f37090b75 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/flowcontrol/v1/zz_generated.deepcopy.go @@ -19,7 +19,7 @@ limitations under the License. // Code generated by deepcopy-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( runtime "k8s.io/apimachinery/pkg/runtime" @@ -237,6 +237,11 @@ func (in *LimitResponse) DeepCopy() *LimitResponse { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LimitedPriorityLevelConfiguration) DeepCopyInto(out *LimitedPriorityLevelConfiguration) { *out = *in + if in.NominalConcurrencyShares != nil { + in, out := &in.NominalConcurrencyShares, &out.NominalConcurrencyShares + *out = new(int32) + **out = **in + } in.LimitResponse.DeepCopyInto(&out.LimitResponse) if in.LendablePercent != nil { in, out := &in.LendablePercent, &out.LendablePercent diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go deleted file mode 100644 index 2b6a3d3fd..000000000 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go +++ /dev/null @@ -1,122 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - schema "k8s.io/apimachinery/pkg/runtime/schema" -) - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) { - return 1, 18 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. 
-func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { - return 1, 20 -} - -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. -func (in *FlowSchema) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "FlowSchema"} -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *FlowSchema) APILifecycleRemoved() (major, minor int) { - return 1, 21 -} - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *FlowSchemaList) APILifecycleIntroduced() (major, minor int) { - return 1, 18 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { - return 1, 20 -} - -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. -func (in *FlowSchemaList) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "FlowSchemaList"} -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *FlowSchemaList) APILifecycleRemoved() (major, minor int) { - return 1, 21 -} - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *PriorityLevelConfiguration) APILifecycleIntroduced() (major, minor int) { - return 1, 18 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int) { - return 1, 20 -} - -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. 
-func (in *PriorityLevelConfiguration) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "PriorityLevelConfiguration"} -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *PriorityLevelConfiguration) APILifecycleRemoved() (major, minor int) { - return 1, 21 -} - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *PriorityLevelConfigurationList) APILifecycleIntroduced() (major, minor int) { - return 1, 18 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor int) { - return 1, 20 -} - -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. -func (in *PriorityLevelConfigurationList) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "PriorityLevelConfigurationList"} -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *PriorityLevelConfigurationList) APILifecycleRemoved() (major, minor int) { - return 1, 21 -} diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto index 96df0ace7..04b54820c 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto @@ -192,7 +192,7 @@ message LimitResponse { message LimitedPriorityLevelConfiguration { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this - // priority level that may be exeucting at a given time. ACS must + // priority level that may be executing at a given time. ACS must // be a positive number. The server's concurrency limit (SCL) is // divided among the concurrency-controlled priority levels in // proportion to their assured concurrency shares. 
This produces diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go index 9e05ff1a0..abc3e4200 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go @@ -466,7 +466,7 @@ const ( type LimitedPriorityLevelConfiguration struct { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this - // priority level that may be exeucting at a given time. ACS must + // priority level that may be executing at a given time. ACS must // be a positive number. The server's concurrency limit (SCL) is // divided among the concurrency-controlled priority levels in // proportion to their assured concurrency shares. This produces diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go index 1405f3c3c..d69bdac62 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go @@ -122,7 +122,7 @@ func (LimitResponse) SwaggerDoc() map[string]string { var map_LimitedPriorityLevelConfiguration = map[string]string{ "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", - "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", + "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be executing at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", "borrowingLimitPercent": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. 
When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.", diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto index a8c8a3273..a832114af 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto @@ -192,7 +192,7 @@ message LimitResponse { message LimitedPriorityLevelConfiguration { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this - // priority level that may be exeucting at a given time. ACS must + // priority level that may be executing at a given time. ACS must // be a positive number. The server's concurrency limit (SCL) is // divided among the concurrency-controlled priority levels in // proportion to their assured concurrency shares. This produces diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/types.go b/vendor/k8s.io/api/flowcontrol/v1beta2/types.go index e8cf7abff..c66cb173f 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/types.go @@ -466,7 +466,7 @@ const ( type LimitedPriorityLevelConfiguration struct { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this - // priority level that may be exeucting at a given time. ACS must + // priority level that may be executing at a given time. ACS must // be a positive number. The server's concurrency limit (SCL) is // divided among the concurrency-controlled priority levels in // proportion to their assured concurrency shares. This produces diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go index 49a417809..921122731 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go @@ -122,7 +122,7 @@ func (LimitResponse) SwaggerDoc() map[string]string { var map_LimitedPriorityLevelConfiguration = map[string]string{ "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", - "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", + "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be executing at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. 
This produces the assured concurrency value (ACV) ", "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", "borrowingLimitPercent": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.", diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/types.go b/vendor/k8s.io/api/flowcontrol/v1beta3/types.go index 810941557..0ffc22a23 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta3/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/types.go @@ -103,10 +103,25 @@ const ( AutoUpdateAnnotationKey = "apf.kubernetes.io/autoupdate-spec" ) +const ( + // This annotation is only for use in v1beta3. + // + // The presence of this annotation in a v1beta3 object means that + // a zero value in the 'NominalConcurrencyShares' field means zero + // rather than the old default of 30. + // + // To set a zero value for the 'NominalConcurrencyShares' field in v1beta3, + // set the annotation to an empty string: + // "flowcontrol.k8s.io/v1beta3-preserve-zero-concurrency-shares": "" + // + PriorityLevelPreserveZeroConcurrencySharesKey = "flowcontrol.k8s.io/v1beta3-preserve-zero-concurrency-shares" +) + // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1,FlowSchema // FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with // similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". @@ -128,6 +143,7 @@ type FlowSchema struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1,FlowSchemaList // FlowSchemaList is a list of FlowSchema objects. type FlowSchemaList struct { @@ -384,6 +400,7 @@ type FlowSchemaConditionType string // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1,PriorityLevelConfiguration // PriorityLevelConfiguration represents the configuration of a priority level. 
type PriorityLevelConfiguration struct { @@ -404,6 +421,7 @@ type PriorityLevelConfiguration struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1,PriorityLevelConfigurationList // PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. type PriorityLevelConfigurationList struct { diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.prerelease-lifecycle.go index 24b761385..7e46a1469 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.prerelease-lifecycle.go @@ -21,6 +21,10 @@ limitations under the License. package v1beta3 +import ( + schema "k8s.io/apimachinery/pkg/runtime/schema" +) + // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) { @@ -33,6 +37,12 @@ func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { return 1, 29 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *FlowSchema) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "FlowSchema"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *FlowSchema) APILifecycleRemoved() (major, minor int) { @@ -51,6 +61,12 @@ func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { return 1, 29 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *FlowSchemaList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "FlowSchemaList"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *FlowSchemaList) APILifecycleRemoved() (major, minor int) { @@ -69,6 +85,12 @@ func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int return 1, 29 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. 
+func (in *PriorityLevelConfiguration) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "PriorityLevelConfiguration"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *PriorityLevelConfiguration) APILifecycleRemoved() (major, minor int) { @@ -87,6 +109,12 @@ func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor return 1, 29 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *PriorityLevelConfigurationList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "PriorityLevelConfigurationList"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *PriorityLevelConfigurationList) APILifecycleRemoved() (major, minor int) { diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go b/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go index f54d1f824..949ea513f 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go @@ -25,14 +25,12 @@ import ( io "io" proto "github.com/gogo/protobuf/proto" - v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" math_bits "math/bits" reflect "reflect" strings "strings" - - k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" ) // Reference imports to suppress errors if they are not otherwise used. @@ -46,15 +44,15 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func (m *ClusterCIDR) Reset() { *m = ClusterCIDR{} } -func (*ClusterCIDR) ProtoMessage() {} -func (*ClusterCIDR) Descriptor() ([]byte, []int) { +func (m *IPAddress) Reset() { *m = IPAddress{} } +func (*IPAddress) ProtoMessage() {} +func (*IPAddress) Descriptor() ([]byte, []int) { return fileDescriptor_c1b7ac8d7d97acec, []int{0} } -func (m *ClusterCIDR) XXX_Unmarshal(b []byte) error { +func (m *IPAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ClusterCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -62,27 +60,27 @@ func (m *ClusterCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) } return b[:n], nil } -func (m *ClusterCIDR) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterCIDR.Merge(m, src) +func (m *IPAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddress.Merge(m, src) } -func (m *ClusterCIDR) XXX_Size() int { +func (m *IPAddress) XXX_Size() int { return m.Size() } -func (m *ClusterCIDR) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterCIDR.DiscardUnknown(m) +func (m *IPAddress) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddress.DiscardUnknown(m) } -var xxx_messageInfo_ClusterCIDR proto.InternalMessageInfo +var xxx_messageInfo_IPAddress proto.InternalMessageInfo -func (m *ClusterCIDRList) Reset() { *m = ClusterCIDRList{} } -func (*ClusterCIDRList) ProtoMessage() {} -func (*ClusterCIDRList) Descriptor() ([]byte, []int) { +func (m *IPAddressList) Reset() { *m = IPAddressList{} } +func (*IPAddressList) ProtoMessage() {} +func (*IPAddressList) Descriptor() ([]byte, []int) { return fileDescriptor_c1b7ac8d7d97acec, []int{1} } -func (m *ClusterCIDRList) XXX_Unmarshal(b []byte) error { +func (m *IPAddressList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ClusterCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -90,27 +88,27 @@ func (m *ClusterCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, err } return b[:n], nil } -func (m *ClusterCIDRList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterCIDRList.Merge(m, src) +func (m *IPAddressList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddressList.Merge(m, src) } -func (m *ClusterCIDRList) XXX_Size() int { +func (m *IPAddressList) XXX_Size() int { return m.Size() } -func (m *ClusterCIDRList) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterCIDRList.DiscardUnknown(m) +func (m *IPAddressList) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressList.DiscardUnknown(m) } -var xxx_messageInfo_ClusterCIDRList proto.InternalMessageInfo +var xxx_messageInfo_IPAddressList proto.InternalMessageInfo -func (m *ClusterCIDRSpec) Reset() { *m = ClusterCIDRSpec{} } -func (*ClusterCIDRSpec) ProtoMessage() {} -func (*ClusterCIDRSpec) Descriptor() ([]byte, []int) { +func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} } +func (*IPAddressSpec) ProtoMessage() {} +func (*IPAddressSpec) Descriptor() ([]byte, []int) { return fileDescriptor_c1b7ac8d7d97acec, []int{2} } -func (m *ClusterCIDRSpec) XXX_Unmarshal(b []byte) error { +func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ClusterCIDRSpec) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { +func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -118,27 +116,27 @@ func (m *ClusterCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, err } return b[:n], nil } -func (m *ClusterCIDRSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterCIDRSpec.Merge(m, src) +func (m *IPAddressSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddressSpec.Merge(m, src) } -func (m *ClusterCIDRSpec) XXX_Size() int { +func (m *IPAddressSpec) XXX_Size() int { return m.Size() } -func (m *ClusterCIDRSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterCIDRSpec.DiscardUnknown(m) +func (m *IPAddressSpec) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressSpec.DiscardUnknown(m) } -var xxx_messageInfo_ClusterCIDRSpec proto.InternalMessageInfo +var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo -func (m *IPAddress) Reset() { *m = IPAddress{} } -func (*IPAddress) ProtoMessage() {} -func (*IPAddress) Descriptor() ([]byte, []int) { +func (m *ParentReference) Reset() { *m = ParentReference{} } +func (*ParentReference) ProtoMessage() {} +func (*ParentReference) Descriptor() ([]byte, []int) { return fileDescriptor_c1b7ac8d7d97acec, []int{3} } -func (m *IPAddress) XXX_Unmarshal(b []byte) error { +func (m *ParentReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -146,27 +144,27 @@ func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { } return b[:n], nil } -func (m *IPAddress) XXX_Merge(src proto.Message) { - xxx_messageInfo_IPAddress.Merge(m, src) +func (m *ParentReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParentReference.Merge(m, src) } -func (m *IPAddress) XXX_Size() int { +func (m *ParentReference) XXX_Size() int { return m.Size() } -func (m *IPAddress) XXX_DiscardUnknown() { - xxx_messageInfo_IPAddress.DiscardUnknown(m) +func (m *ParentReference) XXX_DiscardUnknown() { + xxx_messageInfo_ParentReference.DiscardUnknown(m) } -var xxx_messageInfo_IPAddress proto.InternalMessageInfo +var xxx_messageInfo_ParentReference proto.InternalMessageInfo -func (m *IPAddressList) Reset() { *m = IPAddressList{} } -func (*IPAddressList) ProtoMessage() {} -func (*IPAddressList) Descriptor() ([]byte, []int) { +func (m *ServiceCIDR) Reset() { *m = ServiceCIDR{} } +func (*ServiceCIDR) ProtoMessage() {} +func (*ServiceCIDR) Descriptor() ([]byte, []int) { return fileDescriptor_c1b7ac8d7d97acec, []int{4} } -func (m *IPAddressList) XXX_Unmarshal(b []byte) error { +func (m *ServiceCIDR) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ServiceCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -174,27 +172,27 @@ func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error } return b[:n], nil } -func (m *IPAddressList) XXX_Merge(src proto.Message) { - xxx_messageInfo_IPAddressList.Merge(m, src) +func (m *ServiceCIDR) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDR.Merge(m, src) } -func (m *IPAddressList) XXX_Size() int { +func (m *ServiceCIDR) XXX_Size() int { return 
m.Size() } -func (m *IPAddressList) XXX_DiscardUnknown() { - xxx_messageInfo_IPAddressList.DiscardUnknown(m) +func (m *ServiceCIDR) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDR.DiscardUnknown(m) } -var xxx_messageInfo_IPAddressList proto.InternalMessageInfo +var xxx_messageInfo_ServiceCIDR proto.InternalMessageInfo -func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} } -func (*IPAddressSpec) ProtoMessage() {} -func (*IPAddressSpec) Descriptor() ([]byte, []int) { +func (m *ServiceCIDRList) Reset() { *m = ServiceCIDRList{} } +func (*ServiceCIDRList) ProtoMessage() {} +func (*ServiceCIDRList) Descriptor() ([]byte, []int) { return fileDescriptor_c1b7ac8d7d97acec, []int{5} } -func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error { +func (m *ServiceCIDRList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ServiceCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -202,27 +200,27 @@ func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error } return b[:n], nil } -func (m *IPAddressSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_IPAddressSpec.Merge(m, src) +func (m *ServiceCIDRList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDRList.Merge(m, src) } -func (m *IPAddressSpec) XXX_Size() int { +func (m *ServiceCIDRList) XXX_Size() int { return m.Size() } -func (m *IPAddressSpec) XXX_DiscardUnknown() { - xxx_messageInfo_IPAddressSpec.DiscardUnknown(m) +func (m *ServiceCIDRList) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDRList.DiscardUnknown(m) } -var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo +var xxx_messageInfo_ServiceCIDRList proto.InternalMessageInfo -func (m *ParentReference) Reset() { *m = ParentReference{} } -func (*ParentReference) ProtoMessage() {} -func (*ParentReference) Descriptor() ([]byte, []int) { +func (m *ServiceCIDRSpec) Reset() { *m = ServiceCIDRSpec{} } +func (*ServiceCIDRSpec) ProtoMessage() {} +func (*ServiceCIDRSpec) Descriptor() ([]byte, []int) { return fileDescriptor_c1b7ac8d7d97acec, []int{6} } -func (m *ParentReference) XXX_Unmarshal(b []byte) error { +func (m *ServiceCIDRSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ServiceCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -230,26 +228,55 @@ func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, err } return b[:n], nil } -func (m *ParentReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_ParentReference.Merge(m, src) +func (m *ServiceCIDRSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDRSpec.Merge(m, src) } -func (m *ParentReference) XXX_Size() int { +func (m *ServiceCIDRSpec) XXX_Size() int { return m.Size() } -func (m *ParentReference) XXX_DiscardUnknown() { - xxx_messageInfo_ParentReference.DiscardUnknown(m) +func (m *ServiceCIDRSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDRSpec.DiscardUnknown(m) } -var xxx_messageInfo_ParentReference proto.InternalMessageInfo +var xxx_messageInfo_ServiceCIDRSpec proto.InternalMessageInfo + +func (m *ServiceCIDRStatus) Reset() { *m = ServiceCIDRStatus{} } +func (*ServiceCIDRStatus) ProtoMessage() {} +func (*ServiceCIDRStatus) Descriptor() ([]byte, []int) { + return 
fileDescriptor_c1b7ac8d7d97acec, []int{7} +} +func (m *ServiceCIDRStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDRStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDRStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDRStatus.Merge(m, src) +} +func (m *ServiceCIDRStatus) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDRStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDRStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDRStatus proto.InternalMessageInfo func init() { - proto.RegisterType((*ClusterCIDR)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDR") - proto.RegisterType((*ClusterCIDRList)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDRList") - proto.RegisterType((*ClusterCIDRSpec)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDRSpec") proto.RegisterType((*IPAddress)(nil), "k8s.io.api.networking.v1alpha1.IPAddress") proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1alpha1.IPAddressList") proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1alpha1.IPAddressSpec") proto.RegisterType((*ParentReference)(nil), "k8s.io.api.networking.v1alpha1.ParentReference") + proto.RegisterType((*ServiceCIDR)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDR") + proto.RegisterType((*ServiceCIDRList)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRList") + proto.RegisterType((*ServiceCIDRSpec)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRSpec") + proto.RegisterType((*ServiceCIDRStatus)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRStatus") } func init() { @@ -257,54 +284,51 @@ func init() { } var fileDescriptor_c1b7ac8d7d97acec = []byte{ - // 698 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x95, 0xcf, 0x4e, 0xdb, 0x4a, - 0x14, 0xc6, 0x63, 0x92, 0x48, 0x78, 0x00, 0x85, 0xeb, 0xcd, 0x8d, 0x58, 0x38, 0xb9, 0xb9, 0x1b, - 0xae, 0x6e, 0x19, 0x03, 0x42, 0x51, 0xb7, 0x98, 0x48, 0x34, 0x52, 0x0b, 0xe9, 0x20, 0xba, 0xa8, - 0x58, 0xd4, 0xb1, 0x0f, 0x8e, 0x1b, 0xfc, 0x47, 0x33, 0xe3, 0x54, 0xec, 0xfa, 0x08, 0x7d, 0xa1, - 0x56, 0x6a, 0x57, 0x2c, 0x59, 0xb2, 0x8a, 0x8a, 0xfb, 0x02, 0x5d, 0xb7, 0x9b, 0x6a, 0x26, 0x4e, - 0xec, 0x24, 0x0d, 0xd0, 0x0d, 0xbb, 0xcc, 0x39, 0xbf, 0xf3, 0xcd, 0x39, 0x73, 0xbe, 0x24, 0xe8, - 0xb0, 0xff, 0x94, 0x61, 0x2f, 0x34, 0xfa, 0x71, 0x17, 0x68, 0x00, 0x1c, 0x98, 0x31, 0x80, 0xc0, - 0x09, 0xa9, 0x91, 0x26, 0xac, 0xc8, 0x33, 0x02, 0xe0, 0xef, 0x42, 0xda, 0xf7, 0x02, 0xd7, 0x18, - 0xec, 0x58, 0x17, 0x51, 0xcf, 0xda, 0x31, 0x5c, 0x08, 0x80, 0x5a, 0x1c, 0x1c, 0x1c, 0xd1, 0x90, - 0x87, 0x9a, 0x3e, 0xe2, 0xb1, 0x15, 0x79, 0x38, 0xe3, 0xf1, 0x98, 0xdf, 0xd8, 0x72, 0x3d, 0xde, - 0x8b, 0xbb, 0xd8, 0x0e, 0x7d, 0xc3, 0x0d, 0xdd, 0xd0, 0x90, 0x65, 0xdd, 0xf8, 0x5c, 0x9e, 0xe4, - 0x41, 0x7e, 0x1a, 0xc9, 0x6d, 0x34, 0x72, 0xd7, 0xdb, 0x21, 0x05, 0x63, 0x30, 0x77, 0xe5, 0xc6, - 0x5e, 0xc6, 0xf8, 0x96, 0xdd, 0xf3, 0x02, 0xa0, 0x97, 0x46, 0xd4, 0x77, 0x45, 0x80, 0x19, 0x3e, - 0x70, 0xeb, 0x77, 0x55, 0xc6, 0xa2, 0x2a, 0x1a, 0x07, 0xdc, 0xf3, 0x61, 0xae, 0xa0, 0x79, 0x5f, - 0x01, 0xb3, 0x7b, 0xe0, 0x5b, 0xb3, 0x75, 0x8d, 0x2f, 0x0a, 0x5a, 0x39, 0xb8, 0x88, 0x19, 0x07, - 0x7a, 0xd0, 0x6e, 0x11, 0xed, 0x0d, 0x5a, 0x16, 0x3d, 0x39, 0x16, 0xb7, 0xaa, 0x4a, 0x5d, 0xd9, - 0x5c, 0xd9, 0xdd, 0xc6, 0xd9, 0xa3, 0x4d, 0xa4, 0x71, 0xd4, 0x77, 0x45, 0x80, 0x61, 0x41, 0xe3, - 0xc1, 0x0e, 
0x3e, 0xee, 0xbe, 0x05, 0x9b, 0xbf, 0x00, 0x6e, 0x99, 0xda, 0xd5, 0xb0, 0x56, 0x48, - 0x86, 0x35, 0x94, 0xc5, 0xc8, 0x44, 0x55, 0x7b, 0x89, 0x4a, 0x2c, 0x02, 0xbb, 0xba, 0x24, 0xd5, - 0x0d, 0x7c, 0xf7, 0x4a, 0x70, 0xae, 0xb9, 0x93, 0x08, 0x6c, 0x73, 0x35, 0x15, 0x2f, 0x89, 0x13, - 0x91, 0x52, 0x8d, 0xcf, 0x0a, 0xaa, 0xe4, 0xb8, 0xe7, 0x1e, 0xe3, 0xda, 0xd9, 0xdc, 0x20, 0xf8, - 0x61, 0x83, 0x88, 0x6a, 0x39, 0xc6, 0x7a, 0x7a, 0xd3, 0xf2, 0x38, 0x92, 0x1b, 0xa2, 0x83, 0xca, - 0x1e, 0x07, 0x9f, 0x55, 0x97, 0xea, 0xc5, 0xcd, 0x95, 0xdd, 0xff, 0xff, 0x60, 0x0a, 0x73, 0x2d, - 0xd5, 0x2d, 0xb7, 0x85, 0x02, 0x19, 0x09, 0x35, 0xbe, 0x4f, 0xcf, 0x20, 0xa6, 0xd3, 0x5e, 0xa1, - 0xd5, 0x20, 0x74, 0xe0, 0x04, 0x2e, 0xc0, 0xe6, 0x21, 0x4d, 0xe7, 0xa8, 0xe7, 0x2f, 0x13, 0xb6, - 0x13, 0x5d, 0x1f, 0xe5, 0x38, 0x73, 0x3d, 0x19, 0xd6, 0x56, 0xf3, 0x11, 0x32, 0xa5, 0xa3, 0xed, - 0xa3, 0x4a, 0x04, 0x54, 0x00, 0xcf, 0x42, 0xc6, 0x4d, 0x8f, 0x33, 0xb9, 0x8d, 0xb2, 0xf9, 0x77, - 0xda, 0x5a, 0xa5, 0x33, 0x9d, 0x26, 0xb3, 0xbc, 0x56, 0x47, 0x25, 0x2f, 0x1a, 0xec, 0x55, 0x8b, - 0x75, 0x65, 0x53, 0xcd, 0x96, 0xd2, 0xee, 0x0c, 0xf6, 0x88, 0xcc, 0xa4, 0x44, 0xb3, 0x5a, 0x9a, - 0x23, 0x9a, 0x92, 0x68, 0x36, 0x3e, 0x29, 0x48, 0x6d, 0x77, 0xf6, 0x1d, 0x87, 0x02, 0x63, 0x8f, - 0xe0, 0xbc, 0xe3, 0x29, 0xe7, 0x6d, 0xdd, 0xb7, 0xb3, 0x49, 0x6b, 0x0b, 0x7d, 0xf7, 0x51, 0x41, - 0x6b, 0x13, 0xea, 0x11, 0x5c, 0x77, 0x34, 0xed, 0xba, 0xff, 0x1e, 0x3c, 0xc1, 0x02, 0xcf, 0xf9, - 0xb9, 0xf6, 0xa5, 0xe1, 0xce, 0x90, 0x1a, 0x59, 0x14, 0x02, 0x4e, 0xe0, 0x3c, 0xed, 0xff, 0xde, - 0x2f, 0x68, 0x67, 0x5c, 0x00, 0x14, 0x02, 0x1b, 0xcc, 0xb5, 0x64, 0x58, 0x53, 0x27, 0x41, 0x92, - 0x09, 0x36, 0x7e, 0x2a, 0xa8, 0x32, 0x43, 0x6b, 0xff, 0xa2, 0xb2, 0x4b, 0xc3, 0x38, 0x92, 0xb7, - 0xa9, 0x59, 0x9f, 0x87, 0x22, 0x48, 0x46, 0x39, 0xed, 0x09, 0x5a, 0xa6, 0xc0, 0xc2, 0x98, 0xda, - 0x20, 0x97, 0xa7, 0x66, 0xaf, 0x44, 0xd2, 0x38, 0x99, 0x10, 0x9a, 0x81, 0xd4, 0xc0, 0xf2, 0x81, - 0x45, 0x96, 0x0d, 0xa9, 0x3f, 0xff, 0x4a, 0x71, 0xf5, 0x68, 0x9c, 0x20, 0x19, 0x23, 0x9c, 0x2a, - 0x0e, 0xb3, 0x4e, 0x15, 0x2c, 0x91, 0x19, 0xcd, 0x44, 0xc5, 0xd8, 0x73, 0xaa, 0x65, 0x09, 0x6c, - 0xa7, 0x40, 0xf1, 0xb4, 0xdd, 0xfa, 0x31, 0xac, 0xfd, 0xb3, 0xe8, 0x97, 0x97, 0x5f, 0x46, 0xc0, - 0xf0, 0x69, 0xbb, 0x45, 0x44, 0xb1, 0xd9, 0xba, 0xba, 0xd5, 0x0b, 0xd7, 0xb7, 0x7a, 0xe1, 0xe6, - 0x56, 0x2f, 0xbc, 0x4f, 0x74, 0xe5, 0x2a, 0xd1, 0x95, 0xeb, 0x44, 0x57, 0x6e, 0x12, 0x5d, 0xf9, - 0x9a, 0xe8, 0xca, 0x87, 0x6f, 0x7a, 0xe1, 0xb5, 0x7e, 0xf7, 0x3f, 0xda, 0xaf, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xf9, 0x9d, 0x9e, 0xc6, 0x0b, 0x07, 0x00, 0x00, -} - -func (m *ClusterCIDR) Marshal() (dAtA []byte, err error) { + // 648 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x6e, 0xd3, 0x4c, + 0x14, 0x8d, 0xdb, 0xa4, 0xaa, 0x27, 0x5f, 0xbf, 0x52, 0xaf, 0xa2, 0x2e, 0x9c, 0x28, 0x6c, 0x8a, + 0xa0, 0x33, 0x24, 0x42, 0x88, 0x2d, 0x6e, 0xa5, 0xaa, 0x12, 0xb4, 0x65, 0xba, 0x02, 0x75, 0xc1, + 0xc4, 0xbe, 0x75, 0x4c, 0xf0, 0x8f, 0x66, 0xc6, 0x01, 0x76, 0x3c, 0x02, 0x2f, 0xc0, 0x73, 0xb0, + 0x02, 0x89, 0x5d, 0x97, 0x5d, 0x76, 0x55, 0x51, 0xf3, 0x22, 0x68, 0xc6, 0x8e, 0x9d, 0x34, 0xea, + 0xdf, 0xa6, 0x3b, 0xcf, 0xb9, 0xe7, 0x9c, 0xb9, 0xe7, 0xce, 0x8c, 0x8c, 0x76, 0x46, 0x2f, 0x04, + 0x0e, 0x62, 0x32, 0x4a, 0x07, 0xc0, 0x23, 0x90, 0x20, 0xc8, 0x18, 0x22, 0x2f, 0xe6, 0xa4, 0x28, + 0xb0, 0x24, 0x20, 0x11, 0xc8, 0x4f, 0x31, 0x1f, 0x05, 0x91, 0x4f, 0xc6, 0x3d, 0xf6, 0x31, 0x19, + 0xb2, 0x1e, 0xf1, 0x21, 0x02, 0xce, 0x24, 0x78, 0x38, 0xe1, 
0xb1, 0x8c, 0x2d, 0x3b, 0xe7, 0x63, + 0x96, 0x04, 0xb8, 0xe2, 0xe3, 0x09, 0x7f, 0x7d, 0xd3, 0x0f, 0xe4, 0x30, 0x1d, 0x60, 0x37, 0x0e, + 0x89, 0x1f, 0xfb, 0x31, 0xd1, 0xb2, 0x41, 0x7a, 0xac, 0x57, 0x7a, 0xa1, 0xbf, 0x72, 0xbb, 0xf5, + 0x67, 0xd5, 0xf6, 0x21, 0x73, 0x87, 0x41, 0x04, 0xfc, 0x0b, 0x49, 0x46, 0xbe, 0x02, 0x04, 0x09, + 0x41, 0x32, 0x32, 0x9e, 0x6b, 0x62, 0x9d, 0x5c, 0xa5, 0xe2, 0x69, 0x24, 0x83, 0x10, 0xe6, 0x04, + 0xcf, 0x6f, 0x12, 0x08, 0x77, 0x08, 0x21, 0xbb, 0xac, 0xeb, 0xfe, 0x32, 0x90, 0xb9, 0x7b, 0xf0, + 0xd2, 0xf3, 0x38, 0x08, 0x61, 0xbd, 0x47, 0xcb, 0xaa, 0x23, 0x8f, 0x49, 0xd6, 0x32, 0x3a, 0xc6, + 0x46, 0xb3, 0xff, 0x14, 0x57, 0xe3, 0x28, 0x8d, 0x71, 0x32, 0xf2, 0x15, 0x20, 0xb0, 0x62, 0xe3, + 0x71, 0x0f, 0xef, 0x0f, 0x3e, 0x80, 0x2b, 0x5f, 0x83, 0x64, 0x8e, 0x75, 0x72, 0xde, 0xae, 0x65, + 0xe7, 0x6d, 0x54, 0x61, 0xb4, 0x74, 0xb5, 0xf6, 0x51, 0x5d, 0x24, 0xe0, 0xb6, 0x16, 0xb4, 0xfb, + 0x26, 0xbe, 0x7e, 0xd8, 0xb8, 0x6c, 0xed, 0x30, 0x01, 0xd7, 0xf9, 0xaf, 0xb0, 0xae, 0xab, 0x15, + 0xd5, 0x46, 0xdd, 0x9f, 0x06, 0x5a, 0x29, 0x59, 0xaf, 0x02, 0x21, 0xad, 0xa3, 0xb9, 0x10, 0xf8, + 0x76, 0x21, 0x94, 0x5a, 0x47, 0x78, 0x50, 0xec, 0xb3, 0x3c, 0x41, 0xa6, 0x02, 0xec, 0xa1, 0x46, + 0x20, 0x21, 0x14, 0xad, 0x85, 0xce, 0xe2, 0x46, 0xb3, 0xff, 0xe8, 0xd6, 0x09, 0x9c, 0x95, 0xc2, + 0xb5, 0xb1, 0xab, 0xf4, 0x34, 0xb7, 0xe9, 0x86, 0x53, 0xed, 0xab, 0x58, 0xd6, 0x11, 0x32, 0x13, + 0xc6, 0x21, 0x92, 0x14, 0x8e, 0x8b, 0xfe, 0xc9, 0x4d, 0x9b, 0x1c, 0x4c, 0x04, 0xc0, 0x21, 0x72, + 0xc1, 0x59, 0xc9, 0xce, 0xdb, 0x66, 0x09, 0xd2, 0xca, 0xb0, 0xfb, 0xc3, 0x40, 0xab, 0x97, 0xd8, + 0xd6, 0x43, 0xd4, 0xf0, 0x79, 0x9c, 0x26, 0x7a, 0x37, 0xb3, 0xea, 0x73, 0x47, 0x81, 0x34, 0xaf, + 0x59, 0x4f, 0xd0, 0x32, 0x07, 0x11, 0xa7, 0xdc, 0x05, 0x7d, 0x78, 0x66, 0x35, 0x25, 0x5a, 0xe0, + 0xb4, 0x64, 0x58, 0x04, 0x99, 0x11, 0x0b, 0x41, 0x24, 0xcc, 0x85, 0xd6, 0xa2, 0xa6, 0xaf, 0x15, + 0x74, 0x73, 0x6f, 0x52, 0xa0, 0x15, 0xc7, 0xea, 0xa0, 0xba, 0x5a, 0xb4, 0xea, 0x9a, 0x5b, 0x1e, + 0xb4, 0xe2, 0x52, 0x5d, 0xe9, 0x7e, 0x5f, 0x40, 0xcd, 0x43, 0xe0, 0xe3, 0xc0, 0x85, 0xad, 0xdd, + 0x6d, 0x7a, 0x0f, 0x77, 0xf5, 0xcd, 0xcc, 0x5d, 0xbd, 0xf1, 0x10, 0xa6, 0x9a, 0xbb, 0xea, 0xb6, + 0x5a, 0x6f, 0xd1, 0x92, 0x90, 0x4c, 0xa6, 0x42, 0x0f, 0xa5, 0xd9, 0xef, 0xdd, 0xc5, 0x54, 0x0b, + 0x9d, 0xff, 0x0b, 0xdb, 0xa5, 0x7c, 0x4d, 0x0b, 0xc3, 0xee, 0x6f, 0x03, 0xad, 0x4e, 0xb1, 0xef, + 0xe1, 0x29, 0x1c, 0xcc, 0x3e, 0x85, 0xc7, 0x77, 0xc8, 0x72, 0xc5, 0x63, 0xe8, 0xcf, 0x44, 0xd0, + 0xcf, 0xa1, 0x8d, 0x1a, 0x6e, 0xe0, 0x71, 0xd1, 0x32, 0x3a, 0x8b, 0x1b, 0xa6, 0x63, 0x2a, 0x8d, + 0x2a, 0x0a, 0x9a, 0xe3, 0xdd, 0xcf, 0x68, 0x6d, 0x6e, 0x48, 0x96, 0x8b, 0x90, 0x1b, 0x47, 0x5e, + 0x20, 0x83, 0x38, 0xca, 0xa5, 0xb3, 0x07, 0x78, 0x4d, 0xf4, 0xad, 0x89, 0xae, 0xba, 0x1d, 0x25, + 0x24, 0xe8, 0x94, 0xad, 0xb3, 0x7d, 0x72, 0x61, 0xd7, 0x4e, 0x2f, 0xec, 0xda, 0xd9, 0x85, 0x5d, + 0xfb, 0x9a, 0xd9, 0xc6, 0x49, 0x66, 0x1b, 0xa7, 0x99, 0x6d, 0x9c, 0x65, 0xb6, 0xf1, 0x27, 0xb3, + 0x8d, 0x6f, 0x7f, 0xed, 0xda, 0x3b, 0xfb, 0xfa, 0xff, 0xcf, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x29, 0x82, 0x11, 0x57, 0xb9, 0x06, 0x00, 0x00, +} + +func (m *IPAddress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -314,12 +338,12 @@ func (m *ClusterCIDR) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ClusterCIDR) MarshalTo(dAtA []byte) (int, error) { +func (m *IPAddress) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return 
m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ClusterCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -347,7 +371,7 @@ func (m *ClusterCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ClusterCIDRList) Marshal() (dAtA []byte, err error) { +func (m *IPAddressList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -357,12 +381,12 @@ func (m *ClusterCIDRList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ClusterCIDRList) MarshalTo(dAtA []byte) (int, error) { +func (m *IPAddressList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ClusterCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -394,7 +418,7 @@ func (m *ClusterCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ClusterCIDRSpec) Marshal() (dAtA []byte, err error) { +func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -404,32 +428,19 @@ func (m *ClusterCIDRSpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ClusterCIDRSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *IPAddressSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ClusterCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IPAddressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.IPv6) - copy(dAtA[i:], m.IPv6) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPv6))) - i-- - dAtA[i] = 0x22 - i -= len(m.IPv4) - copy(dAtA[i:], m.IPv4) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPv4))) - i-- - dAtA[i] = 0x1a - i = encodeVarintGenerated(dAtA, i, uint64(m.PerNodeHostBits)) - i-- - dAtA[i] = 0x10 - if m.NodeSelector != nil { + if m.ParentRef != nil { { - size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ParentRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -442,7 +453,7 @@ func (m *ClusterCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *IPAddress) Marshal() (dAtA []byte, err error) { +func (m *ParentReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -452,16 +463,69 @@ func (m *IPAddress) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IPAddress) MarshalTo(dAtA []byte) (int, error) { +func (m *ParentReference) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ParentReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceCIDR) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceCIDR) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a { size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -485,7 +549,7 @@ func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *IPAddressList) Marshal() (dAtA []byte, err error) { +func (m *ServiceCIDRList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -495,12 +559,12 @@ func (m *IPAddressList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IPAddressList) MarshalTo(dAtA []byte) (int, error) { +func (m *ServiceCIDRList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ServiceCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -532,7 +596,7 @@ func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) { +func (m *ServiceCIDRSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -542,32 +606,29 @@ func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IPAddressSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *ServiceCIDRSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IPAddressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ServiceCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.ParentRef != nil { - { - size, err := m.ParentRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.CIDRs) > 0 { + for iNdEx := len(m.CIDRs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.CIDRs[iNdEx]) + copy(dAtA[i:], m.CIDRs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDRs[iNdEx]))) + i-- + dAtA[i] = 0xa } - i-- - dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ParentReference) Marshal() (dAtA []byte, err error) { +func (m *ServiceCIDRStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -577,41 +638,30 @@ func (m *ParentReference) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ParentReference) MarshalTo(dAtA []byte) (int, error) { +func (m *ServiceCIDRStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return 
m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ParentReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ServiceCIDRStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0x2a - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x22 - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x1a - i -= len(m.Resource) - copy(dAtA[i:], m.Resource) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i-- - dAtA[i] = 0x12 - i -= len(m.Group) - copy(dAtA[i:], m.Group) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i-- - dAtA[i] = 0xa + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } @@ -626,7 +676,7 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } -func (m *ClusterCIDR) Size() (n int) { +func (m *IPAddress) Size() (n int) { if m == nil { return 0 } @@ -639,7 +689,7 @@ func (m *ClusterCIDR) Size() (n int) { return n } -func (m *ClusterCIDRList) Size() (n int) { +func (m *IPAddressList) Size() (n int) { if m == nil { return 0 } @@ -656,25 +706,37 @@ func (m *ClusterCIDRList) Size() (n int) { return n } -func (m *ClusterCIDRSpec) Size() (n int) { +func (m *IPAddressSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.NodeSelector != nil { - l = m.NodeSelector.Size() + if m.ParentRef != nil { + l = m.ParentRef.Size() n += 1 + l + sovGenerated(uint64(l)) } - n += 1 + sovGenerated(uint64(m.PerNodeHostBits)) - l = len(m.IPv4) + return n +} + +func (m *ParentReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.IPv6) + l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *IPAddress) Size() (n int) { +func (m *ServiceCIDR) Size() (n int) { if m == nil { return 0 } @@ -684,10 +746,12 @@ func (m *IPAddress) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *IPAddressList) Size() (n int) { +func (m *ServiceCIDRList) Size() (n int) { if m == nil { return 0 } @@ -704,35 +768,33 @@ func (m *IPAddressList) Size() (n int) { return n } -func (m *IPAddressSpec) Size() (n int) { +func (m *ServiceCIDRSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.ParentRef != nil { - l = m.ParentRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.CIDRs) > 0 { + for _, s := range m.CIDRs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } return n } -func (m *ParentReference) Size() (n int) { +func (m *ServiceCIDRStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Group) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Resource) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + 
sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -742,93 +804,105 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *ClusterCIDR) String() string { +func (this *IPAddress) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&ClusterCIDR{`, + s := strings.Join([]string{`&IPAddress{`, `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterCIDRSpec", "ClusterCIDRSpec", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IPAddressSpec", "IPAddressSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *ClusterCIDRList) String() string { +func (this *IPAddressList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]ClusterCIDR{" + repeatedStringForItems := "[]IPAddress{" for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterCIDR", "ClusterCIDR", 1), `&`, ``, 1) + "," + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IPAddress", "IPAddress", 1), `&`, ``, 1) + "," } repeatedStringForItems += "}" - s := strings.Join([]string{`&ClusterCIDRList{`, + s := strings.Join([]string{`&IPAddressList{`, `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s } -func (this *ClusterCIDRSpec) String() string { +func (this *IPAddressSpec) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&ClusterCIDRSpec{`, - `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, - `PerNodeHostBits:` + fmt.Sprintf("%v", this.PerNodeHostBits) + `,`, - `IPv4:` + fmt.Sprintf("%v", this.IPv4) + `,`, - `IPv6:` + fmt.Sprintf("%v", this.IPv6) + `,`, + s := strings.Join([]string{`&IPAddressSpec{`, + `ParentRef:` + strings.Replace(this.ParentRef.String(), "ParentReference", "ParentReference", 1) + `,`, `}`, }, "") return s } -func (this *IPAddress) String() string { +func (this *ParentReference) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&IPAddress{`, + s := strings.Join([]string{`&ParentReference{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceCIDR) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceCIDR{`, `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IPAddressSpec", "IPAddressSpec", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceCIDRSpec", "ServiceCIDRSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), 
"ServiceCIDRStatus", "ServiceCIDRStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *IPAddressList) String() string { +func (this *ServiceCIDRList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]IPAddress{" + repeatedStringForItems := "[]ServiceCIDR{" for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IPAddress", "IPAddress", 1), `&`, ``, 1) + "," + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ServiceCIDR", "ServiceCIDR", 1), `&`, ``, 1) + "," } repeatedStringForItems += "}" - s := strings.Join([]string{`&IPAddressList{`, + s := strings.Join([]string{`&ServiceCIDRList{`, `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s } -func (this *IPAddressSpec) String() string { +func (this *ServiceCIDRSpec) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&IPAddressSpec{`, - `ParentRef:` + strings.Replace(this.ParentRef.String(), "ParentReference", "ParentReference", 1) + `,`, + s := strings.Join([]string{`&ServiceCIDRSpec{`, + `CIDRs:` + fmt.Sprintf("%v", this.CIDRs) + `,`, `}`, }, "") return s } -func (this *ParentReference) String() string { +func (this *ServiceCIDRStatus) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&ParentReference{`, - `Group:` + fmt.Sprintf("%v", this.Group) + `,`, - `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ServiceCIDRStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -841,7 +915,7 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { +func (m *IPAddress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -864,10 +938,10 @@ func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterCIDR: wiretype end group for non-group") + return fmt.Errorf("proto: IPAddress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCIDR: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -957,7 +1031,7 @@ func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { +func (m *IPAddressList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -980,10 +1054,10 @@ func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterCIDRList: wiretype end group for non-group") + return fmt.Errorf("proto: IPAddressList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCIDRList: illegal tag %d (wire type %d)", fieldNum, wire) + return 
fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1018,10 +1092,94 @@ func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - iNdEx = postIndex - case 2: + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, IPAddress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1048,8 +1206,10 @@ func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ClusterCIDR{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ParentRef == nil { + m.ParentRef = &ParentReference{} + } + if err := m.ParentRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1074,7 +1234,7 @@ func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { +func (m *ParentReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1097,17 +1257,17 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterCIDRSpec: wiretype end group for non-group") + return fmt.Errorf("proto: ParentReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ParentReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1117,33 +1277,29 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.NodeSelector == nil { - m.NodeSelector = &v11.NodeSelector{} - } - if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Group = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PerNodeHostBits", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } - m.PerNodeHostBits = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1153,14 +1309,27 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.PerNodeHostBits |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPv4", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1188,11 +1357,11 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IPv4 = string(dAtA[iNdEx:postIndex]) + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPv6", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1220,7 +1389,7 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IPv6 = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -1243,7 +1412,7 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *IPAddress) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDR) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1266,10 +1435,10 @@ func (m *IPAddress) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IPAddress: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDR: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDR: illegal tag 
%d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1338,6 +1507,39 @@ func (m *IPAddress) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1359,7 +1561,7 @@ func (m *IPAddress) Unmarshal(dAtA []byte) error { } return nil } -func (m *IPAddressList) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDRList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1382,10 +1584,10 @@ func (m *IPAddressList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IPAddressList: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDRList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDRList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1450,7 +1652,7 @@ func (m *IPAddressList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, IPAddress{}) + m.Items = append(m.Items, ServiceCIDR{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1476,7 +1678,7 @@ func (m *IPAddressList) Unmarshal(dAtA []byte) error { } return nil } -func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDRSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1499,17 +1701,17 @@ func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDRSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CIDRs", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1519,27 +1721,23 @@ func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { 
return io.ErrUnexpectedEOF } - if m.ParentRef == nil { - m.ParentRef = &ParentReference{} - } - if err := m.ParentRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.CIDRs = append(m.CIDRs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -1562,7 +1760,7 @@ func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *ParentReference) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDRStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1585,113 +1783,17 @@ func (m *ParentReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ParentReference: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDRStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ParentReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDRStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Group = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resource = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1701,55 +1803,25 @@ func (m *ParentReference) Unmarshal(dAtA []byte) error { 
} b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.proto b/vendor/k8s.io/api/networking/v1alpha1/generated.proto index 0f1f30d70..fb7971745 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/networking/v1alpha1/generated.proto @@ -21,7 +21,6 @@ syntax = "proto2"; package k8s.io.api.networking.v1alpha1; -import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -29,69 +28,6 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/networking/v1alpha1"; -// ClusterCIDR represents a single configuration for per-Node Pod CIDR -// allocations when the MultiCIDRRangeAllocator is enabled (see the config for -// kube-controller-manager). A cluster may have any number of ClusterCIDR -// resources, all of which will be considered when allocating a CIDR for a -// Node. A ClusterCIDR is eligible to be used for a given Node when the node -// selector matches the node in question and has free CIDRs to allocate. In -// case of multiple matching ClusterCIDR resources, the allocator will attempt -// to break ties using internal heuristics, but any ClusterCIDR whose node -// selector matches the Node may be used. -message ClusterCIDR { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec is the desired state of the ClusterCIDR. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional ClusterCIDRSpec spec = 2; -} - -// ClusterCIDRList contains a list of ClusterCIDR. -message ClusterCIDRList { - // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is the list of ClusterCIDRs. - repeated ClusterCIDR items = 2; -} - -// ClusterCIDRSpec defines the desired state of ClusterCIDR. -message ClusterCIDRSpec { - // nodeSelector defines which nodes the config is applicable to. - // An empty or nil nodeSelector selects all nodes. - // This field is immutable. - // +optional - optional k8s.io.api.core.v1.NodeSelector nodeSelector = 1; - - // perNodeHostBits defines the number of host bits to be configured per node. - // A subnet mask determines how much of the address is used for network bits - // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the - // address into 24 bits for the network portion and 8 bits for the host portion. - // To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). - // Minimum value is 4 (16 IPs). - // This field is immutable. - // +required - optional int32 perNodeHostBits = 2; - - // ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). - // At least one of ipv4 and ipv6 must be specified. - // This field is immutable. - // +optional - optional string ipv4 = 3; - - // ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). - // At least one of ipv4 and ipv6 must be specified. - // This field is immutable. - // +optional - optional string ipv6 = 4; -} - // IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs // that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. // An IP address can be represented in different formats, to guarantee the uniqueness of the IP, @@ -147,9 +83,56 @@ message ParentReference { // Name is the name of the object being referenced. // +required optional string name = 4; +} + +// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). +// This range is used to allocate ClusterIPs to Service objects. +message ServiceCIDR { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the desired state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ServiceCIDRSpec spec = 2; + + // status represents the current state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ServiceCIDRStatus status = 3; +} + +// ServiceCIDRList contains a list of ServiceCIDR objects. +message ServiceCIDRList { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of ServiceCIDRs. + repeated ServiceCIDR items = 2; +} + +// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services. +message ServiceCIDRSpec { + // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") + // from which to assign service cluster IPs. 
Max of two CIDRs is allowed, one of each IP family. + // This field is immutable. + // +optional + repeated string cidrs = 1; +} - // UID is the uid of the object being referenced. +// ServiceCIDRStatus describes the current state of the ServiceCIDR. +message ServiceCIDRStatus { + // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. + // Current service state // +optional - optional string uid = 5; + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; } diff --git a/vendor/k8s.io/api/networking/v1alpha1/register.go b/vendor/k8s.io/api/networking/v1alpha1/register.go index 8dda6394d..c8f5856b5 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/register.go +++ b/vendor/k8s.io/api/networking/v1alpha1/register.go @@ -52,10 +52,10 @@ var ( // Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &ClusterCIDR{}, - &ClusterCIDRList{}, &IPAddress{}, &IPAddressList{}, + &ServiceCIDR{}, + &ServiceCIDRList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/api/networking/v1alpha1/types.go b/vendor/k8s.io/api/networking/v1alpha1/types.go index 52e4a11e8..9d56ca193 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/types.go +++ b/vendor/k8s.io/api/networking/v1alpha1/types.go @@ -17,86 +17,9 @@ limitations under the License. package v1alpha1 import ( - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" ) -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.25 - -// ClusterCIDR represents a single configuration for per-Node Pod CIDR -// allocations when the MultiCIDRRangeAllocator is enabled (see the config for -// kube-controller-manager). A cluster may have any number of ClusterCIDR -// resources, all of which will be considered when allocating a CIDR for a -// Node. A ClusterCIDR is eligible to be used for a given Node when the node -// selector matches the node in question and has free CIDRs to allocate. In -// case of multiple matching ClusterCIDR resources, the allocator will attempt -// to break ties using internal heuristics, but any ClusterCIDR whose node -// selector matches the Node may be used. -type ClusterCIDR struct { - metav1.TypeMeta `json:",inline"` - - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec is the desired state of the ClusterCIDR. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Spec ClusterCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// ClusterCIDRSpec defines the desired state of ClusterCIDR. -type ClusterCIDRSpec struct { - // nodeSelector defines which nodes the config is applicable to. - // An empty or nil nodeSelector selects all nodes. - // This field is immutable. - // +optional - NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` - - // perNodeHostBits defines the number of host bits to be configured per node. 
- // A subnet mask determines how much of the address is used for network bits - // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the - // address into 24 bits for the network portion and 8 bits for the host portion. - // To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). - // Minimum value is 4 (16 IPs). - // This field is immutable. - // +required - PerNodeHostBits int32 `json:"perNodeHostBits" protobuf:"varint,2,opt,name=perNodeHostBits"` - - // ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). - // At least one of ipv4 and ipv6 must be specified. - // This field is immutable. - // +optional - IPv4 string `json:"ipv4" protobuf:"bytes,3,opt,name=ipv4"` - - // ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). - // At least one of ipv4 and ipv6 must be specified. - // This field is immutable. - // +optional - IPv6 string `json:"ipv6" protobuf:"bytes,4,opt,name=ipv6"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.25 - -// ClusterCIDRList contains a list of ClusterCIDR. -type ClusterCIDRList struct { - metav1.TypeMeta `json:",inline"` - - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is the list of ClusterCIDRs. - Items []ClusterCIDR `json:"items" protobuf:"bytes,2,rep,name=items"` -} - // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -143,9 +66,6 @@ type ParentReference struct { // Name is the name of the object being referenced. // +required Name string `json:"name,omitempty" protobuf:"bytes,4,opt,name=name"` - // UID is the uid of the object being referenced. - // +optional - UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -161,3 +81,70 @@ type IPAddressList struct { // items is the list of IPAddresses. Items []IPAddress `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.27 + +// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). +// This range is used to allocate ClusterIPs to Service objects. +type ServiceCIDR struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // spec is the desired state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec ServiceCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // status represents the current state of the ServiceCIDR. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ServiceCIDRStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services. +type ServiceCIDRSpec struct { + // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") + // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. + // This field is immutable. + // +optional + CIDRs []string `json:"cidrs,omitempty" protobuf:"bytes,1,opt,name=cidrs"` +} + +const ( + // ServiceCIDRConditionReady represents status of a ServiceCIDR that is ready to be used by the + // apiserver to allocate ClusterIPs for Services. + ServiceCIDRConditionReady = "Ready" + // ServiceCIDRReasonTerminating represents a reason where a ServiceCIDR is not ready because it is + // being deleted. + ServiceCIDRReasonTerminating = "Terminating" +) + +// ServiceCIDRStatus describes the current state of the ServiceCIDR. +type ServiceCIDRStatus struct { + // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. + // Current service state + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.27 + +// ServiceCIDRList contains a list of ServiceCIDR objects. +type ServiceCIDRList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // items is the list of ServiceCIDRs. + Items []ServiceCIDR `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go index 85304784f..481ec0603 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go @@ -27,38 +27,6 @@ package v1alpha1 // Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_ClusterCIDR = map[string]string{ - "": "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec is the desired state of the ClusterCIDR. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", -} - -func (ClusterCIDR) SwaggerDoc() map[string]string { - return map_ClusterCIDR -} - -var map_ClusterCIDRList = map[string]string{ - "": "ClusterCIDRList contains a list of ClusterCIDR.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is the list of ClusterCIDRs.", -} - -func (ClusterCIDRList) SwaggerDoc() map[string]string { - return map_ClusterCIDRList -} - -var map_ClusterCIDRSpec = map[string]string{ - "": "ClusterCIDRSpec defines the desired state of ClusterCIDR.", - "nodeSelector": "nodeSelector defines which nodes the config is applicable to. An empty or nil nodeSelector selects all nodes. This field is immutable.", - "perNodeHostBits": "perNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.", - "ipv4": "ipv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", - "ipv6": "ipv6 defines an IPv6 IP block in CIDR notation(e.g. \"2001:db8::/64\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", -} - -func (ClusterCIDRSpec) SwaggerDoc() map[string]string { - return map_ClusterCIDRSpec -} - var map_IPAddress = map[string]string{ "": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -94,11 +62,49 @@ var map_ParentReference = map[string]string{ "resource": "Resource is the resource of the object being referenced.", "namespace": "Namespace is the namespace of the object being referenced.", "name": "Name is the name of the object being referenced.", - "uid": "UID is the uid of the object being referenced.", } func (ParentReference) SwaggerDoc() map[string]string { return map_ParentReference } +var map_ServiceCIDR = map[string]string{ + "": "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the desired state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "status represents the current state of the ServiceCIDR. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (ServiceCIDR) SwaggerDoc() map[string]string { + return map_ServiceCIDR +} + +var map_ServiceCIDRList = map[string]string{ + "": "ServiceCIDRList contains a list of ServiceCIDR objects.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of ServiceCIDRs.", +} + +func (ServiceCIDRList) SwaggerDoc() map[string]string { + return map_ServiceCIDRList +} + +var map_ServiceCIDRSpec = map[string]string{ + "": "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.", + "cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable.", +} + +func (ServiceCIDRSpec) SwaggerDoc() map[string]string { + return map_ServiceCIDRSpec +} + +var map_ServiceCIDRStatus = map[string]string{ + "": "ServiceCIDRStatus describes the current state of the ServiceCIDR.", + "conditions": "conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state", +} + +func (ServiceCIDRStatus) SwaggerDoc() map[string]string { + return map_ServiceCIDRStatus +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go index 97db2eacc..5c8f697ba 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go @@ -22,12 +22,12 @@ limitations under the License. package v1alpha1 import ( - v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterCIDR) DeepCopyInto(out *ClusterCIDR) { +func (in *IPAddress) DeepCopyInto(out *IPAddress) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) @@ -35,18 +35,18 @@ func (in *ClusterCIDR) DeepCopyInto(out *ClusterCIDR) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDR. -func (in *ClusterCIDR) DeepCopy() *ClusterCIDR { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress. +func (in *IPAddress) DeepCopy() *IPAddress { if in == nil { return nil } - out := new(ClusterCIDR) + out := new(IPAddress) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterCIDR) DeepCopyObject() runtime.Object { +func (in *IPAddress) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -54,13 +54,13 @@ func (in *ClusterCIDR) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ClusterCIDRList) DeepCopyInto(out *ClusterCIDRList) { +func (in *IPAddressList) DeepCopyInto(out *IPAddressList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]ClusterCIDR, len(*in)) + *out = make([]IPAddress, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -68,18 +68,18 @@ func (in *ClusterCIDRList) DeepCopyInto(out *ClusterCIDRList) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRList. -func (in *ClusterCIDRList) DeepCopy() *ClusterCIDRList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList. +func (in *IPAddressList) DeepCopy() *IPAddressList { if in == nil { return nil } - out := new(ClusterCIDRList) + out := new(IPAddressList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterCIDRList) DeepCopyObject() runtime.Object { +func (in *IPAddressList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -87,47 +87,64 @@ func (in *ClusterCIDRList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterCIDRSpec) DeepCopyInto(out *ClusterCIDRSpec) { +func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) { *out = *in - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = new(v1.NodeSelector) - (*in).DeepCopyInto(*out) + if in.ParentRef != nil { + in, out := &in.ParentRef, &out.ParentRef + *out = new(ParentReference) + **out = **in } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRSpec. -func (in *ClusterCIDRSpec) DeepCopy() *ClusterCIDRSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec. +func (in *IPAddressSpec) DeepCopy() *IPAddressSpec { if in == nil { return nil } - out := new(ClusterCIDRSpec) + out := new(IPAddressSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IPAddress) DeepCopyInto(out *IPAddress) { +func (in *ParentReference) DeepCopyInto(out *ParentReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference. +func (in *ParentReference) DeepCopy() *ParentReference { + if in == nil { + return nil + } + out := new(ParentReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDR) DeepCopyInto(out *ServiceCIDR) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress. -func (in *IPAddress) DeepCopy() *IPAddress { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDR. 
+func (in *ServiceCIDR) DeepCopy() *ServiceCIDR { if in == nil { return nil } - out := new(IPAddress) + out := new(ServiceCIDR) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IPAddress) DeepCopyObject() runtime.Object { +func (in *ServiceCIDR) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -135,13 +152,13 @@ func (in *IPAddress) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IPAddressList) DeepCopyInto(out *IPAddressList) { +func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]IPAddress, len(*in)) + *out = make([]ServiceCIDR, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -149,18 +166,18 @@ func (in *IPAddressList) DeepCopyInto(out *IPAddressList) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList. -func (in *IPAddressList) DeepCopy() *IPAddressList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRList. +func (in *ServiceCIDRList) DeepCopy() *ServiceCIDRList { if in == nil { return nil } - out := new(IPAddressList) + out := new(ServiceCIDRList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IPAddressList) DeepCopyObject() runtime.Object { +func (in *ServiceCIDRList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -168,38 +185,45 @@ func (in *IPAddressList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) { +func (in *ServiceCIDRSpec) DeepCopyInto(out *ServiceCIDRSpec) { *out = *in - if in.ParentRef != nil { - in, out := &in.ParentRef, &out.ParentRef - *out = new(ParentReference) - **out = **in + if in.CIDRs != nil { + in, out := &in.CIDRs, &out.CIDRs + *out = make([]string, len(*in)) + copy(*out, *in) } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec. -func (in *IPAddressSpec) DeepCopy() *IPAddressSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRSpec. +func (in *ServiceCIDRSpec) DeepCopy() *ServiceCIDRSpec { if in == nil { return nil } - out := new(IPAddressSpec) + out := new(ServiceCIDRSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ParentReference) DeepCopyInto(out *ParentReference) { +func (in *ServiceCIDRStatus) DeepCopyInto(out *ServiceCIDRStatus) { *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference. 
-func (in *ParentReference) DeepCopy() *ParentReference { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRStatus. +func (in *ServiceCIDRStatus) DeepCopy() *ServiceCIDRStatus { if in == nil { return nil } - out := new(ParentReference) + out := new(ServiceCIDRStatus) in.DeepCopyInto(out) return out } diff --git a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go index 60438ba59..714e7b625 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -23,72 +23,72 @@ package v1alpha1 // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *ClusterCIDR) APILifecycleIntroduced() (major, minor int) { - return 1, 25 +func (in *IPAddress) APILifecycleIntroduced() (major, minor int) { + return 1, 27 } // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *ClusterCIDR) APILifecycleDeprecated() (major, minor int) { - return 1, 28 +func (in *IPAddress) APILifecycleDeprecated() (major, minor int) { + return 1, 30 } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *ClusterCIDR) APILifecycleRemoved() (major, minor int) { - return 1, 31 +func (in *IPAddress) APILifecycleRemoved() (major, minor int) { + return 1, 33 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *ClusterCIDRList) APILifecycleIntroduced() (major, minor int) { - return 1, 25 +func (in *IPAddressList) APILifecycleIntroduced() (major, minor int) { + return 1, 27 } // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *ClusterCIDRList) APILifecycleDeprecated() (major, minor int) { - return 1, 28 +func (in *IPAddressList) APILifecycleDeprecated() (major, minor int) { + return 1, 30 } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
-func (in *ClusterCIDRList) APILifecycleRemoved() (major, minor int) { - return 1, 31 +func (in *IPAddressList) APILifecycleRemoved() (major, minor int) { + return 1, 33 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *IPAddress) APILifecycleIntroduced() (major, minor int) { +func (in *ServiceCIDR) APILifecycleIntroduced() (major, minor int) { return 1, 27 } // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *IPAddress) APILifecycleDeprecated() (major, minor int) { +func (in *ServiceCIDR) APILifecycleDeprecated() (major, minor int) { return 1, 30 } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *IPAddress) APILifecycleRemoved() (major, minor int) { +func (in *ServiceCIDR) APILifecycleRemoved() (major, minor int) { return 1, 33 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *IPAddressList) APILifecycleIntroduced() (major, minor int) { +func (in *ServiceCIDRList) APILifecycleIntroduced() (major, minor int) { return 1, 27 } // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *IPAddressList) APILifecycleDeprecated() (major, minor int) { +func (in *ServiceCIDRList) APILifecycleDeprecated() (major, minor int) { return 1, 30 } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *IPAddressList) APILifecycleRemoved() (major, minor int) { +func (in *ServiceCIDRList) APILifecycleRemoved() (major, minor int) { return 1, 33 } diff --git a/vendor/k8s.io/api/policy/v1/doc.go b/vendor/k8s.io/api/policy/v1/doc.go index b46af58e4..177cdf523 100644 --- a/vendor/k8s.io/api/policy/v1/doc.go +++ b/vendor/k8s.io/api/policy/v1/doc.go @@ -19,6 +19,6 @@ limitations under the License. // +k8s:openapi-gen=true // Package policy is for any kind of policy object. Suitable examples, even if -// they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, +// they aren't all here, are PodDisruptionBudget, // NetworkPolicy, etc. 
package v1 // import "k8s.io/api/policy/v1" diff --git a/vendor/k8s.io/api/policy/v1beta1/doc.go b/vendor/k8s.io/api/policy/v1beta1/doc.go index 9e9c7d13a..76da54b4c 100644 --- a/vendor/k8s.io/api/policy/v1beta1/doc.go +++ b/vendor/k8s.io/api/policy/v1beta1/doc.go @@ -20,6 +20,6 @@ limitations under the License. // +k8s:prerelease-lifecycle-gen=true // Package policy is for any kind of policy object. Suitable examples, even if -// they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, +// they aren't all here, are PodDisruptionBudget, // NetworkPolicy, etc. package v1beta1 // import "k8s.io/api/policy/v1beta1" diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go index 0b75d6415..efba41b3f 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go @@ -26,8 +26,6 @@ import ( proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - k8s_io_api_core_v1 "k8s.io/api/core/v1" - v11 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" @@ -49,94 +47,10 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } -func (*AllowedCSIDriver) ProtoMessage() {} -func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{0} -} -func (m *AllowedCSIDriver) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedCSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedCSIDriver) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedCSIDriver.Merge(m, src) -} -func (m *AllowedCSIDriver) XXX_Size() int { - return m.Size() -} -func (m *AllowedCSIDriver) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedCSIDriver.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedCSIDriver proto.InternalMessageInfo - -func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } -func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{1} -} -func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedFlexVolume.Merge(m, src) -} -func (m *AllowedFlexVolume) XXX_Size() int { - return m.Size() -} -func (m *AllowedFlexVolume) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo - -func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } -func (*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{2} -} -func (m *AllowedHostPath) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedHostPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m 
*AllowedHostPath) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedHostPath.Merge(m, src) -} -func (m *AllowedHostPath) XXX_Size() int { - return m.Size() -} -func (m *AllowedHostPath) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedHostPath.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedHostPath proto.InternalMessageInfo - func (m *Eviction) Reset() { *m = Eviction{} } func (*Eviction) ProtoMessage() {} func (*Eviction) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{3} + return fileDescriptor_014060e454a820dc, []int{0} } func (m *Eviction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -161,94 +75,10 @@ func (m *Eviction) XXX_DiscardUnknown() { var xxx_messageInfo_Eviction proto.InternalMessageInfo -func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } -func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{4} -} -func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) -} -func (m *FSGroupStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FSGroupStrategyOptions proto.InternalMessageInfo - -func (m *HostPortRange) Reset() { *m = HostPortRange{} } -func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{5} -} -func (m *HostPortRange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HostPortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HostPortRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_HostPortRange.Merge(m, src) -} -func (m *HostPortRange) XXX_Size() int { - return m.Size() -} -func (m *HostPortRange) XXX_DiscardUnknown() { - xxx_messageInfo_HostPortRange.DiscardUnknown(m) -} - -var xxx_messageInfo_HostPortRange proto.InternalMessageInfo - -func (m *IDRange) Reset() { *m = IDRange{} } -func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{6} -} -func (m *IDRange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IDRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_IDRange.Merge(m, src) -} -func (m *IDRange) XXX_Size() int { - return m.Size() -} -func (m *IDRange) XXX_DiscardUnknown() { - xxx_messageInfo_IDRange.DiscardUnknown(m) -} - -var xxx_messageInfo_IDRange proto.InternalMessageInfo - func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } func (*PodDisruptionBudget) ProtoMessage() {} func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{7} + return fileDescriptor_014060e454a820dc, 
[]int{1} } func (m *PodDisruptionBudget) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -276,7 +106,7 @@ var xxx_messageInfo_PodDisruptionBudget proto.InternalMessageInfo func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } func (*PodDisruptionBudgetList) ProtoMessage() {} func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{8} + return fileDescriptor_014060e454a820dc, []int{2} } func (m *PodDisruptionBudgetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -304,7 +134,7 @@ var xxx_messageInfo_PodDisruptionBudgetList proto.InternalMessageInfo func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } func (*PodDisruptionBudgetSpec) ProtoMessage() {} func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{9} + return fileDescriptor_014060e454a820dc, []int{3} } func (m *PodDisruptionBudgetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -332,7 +162,7 @@ var xxx_messageInfo_PodDisruptionBudgetSpec proto.InternalMessageInfo func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } func (*PodDisruptionBudgetStatus) ProtoMessage() {} func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{10} + return fileDescriptor_014060e454a820dc, []int{4} } func (m *PodDisruptionBudgetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -357,251 +187,13 @@ func (m *PodDisruptionBudgetStatus) XXX_DiscardUnknown() { var xxx_messageInfo_PodDisruptionBudgetStatus proto.InternalMessageInfo -func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } -func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{11} -} -func (m *PodSecurityPolicy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicy) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicy.Merge(m, src) -} -func (m *PodSecurityPolicy) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicy) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicy.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicy proto.InternalMessageInfo - -func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } -func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{12} -} -func (m *PodSecurityPolicyList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicyList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicyList.Merge(m, src) -} -func (m *PodSecurityPolicyList) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicyList) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicyList.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicyList proto.InternalMessageInfo - -func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } -func 
(*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{13} -} -func (m *PodSecurityPolicySpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicySpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicySpec.Merge(m, src) -} -func (m *PodSecurityPolicySpec) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicySpec) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicySpec.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicySpec proto.InternalMessageInfo - -func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } -func (*RunAsGroupStrategyOptions) ProtoMessage() {} -func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{14} -} -func (m *RunAsGroupStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RunAsGroupStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RunAsGroupStrategyOptions.Merge(m, src) -} -func (m *RunAsGroupStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *RunAsGroupStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RunAsGroupStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RunAsGroupStrategyOptions proto.InternalMessageInfo - -func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } -func (*RunAsUserStrategyOptions) ProtoMessage() {} -func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{15} -} -func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) -} -func (m *RunAsUserStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *RunAsUserStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo - -func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } -func (*RuntimeClassStrategyOptions) ProtoMessage() {} -func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{16} -} -func (m *RuntimeClassStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RuntimeClassStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RuntimeClassStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RuntimeClassStrategyOptions.Merge(m, src) -} -func (m *RuntimeClassStrategyOptions) XXX_Size() int { - 
return m.Size() -} -func (m *RuntimeClassStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RuntimeClassStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RuntimeClassStrategyOptions proto.InternalMessageInfo - -func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } -func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{17} -} -func (m *SELinuxStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SELinuxStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SELinuxStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SELinuxStrategyOptions.Merge(m, src) -} -func (m *SELinuxStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *SELinuxStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_SELinuxStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_SELinuxStrategyOptions proto.InternalMessageInfo - -func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } -func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} -func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{18} -} -func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) -} -func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo - func init() { - proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.policy.v1beta1.AllowedCSIDriver") - proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.policy.v1beta1.AllowedFlexVolume") - proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.policy.v1beta1.AllowedHostPath") proto.RegisterType((*Eviction)(nil), "k8s.io.api.policy.v1beta1.Eviction") - proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.FSGroupStrategyOptions") - proto.RegisterType((*HostPortRange)(nil), "k8s.io.api.policy.v1beta1.HostPortRange") - proto.RegisterType((*IDRange)(nil), "k8s.io.api.policy.v1beta1.IDRange") proto.RegisterType((*PodDisruptionBudget)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudget") proto.RegisterType((*PodDisruptionBudgetList)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetList") proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetSpec") proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus") proto.RegisterMapType((map[string]v1.Time)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus.DisruptedPodsEntry") - proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicy") - proto.RegisterType((*PodSecurityPolicyList)(nil), 
"k8s.io.api.policy.v1beta1.PodSecurityPolicyList") - proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicySpec") - proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsGroupStrategyOptions") - proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsUserStrategyOptions") - proto.RegisterType((*RuntimeClassStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RuntimeClassStrategyOptions") - proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SELinuxStrategyOptions") - proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SupplementalGroupsStrategyOptions") } func init() { @@ -609,132 +201,64 @@ func init() { } var fileDescriptor_014060e454a820dc = []byte{ - // 1946 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x59, 0x5b, 0x73, 0xdb, 0xc6, - 0x15, 0x16, 0x4c, 0x5d, 0xa8, 0xd5, 0xc5, 0xe2, 0xea, 0x62, 0x48, 0x69, 0x08, 0x07, 0x99, 0xe9, - 0xb8, 0x69, 0x0a, 0xc6, 0xb2, 0xe3, 0x7a, 0x9a, 0x5e, 0x2c, 0x88, 0x92, 0xad, 0x8c, 0x65, 0xb1, - 0x4b, 0x2b, 0xd3, 0x76, 0xdc, 0x4e, 0x97, 0xc0, 0x8a, 0x44, 0x04, 0x02, 0x28, 0x76, 0xc1, 0x88, - 0x6f, 0x79, 0xe8, 0x43, 0x1f, 0xfb, 0x07, 0x32, 0xfd, 0x01, 0x9d, 0x3e, 0xf5, 0x47, 0xd4, 0x99, - 0xe9, 0x74, 0xd2, 0xb7, 0x4c, 0x1f, 0x38, 0x35, 0xfb, 0x2f, 0xfc, 0xd4, 0xc1, 0x72, 0x01, 0x12, - 0x37, 0xd2, 0xce, 0x8c, 0xfd, 0x46, 0xec, 0xf9, 0xbe, 0xef, 0xec, 0x9e, 0xdd, 0x3d, 0x67, 0x77, - 0x09, 0xf4, 0xcb, 0xfb, 0x54, 0xb3, 0xdc, 0xda, 0x65, 0xd0, 0x22, 0xbe, 0x43, 0x18, 0xa1, 0xb5, - 0x1e, 0x71, 0x4c, 0xd7, 0xaf, 0x09, 0x03, 0xf6, 0xac, 0x9a, 0xe7, 0xda, 0x96, 0xd1, 0xaf, 0xf5, - 0x6e, 0xb7, 0x08, 0xc3, 0xb7, 0x6b, 0x6d, 0xe2, 0x10, 0x1f, 0x33, 0x62, 0x6a, 0x9e, 0xef, 0x32, - 0x17, 0xee, 0x8e, 0xa0, 0x1a, 0xf6, 0x2c, 0x6d, 0x04, 0xd5, 0x04, 0x74, 0xef, 0x47, 0x6d, 0x8b, - 0x75, 0x82, 0x96, 0x66, 0xb8, 0xdd, 0x5a, 0xdb, 0x6d, 0xbb, 0x35, 0xce, 0x68, 0x05, 0x17, 0xfc, - 0x8b, 0x7f, 0xf0, 0x5f, 0x23, 0xa5, 0x3d, 0x75, 0xc2, 0xa9, 0xe1, 0xfa, 0xa4, 0xd6, 0xcb, 0x78, - 0xdb, 0xbb, 0x3b, 0xc6, 0x74, 0xb1, 0xd1, 0xb1, 0x1c, 0xe2, 0xf7, 0x6b, 0xde, 0x65, 0x3b, 0x6c, - 0xa0, 0xb5, 0x2e, 0x61, 0x38, 0x8f, 0x55, 0x2b, 0x62, 0xf9, 0x81, 0xc3, 0xac, 0x2e, 0xc9, 0x10, - 0xee, 0xcd, 0x22, 0x50, 0xa3, 0x43, 0xba, 0x38, 0xc3, 0xbb, 0x53, 0xc4, 0x0b, 0x98, 0x65, 0xd7, - 0x2c, 0x87, 0x51, 0xe6, 0xa7, 0x49, 0xea, 0x5d, 0xb0, 0x71, 0x60, 0xdb, 0xee, 0x17, 0xc4, 0x3c, - 0x6c, 0x9e, 0xd4, 0x7d, 0xab, 0x47, 0x7c, 0x78, 0x13, 0xcc, 0x3b, 0xb8, 0x4b, 0x64, 0xe9, 0xa6, - 0x74, 0x6b, 0x59, 0x5f, 0x7d, 0x3e, 0x50, 0xe6, 0x86, 0x03, 0x65, 0xfe, 0x09, 0xee, 0x12, 0xc4, - 0x2d, 0xea, 0x27, 0xa0, 0x22, 0x58, 0xc7, 0x36, 0xb9, 0xfa, 0xcc, 0xb5, 0x83, 0x2e, 0x81, 0xdf, - 0x07, 0x8b, 0x26, 0x17, 0x10, 0xc4, 0x75, 0x41, 0x5c, 0x1c, 0xc9, 0x22, 0x61, 0x55, 0x29, 0xb8, - 0x2e, 0xc8, 0x8f, 0x5c, 0xca, 0x1a, 0x98, 0x75, 0xe0, 0x3e, 0x00, 0x1e, 0x66, 0x9d, 0x86, 0x4f, - 0x2e, 0xac, 0x2b, 0x41, 0x87, 0x82, 0x0e, 0x1a, 0xb1, 0x05, 0x4d, 0xa0, 0xe0, 0x87, 0xa0, 0xec, - 0x13, 0x6c, 0x9e, 0x39, 0x76, 0x5f, 0xbe, 0x76, 0x53, 0xba, 0x55, 0xd6, 0x37, 0x04, 0xa3, 0x8c, - 0x44, 0x3b, 0x8a, 0x11, 0xea, 0x7f, 0x24, 0x50, 0x3e, 0xea, 0x59, 0x06, 0xb3, 0x5c, 0x07, 0xfe, - 0x1e, 0x94, 0xc3, 0xd9, 0x32, 0x31, 0xc3, 0xdc, 0xd9, 0xca, 0xfe, 0x47, 0xda, 0x78, 0x25, 0xc5, - 0xc1, 0xd3, 0xbc, 0xcb, 0x76, 0xd8, 0x40, 0xb5, 0x10, 0xad, 0xf5, 0x6e, 0x6b, 0x67, 0xad, 0xcf, - 0x89, 0xc1, 0x4e, 0x09, 0xc3, 0xe3, 
0xee, 0x8d, 0xdb, 0x50, 0xac, 0x0a, 0x6d, 0xb0, 0x66, 0x12, - 0x9b, 0x30, 0x72, 0xe6, 0x85, 0x1e, 0x29, 0xef, 0xe1, 0xca, 0xfe, 0x9d, 0x57, 0x73, 0x53, 0x9f, - 0xa4, 0xea, 0x95, 0xe1, 0x40, 0x59, 0x4b, 0x34, 0xa1, 0xa4, 0xb8, 0xfa, 0x95, 0x04, 0x76, 0x8e, - 0x9b, 0x0f, 0x7d, 0x37, 0xf0, 0x9a, 0x2c, 0x9c, 0xdd, 0x76, 0x5f, 0x98, 0xe0, 0x8f, 0xc1, 0xbc, - 0x1f, 0xd8, 0xd1, 0x5c, 0xbe, 0x1f, 0xcd, 0x25, 0x0a, 0x6c, 0xf2, 0x72, 0xa0, 0x6c, 0xa6, 0x58, - 0x4f, 0xfb, 0x1e, 0x41, 0x9c, 0x00, 0x3f, 0x05, 0x8b, 0x3e, 0x76, 0xda, 0x24, 0xec, 0x7a, 0xe9, - 0xd6, 0xca, 0xbe, 0xaa, 0x15, 0xee, 0x35, 0xed, 0xa4, 0x8e, 0x42, 0xe8, 0x78, 0xc6, 0xf9, 0x27, - 0x45, 0x42, 0x41, 0x3d, 0x05, 0x6b, 0x7c, 0xaa, 0x5d, 0x9f, 0x71, 0x0b, 0x7c, 0x17, 0x94, 0xba, - 0x96, 0xc3, 0x3b, 0xb5, 0xa0, 0xaf, 0x08, 0x56, 0xe9, 0xd4, 0x72, 0x50, 0xd8, 0xce, 0xcd, 0xf8, - 0x8a, 0xc7, 0x6c, 0xd2, 0x8c, 0xaf, 0x50, 0xd8, 0xae, 0x3e, 0x04, 0x4b, 0xc2, 0xe3, 0xa4, 0x50, - 0x69, 0xba, 0x50, 0x29, 0x47, 0xe8, 0xaf, 0xd7, 0xc0, 0x66, 0xc3, 0x35, 0xeb, 0x16, 0xf5, 0x03, - 0x1e, 0x2f, 0x3d, 0x30, 0xdb, 0x84, 0xbd, 0x85, 0xf5, 0xf1, 0x14, 0xcc, 0x53, 0x8f, 0x18, 0x62, - 0x59, 0xec, 0x4f, 0x89, 0x6d, 0x4e, 0xff, 0x9a, 0x1e, 0x31, 0xc6, 0xdb, 0x32, 0xfc, 0x42, 0x5c, - 0x0d, 0x3e, 0x03, 0x8b, 0x94, 0x61, 0x16, 0x50, 0xb9, 0xc4, 0x75, 0xef, 0xbe, 0xa6, 0x2e, 0xe7, - 0x8e, 0x67, 0x71, 0xf4, 0x8d, 0x84, 0xa6, 0xfa, 0x4f, 0x09, 0xdc, 0xc8, 0x61, 0x3d, 0xb6, 0x28, - 0x83, 0xcf, 0x32, 0x11, 0xd3, 0x5e, 0x2d, 0x62, 0x21, 0x9b, 0xc7, 0x2b, 0xde, 0xbc, 0x51, 0xcb, - 0x44, 0xb4, 0x9a, 0x60, 0xc1, 0x62, 0xa4, 0x1b, 0x2d, 0x45, 0xed, 0xf5, 0x86, 0xa5, 0xaf, 0x09, - 0xe9, 0x85, 0x93, 0x50, 0x04, 0x8d, 0xb4, 0xd4, 0x7f, 0x97, 0x72, 0x87, 0x13, 0x86, 0x13, 0x5e, - 0x80, 0xd5, 0xae, 0xe5, 0x1c, 0xf4, 0xb0, 0x65, 0xe3, 0x96, 0xd8, 0x3d, 0xd3, 0x16, 0x41, 0x98, - 0x61, 0xb5, 0x51, 0x86, 0xd5, 0x4e, 0x1c, 0x76, 0xe6, 0x37, 0x99, 0x6f, 0x39, 0x6d, 0x7d, 0x63, - 0x38, 0x50, 0x56, 0x4f, 0x27, 0x94, 0x50, 0x42, 0x17, 0xfe, 0x16, 0x94, 0x29, 0xb1, 0x89, 0xc1, - 0x5c, 0xff, 0xf5, 0x32, 0xc4, 0x63, 0xdc, 0x22, 0x76, 0x53, 0x50, 0xf5, 0xd5, 0x30, 0x6e, 0xd1, - 0x17, 0x8a, 0x25, 0xa1, 0x0d, 0xd6, 0xbb, 0xf8, 0xea, 0xdc, 0xc1, 0xf1, 0x40, 0x4a, 0xdf, 0x71, - 0x20, 0x70, 0x38, 0x50, 0xd6, 0x4f, 0x13, 0x5a, 0x28, 0xa5, 0x0d, 0xbf, 0x94, 0xc0, 0x5e, 0xe0, - 0x74, 0x08, 0xb6, 0x59, 0xa7, 0xdf, 0x70, 0xcd, 0x28, 0xdd, 0x36, 0xf8, 0x0c, 0xc9, 0xf3, 0x3c, - 0x03, 0x3d, 0x18, 0x0e, 0x94, 0xbd, 0xf3, 0x42, 0xd4, 0xcb, 0x81, 0x52, 0x2d, 0xb6, 0xf2, 0xf4, - 0x34, 0xc5, 0x87, 0xfa, 0x8f, 0x05, 0xb0, 0x5b, 0xb8, 0xb0, 0xe1, 0xa7, 0x00, 0xba, 0x2d, 0x4a, - 0xfc, 0x1e, 0x31, 0x1f, 0x8e, 0xca, 0xa0, 0xe5, 0x46, 0xb9, 0x63, 0x4f, 0xac, 0x11, 0x78, 0x96, - 0x41, 0xa0, 0x1c, 0x16, 0xfc, 0xa3, 0x04, 0xd6, 0xcc, 0x91, 0x1b, 0x62, 0x36, 0x5c, 0x33, 0x5a, - 0x9b, 0x0f, 0xbf, 0xcb, 0x96, 0xd3, 0xea, 0x93, 0x4a, 0x47, 0x0e, 0xf3, 0xfb, 0xfa, 0xb6, 0xe8, - 0xd0, 0x5a, 0xc2, 0x86, 0x92, 0x4e, 0xc3, 0x21, 0x99, 0xb1, 0x24, 0x15, 0x65, 0x95, 0xcf, 0xf2, - 0xc2, 0x78, 0x48, 0xf5, 0x0c, 0x02, 0xe5, 0xb0, 0xe0, 0xcf, 0xc1, 0xba, 0x11, 0xf8, 0x3e, 0x71, - 0xd8, 0xa3, 0x51, 0x7c, 0xf9, 0x94, 0x2d, 0xe8, 0x3b, 0x42, 0x67, 0xfd, 0x30, 0x61, 0x45, 0x29, - 0x74, 0xc8, 0x37, 0x09, 0xb5, 0x7c, 0x62, 0x46, 0xfc, 0x85, 0x24, 0xbf, 0x9e, 0xb0, 0xa2, 0x14, - 0x1a, 0xde, 0x07, 0xab, 0xe4, 0xca, 0x23, 0x46, 0x14, 0xd0, 0x45, 0xce, 0xde, 0x12, 0xec, 0xd5, - 0xa3, 0x09, 0x1b, 0x4a, 0x20, 0xa1, 0x01, 0x80, 0xe1, 0x3a, 0xa6, 0x35, 0x2a, 0xb5, 0x4b, 0x7c, - 0x22, 0x6a, 0xaf, 0xb6, 0x91, 0x0e, 0x23, 0xde, 0x38, 0x61, 
0xc7, 0x4d, 0x14, 0x4d, 0xc8, 0xee, - 0xd9, 0x00, 0x66, 0xa7, 0x09, 0x6e, 0x80, 0xd2, 0x25, 0xe9, 0x8f, 0xca, 0x2b, 0x0a, 0x7f, 0xc2, - 0x07, 0x60, 0xa1, 0x87, 0xed, 0x80, 0x88, 0x0d, 0xfd, 0xc1, 0xab, 0xf5, 0xe3, 0xa9, 0xd5, 0x25, - 0x68, 0x44, 0xfc, 0xc9, 0xb5, 0xfb, 0x92, 0xfa, 0xb5, 0x04, 0x2a, 0x0d, 0xd7, 0x6c, 0x12, 0x23, - 0xf0, 0x2d, 0xd6, 0x1f, 0xad, 0xef, 0xb7, 0x50, 0x98, 0x50, 0xa2, 0x30, 0x7d, 0x34, 0x7d, 0x35, - 0x27, 0x7b, 0x57, 0x54, 0x96, 0xd4, 0xe7, 0x12, 0xd8, 0xce, 0xa0, 0xdf, 0x42, 0xd9, 0xf8, 0x65, - 0xb2, 0x6c, 0x7c, 0xf8, 0x3a, 0x83, 0x29, 0x28, 0x1a, 0x5f, 0x57, 0x72, 0x86, 0xc2, 0x4b, 0x46, - 0x78, 0x84, 0xf5, 0xad, 0x9e, 0x65, 0x93, 0x36, 0x31, 0xf9, 0x60, 0xca, 0x13, 0x47, 0xd8, 0xd8, - 0x82, 0x26, 0x50, 0x90, 0x82, 0x1d, 0x93, 0x5c, 0xe0, 0xc0, 0x66, 0x07, 0xa6, 0x79, 0x88, 0x3d, - 0xdc, 0xb2, 0x6c, 0x8b, 0x59, 0xe2, 0xcc, 0xb5, 0xac, 0x7f, 0x32, 0x1c, 0x28, 0x3b, 0xf5, 0x5c, - 0xc4, 0xcb, 0x81, 0xf2, 0x6e, 0xf6, 0xca, 0xa2, 0xc5, 0x90, 0x3e, 0x2a, 0x90, 0x86, 0x7d, 0x20, - 0xfb, 0xe4, 0x0f, 0x41, 0xb8, 0xf3, 0xea, 0xbe, 0xeb, 0x25, 0xdc, 0x96, 0xb8, 0xdb, 0x9f, 0x0d, - 0x07, 0x8a, 0x8c, 0x0a, 0x30, 0xb3, 0x1d, 0x17, 0xca, 0xc3, 0xcf, 0xc1, 0x26, 0x16, 0x97, 0x8d, - 0x49, 0xaf, 0xf3, 0xdc, 0xeb, 0xfd, 0xe1, 0x40, 0xd9, 0x3c, 0xc8, 0x9a, 0x67, 0x3b, 0xcc, 0x13, - 0x85, 0x35, 0xb0, 0xd4, 0xe3, 0xf7, 0x12, 0x2a, 0x2f, 0x70, 0xfd, 0xed, 0xe1, 0x40, 0x59, 0x1a, - 0x5d, 0x55, 0x42, 0xcd, 0xc5, 0xe3, 0x26, 0x2f, 0x27, 0x11, 0x0a, 0x7e, 0x0c, 0x56, 0x3a, 0x2e, - 0x65, 0x4f, 0x08, 0xfb, 0xc2, 0xf5, 0x2f, 0x79, 0xf6, 0x29, 0xeb, 0x9b, 0x62, 0x06, 0x57, 0x1e, - 0x8d, 0x4d, 0x68, 0x12, 0x07, 0x7f, 0x0d, 0x96, 0x3b, 0xe2, 0x6c, 0x1b, 0xa5, 0x9e, 0x5b, 0x53, - 0x16, 0x5a, 0xe2, 0x1c, 0xac, 0x57, 0x84, 0xfc, 0x72, 0xd4, 0x4c, 0xd1, 0x58, 0x0d, 0xfe, 0x00, - 0x2c, 0xf1, 0x8f, 0x93, 0xba, 0x5c, 0xe6, 0xbd, 0xb9, 0x2e, 0xe0, 0x4b, 0x8f, 0x46, 0xcd, 0x28, - 0xb2, 0x47, 0xd0, 0x93, 0xc6, 0xa1, 0xbc, 0x9c, 0x85, 0x9e, 0x34, 0x0e, 0x51, 0x64, 0x87, 0xcf, - 0xc0, 0x12, 0x25, 0x8f, 0x2d, 0x27, 0xb8, 0x92, 0x01, 0xdf, 0x72, 0xb7, 0xa7, 0x74, 0xb7, 0x79, - 0xc4, 0x91, 0xa9, 0x5b, 0xc5, 0x58, 0x5d, 0xd8, 0x51, 0x24, 0x09, 0x4d, 0xb0, 0xec, 0x07, 0xce, - 0x01, 0x3d, 0xa7, 0xc4, 0x97, 0x57, 0x32, 0x47, 0x9a, 0xb4, 0x3e, 0x8a, 0xb0, 0x69, 0x0f, 0x71, - 0x64, 0x62, 0x04, 0x1a, 0x0b, 0x43, 0x13, 0x00, 0xfe, 0xc1, 0x2f, 0x2f, 0xf2, 0xce, 0xcc, 0xc3, - 0x2e, 0x8a, 0xc1, 0x69, 0x3f, 0xeb, 0xe1, 0xf6, 0x1c, 0x9b, 0xd1, 0x84, 0x2e, 0xfc, 0x93, 0x04, - 0x20, 0x0d, 0x3c, 0xcf, 0x26, 0x5d, 0xe2, 0x30, 0x6c, 0xf3, 0x56, 0x2a, 0xaf, 0x72, 0x77, 0x3f, - 0x9d, 0x16, 0xb5, 0x0c, 0x29, 0xed, 0x36, 0xae, 0xcd, 0x59, 0x28, 0xca, 0xf1, 0x19, 0x4e, 0xda, - 0x85, 0x18, 0xed, 0xda, 0xcc, 0x49, 0xcb, 0xbf, 0x0a, 0x8e, 0x27, 0x4d, 0xd8, 0x51, 0x24, 0x09, - 0x3f, 0x03, 0x3b, 0xd1, 0x45, 0x19, 0xb9, 0x2e, 0x3b, 0xb6, 0x6c, 0x42, 0xfb, 0x94, 0x91, 0xae, - 0xbc, 0xce, 0x17, 0x53, 0x55, 0x30, 0x77, 0x50, 0x2e, 0x0a, 0x15, 0xb0, 0x61, 0x17, 0x28, 0x51, - 0x12, 0x0a, 0x77, 0x68, 0x9c, 0x05, 0x8f, 0xa8, 0x81, 0xed, 0xd1, 0xe9, 0xeb, 0x3a, 0x77, 0xf0, - 0xfe, 0x70, 0xa0, 0x28, 0xf5, 0xe9, 0x50, 0x34, 0x4b, 0x0b, 0xfe, 0x0a, 0xc8, 0xb8, 0xc8, 0xcf, - 0x06, 0xf7, 0xf3, 0xbd, 0x30, 0xb3, 0x15, 0x3a, 0x28, 0x64, 0x43, 0x0f, 0x6c, 0xe0, 0xe4, 0x93, - 0x05, 0x95, 0x2b, 0x7c, 0xaf, 0x7f, 0x30, 0x65, 0x1e, 0x52, 0xaf, 0x1c, 0xba, 0x2c, 0xc2, 0xb8, - 0x91, 0x32, 0x50, 0x94, 0x51, 0x87, 0x57, 0x00, 0xe2, 0xf4, 0x0b, 0x0b, 0x95, 0xe1, 0xcc, 0x42, - 0x96, 0x79, 0x96, 0x19, 0x2f, 0xb5, 0x8c, 0x89, 0xa2, 0x1c, 0x1f, 0x90, 0x81, 0x0a, 
0x4e, 0xbd, - 0x08, 0x51, 0xf9, 0x06, 0x77, 0xfc, 0xc3, 0xd9, 0x8e, 0x63, 0x8e, 0xbe, 0x2b, 0xfc, 0x56, 0xd2, - 0x16, 0x8a, 0xb2, 0x0e, 0xe0, 0x63, 0xb0, 0x25, 0x1a, 0xcf, 0x1d, 0x8a, 0x2f, 0x48, 0xb3, 0x4f, - 0x0d, 0x66, 0x53, 0x79, 0x93, 0xe7, 0x6e, 0x79, 0x38, 0x50, 0xb6, 0x0e, 0x72, 0xec, 0x28, 0x97, - 0x05, 0x1f, 0x80, 0x8d, 0x0b, 0xd7, 0x6f, 0x59, 0xa6, 0x49, 0x9c, 0x48, 0x69, 0x8b, 0x2b, 0x6d, - 0x85, 0xf1, 0x3f, 0x4e, 0xd9, 0x50, 0x06, 0x0d, 0x29, 0xd8, 0x16, 0xca, 0x0d, 0xdf, 0x35, 0x4e, - 0xdd, 0xc0, 0x61, 0x61, 0xb9, 0xa0, 0xf2, 0x76, 0x5c, 0x22, 0xb7, 0x0f, 0xf2, 0x00, 0x2f, 0x07, - 0xca, 0xcd, 0x9c, 0x72, 0x95, 0x00, 0xa1, 0x7c, 0x6d, 0x68, 0x83, 0x55, 0xf1, 0xc6, 0x77, 0x68, - 0x63, 0x4a, 0x65, 0x99, 0x6f, 0xf5, 0x7b, 0xd3, 0x13, 0x5b, 0x0c, 0x4f, 0xef, 0x77, 0x7e, 0xf9, - 0x9c, 0x04, 0xa0, 0x84, 0xba, 0xfa, 0x17, 0x09, 0xec, 0x16, 0x26, 0x46, 0x78, 0x2f, 0xf1, 0x70, - 0xa4, 0xa6, 0x1e, 0x8e, 0x60, 0x96, 0xf8, 0x06, 0xde, 0x8d, 0xbe, 0x92, 0x80, 0x5c, 0x54, 0x21, - 0xe0, 0xc7, 0x89, 0x0e, 0xbe, 0x97, 0xea, 0x60, 0x25, 0xc3, 0x7b, 0x03, 0xfd, 0xfb, 0x97, 0x04, - 0xde, 0x99, 0x32, 0x03, 0x71, 0x42, 0x22, 0xe6, 0x24, 0xea, 0x09, 0x0e, 0xb7, 0xb2, 0xc4, 0xd7, - 0xd1, 0x38, 0x21, 0xe5, 0x60, 0x50, 0x21, 0x1b, 0x9e, 0x83, 0x1b, 0x22, 0x1b, 0xa6, 0x6d, 0xfc, - 0xe4, 0xbe, 0xac, 0xbf, 0x33, 0x1c, 0x28, 0x37, 0xea, 0xf9, 0x10, 0x54, 0xc4, 0x55, 0xff, 0x26, - 0x81, 0x9d, 0xfc, 0x92, 0x0f, 0xef, 0x24, 0xc2, 0xad, 0xa4, 0xc2, 0x7d, 0x3d, 0xc5, 0x12, 0xc1, - 0xfe, 0x1d, 0x58, 0x17, 0x07, 0x83, 0xe4, 0x3b, 0x68, 0x22, 0xe8, 0xe1, 0x16, 0x09, 0xcf, 0xf4, - 0x42, 0x22, 0x5a, 0xbe, 0xfc, 0xc9, 0x21, 0xd9, 0x86, 0x52, 0x6a, 0xea, 0xdf, 0x25, 0xf0, 0xde, - 0xcc, 0x62, 0x0b, 0xf5, 0x44, 0xd7, 0xb5, 0x54, 0xd7, 0xab, 0xc5, 0x02, 0x6f, 0xe6, 0x39, 0x54, - 0xff, 0xc5, 0xf3, 0x17, 0xd5, 0xb9, 0x6f, 0x5e, 0x54, 0xe7, 0xbe, 0x7d, 0x51, 0x9d, 0xfb, 0x72, - 0x58, 0x95, 0x9e, 0x0f, 0xab, 0xd2, 0x37, 0xc3, 0xaa, 0xf4, 0xed, 0xb0, 0x2a, 0xfd, 0x77, 0x58, - 0x95, 0xfe, 0xfc, 0xbf, 0xea, 0xdc, 0x6f, 0x76, 0x0b, 0xff, 0x06, 0xf9, 0x7f, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xb4, 0x84, 0x53, 0xfb, 0x3b, 0x19, 0x00, 0x00, + // 857 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0xc7, 0x45, 0xcb, 0x72, 0x9d, 0xad, 0x24, 0xb8, 0xdb, 0x2f, 0x5b, 0x07, 0x2a, 0xd0, 0x29, + 0x28, 0xd0, 0x65, 0x9d, 0x04, 0x85, 0xd1, 0x43, 0x9b, 0x30, 0x32, 0xd2, 0x14, 0x31, 0x6c, 0xac, + 0x9c, 0x4b, 0x91, 0x02, 0x5d, 0x91, 0x13, 0x69, 0x2b, 0x92, 0x4b, 0x70, 0x97, 0x6c, 0x74, 0xcb, + 0xa1, 0x0f, 0xd0, 0xf7, 0xe8, 0x83, 0xd4, 0x87, 0x1e, 0xd2, 0x5b, 0xd0, 0x83, 0x50, 0xb3, 0x6f, + 0xd1, 0x53, 0xc1, 0x25, 0xf5, 0x41, 0x7d, 0x34, 0x4a, 0x0e, 0xb9, 0x71, 0x67, 0xe6, 0xff, 0x1b, + 0xce, 0xc7, 0x52, 0x42, 0xf6, 0xe8, 0x44, 0x12, 0x2e, 0xac, 0x51, 0xdc, 0x87, 0x28, 0x00, 0x05, + 0xd2, 0x4a, 0x20, 0x70, 0x45, 0x64, 0x15, 0x0e, 0x16, 0x72, 0x2b, 0x14, 0x1e, 0x77, 0xc6, 0x56, + 0x72, 0xdc, 0x07, 0xc5, 0x8e, 0xad, 0x01, 0x04, 0x10, 0x31, 0x05, 0x2e, 0x09, 0x23, 0xa1, 0x04, + 0x3e, 0xca, 0x43, 0x09, 0x0b, 0x39, 0xc9, 0x43, 0x49, 0x11, 0xda, 0xfa, 0x7c, 0xc0, 0xd5, 0x30, + 0xee, 0x13, 0x47, 0xf8, 0xd6, 0x40, 0x0c, 0x84, 0xa5, 0x15, 0xfd, 0xf8, 0x99, 0x3e, 0xe9, 0x83, + 0x7e, 0xca, 0x49, 0xad, 0xbb, 0xf3, 0xa4, 0x3e, 0x73, 0x86, 0x3c, 0x80, 0x68, 0x6c, 0x85, 0xa3, + 0x41, 0x66, 0x90, 0x96, 0x0f, 0x8a, 0x59, 0xc9, 0x4a, 0xfe, 0x96, 0xb5, 0x49, 0x15, 0xc5, 0x81, + 0xe2, 0x3e, 0xac, 0x08, 0xbe, 0x7c, 0x9d, 0x40, 0x3a, 0x43, 0xf0, 0xd9, 0x8a, 0xee, 0xce, 0x26, + 
0x5d, 0xac, 0xb8, 0x67, 0xf1, 0x40, 0x49, 0x15, 0x2d, 0x8b, 0x3a, 0x7f, 0x19, 0x68, 0xff, 0x34, + 0xe1, 0x8e, 0xe2, 0x22, 0xc0, 0x3f, 0xa2, 0xfd, 0xac, 0x0a, 0x97, 0x29, 0x76, 0x68, 0xdc, 0x34, + 0x6e, 0xbd, 0x7f, 0xfb, 0x0b, 0x32, 0xef, 0xde, 0x0c, 0x4a, 0xc2, 0xd1, 0x20, 0x33, 0x48, 0x92, + 0x45, 0x93, 0xe4, 0x98, 0x9c, 0xf7, 0x7f, 0x02, 0x47, 0x9d, 0x81, 0x62, 0x36, 0xbe, 0x9a, 0xb4, + 0x2b, 0xe9, 0xa4, 0x8d, 0xe6, 0x36, 0x3a, 0xa3, 0x62, 0x0f, 0x35, 0x5c, 0xf0, 0x40, 0xc1, 0x79, + 0x98, 0x65, 0x94, 0x87, 0x3b, 0x3a, 0xcd, 0x9d, 0xed, 0xd2, 0x74, 0x17, 0xa5, 0xf6, 0x07, 0xe9, + 0xa4, 0xdd, 0x28, 0x99, 0x68, 0x19, 0xde, 0xf9, 0x6d, 0x07, 0x7d, 0x78, 0x21, 0xdc, 0x2e, 0x97, + 0x51, 0xac, 0x4d, 0x76, 0xec, 0x0e, 0x40, 0xbd, 0x83, 0x3a, 0x2f, 0xd1, 0xae, 0x0c, 0xc1, 0x29, + 0xca, 0xbb, 0x4d, 0x36, 0xee, 0x20, 0x59, 0xf3, 0x7e, 0xbd, 0x10, 0x1c, 0xbb, 0x5e, 0xf0, 0x77, + 0xb3, 0x13, 0xd5, 0x34, 0xfc, 0x14, 0xed, 0x49, 0xc5, 0x54, 0x2c, 0x0f, 0xab, 0x9a, 0x7b, 0xf7, + 0x0d, 0xb9, 0x5a, 0x6b, 0x37, 0x0b, 0xf2, 0x5e, 0x7e, 0xa6, 0x05, 0xb3, 0xf3, 0x87, 0x81, 0x3e, + 0x5d, 0xa3, 0x7a, 0xcc, 0xa5, 0xc2, 0x4f, 0x57, 0x3a, 0x46, 0xb6, 0xeb, 0x58, 0xa6, 0xd6, 0xfd, + 0x3a, 0x28, 0xb2, 0xee, 0x4f, 0x2d, 0x0b, 0xdd, 0xea, 0xa1, 0x1a, 0x57, 0xe0, 0x67, 0xdb, 0x50, + 0x5d, 0x42, 0x6f, 0x51, 0x96, 0xdd, 0x28, 0xd0, 0xb5, 0x47, 0x19, 0x84, 0xe6, 0xac, 0xce, 0x9f, + 0xd5, 0xb5, 0xe5, 0x64, 0xed, 0xc4, 0xcf, 0x50, 0xdd, 0xe7, 0xc1, 0xfd, 0x84, 0x71, 0x8f, 0xf5, + 0x3d, 0x78, 0xed, 0x12, 0x64, 0x37, 0x88, 0xe4, 0x37, 0x88, 0x3c, 0x0a, 0xd4, 0x79, 0xd4, 0x53, + 0x11, 0x0f, 0x06, 0xf6, 0x41, 0x3a, 0x69, 0xd7, 0xcf, 0x16, 0x48, 0xb4, 0xc4, 0xc5, 0x3f, 0xa0, + 0x7d, 0x09, 0x1e, 0x38, 0x4a, 0x44, 0x6f, 0xb6, 0xe9, 0x8f, 0x59, 0x1f, 0xbc, 0x5e, 0x21, 0xb5, + 0xeb, 0x59, 0xdf, 0xa6, 0x27, 0x3a, 0x43, 0x62, 0x0f, 0x35, 0x7d, 0xf6, 0xfc, 0x49, 0xc0, 0x66, + 0x85, 0x54, 0xdf, 0xb2, 0x10, 0x9c, 0x4e, 0xda, 0xcd, 0xb3, 0x12, 0x8b, 0x2e, 0xb1, 0xf1, 0x0b, + 0x03, 0xb5, 0xe2, 0x60, 0x08, 0xcc, 0x53, 0xc3, 0xf1, 0x85, 0x70, 0xa7, 0x9f, 0x8d, 0x0b, 0x3d, + 0xa1, 0xc3, 0xdd, 0x9b, 0xc6, 0xad, 0x1b, 0xf6, 0xbd, 0x74, 0xd2, 0x6e, 0x3d, 0xd9, 0x18, 0xf5, + 0xef, 0xa4, 0x6d, 0x6e, 0xf6, 0x5e, 0x8e, 0x43, 0xa0, 0xff, 0x93, 0xa3, 0xf3, 0x7b, 0x0d, 0x1d, + 0x6d, 0x5c, 0x6c, 0xfc, 0x1d, 0xc2, 0xa2, 0x2f, 0x21, 0x4a, 0xc0, 0x7d, 0x98, 0x7f, 0xe6, 0xb8, + 0x08, 0xf4, 0x6c, 0xab, 0x76, 0xab, 0xd8, 0x11, 0x7c, 0xbe, 0x12, 0x41, 0xd7, 0xa8, 0xf0, 0x2f, + 0x06, 0x6a, 0xb8, 0x79, 0x1a, 0x70, 0x2f, 0x84, 0x3b, 0xdd, 0xcd, 0x87, 0x6f, 0x73, 0xe5, 0x48, + 0x77, 0x91, 0x74, 0x1a, 0xa8, 0x68, 0x6c, 0x7f, 0x5c, 0xbc, 0x50, 0xa3, 0xe4, 0xa3, 0xe5, 0xa4, + 0x59, 0x49, 0xee, 0x0c, 0x29, 0xef, 0x7b, 0x9e, 0xf8, 0x19, 0x5c, 0x3d, 0xe5, 0xda, 0xbc, 0xa4, + 0xee, 0x4a, 0x04, 0x5d, 0xa3, 0xc2, 0x5f, 0xa3, 0xa6, 0x13, 0x47, 0x11, 0x04, 0xea, 0xdb, 0xbc, + 0xbf, 0x7a, 0x64, 0x35, 0xfb, 0x93, 0x82, 0xd3, 0x7c, 0x50, 0xf2, 0xd2, 0xa5, 0xe8, 0x4c, 0xef, + 0x82, 0xe4, 0x11, 0xb8, 0x53, 0x7d, 0xad, 0xac, 0xef, 0x96, 0xbc, 0x74, 0x29, 0x1a, 0x9f, 0xa0, + 0x3a, 0x3c, 0x0f, 0xc1, 0x99, 0x36, 0x74, 0x4f, 0xab, 0x3f, 0x2a, 0xd4, 0xf5, 0xd3, 0x05, 0x1f, + 0x2d, 0x45, 0x62, 0x07, 0x21, 0x47, 0x04, 0x2e, 0xcf, 0x7f, 0x32, 0xde, 0xd3, 0x83, 0xb0, 0xb6, + 0xbb, 0x48, 0x0f, 0xa6, 0xba, 0xf9, 0x07, 0x7b, 0x66, 0x92, 0x74, 0x01, 0xdb, 0xf2, 0x10, 0x5e, + 0x1d, 0x13, 0x3e, 0x40, 0xd5, 0x11, 0x8c, 0xf5, 0x12, 0xdd, 0xa0, 0xd9, 0x23, 0xbe, 0x87, 0x6a, + 0x09, 0xf3, 0x62, 0x28, 0x2e, 0xf4, 0x67, 0xdb, 0xbd, 0xc7, 0x25, 0xf7, 0x81, 0xe6, 0xc2, 0xaf, + 0x76, 0x4e, 0x0c, 0xfb, 
0x9b, 0xab, 0x6b, 0xb3, 0xf2, 0xf2, 0xda, 0xac, 0xbc, 0xba, 0x36, 0x2b, + 0x2f, 0x52, 0xd3, 0xb8, 0x4a, 0x4d, 0xe3, 0x65, 0x6a, 0x1a, 0xaf, 0x52, 0xd3, 0xf8, 0x3b, 0x35, + 0x8d, 0x5f, 0xff, 0x31, 0x2b, 0xdf, 0x1f, 0x6d, 0xfc, 0x9b, 0xf3, 0x5f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x96, 0x9a, 0x3a, 0xb5, 0x1b, 0x09, 0x00, 0x00, } -func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { +func (m *Eviction) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -744,104 +268,12 @@ func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { +func (m *Eviction) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *AllowedCSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllowedFlexVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Driver) - copy(dAtA[i:], m.Driver) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllowedHostPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i -= len(m.PathPrefix) - copy(dAtA[i:], m.PathPrefix) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Eviction) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Eviction) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Eviction) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Eviction) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -871,106 +303,6 @@ func (m *Eviction) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FSGroupStrategyOptions) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *HostPortRange) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HostPortRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *IDRange) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IDRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - func (m *PodDisruptionBudget) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1218,3446 +550,247 @@ func (m *PodDisruptionBudgetStatus) MarshalToSizedBuffer(dAtA []byte) (int, erro return len(dAtA) - i, nil } -func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ } - return dAtA[:n], nil -} - -func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + dAtA[offset] = uint8(v) + return base } - -func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *Eviction) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.DeleteOptions != nil { + l = m.DeleteOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - 
n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *PodDisruptionBudget) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *PodDisruptionBudgetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *PodDisruptionBudgetSpec) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if m.RuntimeClass != nil { - { - size, err := m.RuntimeClass.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc2 - } - if len(m.AllowedCSIDrivers) > 0 { - for iNdEx := len(m.AllowedCSIDrivers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedCSIDrivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xba - } - } - if m.RunAsGroup != nil { - { - size, err := m.RunAsGroup.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb2 - } - if len(m.AllowedProcMountTypes) > 0 { - for iNdEx := len(m.AllowedProcMountTypes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedProcMountTypes[iNdEx]) - copy(dAtA[i:], m.AllowedProcMountTypes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedProcMountTypes[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xaa - } - } - if len(m.ForbiddenSysctls) > 0 { - for iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ForbiddenSysctls[iNdEx]) - copy(dAtA[i:], m.ForbiddenSysctls[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ForbiddenSysctls[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 - } - } - if len(m.AllowedUnsafeSysctls) > 0 { - for iNdEx := len(m.AllowedUnsafeSysctls) - 1; iNdEx >= 0; 
iNdEx-- { - i -= len(m.AllowedUnsafeSysctls[iNdEx]) - copy(dAtA[i:], m.AllowedUnsafeSysctls[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedUnsafeSysctls[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x9a - } - } - if len(m.AllowedFlexVolumes) > 0 { - for iNdEx := len(m.AllowedFlexVolumes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedFlexVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x92 - } - } - if len(m.AllowedHostPaths) > 0 { - for iNdEx := len(m.AllowedHostPaths) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedHostPaths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a - } + if m.MinAvailable != nil { + l = m.MinAvailable.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.AllowPrivilegeEscalation != nil { - i-- - if *m.AllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.DefaultAllowPrivilegeEscalation != nil { - i-- - if *m.DefaultAllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x78 + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - if m.ReadOnlyRootFilesystem { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.UnhealthyPodEvictionPolicy != nil { + l = len(*m.UnhealthyPodEvictionPolicy) + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0x70 - { - size, err := m.FSGroup.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + return n +} + +func (m *PodDisruptionBudgetStatus) Size() (n int) { + if m == nil { + return 0 } - i-- - dAtA[i] = 0x6a - { - size, err := m.SupplementalGroups.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if len(m.DisruptedPods) > 0 { + for k, v := range m.DisruptedPods { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x62 - { - size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + n += 1 + sovGenerated(uint64(m.DisruptionsAllowed)) + n += 1 + sovGenerated(uint64(m.CurrentHealthy)) + n += 1 + sovGenerated(uint64(m.DesiredHealthy)) + n += 1 + sovGenerated(uint64(m.ExpectedPods)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x5a - { - size, err := m.SELinux.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Eviction) String() string { + if this == nil { + return "nil" } - i-- - dAtA[i] = 0x52 - i-- - if 
m.HostIPC { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + s := strings.Join([]string{`&Eviction{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "v1.DeleteOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudget) String() string { + if this == nil { + return "nil" } - i-- - dAtA[i] = 0x48 - i-- - if m.HostPID { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + s := strings.Join([]string{`&PodDisruptionBudget{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodDisruptionBudgetSpec", "PodDisruptionBudgetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodDisruptionBudgetStatus", "PodDisruptionBudgetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudgetList) String() string { + if this == nil { + return "nil" } - i-- - dAtA[i] = 0x40 - if len(m.HostPorts) > 0 { - for iNdEx := len(m.HostPorts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.HostPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } + repeatedStringForItems := "[]PodDisruptionBudget{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + "," } - i-- - if m.HostNetwork { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodDisruptionBudgetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudgetSpec) String() string { + if this == nil { + return "nil" } - i-- - dAtA[i] = 0x30 - if len(m.Volumes) > 0 { - for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Volumes[iNdEx]) - copy(dAtA[i:], m.Volumes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) - i-- - dAtA[i] = 0x2a - } + s := strings.Join([]string{`&PodDisruptionBudgetSpec{`, + `MinAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MinAvailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `UnhealthyPodEvictionPolicy:` + valueToStringGenerated(this.UnhealthyPodEvictionPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudgetStatus) String() string { + if this == nil { + return "nil" } - if len(m.AllowedCapabilities) > 0 { - for iNdEx := len(m.AllowedCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedCapabilities[iNdEx]) - copy(dAtA[i:], m.AllowedCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x22 - } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," } - if len(m.RequiredDropCapabilities) > 0 { 
- for iNdEx := len(m.RequiredDropCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.RequiredDropCapabilities[iNdEx]) - copy(dAtA[i:], m.RequiredDropCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequiredDropCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x1a - } + repeatedStringForConditions += "}" + keysForDisruptedPods := make([]string, 0, len(this.DisruptedPods)) + for k := range this.DisruptedPods { + keysForDisruptedPods = append(keysForDisruptedPods, k) } - if len(m.DefaultAddCapabilities) > 0 { - for iNdEx := len(m.DefaultAddCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.DefaultAddCapabilities[iNdEx]) - copy(dAtA[i:], m.DefaultAddCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DefaultAddCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x12 - } + github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) + mapStringForDisruptedPods := "map[string]v1.Time{" + for _, k := range keysForDisruptedPods { + mapStringForDisruptedPods += fmt.Sprintf("%v: %v,", k, this.DisruptedPods[k]) } - i-- - if m.Privileged { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + mapStringForDisruptedPods += "}" + s := strings.Join([]string{`&PodDisruptionBudgetStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `DisruptedPods:` + mapStringForDisruptedPods + `,`, + `DisruptionsAllowed:` + fmt.Sprintf("%v", this.DisruptionsAllowed) + `,`, + `CurrentHealthy:` + fmt.Sprintf("%v", this.CurrentHealthy) + `,`, + `DesiredHealthy:` + fmt.Sprintf("%v", this.DesiredHealthy) + `,`, + `ExpectedPods:` + fmt.Sprintf("%v", this.ExpectedPods) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" } - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) } - -func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RunAsGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RunAsUserStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if 
err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RuntimeClassStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RuntimeClassStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DefaultRuntimeClassName != nil { - i -= len(*m.DefaultRuntimeClassName) - copy(dAtA[i:], *m.DefaultRuntimeClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) - i-- - dAtA[i] = 0x12 - } - if len(m.AllowedRuntimeClassNames) > 0 { - for iNdEx := len(m.AllowedRuntimeClassNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedRuntimeClassNames[iNdEx]) - copy(dAtA[i:], m.AllowedRuntimeClassNames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedRuntimeClassNames[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SELinuxStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.SELinuxOptions != nil { - { - size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SupplementalGroupsStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *AllowedCSIDriver) Size() (n int) { - if m 
== nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AllowedFlexVolume) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AllowedHostPath) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.PathPrefix) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *Eviction) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.DeleteOptions != nil { - l = m.DeleteOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *FSGroupStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *HostPortRange) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Min)) - n += 1 + sovGenerated(uint64(m.Max)) - return n -} - -func (m *IDRange) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Min)) - n += 1 + sovGenerated(uint64(m.Max)) - return n -} - -func (m *PodDisruptionBudget) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodDisruptionBudgetList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodDisruptionBudgetSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.MinAvailable != nil { - l = m.MinAvailable.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.MaxUnavailable != nil { - l = m.MaxUnavailable.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.UnhealthyPodEvictionPolicy != nil { - l = len(*m.UnhealthyPodEvictionPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *PodDisruptionBudgetStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - if len(m.DisruptedPods) > 0 { - for k, v := range m.DisruptedPods { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - n += 1 + sovGenerated(uint64(m.DisruptionsAllowed)) - n += 1 + sovGenerated(uint64(m.CurrentHealthy)) - n += 1 + sovGenerated(uint64(m.DesiredHealthy)) - n += 1 + sovGenerated(uint64(m.ExpectedPods)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodSecurityPolicy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func 
(m *PodSecurityPolicyList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodSecurityPolicySpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 2 - if len(m.DefaultAddCapabilities) > 0 { - for _, s := range m.DefaultAddCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.RequiredDropCapabilities) > 0 { - for _, s := range m.RequiredDropCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedCapabilities) > 0 { - for _, s := range m.AllowedCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Volumes) > 0 { - for _, s := range m.Volumes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - n += 2 - if len(m.HostPorts) > 0 { - for _, e := range m.HostPorts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - n += 2 - n += 2 - l = m.SELinux.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.RunAsUser.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.SupplementalGroups.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FSGroup.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.DefaultAllowPrivilegeEscalation != nil { - n += 2 - } - if m.AllowPrivilegeEscalation != nil { - n += 3 - } - if len(m.AllowedHostPaths) > 0 { - for _, e := range m.AllowedHostPaths { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedFlexVolumes) > 0 { - for _, e := range m.AllowedFlexVolumes { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedUnsafeSysctls) > 0 { - for _, s := range m.AllowedUnsafeSysctls { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.ForbiddenSysctls) > 0 { - for _, s := range m.ForbiddenSysctls { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedProcMountTypes) > 0 { - for _, s := range m.AllowedProcMountTypes { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.RunAsGroup != nil { - l = m.RunAsGroup.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if len(m.AllowedCSIDrivers) > 0 { - for _, e := range m.AllowedCSIDrivers { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.RuntimeClass != nil { - l = m.RuntimeClass.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *RunAsGroupStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RunAsUserStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RuntimeClassStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.AllowedRuntimeClassNames) > 0 { - for _, s := range m.AllowedRuntimeClassNames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.DefaultRuntimeClassName != nil { - l = len(*m.DefaultRuntimeClassName) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m 
*SELinuxStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if m.SELinuxOptions != nil { - l = m.SELinuxOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *SupplementalGroupsStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AllowedCSIDriver) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedCSIDriver{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *AllowedFlexVolume) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedFlexVolume{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `}`, - }, "") - return s -} -func (this *AllowedHostPath) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedHostPath{`, - `PathPrefix:` + fmt.Sprintf("%v", this.PathPrefix) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *Eviction) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Eviction{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "v1.DeleteOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *FSGroupStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&FSGroupStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *HostPortRange) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HostPortRange{`, - `Min:` + fmt.Sprintf("%v", this.Min) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `}`, - }, "") - return s -} -func (this *IDRange) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IDRange{`, - `Min:` + fmt.Sprintf("%v", this.Min) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `}`, - }, "") - return s -} -func (this *PodDisruptionBudget) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodDisruptionBudget{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodDisruptionBudgetSpec", "PodDisruptionBudgetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodDisruptionBudgetStatus", "PodDisruptionBudgetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodDisruptionBudgetList) String() 
string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]PodDisruptionBudget{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PodDisruptionBudgetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *PodDisruptionBudgetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodDisruptionBudgetSpec{`, - `MinAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MinAvailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `UnhealthyPodEvictionPolicy:` + valueToStringGenerated(this.UnhealthyPodEvictionPolicy) + `,`, - `}`, - }, "") - return s -} -func (this *PodDisruptionBudgetStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]Condition{" - for _, f := range this.Conditions { - repeatedStringForConditions += fmt.Sprintf("%v", f) + "," - } - repeatedStringForConditions += "}" - keysForDisruptedPods := make([]string, 0, len(this.DisruptedPods)) - for k := range this.DisruptedPods { - keysForDisruptedPods = append(keysForDisruptedPods, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) - mapStringForDisruptedPods := "map[string]v1.Time{" - for _, k := range keysForDisruptedPods { - mapStringForDisruptedPods += fmt.Sprintf("%v: %v,", k, this.DisruptedPods[k]) - } - mapStringForDisruptedPods += "}" - s := strings.Join([]string{`&PodDisruptionBudgetStatus{`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `DisruptedPods:` + mapStringForDisruptedPods + `,`, - `DisruptionsAllowed:` + fmt.Sprintf("%v", this.DisruptionsAllowed) + `,`, - `CurrentHealthy:` + fmt.Sprintf("%v", this.CurrentHealthy) + `,`, - `DesiredHealthy:` + fmt.Sprintf("%v", this.DesiredHealthy) + `,`, - `ExpectedPods:` + fmt.Sprintf("%v", this.ExpectedPods) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodSecurityPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicyList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]PodSecurityPolicy{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PodSecurityPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func 
(this *PodSecurityPolicySpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForHostPorts := "[]HostPortRange{" - for _, f := range this.HostPorts { - repeatedStringForHostPorts += strings.Replace(strings.Replace(f.String(), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + "," - } - repeatedStringForHostPorts += "}" - repeatedStringForAllowedHostPaths := "[]AllowedHostPath{" - for _, f := range this.AllowedHostPaths { - repeatedStringForAllowedHostPaths += strings.Replace(strings.Replace(f.String(), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedHostPaths += "}" - repeatedStringForAllowedFlexVolumes := "[]AllowedFlexVolume{" - for _, f := range this.AllowedFlexVolumes { - repeatedStringForAllowedFlexVolumes += strings.Replace(strings.Replace(f.String(), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedFlexVolumes += "}" - repeatedStringForAllowedCSIDrivers := "[]AllowedCSIDriver{" - for _, f := range this.AllowedCSIDrivers { - repeatedStringForAllowedCSIDrivers += strings.Replace(strings.Replace(f.String(), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedCSIDrivers += "}" - s := strings.Join([]string{`&PodSecurityPolicySpec{`, - `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, - `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, - `RequiredDropCapabilities:` + fmt.Sprintf("%v", this.RequiredDropCapabilities) + `,`, - `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, - `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, - `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPorts:` + repeatedStringForHostPorts + `,`, - `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, - `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, - `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, - `RunAsUser:` + strings.Replace(strings.Replace(this.RunAsUser.String(), "RunAsUserStrategyOptions", "RunAsUserStrategyOptions", 1), `&`, ``, 1) + `,`, - `SupplementalGroups:` + strings.Replace(strings.Replace(this.SupplementalGroups.String(), "SupplementalGroupsStrategyOptions", "SupplementalGroupsStrategyOptions", 1), `&`, ``, 1) + `,`, - `FSGroup:` + strings.Replace(strings.Replace(this.FSGroup.String(), "FSGroupStrategyOptions", "FSGroupStrategyOptions", 1), `&`, ``, 1) + `,`, - `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, - `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, - `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `AllowedHostPaths:` + repeatedStringForAllowedHostPaths + `,`, - `AllowedFlexVolumes:` + repeatedStringForAllowedFlexVolumes + `,`, - `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, - `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, - `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, - `RunAsGroup:` + strings.Replace(this.RunAsGroup.String(), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, - `AllowedCSIDrivers:` + repeatedStringForAllowedCSIDrivers + `,`, - `RuntimeClass:` + strings.Replace(this.RuntimeClass.String(), "RuntimeClassStrategyOptions", "RuntimeClassStrategyOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this 
*RunAsGroupStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *RunAsUserStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&RunAsUserStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *RuntimeClassStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuntimeClassStrategyOptions{`, - `AllowedRuntimeClassNames:` + fmt.Sprintf("%v", this.AllowedRuntimeClassNames) + `,`, - `DefaultRuntimeClassName:` + valueToStringGenerated(this.DefaultRuntimeClassName) + `,`, - `}`, - }, "") - return s -} -func (this *SELinuxStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SELinuxStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v11.SELinuxOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *SupplementalGroupsStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedCSIDriver: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedCSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - 
if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedHostPath: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedHostPath: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathPrefix", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen 
- if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PathPrefix = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Eviction) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Eviction: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Eviction: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeleteOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DeleteOptions == nil { - m.DeleteOptions = &v1.DeleteOptions{} - } - if err := m.DeleteOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FSGroupStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FSGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = FSGroupStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HostPortRange) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HostPortRange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HostPortRange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) - } - m.Min = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Min |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - m.Max = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Max |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IDRange) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IDRange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IDRange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) - } - m.Min = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Min |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - m.Max = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Max |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudget: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudget: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := 
m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudgetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudgetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if 
msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PodDisruptionBudget{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudgetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudgetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MinAvailable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MinAvailable == nil { - m.MinAvailable = &intstr.IntOrString{} - } - if err := m.MinAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MaxUnavailable == nil { - 
m.MaxUnavailable = &intstr.IntOrString{} - } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UnhealthyPodEvictionPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := UnhealthyPodEvictionPolicyType(dAtA[iNdEx:postIndex]) - m.UnhealthyPodEvictionPolicy = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudgetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudgetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DisruptedPods", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DisruptedPods == nil { - m.DisruptedPods = make(map[string]v1.Time) - } - var mapkey string - mapvalue := &v1.Time{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &v1.Time{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.DisruptedPods[mapkey] = *mapvalue - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DisruptionsAllowed", wireType) - } - m.DisruptionsAllowed = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DisruptionsAllowed |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentHealthy", wireType) - } - m.CurrentHealthy = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CurrentHealthy |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredHealthy", wireType) - } - m.DesiredHealthy = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DesiredHealthy |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExpectedPods", wireType) - } - m.ExpectedPods = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExpectedPods |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, v1.Condition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - 
if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicyList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PodSecurityPolicy{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicySpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Privileged = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultAddCapabilities", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if 
intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DefaultAddCapabilities = append(m.DefaultAddCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequiredDropCapabilities", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RequiredDropCapabilities = append(m.RequiredDropCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedCapabilities", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllowedCapabilities = append(m.AllowedCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Volumes = append(m.Volumes, FSType(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.HostNetwork = bool(v != 0) - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPorts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.HostPorts = append(m.HostPorts, HostPortRange{}) - if err := m.HostPorts[len(m.HostPorts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.HostPID = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.HostIPC = bool(v != 0) - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinux", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SELinux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RunAsUser.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SupplementalGroups.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - 
} - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.FSGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnlyRootFilesystem = bool(v != 0) - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultAllowPrivilegeEscalation", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.DefaultAllowPrivilegeEscalation = &b - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } +func (m *Eviction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - b := bool(v != 0) - m.AllowPrivilegeEscalation = &b - case 17: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Eviction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Eviction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedHostPaths", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4684,14 +817,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedHostPaths = append(m.AllowedHostPaths, AllowedHostPath{}) - if err := m.AllowedHostPaths[len(m.AllowedHostPaths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 18: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeleteOptions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4718,110 +850,66 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) - if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.DeleteOptions == nil { + m.DeleteOptions = &v1.DeleteOptions{} + } + if err := m.DeleteOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = 
postIndex - case 19: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedUnsafeSysctls", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.AllowedUnsafeSysctls = append(m.AllowedUnsafeSysctls, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ForbiddenSysctls", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.ForbiddenSysctls = append(m.ForbiddenSysctls, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 21: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedProcMountTypes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 22: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudget: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudget: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4848,16 +936,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { 
return io.ErrUnexpectedEOF } - if m.RunAsGroup == nil { - m.RunAsGroup = &RunAsGroupStrategyOptions{} - } - if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 23: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedCSIDrivers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4884,14 +969,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedCSIDrivers = append(m.AllowedCSIDrivers, AllowedCSIDriver{}) - if err := m.AllowedCSIDrivers[len(m.AllowedCSIDrivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 24: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClass", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4918,10 +1002,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RuntimeClass == nil { - m.RuntimeClass = &RuntimeClassStrategyOptions{} - } - if err := m.RuntimeClass.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -4946,7 +1027,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4969,17 +1050,17 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunAsGroupStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: PodDisruptionBudgetList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodDisruptionBudgetList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4989,27 +1070,28 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = RunAsGroupStrategy(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong 
wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5036,8 +1118,8 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, PodDisruptionBudget{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5062,7 +1144,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5085,17 +1167,17 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: PodDisruptionBudgetSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodDisruptionBudgetSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MinAvailable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5105,27 +1187,31 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = RunAsUserStrategy(dAtA[iNdEx:postIndex]) + if m.MinAvailable == nil { + m.MinAvailable = &intstr.IntOrString{} + } + if err := m.MinAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5152,66 +1238,18 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { 
- preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuntimeClassStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuntimeClassStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedRuntimeClassNames", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5221,27 +1259,31 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedRuntimeClassNames = append(m.AllowedRuntimeClassNames, string(dAtA[iNdEx:postIndex])) + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultRuntimeClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UnhealthyPodEvictionPolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5269,8 +1311,8 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.DefaultRuntimeClassName = &s + s := UnhealthyPodEvictionPolicyType(dAtA[iNdEx:postIndex]) + m.UnhealthyPodEvictionPolicy = &s iNdEx = postIndex default: iNdEx = preIndex @@ -5293,7 +1335,7 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5316,17 +1358,17 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SELinuxStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: PodDisruptionBudgetStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SELinuxStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodDisruptionBudgetStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) } - var stringLen uint64 + m.ObservedGeneration = 0 for shift := uint(0); ; shift 
+= 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5336,27 +1378,14 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = SELinuxStrategy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DisruptedPods", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5383,68 +1412,149 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SELinuxOptions == nil { - m.SELinuxOptions = &v11.SELinuxOptions{} + if m.DisruptedPods == nil { + m.DisruptedPods = make(map[string]v1.Time) } - if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var mapkey string + mapvalue := &v1.Time{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &v1.Time{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.DisruptedPods[mapkey] = *mapvalue iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + case 3: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DisruptionsAllowed", wireType) } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + m.DisruptionsAllowed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DisruptionsAllowed |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentHealthy", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.CurrentHealthy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentHealthy |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredHealthy", wireType) } - var stringLen uint64 + m.DesiredHealthy = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5454,27 +1564,33 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.DesiredHealthy |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpectedPods", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF + m.ExpectedPods = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExpectedPods |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - m.Rule = SupplementalGroupsStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5501,8 +1617,8 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := 
m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index 16301c236..d1409913f 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -21,7 +21,6 @@ syntax = "proto2"; package k8s.io.api.policy.v1beta1; -import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -30,35 +29,6 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/policy/v1beta1"; -// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. -message AllowedCSIDriver { - // Name is the registered name of the CSI driver - optional string name = 1; -} - -// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. -message AllowedFlexVolume { - // driver is the name of the Flexvolume driver. - optional string driver = 1; -} - -// AllowedHostPath defines the host volume conditions that will be enabled by a policy -// for pods to use. It requires the path prefix to be defined. -message AllowedHostPath { - // pathPrefix is the path prefix that the host volume must match. - // It does not support `*`. - // Trailing slashes are trimmed when validating the path prefix with a host path. - // - // Examples: - // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` - // `/foo` would not allow `/food` or `/etc/foo` - optional string pathPrefix = 1; - - // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. - // +optional - optional bool readOnly = 2; -} - // Eviction evicts a pod from its node subject to certain policies and safety constraints. // This is a subresource of Pod. A request to cause such an eviction is // created by POSTing to .../pods//evictions. @@ -72,37 +42,6 @@ message Eviction { optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; } -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -message FSGroupStrategyOptions { - // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - // +optional - optional string rule = 1; - - // ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// HostPortRange defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -message HostPortRange { - // min is the start of the range, inclusive. - optional int32 min = 1; - - // max is the end of the range, inclusive. - optional int32 max = 2; -} - -// IDRange provides a min/max of an allowed range of IDs. -message IDRange { - // min is the start of the range, inclusive. - optional int64 min = 1; - - // max is the end of the range, inclusive. - optional int64 max = 2; -} - // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods message PodDisruptionBudget { // Standard object's metadata. 
@@ -238,219 +177,3 @@ message PodDisruptionBudgetStatus { repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 7; } -// PodSecurityPolicy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. -// Deprecated in 1.21. -message PodSecurityPolicy { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec defines the policy enforced. - // +optional - optional PodSecurityPolicySpec spec = 2; -} - -// PodSecurityPolicyList is a list of PodSecurityPolicy objects. -message PodSecurityPolicyList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is a list of schema objects. - repeated PodSecurityPolicy items = 2; -} - -// PodSecurityPolicySpec defines the policy enforced. -message PodSecurityPolicySpec { - // privileged determines if a pod can request to be run as privileged. - // +optional - optional bool privileged = 1; - - // defaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capability in both - // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly - // allowed, and need not be included in the allowedCapabilities list. - // +optional - repeated string defaultAddCapabilities = 2; - - // requiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - // +optional - repeated string requiredDropCapabilities = 3; - - // allowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. - // +optional - repeated string allowedCapabilities = 4; - - // volumes is an allowlist of volume plugins. Empty indicates that - // no volumes may be used. To allow all volumes you may use '*'. - // +optional - repeated string volumes = 5; - - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - // +optional - optional bool hostNetwork = 6; - - // hostPorts determines which host port ranges are allowed to be exposed. - // +optional - repeated HostPortRange hostPorts = 7; - - // hostPID determines if the policy allows the use of HostPID in the pod spec. - // +optional - optional bool hostPID = 8; - - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. - // +optional - optional bool hostIPC = 9; - - // seLinux is the strategy that will dictate the allowable labels that may be set. - optional SELinuxStrategyOptions seLinux = 10; - - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - optional RunAsUserStrategyOptions runAsUser = 11; - - // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. - // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the - // RunAsGroup feature gate to be enabled. 
- // +optional - optional RunAsGroupStrategyOptions runAsGroup = 22; - - // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - optional SupplementalGroupsStrategyOptions supplementalGroups = 12; - - // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. - optional FSGroupStrategyOptions fsGroup = 13; - - // readOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - // +optional - optional bool readOnlyRootFilesystem = 14; - - // defaultAllowPrivilegeEscalation controls the default setting for whether a - // process can gain more privileges than its parent process. - // +optional - optional bool defaultAllowPrivilegeEscalation = 15; - - // allowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. If unspecified, defaults to true. - // +optional - optional bool allowPrivilegeEscalation = 16; - - // allowedHostPaths is an allowlist of host paths. Empty indicates - // that all host paths may be used. - // +optional - repeated AllowedHostPath allowedHostPaths = 17; - - // allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all - // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes - // is allowed in the "volumes" field. - // +optional - repeated AllowedFlexVolume allowedFlexVolumes = 18; - - // AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // +optional - repeated AllowedCSIDriver allowedCSIDrivers = 23; - - // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. - // Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection. - // - // Examples: - // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. - // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. - // +optional - repeated string allowedUnsafeSysctls = 19; - - // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // - // Examples: - // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. - // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. - // +optional - repeated string forbiddenSysctls = 20; - - // AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. - // Empty or nil indicates that only the DefaultProcMountType may be used. - // This requires the ProcMountType feature flag to be enabled. - // +optional - repeated string allowedProcMountTypes = 21; - - // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. - // If this field is omitted, the pod's runtimeClassName field is unrestricted. - // Enforcement of this field depends on the RuntimeClass feature gate being enabled. 
- // +optional - optional RuntimeClassStrategyOptions runtimeClass = 24; -} - -// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. -message RunAsGroupStrategyOptions { - // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. - optional string rule = 1; - - // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. -message RunAsUserStrategyOptions { - // rule is the strategy that will dictate the allowable RunAsUser values that may be set. - optional string rule = 1; - - // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses -// for a pod. -message RuntimeClassStrategyOptions { - // allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. - // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the - // list. An empty list requires the RuntimeClassName field to be unset. - repeated string allowedRuntimeClassNames = 1; - - // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. - // The default MUST be allowed by the allowedRuntimeClassNames list. - // A value of nil does not mutate the Pod. - // +optional - optional string defaultRuntimeClassName = 2; -} - -// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. -message SELinuxStrategyOptions { - // rule is the strategy that will dictate the allowable labels that may be set. - optional string rule = 1; - - // seLinuxOptions required to run as; required for MustRunAs - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2; -} - -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -message SupplementalGroupsStrategyOptions { - // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - // +optional - optional string rule = 1; - - // ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. Required for MustRunAs. 
- // +optional - repeated IDRange ranges = 2; -} - diff --git a/vendor/k8s.io/api/policy/v1beta1/register.go b/vendor/k8s.io/api/policy/v1beta1/register.go index b3efd6326..d77f13040 100644 --- a/vendor/k8s.io/api/policy/v1beta1/register.go +++ b/vendor/k8s.io/api/policy/v1beta1/register.go @@ -46,8 +46,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &PodDisruptionBudget{}, &PodDisruptionBudgetList{}, - &PodSecurityPolicy{}, - &PodSecurityPolicyList{}, &Eviction{}, ) // Add the watch version that applies diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go index 1e6b075e3..bc5f970d2 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types.go +++ b/vendor/k8s.io/api/policy/v1beta1/types.go @@ -17,7 +17,6 @@ limitations under the License. package v1beta1 import ( - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" ) @@ -228,373 +227,3 @@ type Eviction struct { // +optional DeleteOptions *metav1.DeleteOptions `json:"deleteOptions,omitempty" protobuf:"bytes,2,opt,name=deleteOptions"` } - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.10 -// +k8s:prerelease-lifecycle-gen:deprecated=1.21 -// +k8s:prerelease-lifecycle-gen:removed=1.25 - -// PodSecurityPolicy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. -// Deprecated in 1.21. -type PodSecurityPolicy struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec defines the policy enforced. - // +optional - Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// PodSecurityPolicySpec defines the policy enforced. -type PodSecurityPolicySpec struct { - // privileged determines if a pod can request to be run as privileged. - // +optional - Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"` - // defaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capability in both - // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly - // allowed, and need not be included in the allowedCapabilities list. - // +optional - DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // requiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - // +optional - RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // allowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. 
- // +optional - AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // volumes is an allowlist of volume plugins. Empty indicates that - // no volumes may be used. To allow all volumes you may use '*'. - // +optional - Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"` - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - // +optional - HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,6,opt,name=hostNetwork"` - // hostPorts determines which host port ranges are allowed to be exposed. - // +optional - HostPorts []HostPortRange `json:"hostPorts,omitempty" protobuf:"bytes,7,rep,name=hostPorts"` - // hostPID determines if the policy allows the use of HostPID in the pod spec. - // +optional - HostPID bool `json:"hostPID,omitempty" protobuf:"varint,8,opt,name=hostPID"` - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. - // +optional - HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,9,opt,name=hostIPC"` - // seLinux is the strategy that will dictate the allowable labels that may be set. - SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` - // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. - // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the - // RunAsGroup feature gate to be enabled. - // +optional - RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"` - // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` - // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. - FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"` - // readOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - // +optional - ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"` - // defaultAllowPrivilegeEscalation controls the default setting for whether a - // process can gain more privileges than its parent process. - // +optional - DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,15,opt,name=defaultAllowPrivilegeEscalation"` - // allowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. If unspecified, defaults to true. - // +optional - AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,16,opt,name=allowPrivilegeEscalation"` - // allowedHostPaths is an allowlist of host paths. Empty indicates - // that all host paths may be used. 
- // +optional - AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"` - // allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all - // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes - // is allowed in the "volumes" field. - // +optional - AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` - // AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // +optional - AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` - // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. - // Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection. - // - // Examples: - // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. - // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. - // +optional - AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,19,rep,name=allowedUnsafeSysctls"` - // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // - // Examples: - // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. - // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. - // +optional - ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,20,rep,name=forbiddenSysctls"` - // AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. - // Empty or nil indicates that only the DefaultProcMountType may be used. - // This requires the ProcMountType feature flag to be enabled. - // +optional - AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"` - // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. - // If this field is omitted, the pod's runtimeClassName field is unrestricted. - // Enforcement of this field depends on the RuntimeClass feature gate being enabled. - // +optional - RuntimeClass *RuntimeClassStrategyOptions `json:"runtimeClass,omitempty" protobuf:"bytes,24,opt,name=runtimeClass"` -} - -// AllowedHostPath defines the host volume conditions that will be enabled by a policy -// for pods to use. It requires the path prefix to be defined. -type AllowedHostPath struct { - // pathPrefix is the path prefix that the host volume must match. - // It does not support `*`. - // Trailing slashes are trimmed when validating the path prefix with a host path. - // - // Examples: - // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` - // `/foo` would not allow `/food` or `/etc/foo` - PathPrefix string `json:"pathPrefix,omitempty" protobuf:"bytes,1,rep,name=pathPrefix"` - - // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. 
- // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` -} - -// AllowAllCapabilities can be used as a value for the PodSecurityPolicy.AllowAllCapabilities -// field and means that any capabilities are allowed to be requested. -var AllowAllCapabilities v1.Capability = "*" - -// FSType gives strong typing to different file systems that are used by volumes. -type FSType string - -const ( - AzureFile FSType = "azureFile" - Flocker FSType = "flocker" - FlexVolume FSType = "flexVolume" - HostPath FSType = "hostPath" - EmptyDir FSType = "emptyDir" - GCEPersistentDisk FSType = "gcePersistentDisk" - AWSElasticBlockStore FSType = "awsElasticBlockStore" - GitRepo FSType = "gitRepo" - Secret FSType = "secret" - NFS FSType = "nfs" - ISCSI FSType = "iscsi" - Glusterfs FSType = "glusterfs" - PersistentVolumeClaim FSType = "persistentVolumeClaim" - RBD FSType = "rbd" - Cinder FSType = "cinder" - CephFS FSType = "cephFS" - DownwardAPI FSType = "downwardAPI" - FC FSType = "fc" - ConfigMap FSType = "configMap" - VsphereVolume FSType = "vsphereVolume" - Quobyte FSType = "quobyte" - AzureDisk FSType = "azureDisk" - PhotonPersistentDisk FSType = "photonPersistentDisk" - StorageOS FSType = "storageos" - Projected FSType = "projected" - PortworxVolume FSType = "portworxVolume" - ScaleIO FSType = "scaleIO" - CSI FSType = "csi" - Ephemeral FSType = "ephemeral" - All FSType = "*" -) - -// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. -type AllowedFlexVolume struct { - // driver is the name of the Flexvolume driver. - Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` -} - -// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. -type AllowedCSIDriver struct { - // Name is the registered name of the CSI driver - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` -} - -// HostPortRange defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -type HostPortRange struct { - // min is the start of the range, inclusive. - Min int32 `json:"min" protobuf:"varint,1,opt,name=min"` - // max is the end of the range, inclusive. - Max int32 `json:"max" protobuf:"varint,2,opt,name=max"` -} - -// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. -type SELinuxStrategyOptions struct { - // rule is the strategy that will dictate the allowable labels that may be set. - Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"` - // seLinuxOptions required to run as; required for MustRunAs - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` -} - -// SELinuxStrategy denotes strategy types for generating SELinux options for a -// Security Context. -type SELinuxStrategy string - -const ( - // SELinuxStrategyMustRunAs means that container must have SELinux labels of X applied. - SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs" - // SELinuxStrategyRunAsAny means that container may make requests for any SELinux context labels. - SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny" -) - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. 
-type RunAsUserStrategyOptions struct { - // rule is the strategy that will dictate the allowable RunAsUser values that may be set. - Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"` - // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. -type RunAsGroupStrategyOptions struct { - // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. - Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"` - // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// IDRange provides a min/max of an allowed range of IDs. -type IDRange struct { - // min is the start of the range, inclusive. - Min int64 `json:"min" protobuf:"varint,1,opt,name=min"` - // max is the end of the range, inclusive. - Max int64 `json:"max" protobuf:"varint,2,opt,name=max"` -} - -// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a -// Security Context. -type RunAsUserStrategy string - -const ( - // RunAsUserStrategyMustRunAs means that container must run as a particular uid. - RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs" - // RunAsUserStrategyMustRunAsNonRoot means that container must run as a non-root uid. - RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot" - // RunAsUserStrategyRunAsAny means that container may make requests for any uid. - RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" -) - -// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a -// Security Context. -type RunAsGroupStrategy string - -const ( - // RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid. - // However, when RunAsGroup are specified, they have to fall in the defined range. - RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs" - // RunAsGroupStrategyMustRunAs means that container must run as a particular gid. - RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs" - // RunAsUserStrategyRunAsAny means that container may make requests for any gid. - RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny" -) - -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -type FSGroupStrategyOptions struct { - // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - // +optional - Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"` - // ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. Required for MustRunAs. 
- // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// FSGroupStrategyType denotes strategy types for generating FSGroup values for a -// SecurityContext -type FSGroupStrategyType string - -const ( - // FSGroupStrategyMayRunAs means that container does not need to have FSGroup of X applied. - // However, when FSGroups are specified, they have to fall in the defined range. - FSGroupStrategyMayRunAs FSGroupStrategyType = "MayRunAs" - // FSGroupStrategyMustRunAs meant that container must have FSGroup of X applied. - FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" - // FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels. - FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" -) - -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -type SupplementalGroupsStrategyOptions struct { - // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - // +optional - Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"` - // ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental -// groups for a SecurityContext. -type SupplementalGroupsStrategyType string - -const ( - // SupplementalGroupsStrategyMayRunAs means that container does not need to run with a particular gid. - // However, when gids are specified, they have to fall in the defined range. - SupplementalGroupsStrategyMayRunAs SupplementalGroupsStrategyType = "MayRunAs" - // SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid. - SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" - // SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid. - SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" -) - -// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses -// for a pod. -type RuntimeClassStrategyOptions struct { - // allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. - // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the - // list. An empty list requires the RuntimeClassName field to be unset. - AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames" protobuf:"bytes,1,rep,name=allowedRuntimeClassNames"` - // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. - // The default MUST be allowed by the allowedRuntimeClassNames list. - // A value of nil does not mutate the Pod. - // +optional - DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty" protobuf:"bytes,2,opt,name=defaultRuntimeClassName"` -} - -// AllowAllRuntimeClassNames can be used as a value for the -// RuntimeClassStrategyOptions.AllowedRuntimeClassNames field and means that any RuntimeClassName is -// allowed. 
-const AllowAllRuntimeClassNames = "*" - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.10 -// +k8s:prerelease-lifecycle-gen:deprecated=1.21 -// +k8s:prerelease-lifecycle-gen:removed=1.25 - -// PodSecurityPolicyList is a list of PodSecurityPolicy objects. -type PodSecurityPolicyList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is a list of schema objects. - Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go index 266a9a853..4a79d7594 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go @@ -27,34 +27,6 @@ package v1beta1 // Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_AllowedCSIDriver = map[string]string{ - "": "AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.", - "name": "Name is the registered name of the CSI driver", -} - -func (AllowedCSIDriver) SwaggerDoc() map[string]string { - return map_AllowedCSIDriver -} - -var map_AllowedFlexVolume = map[string]string{ - "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.", - "driver": "driver is the name of the Flexvolume driver.", -} - -func (AllowedFlexVolume) SwaggerDoc() map[string]string { - return map_AllowedFlexVolume -} - -var map_AllowedHostPath = map[string]string{ - "": "AllowedHostPath defines the host volume conditions that will be enabled by a policy for pods to use. It requires the path prefix to be defined.", - "pathPrefix": "pathPrefix is the path prefix that the host volume must match. It does not support `*`. Trailing slashes are trimmed when validating the path prefix with a host path.\n\nExamples: `/foo` would allow `/foo`, `/foo/` and `/foo/bar` `/foo` would not allow `/food` or `/etc/foo`", - "readOnly": "when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.", -} - -func (AllowedHostPath) SwaggerDoc() map[string]string { - return map_AllowedHostPath -} - var map_Eviction = map[string]string{ "": "Eviction evicts a pod from its node subject to certain policies and safety constraints. This is a subresource of Pod. A request to cause such an eviction is created by POSTing to .../pods//evictions.", "metadata": "ObjectMeta describes the pod that is being evicted.", @@ -65,36 +37,6 @@ func (Eviction) SwaggerDoc() map[string]string { return map_Eviction } -var map_FSGroupStrategyOptions = map[string]string{ - "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy.", - "rule": "rule is the strategy that will dictate what FSGroup is used in the SecurityContext.", - "ranges": "ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. 
Required for MustRunAs.", -} - -func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_FSGroupStrategyOptions -} - -var map_HostPortRange = map[string]string{ - "": "HostPortRange defines a range of host ports that will be enabled by a policy for pods to use. It requires both the start and end to be defined.", - "min": "min is the start of the range, inclusive.", - "max": "max is the end of the range, inclusive.", -} - -func (HostPortRange) SwaggerDoc() map[string]string { - return map_HostPortRange -} - -var map_IDRange = map[string]string{ - "": "IDRange provides a min/max of an allowed range of IDs.", - "min": "min is the start of the range, inclusive.", - "max": "max is the end of the range, inclusive.", -} - -func (IDRange) SwaggerDoc() map[string]string { - return map_IDRange -} - var map_PodDisruptionBudget = map[string]string{ "": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -143,106 +85,4 @@ func (PodDisruptionBudgetStatus) SwaggerDoc() map[string]string { return map_PodDisruptionBudgetStatus } -var map_PodSecurityPolicy = map[string]string{ - "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container. Deprecated in 1.21.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec defines the policy enforced.", -} - -func (PodSecurityPolicy) SwaggerDoc() map[string]string { - return map_PodSecurityPolicy -} - -var map_PodSecurityPolicyList = map[string]string{ - "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is a list of schema objects.", -} - -func (PodSecurityPolicyList) SwaggerDoc() map[string]string { - return map_PodSecurityPolicyList -} - -var map_PodSecurityPolicySpec = map[string]string{ - "": "PodSecurityPolicySpec defines the policy enforced.", - "privileged": "privileged determines if a pod can request to be run as privileged.", - "defaultAddCapabilities": "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list.", - "requiredDropCapabilities": "requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", - "allowedCapabilities": "allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.", - "volumes": "volumes is an allowlist of volume plugins. Empty indicates that no volumes may be used. 
To allow all volumes you may use '*'.", - "hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", - "hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.", - "hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.", - "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", - "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", - "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", - "runAsGroup": "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. This field requires the RunAsGroup feature gate to be enabled.", - "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", - "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", - "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", - "defaultAllowPrivilegeEscalation": "defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", - "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", - "allowedHostPaths": "allowedHostPaths is an allowlist of host paths. Empty indicates that all host paths may be used.", - "allowedFlexVolumes": "allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", - "allowedCSIDrivers": "AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes.", - "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", - "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", - "allowedProcMountTypes": "AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. 
This requires the ProcMountType feature flag to be enabled.", - "runtimeClass": "runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. If this field is omitted, the pod's runtimeClassName field is unrestricted. Enforcement of this field depends on the RuntimeClass feature gate being enabled.", -} - -func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { - return map_PodSecurityPolicySpec -} - -var map_RunAsGroupStrategyOptions = map[string]string{ - "": "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.", - "rule": "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.", - "ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsGroupStrategyOptions -} - -var map_RunAsUserStrategyOptions = map[string]string{ - "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.", - "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", - "ranges": "ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsUserStrategyOptions -} - -var map_RuntimeClassStrategyOptions = map[string]string{ - "": "RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod.", - "allowedRuntimeClassNames": "allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. A value of \"*\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset.", - "defaultRuntimeClassName": "defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. A value of nil does not mutate the Pod.", -} - -func (RuntimeClassStrategyOptions) SwaggerDoc() map[string]string { - return map_RuntimeClassStrategyOptions -} - -var map_SELinuxStrategyOptions = map[string]string{ - "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.", - "rule": "rule is the strategy that will dictate the allowable labels that may be set.", - "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", -} - -func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { - return map_SELinuxStrategyOptions -} - -var map_SupplementalGroupsStrategyOptions = map[string]string{ - "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.", - "rule": "rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.", - "ranges": "ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. 
Required for MustRunAs.", -} - -func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { - return map_SupplementalGroupsStrategyOptions -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go index 8602d1adc..51895ffdb 100644 --- a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go @@ -22,60 +22,11 @@ limitations under the License. package v1beta1 import ( - corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedCSIDriver) DeepCopyInto(out *AllowedCSIDriver) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedCSIDriver. -func (in *AllowedCSIDriver) DeepCopy() *AllowedCSIDriver { - if in == nil { - return nil - } - out := new(AllowedCSIDriver) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. -func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { - if in == nil { - return nil - } - out := new(AllowedFlexVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath. -func (in *AllowedHostPath) DeepCopy() *AllowedHostPath { - if in == nil { - return nil - } - out := new(AllowedHostPath) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Eviction) DeepCopyInto(out *Eviction) { *out = *in @@ -107,59 +58,6 @@ func (in *Eviction) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions. -func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions { - if in == nil { - return nil - } - out := new(FSGroupStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostPortRange) DeepCopyInto(out *HostPortRange) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPortRange. 
-func (in *HostPortRange) DeepCopy() *HostPortRange { - if in == nil { - return nil - } - out := new(HostPortRange) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IDRange) DeepCopyInto(out *IDRange) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange. -func (in *IDRange) DeepCopy() *IDRange { - if in == nil { - return nil - } - out := new(IDRange) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodDisruptionBudget) DeepCopyInto(out *PodDisruptionBudget) { *out = *in @@ -286,268 +184,3 @@ func (in *PodDisruptionBudgetStatus) DeepCopy() *PodDisruptionBudgetStatus { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSecurityPolicy) DeepCopyInto(out *PodSecurityPolicy) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicy. -func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy { - if in == nil { - return nil - } - out := new(PodSecurityPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodSecurityPolicy, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyList. -func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList { - if in == nil { - return nil - } - out := new(PodSecurityPolicyList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { - *out = *in - if in.DefaultAddCapabilities != nil { - in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.RequiredDropCapabilities != nil { - in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.AllowedCapabilities != nil { - in, out := &in.AllowedCapabilities, &out.AllowedCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]FSType, len(*in)) - copy(*out, *in) - } - if in.HostPorts != nil { - in, out := &in.HostPorts, &out.HostPorts - *out = make([]HostPortRange, len(*in)) - copy(*out, *in) - } - in.SELinux.DeepCopyInto(&out.SELinux) - in.RunAsUser.DeepCopyInto(&out.RunAsUser) - if in.RunAsGroup != nil { - in, out := &in.RunAsGroup, &out.RunAsGroup - *out = new(RunAsGroupStrategyOptions) - (*in).DeepCopyInto(*out) - } - in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) - in.FSGroup.DeepCopyInto(&out.FSGroup) - if in.DefaultAllowPrivilegeEscalation != nil { - in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.AllowPrivilegeEscalation != nil { - in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.AllowedHostPaths != nil { - in, out := &in.AllowedHostPaths, &out.AllowedHostPaths - *out = make([]AllowedHostPath, len(*in)) - copy(*out, *in) - } - if in.AllowedFlexVolumes != nil { - in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes - *out = make([]AllowedFlexVolume, len(*in)) - copy(*out, *in) - } - if in.AllowedCSIDrivers != nil { - in, out := &in.AllowedCSIDrivers, &out.AllowedCSIDrivers - *out = make([]AllowedCSIDriver, len(*in)) - copy(*out, *in) - } - if in.AllowedUnsafeSysctls != nil { - in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ForbiddenSysctls != nil { - in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.AllowedProcMountTypes != nil { - in, out := &in.AllowedProcMountTypes, &out.AllowedProcMountTypes - *out = make([]corev1.ProcMountType, len(*in)) - copy(*out, *in) - } - if in.RuntimeClass != nil { - in, out := &in.RuntimeClass, &out.RuntimeClass - *out = new(RuntimeClassStrategyOptions) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySpec. -func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec { - if in == nil { - return nil - } - out := new(PodSecurityPolicySpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions. 
-func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions { - if in == nil { - return nil - } - out := new(RunAsGroupStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions. -func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { - if in == nil { - return nil - } - out := new(RunAsUserStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) { - *out = *in - if in.AllowedRuntimeClassNames != nil { - in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.DefaultRuntimeClassName != nil { - in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions. -func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions { - if in == nil { - return nil - } - out := new(RuntimeClassStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { - *out = *in - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(corev1.SELinuxOptions) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxStrategyOptions. -func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions { - if in == nil { - return nil - } - out := new(SELinuxStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions. 
-func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions { - if in == nil { - return nil - } - out := new(SupplementalGroupsStrategyOptions) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go index 612061d6c..765a71e47 100644 --- a/vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go @@ -90,39 +90,3 @@ func (in *PodDisruptionBudgetList) APILifecycleReplacement() schema.GroupVersion func (in *PodDisruptionBudgetList) APILifecycleRemoved() (major, minor int) { return 1, 25 } - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *PodSecurityPolicy) APILifecycleIntroduced() (major, minor int) { - return 1, 10 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *PodSecurityPolicy) APILifecycleDeprecated() (major, minor int) { - return 1, 21 -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *PodSecurityPolicy) APILifecycleRemoved() (major, minor int) { - return 1, 25 -} - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *PodSecurityPolicyList) APILifecycleIntroduced() (major, minor int) { - return 1, 10 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *PodSecurityPolicyList) APILifecycleDeprecated() (major, minor int) { - return 1, 21 -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *PodSecurityPolicyList) APILifecycleRemoved() (major, minor int) { - return 1, 25 -} diff --git a/vendor/k8s.io/api/resource/v1alpha2/generated.proto b/vendor/k8s.io/api/resource/v1alpha2/generated.proto index 02412398c..f7748f9a1 100644 --- a/vendor/k8s.io/api/resource/v1alpha2/generated.proto +++ b/vendor/k8s.io/api/resource/v1alpha2/generated.proto @@ -107,7 +107,7 @@ message PodSchedulingContextSpec { // that suits all pending resources. This may get increased in the // future, but not reduced. 
// - // +listType=set + // +listType=atomic // +optional repeated string potentialNodes = 2; } @@ -208,7 +208,7 @@ message ResourceClaimSchedulingStatus { // PodSchedulingSpec.PotentialNodes. This may get increased in the // future, but not reduced. // - // +listType=set + // +listType=atomic // +optional repeated string unsuitableNodes = 2; } diff --git a/vendor/k8s.io/api/resource/v1alpha2/types.go b/vendor/k8s.io/api/resource/v1alpha2/types.go index 21936bfe3..a614ff9dc 100644 --- a/vendor/k8s.io/api/resource/v1alpha2/types.go +++ b/vendor/k8s.io/api/resource/v1alpha2/types.go @@ -248,7 +248,7 @@ type PodSchedulingContextSpec struct { // that suits all pending resources. This may get increased in the // future, but not reduced. // - // +listType=set + // +listType=atomic // +optional PotentialNodes []string `json:"potentialNodes,omitempty" protobuf:"bytes,2,opt,name=potentialNodes"` } @@ -283,7 +283,7 @@ type ResourceClaimSchedulingStatus struct { // PodSchedulingSpec.PotentialNodes. This may get increased in the // future, but not reduced. // - // +listType=set + // +listType=atomic // +optional UnsuitableNodes []string `json:"unsuitableNodes,omitempty" protobuf:"bytes,2,opt,name=unsuitableNodes"` } diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index 5f8eccaef..b35f708c6 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -88,7 +88,7 @@ message CSIDriverSpec { // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. // - // The following VolumeConext will be passed if podInfoOnMount is set to true. + // The following VolumeContext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. // "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index c785f368e..7d7b7664b 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -291,7 +291,7 @@ type CSIDriverSpec struct { // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. // - // The following VolumeConext will be passed if podInfoOnMount is set to true. + // The following VolumeContext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. 
// "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index c92a7f95a..69ee68361 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -50,7 +50,7 @@ func (CSIDriverList) SwaggerDoc() map[string]string { var map_CSIDriverSpec = map[string]string{ "": "CSIDriverSpec is the specification of a CSIDriver.", "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", - "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. 
\"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is beta. This field is immutable.", "storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", "fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. 
With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go index 1f3f38010..2b15ec3fe 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go @@ -243,10 +243,66 @@ func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo +func (m *VolumeAttributesClass) Reset() { *m = VolumeAttributesClass{} } +func (*VolumeAttributesClass) ProtoMessage() {} +func (*VolumeAttributesClass) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{7} +} +func (m *VolumeAttributesClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttributesClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttributesClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttributesClass.Merge(m, src) +} +func (m *VolumeAttributesClass) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttributesClass) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttributesClass.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttributesClass proto.InternalMessageInfo + +func (m *VolumeAttributesClassList) Reset() { *m = VolumeAttributesClassList{} } +func (*VolumeAttributesClassList) ProtoMessage() {} +func (*VolumeAttributesClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{8} +} +func (m *VolumeAttributesClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttributesClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttributesClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttributesClassList.Merge(m, src) +} +func (m *VolumeAttributesClassList) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttributesClassList) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttributesClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttributesClassList proto.InternalMessageInfo + func (m *VolumeError) Reset() { *m = VolumeError{} } func (*VolumeError) ProtoMessage() {} func (*VolumeError) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{7} + return fileDescriptor_10f856db1e670dc4, []int{9} } func (m *VolumeError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -280,6 +336,9 @@ func init() { proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSpec") proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus.AttachmentMetadataEntry") + proto.RegisterType((*VolumeAttributesClass)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttributesClass") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttributesClass.ParametersEntry") + proto.RegisterType((*VolumeAttributesClassList)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttributesClassList") proto.RegisterType((*VolumeError)(nil), 
"k8s.io.api.storage.v1alpha1.VolumeError") } @@ -288,65 +347,71 @@ func init() { } var fileDescriptor_10f856db1e670dc4 = []byte{ - // 925 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x3f, 0x6f, 0x23, 0x45, - 0x14, 0xf7, 0xc6, 0xce, 0x9d, 0x6f, 0x1c, 0xc0, 0x37, 0x32, 0x87, 0xe5, 0x93, 0xd6, 0x91, 0x2b, - 0x83, 0xb8, 0x59, 0x72, 0x20, 0x74, 0xa2, 0xf3, 0x26, 0x29, 0x22, 0x92, 0x00, 0xe3, 0x08, 0x21, - 0xa0, 0x60, 0xbc, 0x7e, 0xd8, 0x13, 0x7b, 0xff, 0x68, 0x67, 0x36, 0xc2, 0x54, 0x54, 0xd4, 0x74, - 0x7c, 0x03, 0x3e, 0x4b, 0x0a, 0x24, 0x4e, 0x54, 0x57, 0x59, 0x64, 0xf9, 0x0e, 0x14, 0x34, 0xa0, - 0x9d, 0x1d, 0xaf, 0x37, 0x5e, 0x27, 0xe7, 0x4b, 0x71, 0x9d, 0xdf, 0x9b, 0xf7, 0x7e, 0xbf, 0xf7, - 0xdf, 0x8b, 0x0e, 0x26, 0xcf, 0x04, 0xe1, 0xbe, 0x35, 0x89, 0x06, 0x10, 0x7a, 0x20, 0x41, 0x58, - 0x17, 0xe0, 0x0d, 0xfd, 0xd0, 0xd2, 0x0f, 0x2c, 0xe0, 0x96, 0x90, 0x7e, 0xc8, 0x46, 0x60, 0x5d, - 0xec, 0xb1, 0x69, 0x30, 0x66, 0x7b, 0xd6, 0x08, 0x3c, 0x08, 0x99, 0x84, 0x21, 0x09, 0x42, 0x5f, - 0xfa, 0xf8, 0x71, 0x6a, 0x4c, 0x58, 0xc0, 0x89, 0x36, 0x26, 0x0b, 0xe3, 0xd6, 0x93, 0x11, 0x97, - 0xe3, 0x68, 0x40, 0x1c, 0xdf, 0xb5, 0x46, 0xfe, 0xc8, 0xb7, 0x94, 0xcf, 0x20, 0xfa, 0x5e, 0x49, - 0x4a, 0x50, 0xbf, 0x52, 0xac, 0x56, 0x27, 0x47, 0xec, 0xf8, 0x61, 0xc2, 0xba, 0xca, 0xd7, 0xfa, - 0x68, 0x69, 0xe3, 0x32, 0x67, 0xcc, 0x3d, 0x08, 0x67, 0x56, 0x30, 0x19, 0x29, 0xa7, 0x10, 0x84, - 0x1f, 0x85, 0x0e, 0xbc, 0x92, 0x97, 0xb0, 0x5c, 0x90, 0x6c, 0x1d, 0x97, 0x75, 0x93, 0x57, 0x18, - 0x79, 0x92, 0xbb, 0x45, 0x9a, 0x8f, 0x5f, 0xe6, 0x20, 0x9c, 0x31, 0xb8, 0x6c, 0xd5, 0xaf, 0xf3, - 0x4f, 0x19, 0xe1, 0xfd, 0xfe, 0x51, 0x3f, 0xad, 0xdf, 0x3e, 0x0b, 0x98, 0xc3, 0xe5, 0x0c, 0x7f, - 0x87, 0xaa, 0x49, 0x68, 0x43, 0x26, 0x59, 0xd3, 0xd8, 0x35, 0xba, 0xb5, 0xa7, 0x1f, 0x90, 0x65, - 0xb9, 0x33, 0x06, 0x12, 0x4c, 0x46, 0x89, 0x42, 0x90, 0xc4, 0x9a, 0x5c, 0xec, 0x91, 0xcf, 0x06, - 0xe7, 0xe0, 0xc8, 0x13, 0x90, 0xcc, 0xc6, 0x97, 0xf3, 0x76, 0x29, 0x9e, 0xb7, 0xd1, 0x52, 0x47, - 0x33, 0x54, 0xcc, 0xd1, 0x8e, 0xe7, 0x0f, 0xe1, 0xcc, 0x0f, 0xfc, 0xa9, 0x3f, 0x9a, 0x35, 0xb7, - 0x14, 0xcb, 0x87, 0x9b, 0xb1, 0x1c, 0xb3, 0x01, 0x4c, 0xfb, 0x30, 0x05, 0x47, 0xfa, 0xa1, 0x5d, - 0x8f, 0xe7, 0xed, 0x9d, 0xd3, 0x1c, 0x18, 0xbd, 0x06, 0x8d, 0x0f, 0x50, 0x5d, 0xcf, 0xc7, 0xfe, - 0x94, 0x09, 0x71, 0xca, 0x5c, 0x68, 0x96, 0x77, 0x8d, 0xee, 0x03, 0xbb, 0xa9, 0x43, 0xac, 0xf7, - 0x57, 0xde, 0x69, 0xc1, 0x03, 0x7f, 0x85, 0xaa, 0x8e, 0x2e, 0x4f, 0xb3, 0xa2, 0x82, 0x25, 0xb7, - 0x05, 0x4b, 0x16, 0x13, 0x41, 0xbe, 0x88, 0x98, 0x27, 0xb9, 0x9c, 0xd9, 0x3b, 0xf1, 0xbc, 0x5d, - 0x5d, 0x94, 0x98, 0x66, 0x68, 0x58, 0xa0, 0x87, 0x2e, 0xfb, 0x81, 0xbb, 0x91, 0xfb, 0xa5, 0x3f, - 0x8d, 0x5c, 0xe8, 0xf3, 0x1f, 0xa1, 0xb9, 0x7d, 0x27, 0x8a, 0xb7, 0xe3, 0x79, 0xfb, 0xe1, 0xc9, - 0x2a, 0x18, 0x2d, 0xe2, 0x77, 0x7e, 0x37, 0xd0, 0xa3, 0x62, 0xe3, 0x8f, 0xb9, 0x90, 0xf8, 0xdb, - 0x42, 0xf3, 0xc9, 0x86, 0x6d, 0xe1, 0x22, 0x6d, 0x7d, 0x5d, 0xd7, 0xb5, 0xba, 0xd0, 0xe4, 0x1a, - 0x7f, 0x86, 0xb6, 0xb9, 0x04, 0x57, 0x34, 0xb7, 0x76, 0xcb, 0xdd, 0xda, 0x53, 0x8b, 0xdc, 0xb2, - 0xc6, 0xa4, 0x18, 0xa1, 0xfd, 0x86, 0xc6, 0xde, 0x3e, 0x4a, 0x50, 0x68, 0x0a, 0xd6, 0xf9, 0x6d, - 0x0b, 0xd5, 0xd3, 0xec, 0x7a, 0x52, 0x32, 0x67, 0xec, 0x82, 0x27, 0x5f, 0xc3, 0x14, 0xf7, 0x51, - 0x45, 0x04, 0xe0, 0xe8, 0xe9, 0xdd, 0xbb, 0x35, 0x97, 0xd5, 0xf0, 0xfa, 0x01, 0x38, 0xf6, 0x8e, - 0x86, 0xaf, 0x24, 0x12, 0x55, 0x60, 0xf8, 0x1b, 0x74, 0x4f, 0x48, 0x26, 0x23, 0xa1, 0xa6, 0xf4, - 0xfa, 0x52, 0x6c, 0x00, 0xab, 0x5c, 0xed, 
0x37, 0x35, 0xf0, 0xbd, 0x54, 0xa6, 0x1a, 0xb2, 0x73, - 0x69, 0xa0, 0xc6, 0xaa, 0xcb, 0x6b, 0xe8, 0x3a, 0xbd, 0xde, 0xf5, 0x27, 0xaf, 0x94, 0xd2, 0x0d, - 0x3d, 0xff, 0xd3, 0x40, 0x8f, 0x0a, 0xd9, 0xab, 0x85, 0xc0, 0xc7, 0xa8, 0x11, 0x40, 0x28, 0xb8, - 0x90, 0xe0, 0xc9, 0xd4, 0x46, 0xad, 0xbd, 0x91, 0xae, 0x7d, 0x3c, 0x6f, 0x37, 0x3e, 0x5f, 0xf3, - 0x4e, 0xd7, 0x7a, 0xe1, 0x73, 0x54, 0xe7, 0xde, 0x94, 0x7b, 0xa0, 0xf7, 0x67, 0xd9, 0xf1, 0x6e, - 0x3e, 0x8f, 0xe4, 0x8f, 0x23, 0x29, 0xc8, 0x2a, 0xb2, 0x6a, 0x74, 0x23, 0x39, 0x33, 0x47, 0x2b, - 0x28, 0xb4, 0x80, 0xdb, 0xf9, 0x63, 0x4d, 0x7f, 0x92, 0x07, 0xfc, 0x3e, 0xaa, 0x32, 0xa5, 0x81, - 0x50, 0xa7, 0x91, 0xd5, 0xbb, 0xa7, 0xf5, 0x34, 0xb3, 0x50, 0x33, 0xa4, 0x4a, 0xb1, 0xe6, 0xb0, - 0x6e, 0x30, 0x43, 0xca, 0x35, 0x37, 0x43, 0x4a, 0xa6, 0x1a, 0x32, 0x09, 0x25, 0x39, 0xb0, 0xb9, - 0x43, 0x9a, 0x85, 0x72, 0xaa, 0xf5, 0x34, 0xb3, 0xe8, 0xfc, 0x57, 0x5e, 0xd3, 0x26, 0x35, 0x8c, - 0xb9, 0x9c, 0x86, 0x2a, 0xa7, 0x6a, 0x21, 0xa7, 0x61, 0x96, 0xd3, 0x10, 0xff, 0x6a, 0x20, 0xcc, - 0x32, 0x88, 0x93, 0xc5, 0xb0, 0xa6, 0x13, 0xf5, 0xe9, 0x1d, 0x96, 0x84, 0xf4, 0x0a, 0x68, 0x87, - 0x9e, 0x0c, 0x67, 0x76, 0x4b, 0x47, 0x81, 0x8b, 0x06, 0x74, 0x4d, 0x08, 0xf8, 0x1c, 0xd5, 0x52, - 0xed, 0x61, 0x18, 0xfa, 0xa1, 0x5e, 0xdb, 0xee, 0x06, 0x11, 0x29, 0x7b, 0xdb, 0x8c, 0xe7, 0xed, - 0x5a, 0x6f, 0x09, 0xf0, 0xef, 0xbc, 0x5d, 0xcb, 0xbd, 0xd3, 0x3c, 0x78, 0xc2, 0x35, 0x84, 0x25, - 0x57, 0xe5, 0x2e, 0x5c, 0x07, 0x70, 0x33, 0x57, 0x0e, 0xbc, 0x75, 0x88, 0xde, 0xb9, 0xa1, 0x44, - 0xb8, 0x8e, 0xca, 0x13, 0x98, 0xa5, 0x93, 0x48, 0x93, 0x9f, 0xb8, 0x81, 0xb6, 0x2f, 0xd8, 0x34, - 0x4a, 0x27, 0xee, 0x01, 0x4d, 0x85, 0x4f, 0xb6, 0x9e, 0x19, 0x9d, 0x9f, 0x0d, 0x94, 0xe7, 0xc0, - 0xc7, 0xa8, 0x92, 0x7c, 0x93, 0xe8, 0x33, 0xf3, 0xde, 0x66, 0x67, 0xe6, 0x8c, 0xbb, 0xb0, 0x3c, - 0x97, 0x89, 0x44, 0x15, 0x0a, 0x7e, 0x17, 0xdd, 0x77, 0x41, 0x08, 0x36, 0xd2, 0xcc, 0xf6, 0x5b, - 0xda, 0xe8, 0xfe, 0x49, 0xaa, 0xa6, 0x8b, 0x77, 0xbb, 0x77, 0x79, 0x65, 0x96, 0x9e, 0x5f, 0x99, - 0xa5, 0x17, 0x57, 0x66, 0xe9, 0xa7, 0xd8, 0x34, 0x2e, 0x63, 0xd3, 0x78, 0x1e, 0x9b, 0xc6, 0x8b, - 0xd8, 0x34, 0xfe, 0x8a, 0x4d, 0xe3, 0x97, 0xbf, 0xcd, 0xd2, 0xd7, 0x8f, 0x6f, 0xf9, 0x0a, 0xfd, - 0x3f, 0x00, 0x00, 0xff, 0xff, 0x1a, 0x8d, 0x17, 0x01, 0xbc, 0x0a, 0x00, 0x00, + // 1023 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xcb, 0x6e, 0x23, 0x45, + 0x17, 0x4e, 0xe7, 0x32, 0xe3, 0xa9, 0xe4, 0xff, 0xc7, 0x53, 0xca, 0x0c, 0xc6, 0x23, 0xb5, 0x23, + 0xaf, 0x0c, 0x62, 0xba, 0x49, 0x40, 0x68, 0x84, 0xc4, 0xc2, 0x9d, 0x64, 0x11, 0x91, 0x84, 0xa1, + 0x1c, 0x01, 0x02, 0x16, 0x94, 0xdb, 0x07, 0xbb, 0x62, 0xf7, 0x45, 0x55, 0xd5, 0x16, 0x66, 0xc5, + 0x8a, 0x35, 0x3b, 0xde, 0x80, 0x67, 0xc9, 0x02, 0x89, 0xd1, 0xac, 0x66, 0x65, 0x91, 0x86, 0x67, + 0x60, 0xc1, 0x06, 0xd4, 0xd5, 0xe5, 0x76, 0xc7, 0x6d, 0x07, 0x27, 0x8b, 0xec, 0x5c, 0xe7, 0xf2, + 0x9d, 0xdb, 0x77, 0x4e, 0x27, 0xe8, 0xa0, 0xff, 0x5c, 0x58, 0x2c, 0xb0, 0xfb, 0x51, 0x1b, 0xb8, + 0x0f, 0x12, 0x84, 0x3d, 0x04, 0xbf, 0x13, 0x70, 0x5b, 0x2b, 0x68, 0xc8, 0x6c, 0x21, 0x03, 0x4e, + 0xbb, 0x60, 0x0f, 0x77, 0xe9, 0x20, 0xec, 0xd1, 0x5d, 0xbb, 0x0b, 0x3e, 0x70, 0x2a, 0xa1, 0x63, + 0x85, 0x3c, 0x90, 0x01, 0x7e, 0x9a, 0x1a, 0x5b, 0x34, 0x64, 0x96, 0x36, 0xb6, 0x26, 0xc6, 0xd5, + 0x67, 0x5d, 0x26, 0x7b, 0x51, 0xdb, 0x72, 0x03, 0xcf, 0xee, 0x06, 0xdd, 0xc0, 0x56, 0x3e, 0xed, + 0xe8, 0x5b, 0xf5, 0x52, 0x0f, 0xf5, 0x2b, 0xc5, 0xaa, 0xd6, 0x73, 0x81, 0xdd, 0x80, 0x27, 0x51, + 0x67, 0xe3, 0x55, 0xdf, 0x9f, 0xda, 
0x78, 0xd4, 0xed, 0x31, 0x1f, 0xf8, 0xc8, 0x0e, 0xfb, 0x5d, + 0xe5, 0xc4, 0x41, 0x04, 0x11, 0x77, 0xe1, 0x46, 0x5e, 0xc2, 0xf6, 0x40, 0xd2, 0x79, 0xb1, 0xec, + 0x45, 0x5e, 0x3c, 0xf2, 0x25, 0xf3, 0x8a, 0x61, 0x3e, 0xf8, 0x2f, 0x07, 0xe1, 0xf6, 0xc0, 0xa3, + 0xb3, 0x7e, 0xf5, 0xbf, 0xd6, 0x10, 0xde, 0x6f, 0x1d, 0xb5, 0xd2, 0xfe, 0xed, 0xd3, 0x90, 0xba, + 0x4c, 0x8e, 0xf0, 0x37, 0xa8, 0x94, 0xa4, 0xd6, 0xa1, 0x92, 0x56, 0x8c, 0x1d, 0xa3, 0xb1, 0xb9, + 0xf7, 0xae, 0x35, 0x6d, 0x77, 0x16, 0xc1, 0x0a, 0xfb, 0xdd, 0x44, 0x20, 0xac, 0xc4, 0xda, 0x1a, + 0xee, 0x5a, 0x9f, 0xb4, 0xcf, 0xc1, 0x95, 0x27, 0x20, 0xa9, 0x83, 0x2f, 0xc6, 0xb5, 0x95, 0x78, + 0x5c, 0x43, 0x53, 0x19, 0xc9, 0x50, 0x31, 0x43, 0x5b, 0x7e, 0xd0, 0x81, 0xb3, 0x20, 0x0c, 0x06, + 0x41, 0x77, 0x54, 0x59, 0x55, 0x51, 0xde, 0x5b, 0x2e, 0xca, 0x31, 0x6d, 0xc3, 0xa0, 0x05, 0x03, + 0x70, 0x65, 0xc0, 0x9d, 0x72, 0x3c, 0xae, 0x6d, 0x9d, 0xe6, 0xc0, 0xc8, 0x15, 0x68, 0x7c, 0x80, + 0xca, 0x9a, 0x1f, 0xfb, 0x03, 0x2a, 0xc4, 0x29, 0xf5, 0xa0, 0xb2, 0xb6, 0x63, 0x34, 0x1e, 0x38, + 0x15, 0x9d, 0x62, 0xb9, 0x35, 0xa3, 0x27, 0x05, 0x0f, 0xfc, 0x05, 0x2a, 0xb9, 0xba, 0x3d, 0x95, + 0x75, 0x95, 0xac, 0x75, 0x5d, 0xb2, 0xd6, 0x84, 0x11, 0xd6, 0xa7, 0x11, 0xf5, 0x25, 0x93, 0x23, + 0x67, 0x2b, 0x1e, 0xd7, 0x4a, 0x93, 0x16, 0x93, 0x0c, 0x0d, 0x0b, 0xf4, 0xc8, 0xa3, 0xdf, 0x31, + 0x2f, 0xf2, 0x3e, 0x0b, 0x06, 0x91, 0x07, 0x2d, 0xf6, 0x3d, 0x54, 0x36, 0x6e, 0x15, 0xe2, 0x71, + 0x3c, 0xae, 0x3d, 0x3a, 0x99, 0x05, 0x23, 0x45, 0xfc, 0xfa, 0xaf, 0x06, 0x7a, 0x52, 0x1c, 0xfc, + 0x31, 0x13, 0x12, 0x7f, 0x5d, 0x18, 0xbe, 0xb5, 0xe4, 0x58, 0x98, 0x48, 0x47, 0x5f, 0xd6, 0x7d, + 0x2d, 0x4d, 0x24, 0xb9, 0xc1, 0x9f, 0xa1, 0x0d, 0x26, 0xc1, 0x13, 0x95, 0xd5, 0x9d, 0xb5, 0xc6, + 0xe6, 0x9e, 0x6d, 0x5d, 0xb3, 0xc6, 0x56, 0x31, 0x43, 0xe7, 0x7f, 0x1a, 0x7b, 0xe3, 0x28, 0x41, + 0x21, 0x29, 0x58, 0xfd, 0x97, 0x55, 0x54, 0x4e, 0xab, 0x6b, 0x4a, 0x49, 0xdd, 0x9e, 0x07, 0xbe, + 0xbc, 0x03, 0x16, 0xb7, 0xd0, 0xba, 0x08, 0xc1, 0xd5, 0xec, 0xdd, 0xbd, 0xb6, 0x96, 0xd9, 0xf4, + 0x5a, 0x21, 0xb8, 0xce, 0x96, 0x86, 0x5f, 0x4f, 0x5e, 0x44, 0x81, 0xe1, 0xaf, 0xd0, 0x3d, 0x21, + 0xa9, 0x8c, 0x84, 0x62, 0xe9, 0xd5, 0xa5, 0x58, 0x02, 0x56, 0xb9, 0x3a, 0xff, 0xd7, 0xc0, 0xf7, + 0xd2, 0x37, 0xd1, 0x90, 0xf5, 0x0b, 0x03, 0x6d, 0xcf, 0xba, 0xdc, 0xc1, 0xd4, 0xc9, 0xd5, 0xa9, + 0x3f, 0xbb, 0x51, 0x49, 0x0b, 0x66, 0xfe, 0xca, 0x40, 0x4f, 0x0a, 0xd5, 0xab, 0x85, 0xc0, 0xc7, + 0x68, 0x3b, 0x04, 0x2e, 0x98, 0x90, 0xe0, 0xcb, 0xd4, 0x46, 0xad, 0xbd, 0x91, 0xae, 0x7d, 0x3c, + 0xae, 0x6d, 0xbf, 0x98, 0xa3, 0x27, 0x73, 0xbd, 0xf0, 0x39, 0x2a, 0x33, 0x7f, 0xc0, 0x7c, 0xd0, + 0xfb, 0x33, 0x9d, 0x78, 0x23, 0x5f, 0x47, 0xf2, 0xe1, 0x48, 0x1a, 0x32, 0x8b, 0xac, 0x06, 0xbd, + 0x9d, 0x9c, 0x99, 0xa3, 0x19, 0x14, 0x52, 0xc0, 0xad, 0xff, 0x36, 0x67, 0x3e, 0x89, 0x02, 0xbf, + 0x83, 0x4a, 0x54, 0x49, 0x80, 0xeb, 0x32, 0xb2, 0x7e, 0x37, 0xb5, 0x9c, 0x64, 0x16, 0x8a, 0x43, + 0xaa, 0x15, 0x73, 0x0e, 0xeb, 0x12, 0x1c, 0x52, 0xae, 0x39, 0x0e, 0xa9, 0x37, 0xd1, 0x90, 0x49, + 0x2a, 0xc9, 0x81, 0xcd, 0x1d, 0xd2, 0x2c, 0x95, 0x53, 0x2d, 0x27, 0x99, 0x45, 0xfd, 0x9f, 0xb5, + 0x39, 0x63, 0x52, 0x64, 0xcc, 0xd5, 0xd4, 0x51, 0x35, 0x95, 0x0a, 0x35, 0x75, 0xb2, 0x9a, 0x3a, + 0xf8, 0x67, 0x03, 0x61, 0x9a, 0x41, 0x9c, 0x4c, 0xc8, 0x9a, 0x32, 0xea, 0xe3, 0x5b, 0x2c, 0x89, + 0xd5, 0x2c, 0xa0, 0x1d, 0xfa, 0x92, 0x8f, 0x9c, 0xaa, 0xce, 0x02, 0x17, 0x0d, 0xc8, 0x9c, 0x14, + 0xf0, 0x39, 0xda, 0x4c, 0xa5, 0x87, 0x9c, 0x07, 0x5c, 0xaf, 0x6d, 0x63, 0x89, 0x8c, 0x94, 0xbd, + 0x63, 0xc6, 0xe3, 0xda, 0x66, 0x73, 0x0a, 0xf0, 0xf7, 0xb8, 
0xb6, 0x99, 0xd3, 0x93, 0x3c, 0x78, + 0x12, 0xab, 0x03, 0xd3, 0x58, 0xeb, 0xb7, 0x89, 0x75, 0x00, 0x8b, 0x63, 0xe5, 0xc0, 0xab, 0x87, + 0xe8, 0x8d, 0x05, 0x2d, 0xc2, 0x65, 0xb4, 0xd6, 0x87, 0x51, 0xca, 0x44, 0x92, 0xfc, 0xc4, 0xdb, + 0x68, 0x63, 0x48, 0x07, 0x51, 0xca, 0xb8, 0x07, 0x24, 0x7d, 0x7c, 0xb8, 0xfa, 0xdc, 0xa8, 0xff, + 0xb9, 0x8a, 0x1e, 0x67, 0x13, 0xe0, 0xac, 0x1d, 0x49, 0x10, 0xea, 0xc3, 0x7a, 0x07, 0x17, 0x7a, + 0x0f, 0xa1, 0x0e, 0x67, 0x43, 0xe0, 0x8a, 0xad, 0x2a, 0xb5, 0xa9, 0xc7, 0x41, 0xa6, 0x21, 0x39, + 0x2b, 0x3c, 0x44, 0x28, 0xa4, 0x9c, 0x7a, 0x20, 0x81, 0x27, 0x47, 0x38, 0xe1, 0x97, 0xb3, 0x1c, + 0xbf, 0xf2, 0xd5, 0x59, 0x2f, 0x32, 0x90, 0x94, 0x56, 0x59, 0xdc, 0xa9, 0x82, 0xe4, 0x22, 0x55, + 0x3f, 0x42, 0x0f, 0x67, 0x5c, 0x6e, 0xd4, 0xe6, 0x57, 0x06, 0x7a, 0x73, 0x6e, 0x22, 0x77, 0x70, + 0xdf, 0x3f, 0xbf, 0x7a, 0xdf, 0xf7, 0x6e, 0xde, 0xad, 0x05, 0x47, 0xfe, 0x47, 0x03, 0xe5, 0xf9, + 0x89, 0x8f, 0xd1, 0x7a, 0xf2, 0xf7, 0xac, 0x2e, 0xe1, 0xed, 0xe5, 0x4a, 0x38, 0x63, 0x1e, 0x4c, + 0x3f, 0xb5, 0xc9, 0x8b, 0x28, 0x14, 0xfc, 0x16, 0xba, 0xef, 0x81, 0x10, 0xb4, 0x3b, 0xa1, 0xc6, + 0x43, 0x6d, 0x74, 0xff, 0x24, 0x15, 0x93, 0x89, 0xde, 0x69, 0x5e, 0x5c, 0x9a, 0x2b, 0x2f, 0x2f, + 0xcd, 0x95, 0xd7, 0x97, 0xe6, 0xca, 0x0f, 0xb1, 0x69, 0x5c, 0xc4, 0xa6, 0xf1, 0x32, 0x36, 0x8d, + 0xd7, 0xb1, 0x69, 0xfc, 0x1e, 0x9b, 0xc6, 0x4f, 0x7f, 0x98, 0x2b, 0x5f, 0x3e, 0xbd, 0xe6, 0x3f, + 0x98, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xb9, 0x2f, 0x75, 0xee, 0xf8, 0x0c, 0x00, 0x00, } func (m *CSIStorageCapacity) Marshal() (dAtA []byte, err error) { @@ -734,6 +799,115 @@ func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } +func (m *VolumeAttributesClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttributesClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttributesClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Parameters) > 0 { + keysForParameters := make([]string, 0, len(m.Parameters)) + for k := range m.Parameters { + keysForParameters = append(keysForParameters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- { + v := m.Parameters[string(keysForParameters[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForParameters[iNdEx]) + copy(dAtA[i:], keysForParameters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForParameters[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.DriverName) + copy(dAtA[i:], m.DriverName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeAttributesClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + 
+func (m *VolumeAttributesClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttributesClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *VolumeError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -915,6 +1089,44 @@ func (m *VolumeAttachmentStatus) Size() (n int) { return n } +func (m *VolumeAttributesClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DriverName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Parameters) > 0 { + for k, v := range m.Parameters { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *VolumeAttributesClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *VolumeError) Size() (n int) { if m == nil { return 0 @@ -1038,6 +1250,44 @@ func (this *VolumeAttachmentStatus) String() string { }, "") return s } +func (this *VolumeAttributesClass) String() string { + if this == nil { + return "nil" + } + keysForParameters := make([]string, 0, len(this.Parameters)) + for k := range this.Parameters { + keysForParameters = append(keysForParameters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + mapStringForParameters := "map[string]string{" + for _, k := range keysForParameters { + mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) + } + mapStringForParameters += "}" + s := strings.Join([]string{`&VolumeAttributesClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, + `Parameters:` + mapStringForParameters + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttributesClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]VolumeAttributesClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttributesClass", "VolumeAttributesClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&VolumeAttributesClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} func (this *VolumeError) String() string { if this == nil { return "nil" @@ -2198,6 +2448,365 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } 
return nil } +func (m *VolumeAttributesClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttributesClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttributesClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DriverName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Parameters == nil { + m.Parameters = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + 
postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Parameters[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttributesClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttributesClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttributesClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, VolumeAttributesClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *VolumeError) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto index 88250a0f0..49e522be5 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -216,6 +216,46 @@ message VolumeAttachmentStatus { optional VolumeError detachError = 4; } +// VolumeAttributesClass represents a specification of mutable volume attributes +// defined by the CSI driver. The class can be specified during dynamic provisioning +// of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning. +message VolumeAttributesClass { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Name of the CSI driver + // This field is immutable. + optional string driverName = 2; + + // parameters hold volume attributes defined by the CSI driver. These values + // are opaque to the Kubernetes and are passed directly to the CSI driver. + // The underlying storage provider supports changing these attributes on an + // existing volume, however the parameters field itself is immutable. To + // invoke a volume update, a new VolumeAttributesClass should be created with + // new parameters, and the PersistentVolumeClaim should be updated to reference + // the new VolumeAttributesClass. + // + // This field is required and must contain at least one key/value pair. + // The keys cannot be empty, and the maximum number of parameters is 512, with + // a cumulative max size of 256K. If the CSI driver rejects invalid parameters, + // the target PersistentVolumeClaim will be set to an "Infeasible" state in the + // modifyVolumeStatus field. + map parameters = 3; +} + +// VolumeAttributesClassList is a collection of VolumeAttributesClass objects. +message VolumeAttributesClassList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of VolumeAttributesClass objects. + repeated VolumeAttributesClass items = 2; +} + // VolumeError captures an error encountered during a volume operation. message VolumeError { // time represents the time the error was encountered. 
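The vendored hunks above add the storage v1alpha1 VolumeAttributesClass API together with its generated proto Marshal/Unmarshal, Size, and String helpers. As a quick illustration of what that generated code operates on, here is a minimal round-trip sketch, assuming the bumped k8s.io/api module is on the import path; the object name, driver name, and parameter key are illustrative placeholders, not values taken from this patch.

package main

import (
	"fmt"

	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Hypothetical object; name, driver, and parameter key are placeholders.
	in := &storagev1alpha1.VolumeAttributesClass{
		ObjectMeta: metav1.ObjectMeta{Name: "example-class"},
		DriverName: "example.csi.driver.io",
		Parameters: map[string]string{"example-iops": "500"},
	}

	// Marshal uses the generated proto encoder added for this type.
	data, err := in.Marshal()
	if err != nil {
		panic(err)
	}

	// Unmarshal walks the wire format field by field, matching the generated
	// switch on fieldNum shown in the hunk above (ObjectMeta, DriverName,
	// Parameters map entries).
	out := &storagev1alpha1.VolumeAttributesClass{}
	if err := out.Unmarshal(data); err != nil {
		panic(err)
	}

	fmt.Println(out.DriverName, out.Parameters)
}

VolumeAttributesClassList round-trips the same way through its own generated Marshal/Unmarshal, with each list item delegating to the per-object helpers exercised here.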
diff --git a/vendor/k8s.io/api/storage/v1alpha1/register.go b/vendor/k8s.io/api/storage/v1alpha1/register.go index 779c85802..a70f8e186 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/register.go +++ b/vendor/k8s.io/api/storage/v1alpha1/register.go @@ -45,6 +45,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &VolumeAttachmentList{}, &CSIStorageCapacity{}, &CSIStorageCapacityList{}, + &VolumeAttributesClass{}, + &VolumeAttributesClassList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/storage/v1alpha1/types.go b/vendor/k8s.io/api/storage/v1alpha1/types.go index 59ef348a3..5957e4807 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types.go @@ -251,3 +251,55 @@ type CSIStorageCapacityList struct { // +listMapKey=name Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient +// +genclient:nonNamespaced +// +k8s:prerelease-lifecycle-gen:introduced=1.29 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttributesClass represents a specification of mutable volume attributes +// defined by the CSI driver. The class can be specified during dynamic provisioning +// of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning. +type VolumeAttributesClass struct { + metav1.TypeMeta `json:",inline"` + + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Name of the CSI driver + // This field is immutable. + DriverName string `json:"driverName" protobuf:"bytes,2,opt,name=driverName"` + + // parameters hold volume attributes defined by the CSI driver. These values + // are opaque to the Kubernetes and are passed directly to the CSI driver. + // The underlying storage provider supports changing these attributes on an + // existing volume, however the parameters field itself is immutable. To + // invoke a volume update, a new VolumeAttributesClass should be created with + // new parameters, and the PersistentVolumeClaim should be updated to reference + // the new VolumeAttributesClass. + // + // This field is required and must contain at least one key/value pair. + // The keys cannot be empty, and the maximum number of parameters is 512, with + // a cumulative max size of 256K. If the CSI driver rejects invalid parameters, + // the target PersistentVolumeClaim will be set to an "Infeasible" state in the + // modifyVolumeStatus field. + Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` +} + +// +k8s:prerelease-lifecycle-gen:introduced=1.29 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttributesClassList is a collection of VolumeAttributesClass objects. +type VolumeAttributesClassList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of VolumeAttributesClass objects. 
+ Items []VolumeAttributesClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go index ba6afbd59..ac87dbdca 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go @@ -103,6 +103,27 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { return map_VolumeAttachmentStatus } +var map_VolumeAttributesClass = map[string]string{ + "": "VolumeAttributesClass represents a specification of mutable volume attributes defined by the CSI driver. The class can be specified during dynamic provisioning of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "driverName": "Name of the CSI driver This field is immutable.", + "parameters": "parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass.\n\nThis field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \"Infeasible\" state in the modifyVolumeStatus field.", +} + +func (VolumeAttributesClass) SwaggerDoc() map[string]string { + return map_VolumeAttributesClass +} + +var map_VolumeAttributesClassList = map[string]string{ + "": "VolumeAttributesClassList is a collection of VolumeAttributesClass objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of VolumeAttributesClass objects.", +} + +func (VolumeAttributesClassList) SwaggerDoc() map[string]string { + return map_VolumeAttributesClassList +} + var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", "time": "time represents the time the error was encountered.", diff --git a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go index d9bc94b25..942871f78 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go @@ -238,6 +238,72 @@ func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeAttributesClass) DeepCopyInto(out *VolumeAttributesClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttributesClass. +func (in *VolumeAttributesClass) DeepCopy() *VolumeAttributesClass { + if in == nil { + return nil + } + out := new(VolumeAttributesClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttributesClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttributesClassList) DeepCopyInto(out *VolumeAttributesClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeAttributesClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttributesClassList. +func (in *VolumeAttributesClassList) DeepCopy() *VolumeAttributesClassList { + if in == nil { + return nil + } + out := new(VolumeAttributesClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttributesClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeError) DeepCopyInto(out *VolumeError) { *out = *in diff --git a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go index 41114c3c6..c169e782c 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -120,3 +120,39 @@ func (in *VolumeAttachmentList) APILifecycleReplacement() schema.GroupVersionKin func (in *VolumeAttachmentList) APILifecycleRemoved() (major, minor int) { return 1, 24 } + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *VolumeAttributesClass) APILifecycleIntroduced() (major, minor int) { + return 1, 29 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. 
+func (in *VolumeAttributesClass) APILifecycleDeprecated() (major, minor int) { + return 1, 32 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *VolumeAttributesClass) APILifecycleRemoved() (major, minor int) { + return 1, 35 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *VolumeAttributesClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 29 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *VolumeAttributesClassList) APILifecycleDeprecated() (major, minor int) { + return 1, 32 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *VolumeAttributesClassList) APILifecycleRemoved() (major, minor int) { + return 1, 35 +} diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index 2b354dd47..b99fd39e4 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -91,7 +91,7 @@ message CSIDriverSpec { // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. // - // The following VolumeConext will be passed if podInfoOnMount is set to true. + // The following VolumeContext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. // "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index 4c39b49cc..0f5ade3c1 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -311,7 +311,7 @@ type CSIDriverSpec struct { // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. // - // The following VolumeConext will be passed if podInfoOnMount is set to true. + // The following VolumeContext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. 
// "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index 0f2718b9c..6d9d23306 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -50,7 +50,7 @@ func (CSIDriverList) SwaggerDoc() map[string]string { var map_CSIDriverSpec = map[string]string{ "": "CSIDriverSpec is the specification of a CSIDriver.", "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", - "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. 
\"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is immutable.", "storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", "fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go index cc1c7437f..8c4e147f0 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go @@ -210,6 +210,19 @@ type ValidationRule struct { // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with // non-intersecting keys are appended, retaining their partial order. 
+ // + // If `rule` makes use of the `oldSelf` variable it is implicitly a + // `transition rule`. + // + // By default, the `oldSelf` variable is the same type as `self`. + // When `optionalOldSelf` is true, the `oldSelf` variable is a CEL optional + // variable whose value() is the same type as `self`. + // See the documentation for the `optionalOldSelf` field for details. + // + // Transition rules by default are applied only on UPDATE requests and are + // skipped if an old value could not be found. You can opt a transition + // rule into unconditional evaluation by setting `optionalOldSelf` to true. + // Rule string // Message represents the message displayed when validation fails. The message is required if the Rule contains // line breaks. The message must not contain line breaks. @@ -246,6 +259,24 @@ type ValidationRule struct { // e.g. for attribute `foo.34$` appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']` // +optional FieldPath string + + // optionalOldSelf is used to opt a transition rule into evaluation + // even when the object is first created, or if the old object is + // missing the value. + // + // When enabled `oldSelf` will be a CEL optional whose value will be + // `None` if there is no old value, or when the object is initially created. + // + // You may check for presence of oldSelf using `oldSelf.hasValue()` and + // unwrap it after checking using `oldSelf.value()`. Check the CEL + // documentation for Optional types for more information: + // https://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes + // + // May not be set unless `oldSelf` is used in `rule`. + // + // +featureGate=CRDValidationRatcheting + // +optional + OptionalOldSelf *bool } // JSON represents any valid JSON value. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go index 75a573a2d..6c22a5169 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go @@ -814,202 +814,204 @@ func init() { } var fileDescriptor_f5a35c9667703937 = []byte{ - // 3111 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5a, 0xdd, 0x6f, 0x5c, 0x47, - 0x15, 0xcf, 0x5d, 0x7b, 0xed, 0xf5, 0xd8, 0x89, 0xed, 0x49, 0x6c, 0x6e, 0xdc, 0xc4, 0xeb, 0x6c, - 0x68, 0x70, 0xdb, 0x74, 0xdd, 0x9a, 0x96, 0x86, 0x82, 0x40, 0x5e, 0xdb, 0x69, 0xdd, 0xd8, 0xb1, - 0x35, 0x9b, 0xa4, 0x6e, 0x8b, 0x68, 0xaf, 0xf7, 0x8e, 0xd7, 0xb7, 0xbe, 0x5f, 0x99, 0xb9, 0xd7, - 0x1f, 0x12, 0x48, 0x15, 0xa8, 0x02, 0x2a, 0x41, 0x79, 0xa8, 0xca, 0x13, 0x42, 0x08, 0xf5, 0x01, - 0x1e, 0xe0, 0x0d, 0xfe, 0x85, 0xbe, 0x20, 0xf5, 0x09, 0x55, 0x42, 0x5a, 0xd1, 0xe5, 0x1f, 0x40, - 0x02, 0x84, 0xf0, 0x03, 0x42, 0xf3, 0x71, 0xe7, 0xce, 0xde, 0xdd, 0x4d, 0x22, 0x7b, 0xdd, 0xbe, - 0xed, 0x9e, 0x73, 0xe6, 0xfc, 0xce, 0x9c, 0x39, 0x73, 0xe6, 0xcc, 0xb9, 0x03, 0xac, 0xdd, 0x1b, - 0xb4, 0xec, 0x04, 0x73, 0xbb, 0xf1, 0x16, 0x26, 0x3e, 0x8e, 0x30, 0x9d, 0xdb, 0xc3, 0xbe, 0x1d, - 0x90, 0x39, 0xc9, 0xb0, 0x42, 0x07, 0x1f, 0x44, 0xd8, 0xa7, 0x4e, 0xe0, 0xd3, 0xa7, 0xad, 0xd0, - 0xa1, 0x98, 0xec, 0x61, 0x32, 0x17, 0xee, 0xd6, 0x19, 0x8f, 0xb6, 0x0a, 0xcc, 0xed, 0x3d, 0x3b, - 0x57, 0xc7, 0x3e, 0x26, 0x56, 0x84, 0xed, 0x72, 0x48, 0x82, 0x28, 0x80, 0x37, 0x84, 0xa6, 0x72, - 0x8b, 0xe0, 0x9b, 0x4a, 0x53, 0x39, 0xdc, 0xad, 0x33, 0x1e, 0x6d, 0x15, 0x28, 
0xef, 0x3d, 0x3b, - 0xf5, 0x74, 0xdd, 0x89, 0x76, 0xe2, 0xad, 0x72, 0x2d, 0xf0, 0xe6, 0xea, 0x41, 0x3d, 0x98, 0xe3, - 0x0a, 0xb7, 0xe2, 0x6d, 0xfe, 0x8f, 0xff, 0xe1, 0xbf, 0x04, 0xd0, 0xd4, 0x73, 0xa9, 0xc9, 0x9e, - 0x55, 0xdb, 0x71, 0x7c, 0x4c, 0x0e, 0x53, 0x3b, 0x3d, 0x1c, 0x59, 0x1d, 0xcc, 0x9b, 0x9a, 0xeb, - 0x36, 0x8a, 0xc4, 0x7e, 0xe4, 0x78, 0xb8, 0x6d, 0xc0, 0xd7, 0x1e, 0x36, 0x80, 0xd6, 0x76, 0xb0, - 0x67, 0x65, 0xc7, 0x95, 0x8e, 0x0c, 0x30, 0xbe, 0x18, 0xf8, 0x7b, 0x98, 0xb0, 0x09, 0x22, 0x7c, - 0x3f, 0xc6, 0x34, 0x82, 0x15, 0xd0, 0x17, 0x3b, 0xb6, 0x69, 0xcc, 0x18, 0xb3, 0x43, 0x95, 0x67, - 0x3e, 0x6e, 0x14, 0xcf, 0x34, 0x1b, 0xc5, 0xbe, 0xbb, 0x2b, 0x4b, 0x47, 0x8d, 0xe2, 0x95, 0x6e, - 0x48, 0xd1, 0x61, 0x88, 0x69, 0xf9, 0xee, 0xca, 0x12, 0x62, 0x83, 0xe1, 0x4b, 0x60, 0xdc, 0xc6, - 0xd4, 0x21, 0xd8, 0x5e, 0xd8, 0x58, 0xb9, 0x27, 0xf4, 0x9b, 0x39, 0xae, 0xf1, 0xa2, 0xd4, 0x38, - 0xbe, 0x94, 0x15, 0x40, 0xed, 0x63, 0xe0, 0x26, 0x18, 0x0c, 0xb6, 0xde, 0xc6, 0xb5, 0x88, 0x9a, - 0x7d, 0x33, 0x7d, 0xb3, 0xc3, 0xf3, 0x4f, 0x97, 0xd3, 0xc5, 0x53, 0x26, 0xf0, 0x15, 0x93, 0x93, - 0x2d, 0x23, 0x6b, 0x7f, 0x39, 0x59, 0xb4, 0xca, 0xa8, 0x44, 0x1b, 0x5c, 0x17, 0x5a, 0x50, 0xa2, - 0xae, 0xf4, 0x9b, 0x1c, 0x80, 0xfa, 0xe4, 0x69, 0x18, 0xf8, 0x14, 0xf7, 0x64, 0xf6, 0x14, 0x8c, - 0xd5, 0xb8, 0xe6, 0x08, 0xdb, 0x12, 0xd7, 0xcc, 0x1d, 0xc7, 0x7a, 0x53, 0xe2, 0x8f, 0x2d, 0x66, - 0xd4, 0xa1, 0x36, 0x00, 0x78, 0x07, 0x0c, 0x10, 0x4c, 0x63, 0x37, 0x32, 0xfb, 0x66, 0x8c, 0xd9, - 0xe1, 0xf9, 0xeb, 0x5d, 0xa1, 0x78, 0x68, 0xb3, 0xe0, 0x2b, 0xef, 0x3d, 0x5b, 0xae, 0x46, 0x56, - 0x14, 0xd3, 0xca, 0x39, 0x89, 0x34, 0x80, 0xb8, 0x0e, 0x24, 0x75, 0x95, 0xfe, 0x67, 0x80, 0x31, - 0xdd, 0x4b, 0x7b, 0x0e, 0xde, 0x87, 0x04, 0x0c, 0x12, 0x11, 0x2c, 0xdc, 0x4f, 0xc3, 0xf3, 0xb7, - 0xca, 0xc7, 0xdd, 0x51, 0xe5, 0xb6, 0xf8, 0xab, 0x0c, 0xb3, 0xe5, 0x92, 0x7f, 0x50, 0x02, 0x04, - 0xf7, 0x40, 0x81, 0xc8, 0x35, 0xe2, 0x81, 0x34, 0x3c, 0xbf, 0xda, 0x1b, 0x50, 0xa1, 0xb3, 0x32, - 0xd2, 0x6c, 0x14, 0x0b, 0xc9, 0x3f, 0xa4, 0xb0, 0x4a, 0xbf, 0xca, 0x81, 0xe9, 0xc5, 0x98, 0x46, - 0x81, 0x87, 0x30, 0x0d, 0x62, 0x52, 0xc3, 0x8b, 0x81, 0x1b, 0x7b, 0xfe, 0x12, 0xde, 0x76, 0x7c, - 0x27, 0x62, 0x31, 0x3a, 0x03, 0xfa, 0x7d, 0xcb, 0xc3, 0x32, 0x66, 0x46, 0xa4, 0x27, 0xfb, 0x6f, - 0x5b, 0x1e, 0x46, 0x9c, 0xc3, 0x24, 0x58, 0x88, 0xc8, 0x1d, 0xa0, 0x24, 0xee, 0x1c, 0x86, 0x18, - 0x71, 0x0e, 0xbc, 0x06, 0x06, 0xb6, 0x03, 0xe2, 0x59, 0x62, 0xf5, 0x86, 0xd2, 0xf5, 0xb8, 0xc9, - 0xa9, 0x48, 0x72, 0xe1, 0xf3, 0x60, 0xd8, 0xc6, 0xb4, 0x46, 0x9c, 0x90, 0x41, 0x9b, 0xfd, 0x5c, - 0xf8, 0xbc, 0x14, 0x1e, 0x5e, 0x4a, 0x59, 0x48, 0x97, 0x83, 0xd7, 0x41, 0x21, 0x24, 0x4e, 0x40, - 0x9c, 0xe8, 0xd0, 0xcc, 0xcf, 0x18, 0xb3, 0xf9, 0xca, 0x98, 0x1c, 0x53, 0xd8, 0x90, 0x74, 0xa4, - 0x24, 0x98, 0xf4, 0xdb, 0x34, 0xf0, 0x37, 0xac, 0x68, 0xc7, 0x1c, 0xe0, 0x08, 0x4a, 0xfa, 0x95, - 0xea, 0xfa, 0x6d, 0x46, 0x47, 0x4a, 0xa2, 0xf4, 0x17, 0x03, 0x98, 0x59, 0x0f, 0x25, 0xee, 0x85, - 0x37, 0x41, 0x81, 0x46, 0x2c, 0xe7, 0xd4, 0x0f, 0xa5, 0x7f, 0x9e, 0x4c, 0x54, 0x55, 0x25, 0xfd, - 0xa8, 0x51, 0x9c, 0x4c, 0x47, 0x24, 0x54, 0xee, 0x1b, 0x35, 0x96, 0x85, 0xdc, 0x3e, 0xde, 0xda, - 0x09, 0x82, 0x5d, 0xb9, 0xfa, 0x27, 0x08, 0xb9, 0x57, 0x85, 0xa2, 0x14, 0x53, 0x84, 0x9c, 0x24, - 0xa3, 0x04, 0xa8, 0xf4, 0xdf, 0x5c, 0x76, 0x62, 0xda, 0xa2, 0xbf, 0x05, 0x0a, 0x6c, 0x0b, 0xd9, - 0x56, 0x64, 0xc9, 0x4d, 0xf0, 0xcc, 0xa3, 0x6d, 0x38, 0xb1, 0x5f, 0xd7, 0x70, 0x64, 0x55, 0xa0, - 0x74, 0x05, 0x48, 0x69, 0x48, 0x69, 0x85, 0x07, 0xa0, 0x9f, 0x86, 0xb8, 0x26, 0xe7, 0x7b, 0xef, - 0x04, 
0xd1, 0xde, 0x65, 0x0e, 0xd5, 0x10, 0xd7, 0xd2, 0x60, 0x64, 0xff, 0x10, 0x47, 0x84, 0xef, - 0x18, 0x60, 0x80, 0xf2, 0xbc, 0x20, 0x73, 0xc9, 0xe6, 0x29, 0x80, 0x67, 0xf2, 0x8e, 0xf8, 0x8f, - 0x24, 0x6e, 0xe9, 0x5f, 0x39, 0x70, 0xa5, 0xdb, 0xd0, 0xc5, 0xc0, 0xb7, 0xc5, 0x22, 0xac, 0xc8, - 0x7d, 0x25, 0x22, 0xeb, 0x79, 0x7d, 0x5f, 0x1d, 0x35, 0x8a, 0x8f, 0x3f, 0x54, 0x81, 0xb6, 0x01, - 0xbf, 0xae, 0xa6, 0x2c, 0x36, 0xe9, 0x95, 0x56, 0xc3, 0x8e, 0x1a, 0xc5, 0x51, 0x35, 0xac, 0xd5, - 0x56, 0xb8, 0x07, 0xa0, 0x6b, 0xd1, 0xe8, 0x0e, 0xb1, 0x7c, 0x2a, 0xd4, 0x3a, 0x1e, 0x96, 0x9e, - 0x7b, 0xf2, 0xd1, 0x82, 0x82, 0x8d, 0xa8, 0x4c, 0x49, 0x48, 0xb8, 0xda, 0xa6, 0x0d, 0x75, 0x40, - 0x60, 0x39, 0x83, 0x60, 0x8b, 0xaa, 0x34, 0xa0, 0xe5, 0x70, 0x46, 0x45, 0x92, 0x0b, 0x9f, 0x00, - 0x83, 0x1e, 0xa6, 0xd4, 0xaa, 0x63, 0xbe, 0xf7, 0x87, 0xd2, 0x43, 0x71, 0x4d, 0x90, 0x51, 0xc2, - 0x2f, 0xfd, 0xdb, 0x00, 0x97, 0xba, 0x79, 0x6d, 0xd5, 0xa1, 0x11, 0xfc, 0x4e, 0x5b, 0xd8, 0x97, - 0x1f, 0x6d, 0x86, 0x6c, 0x34, 0x0f, 0x7a, 0x95, 0x4a, 0x12, 0x8a, 0x16, 0xf2, 0xfb, 0x20, 0xef, - 0x44, 0xd8, 0x4b, 0x4e, 0x4b, 0xd4, 0xfb, 0xb0, 0xab, 0x9c, 0x95, 0xf0, 0xf9, 0x15, 0x06, 0x84, - 0x04, 0x5e, 0xe9, 0xa3, 0x1c, 0xb8, 0xdc, 0x6d, 0x08, 0xcb, 0xe3, 0x94, 0x39, 0x3b, 0x74, 0x63, - 0x62, 0xb9, 0x32, 0xd8, 0x94, 0xb3, 0x37, 0x38, 0x15, 0x49, 0x2e, 0xcb, 0x9d, 0xd4, 0xf1, 0xeb, - 0xb1, 0x6b, 0x11, 0x19, 0x49, 0x6a, 0xc2, 0x55, 0x49, 0x47, 0x4a, 0x02, 0x96, 0x01, 0xa0, 0x3b, - 0x01, 0x89, 0x38, 0x06, 0xaf, 0x70, 0x86, 0x2a, 0xe7, 0x58, 0x46, 0xa8, 0x2a, 0x2a, 0xd2, 0x24, - 0xd8, 0x41, 0xb2, 0xeb, 0xf8, 0xb6, 0x5c, 0x70, 0xb5, 0x77, 0x6f, 0x39, 0xbe, 0x8d, 0x38, 0x87, - 0xe1, 0xbb, 0x0e, 0x8d, 0x18, 0x45, 0xae, 0x76, 0x8b, 0xc3, 0xb9, 0xa4, 0x92, 0x60, 0xf8, 0x35, - 0x96, 0x60, 0x03, 0xe2, 0x60, 0x6a, 0x0e, 0xa4, 0xf8, 0x8b, 0x8a, 0x8a, 0x34, 0x89, 0xd2, 0x5f, - 0xfb, 0xbb, 0xc7, 0x07, 0x4b, 0x20, 0xf0, 0x2a, 0xc8, 0xd7, 0x49, 0x10, 0x87, 0xd2, 0x4b, 0xca, - 0xdb, 0x2f, 0x31, 0x22, 0x12, 0x3c, 0xf8, 0x3d, 0x90, 0xf7, 0xe5, 0x84, 0x59, 0x04, 0xbd, 0xda, - 0xfb, 0x65, 0xe6, 0xde, 0x4a, 0xd1, 0x85, 0x23, 0x05, 0x28, 0x7c, 0x0e, 0xe4, 0x69, 0x2d, 0x08, - 0xb1, 0x74, 0xe2, 0x74, 0x22, 0x54, 0x65, 0xc4, 0xa3, 0x46, 0xf1, 0x6c, 0xa2, 0x8e, 0x13, 0x90, - 0x10, 0x86, 0x3f, 0x32, 0x40, 0x41, 0x1e, 0x17, 0xd4, 0x1c, 0xe4, 0xe1, 0xf9, 0x5a, 0xef, 0xed, - 0x96, 0x65, 0x6f, 0xba, 0x66, 0x92, 0x40, 0x91, 0x02, 0x87, 0x3f, 0x30, 0x00, 0xa8, 0xa9, 0xb3, - 0xcb, 0x1c, 0xe2, 0x3e, 0xec, 0xd9, 0x56, 0xd1, 0x4e, 0x45, 0x11, 0x08, 0x69, 0xa9, 0xa4, 0xa1, - 0xc2, 0x2a, 0x98, 0x08, 0x09, 0xe6, 0xba, 0xef, 0xfa, 0xbb, 0x7e, 0xb0, 0xef, 0xdf, 0x74, 0xb0, - 0x6b, 0x53, 0x13, 0xcc, 0x18, 0xb3, 0x85, 0xca, 0x65, 0x69, 0xff, 0xc4, 0x46, 0x27, 0x21, 0xd4, - 0x79, 0x6c, 0xe9, 0xdd, 0xbe, 0x6c, 0xad, 0x95, 0x3d, 0x2f, 0xe0, 0xfb, 0x62, 0xf2, 0x22, 0x0f, - 0x53, 0xd3, 0xe0, 0x0b, 0xf1, 0x46, 0xef, 0x17, 0x42, 0xe5, 0xfa, 0xf4, 0x90, 0x56, 0x24, 0x8a, - 0x34, 0x13, 0xe0, 0x07, 0x06, 0x38, 0x6b, 0xd5, 0x6a, 0x38, 0x8c, 0xb0, 0x2d, 0xb6, 0x71, 0xee, - 0x74, 0xa3, 0x7a, 0x42, 0x1a, 0x74, 0x76, 0x41, 0x47, 0x45, 0xad, 0x46, 0xc0, 0x17, 0xc1, 0x39, - 0x1a, 0x05, 0x04, 0xdb, 0x49, 0x04, 0xc9, 0xec, 0x02, 0x9b, 0x8d, 0xe2, 0xb9, 0x6a, 0x0b, 0x07, - 0x65, 0x24, 0x4b, 0x9f, 0xe4, 0x41, 0xf1, 0x21, 0x11, 0xfa, 0x08, 0x45, 0xef, 0x35, 0x30, 0xc0, - 0x67, 0x6a, 0x73, 0x87, 0x14, 0xb4, 0xa3, 0x9e, 0x53, 0x91, 0xe4, 0xb2, 0xe3, 0x89, 0xe1, 0xb3, - 0xe3, 0xa9, 0x8f, 0x0b, 0xaa, 0xe3, 0xa9, 0x2a, 0xc8, 0x28, 0xe1, 0xc3, 0x79, 0x00, 0x6c, 0x1c, - 0x12, 0xcc, 0x32, 0x92, 0x6d, 
0x0e, 0x72, 0x69, 0xb5, 0x3e, 0x4b, 0x8a, 0x83, 0x34, 0x29, 0x78, - 0x13, 0xc0, 0xe4, 0x9f, 0x13, 0xf8, 0xaf, 0x5a, 0xc4, 0x77, 0xfc, 0xba, 0x59, 0xe0, 0x66, 0x4f, - 0xb2, 0xd3, 0x76, 0xa9, 0x8d, 0x8b, 0x3a, 0x8c, 0x80, 0x7b, 0x60, 0x40, 0x5c, 0xa3, 0x79, 0xde, - 0xe8, 0xe1, 0x8e, 0xbb, 0x67, 0xb9, 0x8e, 0xcd, 0xa1, 0x2a, 0x80, 0xbb, 0x87, 0xa3, 0x20, 0x89, - 0x06, 0xdf, 0x33, 0xc0, 0x08, 0x8d, 0xb7, 0x88, 0x94, 0xa6, 0x3c, 0xab, 0x0f, 0xcf, 0xdf, 0xe9, - 0x15, 0x7c, 0x55, 0xd3, 0x5d, 0x19, 0x6b, 0x36, 0x8a, 0x23, 0x3a, 0x05, 0xb5, 0x60, 0xc3, 0x3f, - 0x1a, 0xc0, 0xb4, 0x6c, 0x11, 0xfa, 0x96, 0xbb, 0x41, 0x1c, 0x3f, 0xc2, 0x44, 0x5c, 0x88, 0xc4, - 0xf1, 0xd1, 0xc3, 0x5a, 0x31, 0x7b, 0xcf, 0xaa, 0xcc, 0xc8, 0x95, 0x36, 0x17, 0xba, 0x58, 0x80, - 0xba, 0xda, 0x56, 0xfa, 0x8f, 0x91, 0x4d, 0x2d, 0xda, 0x2c, 0xab, 0x35, 0xcb, 0xc5, 0x70, 0x09, - 0x8c, 0xb1, 0xea, 0x17, 0xe1, 0xd0, 0x75, 0x6a, 0x16, 0xe5, 0xb7, 0x1f, 0x11, 0xdd, 0xea, 0x1a, - 0x5e, 0xcd, 0xf0, 0x51, 0xdb, 0x08, 0xf8, 0x0a, 0x80, 0xa2, 0x2c, 0x6c, 0xd1, 0x23, 0x2a, 0x01, - 0x55, 0xe0, 0x55, 0xdb, 0x24, 0x50, 0x87, 0x51, 0x70, 0x11, 0x8c, 0xbb, 0xd6, 0x16, 0x76, 0xab, - 0xd8, 0xc5, 0xb5, 0x28, 0x20, 0x5c, 0x95, 0xb8, 0x1f, 0x4e, 0x34, 0x1b, 0xc5, 0xf1, 0xd5, 0x2c, - 0x13, 0xb5, 0xcb, 0x97, 0xae, 0x64, 0xf7, 0xb2, 0x3e, 0x71, 0x51, 0x6c, 0x7f, 0x98, 0x03, 0x53, - 0xdd, 0x83, 0x02, 0x7e, 0x5f, 0x95, 0xc6, 0xa2, 0xe2, 0x7b, 0xed, 0x14, 0x42, 0x4f, 0x5e, 0x07, - 0x40, 0xfb, 0x55, 0x00, 0x1e, 0xb2, 0xf3, 0xda, 0x72, 0x93, 0x6b, 0xff, 0xe6, 0x69, 0xa0, 0x33, - 0xfd, 0x95, 0x21, 0x51, 0x05, 0x58, 0x2e, 0x3f, 0xf4, 0x2d, 0x17, 0x97, 0x3e, 0x6a, 0xbb, 0xda, - 0xa6, 0x9b, 0x15, 0xfe, 0xd8, 0x00, 0xa3, 0x41, 0x88, 0xfd, 0x85, 0x8d, 0x95, 0x7b, 0x5f, 0x15, - 0x9b, 0x56, 0x3a, 0x68, 0xe5, 0xf8, 0x26, 0xb2, 0xfb, 0xb5, 0xd0, 0xb5, 0x41, 0x82, 0x90, 0x56, - 0xce, 0x37, 0x1b, 0xc5, 0xd1, 0xf5, 0x56, 0x14, 0x94, 0x85, 0x2d, 0x79, 0x60, 0x62, 0xf9, 0x20, - 0xc2, 0xc4, 0xb7, 0xdc, 0xa5, 0xa0, 0x16, 0x7b, 0xd8, 0x8f, 0x84, 0x8d, 0x99, 0x76, 0x81, 0xf1, - 0x88, 0xed, 0x82, 0xcb, 0xa0, 0x2f, 0x26, 0xae, 0x8c, 0xda, 0x61, 0xd5, 0x04, 0x43, 0xab, 0x88, - 0xd1, 0x4b, 0x57, 0x40, 0x3f, 0xb3, 0x13, 0x5e, 0x04, 0x7d, 0xc4, 0xda, 0xe7, 0x5a, 0x47, 0x2a, - 0x83, 0x4c, 0x04, 0x59, 0xfb, 0x88, 0xd1, 0x4a, 0xff, 0x98, 0x01, 0xa3, 0x99, 0xb9, 0xc0, 0x29, - 0x90, 0x53, 0x9d, 0x35, 0x20, 0x95, 0xe6, 0x56, 0x96, 0x50, 0xce, 0xb1, 0xe1, 0x0b, 0x2a, 0xbb, - 0x0a, 0xd0, 0xa2, 0x3a, 0x2c, 0x38, 0x95, 0x95, 0x65, 0xa9, 0x3a, 0x66, 0x48, 0x92, 0x1e, 0x99, - 0x0d, 0x78, 0x5b, 0xee, 0x0a, 0x61, 0x03, 0xde, 0x46, 0x8c, 0x76, 0xdc, 0x5e, 0x49, 0xd2, 0xac, - 0xc9, 0x3f, 0x42, 0xb3, 0x66, 0xe0, 0x81, 0xcd, 0x9a, 0xab, 0x20, 0x1f, 0x39, 0x91, 0x8b, 0xf9, - 0x49, 0xa5, 0x15, 0xc3, 0x77, 0x18, 0x11, 0x09, 0x1e, 0xc4, 0x60, 0xd0, 0xc6, 0xdb, 0x56, 0xec, - 0x46, 0xfc, 0x50, 0x1a, 0x9e, 0xff, 0xd6, 0xc9, 0xa2, 0x47, 0x34, 0x33, 0x96, 0x84, 0x4a, 0x94, - 0xe8, 0x86, 0x8f, 0x83, 0x41, 0xcf, 0x3a, 0x70, 0xbc, 0xd8, 0xe3, 0x15, 0xa3, 0x21, 0xc4, 0xd6, - 0x04, 0x09, 0x25, 0x3c, 0x96, 0x04, 0xf1, 0x41, 0xcd, 0x8d, 0xa9, 0xb3, 0x87, 0x25, 0x53, 0x96, - 0x74, 0x2a, 0x09, 0x2e, 0x67, 0xf8, 0xa8, 0x6d, 0x04, 0x07, 0x73, 0x7c, 0x3e, 0x78, 0x58, 0x03, - 0x13, 0x24, 0x94, 0xf0, 0x5a, 0xc1, 0xa4, 0xfc, 0x48, 0x37, 0x30, 0x39, 0xb8, 0x6d, 0x04, 0x7c, - 0x0a, 0x0c, 0x79, 0xd6, 0xc1, 0x2a, 0xf6, 0xeb, 0xd1, 0x8e, 0x79, 0x76, 0xc6, 0x98, 0xed, 0xab, - 0x9c, 0x6d, 0x36, 0x8a, 0x43, 0x6b, 0x09, 0x11, 0xa5, 0x7c, 0x2e, 0xec, 0xf8, 0x52, 0xf8, 0x9c, - 0x26, 0x9c, 0x10, 0x51, 0xca, 0x67, 0x95, 0x49, 0x68, 
0x45, 0x6c, 0x5f, 0x99, 0xa3, 0xad, 0x17, - 0xe7, 0x0d, 0x41, 0x46, 0x09, 0x1f, 0xce, 0x82, 0x82, 0x67, 0x1d, 0xf0, 0x3b, 0xa5, 0x39, 0xc6, - 0xd5, 0xf2, 0x86, 0xe2, 0x9a, 0xa4, 0x21, 0xc5, 0xe5, 0x92, 0x8e, 0x2f, 0x24, 0xc7, 0x35, 0x49, - 0x49, 0x43, 0x8a, 0xcb, 0xe2, 0x37, 0xf6, 0x9d, 0xfb, 0x31, 0x16, 0xc2, 0x90, 0x7b, 0x46, 0xc5, - 0xef, 0xdd, 0x94, 0x85, 0x74, 0x39, 0x76, 0xa7, 0xf3, 0x62, 0x37, 0x72, 0x42, 0x17, 0xaf, 0x6f, - 0x9b, 0xe7, 0xb9, 0xff, 0x79, 0x29, 0xbf, 0xa6, 0xa8, 0x48, 0x93, 0x80, 0x6f, 0x81, 0x7e, 0xec, - 0xc7, 0x9e, 0x79, 0x81, 0x1f, 0xdf, 0x27, 0x8d, 0x3e, 0xb5, 0x5f, 0x96, 0xfd, 0xd8, 0x43, 0x5c, - 0x33, 0x7c, 0x01, 0x9c, 0xf5, 0xac, 0x03, 0x96, 0x04, 0x30, 0x89, 0xd8, 0x45, 0x73, 0x82, 0xcf, - 0x7b, 0x9c, 0x15, 0xb1, 0x6b, 0x3a, 0x03, 0xb5, 0xca, 0xf1, 0x81, 0x8e, 0xaf, 0x0d, 0x9c, 0xd4, - 0x06, 0xea, 0x0c, 0xd4, 0x2a, 0xc7, 0x9c, 0x4c, 0xf0, 0xfd, 0xd8, 0x21, 0xd8, 0x36, 0xbf, 0xc4, - 0xeb, 0x5e, 0xd9, 0xdf, 0x15, 0x34, 0xa4, 0xb8, 0xf0, 0x7e, 0xd2, 0x72, 0x30, 0xf9, 0xe6, 0xdb, - 0xe8, 0x59, 0xea, 0x5e, 0x27, 0x0b, 0x84, 0x58, 0x87, 0xe2, 0x54, 0xd1, 0x9b, 0x0d, 0xd0, 0x07, - 0x79, 0xcb, 0x75, 0xd7, 0xb7, 0xcd, 0x8b, 0xdc, 0xe3, 0x3d, 0x3c, 0x2d, 0x54, 0x86, 0x59, 0x60, - 0xfa, 0x91, 0x80, 0x61, 0x78, 0x81, 0xcf, 0x62, 0x61, 0xea, 0xd4, 0xf0, 0xd6, 0x99, 0x7e, 0x24, - 0x60, 0xf8, 0xfc, 0xfc, 0xc3, 0xf5, 0x6d, 0xf3, 0xb1, 0xd3, 0x9b, 0x1f, 0xd3, 0x8f, 0x04, 0x0c, - 0xb4, 0x41, 0x9f, 0x1f, 0x44, 0xe6, 0xa5, 0x5e, 0x9f, 0xbd, 0xfc, 0x34, 0xb9, 0x1d, 0x44, 0x88, - 0xa9, 0x87, 0x3f, 0x35, 0x00, 0x08, 0xd3, 0x48, 0xbc, 0x7c, 0xd2, 0x16, 0x40, 0x06, 0xad, 0x9c, - 0x46, 0xef, 0xb2, 0x1f, 0x91, 0xc3, 0xf4, 0x5e, 0xa3, 0x45, 0xb9, 0x66, 0x00, 0xfc, 0xa5, 0x01, - 0x2e, 0xe8, 0xe5, 0xae, 0xb2, 0x6c, 0x9a, 0xfb, 0x61, 0xbd, 0x87, 0x81, 0x5c, 0x09, 0x02, 0xb7, - 0x62, 0x36, 0x1b, 0xc5, 0x0b, 0x0b, 0x1d, 0x00, 0x51, 0x47, 0x33, 0xe0, 0x6f, 0x0d, 0x30, 0x2e, - 0xb3, 0xa3, 0x66, 0x5c, 0x91, 0xbb, 0xed, 0xad, 0x1e, 0xba, 0x2d, 0x0b, 0x21, 0xbc, 0xa7, 0xbe, - 0x32, 0xb6, 0xf1, 0x51, 0xbb, 0x55, 0xf0, 0x0f, 0x06, 0x18, 0xb1, 0x71, 0x88, 0x7d, 0x1b, 0xfb, - 0x35, 0x66, 0xe6, 0xcc, 0x49, 0xfb, 0x0a, 0x59, 0x33, 0x97, 0x34, 0xed, 0xc2, 0xc2, 0xb2, 0xb4, - 0x70, 0x44, 0x67, 0x1d, 0x35, 0x8a, 0x93, 0xe9, 0x50, 0x9d, 0x83, 0x5a, 0x0c, 0x84, 0x3f, 0x33, - 0xc0, 0x68, 0xea, 0x76, 0x71, 0x40, 0x5c, 0x39, 0x9d, 0x85, 0xe7, 0x25, 0xe8, 0x42, 0x2b, 0x16, - 0xca, 0x82, 0xc3, 0xdf, 0x19, 0xac, 0xda, 0x4a, 0xee, 0x6a, 0xd4, 0x2c, 0x71, 0x0f, 0xbe, 0xde, - 0x4b, 0x0f, 0x2a, 0xe5, 0xc2, 0x81, 0xd7, 0xd3, 0x4a, 0x4e, 0x71, 0x8e, 0x1a, 0xc5, 0x09, 0xdd, - 0x7f, 0x8a, 0x81, 0x74, 0xe3, 0xe0, 0xbb, 0x06, 0x18, 0xc1, 0x69, 0xc1, 0x4c, 0xcd, 0xab, 0x27, - 0x75, 0x5d, 0xc7, 0xf2, 0x5b, 0x5c, 0xa7, 0x35, 0x16, 0x45, 0x2d, 0xb0, 0xac, 0xf6, 0xc3, 0x07, - 0x96, 0x17, 0xba, 0xd8, 0xfc, 0x72, 0xef, 0x6a, 0xbf, 0x65, 0xa1, 0x12, 0x25, 0xba, 0xe1, 0x75, - 0x50, 0xf0, 0x63, 0xd7, 0xb5, 0xb6, 0x5c, 0x6c, 0x3e, 0xce, 0xab, 0x08, 0xd5, 0x5f, 0xbc, 0x2d, - 0xe9, 0x48, 0x49, 0xc0, 0x6d, 0x30, 0x73, 0x70, 0x4b, 0x3d, 0xbe, 0xe8, 0xd8, 0xc0, 0x33, 0xaf, - 0x71, 0x2d, 0x53, 0xcd, 0x46, 0x71, 0x72, 0xb3, 0x73, 0x8b, 0xef, 0xa1, 0x3a, 0xe0, 0x1b, 0xe0, - 0x31, 0x4d, 0x66, 0xd9, 0xdb, 0xc2, 0xb6, 0x8d, 0xed, 0xe4, 0xa2, 0x65, 0x7e, 0x85, 0x43, 0xa8, - 0x7d, 0xbc, 0x99, 0x15, 0x40, 0x0f, 0x1a, 0x0d, 0x57, 0xc1, 0xa4, 0xc6, 0x5e, 0xf1, 0xa3, 0x75, - 0x52, 0x8d, 0x88, 0xe3, 0xd7, 0xcd, 0x59, 0xae, 0xf7, 0x42, 0xb2, 0xfb, 0x36, 0x35, 0x1e, 0xea, - 0x32, 0x06, 0xbe, 0xdc, 0xa2, 0x8d, 0x7f, 0xb8, 0xb0, 0xc2, 0x5b, 0xf8, 0x90, 
0x9a, 0x4f, 0xf0, - 0xe2, 0x82, 0xaf, 0xf3, 0xa6, 0x46, 0x47, 0x5d, 0xe4, 0xe1, 0xb7, 0xc1, 0xf9, 0x0c, 0x87, 0xdd, - 0x2b, 0xcc, 0x27, 0xc5, 0x05, 0x81, 0x55, 0xa2, 0x9b, 0x09, 0x11, 0x75, 0x92, 0x84, 0xdf, 0x04, - 0x50, 0x23, 0xaf, 0x59, 0x21, 0x1f, 0xff, 0x94, 0xb8, 0xab, 0xb0, 0x15, 0xdd, 0x94, 0x34, 0xd4, - 0x41, 0x0e, 0x7e, 0x68, 0xb4, 0xcc, 0x24, 0xbd, 0xcd, 0x52, 0xf3, 0x3a, 0xdf, 0xb0, 0x2f, 0x1f, - 0x3f, 0x00, 0x53, 0x65, 0x28, 0x76, 0xb1, 0xe6, 0x61, 0x0d, 0x05, 0x75, 0x41, 0x9f, 0x62, 0x97, - 0xe9, 0x4c, 0x0e, 0x87, 0x63, 0xa0, 0x6f, 0x17, 0xcb, 0xcf, 0xc6, 0x88, 0xfd, 0x84, 0x6f, 0x82, - 0xfc, 0x9e, 0xe5, 0xc6, 0x49, 0x2b, 0xa0, 0x77, 0x67, 0x3d, 0x12, 0x7a, 0x5f, 0xcc, 0xdd, 0x30, - 0xa6, 0xde, 0x37, 0xc0, 0x64, 0xe7, 0x53, 0xe5, 0x8b, 0xb2, 0xe8, 0x17, 0x06, 0x18, 0x6f, 0x3b, - 0x40, 0x3a, 0x18, 0xe3, 0xb6, 0x1a, 0x73, 0xaf, 0x87, 0x27, 0x81, 0xd8, 0x08, 0xbc, 0xa2, 0xd5, - 0x2d, 0xfb, 0x89, 0x01, 0xc6, 0xb2, 0x89, 0xf9, 0x0b, 0xf2, 0x52, 0xe9, 0xbd, 0x1c, 0x98, 0xec, - 0x5c, 0x83, 0x43, 0x4f, 0x75, 0x17, 0x7a, 0xde, 0xa0, 0xe9, 0xd4, 0xb2, 0x7d, 0xc7, 0x00, 0xc3, - 0x6f, 0x2b, 0xb9, 0xe4, 0x6b, 0x66, 0x2f, 0xbb, 0x42, 0xc9, 0xd1, 0x97, 0x32, 0x28, 0xd2, 0x21, - 0x4b, 0xbf, 0x37, 0xc0, 0x44, 0xc7, 0xe3, 0x1c, 0x5e, 0x03, 0x03, 0x96, 0xeb, 0x06, 0xfb, 0xa2, - 0x9b, 0xa7, 0xb5, 0xe5, 0x17, 0x38, 0x15, 0x49, 0xae, 0xe6, 0xb3, 0xdc, 0xe7, 0xe0, 0xb3, 0xd2, - 0x9f, 0x0c, 0x70, 0xe9, 0x41, 0x51, 0xf7, 0x79, 0xaf, 0xe1, 0x2c, 0x28, 0xc8, 0x62, 0xfb, 0x90, - 0xaf, 0x9f, 0xcc, 0xae, 0x32, 0x23, 0xf0, 0xd7, 0x32, 0xe2, 0x57, 0xe9, 0xd7, 0x06, 0x18, 0xab, - 0x62, 0xb2, 0xe7, 0xd4, 0x30, 0xc2, 0xdb, 0x98, 0x60, 0xbf, 0x86, 0xe1, 0x1c, 0x18, 0xe2, 0x5f, - 0x1b, 0x43, 0xab, 0x96, 0x7c, 0x23, 0x19, 0x97, 0x8e, 0x1e, 0xba, 0x9d, 0x30, 0x50, 0x2a, 0xa3, - 0xbe, 0xa7, 0xe4, 0xba, 0x7e, 0x4f, 0xb9, 0x04, 0xfa, 0xc3, 0xb4, 0x01, 0x5c, 0x60, 0x5c, 0xde, - 0xf3, 0xe5, 0x54, 0xce, 0x0d, 0x48, 0xc4, 0xbb, 0x5c, 0x79, 0xc9, 0x0d, 0x48, 0x84, 0x38, 0xb5, - 0xf4, 0x41, 0x0e, 0x9c, 0x6b, 0xcd, 0xcf, 0x0c, 0x90, 0xc4, 0x6e, 0xdb, 0x07, 0x1c, 0xc6, 0x43, - 0x9c, 0xa3, 0xbf, 0x1b, 0xc8, 0x3d, 0xf8, 0xdd, 0x00, 0x7c, 0x09, 0x8c, 0xcb, 0x9f, 0xcb, 0x07, - 0x21, 0xc1, 0x94, 0x7f, 0x99, 0xec, 0x6b, 0x7d, 0xef, 0xb7, 0x96, 0x15, 0x40, 0xed, 0x63, 0xe0, - 0x37, 0x32, 0x6f, 0x1a, 0xae, 0xa6, 0xef, 0x19, 0x58, 0x6d, 0xc7, 0x4b, 0x87, 0x7b, 0x6c, 0xcb, - 0x2f, 0x13, 0x12, 0x90, 0xcc, 0x43, 0x87, 0x39, 0x30, 0xb4, 0xcd, 0x04, 0x78, 0x9f, 0x3c, 0xdf, - 0xea, 0xf4, 0x9b, 0x09, 0x03, 0xa5, 0x32, 0xa5, 0x3f, 0x1b, 0xe0, 0x7c, 0xf2, 0x1a, 0xc8, 0x75, - 0xb0, 0x1f, 0x2d, 0x06, 0xfe, 0xb6, 0x53, 0x87, 0x17, 0x45, 0xff, 0x53, 0x6b, 0x2a, 0x26, 0xbd, - 0x4f, 0x78, 0x1f, 0x0c, 0x52, 0xb1, 0xd8, 0x32, 0x0e, 0x5f, 0x39, 0x7e, 0x1c, 0x66, 0xa3, 0x46, - 0x94, 0x6f, 0x09, 0x35, 0xc1, 0x61, 0xa1, 0x58, 0xb3, 0x2a, 0xb1, 0x6f, 0xcb, 0x1e, 0xf8, 0x88, - 0x08, 0xc5, 0xc5, 0x05, 0x41, 0x43, 0x8a, 0x5b, 0xfa, 0xa7, 0x01, 0xc6, 0xdb, 0x5e, 0x37, 0xc1, - 0x1f, 0x1a, 0x60, 0xa4, 0xa6, 0x4d, 0x4f, 0x6e, 0xe8, 0xb5, 0x93, 0xbf, 0xa0, 0xd2, 0x94, 0x8a, - 0x1a, 0x48, 0xa7, 0xa0, 0x16, 0x50, 0xb8, 0x09, 0xcc, 0x5a, 0xe6, 0x21, 0x61, 0xe6, 0xd3, 0xe4, - 0xa5, 0x66, 0xa3, 0x68, 0x2e, 0x76, 0x91, 0x41, 0x5d, 0x47, 0x57, 0xbe, 0xfb, 0xf1, 0x67, 0xd3, - 0x67, 0x3e, 0xf9, 0x6c, 0xfa, 0xcc, 0xa7, 0x9f, 0x4d, 0x9f, 0x79, 0xa7, 0x39, 0x6d, 0x7c, 0xdc, - 0x9c, 0x36, 0x3e, 0x69, 0x4e, 0x1b, 0x9f, 0x36, 0xa7, 0x8d, 0xbf, 0x35, 0xa7, 0x8d, 0x9f, 0xff, - 0x7d, 0xfa, 0xcc, 0xeb, 0x37, 0x8e, 0xfb, 0x7c, 0xf8, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x1d, - 0x01, 
0xc1, 0x04, 0x92, 0x2c, 0x00, 0x00, + // 3137 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5a, 0xdf, 0x6f, 0x5c, 0x47, + 0xf5, 0xcf, 0x5d, 0x7b, 0xed, 0xf5, 0xd8, 0x89, 0xed, 0x49, 0xec, 0xef, 0x8d, 0x9b, 0x78, 0x9d, + 0xcd, 0xb7, 0xc1, 0x6d, 0xd3, 0x75, 0x1b, 0x5a, 0x1a, 0xca, 0x2f, 0x79, 0x6d, 0xa7, 0x75, 0x13, + 0xc7, 0xd6, 0x6c, 0x92, 0xba, 0x2d, 0xa2, 0xbd, 0xde, 0x3b, 0xbb, 0xbe, 0xf5, 0xfd, 0x95, 0x99, + 0x7b, 0xfd, 0x43, 0x02, 0xa9, 0x02, 0x55, 0x40, 0x25, 0x28, 0x0f, 0xa8, 0x3c, 0x21, 0x84, 0x50, + 0x1f, 0xe0, 0x01, 0xde, 0xe0, 0x5f, 0xe8, 0x0b, 0x52, 0x25, 0x24, 0x54, 0x09, 0x69, 0x45, 0x97, + 0x7f, 0x00, 0x09, 0x10, 0xc2, 0x0f, 0x08, 0xcd, 0x8f, 0x3b, 0x77, 0xf6, 0xee, 0x6e, 0x12, 0xd9, + 0xeb, 0xf6, 0x6d, 0xf7, 0x9c, 0x33, 0xe7, 0x73, 0xe6, 0xcc, 0x99, 0x33, 0x67, 0xce, 0x1d, 0x60, + 0xed, 0x5c, 0xa7, 0x65, 0x27, 0x58, 0xd8, 0x89, 0xb7, 0x30, 0xf1, 0x71, 0x84, 0xe9, 0xc2, 0x2e, + 0xf6, 0xed, 0x80, 0x2c, 0x48, 0x86, 0x15, 0x3a, 0x78, 0x3f, 0xc2, 0x3e, 0x75, 0x02, 0x9f, 0x3e, + 0x6d, 0x85, 0x0e, 0xc5, 0x64, 0x17, 0x93, 0x85, 0x70, 0xa7, 0xc1, 0x78, 0xb4, 0x5d, 0x60, 0x61, + 0xf7, 0xd9, 0x85, 0x06, 0xf6, 0x31, 0xb1, 0x22, 0x6c, 0x97, 0x43, 0x12, 0x44, 0x01, 0xbc, 0x2e, + 0x34, 0x95, 0xdb, 0x04, 0xdf, 0x54, 0x9a, 0xca, 0xe1, 0x4e, 0x83, 0xf1, 0x68, 0xbb, 0x40, 0x79, + 0xf7, 0xd9, 0x99, 0xa7, 0x1b, 0x4e, 0xb4, 0x1d, 0x6f, 0x95, 0x6b, 0x81, 0xb7, 0xd0, 0x08, 0x1a, + 0xc1, 0x02, 0x57, 0xb8, 0x15, 0xd7, 0xf9, 0x3f, 0xfe, 0x87, 0xff, 0x12, 0x40, 0x33, 0xcf, 0xa5, + 0x26, 0x7b, 0x56, 0x6d, 0xdb, 0xf1, 0x31, 0x39, 0x48, 0xed, 0xf4, 0x70, 0x64, 0x75, 0x31, 0x6f, + 0x66, 0xa1, 0xd7, 0x28, 0x12, 0xfb, 0x91, 0xe3, 0xe1, 0x8e, 0x01, 0x5f, 0x7a, 0xd8, 0x00, 0x5a, + 0xdb, 0xc6, 0x9e, 0x95, 0x1d, 0x57, 0x3a, 0x34, 0xc0, 0xe4, 0x52, 0xe0, 0xef, 0x62, 0xc2, 0x26, + 0x88, 0xf0, 0xfd, 0x18, 0xd3, 0x08, 0x56, 0xc0, 0x40, 0xec, 0xd8, 0xa6, 0x31, 0x67, 0xcc, 0x8f, + 0x54, 0x9e, 0xf9, 0xa8, 0x59, 0x3c, 0xd5, 0x6a, 0x16, 0x07, 0xee, 0xae, 0x2e, 0x1f, 0x36, 0x8b, + 0x97, 0x7a, 0x21, 0x45, 0x07, 0x21, 0xa6, 0xe5, 0xbb, 0xab, 0xcb, 0x88, 0x0d, 0x86, 0x2f, 0x81, + 0x49, 0x1b, 0x53, 0x87, 0x60, 0x7b, 0x71, 0x63, 0xf5, 0x9e, 0xd0, 0x6f, 0xe6, 0xb8, 0xc6, 0xf3, + 0x52, 0xe3, 0xe4, 0x72, 0x56, 0x00, 0x75, 0x8e, 0x81, 0x9b, 0x60, 0x38, 0xd8, 0x7a, 0x1b, 0xd7, + 0x22, 0x6a, 0x0e, 0xcc, 0x0d, 0xcc, 0x8f, 0x5e, 0x7b, 0xba, 0x9c, 0x2e, 0x9e, 0x32, 0x81, 0xaf, + 0x98, 0x9c, 0x6c, 0x19, 0x59, 0x7b, 0x2b, 0xc9, 0xa2, 0x55, 0xc6, 0x25, 0xda, 0xf0, 0xba, 0xd0, + 0x82, 0x12, 0x75, 0xa5, 0x5f, 0xe5, 0x00, 0xd4, 0x27, 0x4f, 0xc3, 0xc0, 0xa7, 0xb8, 0x2f, 0xb3, + 0xa7, 0x60, 0xa2, 0xc6, 0x35, 0x47, 0xd8, 0x96, 0xb8, 0x66, 0xee, 0x28, 0xd6, 0x9b, 0x12, 0x7f, + 0x62, 0x29, 0xa3, 0x0e, 0x75, 0x00, 0xc0, 0x3b, 0x60, 0x88, 0x60, 0x1a, 0xbb, 0x91, 0x39, 0x30, + 0x67, 0xcc, 0x8f, 0x5e, 0xbb, 0xda, 0x13, 0x8a, 0x87, 0x36, 0x0b, 0xbe, 0xf2, 0xee, 0xb3, 0xe5, + 0x6a, 0x64, 0x45, 0x31, 0xad, 0x9c, 0x91, 0x48, 0x43, 0x88, 0xeb, 0x40, 0x52, 0x57, 0xe9, 0xbf, + 0x06, 0x98, 0xd0, 0xbd, 0xb4, 0xeb, 0xe0, 0x3d, 0x48, 0xc0, 0x30, 0x11, 0xc1, 0xc2, 0xfd, 0x34, + 0x7a, 0xed, 0x66, 0xf9, 0xa8, 0x3b, 0xaa, 0xdc, 0x11, 0x7f, 0x95, 0x51, 0xb6, 0x5c, 0xf2, 0x0f, + 0x4a, 0x80, 0xe0, 0x2e, 0x28, 0x10, 0xb9, 0x46, 0x3c, 0x90, 0x46, 0xaf, 0xdd, 0xea, 0x0f, 0xa8, + 0xd0, 0x59, 0x19, 0x6b, 0x35, 0x8b, 0x85, 0xe4, 0x1f, 0x52, 0x58, 0xa5, 0x5f, 0xe4, 0xc0, 0xec, + 0x52, 0x4c, 0xa3, 0xc0, 0x43, 0x98, 0x06, 0x31, 0xa9, 0xe1, 0xa5, 0xc0, 0x8d, 0x3d, 0x7f, 0x19, + 0xd7, 0x1d, 0xdf, 0x89, 0x58, 0x8c, 
0xce, 0x81, 0x41, 0xdf, 0xf2, 0xb0, 0x8c, 0x99, 0x31, 0xe9, + 0xc9, 0xc1, 0xdb, 0x96, 0x87, 0x11, 0xe7, 0x30, 0x09, 0x16, 0x22, 0x72, 0x07, 0x28, 0x89, 0x3b, + 0x07, 0x21, 0x46, 0x9c, 0x03, 0xaf, 0x80, 0xa1, 0x7a, 0x40, 0x3c, 0x4b, 0xac, 0xde, 0x48, 0xba, + 0x1e, 0x37, 0x38, 0x15, 0x49, 0x2e, 0x7c, 0x1e, 0x8c, 0xda, 0x98, 0xd6, 0x88, 0x13, 0x32, 0x68, + 0x73, 0x90, 0x0b, 0x9f, 0x95, 0xc2, 0xa3, 0xcb, 0x29, 0x0b, 0xe9, 0x72, 0xf0, 0x2a, 0x28, 0x84, + 0xc4, 0x09, 0x88, 0x13, 0x1d, 0x98, 0xf9, 0x39, 0x63, 0x3e, 0x5f, 0x99, 0x90, 0x63, 0x0a, 0x1b, + 0x92, 0x8e, 0x94, 0x04, 0x93, 0x7e, 0x9b, 0x06, 0xfe, 0x86, 0x15, 0x6d, 0x9b, 0x43, 0x1c, 0x41, + 0x49, 0xbf, 0x52, 0x5d, 0xbf, 0xcd, 0xe8, 0x48, 0x49, 0x94, 0xfe, 0x6c, 0x00, 0x33, 0xeb, 0xa1, + 0xc4, 0xbd, 0xf0, 0x06, 0x28, 0xd0, 0x88, 0xe5, 0x9c, 0xc6, 0x81, 0xf4, 0xcf, 0x93, 0x89, 0xaa, + 0xaa, 0xa4, 0x1f, 0x36, 0x8b, 0xd3, 0xe9, 0x88, 0x84, 0xca, 0x7d, 0xa3, 0xc6, 0xb2, 0x90, 0xdb, + 0xc3, 0x5b, 0xdb, 0x41, 0xb0, 0x23, 0x57, 0xff, 0x18, 0x21, 0xf7, 0xaa, 0x50, 0x94, 0x62, 0x8a, + 0x90, 0x93, 0x64, 0x94, 0x00, 0x95, 0xfe, 0x93, 0xcb, 0x4e, 0x4c, 0x5b, 0xf4, 0xb7, 0x40, 0x81, + 0x6d, 0x21, 0xdb, 0x8a, 0x2c, 0xb9, 0x09, 0x9e, 0x79, 0xb4, 0x0d, 0x27, 0xf6, 0xeb, 0x1a, 0x8e, + 0xac, 0x0a, 0x94, 0xae, 0x00, 0x29, 0x0d, 0x29, 0xad, 0x70, 0x1f, 0x0c, 0xd2, 0x10, 0xd7, 0xe4, + 0x7c, 0xef, 0x1d, 0x23, 0xda, 0x7b, 0xcc, 0xa1, 0x1a, 0xe2, 0x5a, 0x1a, 0x8c, 0xec, 0x1f, 0xe2, + 0x88, 0xf0, 0x1d, 0x03, 0x0c, 0x51, 0x9e, 0x17, 0x64, 0x2e, 0xd9, 0x3c, 0x01, 0xf0, 0x4c, 0xde, + 0x11, 0xff, 0x91, 0xc4, 0x2d, 0xfd, 0x33, 0x07, 0x2e, 0xf5, 0x1a, 0xba, 0x14, 0xf8, 0xb6, 0x58, + 0x84, 0x55, 0xb9, 0xaf, 0x44, 0x64, 0x3d, 0xaf, 0xef, 0xab, 0xc3, 0x66, 0xf1, 0xf1, 0x87, 0x2a, + 0xd0, 0x36, 0xe0, 0x97, 0xd5, 0x94, 0xc5, 0x26, 0xbd, 0xd4, 0x6e, 0xd8, 0x61, 0xb3, 0x38, 0xae, + 0x86, 0xb5, 0xdb, 0x0a, 0x77, 0x01, 0x74, 0x2d, 0x1a, 0xdd, 0x21, 0x96, 0x4f, 0x85, 0x5a, 0xc7, + 0xc3, 0xd2, 0x73, 0x4f, 0x3e, 0x5a, 0x50, 0xb0, 0x11, 0x95, 0x19, 0x09, 0x09, 0x6f, 0x75, 0x68, + 0x43, 0x5d, 0x10, 0x58, 0xce, 0x20, 0xd8, 0xa2, 0x2a, 0x0d, 0x68, 0x39, 0x9c, 0x51, 0x91, 0xe4, + 0xc2, 0x27, 0xc0, 0xb0, 0x87, 0x29, 0xb5, 0x1a, 0x98, 0xef, 0xfd, 0x91, 0xf4, 0x50, 0x5c, 0x13, + 0x64, 0x94, 0xf0, 0x4b, 0xff, 0x32, 0xc0, 0x85, 0x5e, 0x5e, 0xbb, 0xe5, 0xd0, 0x08, 0x7e, 0xb3, + 0x23, 0xec, 0xcb, 0x8f, 0x36, 0x43, 0x36, 0x9a, 0x07, 0xbd, 0x4a, 0x25, 0x09, 0x45, 0x0b, 0xf9, + 0x3d, 0x90, 0x77, 0x22, 0xec, 0x25, 0xa7, 0x25, 0xea, 0x7f, 0xd8, 0x55, 0x4e, 0x4b, 0xf8, 0xfc, + 0x2a, 0x03, 0x42, 0x02, 0xaf, 0xf4, 0x61, 0x0e, 0x5c, 0xec, 0x35, 0x84, 0xe5, 0x71, 0xca, 0x9c, + 0x1d, 0xba, 0x31, 0xb1, 0x5c, 0x19, 0x6c, 0xca, 0xd9, 0x1b, 0x9c, 0x8a, 0x24, 0x97, 0xe5, 0x4e, + 0xea, 0xf8, 0x8d, 0xd8, 0xb5, 0x88, 0x8c, 0x24, 0x35, 0xe1, 0xaa, 0xa4, 0x23, 0x25, 0x01, 0xcb, + 0x00, 0xd0, 0xed, 0x80, 0x44, 0x1c, 0x83, 0x57, 0x38, 0x23, 0x95, 0x33, 0x2c, 0x23, 0x54, 0x15, + 0x15, 0x69, 0x12, 0xec, 0x20, 0xd9, 0x71, 0x7c, 0x5b, 0x2e, 0xb8, 0xda, 0xbb, 0x37, 0x1d, 0xdf, + 0x46, 0x9c, 0xc3, 0xf0, 0x5d, 0x87, 0x46, 0x8c, 0x22, 0x57, 0xbb, 0xcd, 0xe1, 0x5c, 0x52, 0x49, + 0x30, 0xfc, 0x1a, 0x4b, 0xb0, 0x01, 0x71, 0x30, 0x35, 0x87, 0x52, 0xfc, 0x25, 0x45, 0x45, 0x9a, + 0x44, 0xe9, 0x2f, 0x83, 0xbd, 0xe3, 0x83, 0x25, 0x10, 0x78, 0x19, 0xe4, 0x1b, 0x24, 0x88, 0x43, + 0xe9, 0x25, 0xe5, 0xed, 0x97, 0x18, 0x11, 0x09, 0x1e, 0xfc, 0x36, 0xc8, 0xfb, 0x72, 0xc2, 0x2c, + 0x82, 0x5e, 0xed, 0xff, 0x32, 0x73, 0x6f, 0xa5, 0xe8, 0xc2, 0x91, 0x02, 0x14, 0x3e, 0x07, 0xf2, + 0xb4, 0x16, 0x84, 0x58, 0x3a, 0x71, 0x36, 0x11, 0xaa, 0x32, 
0xe2, 0x61, 0xb3, 0x78, 0x3a, 0x51, + 0xc7, 0x09, 0x48, 0x08, 0xc3, 0xef, 0x1b, 0xa0, 0x20, 0x8f, 0x0b, 0x6a, 0x0e, 0xf3, 0xf0, 0x7c, + 0xad, 0xff, 0x76, 0xcb, 0xb2, 0x37, 0x5d, 0x33, 0x49, 0xa0, 0x48, 0x81, 0xc3, 0xef, 0x1a, 0x00, + 0xd4, 0xd4, 0xd9, 0x65, 0x8e, 0x70, 0x1f, 0xf6, 0x6d, 0xab, 0x68, 0xa7, 0xa2, 0x08, 0x84, 0xb4, + 0x54, 0xd2, 0x50, 0x61, 0x15, 0x4c, 0x85, 0x04, 0x73, 0xdd, 0x77, 0xfd, 0x1d, 0x3f, 0xd8, 0xf3, + 0x6f, 0x38, 0xd8, 0xb5, 0xa9, 0x09, 0xe6, 0x8c, 0xf9, 0x42, 0xe5, 0xa2, 0xb4, 0x7f, 0x6a, 0xa3, + 0x9b, 0x10, 0xea, 0x3e, 0xb6, 0xf4, 0xee, 0x40, 0xb6, 0xd6, 0xca, 0x9e, 0x17, 0xf0, 0x7d, 0x31, + 0x79, 0x91, 0x87, 0xa9, 0x69, 0xf0, 0x85, 0x78, 0xa3, 0xff, 0x0b, 0xa1, 0x72, 0x7d, 0x7a, 0x48, + 0x2b, 0x12, 0x45, 0x9a, 0x09, 0xf0, 0xa7, 0x06, 0x38, 0x6d, 0xd5, 0x6a, 0x38, 0x8c, 0xb0, 0x2d, + 0xb6, 0x71, 0xee, 0x64, 0xa3, 0x7a, 0x4a, 0x1a, 0x74, 0x7a, 0x51, 0x47, 0x45, 0xed, 0x46, 0xc0, + 0x17, 0xc1, 0x19, 0x1a, 0x05, 0x04, 0xdb, 0x49, 0x04, 0xc9, 0xec, 0x02, 0x5b, 0xcd, 0xe2, 0x99, + 0x6a, 0x1b, 0x07, 0x65, 0x24, 0x4b, 0x1f, 0xe7, 0x41, 0xf1, 0x21, 0x11, 0xfa, 0x08, 0x45, 0xef, + 0x15, 0x30, 0xc4, 0x67, 0x6a, 0x73, 0x87, 0x14, 0xb4, 0xa3, 0x9e, 0x53, 0x91, 0xe4, 0xb2, 0xe3, + 0x89, 0xe1, 0xb3, 0xe3, 0x69, 0x80, 0x0b, 0xaa, 0xe3, 0xa9, 0x2a, 0xc8, 0x28, 0xe1, 0xc3, 0x6b, + 0x00, 0xd8, 0x38, 0x24, 0x98, 0x65, 0x24, 0xdb, 0x1c, 0xe6, 0xd2, 0x6a, 0x7d, 0x96, 0x15, 0x07, + 0x69, 0x52, 0xf0, 0x06, 0x80, 0xc9, 0x3f, 0x27, 0xf0, 0x5f, 0xb5, 0x88, 0xef, 0xf8, 0x0d, 0xb3, + 0xc0, 0xcd, 0x9e, 0x66, 0xa7, 0xed, 0x72, 0x07, 0x17, 0x75, 0x19, 0x01, 0x77, 0xc1, 0x90, 0xb8, + 0x46, 0xf3, 0xbc, 0xd1, 0xc7, 0x1d, 0x77, 0xcf, 0x72, 0x1d, 0x9b, 0x43, 0x55, 0x00, 0x77, 0x0f, + 0x47, 0x41, 0x12, 0x0d, 0xbe, 0x67, 0x80, 0x31, 0x1a, 0x6f, 0x11, 0x29, 0x4d, 0x79, 0x56, 0x1f, + 0xbd, 0x76, 0xa7, 0x5f, 0xf0, 0x55, 0x4d, 0x77, 0x65, 0xa2, 0xd5, 0x2c, 0x8e, 0xe9, 0x14, 0xd4, + 0x86, 0x0d, 0x7f, 0x6f, 0x00, 0xd3, 0xb2, 0x45, 0xe8, 0x5b, 0xee, 0x06, 0x71, 0xfc, 0x08, 0x13, + 0x71, 0x21, 0x12, 0xc7, 0x47, 0x1f, 0x6b, 0xc5, 0xec, 0x3d, 0xab, 0x32, 0x27, 0x57, 0xda, 0x5c, + 0xec, 0x61, 0x01, 0xea, 0x69, 0x5b, 0xe9, 0xdf, 0x46, 0x36, 0xb5, 0x68, 0xb3, 0xac, 0xd6, 0x2c, + 0x17, 0xc3, 0x65, 0x30, 0xc1, 0xaa, 0x5f, 0x84, 0x43, 0xd7, 0xa9, 0x59, 0x94, 0xdf, 0x7e, 0x44, + 0x74, 0xab, 0x6b, 0x78, 0x35, 0xc3, 0x47, 0x1d, 0x23, 0xe0, 0x2b, 0x00, 0x8a, 0xb2, 0xb0, 0x4d, + 0x8f, 0xa8, 0x04, 0x54, 0x81, 0x57, 0xed, 0x90, 0x40, 0x5d, 0x46, 0xc1, 0x25, 0x30, 0xe9, 0x5a, + 0x5b, 0xd8, 0xad, 0x62, 0x17, 0xd7, 0xa2, 0x80, 0x70, 0x55, 0xe2, 0x7e, 0x38, 0xd5, 0x6a, 0x16, + 0x27, 0x6f, 0x65, 0x99, 0xa8, 0x53, 0xbe, 0x74, 0x29, 0xbb, 0x97, 0xf5, 0x89, 0x8b, 0x62, 0xfb, + 0x83, 0x1c, 0x98, 0xe9, 0x1d, 0x14, 0xf0, 0x3b, 0xaa, 0x34, 0x16, 0x15, 0xdf, 0x6b, 0x27, 0x10, + 0x7a, 0xf2, 0x3a, 0x00, 0x3a, 0xaf, 0x02, 0xf0, 0x80, 0x9d, 0xd7, 0x96, 0x9b, 0x5c, 0xfb, 0x37, + 0x4f, 0x02, 0x9d, 0xe9, 0xaf, 0x8c, 0x88, 0x2a, 0xc0, 0x72, 0xf9, 0xa1, 0x6f, 0xb9, 0xb8, 0xf4, + 0x61, 0xc7, 0xd5, 0x36, 0xdd, 0xac, 0xf0, 0x07, 0x06, 0x18, 0x0f, 0x42, 0xec, 0x2f, 0x6e, 0xac, + 0xde, 0xfb, 0xa2, 0xd8, 0xb4, 0xd2, 0x41, 0xab, 0x47, 0x37, 0x91, 0xdd, 0xaf, 0x85, 0xae, 0x0d, + 0x12, 0x84, 0xb4, 0x72, 0xb6, 0xd5, 0x2c, 0x8e, 0xaf, 0xb7, 0xa3, 0xa0, 0x2c, 0x6c, 0xc9, 0x03, + 0x53, 0x2b, 0xfb, 0x11, 0x26, 0xbe, 0xe5, 0x2e, 0x07, 0xb5, 0xd8, 0xc3, 0x7e, 0x24, 0x6c, 0xcc, + 0xb4, 0x0b, 0x8c, 0x47, 0x6c, 0x17, 0x5c, 0x04, 0x03, 0x31, 0x71, 0x65, 0xd4, 0x8e, 0xaa, 0x26, + 0x18, 0xba, 0x85, 0x18, 0xbd, 0x74, 0x09, 0x0c, 0x32, 0x3b, 0xe1, 0x79, 0x30, 0x40, 
0xac, 0x3d, + 0xae, 0x75, 0xac, 0x32, 0xcc, 0x44, 0x90, 0xb5, 0x87, 0x18, 0xad, 0xf4, 0xf7, 0x39, 0x30, 0x9e, + 0x99, 0x0b, 0x9c, 0x01, 0x39, 0xd5, 0x59, 0x03, 0x52, 0x69, 0x6e, 0x75, 0x19, 0xe5, 0x1c, 0x1b, + 0xbe, 0xa0, 0xb2, 0xab, 0x00, 0x2d, 0xaa, 0xc3, 0x82, 0x53, 0x59, 0x59, 0x96, 0xaa, 0x63, 0x86, + 0x24, 0xe9, 0x91, 0xd9, 0x80, 0xeb, 0x72, 0x57, 0x08, 0x1b, 0x70, 0x1d, 0x31, 0xda, 0x51, 0x7b, + 0x25, 0x49, 0xb3, 0x26, 0xff, 0x08, 0xcd, 0x9a, 0xa1, 0x07, 0x36, 0x6b, 0x2e, 0x83, 0x7c, 0xe4, + 0x44, 0x2e, 0xe6, 0x27, 0x95, 0x56, 0x0c, 0xdf, 0x61, 0x44, 0x24, 0x78, 0x10, 0x83, 0x61, 0x1b, + 0xd7, 0xad, 0xd8, 0x8d, 0xf8, 0xa1, 0x34, 0x7a, 0xed, 0xeb, 0xc7, 0x8b, 0x1e, 0xd1, 0xcc, 0x58, + 0x16, 0x2a, 0x51, 0xa2, 0x1b, 0x3e, 0x0e, 0x86, 0x3d, 0x6b, 0xdf, 0xf1, 0x62, 0x8f, 0x57, 0x8c, + 0x86, 0x10, 0x5b, 0x13, 0x24, 0x94, 0xf0, 0x58, 0x12, 0xc4, 0xfb, 0x35, 0x37, 0xa6, 0xce, 0x2e, + 0x96, 0x4c, 0x59, 0xd2, 0xa9, 0x24, 0xb8, 0x92, 0xe1, 0xa3, 0x8e, 0x11, 0x1c, 0xcc, 0xf1, 0xf9, + 0xe0, 0x51, 0x0d, 0x4c, 0x90, 0x50, 0xc2, 0x6b, 0x07, 0x93, 0xf2, 0x63, 0xbd, 0xc0, 0xe4, 0xe0, + 0x8e, 0x11, 0xf0, 0x29, 0x30, 0xe2, 0x59, 0xfb, 0xb7, 0xb0, 0xdf, 0x88, 0xb6, 0xcd, 0xd3, 0x73, + 0xc6, 0xfc, 0x40, 0xe5, 0x74, 0xab, 0x59, 0x1c, 0x59, 0x4b, 0x88, 0x28, 0xe5, 0x73, 0x61, 0xc7, + 0x97, 0xc2, 0x67, 0x34, 0xe1, 0x84, 0x88, 0x52, 0x3e, 0xab, 0x4c, 0x42, 0x2b, 0x62, 0xfb, 0xca, + 0x1c, 0x6f, 0xbf, 0x38, 0x6f, 0x08, 0x32, 0x4a, 0xf8, 0x70, 0x1e, 0x14, 0x3c, 0x6b, 0x9f, 0xdf, + 0x29, 0xcd, 0x09, 0xae, 0x96, 0x37, 0x14, 0xd7, 0x24, 0x0d, 0x29, 0x2e, 0x97, 0x74, 0x7c, 0x21, + 0x39, 0xa9, 0x49, 0x4a, 0x1a, 0x52, 0x5c, 0x16, 0xbf, 0xb1, 0xef, 0xdc, 0x8f, 0xb1, 0x10, 0x86, + 0xdc, 0x33, 0x2a, 0x7e, 0xef, 0xa6, 0x2c, 0xa4, 0xcb, 0xb1, 0x3b, 0x9d, 0x17, 0xbb, 0x91, 0x13, + 0xba, 0x78, 0xbd, 0x6e, 0x9e, 0xe5, 0xfe, 0xe7, 0xa5, 0xfc, 0x9a, 0xa2, 0x22, 0x4d, 0x02, 0xbe, + 0x05, 0x06, 0xb1, 0x1f, 0x7b, 0xe6, 0x39, 0x7e, 0x7c, 0x1f, 0x37, 0xfa, 0xd4, 0x7e, 0x59, 0xf1, + 0x63, 0x0f, 0x71, 0xcd, 0xf0, 0x05, 0x70, 0xda, 0xb3, 0xf6, 0x59, 0x12, 0xc0, 0x24, 0x62, 0x17, + 0xcd, 0x29, 0x3e, 0xef, 0x49, 0x56, 0xc4, 0xae, 0xe9, 0x0c, 0xd4, 0x2e, 0xc7, 0x07, 0x3a, 0xbe, + 0x36, 0x70, 0x5a, 0x1b, 0xa8, 0x33, 0x50, 0xbb, 0x1c, 0x73, 0x32, 0xc1, 0xf7, 0x63, 0x87, 0x60, + 0xdb, 0xfc, 0x3f, 0x5e, 0xf7, 0xca, 0xfe, 0xae, 0xa0, 0x21, 0xc5, 0x85, 0xf7, 0x93, 0x96, 0x83, + 0xc9, 0x37, 0xdf, 0x46, 0xdf, 0x52, 0xf7, 0x3a, 0x59, 0x24, 0xc4, 0x3a, 0x10, 0xa7, 0x8a, 0xde, + 0x6c, 0x80, 0x3e, 0xc8, 0x5b, 0xae, 0xbb, 0x5e, 0x37, 0xcf, 0x73, 0x8f, 0xf7, 0xf1, 0xb4, 0x50, + 0x19, 0x66, 0x91, 0xe9, 0x47, 0x02, 0x86, 0xe1, 0x05, 0x3e, 0x8b, 0x85, 0x99, 0x13, 0xc3, 0x5b, + 0x67, 0xfa, 0x91, 0x80, 0xe1, 0xf3, 0xf3, 0x0f, 0xd6, 0xeb, 0xe6, 0x63, 0x27, 0x37, 0x3f, 0xa6, + 0x1f, 0x09, 0x18, 0x68, 0x83, 0x01, 0x3f, 0x88, 0xcc, 0x0b, 0xfd, 0x3e, 0x7b, 0xf9, 0x69, 0x72, + 0x3b, 0x88, 0x10, 0x53, 0x0f, 0x7f, 0x64, 0x00, 0x10, 0xa6, 0x91, 0x78, 0xf1, 0xb8, 0x2d, 0x80, + 0x0c, 0x5a, 0x39, 0x8d, 0xde, 0x15, 0x3f, 0x22, 0x07, 0xe9, 0xbd, 0x46, 0x8b, 0x72, 0xcd, 0x00, + 0xf8, 0x73, 0x03, 0x9c, 0xd3, 0xcb, 0x5d, 0x65, 0xd9, 0x2c, 0xf7, 0xc3, 0x7a, 0x1f, 0x03, 0xb9, + 0x12, 0x04, 0x6e, 0xc5, 0x6c, 0x35, 0x8b, 0xe7, 0x16, 0xbb, 0x00, 0xa2, 0xae, 0x66, 0xc0, 0x5f, + 0x1b, 0x60, 0x52, 0x66, 0x47, 0xcd, 0xb8, 0x22, 0x77, 0xdb, 0x5b, 0x7d, 0x74, 0x5b, 0x16, 0x42, + 0x78, 0x4f, 0x7d, 0x65, 0xec, 0xe0, 0xa3, 0x4e, 0xab, 0xe0, 0xef, 0x0c, 0x30, 0x66, 0xe3, 0x10, + 0xfb, 0x36, 0xf6, 0x6b, 0xcc, 0xcc, 0xb9, 0xe3, 0xf6, 0x15, 0xb2, 0x66, 0x2e, 0x6b, 0xda, 0x85, + 0x85, 0x65, 
0x69, 0xe1, 0x98, 0xce, 0x3a, 0x6c, 0x16, 0xa7, 0xd3, 0xa1, 0x3a, 0x07, 0xb5, 0x19, + 0x08, 0x7f, 0x6c, 0x80, 0xf1, 0xd4, 0xed, 0xe2, 0x80, 0xb8, 0x74, 0x32, 0x0b, 0xcf, 0x4b, 0xd0, + 0xc5, 0x76, 0x2c, 0x94, 0x05, 0x87, 0xbf, 0x31, 0x58, 0xb5, 0x95, 0xdc, 0xd5, 0xa8, 0x59, 0xe2, + 0x1e, 0x7c, 0xbd, 0x9f, 0x1e, 0x54, 0xca, 0x85, 0x03, 0xaf, 0xa6, 0x95, 0x9c, 0xe2, 0x1c, 0x36, + 0x8b, 0x53, 0xba, 0xff, 0x14, 0x03, 0xe9, 0xc6, 0xc1, 0x77, 0x0d, 0x30, 0x86, 0xd3, 0x82, 0x99, + 0x9a, 0x97, 0x8f, 0xeb, 0xba, 0xae, 0xe5, 0xb7, 0xb8, 0x4e, 0x6b, 0x2c, 0x8a, 0xda, 0x60, 0x59, + 0xed, 0x87, 0xf7, 0x2d, 0x2f, 0x74, 0xb1, 0xf9, 0xff, 0xfd, 0xab, 0xfd, 0x56, 0x84, 0x4a, 0x94, + 0xe8, 0x86, 0x57, 0x41, 0xc1, 0x8f, 0x5d, 0xd7, 0xda, 0x72, 0xb1, 0xf9, 0x38, 0xaf, 0x22, 0x54, + 0x7f, 0xf1, 0xb6, 0xa4, 0x23, 0x25, 0x01, 0xeb, 0x60, 0x6e, 0xff, 0xa6, 0x7a, 0x7c, 0xd1, 0xb5, + 0x81, 0x67, 0x5e, 0xe1, 0x5a, 0x66, 0x5a, 0xcd, 0xe2, 0xf4, 0x66, 0xf7, 0x16, 0xdf, 0x43, 0x75, + 0xc0, 0x37, 0xc0, 0x63, 0x9a, 0xcc, 0x8a, 0xb7, 0x85, 0x6d, 0x1b, 0xdb, 0xc9, 0x45, 0xcb, 0xfc, + 0x02, 0x87, 0x50, 0xfb, 0x78, 0x33, 0x2b, 0x80, 0x1e, 0x34, 0x1a, 0xde, 0x02, 0xd3, 0x1a, 0x7b, + 0xd5, 0x8f, 0xd6, 0x49, 0x35, 0x22, 0x8e, 0xdf, 0x30, 0xe7, 0xb9, 0xde, 0x73, 0xc9, 0xee, 0xdb, + 0xd4, 0x78, 0xa8, 0xc7, 0x18, 0xf8, 0x72, 0x9b, 0x36, 0xfe, 0xe1, 0xc2, 0x0a, 0x6f, 0xe2, 0x03, + 0x6a, 0x3e, 0xc1, 0x8b, 0x0b, 0xbe, 0xce, 0x9b, 0x1a, 0x1d, 0xf5, 0x90, 0x87, 0xdf, 0x00, 0x67, + 0x33, 0x1c, 0x76, 0xaf, 0x30, 0x9f, 0x14, 0x17, 0x04, 0x56, 0x89, 0x6e, 0x26, 0x44, 0xd4, 0x4d, + 0x12, 0x7e, 0x15, 0x40, 0x8d, 0xbc, 0x66, 0x85, 0x7c, 0xfc, 0x53, 0xe2, 0xae, 0xc2, 0x56, 0x74, + 0x53, 0xd2, 0x50, 0x17, 0x39, 0xf8, 0x81, 0xd1, 0x36, 0x93, 0xf4, 0x36, 0x4b, 0xcd, 0xab, 0x7c, + 0xc3, 0xbe, 0x7c, 0xf4, 0x00, 0x4c, 0x95, 0xa1, 0xd8, 0xc5, 0x9a, 0x87, 0x35, 0x14, 0xd4, 0x03, + 0x7d, 0x86, 0x5d, 0xa6, 0x33, 0x39, 0x1c, 0x4e, 0x80, 0x81, 0x1d, 0x2c, 0x3f, 0x1b, 0x23, 0xf6, + 0x13, 0xbe, 0x09, 0xf2, 0xbb, 0x96, 0x1b, 0x27, 0xad, 0x80, 0xfe, 0x9d, 0xf5, 0x48, 0xe8, 0x7d, + 0x31, 0x77, 0xdd, 0x98, 0x79, 0xdf, 0x00, 0xd3, 0xdd, 0x4f, 0x95, 0xcf, 0xcb, 0xa2, 0x9f, 0x19, + 0x60, 0xb2, 0xe3, 0x00, 0xe9, 0x62, 0x8c, 0xdb, 0x6e, 0xcc, 0xbd, 0x3e, 0x9e, 0x04, 0x62, 0x23, + 0xf0, 0x8a, 0x56, 0xb7, 0xec, 0x87, 0x06, 0x98, 0xc8, 0x26, 0xe6, 0xcf, 0xc9, 0x4b, 0xa5, 0xf7, + 0x72, 0x60, 0xba, 0x7b, 0x0d, 0x0e, 0x3d, 0xd5, 0x5d, 0xe8, 0x7b, 0x83, 0xa6, 0x5b, 0xcb, 0xf6, + 0x1d, 0x03, 0x8c, 0xbe, 0xad, 0xe4, 0x92, 0xaf, 0x99, 0xfd, 0xec, 0x0a, 0x25, 0x47, 0x5f, 0xca, + 0xa0, 0x48, 0x87, 0x2c, 0xfd, 0xd6, 0x00, 0x53, 0x5d, 0x8f, 0x73, 0x78, 0x05, 0x0c, 0x59, 0xae, + 0x1b, 0xec, 0x89, 0x6e, 0x9e, 0xd6, 0x96, 0x5f, 0xe4, 0x54, 0x24, 0xb9, 0x9a, 0xcf, 0x72, 0x9f, + 0x81, 0xcf, 0x4a, 0x7f, 0x30, 0xc0, 0x85, 0x07, 0x45, 0xdd, 0x67, 0xbd, 0x86, 0xf3, 0xa0, 0x20, + 0x8b, 0xed, 0x03, 0xbe, 0x7e, 0x32, 0xbb, 0xca, 0x8c, 0xc0, 0x5f, 0xcb, 0x88, 0x5f, 0xa5, 0x5f, + 0x1a, 0x60, 0xa2, 0x8a, 0xc9, 0xae, 0x53, 0xc3, 0x08, 0xd7, 0x31, 0xc1, 0x7e, 0x0d, 0xc3, 0x05, + 0x30, 0xc2, 0xbf, 0x36, 0x86, 0x56, 0x2d, 0xf9, 0x46, 0x32, 0x29, 0x1d, 0x3d, 0x72, 0x3b, 0x61, + 0xa0, 0x54, 0x46, 0x7d, 0x4f, 0xc9, 0xf5, 0xfc, 0x9e, 0x72, 0x01, 0x0c, 0x86, 0x69, 0x03, 0xb8, + 0xc0, 0xb8, 0xbc, 0xe7, 0xcb, 0xa9, 0x9c, 0x1b, 0x90, 0x88, 0x77, 0xb9, 0xf2, 0x92, 0x1b, 0x90, + 0x08, 0x71, 0x6a, 0xe9, 0x4f, 0x39, 0x70, 0xa6, 0x3d, 0x3f, 0x33, 0x40, 0x12, 0xbb, 0x1d, 0x1f, + 0x70, 0x18, 0x0f, 0x71, 0x8e, 0xfe, 0x6e, 0x20, 0xf7, 0xe0, 0x77, 0x03, 0xf0, 0x25, 0x30, 0x29, + 0x7f, 0xae, 0xec, 0x87, 0x04, 0x53, 
0xfe, 0x65, 0x72, 0xa0, 0xfd, 0xbd, 0xdf, 0x5a, 0x56, 0x00, + 0x75, 0x8e, 0x81, 0x5f, 0xc9, 0xbc, 0x69, 0xb8, 0x9c, 0xbe, 0x67, 0x60, 0xb5, 0x1d, 0x2f, 0x1d, + 0xee, 0xb1, 0x2d, 0xbf, 0x42, 0x48, 0x40, 0x32, 0x0f, 0x1d, 0x16, 0xc0, 0x48, 0x9d, 0x09, 0xf0, + 0x3e, 0x79, 0xbe, 0xdd, 0xe9, 0x37, 0x12, 0x06, 0x4a, 0x65, 0xe0, 0xd7, 0xc0, 0x78, 0x10, 0x8a, + 0x2a, 0x76, 0xdd, 0xb5, 0xab, 0xd8, 0xad, 0xf3, 0x8e, 0x5e, 0x21, 0x69, 0xbb, 0xb6, 0xb1, 0x50, + 0x56, 0xb6, 0xf4, 0x47, 0x03, 0x9c, 0x4d, 0x1e, 0x13, 0xb9, 0x0e, 0xf6, 0xa3, 0xa5, 0xc0, 0xaf, + 0x3b, 0x0d, 0x78, 0x5e, 0xb4, 0x4f, 0xb5, 0x9e, 0x64, 0xd2, 0x3a, 0x85, 0xf7, 0xc1, 0x30, 0x15, + 0xb1, 0x22, 0xc3, 0xf8, 0x95, 0xa3, 0x87, 0x71, 0x36, 0xe8, 0x44, 0xf5, 0x97, 0x50, 0x13, 0x1c, + 0x16, 0xc9, 0x35, 0xab, 0x12, 0xfb, 0xb6, 0x6c, 0xa1, 0x8f, 0x89, 0x48, 0x5e, 0x5a, 0x14, 0x34, + 0xa4, 0xb8, 0xa5, 0x7f, 0x18, 0x60, 0xb2, 0xe3, 0x71, 0x14, 0xfc, 0x9e, 0x01, 0xc6, 0x6a, 0xda, + 0xf4, 0x64, 0x3e, 0x58, 0x3b, 0xfe, 0x03, 0x2c, 0x4d, 0xa9, 0x28, 0xa1, 0x74, 0x0a, 0x6a, 0x03, + 0x85, 0x9b, 0xc0, 0xac, 0x65, 0xde, 0x21, 0x66, 0xbe, 0x6c, 0x5e, 0x68, 0x35, 0x8b, 0xe6, 0x52, + 0x0f, 0x19, 0xd4, 0x73, 0x74, 0xe5, 0x5b, 0x1f, 0x7d, 0x3a, 0x7b, 0xea, 0xe3, 0x4f, 0x67, 0x4f, + 0x7d, 0xf2, 0xe9, 0xec, 0xa9, 0x77, 0x5a, 0xb3, 0xc6, 0x47, 0xad, 0x59, 0xe3, 0xe3, 0xd6, 0xac, + 0xf1, 0x49, 0x6b, 0xd6, 0xf8, 0x6b, 0x6b, 0xd6, 0xf8, 0xc9, 0xdf, 0x66, 0x4f, 0xbd, 0x7e, 0xfd, + 0xa8, 0xaf, 0x8f, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0x28, 0x77, 0xf5, 0x22, 0xd1, 0x2c, 0x00, + 0x00, } func (m *ConversionRequest) Marshal() (dAtA []byte, err error) { @@ -2633,6 +2635,16 @@ func (m *ValidationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.OptionalOldSelf != nil { + i-- + if *m.OptionalOldSelf { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } i -= len(m.FieldPath) copy(dAtA[i:], m.FieldPath) i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) @@ -3367,6 +3379,9 @@ func (m *ValidationRule) Size() (n int) { } l = len(m.FieldPath) n += 1 + l + sovGenerated(uint64(l)) + if m.OptionalOldSelf != nil { + n += 2 + } return n } @@ -3845,6 +3860,7 @@ func (this *ValidationRule) String() string { `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, `Reason:` + valueToStringGenerated(this.Reason) + `,`, `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, + `OptionalOldSelf:` + valueToStringGenerated(this.OptionalOldSelf) + `,`, `}`, }, "") return s @@ -9008,6 +9024,27 @@ func (m *ValidationRule) Unmarshal(dAtA []byte) error { } m.FieldPath = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OptionalOldSelf", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OptionalOldSelf = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto index 578d018a7..3c39d63a5 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto @@ -658,6 +658,18 @@ message ValidationRule { // - 
'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with // non-intersecting keys are appended, retaining their partial order. + // + // If `rule` makes use of the `oldSelf` variable it is implicitly a + // `transition rule`. + // + // By default, the `oldSelf` variable is the same type as `self`. + // When `optionalOldSelf` is true, the `oldSelf` variable is a CEL optional + // variable whose value() is the same type as `self`. + // See the documentation for the `optionalOldSelf` field for details. + // + // Transition rules by default are applied only on UPDATE requests and are + // skipped if an old value could not be found. You can opt a transition + // rule into unconditional evaluation by setting `optionalOldSelf` to true. optional string rule = 1; // Message represents the message displayed when validation fails. The message is required if the Rule contains @@ -698,6 +710,24 @@ message ValidationRule { // e.g. for attribute `foo.34$` appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']` // +optional optional string fieldPath = 5; + + // optionalOldSelf is used to opt a transition rule into evaluation + // even when the object is first created, or if the old object is + // missing the value. + // + // When enabled `oldSelf` will be a CEL optional whose value will be + // `None` if there is no old value, or when the object is initially created. + // + // You may check for presence of oldSelf using `oldSelf.hasValue()` and + // unwrap it after checking using `oldSelf.value()`. Check the CEL + // documentation for Optional types for more information: + // https://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes + // + // May not be set unless `oldSelf` is used in `rule`. + // + // +featureGate=CRDValidationRatcheting + // +optional + optional bool optionalOldSelf = 6; } // WebhookClientConfig contains the information to make a TLS connection with the webhook. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go index 1c90d464a..a81451ad6 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go @@ -249,6 +249,19 @@ type ValidationRule struct { // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with // non-intersecting keys are appended, retaining their partial order. + // + // If `rule` makes use of the `oldSelf` variable it is implicitly a + // `transition rule`. + // + // By default, the `oldSelf` variable is the same type as `self`. + // When `optionalOldSelf` is true, the `oldSelf` variable is a CEL optional + // variable whose value() is the same type as `self`. + // See the documentation for the `optionalOldSelf` field for details. + // + // Transition rules by default are applied only on UPDATE requests and are + // skipped if an old value could not be found. You can opt a transition + // rule into unconditional evaluation by setting `optionalOldSelf` to true. + // Rule string `json:"rule" protobuf:"bytes,1,opt,name=rule"` // Message represents the message displayed when validation fails. 
The message is required if the Rule contains // line breaks. The message must not contain line breaks. @@ -285,6 +298,24 @@ type ValidationRule struct { // e.g. for attribute `foo.34$` appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']` // +optional FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,5,opt,name=fieldPath"` + + // optionalOldSelf is used to opt a transition rule into evaluation + // even when the object is first created, or if the old object is + // missing the value. + // + // When enabled `oldSelf` will be a CEL optional whose value will be + // `None` if there is no old value, or when the object is initially created. + // + // You may check for presence of oldSelf using `oldSelf.hasValue()` and + // unwrap it after checking using `oldSelf.value()`. Check the CEL + // documentation for Optional types for more information: + // https://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes + // + // May not be set unless `oldSelf` is used in `rule`. + // + // +featureGate=CRDValidationRatcheting + // +optional + OptionalOldSelf *bool `json:"optionalOldSelf,omitempty" protobuf:"bytes,6,opt,name=optionalOldSelf"` } // JSON represents any valid JSON value. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go index 0a82e4d8c..405021bf3 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go @@ -1261,6 +1261,7 @@ func autoConvert_v1_ValidationRule_To_apiextensions_ValidationRule(in *Validatio out.MessageExpression = in.MessageExpression out.Reason = (*apiextensions.FieldValueErrorReason)(unsafe.Pointer(in.Reason)) out.FieldPath = in.FieldPath + out.OptionalOldSelf = (*bool)(unsafe.Pointer(in.OptionalOldSelf)) return nil } @@ -1275,6 +1276,7 @@ func autoConvert_apiextensions_ValidationRule_To_v1_ValidationRule(in *apiextens out.MessageExpression = in.MessageExpression out.Reason = (*FieldValueErrorReason)(unsafe.Pointer(in.Reason)) out.FieldPath = in.FieldPath + out.OptionalOldSelf = (*bool)(unsafe.Pointer(in.OptionalOldSelf)) return nil } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go index b4347b8db..bc23fcd86 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go @@ -619,6 +619,11 @@ func (in *ValidationRule) DeepCopyInto(out *ValidationRule) { *out = new(FieldValueErrorReason) **out = **in } + if in.OptionalOldSelf != nil { + in, out := &in.OptionalOldSelf, &out.OptionalOldSelf + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go index f8a5ffbfb..b5e5c35c5 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go @@ -536,6 +536,11 @@ func (in *ValidationRule) DeepCopyInto(out *ValidationRule) { *out = new(FieldValueErrorReason) **out = **in } + if 
in.OptionalOldSelf != nil { + in, out := &in.OptionalOldSelf, &out.OptionalOldSelf + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go b/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go index 60c8209de..cbdf2eeb8 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go @@ -22,14 +22,15 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// SetStatusCondition sets the corresponding condition in conditions to newCondition. +// SetStatusCondition sets the corresponding condition in conditions to newCondition and returns true +// if the conditions are changed by this call. // conditions must be non-nil. // 1. if the condition of the specified type already exists (all fields of the existing condition are updated to // newCondition, LastTransitionTime is set to now if the new status differs from the old status) // 2. if a condition of the specified type does not exist (LastTransitionTime is set to now() if unset, and newCondition is appended) -func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) { +func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) (changed bool) { if conditions == nil { - return + return false } existingCondition := FindStatusCondition(*conditions, newCondition.Type) if existingCondition == nil { @@ -37,7 +38,7 @@ func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Cond newCondition.LastTransitionTime = metav1.NewTime(time.Now()) } *conditions = append(*conditions, newCondition) - return + return true } if existingCondition.Status != newCondition.Status { @@ -47,18 +48,31 @@ func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Cond } else { existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) } + changed = true } - existingCondition.Reason = newCondition.Reason - existingCondition.Message = newCondition.Message - existingCondition.ObservedGeneration = newCondition.ObservedGeneration + if existingCondition.Reason != newCondition.Reason { + existingCondition.Reason = newCondition.Reason + changed = true + } + if existingCondition.Message != newCondition.Message { + existingCondition.Message = newCondition.Message + changed = true + } + if existingCondition.ObservedGeneration != newCondition.ObservedGeneration { + existingCondition.ObservedGeneration = newCondition.ObservedGeneration + changed = true + } + + return changed } -// RemoveStatusCondition removes the corresponding conditionType from conditions. +// RemoveStatusCondition removes the corresponding conditionType from conditions if present. Returns +// true if it was present and got removed. // conditions must be non-nil. -func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) { +func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) (removed bool) { if conditions == nil || len(*conditions) == 0 { - return + return false } newConditions := make([]metav1.Condition, 0, len(*conditions)-1) for _, condition := range *conditions { @@ -67,7 +81,10 @@ func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) } } + removed = len(*conditions) != len(newConditions) *conditions = newConditions + + return removed } // FindStatusCondition finds the conditionType in conditions. 
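The conditions.go hunk above changes meta.SetStatusCondition and meta.RemoveStatusCondition to report whether they actually mutated the condition slice. As a minimal sketch (not part of this patch) of how a caller might use the new return values — the "Ready" condition type, reason, and messages below are hypothetical examples, not names from this repository:

    package main

    import (
    	"fmt"
    	"time"

    	"k8s.io/apimachinery/pkg/api/meta"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
    	// conditions would normally live in a custom resource's status.
    	var conditions []metav1.Condition

    	// First set appends the condition and reports a change.
    	changed := meta.SetStatusCondition(&conditions, metav1.Condition{
    		Type:               "Ready", // hypothetical condition type
    		Status:             metav1.ConditionTrue,
    		Reason:             "Provisioned",
    		Message:            "resource is ready",
    		LastTransitionTime: metav1.NewTime(time.Now()),
    	})
    	fmt.Println(changed) // true

    	// Re-applying an identical condition reports no change, so a
    	// controller can skip a redundant status update.
    	changed = meta.SetStatusCondition(&conditions, metav1.Condition{
    		Type:    "Ready",
    		Status:  metav1.ConditionTrue,
    		Reason:  "Provisioned",
    		Message: "resource is ready",
    	})
    	fmt.Println(changed) // false

    	// RemoveStatusCondition now reports whether anything was removed.
    	fmt.Println(meta.RemoveStatusCondition(&conditions, "Ready")) // true
    }

Because the second call passes an identical condition, it returns false, which lets a controller avoid writing an unchanged status back to the API server.
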
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go index a8866a43e..2eebec667 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go @@ -203,6 +203,44 @@ func (a *int64Amount) Sub(b int64Amount) bool { return a.Add(int64Amount{value: -b.value, scale: b.scale}) } +// Mul multiplies the provided b to the current amount, or +// returns false if overflow or underflow would result. +func (a *int64Amount) Mul(b int64) bool { + switch { + case a.value == 0: + return true + case b == 0: + a.value = 0 + a.scale = 0 + return true + case a.scale == 0: + c, ok := int64Multiply(a.value, b) + if !ok { + return false + } + a.value = c + case a.scale > 0: + c, ok := int64Multiply(a.value, b) + if !ok { + return false + } + if _, ok = positiveScaleInt64(c, a.scale); !ok { + return false + } + a.value = c + default: + c, ok := int64Multiply(a.value, b) + if !ok { + return false + } + if _, ok = negativeScaleInt64(c, -a.scale); !ok { + return false + } + a.value = c + } + return true +} + // AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision // was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6. func (a int64Amount) AsScale(scale Scale) (int64Amount, bool) { diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index b47d554b3..69f1bc336 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -592,6 +592,16 @@ func (q *Quantity) Sub(y Quantity) { q.ToDec().d.Dec.Sub(q.d.Dec, y.AsDec()) } +// Mul multiplies the provided y to the current value. +// It will return false if the result is inexact. Otherwise, it will return true. +func (q *Quantity) Mul(y int64) bool { + q.s = "" + if q.d.Dec == nil && q.i.Mul(y) { + return true + } + return q.ToDec().d.Dec.Mul(q.d.Dec, inf.NewDec(y, inf.Scale(0))).UnscaledBig().IsInt64() +} + // Cmp returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the // quantity is greater than y. 
func (q *Quantity) Cmp(y Quantity) int { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index 2e33283ef..0f58d66c0 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -173,7 +173,7 @@ func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]s if str, ok := v.(string); ok { strMap[k] = str } else { - return nil, false, fmt.Errorf("%v accessor error: contains non-string key in the map: %v is of the type %T, expected string", jsonPath(fields), v, v) + return nil, false, fmt.Errorf("%v accessor error: contains non-string value in the map under key %q: %v is of the type %T, expected string", jsonPath(fields), k, v, v) } } return strMap, true, nil diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go index 7bd1a3a6a..f46a24cc6 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go @@ -257,3 +257,26 @@ func (d WithoutVersionDecoder) Decode(data []byte, defaults *schema.GroupVersion } return obj, gvk, err } + +type encoderWithAllocator struct { + encoder EncoderWithAllocator + memAllocator MemoryAllocator +} + +// NewEncoderWithAllocator returns a new encoder +func NewEncoderWithAllocator(e EncoderWithAllocator, a MemoryAllocator) Encoder { + return &encoderWithAllocator{ + encoder: e, + memAllocator: a, + } +} + +// Encode writes the provided object to the nested writer +func (e *encoderWithAllocator) Encode(obj Object, w io.Writer) error { + return e.encoder.EncodeWithAllocator(obj, w, e.memAllocator) +} + +// Identifier returns identifier of this encoder. +func (e *encoderWithAllocator) Identifier() Identifier { + return e.encoder.Identifier() +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go index 87b3fec3f..971c46d49 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go @@ -134,23 +134,3 @@ func (e *encoder) Encode(obj runtime.Object) error { e.buf.Reset() return err } - -type encoderWithAllocator struct { - writer io.Writer - encoder runtime.EncoderWithAllocator - memAllocator runtime.MemoryAllocator -} - -// NewEncoderWithAllocator returns a new streaming encoder -func NewEncoderWithAllocator(w io.Writer, e runtime.EncoderWithAllocator, a runtime.MemoryAllocator) Encoder { - return &encoderWithAllocator{ - writer: w, - encoder: e, - memAllocator: a, - } -} - -// Encode writes the provided object to the nested writer -func (e *encoderWithAllocator) Encode(obj runtime.Object) error { - return e.encoder.EncodeWithAllocator(obj, e.writer, e.memAllocator) -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go index 1328dd612..ad486d580 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go +++ b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go @@ -136,6 +136,19 @@ func (c *LRUExpireCache) Remove(key interface{}) { delete(c.entries, key) } +// RemoveAll removes all keys that match predicate. 
+func (c *LRUExpireCache) RemoveAll(predicate func(key any) bool) { + c.lock.Lock() + defer c.lock.Unlock() + + for key, element := range c.entries { + if predicate(key) { + c.evictionList.Remove(element) + delete(c.entries, key) + } + } +} + // Keys returns all unexpired keys in the cache. // // Keep in mind that subsequent calls to Get() for any of the returned keys diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go index 32f075782..a32fce5a0 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go @@ -17,6 +17,7 @@ limitations under the License. package httpstream import ( + "errors" "fmt" "io" "net/http" @@ -95,6 +96,26 @@ type Stream interface { Identifier() uint32 } +// UpgradeFailureError encapsulates the cause for why the streaming +// upgrade request failed. Implements error interface. +type UpgradeFailureError struct { + Cause error +} + +func (u *UpgradeFailureError) Error() string { + return fmt.Sprintf("unable to upgrade streaming request: %s", u.Cause) +} + +// IsUpgradeFailure returns true if the passed error is (or wrapped error contains) +// the UpgradeFailureError. +func IsUpgradeFailure(err error) bool { + if err == nil { + return false + } + var upgradeErr *UpgradeFailureError + return errors.As(err, &upgradeErr) +} + // IsUpgradeRequest returns true if the given request is a connection upgrade request func IsUpgradeRequest(req *http.Request) bool { for _, h := range req.Header[http.CanonicalHeaderKey(HeaderConnection)] { diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go index 7fe52ee56..c78326fa3 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go @@ -38,6 +38,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/util/httpstream" utilnet "k8s.io/apimachinery/pkg/util/net" + apiproxy "k8s.io/apimachinery/pkg/util/proxy" "k8s.io/apimachinery/third_party/forked/golang/netutil" ) @@ -68,6 +69,10 @@ type SpdyRoundTripper struct { // pingPeriod is a period for sending Ping frames over established // connections. pingPeriod time.Duration + + // upgradeTransport is an optional substitute for dialing if present. This field is + // mutually exclusive with the "tlsConfig", "Dialer", and "proxier". + upgradeTransport http.RoundTripper } var _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{} @@ -76,43 +81,61 @@ var _ utilnet.Dialer = &SpdyRoundTripper{} // NewRoundTripper creates a new SpdyRoundTripper that will use the specified // tlsConfig. -func NewRoundTripper(tlsConfig *tls.Config) *SpdyRoundTripper { +func NewRoundTripper(tlsConfig *tls.Config) (*SpdyRoundTripper, error) { return NewRoundTripperWithConfig(RoundTripperConfig{ - TLS: tlsConfig, + TLS: tlsConfig, + UpgradeTransport: nil, }) } // NewRoundTripperWithProxy creates a new SpdyRoundTripper that will use the // specified tlsConfig and proxy func. 
-func NewRoundTripperWithProxy(tlsConfig *tls.Config, proxier func(*http.Request) (*url.URL, error)) *SpdyRoundTripper { +func NewRoundTripperWithProxy(tlsConfig *tls.Config, proxier func(*http.Request) (*url.URL, error)) (*SpdyRoundTripper, error) { return NewRoundTripperWithConfig(RoundTripperConfig{ - TLS: tlsConfig, - Proxier: proxier, + TLS: tlsConfig, + Proxier: proxier, + UpgradeTransport: nil, }) } // NewRoundTripperWithConfig creates a new SpdyRoundTripper with the specified -// configuration. -func NewRoundTripperWithConfig(cfg RoundTripperConfig) *SpdyRoundTripper { +// configuration. Returns an error if the SpdyRoundTripper is misconfigured. +func NewRoundTripperWithConfig(cfg RoundTripperConfig) (*SpdyRoundTripper, error) { + // Process UpgradeTransport, which is mutually exclusive to TLSConfig and Proxier. + if cfg.UpgradeTransport != nil { + if cfg.TLS != nil || cfg.Proxier != nil { + return nil, fmt.Errorf("SpdyRoundTripper: UpgradeTransport is mutually exclusive to TLSConfig or Proxier") + } + tlsConfig, err := utilnet.TLSClientConfig(cfg.UpgradeTransport) + if err != nil { + return nil, fmt.Errorf("SpdyRoundTripper: Unable to retrieve TLSConfig from UpgradeTransport: %v", err) + } + cfg.TLS = tlsConfig + } if cfg.Proxier == nil { cfg.Proxier = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment) } return &SpdyRoundTripper{ - tlsConfig: cfg.TLS, - proxier: cfg.Proxier, - pingPeriod: cfg.PingPeriod, - } + tlsConfig: cfg.TLS, + proxier: cfg.Proxier, + pingPeriod: cfg.PingPeriod, + upgradeTransport: cfg.UpgradeTransport, + }, nil } // RoundTripperConfig is a set of options for an SpdyRoundTripper. type RoundTripperConfig struct { - // TLS configuration used by the round tripper. + // TLS configuration used by the round tripper if UpgradeTransport not present. TLS *tls.Config // Proxier is a proxy function invoked on each request. Optional. Proxier func(*http.Request) (*url.URL, error) // PingPeriod is a period for sending SPDY Pings on the connection. // Optional. PingPeriod time.Duration + // UpgradeTransport is a subtitute transport used for dialing. If set, + // this field will be used instead of "TLS" and "Proxier" for connection creation. + // Optional. + UpgradeTransport http.RoundTripper } // TLSClientConfig implements pkg/util/net.TLSClientConfigHolder for proper TLS checking during @@ -123,7 +146,13 @@ func (s *SpdyRoundTripper) TLSClientConfig() *tls.Config { // Dial implements k8s.io/apimachinery/pkg/util/net.Dialer. 
func (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) { - conn, err := s.dial(req) + var conn net.Conn + var err error + if s.upgradeTransport != nil { + conn, err = apiproxy.DialURL(req.Context(), req.URL, s.upgradeTransport) + } else { + conn, err = s.dial(req) + } if err != nil { return nil, err } diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn.go index 09f54a49c..7cfdd0632 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn.go +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn.go @@ -21,16 +21,19 @@ import ( "fmt" "io" "net/http" - "regexp" "strings" "time" "golang.org/x/net/websocket" - "k8s.io/klog/v2" + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/apimachinery/pkg/util/remotecommand" "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/klog/v2" ) +const WebSocketProtocolHeader = "Sec-Websocket-Protocol" + // The Websocket subprotocol "channel.k8s.io" prepends each binary message with a byte indicating // the channel number (zero indexed) the message was sent on. Messages in both directions should // prefix their messages with this channel byte. When used for remote execution, the channel numbers @@ -77,18 +80,30 @@ const ( ReadWriteChannel ) -var ( - // connectionUpgradeRegex matches any Connection header value that includes upgrade - connectionUpgradeRegex = regexp.MustCompile("(^|.*,\\s*)upgrade($|\\s*,)") -) - // IsWebSocketRequest returns true if the incoming request contains connection upgrade headers // for WebSockets. func IsWebSocketRequest(req *http.Request) bool { if !strings.EqualFold(req.Header.Get("Upgrade"), "websocket") { return false } - return connectionUpgradeRegex.MatchString(strings.ToLower(req.Header.Get("Connection"))) + return httpstream.IsUpgradeRequest(req) +} + +// IsWebSocketRequestWithStreamCloseProtocol returns true if the request contains headers +// identifying that it is requesting a websocket upgrade with a remotecommand protocol +// version that supports the "CLOSE" signal; false otherwise. +func IsWebSocketRequestWithStreamCloseProtocol(req *http.Request) bool { + if !IsWebSocketRequest(req) { + return false + } + requestedProtocols := strings.TrimSpace(req.Header.Get(WebSocketProtocolHeader)) + for _, requestedProtocol := range strings.Split(requestedProtocols, ",") { + if protocolSupportsStreamClose(strings.TrimSpace(requestedProtocol)) { + return true + } + } + + return false } // IgnoreReceives reads from a WebSocket until it is closed, then returns. If timeout is set, the @@ -172,15 +187,46 @@ func (conn *Conn) SetIdleTimeout(duration time.Duration) { conn.timeout = duration } +// SetWriteDeadline sets a timeout on writing to the websocket connection. The +// passed "duration" identifies how far into the future the write must complete +// by before the timeout fires. +func (conn *Conn) SetWriteDeadline(duration time.Duration) { + conn.ws.SetWriteDeadline(time.Now().Add(duration)) //nolint:errcheck +} + // Open the connection and create channels for reading and writing. It returns // the selected subprotocol, a slice of channels and an error. func (conn *Conn) Open(w http.ResponseWriter, req *http.Request) (string, []io.ReadWriteCloser, error) { + // serveHTTPComplete is channel that is closed/selected when "websocket#ServeHTTP" finishes. + serveHTTPComplete := make(chan struct{}) + // Ensure panic in spawned goroutine is propagated into the parent goroutine. 
+ panicChan := make(chan any, 1) go func() { - defer runtime.HandleCrash() - defer conn.Close() + // If websocket server returns, propagate panic if necessary. Otherwise, + // signal HTTPServe finished by closing "serveHTTPComplete". + defer func() { + if p := recover(); p != nil { + panicChan <- p + } else { + close(serveHTTPComplete) + } + }() websocket.Server{Handshake: conn.handshake, Handler: conn.handle}.ServeHTTP(w, req) }() - <-conn.ready + + // In normal circumstances, "websocket.Server#ServeHTTP" calls "initialize" which closes + // "conn.ready" and then blocks until serving is complete. + select { + case <-conn.ready: + klog.V(8).Infof("websocket server initialized--serving") + case <-serveHTTPComplete: + // websocket server returned before completing initialization; cleanup and return error. + conn.closeNonThreadSafe() //nolint:errcheck + return "", nil, fmt.Errorf("websocket server finished before becoming ready") + case p := <-panicChan: + panic(p) + } + rwc := make([]io.ReadWriteCloser, len(conn.channels)) for i := range conn.channels { rwc[i] = conn.channels[i] @@ -229,20 +275,37 @@ func (conn *Conn) resetTimeout() { } } -// Close is only valid after Open has been called -func (conn *Conn) Close() error { - <-conn.ready +// closeNonThreadSafe cleans up by closing streams and the websocket +// connection *without* waiting for the "ready" channel. +func (conn *Conn) closeNonThreadSafe() error { for _, s := range conn.channels { s.Close() } - conn.ws.Close() - return nil + var err error + if conn.ws != nil { + err = conn.ws.Close() + } + return err +} + +// Close is only valid after Open has been called +func (conn *Conn) Close() error { + <-conn.ready + return conn.closeNonThreadSafe() +} + +// protocolSupportsStreamClose returns true if the passed protocol +// supports the stream close signal (currently only V5 remotecommand); +// false otherwise. +func protocolSupportsStreamClose(protocol string) bool { + return protocol == remotecommand.StreamProtocolV5Name } // handle implements a websocket handler. func (conn *Conn) handle(ws *websocket.Conn) { - defer conn.Close() conn.initialize(ws) + defer conn.Close() + supportsStreamClose := protocolSupportsStreamClose(conn.selectedProtocol) for { conn.resetTimeout() @@ -256,6 +319,21 @@ func (conn *Conn) handle(ws *websocket.Conn) { if len(data) == 0 { continue } + if supportsStreamClose && data[0] == remotecommand.StreamClose { + if len(data) != 2 { + klog.Errorf("Single channel byte should follow stream close signal. Got %d bytes", len(data)-1) + break + } else { + channel := data[1] + if int(channel) >= len(conn.channels) { + klog.Errorf("Close is targeted for a channel %d that is not valid, possible protocol error", channel) + break + } + klog.V(4).Infof("Received half-close signal from client; close %d stream", channel) + conn.channels[channel].Close() // After first Close, other closes are noop. + } + continue + } channel := data[0] if conn.codec == base64Codec { channel = channel - '0' diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/doc.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/doc.go index a1aa1688b..3dd6f828b 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/doc.go @@ -16,6 +16,54 @@ limitations under the License. // Package wsstream contains utilities for streaming content over WebSockets. // The Conn type allows callers to multiplex multiple read/write channels over -// a single websocket. 
The Reader type allows an io.Reader to be copied over -// a websocket channel as binary content. +// a single websocket. +// +// "channel.k8s.io" +// +// The Websocket RemoteCommand subprotocol "channel.k8s.io" prepends each binary message with a +// byte indicating the channel number (zero indexed) the message was sent on. Messages in both +// directions should prefix their messages with this channel byte. Used for remote execution, +// the channel numbers are by convention defined to match the POSIX file-descriptors assigned +// to STDIN, STDOUT, and STDERR (0, 1, and 2). No other conversion is performed on the raw +// subprotocol - writes are sent as they are received by the server. +// +// Example client session: +// +// CONNECT http://server.com with subprotocol "channel.k8s.io" +// WRITE []byte{0, 102, 111, 111, 10} # send "foo\n" on channel 0 (STDIN) +// READ []byte{1, 10} # receive "\n" on channel 1 (STDOUT) +// CLOSE +// +// "v2.channel.k8s.io" +// +// The second Websocket subprotocol version "v2.channel.k8s.io" is the same as version 1, +// but it is the first "versioned" subprotocol. +// +// "v3.channel.k8s.io" +// +// The third version of the Websocket RemoteCommand subprotocol adds another channel +// for terminal resizing events. This channel is prepended with the byte '3', and it +// transmits two window sizes (encoding TerminalSize struct) with integers in the range +// (0,65536]. +// +// "v4.channel.k8s.io" +// +// The fourth version of the Websocket RemoteCommand subprotocol adds a channel for +// errors. This channel returns structured errors containing process exit codes. The +// error is "apierrors.StatusError{}". +// +// "v5.channel.k8s.io" +// +// The fifth version of the Websocket RemoteCommand subprotocol adds a CLOSE signal, +// which is sent as the first byte of the message. The second byte is the channel +// id. This CLOSE signal is handled by the websocket server by closing the stream, +// allowing the other streams to complete transmission if necessary, and gracefully +// shutdown the connection. +// +// Example client session: +// +// CONNECT http://server.com with subprotocol "v5.channel.k8s.io" +// WRITE []byte{0, 102, 111, 111, 10} # send "foo\n" on channel 0 (STDIN) +// WRITE []byte{255, 0} # send CLOSE signal (STDIN) +// CLOSE package wsstream // import "k8s.io/apimachinery/pkg/util/httpstream/wsstream" diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index 0ea88156b..f358c794d 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -72,14 +72,14 @@ func FromString(val string) IntOrString { return IntOrString{Type: String, StrVal: val} } -// Parse the given string and try to convert it to an integer before +// Parse the given string and try to convert it to an int32 integer before // setting it as a string value. func Parse(val string) IntOrString { - i, err := strconv.Atoi(val) + i, err := strconv.ParseInt(val, 10, 32) if err != nil { return FromString(val) } - return FromInt(i) + return FromInt32(int32(i)) } // UnmarshalJSON implements the json.Unmarshaller interface. 
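The intstr.Parse hunk above replaces strconv.Atoi with a 32-bit parse, so numeric strings that do not fit in an int32 now fall back to the string form rather than overflowing the int32 field. A minimal sketch of that behaviour (not part of this patch; the port values are illustrative):

    package main

    import (
    	"fmt"

    	"k8s.io/apimachinery/pkg/util/intstr"
    )

    func main() {
    	// Fits in an int32, so Parse keeps it as an integer value.
    	small := intstr.Parse("8080")
    	fmt.Println(small.Type == intstr.Int, small.IntValue()) // true 8080

    	// Overflows int32; with the change above Parse now falls back to
    	// the string form instead of storing an overflowed integer.
    	big := intstr.Parse("3000000000")
    	fmt.Println(big.Type == intstr.String, big.String()) // true 3000000000

    	// Non-numeric input was always stored as a string.
    	named := intstr.Parse("http")
    	fmt.Println(named.Type == intstr.String) // true
    }
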
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go index 2112c9ab7..786ad991c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/structured-merge-diff/v4/fieldpath" "sigs.k8s.io/structured-merge-diff/v4/merge" + "sigs.k8s.io/structured-merge-diff/v4/typed" ) type structuredMergeManager struct { @@ -95,11 +96,11 @@ func (f *structuredMergeManager) Update(liveObj, newObj runtime.Object, managed if err != nil { return nil, nil, fmt.Errorf("failed to convert live object (%v) to proper version: %v", objectGVKNN(liveObj), err) } - newObjTyped, err := f.typeConverter.ObjectToTyped(newObjVersioned) + newObjTyped, err := f.typeConverter.ObjectToTyped(newObjVersioned, typed.AllowDuplicates) if err != nil { return nil, nil, fmt.Errorf("failed to convert new object (%v) to smd typed: %v", objectGVKNN(newObjVersioned), err) } - liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned, typed.AllowDuplicates) if err != nil { return nil, nil, fmt.Errorf("failed to convert live object (%v) to smd typed: %v", objectGVKNN(liveObjVersioned), err) } @@ -139,11 +140,13 @@ func (f *structuredMergeManager) Apply(liveObj, patchObj runtime.Object, managed return nil, nil, fmt.Errorf("failed to convert live object (%v) to proper version: %v", objectGVKNN(liveObj), err) } + // Don't allow duplicates in the applied object. patchObjTyped, err := f.typeConverter.ObjectToTyped(patchObj) if err != nil { return nil, nil, fmt.Errorf("failed to create typed patch object (%v): %v", objectGVKNN(patchObj), err) } - liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned, typed.AllowDuplicates) if err != nil { return nil, nil, fmt.Errorf("failed to create typed live object (%v): %v", objectGVKNN(liveObjVersioned), err) } diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go index 1ac96d7f7..c6449467c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go @@ -32,7 +32,7 @@ import ( // TypeConverter allows you to convert from runtime.Object to // typed.TypedValue and the other way around. 
type TypeConverter interface { - ObjectToTyped(runtime.Object) (*typed.TypedValue, error) + ObjectToTyped(runtime.Object, ...typed.ValidationOptions) (*typed.TypedValue, error) TypedToObject(*typed.TypedValue) (runtime.Object, error) } @@ -54,7 +54,7 @@ func NewTypeConverter(openapiSpec map[string]*spec.Schema, preserveUnknownFields return &typeConverter{parser: tr}, nil } -func (c *typeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) { +func (c *typeConverter) ObjectToTyped(obj runtime.Object, opts ...typed.ValidationOptions) (*typed.TypedValue, error) { gvk := obj.GetObjectKind().GroupVersionKind() t := c.parser[gvk] if t == nil { @@ -62,9 +62,9 @@ func (c *typeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, er } switch o := obj.(type) { case *unstructured.Unstructured: - return t.FromUnstructured(o.UnstructuredContent()) + return t.FromUnstructured(o.UnstructuredContent(), opts...) default: - return t.FromStructured(obj) + return t.FromStructured(obj, opts...) } } @@ -84,12 +84,12 @@ func NewDeducedTypeConverter() TypeConverter { } // ObjectToTyped converts an object into a TypedValue with a "deduced type". -func (deducedTypeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) { +func (deducedTypeConverter) ObjectToTyped(obj runtime.Object, opts ...typed.ValidationOptions) (*typed.TypedValue, error) { switch o := obj.(type) { case *unstructured.Unstructured: - return typed.DeducedParseableType.FromUnstructured(o.UnstructuredContent()) + return typed.DeducedParseableType.FromUnstructured(o.UnstructuredContent(), opts...) default: - return typed.DeducedParseableType.FromStructured(obj) + return typed.DeducedParseableType.FromStructured(obj, opts...) } } diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/dial.go b/vendor/k8s.io/apimachinery/pkg/util/proxy/dial.go new file mode 100644 index 000000000..e5196d1ee --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/proxy/dial.go @@ -0,0 +1,122 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proxy + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "net/http" + "net/url" + + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/third_party/forked/golang/netutil" + "k8s.io/klog/v2" +) + +// DialURL will dial the specified URL using the underlying dialer held by the passed +// RoundTripper. The primary use of this method is to support proxying upgradable connections. +// For this reason this method will prefer to negotiate http/1.1 if the URL scheme is https. 
+// If you wish to ensure ALPN negotiates http2 then set NextProto=[]string{"http2"} in the +// TLSConfig of the http.Transport +func DialURL(ctx context.Context, url *url.URL, transport http.RoundTripper) (net.Conn, error) { + dialAddr := netutil.CanonicalAddr(url) + + dialer, err := utilnet.DialerFor(transport) + if err != nil { + klog.V(5).Infof("Unable to unwrap transport %T to get dialer: %v", transport, err) + } + + switch url.Scheme { + case "http": + if dialer != nil { + return dialer(ctx, "tcp", dialAddr) + } + var d net.Dialer + return d.DialContext(ctx, "tcp", dialAddr) + case "https": + // Get the tls config from the transport if we recognize it + tlsConfig, err := utilnet.TLSClientConfig(transport) + if err != nil { + klog.V(5).Infof("Unable to unwrap transport %T to get at TLS config: %v", transport, err) + } + + if dialer != nil { + // We have a dialer; use it to open the connection, then + // create a tls client using the connection. + netConn, err := dialer(ctx, "tcp", dialAddr) + if err != nil { + return nil, err + } + if tlsConfig == nil { + // tls.Client requires non-nil config + klog.Warning("using custom dialer with no TLSClientConfig. Defaulting to InsecureSkipVerify") + // tls.Handshake() requires ServerName or InsecureSkipVerify + tlsConfig = &tls.Config{ + InsecureSkipVerify: true, + } + } else if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify { + // tls.HandshakeContext() requires ServerName or InsecureSkipVerify + // infer the ServerName from the hostname we're connecting to. + inferredHost := dialAddr + if host, _, err := net.SplitHostPort(dialAddr); err == nil { + inferredHost = host + } + // Make a copy to avoid polluting the provided config + tlsConfigCopy := tlsConfig.Clone() + tlsConfigCopy.ServerName = inferredHost + tlsConfig = tlsConfigCopy + } + + // Since this method is primarily used within a "Connection: Upgrade" call we assume the caller is + // going to write HTTP/1.1 request to the wire. http2 should not be allowed in the TLSConfig.NextProtos, + // so we explicitly set that here. We only do this check if the TLSConfig support http/1.1. + if supportsHTTP11(tlsConfig.NextProtos) { + tlsConfig = tlsConfig.Clone() + tlsConfig.NextProtos = []string{"http/1.1"} + } + + tlsConn := tls.Client(netConn, tlsConfig) + if err := tlsConn.HandshakeContext(ctx); err != nil { + netConn.Close() + return nil, err + } + return tlsConn, nil + } else { + // Dial. + tlsDialer := tls.Dialer{ + Config: tlsConfig, + } + return tlsDialer.DialContext(ctx, "tcp", dialAddr) + } + default: + return nil, fmt.Errorf("unknown scheme: %s", url.Scheme) + } +} + +func supportsHTTP11(nextProtos []string) bool { + if len(nextProtos) == 0 { + return true + } + for _, proto := range nextProtos { + if proto == "http/1.1" { + return true + } + } + return false +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/doc.go b/vendor/k8s.io/apimachinery/pkg/util/proxy/doc.go similarity index 68% rename from vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/doc.go rename to vendor/k8s.io/apimachinery/pkg/util/proxy/doc.go index 219f7df49..d14ecfad5 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/util/proxy/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Kubernetes Authors. +Copyright 2014 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,5 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package virtualnetworklinksclient implements the client for Virtual Network Link. -package virtualnetworklinksclient // import "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient" +// Package proxy provides transport and upgrade support for proxies. +package proxy // import "k8s.io/apimachinery/pkg/util/proxy" diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go b/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go new file mode 100644 index 000000000..5a2dd6e14 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go @@ -0,0 +1,272 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proxy + +import ( + "bytes" + "compress/flate" + "compress/gzip" + "fmt" + "io" + "net/http" + "net/url" + "path" + "strings" + + "golang.org/x/net/html" + "golang.org/x/net/html/atom" + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/sets" +) + +// atomsToAttrs states which attributes of which tags require URL substitution. +// Sources: http://www.w3.org/TR/REC-html40/index/attributes.html +// +// http://www.w3.org/html/wg/drafts/html/master/index.html#attributes-1 +var atomsToAttrs = map[atom.Atom]sets.String{ + atom.A: sets.NewString("href"), + atom.Applet: sets.NewString("codebase"), + atom.Area: sets.NewString("href"), + atom.Audio: sets.NewString("src"), + atom.Base: sets.NewString("href"), + atom.Blockquote: sets.NewString("cite"), + atom.Body: sets.NewString("background"), + atom.Button: sets.NewString("formaction"), + atom.Command: sets.NewString("icon"), + atom.Del: sets.NewString("cite"), + atom.Embed: sets.NewString("src"), + atom.Form: sets.NewString("action"), + atom.Frame: sets.NewString("longdesc", "src"), + atom.Head: sets.NewString("profile"), + atom.Html: sets.NewString("manifest"), + atom.Iframe: sets.NewString("longdesc", "src"), + atom.Img: sets.NewString("longdesc", "src", "usemap"), + atom.Input: sets.NewString("src", "usemap", "formaction"), + atom.Ins: sets.NewString("cite"), + atom.Link: sets.NewString("href"), + atom.Object: sets.NewString("classid", "codebase", "data", "usemap"), + atom.Q: sets.NewString("cite"), + atom.Script: sets.NewString("src"), + atom.Source: sets.NewString("src"), + atom.Video: sets.NewString("poster", "src"), + + // TODO: css URLs hidden in style elements. 
+} + +// Transport is a transport for text/html content that replaces URLs in html +// content with the prefix of the proxy server +type Transport struct { + Scheme string + Host string + PathPrepend string + + http.RoundTripper +} + +// RoundTrip implements the http.RoundTripper interface +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + // Add reverse proxy headers. + forwardedURI := path.Join(t.PathPrepend, req.URL.EscapedPath()) + if strings.HasSuffix(req.URL.Path, "/") { + forwardedURI = forwardedURI + "/" + } + req.Header.Set("X-Forwarded-Uri", forwardedURI) + if len(t.Host) > 0 { + req.Header.Set("X-Forwarded-Host", t.Host) + } + if len(t.Scheme) > 0 { + req.Header.Set("X-Forwarded-Proto", t.Scheme) + } + + rt := t.RoundTripper + if rt == nil { + rt = http.DefaultTransport + } + resp, err := rt.RoundTrip(req) + + if err != nil { + return nil, errors.NewServiceUnavailable(fmt.Sprintf("error trying to reach service: %v", err)) + } + + if redirect := resp.Header.Get("Location"); redirect != "" { + targetURL, err := url.Parse(redirect) + if err != nil { + return nil, errors.NewInternalError(fmt.Errorf("error trying to parse Location header: %v", err)) + } + resp.Header.Set("Location", t.rewriteURL(targetURL, req.URL, req.Host)) + return resp, nil + } + + cType := resp.Header.Get("Content-Type") + cType = strings.TrimSpace(strings.SplitN(cType, ";", 2)[0]) + if cType != "text/html" { + // Do nothing, simply pass through + return resp, nil + } + + return t.rewriteResponse(req, resp) +} + +var _ = net.RoundTripperWrapper(&Transport{}) + +func (rt *Transport) WrappedRoundTripper() http.RoundTripper { + return rt.RoundTripper +} + +// rewriteURL rewrites a single URL to go through the proxy, if the URL refers +// to the same host as sourceURL, which is the page on which the target URL +// occurred, or if the URL matches the sourceRequestHost. +func (t *Transport) rewriteURL(url *url.URL, sourceURL *url.URL, sourceRequestHost string) string { + // Example: + // When API server processes a proxy request to a service (e.g. /api/v1/namespace/foo/service/bar/proxy/), + // the sourceURL.Host (i.e. req.URL.Host) is the endpoint IP address of the service. The + // sourceRequestHost (i.e. req.Host) is the Host header that specifies the host on which the + // URL is sought, which can be different from sourceURL.Host. For example, if user sends the + // request through "kubectl proxy" locally (i.e. localhost:8001/api/v1/namespace/foo/service/bar/proxy/), + // sourceRequestHost is "localhost:8001". + // + // If the service's response URL contains non-empty host, and url.Host is equal to either sourceURL.Host + // or sourceRequestHost, we should not consider the returned URL to be a completely different host. + // It's the API server's responsibility to rewrite a same-host-and-absolute-path URL and append the + // necessary URL prefix (i.e. /api/v1/namespace/foo/service/bar/proxy/). + isDifferentHost := url.Host != "" && url.Host != sourceURL.Host && url.Host != sourceRequestHost + isRelative := !strings.HasPrefix(url.Path, "/") + if isDifferentHost || isRelative { + return url.String() + } + + // Do not rewrite scheme and host if the Transport has empty scheme and host + // when targetURL already contains the sourceRequestHost + if !(url.Host == sourceRequestHost && t.Scheme == "" && t.Host == "") { + url.Scheme = t.Scheme + url.Host = t.Host + } + + origPath := url.Path + // Do not rewrite URL if the sourceURL already contains the necessary prefix. 
+ if strings.HasPrefix(url.Path, t.PathPrepend) { + return url.String() + } + url.Path = path.Join(t.PathPrepend, url.Path) + if strings.HasSuffix(origPath, "/") { + // Add back the trailing slash, which was stripped by path.Join(). + url.Path += "/" + } + + return url.String() +} + +// rewriteHTML scans the HTML for tags with url-valued attributes, and updates +// those values with the urlRewriter function. The updated HTML is output to the +// writer. +func rewriteHTML(reader io.Reader, writer io.Writer, urlRewriter func(*url.URL) string) error { + // Note: This assumes the content is UTF-8. + tokenizer := html.NewTokenizer(reader) + + var err error + for err == nil { + tokenType := tokenizer.Next() + switch tokenType { + case html.ErrorToken: + err = tokenizer.Err() + case html.StartTagToken, html.SelfClosingTagToken: + token := tokenizer.Token() + if urlAttrs, ok := atomsToAttrs[token.DataAtom]; ok { + for i, attr := range token.Attr { + if urlAttrs.Has(attr.Key) { + url, err := url.Parse(attr.Val) + if err != nil { + // Do not rewrite the URL if it isn't valid. It is intended not + // to error here to prevent the inability to understand the + // content of the body to cause a fatal error. + continue + } + token.Attr[i].Val = urlRewriter(url) + } + } + } + _, err = writer.Write([]byte(token.String())) + default: + _, err = writer.Write(tokenizer.Raw()) + } + } + if err != io.EOF { + return err + } + return nil +} + +// rewriteResponse modifies an HTML response by updating absolute links referring +// to the original host to instead refer to the proxy transport. +func (t *Transport) rewriteResponse(req *http.Request, resp *http.Response) (*http.Response, error) { + origBody := resp.Body + defer origBody.Close() + + newContent := &bytes.Buffer{} + var reader io.Reader = origBody + var writer io.Writer = newContent + encoding := resp.Header.Get("Content-Encoding") + switch encoding { + case "gzip": + var err error + reader, err = gzip.NewReader(reader) + if err != nil { + return nil, fmt.Errorf("errorf making gzip reader: %v", err) + } + gzw := gzip.NewWriter(writer) + defer gzw.Close() + writer = gzw + case "deflate": + var err error + reader = flate.NewReader(reader) + flw, err := flate.NewWriter(writer, flate.BestCompression) + if err != nil { + return nil, fmt.Errorf("errorf making flate writer: %v", err) + } + defer func() { + flw.Close() + flw.Flush() + }() + writer = flw + case "": + // This is fine + default: + // Some encoding we don't understand-- don't try to parse this + klog.Errorf("Proxy encountered encoding %v for text/html; can't understand this so not fixing links.", encoding) + return resp, nil + } + + urlRewriter := func(targetUrl *url.URL) string { + return t.rewriteURL(targetUrl, req.URL, req.Host) + } + err := rewriteHTML(reader, writer, urlRewriter) + if err != nil { + klog.Errorf("Failed to rewrite URLs: %v", err) + return resp, err + } + + resp.Body = io.NopCloser(newContent) + // Update header node with new content-length + // TODO: Remove any hash/signature headers here? + resp.Header.Del("Content-Length") + resp.ContentLength = int64(newContent.Len()) + + return resp, err +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go b/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go new file mode 100644 index 000000000..76acdfb4a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go @@ -0,0 +1,556 @@ +/* +Copyright 2017 The Kubernetes Authors. 
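A usage note on the Transport type vendored in transport.go above: it is an ordinary http.RoundTripper, so a client can point it at a backend and get the X-Forwarded-* headers plus HTML link rewriting automatically. This is only a minimal sketch; the scheme, host, prefix, and backend address are hypothetical, not part of this patch.

package main

import (
	"fmt"
	"net/http"

	"k8s.io/apimachinery/pkg/util/proxy"
)

func main() {
	// Rewritten links will point back at https://apiserver.example/some/proxy/prefix/...
	rt := &proxy.Transport{
		Scheme:       "https",
		Host:         "apiserver.example",
		PathPrepend:  "/some/proxy/prefix",
		RoundTripper: http.DefaultTransport,
	}
	client := &http.Client{Transport: rt}
	resp, err := client.Get("http://10.0.0.12:8080/ui/") // backend address is hypothetical
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}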
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proxy + +import ( + "bufio" + "bytes" + "fmt" + "io" + "log" + "net" + "net/http" + "net/http/httputil" + "net/url" + "os" + "strings" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/httpstream" + utilnet "k8s.io/apimachinery/pkg/util/net" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + + "github.com/mxk/go-flowrate/flowrate" + "k8s.io/klog/v2" +) + +// UpgradeRequestRoundTripper provides an additional method to decorate a request +// with any authentication or other protocol level information prior to performing +// an upgrade on the server. Any response will be handled by the intercepting +// proxy. +type UpgradeRequestRoundTripper interface { + http.RoundTripper + // WrapRequest takes a valid HTTP request and returns a suitably altered version + // of request with any HTTP level values required to complete the request half of + // an upgrade on the server. It does not get a chance to see the response and + // should bypass any request side logic that expects to see the response. + WrapRequest(*http.Request) (*http.Request, error) +} + +// UpgradeAwareHandler is a handler for proxy requests that may require an upgrade +type UpgradeAwareHandler struct { + // UpgradeRequired will reject non-upgrade connections if true. + UpgradeRequired bool + // Location is the location of the upstream proxy. It is used as the location to Dial on the upstream server + // for upgrade requests unless UseRequestLocationOnUpgrade is true. + Location *url.URL + // AppendLocationPath determines if the original path of the Location should be appended to the upstream proxy request path + AppendLocationPath bool + // Transport provides an optional round tripper to use to proxy. If nil, the default proxy transport is used + Transport http.RoundTripper + // UpgradeTransport, if specified, will be used as the backend transport when upgrade requests are provided. + // This allows clients to disable HTTP/2. + UpgradeTransport UpgradeRequestRoundTripper + // WrapTransport indicates whether the provided Transport should be wrapped with default proxy transport behavior (URL rewriting, X-Forwarded-* header setting) + WrapTransport bool + // UseRequestLocation will use the incoming request URL when talking to the backend server. + UseRequestLocation bool + // UseLocationHost overrides the HTTP host header in requests to the backend server to use the Host from Location. + // This will override the req.Host field of a request, while UseRequestLocation will override the req.URL field + // of a request. The req.URL.Host specifies the server to connect to, while the req.Host field + // specifies the Host header value to send in the HTTP request. If this is false, the incoming req.Host header will + // just be forwarded to the backend server. + UseLocationHost bool + // FlushInterval controls how often the standard HTTP proxy will flush content from the upstream. 
+ FlushInterval time.Duration + // MaxBytesPerSec controls the maximum rate for an upstream connection. No rate is imposed if the value is zero. + MaxBytesPerSec int64 + // Responder is passed errors that occur while setting up proxying. + Responder ErrorResponder + // Reject to forward redirect response + RejectForwardingRedirects bool +} + +const defaultFlushInterval = 200 * time.Millisecond + +// ErrorResponder abstracts error reporting to the proxy handler to remove the need to hardcode a particular +// error format. +type ErrorResponder interface { + Error(w http.ResponseWriter, req *http.Request, err error) +} + +// SimpleErrorResponder is the legacy implementation of ErrorResponder for callers that only +// service a single request/response per proxy. +type SimpleErrorResponder interface { + Error(err error) +} + +func NewErrorResponder(r SimpleErrorResponder) ErrorResponder { + return simpleResponder{r} +} + +type simpleResponder struct { + responder SimpleErrorResponder +} + +func (r simpleResponder) Error(w http.ResponseWriter, req *http.Request, err error) { + r.responder.Error(err) +} + +// upgradeRequestRoundTripper implements proxy.UpgradeRequestRoundTripper. +type upgradeRequestRoundTripper struct { + http.RoundTripper + upgrader http.RoundTripper +} + +var ( + _ UpgradeRequestRoundTripper = &upgradeRequestRoundTripper{} + _ utilnet.RoundTripperWrapper = &upgradeRequestRoundTripper{} +) + +// WrappedRoundTripper returns the round tripper that a caller would use. +func (rt *upgradeRequestRoundTripper) WrappedRoundTripper() http.RoundTripper { + return rt.RoundTripper +} + +// WriteToRequest calls the nested upgrader and then copies the returned request +// fields onto the passed request. +func (rt *upgradeRequestRoundTripper) WrapRequest(req *http.Request) (*http.Request, error) { + resp, err := rt.upgrader.RoundTrip(req) + if err != nil { + return nil, err + } + return resp.Request, nil +} + +// onewayRoundTripper captures the provided request - which is assumed to have +// been modified by other round trippers - and then returns a fake response. +type onewayRoundTripper struct{} + +// RoundTrip returns a simple 200 OK response that captures the provided request. +func (onewayRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + return &http.Response{ + Status: "200 OK", + StatusCode: http.StatusOK, + Body: io.NopCloser(&bytes.Buffer{}), + Request: req, + }, nil +} + +// MirrorRequest is a round tripper that can be called to get back the calling request as +// the core round tripper in a chain. +var MirrorRequest http.RoundTripper = onewayRoundTripper{} + +// NewUpgradeRequestRoundTripper takes two round trippers - one for the underlying TCP connection, and +// one that is able to write headers to an HTTP request. The request rt is used to set the request headers +// and that is written to the underlying connection rt. +func NewUpgradeRequestRoundTripper(connection, request http.RoundTripper) UpgradeRequestRoundTripper { + return &upgradeRequestRoundTripper{ + RoundTripper: connection, + upgrader: request, + } +} + +// normalizeLocation returns the result of parsing the full URL, with scheme set to http if missing +func normalizeLocation(location *url.URL) *url.URL { + normalized, _ := url.Parse(location.String()) + if len(normalized.Scheme) == 0 { + normalized.Scheme = "http" + } + return normalized +} + +// NewUpgradeAwareHandler creates a new proxy handler with a default flush interval. Responder is required for returning +// errors to the caller. 
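A rough sketch of how a caller might wire up an UpgradeAwareHandler via this constructor; the responder type, backend address, mount path, and listen port below are hypothetical, not taken from this patch.

package main

import (
	"log"
	"net/http"
	"net/url"

	"k8s.io/apimachinery/pkg/util/proxy"
)

// logResponder satisfies proxy.SimpleErrorResponder by logging proxy errors.
type logResponder struct{}

func (logResponder) Error(err error) { log.Printf("proxy error: %v", err) }

func main() {
	backend, err := url.Parse("http://10.0.0.12:8080/")
	if err != nil {
		log.Fatal(err)
	}
	handler := proxy.NewUpgradeAwareHandler(
		backend,
		nil,   // nil transport: fall back to the default proxy transport
		true,  // wrapTransport: add X-Forwarded-* headers and URL rewriting
		false, // upgradeRequired: plain (non-upgrade) requests are allowed too
		proxy.NewErrorResponder(logResponder{}),
	)
	http.Handle("/proxy/", handler)
	log.Fatal(http.ListenAndServe(":8443", nil))
}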
+func NewUpgradeAwareHandler(location *url.URL, transport http.RoundTripper, wrapTransport, upgradeRequired bool, responder ErrorResponder) *UpgradeAwareHandler { + return &UpgradeAwareHandler{ + Location: normalizeLocation(location), + Transport: transport, + WrapTransport: wrapTransport, + UpgradeRequired: upgradeRequired, + FlushInterval: defaultFlushInterval, + Responder: responder, + } +} + +func proxyRedirectsforRootPath(path string, w http.ResponseWriter, req *http.Request) bool { + redirect := false + method := req.Method + + // From pkg/genericapiserver/endpoints/handlers/proxy.go#ServeHTTP: + // Redirect requests with an empty path to a location that ends with a '/' + // This is essentially a hack for https://issue.k8s.io/4958. + // Note: Keep this code after tryUpgrade to not break that flow. + if len(path) == 0 && (method == http.MethodGet || method == http.MethodHead) { + var queryPart string + if len(req.URL.RawQuery) > 0 { + queryPart = "?" + req.URL.RawQuery + } + w.Header().Set("Location", req.URL.Path+"/"+queryPart) + w.WriteHeader(http.StatusMovedPermanently) + redirect = true + } + return redirect +} + +// ServeHTTP handles the proxy request +func (h *UpgradeAwareHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if h.tryUpgrade(w, req) { + return + } + if h.UpgradeRequired { + h.Responder.Error(w, req, errors.NewBadRequest("Upgrade request required")) + return + } + + loc := *h.Location + loc.RawQuery = req.URL.RawQuery + + // If original request URL ended in '/', append a '/' at the end of the + // of the proxy URL + if !strings.HasSuffix(loc.Path, "/") && strings.HasSuffix(req.URL.Path, "/") { + loc.Path += "/" + } + + proxyRedirect := proxyRedirectsforRootPath(loc.Path, w, req) + if proxyRedirect { + return + } + + if h.Transport == nil || h.WrapTransport { + h.Transport = h.defaultProxyTransport(req.URL, h.Transport) + } + + // WithContext creates a shallow clone of the request with the same context. + newReq := req.WithContext(req.Context()) + newReq.Header = utilnet.CloneHeader(req.Header) + if !h.UseRequestLocation { + newReq.URL = &loc + } + if h.UseLocationHost { + // exchanging req.Host with the backend location is necessary for backends that act on the HTTP host header (e.g. 
API gateways), + // because req.Host has preference over req.URL.Host in filling this header field + newReq.Host = h.Location.Host + } + + // create the target location to use for the reverse proxy + reverseProxyLocation := &url.URL{Scheme: h.Location.Scheme, Host: h.Location.Host} + if h.AppendLocationPath { + reverseProxyLocation.Path = h.Location.Path + } + + proxy := httputil.NewSingleHostReverseProxy(reverseProxyLocation) + proxy.Transport = h.Transport + proxy.FlushInterval = h.FlushInterval + proxy.ErrorLog = log.New(noSuppressPanicError{}, "", log.LstdFlags) + if h.RejectForwardingRedirects { + oldModifyResponse := proxy.ModifyResponse + proxy.ModifyResponse = func(response *http.Response) error { + code := response.StatusCode + if code >= 300 && code <= 399 && len(response.Header.Get("Location")) > 0 { + // close the original response + response.Body.Close() + msg := "the backend attempted to redirect this request, which is not permitted" + // replace the response + *response = http.Response{ + StatusCode: http.StatusBadGateway, + Status: fmt.Sprintf("%d %s", response.StatusCode, http.StatusText(response.StatusCode)), + Body: io.NopCloser(strings.NewReader(msg)), + ContentLength: int64(len(msg)), + } + } else { + if oldModifyResponse != nil { + if err := oldModifyResponse(response); err != nil { + return err + } + } + } + return nil + } + } + if h.Responder != nil { + // if an optional error interceptor/responder was provided wire it + // the custom responder might be used for providing a unified error reporting + // or supporting retry mechanisms by not sending non-fatal errors to the clients + proxy.ErrorHandler = h.Responder.Error + } + proxy.ServeHTTP(w, newReq) +} + +type noSuppressPanicError struct{} + +func (noSuppressPanicError) Write(p []byte) (n int, err error) { + // skip "suppressing panic for copyResponse error in test; copy error" error message + // that ends up in CI tests on each kube-apiserver termination as noise and + // everybody thinks this is fatal. + if strings.Contains(string(p), "suppressing panic") { + return len(p), nil + } + return os.Stderr.Write(p) +} + +// tryUpgrade returns true if the request was handled. +func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Request) bool { + if !httpstream.IsUpgradeRequest(req) { + klog.V(6).Infof("Request was not an upgrade") + return false + } + + var ( + backendConn net.Conn + rawResponse []byte + err error + ) + + location := *h.Location + if h.UseRequestLocation { + location = *req.URL + location.Scheme = h.Location.Scheme + location.Host = h.Location.Host + if h.AppendLocationPath { + location.Path = singleJoiningSlash(h.Location.Path, location.Path) + } + } + + clone := utilnet.CloneRequest(req) + // Only append X-Forwarded-For in the upgrade path, since httputil.NewSingleHostReverseProxy + // handles this in the non-upgrade path. 
+ utilnet.AppendForwardedForHeader(clone) + klog.V(6).Infof("Connecting to backend proxy (direct dial) %s\n Headers: %v", &location, clone.Header) + if h.UseLocationHost { + clone.Host = h.Location.Host + } + clone.URL = &location + backendConn, err = h.DialForUpgrade(clone) + if err != nil { + klog.V(6).Infof("Proxy connection error: %v", err) + h.Responder.Error(w, req, err) + return true + } + defer backendConn.Close() + + // determine the http response code from the backend by reading from rawResponse+backendConn + backendHTTPResponse, headerBytes, err := getResponse(io.MultiReader(bytes.NewReader(rawResponse), backendConn)) + if err != nil { + klog.V(6).Infof("Proxy connection error: %v", err) + h.Responder.Error(w, req, err) + return true + } + if len(headerBytes) > len(rawResponse) { + // we read beyond the bytes stored in rawResponse, update rawResponse to the full set of bytes read from the backend + rawResponse = headerBytes + } + + // If the backend did not upgrade the request, return an error to the client. If the response was + // an error, the error is forwarded directly after the connection is hijacked. Otherwise, just + // return a generic error here. + if backendHTTPResponse.StatusCode != http.StatusSwitchingProtocols && backendHTTPResponse.StatusCode < 400 { + err := fmt.Errorf("invalid upgrade response: status code %d", backendHTTPResponse.StatusCode) + klog.Errorf("Proxy upgrade error: %v", err) + h.Responder.Error(w, req, err) + return true + } + + // Once the connection is hijacked, the ErrorResponder will no longer work, so + // hijacking should be the last step in the upgrade. + requestHijacker, ok := w.(http.Hijacker) + if !ok { + klog.V(6).Infof("Unable to hijack response writer: %T", w) + h.Responder.Error(w, req, fmt.Errorf("request connection cannot be hijacked: %T", w)) + return true + } + requestHijackedConn, _, err := requestHijacker.Hijack() + if err != nil { + klog.V(6).Infof("Unable to hijack response: %v", err) + h.Responder.Error(w, req, fmt.Errorf("error hijacking connection: %v", err)) + return true + } + defer requestHijackedConn.Close() + + if backendHTTPResponse.StatusCode != http.StatusSwitchingProtocols { + // If the backend did not upgrade the request, echo the response from the backend to the client and return, closing the connection. + klog.V(6).Infof("Proxy upgrade error, status code %d", backendHTTPResponse.StatusCode) + // set read/write deadlines + deadline := time.Now().Add(10 * time.Second) + backendConn.SetReadDeadline(deadline) + requestHijackedConn.SetWriteDeadline(deadline) + // write the response to the client + err := backendHTTPResponse.Write(requestHijackedConn) + if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { + klog.Errorf("Error proxying data from backend to client: %v", err) + } + // Indicate we handled the request + return true + } + + // Forward raw response bytes back to client. + if len(rawResponse) > 0 { + klog.V(6).Infof("Writing %d bytes to hijacked connection", len(rawResponse)) + if _, err = requestHijackedConn.Write(rawResponse); err != nil { + utilruntime.HandleError(fmt.Errorf("Error proxying response from backend to client: %v", err)) + } + } + + // Proxy the connection. This is bidirectional, so we need a goroutine + // to copy in each direction. Once one side of the connection exits, we + // exit the function which performs cleanup and in the process closes + // the other half of the connection in the defer. 
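The comment above describes the usual two-goroutine pattern for pumping a hijacked connection in both directions; a stripped-down, standalone version of the same idea follows (names are illustrative only, not part of this patch).

package example

import (
	"io"
	"net"
)

// pump copies data in both directions and returns once either side finishes;
// the caller is expected to close both connections (e.g. via defer), which
// unblocks the remaining io.Copy.
func pump(client, backend net.Conn) {
	done := make(chan struct{}, 2)
	go func() { io.Copy(backend, client); done <- struct{}{} }() // client -> backend
	go func() { io.Copy(client, backend); done <- struct{}{} }() // backend -> client
	<-done
}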
+ writerComplete := make(chan struct{}) + readerComplete := make(chan struct{}) + + go func() { + var writer io.WriteCloser + if h.MaxBytesPerSec > 0 { + writer = flowrate.NewWriter(backendConn, h.MaxBytesPerSec) + } else { + writer = backendConn + } + _, err := io.Copy(writer, requestHijackedConn) + if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { + klog.Errorf("Error proxying data from client to backend: %v", err) + } + close(writerComplete) + }() + + go func() { + var reader io.ReadCloser + if h.MaxBytesPerSec > 0 { + reader = flowrate.NewReader(backendConn, h.MaxBytesPerSec) + } else { + reader = backendConn + } + _, err := io.Copy(requestHijackedConn, reader) + if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { + klog.Errorf("Error proxying data from backend to client: %v", err) + } + close(readerComplete) + }() + + // Wait for one half the connection to exit. Once it does the defer will + // clean up the other half of the connection. + select { + case <-writerComplete: + case <-readerComplete: + } + klog.V(6).Infof("Disconnecting from backend proxy %s\n Headers: %v", &location, clone.Header) + + return true +} + +// FIXME: Taken from net/http/httputil/reverseproxy.go as singleJoiningSlash is not exported to be re-used. +// See-also: https://github.com/golang/go/issues/44290 +func singleJoiningSlash(a, b string) string { + aslash := strings.HasSuffix(a, "/") + bslash := strings.HasPrefix(b, "/") + switch { + case aslash && bslash: + return a + b[1:] + case !aslash && !bslash: + return a + "/" + b + } + return a + b +} + +func (h *UpgradeAwareHandler) DialForUpgrade(req *http.Request) (net.Conn, error) { + if h.UpgradeTransport == nil { + return dial(req, h.Transport) + } + updatedReq, err := h.UpgradeTransport.WrapRequest(req) + if err != nil { + return nil, err + } + return dial(updatedReq, h.UpgradeTransport) +} + +// getResponseCode reads a http response from the given reader, returns the response, +// the bytes read from the reader, and any error encountered +func getResponse(r io.Reader) (*http.Response, []byte, error) { + rawResponse := bytes.NewBuffer(make([]byte, 0, 256)) + // Save the bytes read while reading the response headers into the rawResponse buffer + resp, err := http.ReadResponse(bufio.NewReader(io.TeeReader(r, rawResponse)), nil) + if err != nil { + return nil, nil, err + } + // return the http response and the raw bytes consumed from the reader in the process + return resp, rawResponse.Bytes(), nil +} + +// dial dials the backend at req.URL and writes req to it. 
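The getResponse helper above leans on io.TeeReader so that every byte consumed while parsing the status line and headers is also retained for replay to the client. A self-contained illustration of that trick (note that bufio may read ahead, which is why the caller above compares the captured length against the bytes it already held):

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	raw := "HTTP/1.1 101 Switching Protocols\r\nUpgrade: SPDY/3.1\r\nConnection: Upgrade\r\n\r\n"
	captured := &bytes.Buffer{}
	// Everything the parser pulls from the stream is duplicated into captured.
	resp, err := http.ReadResponse(bufio.NewReader(io.TeeReader(strings.NewReader(raw), captured)), nil)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(resp.StatusCode) // 101
	fmt.Println(captured.Len())  // bytes consumed so far, available for replay to the client
}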
+func dial(req *http.Request, transport http.RoundTripper) (net.Conn, error) { + conn, err := DialURL(req.Context(), req.URL, transport) + if err != nil { + return nil, fmt.Errorf("error dialing backend: %v", err) + } + + if err = req.Write(conn); err != nil { + conn.Close() + return nil, fmt.Errorf("error sending request: %v", err) + } + + return conn, err +} + +func (h *UpgradeAwareHandler) defaultProxyTransport(url *url.URL, internalTransport http.RoundTripper) http.RoundTripper { + scheme := url.Scheme + host := url.Host + suffix := h.Location.Path + if strings.HasSuffix(url.Path, "/") && !strings.HasSuffix(suffix, "/") { + suffix += "/" + } + pathPrepend := strings.TrimSuffix(url.Path, suffix) + rewritingTransport := &Transport{ + Scheme: scheme, + Host: host, + PathPrepend: pathPrepend, + RoundTripper: internalTransport, + } + return &corsRemovingTransport{ + RoundTripper: rewritingTransport, + } +} + +// corsRemovingTransport is a wrapper for an internal transport. It removes CORS headers +// from the internal response. +// Implements pkg/util/net.RoundTripperWrapper +type corsRemovingTransport struct { + http.RoundTripper +} + +var _ = utilnet.RoundTripperWrapper(&corsRemovingTransport{}) + +func (rt *corsRemovingTransport) RoundTrip(req *http.Request) (*http.Response, error) { + resp, err := rt.RoundTripper.RoundTrip(req) + if err != nil { + return nil, err + } + removeCORSHeaders(resp) + return resp, nil +} + +func (rt *corsRemovingTransport) WrappedRoundTripper() http.RoundTripper { + return rt.RoundTripper +} + +// removeCORSHeaders strip CORS headers sent from the backend +// This should be called on all responses before returning +func removeCORSHeaders(resp *http.Response) { + resp.Header.Del("Access-Control-Allow-Credentials") + resp.Header.Del("Access-Control-Allow-Headers") + resp.Header.Del("Access-Control-Allow-Methods") + resp.Header.Del("Access-Control-Allow-Origin") +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go b/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go index 237ebaef4..ba153ee24 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go +++ b/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go @@ -46,8 +46,22 @@ const ( // adds support for exit codes. StreamProtocolV4Name = "v4.channel.k8s.io" + // The subprotocol "v5.channel.k8s.io" is used for remote command + // attachment/execution. It is the 5th version of the subprotocol and + // adds support for a CLOSE signal. + StreamProtocolV5Name = "v5.channel.k8s.io" + NonZeroExitCodeReason = metav1.StatusReason("NonZeroExitCode") ExitCodeCauseType = metav1.CauseType("ExitCode") + + // RemoteCommand stream identifiers. The first three identifiers (for STDIN, + // STDOUT, STDERR) are the same as their file descriptors. 
+ StreamStdIn = 0 + StreamStdOut = 1 + StreamStdErr = 2 + StreamErr = 3 + StreamResize = 4 + StreamClose = 255 ) var SupportedStreamingProtocols = []string{StreamProtocolV4Name, StreamProtocolV3Name, StreamProtocolV2Name, StreamProtocolV1Name} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go index df305b712..85b0cfc07 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go @@ -20,12 +20,17 @@ import ( "errors" "fmt" "reflect" + "strings" "k8s.io/apimachinery/pkg/util/mergepatch" forkedjson "k8s.io/apimachinery/third_party/forked/golang/json" openapi "k8s.io/kube-openapi/pkg/util/proto" + "k8s.io/kube-openapi/pkg/validation/spec" ) +const patchMergeKey = "x-kubernetes-patch-merge-key" +const patchStrategy = "x-kubernetes-patch-strategy" + type PatchMeta struct { patchStrategies []string patchMergeKey string @@ -148,6 +153,90 @@ func GetTagStructTypeOrDie(dataStruct interface{}) reflect.Type { return t } +type PatchMetaFromOpenAPIV3 struct { + // SchemaList is required to resolve OpenAPI V3 references + SchemaList map[string]*spec.Schema + Schema *spec.Schema +} + +func (s PatchMetaFromOpenAPIV3) traverse(key string) (PatchMetaFromOpenAPIV3, error) { + if s.Schema == nil { + return PatchMetaFromOpenAPIV3{}, nil + } + if len(s.Schema.Properties) == 0 { + return PatchMetaFromOpenAPIV3{}, fmt.Errorf("unable to find api field \"%s\"", key) + } + subschema, ok := s.Schema.Properties[key] + if !ok { + return PatchMetaFromOpenAPIV3{}, fmt.Errorf("unable to find api field \"%s\"", key) + } + return PatchMetaFromOpenAPIV3{SchemaList: s.SchemaList, Schema: &subschema}, nil +} + +func resolve(l *PatchMetaFromOpenAPIV3) error { + if len(l.Schema.AllOf) > 0 { + l.Schema = &l.Schema.AllOf[0] + } + if refString := l.Schema.Ref.String(); refString != "" { + str := strings.TrimPrefix(refString, "#/components/schemas/") + sch, ok := l.SchemaList[str] + if ok { + l.Schema = sch + } else { + return fmt.Errorf("unable to resolve %s in OpenAPI V3", refString) + } + } + return nil +} + +func (s PatchMetaFromOpenAPIV3) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { + l, err := s.traverse(key) + if err != nil { + return l, PatchMeta{}, err + } + p := PatchMeta{} + f, ok := l.Schema.Extensions[patchMergeKey] + if ok { + p.SetPatchMergeKey(f.(string)) + } + g, ok := l.Schema.Extensions[patchStrategy] + if ok { + p.SetPatchStrategies(strings.Split(g.(string), ",")) + } + + err = resolve(&l) + return l, p, err +} + +func (s PatchMetaFromOpenAPIV3) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { + l, err := s.traverse(key) + if err != nil { + return l, PatchMeta{}, err + } + p := PatchMeta{} + f, ok := l.Schema.Extensions[patchMergeKey] + if ok { + p.SetPatchMergeKey(f.(string)) + } + g, ok := l.Schema.Extensions[patchStrategy] + if ok { + p.SetPatchStrategies(strings.Split(g.(string), ",")) + } + if l.Schema.Items != nil { + l.Schema = l.Schema.Items.Schema + } + err = resolve(&l) + return l, p, err +} + +func (s PatchMetaFromOpenAPIV3) Name() string { + schema := s.Schema + if len(schema.Type) > 0 { + return strings.Join(schema.Type, "") + } + return "Struct" +} + type PatchMetaFromOpenAPI struct { Schema openapi.Schema } diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go index ae73bda96..bc387d011 100644 
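A rough sketch of how the new PatchMetaFromOpenAPIV3 lookup above can be driven with a hand-built schema. The schema contents are invented for illustration, and the GetPatchMergeKey/GetPatchStrategies getters are assumed from the existing PatchMeta API rather than shown in this hunk.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/strategicpatch"
	"k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
	containers := spec.Schema{
		VendorExtensible: spec.VendorExtensible{Extensions: spec.Extensions{
			"x-kubernetes-patch-merge-key": "name",
			"x-kubernetes-patch-strategy":  "merge",
		}},
	}
	podSpec := spec.Schema{
		SchemaProps: spec.SchemaProps{
			Properties: map[string]spec.Schema{"containers": containers},
		},
	}

	lookup := strategicpatch.PatchMetaFromOpenAPIV3{Schema: &podSpec}
	_, meta, err := lookup.LookupPatchMetadataForSlice("containers")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println(meta.GetPatchMergeKey(), meta.GetPatchStrategies()) // name [merge]
}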
--- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -200,12 +200,12 @@ func Invalid(field *Path, value interface{}, detail string) *Error { // NotSupported returns a *Error indicating "unsupported value". // This is used to report unknown values for enumerated fields (e.g. a list of // valid values). -func NotSupported(field *Path, value interface{}, validValues []string) *Error { +func NotSupported[T ~string](field *Path, value interface{}, validValues []T) *Error { detail := "" if len(validValues) > 0 { quotedValues := make([]string, len(validValues)) for i, v := range validValues { - quotedValues[i] = strconv.Quote(v) + quotedValues[i] = strconv.Quote(fmt.Sprint(v)) } detail = "supported values: " + strings.Join(quotedValues, ", ") } diff --git a/vendor/k8s.io/apimachinery/pkg/util/version/version.go b/vendor/k8s.io/apimachinery/pkg/util/version/version.go index 4c6195695..2292ba137 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/version/version.go +++ b/vendor/k8s.io/apimachinery/pkg/util/version/version.go @@ -18,6 +18,7 @@ package version import ( "bytes" + "errors" "fmt" "regexp" "strconv" @@ -85,6 +86,47 @@ func parse(str string, semver bool) (*Version, error) { return v, nil } +// HighestSupportedVersion returns the highest supported version +// This function assumes that the highest supported version must be v1.x. +func HighestSupportedVersion(versions []string) (*Version, error) { + if len(versions) == 0 { + return nil, errors.New("empty array for supported versions") + } + + var ( + highestSupportedVersion *Version + theErr error + ) + + for i := len(versions) - 1; i >= 0; i-- { + currentHighestVer, err := ParseGeneric(versions[i]) + if err != nil { + theErr = err + continue + } + + if currentHighestVer.Major() > 1 { + continue + } + + if highestSupportedVersion == nil || highestSupportedVersion.LessThan(currentHighestVer) { + highestSupportedVersion = currentHighestVer + } + } + + if highestSupportedVersion == nil { + return nil, fmt.Errorf( + "could not find a highest supported version from versions (%v) reported: %+v", + versions, theErr) + } + + if highestSupportedVersion.Major() != 1 { + return nil, fmt.Errorf("highest supported version reported is %v, must be v1.x", highestSupportedVersion) + } + + return highestSupportedVersion, nil +} + // ParseGeneric parses a "generic" version string. 
The version string must consist of two // or more dot-separated numeric fields (the first of which can't have leading zeroes), // followed by arbitrary uninterpreted data (which need not be separated from the final diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go index 0dd13c626..107bfc132 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go @@ -40,6 +40,10 @@ func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding var timeCh <-chan time.Time doneCh := ctx.Done() + if !sliding { + timeCh = t.C() + } + // if immediate is true the condition is // guaranteed to be executed at least once, // if we haven't requested immediate execution, delay once @@ -50,17 +54,27 @@ func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding }(); err != nil || ok { return err } - } else { + } + + if sliding { timeCh = t.C() + } + + for { + + // Wait for either the context to be cancelled or the next invocation be called select { case <-doneCh: return ctx.Err() case <-timeCh: } - } - for { - // checking ctx.Err() is slightly faster than checking a select + // IMPORTANT: Because there is no channel priority selection in golang + // it is possible for very short timers to "win" the race in the previous select + // repeatedly even when the context has been canceled. We therefore must + // explicitly check for context cancellation on every loop and exit if true to + // guarantee that we don't invoke condition more than once after context has + // been cancelled. if err := ctx.Err(); err != nil { return err } @@ -77,21 +91,5 @@ func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding if sliding { t.Next() } - - if timeCh == nil { - timeCh = t.C() - } - - // NOTE: b/c there is no priority selection in golang - // it is possible for this to race, meaning we could - // trigger t.C and doneCh, and t.C select falls through. - // In order to mitigate we re-check doneCh at the beginning - // of every loop to guarantee at-most one extra execution - // of condition. - select { - case <-doneCh: - return ctx.Err() - case <-timeCh: - } } } diff --git a/vendor/k8s.io/apiserver/pkg/admission/config.go b/vendor/k8s.io/apiserver/pkg/admission/config.go index 43613321b..c0b75a983 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/config.go +++ b/vendor/k8s.io/apiserver/pkg/admission/config.go @@ -20,7 +20,6 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "os" "path" "path/filepath" @@ -60,7 +59,7 @@ func ReadAdmissionConfiguration(pluginNames []string, configFilePath string, con return configProvider{config: &apiserver.AdmissionConfiguration{}}, nil } // a file was provided, so we just read it. 
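A standalone sketch of the guarantee the reshuffled loop in wait/loop.go above is after: because select picks randomly among ready cases, a very short timer can keep winning over a cancelled context, so cancellation has to be re-checked explicitly before invoking the condition again. The function and names below are illustrative only.

package example

import (
	"context"
	"time"
)

// tickUntil runs condition on every tick until it succeeds, errors, or ctx is cancelled.
func tickUntil(ctx context.Context, interval time.Duration, condition func(context.Context) (bool, error)) error {
	t := time.NewTicker(interval)
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-t.C:
		}
		// select does not prioritise ctx.Done() over t.C, so a ready timer can
		// win the race even after cancellation; re-check before calling condition.
		if err := ctx.Err(); err != nil {
			return err
		}
		if done, err := condition(ctx); err != nil || done {
			return err
		}
	}
}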
- data, err := ioutil.ReadFile(configFilePath) + data, err := os.ReadFile(configFilePath) if err != nil { return nil, fmt.Errorf("unable to read admission control configuration from %q [%v]", configFilePath, err) } @@ -141,7 +140,7 @@ func GetAdmissionPluginConfigurationFor(pluginCfg apiserver.AdmissionPluginConfi } // there is nothing nested, so we delegate to path if pluginCfg.Path != "" { - content, err := ioutil.ReadFile(pluginCfg.Path) + content, err := os.ReadFile(pluginCfg.Path) if err != nil { klog.Fatalf("Couldn't open admission plugin configuration %s: %#v", pluginCfg.Path, err) return nil, err diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go index 25ee108ea..b7b589d27 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go @@ -141,6 +141,7 @@ type CompilationResult struct { Program cel.Program Error *apiservercel.Error ExpressionAccessor ExpressionAccessor + OutputType *cel.Type } // Compiler provides a CEL expression compiler configured with the desired admission related CEL variables and @@ -214,6 +215,7 @@ func (c compiler) CompileCELExpression(expressionAccessor ExpressionAccessor, op return CompilationResult{ Program: prog, ExpressionAccessor: expressionAccessor, + OutputType: ast.OutputType(), } } diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go index 38b80a304..646c640fc 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go @@ -23,6 +23,7 @@ import ( "github.com/google/cel-go/cel" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" v1 "k8s.io/api/admission/v1" corev1 "k8s.io/api/core/v1" @@ -69,8 +70,8 @@ func (c *CompositedCompiler) CompileAndStoreVariables(variables []NamedExpressio } func (c *CompositedCompiler) CompileAndStoreVariable(variable NamedExpressionAccessor, options OptionalVariableDeclarations, mode environment.Type) CompilationResult { - c.CompositionEnv.AddField(variable.GetName()) result := c.Compiler.CompileCELExpression(variable, options, mode) + c.CompositionEnv.AddField(variable.GetName(), result.OutputType) c.CompositionEnv.CompiledVariables[variable.GetName()] = result return result } @@ -90,8 +91,8 @@ type CompositionEnv struct { CompiledVariables map[string]CompilationResult } -func (c *CompositionEnv) AddField(name string) { - c.MapType.Fields[name] = apiservercel.NewDeclField(name, apiservercel.DynType, true, nil, nil) +func (c *CompositionEnv) AddField(name string, celType *cel.Type) { + c.MapType.Fields[name] = apiservercel.NewDeclField(name, convertCelTypeToDeclType(celType), true, nil, nil) } func NewCompositionEnv(typeName string, baseEnvSet *environment.EnvSet) (*CompositionEnv, error) { @@ -196,3 +197,48 @@ func (a *variableAccessor) Callback(_ *lazy.MapValue) ref.Val { } return v } + +// convertCelTypeToDeclType converts a cel.Type to DeclType, for the use of +// the TypeProvider and the cost estimator. +// List and map types are created on-demand with their parameters converted recursively. 
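The conversion below keys off cel-go's trait and parameter introspection. A small, self-contained sketch of those calls follows; the list/map constructors cel.ListType and cel.MapType are assumed from the cel-go API rather than shown in this hunk.

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/types/traits"
)

// describe reports whether a CEL type is a parameterised container (list/map) or a scalar.
func describe(t *cel.Type) {
	if t.HasTrait(traits.ContainerType) && t.HasTrait(traits.IndexerType) {
		fmt.Println("container type, parameters:", t.Parameters())
		return
	}
	fmt.Println("scalar type:", t)
}

func main() {
	describe(cel.StringType)
	describe(cel.ListType(cel.StringType))
	describe(cel.MapType(cel.StringType, cel.IntType))
}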
+func convertCelTypeToDeclType(celType *cel.Type) *apiservercel.DeclType { + if celType == nil { + return apiservercel.DynType + } + switch celType { + case cel.AnyType: + return apiservercel.AnyType + case cel.BoolType: + return apiservercel.BoolType + case cel.BytesType: + return apiservercel.BytesType + case cel.DoubleType: + return apiservercel.DoubleType + case cel.DurationType: + return apiservercel.DurationType + case cel.IntType: + return apiservercel.IntType + case cel.NullType: + return apiservercel.NullType + case cel.StringType: + return apiservercel.StringType + case cel.TimestampType: + return apiservercel.TimestampType + case cel.UintType: + return apiservercel.UintType + default: + if celType.HasTrait(traits.ContainerType) && celType.HasTrait(traits.IndexerType) { + parameters := celType.Parameters() + switch len(parameters) { + case 1: + elemType := convertCelTypeToDeclType(parameters[0]) + return apiservercel.NewListType(elemType, -1) + case 2: + keyType := convertCelTypeToDeclType(parameters[0]) + valueType := convertCelTypeToDeclType(parameters[1]) + return apiservercel.NewMapType(keyType, valueType, -1) + } + } + return apiservercel.DynType + } +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/typechecking.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/typechecking.go index 6d73e237b..86c8479c3 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/typechecking.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/typechecking.go @@ -238,7 +238,7 @@ func (c *TypeChecker) typesToCheck(p *v1beta1.ValidatingAdmissionPolicy) []schem if p.Spec.MatchConstraints == nil || len(p.Spec.MatchConstraints.ResourceRules) == 0 { return nil } - + restMapperRefreshAttempted := false // at most once per policy, refresh RESTMapper and retry resolution. for _, rule := range p.Spec.MatchConstraints.ResourceRules { groups := extractGroups(&rule.Rule) if len(groups) == 0 { @@ -268,7 +268,16 @@ func (c *TypeChecker) typesToCheck(p *v1beta1.ValidatingAdmissionPolicy) []schem } resolved, err := c.RestMapper.KindsFor(gvr) if err != nil { - continue + if restMapperRefreshAttempted { + // RESTMapper refresh happens at most once per policy + continue + } + c.tryRefreshRESTMapper() + restMapperRefreshAttempted = true + resolved, err = c.RestMapper.KindsFor(gvr) + if err != nil { + continue + } } for _, r := range resolved { if !r.Empty() { @@ -344,6 +353,13 @@ func sortGVKList(list []schema.GroupVersionKind) []schema.GroupVersionKind { return list } +// tryRefreshRESTMapper refreshes the RESTMapper if it supports refreshing. 
+func (c *TypeChecker) tryRefreshRESTMapper() { + if r, ok := c.RestMapper.(meta.ResettableRESTMapper); ok { + r.Reset() + } +} + func buildEnv(hasParams bool, hasAuthorizer bool, types typeOverwrite) (*cel.Env, error) { baseEnv := environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion()) requestType := plugincel.BuildRequestType() diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go index 78f5312a4..7b845f1d1 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go @@ -19,7 +19,6 @@ package config import ( "fmt" "io" - "io/ioutil" "path" "k8s.io/apimachinery/pkg/runtime" @@ -47,7 +46,7 @@ func LoadConfig(configFile io.Reader) (string, error) { var kubeconfigFile string if configFile != nil { // we have a config so parse it. - data, err := ioutil.ReadAll(configFile) + data, err := io.ReadAll(configFile) if err != nil { return "", err } diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugins.go b/vendor/k8s.io/apiserver/pkg/admission/plugins.go index 1afb480dd..10a435d49 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugins.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugins.go @@ -20,7 +20,6 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "reflect" "sort" "strings" @@ -115,7 +114,7 @@ func splitStream(config io.Reader) (io.Reader, io.Reader, error) { return nil, nil, nil } - configBytes, err := ioutil.ReadAll(config) + configBytes, err := io.ReadAll(config) if err != nil { return nil, nil, err } diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go index 14ba08482..d42852d93 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go @@ -43,6 +43,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { ) scheme.AddKnownTypes(SchemeGroupVersion, &AdmissionConfiguration{}, + &AuthenticationConfiguration{}, + &AuthorizationConfiguration{}, &EgressSelectorConfiguration{}, &TracingConfiguration{}, ) diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go index dcb082e09..f3b4ae321 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go @@ -157,3 +157,188 @@ type TracingConfiguration struct { // Embed the component config tracing configuration struct tracingapi.TracingConfiguration } + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AuthenticationConfiguration provides versioned configuration for authentication. +type AuthenticationConfiguration struct { + metav1.TypeMeta + + JWT []JWTAuthenticator +} + +// JWTAuthenticator provides the configuration for a single JWT authenticator. +type JWTAuthenticator struct { + Issuer Issuer + ClaimValidationRules []ClaimValidationRule + ClaimMappings ClaimMappings + UserValidationRules []UserValidationRule +} + +// Issuer provides the configuration for a external provider specific settings. +type Issuer struct { + URL string + CertificateAuthority string + Audiences []string +} + +// ClaimValidationRule provides the configuration for a single claim validation rule. 
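To make the shape of these internal authentication types concrete, here is a hypothetical AuthenticationConfiguration literal built from the fields defined in this file; the issuer URL, audience, prefix, and CEL rules are invented for illustration.

package example

import apiserver "k8s.io/apiserver/pkg/apis/apiserver"

func exampleAuthnConfig() apiserver.AuthenticationConfiguration {
	usernamePrefix := "oidc:"
	return apiserver.AuthenticationConfiguration{
		JWT: []apiserver.JWTAuthenticator{{
			Issuer: apiserver.Issuer{
				URL:       "https://issuer.example.com",
				Audiences: []string{"my-cluster"},
			},
			ClaimValidationRules: []apiserver.ClaimValidationRule{{
				Expression: "claims.email_verified == true",
				Message:    "e-mail address must be verified",
			}},
			ClaimMappings: apiserver.ClaimMappings{
				Username: apiserver.PrefixedClaimOrExpression{Claim: "sub", Prefix: &usernamePrefix},
			},
			UserValidationRules: []apiserver.UserValidationRule{{
				Expression: "!user.username.startsWith('system:')",
				Message:    "usernames may not use the reserved system: prefix",
			}},
		}},
	}
}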
+type ClaimValidationRule struct { + Claim string + RequiredValue string + + Expression string + Message string +} + +// ClaimMappings provides the configuration for claim mapping +type ClaimMappings struct { + Username PrefixedClaimOrExpression + Groups PrefixedClaimOrExpression + UID ClaimOrExpression + Extra []ExtraMapping +} + +// PrefixedClaimOrExpression provides the configuration for a single prefixed claim or expression. +type PrefixedClaimOrExpression struct { + Claim string + Prefix *string + + Expression string +} + +// ClaimOrExpression provides the configuration for a single claim or expression. +type ClaimOrExpression struct { + Claim string + Expression string +} + +// ExtraMapping provides the configuration for a single extra mapping. +type ExtraMapping struct { + Key string + ValueExpression string +} + +// UserValidationRule provides the configuration for a single user validation rule. +type UserValidationRule struct { + Expression string + Message string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type AuthorizationConfiguration struct { + metav1.TypeMeta + + // Authorizers is an ordered list of authorizers to + // authorize requests against. + // This is similar to the --authorization-modes kube-apiserver flag + // Must be at least one. + Authorizers []AuthorizerConfiguration `json:"authorizers"` +} + +const ( + TypeWebhook AuthorizerType = "Webhook" + FailurePolicyNoOpinion string = "NoOpinion" + FailurePolicyDeny string = "Deny" + AuthorizationWebhookConnectionInfoTypeKubeConfigFile string = "KubeConfigFile" + AuthorizationWebhookConnectionInfoTypeInCluster string = "InClusterConfig" +) + +type AuthorizerType string + +type AuthorizerConfiguration struct { + // Type refers to the type of the authorizer + // "Webhook" is supported in the generic API server + // Other API servers may support additional authorizer + // types like Node, RBAC, ABAC, etc. + Type AuthorizerType + + // Name used to describe the webhook + // This is explicitly used in monitoring machinery for metrics + // Note: Names must be DNS1123 labels like `myauthorizername` or + // subdomains like `myauthorizer.example.domain` + // Required, with no default + Name string + + // Webhook defines the configuration for a Webhook authorizer + // Must be defined when Type=Webhook + Webhook *WebhookConfiguration +} + +type WebhookConfiguration struct { + // The duration to cache 'authorized' responses from the webhook + // authorizer. + // Same as setting `--authorization-webhook-cache-authorized-ttl` flag + // Default: 5m0s + AuthorizedTTL metav1.Duration + // The duration to cache 'unauthorized' responses from the webhook + // authorizer. + // Same as setting `--authorization-webhook-cache-unauthorized-ttl` flag + // Default: 30s + UnauthorizedTTL metav1.Duration + // Timeout for the webhook request + // Maximum allowed value is 30s. + // Required, no default value. + Timeout metav1.Duration + // The API version of the authorization.k8s.io SubjectAccessReview to + // send to and expect from the webhook. 
+ // Same as setting `--authorization-webhook-version` flag + // Valid values: v1beta1, v1 + // Required, no default value + SubjectAccessReviewVersion string + // MatchConditionSubjectAccessReviewVersion specifies the SubjectAccessReview + // version the CEL expressions are evaluated against + // Valid values: v1 + // Required, no default value + MatchConditionSubjectAccessReviewVersion string + // Controls the authorization decision when a webhook request fails to + // complete or returns a malformed response or errors evaluating + // matchConditions. + // Valid values: + // - NoOpinion: continue to subsequent authorizers to see if one of + // them allows the request + // - Deny: reject the request without consulting subsequent authorizers + // Required, with no default. + FailurePolicy string + + // ConnectionInfo defines how we talk to the webhook + ConnectionInfo WebhookConnectionInfo + + // matchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If at least one matchCondition evaluates to FALSE, then the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, then the webhook is called. + // 3. If at least one matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Deny, then the webhook rejects the request + // - If failurePolicy=NoOpinion, then the error is ignored and the webhook is skipped + MatchConditions []WebhookMatchCondition +} + +type WebhookConnectionInfo struct { + // Controls how the webhook should communicate with the server. + // Valid values: + // - KubeConfigFile: use the file specified in kubeConfigFile to locate the + // server. + // - InClusterConfig: use the in-cluster configuration to call the + // SubjectAccessReview API hosted by kube-apiserver. This mode is not + // allowed for kube-apiserver. + Type string + + // Path to KubeConfigFile for connection info + // Required, if connectionInfo.Type is KubeConfig + KubeConfigFile *string +} + +type WebhookMatchCondition struct { + // expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the SubjectAccessReview in v1 version. + // If version specified by subjectAccessReviewVersion in the request variable is v1beta1, + // the contents would be converted to the v1 version before evaluating the CEL expression. + // + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + Expression string +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/defaults.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/defaults.go new file mode 100644 index 000000000..a9af01fe7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/defaults.go @@ -0,0 +1,36 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
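Similarly, a hypothetical literal for the internal AuthorizationConfiguration defined above; the webhook name, kubeconfig path, timeouts, and match-condition expression are invented, while the constants come from the const block in this file.

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	apiserver "k8s.io/apiserver/pkg/apis/apiserver"
)

func exampleAuthzConfig() apiserver.AuthorizationConfiguration {
	kubeconfig := "/etc/kubernetes/authz-webhook.kubeconfig"
	return apiserver.AuthorizationConfiguration{
		Authorizers: []apiserver.AuthorizerConfiguration{{
			Type: apiserver.TypeWebhook,
			Name: "example-webhook",
			Webhook: &apiserver.WebhookConfiguration{
				AuthorizedTTL:              metav1.Duration{Duration: 5 * time.Minute},
				UnauthorizedTTL:            metav1.Duration{Duration: 30 * time.Second},
				Timeout:                    metav1.Duration{Duration: 10 * time.Second},
				SubjectAccessReviewVersion: "v1",
				MatchConditionSubjectAccessReviewVersion: "v1",
				FailurePolicy: apiserver.FailurePolicyNoOpinion,
				ConnectionInfo: apiserver.WebhookConnectionInfo{
					Type:           apiserver.AuthorizationWebhookConnectionInfoTypeKubeConfigFile,
					KubeConfigFile: &kubeconfig,
				},
				MatchConditions: []apiserver.WebhookMatchCondition{{
					Expression: "request.resourceAttributes.namespace != 'kube-system'",
				}},
			},
		}},
	}
}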
+*/ + +package v1alpha1 + +import ( + "time" + + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +func SetDefaults_WebhookConfiguration(obj *WebhookConfiguration) { + if obj.AuthorizedTTL.Duration == 0 { + obj.AuthorizedTTL.Duration = 5 * time.Minute + } + if obj.UnauthorizedTTL.Duration == 0 { + obj.UnauthorizedTTL.Duration = 30 * time.Second + } +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go index e4e16c01c..7d68ac0c6 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go @@ -43,7 +43,7 @@ func init() { // We only register manually written functions here. The registration of the // generated functions takes place in the generated files. The separation // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes) + localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs) } // Adds the list of known types to the given scheme. @@ -53,6 +53,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &EgressSelectorConfiguration{}, ) scheme.AddKnownTypes(ConfigSchemeGroupVersion, + &AuthenticationConfiguration{}, + &AuthorizationConfiguration{}, &TracingConfiguration{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go index b18d84d9d..9394ba6f7 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go @@ -158,3 +158,379 @@ type TracingConfiguration struct { // Embed the component config tracing configuration struct tracingapi.TracingConfiguration `json:",inline"` } + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AuthenticationConfiguration provides versioned configuration for authentication. +type AuthenticationConfiguration struct { + metav1.TypeMeta + + // jwt is a list of authenticator to authenticate Kubernetes users using + // JWT compliant tokens. The authenticator will attempt to parse a raw ID token, + // verify it's been signed by the configured issuer. The public key to verify the + // signature is discovered from the issuer's public endpoint using OIDC discovery. + // For an incoming token, each JWT authenticator will be attempted in + // the order in which it is specified in this list. Note however that + // other authenticators may run before or after the JWT authenticators. + // The specific position of JWT authenticators in relation to other + // authenticators is neither defined nor stable across releases. Since + // each JWT authenticator must have a unique issuer URL, at most one + // JWT authenticator will attempt to cryptographically validate the token. + JWT []JWTAuthenticator `json:"jwt"` +} + +// JWTAuthenticator provides the configuration for a single JWT authenticator. +type JWTAuthenticator struct { + // issuer contains the basic OIDC provider connection options. + // +required + Issuer Issuer `json:"issuer"` + + // claimValidationRules are rules that are applied to validate token claims to authenticate users. + // +optional + ClaimValidationRules []ClaimValidationRule `json:"claimValidationRules,omitempty"` + + // claimMappings points claims of a token to be treated as user attributes. 
+ // +required + ClaimMappings ClaimMappings `json:"claimMappings"` + + // userValidationRules are rules that are applied to final user before completing authentication. + // These allow invariants to be applied to incoming identities such as preventing the + // use of the system: prefix that is commonly used by Kubernetes components. + // The validation rules are logically ANDed together and must all return true for the validation to pass. + // +optional + UserValidationRules []UserValidationRule `json:"userValidationRules,omitempty"` +} + +// Issuer provides the configuration for a external provider specific settings. +type Issuer struct { + // url points to the issuer URL in a format https://url or https://url/path. + // This must match the "iss" claim in the presented JWT, and the issuer returned from discovery. + // Same value as the --oidc-issuer-url flag. + // Used to fetch discovery information unless overridden by discoveryURL. + // Required to be unique. + // Note that egress selection configuration is not used for this network connection. + // +required + URL string `json:"url"` + + // certificateAuthority contains PEM-encoded certificate authority certificates + // used to validate the connection when fetching discovery information. + // If unset, the system verifier is used. + // Same value as the content of the file referenced by the --oidc-ca-file flag. + // +optional + CertificateAuthority string `json:"certificateAuthority,omitempty"` + + // audiences is the set of acceptable audiences the JWT must be issued to. + // At least one of the entries must match the "aud" claim in presented JWTs. + // Same value as the --oidc-client-id flag (though this field supports an array). + // Required to be non-empty. + // +required + Audiences []string `json:"audiences"` +} + +// ClaimValidationRule provides the configuration for a single claim validation rule. +type ClaimValidationRule struct { + // claim is the name of a required claim. + // Same as --oidc-required-claim flag. + // Only string claim keys are supported. + // Mutually exclusive with expression and message. + // +optional + Claim string `json:"claim,omitempty"` + // requiredValue is the value of a required claim. + // Same as --oidc-required-claim flag. + // Only string claim values are supported. + // If claim is set and requiredValue is not set, the claim must be present with a value set to the empty string. + // Mutually exclusive with expression and message. + // +optional + RequiredValue string `json:"requiredValue,omitempty"` + + // expression represents the expression which will be evaluated by CEL. + // Must produce a boolean. + // + // CEL expressions have access to the contents of the token claims, organized into CEL variable: + // - 'claims' is a map of claim names to claim values. + // For example, a variable named 'sub' can be accessed as 'claims.sub'. + // Nested claims can be accessed using dot notation, e.g. 'claims.email.verified'. + // Must return true for the validation to pass. + // + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Mutually exclusive with claim and requiredValue. + // +optional + Expression string `json:"expression,omitempty"` + // message customizes the returned error message when expression returns false. + // message is a literal string. + // Mutually exclusive with claim and requiredValue. 
+ // +optional + Message string `json:"message,omitempty"` +} + +// ClaimMappings provides the configuration for claim mapping +type ClaimMappings struct { + // username represents an option for the username attribute. + // The claim's value must be a singular string. + // Same as the --oidc-username-claim and --oidc-username-prefix flags. + // If username.expression is set, the expression must produce a string value. + // + // In the flag based approach, the --oidc-username-claim and --oidc-username-prefix are optional. If --oidc-username-claim is not set, + // the default value is "sub". For the authentication config, there is no defaulting for claim or prefix. The claim and prefix must be set explicitly. + // For claim, if --oidc-username-claim was not set with legacy flag approach, configure username.claim="sub" in the authentication config. + // For prefix: + // (1) --oidc-username-prefix="-", no prefix was added to the username. For the same behavior using authentication config, + // set username.prefix="" + // (2) --oidc-username-prefix="" and --oidc-username-claim != "email", prefix was "#". For the same + // behavior using authentication config, set username.prefix="#" + // (3) --oidc-username-prefix="". For the same behavior using authentication config, set username.prefix="" + // +required + Username PrefixedClaimOrExpression `json:"username"` + // groups represents an option for the groups attribute. + // The claim's value must be a string or string array claim. + // If groups.claim is set, the prefix must be specified (and can be the empty string). + // If groups.expression is set, the expression must produce a string or string array value. + // "", [], and null values are treated as the group mapping not being present. + // +optional + Groups PrefixedClaimOrExpression `json:"groups,omitempty"` + + // uid represents an option for the uid attribute. + // Claim must be a singular string claim. + // If uid.expression is set, the expression must produce a string value. + // +optional + UID ClaimOrExpression `json:"uid"` + + // extra represents an option for the extra attribute. + // expression must produce a string or string array value. + // If the value is empty, the extra mapping will not be present. + // + // hard-coded extra key/value + // - key: "foo" + // valueExpression: "'bar'" + // This will result in an extra attribute - foo: ["bar"] + // + // hard-coded key, value copying claim value + // - key: "foo" + // valueExpression: "claims.some_claim" + // This will result in an extra attribute - foo: [value of some_claim] + // + // hard-coded key, value derived from claim value + // - key: "admin" + // valueExpression: '(has(claims.is_admin) && claims.is_admin) ? "true":""' + // This will result in: + // - if is_admin claim is present and true, extra attribute - admin: ["true"] + // - if is_admin claim is present and false or is_admin claim is not present, no extra attribute will be added + // + // +optional + Extra []ExtraMapping `json:"extra,omitempty"` +} + +// PrefixedClaimOrExpression provides the configuration for a single prefixed claim or expression. +type PrefixedClaimOrExpression struct { + // claim is the JWT claim to use. + // Mutually exclusive with expression. + // +optional + Claim string `json:"claim,omitempty"` + // prefix is prepended to claim's value to prevent clashes with existing names. + // prefix needs to be set if claim is set and can be the empty string. + // Mutually exclusive with expression. 
+ // +optional + Prefix *string `json:"prefix,omitempty"` + + // expression represents the expression which will be evaluated by CEL. + // + // CEL expressions have access to the contents of the token claims, organized into CEL variable: + // - 'claims' is a map of claim names to claim values. + // For example, a variable named 'sub' can be accessed as 'claims.sub'. + // Nested claims can be accessed using dot notation, e.g. 'claims.email.verified'. + // + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Mutually exclusive with claim and prefix. + // +optional + Expression string `json:"expression,omitempty"` +} + +// ClaimOrExpression provides the configuration for a single claim or expression. +type ClaimOrExpression struct { + // claim is the JWT claim to use. + // Either claim or expression must be set. + // Mutually exclusive with expression. + // +optional + Claim string `json:"claim,omitempty"` + + // expression represents the expression which will be evaluated by CEL. + // + // CEL expressions have access to the contents of the token claims, organized into CEL variable: + // - 'claims' is a map of claim names to claim values. + // For example, a variable named 'sub' can be accessed as 'claims.sub'. + // Nested claims can be accessed using dot notation, e.g. 'claims.email.verified'. + // + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Mutually exclusive with claim. + // +optional + Expression string `json:"expression,omitempty"` +} + +// ExtraMapping provides the configuration for a single extra mapping. +type ExtraMapping struct { + // key is a string to use as the extra attribute key. + // key must be a domain-prefix path (e.g. example.org/foo). All characters before the first "/" must be a valid + // subdomain as defined by RFC 1123. All characters trailing the first "/" must + // be valid HTTP Path characters as defined by RFC 3986. + // key must be lowercase. + // +required + Key string `json:"key"` + + // valueExpression is a CEL expression to extract extra attribute value. + // valueExpression must produce a string or string array value. + // "", [], and null values are treated as the extra mapping not being present. + // Empty string values contained within a string array are filtered out. + // + // CEL expressions have access to the contents of the token claims, organized into CEL variable: + // - 'claims' is a map of claim names to claim values. + // For example, a variable named 'sub' can be accessed as 'claims.sub'. + // Nested claims can be accessed using dot notation, e.g. 'claims.email.verified'. + // + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // +required + ValueExpression string `json:"valueExpression"` +} + +// UserValidationRule provides the configuration for a single user info validation rule. +type UserValidationRule struct { + // expression represents the expression which will be evaluated by CEL. + // Must return true for the validation to pass. + // + // CEL expressions have access to the contents of UserInfo, organized into CEL variable: + // - 'user' - authentication.k8s.io/v1, Kind=UserInfo object + // Refer to https://github.com/kubernetes/api/blob/release-1.28/authentication/v1/types.go#L105-L122 for the definition. 
+ // API documentation: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#userinfo-v1-authentication-k8s-io + // + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // +required + Expression string `json:"expression"` + + // message customizes the returned error message when rule returns false. + // message is a literal string. + // +optional + Message string `json:"message,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type AuthorizationConfiguration struct { + metav1.TypeMeta + + // Authorizers is an ordered list of authorizers to + // authorize requests against. + // This is similar to the --authorization-modes kube-apiserver flag + // Must be at least one. + Authorizers []AuthorizerConfiguration `json:"authorizers"` +} + +const ( + TypeWebhook AuthorizerType = "Webhook" + FailurePolicyNoOpinion string = "NoOpinion" + FailurePolicyDeny string = "Deny" + AuthorizationWebhookConnectionInfoTypeKubeConfigFile string = "KubeConfigFile" + AuthorizationWebhookConnectionInfoTypeInCluster string = "InClusterConfig" +) + +type AuthorizerType string + +type AuthorizerConfiguration struct { + // Type refers to the type of the authorizer + // "Webhook" is supported in the generic API server + // Other API servers may support additional authorizer + // types like Node, RBAC, ABAC, etc. + Type string `json:"type"` + + // Name used to describe the webhook + // This is explicitly used in monitoring machinery for metrics + // Note: Names must be DNS1123 labels like `myauthorizername` or + // subdomains like `myauthorizer.example.domain` + // Required, with no default + Name string `json:"name"` + + // Webhook defines the configuration for a Webhook authorizer + // Must be defined when Type=Webhook + // Must not be defined when Type!=Webhook + Webhook *WebhookConfiguration `json:"webhook,omitempty"` +} + +type WebhookConfiguration struct { + // The duration to cache 'authorized' responses from the webhook + // authorizer. + // Same as setting `--authorization-webhook-cache-authorized-ttl` flag + // Default: 5m0s + AuthorizedTTL metav1.Duration `json:"authorizedTTL"` + // The duration to cache 'unauthorized' responses from the webhook + // authorizer. + // Same as setting `--authorization-webhook-cache-unauthorized-ttl` flag + // Default: 30s + UnauthorizedTTL metav1.Duration `json:"unauthorizedTTL"` + // Timeout for the webhook request + // Maximum allowed value is 30s. + // Required, no default value. + Timeout metav1.Duration `json:"timeout"` + // The API version of the authorization.k8s.io SubjectAccessReview to + // send to and expect from the webhook. + // Same as setting `--authorization-webhook-version` flag + // Valid values: v1beta1, v1 + // Required, no default value + SubjectAccessReviewVersion string `json:"subjectAccessReviewVersion"` + // MatchConditionSubjectAccessReviewVersion specifies the SubjectAccessReview + // version the CEL expressions are evaluated against + // Valid values: v1 + // Required, no default value + MatchConditionSubjectAccessReviewVersion string `json:"matchConditionSubjectAccessReviewVersion"` + // Controls the authorization decision when a webhook request fails to + // complete or returns a malformed response or errors evaluating + // matchConditions. + // Valid values: + // - NoOpinion: continue to subsequent authorizers to see if one of + // them allows the request + // - Deny: reject the request without consulting subsequent authorizers + // Required, with no default. 
+ FailurePolicy string `json:"failurePolicy"` + + // ConnectionInfo defines how we talk to the webhook + ConnectionInfo WebhookConnectionInfo `json:"connectionInfo"` + + // matchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If at least one matchCondition evaluates to FALSE, then the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, then the webhook is called. + // 3. If at least one matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Deny, then the webhook rejects the request + // - If failurePolicy=NoOpinion, then the error is ignored and the webhook is skipped + MatchConditions []WebhookMatchCondition `json:"matchConditions"` +} + +type WebhookConnectionInfo struct { + // Controls how the webhook should communicate with the server. + // Valid values: + // - KubeConfigFile: use the file specified in kubeConfigFile to locate the + // server. + // - InClusterConfig: use the in-cluster configuration to call the + // SubjectAccessReview API hosted by kube-apiserver. This mode is not + // allowed for kube-apiserver. + Type string `json:"type"` + + // Path to KubeConfigFile for connection info + // Required, if connectionInfo.Type is KubeConfig + KubeConfigFile *string `json:"kubeConfigFile"` +} + +type WebhookMatchCondition struct { + // expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the SubjectAccessReview in v1 version. + // If version specified by subjectAccessReviewVersion in the request variable is v1beta1, + // the contents would be converted to the v1 version before evaluating the CEL expression. 
+ // + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + Expression string `json:"expression"` +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go index 41b350c48..920602068 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go @@ -56,6 +56,66 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*AuthenticationConfiguration)(nil), (*apiserver.AuthenticationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(a.(*AuthenticationConfiguration), b.(*apiserver.AuthenticationConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AuthenticationConfiguration)(nil), (*AuthenticationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AuthenticationConfiguration_To_v1alpha1_AuthenticationConfiguration(a.(*apiserver.AuthenticationConfiguration), b.(*AuthenticationConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AuthorizationConfiguration)(nil), (*apiserver.AuthorizationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(a.(*AuthorizationConfiguration), b.(*apiserver.AuthorizationConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AuthorizationConfiguration)(nil), (*AuthorizationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration(a.(*apiserver.AuthorizationConfiguration), b.(*AuthorizationConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AuthorizerConfiguration)(nil), (*apiserver.AuthorizerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(a.(*AuthorizerConfiguration), b.(*apiserver.AuthorizerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AuthorizerConfiguration)(nil), (*AuthorizerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration(a.(*apiserver.AuthorizerConfiguration), b.(*AuthorizerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClaimMappings)(nil), (*apiserver.ClaimMappings)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings(a.(*ClaimMappings), b.(*apiserver.ClaimMappings), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.ClaimMappings)(nil), (*ClaimMappings)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_ClaimMappings_To_v1alpha1_ClaimMappings(a.(*apiserver.ClaimMappings), b.(*ClaimMappings), scope) + }); err != nil { + return err + } + if err 
:= s.AddGeneratedConversionFunc((*ClaimOrExpression)(nil), (*apiserver.ClaimOrExpression)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ClaimOrExpression_To_apiserver_ClaimOrExpression(a.(*ClaimOrExpression), b.(*apiserver.ClaimOrExpression), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.ClaimOrExpression)(nil), (*ClaimOrExpression)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_ClaimOrExpression_To_v1alpha1_ClaimOrExpression(a.(*apiserver.ClaimOrExpression), b.(*ClaimOrExpression), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClaimValidationRule)(nil), (*apiserver.ClaimValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ClaimValidationRule_To_apiserver_ClaimValidationRule(a.(*ClaimValidationRule), b.(*apiserver.ClaimValidationRule), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.ClaimValidationRule)(nil), (*ClaimValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_ClaimValidationRule_To_v1alpha1_ClaimValidationRule(a.(*apiserver.ClaimValidationRule), b.(*ClaimValidationRule), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*Connection)(nil), (*apiserver.Connection)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_Connection_To_apiserver_Connection(a.(*Connection), b.(*apiserver.Connection), scope) }); err != nil { @@ -81,6 +141,46 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*ExtraMapping)(nil), (*apiserver.ExtraMapping)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ExtraMapping_To_apiserver_ExtraMapping(a.(*ExtraMapping), b.(*apiserver.ExtraMapping), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.ExtraMapping)(nil), (*ExtraMapping)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_ExtraMapping_To_v1alpha1_ExtraMapping(a.(*apiserver.ExtraMapping), b.(*ExtraMapping), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Issuer)(nil), (*apiserver.Issuer)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_Issuer_To_apiserver_Issuer(a.(*Issuer), b.(*apiserver.Issuer), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.Issuer)(nil), (*Issuer)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_Issuer_To_v1alpha1_Issuer(a.(*apiserver.Issuer), b.(*Issuer), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*JWTAuthenticator)(nil), (*apiserver.JWTAuthenticator)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_JWTAuthenticator_To_apiserver_JWTAuthenticator(a.(*JWTAuthenticator), b.(*apiserver.JWTAuthenticator), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.JWTAuthenticator)(nil), (*JWTAuthenticator)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_JWTAuthenticator_To_v1alpha1_JWTAuthenticator(a.(*apiserver.JWTAuthenticator), b.(*JWTAuthenticator), scope) + }); err != nil { + return err + } + if err 
:= s.AddGeneratedConversionFunc((*PrefixedClaimOrExpression)(nil), (*apiserver.PrefixedClaimOrExpression)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(a.(*PrefixedClaimOrExpression), b.(*apiserver.PrefixedClaimOrExpression), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.PrefixedClaimOrExpression)(nil), (*PrefixedClaimOrExpression)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression(a.(*apiserver.PrefixedClaimOrExpression), b.(*PrefixedClaimOrExpression), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*TCPTransport)(nil), (*apiserver.TCPTransport)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_TCPTransport_To_apiserver_TCPTransport(a.(*TCPTransport), b.(*apiserver.TCPTransport), scope) }); err != nil { @@ -131,6 +231,46 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*UserValidationRule)(nil), (*apiserver.UserValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_UserValidationRule_To_apiserver_UserValidationRule(a.(*UserValidationRule), b.(*apiserver.UserValidationRule), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.UserValidationRule)(nil), (*UserValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_UserValidationRule_To_v1alpha1_UserValidationRule(a.(*apiserver.UserValidationRule), b.(*UserValidationRule), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*WebhookConfiguration)(nil), (*apiserver.WebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration(a.(*WebhookConfiguration), b.(*apiserver.WebhookConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.WebhookConfiguration)(nil), (*WebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration(a.(*apiserver.WebhookConfiguration), b.(*WebhookConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*WebhookConnectionInfo)(nil), (*apiserver.WebhookConnectionInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(a.(*WebhookConnectionInfo), b.(*apiserver.WebhookConnectionInfo), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.WebhookConnectionInfo)(nil), (*WebhookConnectionInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(a.(*apiserver.WebhookConnectionInfo), b.(*WebhookConnectionInfo), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*WebhookMatchCondition)(nil), (*apiserver.WebhookMatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(a.(*WebhookMatchCondition), 
b.(*apiserver.WebhookMatchCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.WebhookMatchCondition)(nil), (*WebhookMatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition(a.(*apiserver.WebhookMatchCondition), b.(*WebhookMatchCondition), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*EgressSelection)(nil), (*apiserver.EgressSelection)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(a.(*EgressSelection), b.(*apiserver.EgressSelection), scope) }); err != nil { @@ -183,6 +323,156 @@ func Convert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginC return autoConvert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(in, out, s) } +func autoConvert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(in *AuthenticationConfiguration, out *apiserver.AuthenticationConfiguration, s conversion.Scope) error { + out.JWT = *(*[]apiserver.JWTAuthenticator)(unsafe.Pointer(&in.JWT)) + return nil +} + +// Convert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(in *AuthenticationConfiguration, out *apiserver.AuthenticationConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(in, out, s) +} + +func autoConvert_apiserver_AuthenticationConfiguration_To_v1alpha1_AuthenticationConfiguration(in *apiserver.AuthenticationConfiguration, out *AuthenticationConfiguration, s conversion.Scope) error { + out.JWT = *(*[]JWTAuthenticator)(unsafe.Pointer(&in.JWT)) + return nil +} + +// Convert_apiserver_AuthenticationConfiguration_To_v1alpha1_AuthenticationConfiguration is an autogenerated conversion function. +func Convert_apiserver_AuthenticationConfiguration_To_v1alpha1_AuthenticationConfiguration(in *apiserver.AuthenticationConfiguration, out *AuthenticationConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_AuthenticationConfiguration_To_v1alpha1_AuthenticationConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in *AuthorizationConfiguration, out *apiserver.AuthorizationConfiguration, s conversion.Scope) error { + out.Authorizers = *(*[]apiserver.AuthorizerConfiguration)(unsafe.Pointer(&in.Authorizers)) + return nil +} + +// Convert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration is an autogenerated conversion function. 
+func Convert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in *AuthorizationConfiguration, out *apiserver.AuthorizationConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in, out, s) +} + +func autoConvert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration(in *apiserver.AuthorizationConfiguration, out *AuthorizationConfiguration, s conversion.Scope) error { + out.Authorizers = *(*[]AuthorizerConfiguration)(unsafe.Pointer(&in.Authorizers)) + return nil +} + +// Convert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration is an autogenerated conversion function. +func Convert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration(in *apiserver.AuthorizationConfiguration, out *AuthorizationConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in *AuthorizerConfiguration, out *apiserver.AuthorizerConfiguration, s conversion.Scope) error { + out.Type = apiserver.AuthorizerType(in.Type) + out.Name = in.Name + out.Webhook = (*apiserver.WebhookConfiguration)(unsafe.Pointer(in.Webhook)) + return nil +} + +// Convert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in *AuthorizerConfiguration, out *apiserver.AuthorizerConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in, out, s) +} + +func autoConvert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration(in *apiserver.AuthorizerConfiguration, out *AuthorizerConfiguration, s conversion.Scope) error { + out.Type = string(in.Type) + out.Name = in.Name + out.Webhook = (*WebhookConfiguration)(unsafe.Pointer(in.Webhook)) + return nil +} + +// Convert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration is an autogenerated conversion function. +func Convert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration(in *apiserver.AuthorizerConfiguration, out *AuthorizerConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings(in *ClaimMappings, out *apiserver.ClaimMappings, s conversion.Scope) error { + if err := Convert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(&in.Username, &out.Username, s); err != nil { + return err + } + if err := Convert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(&in.Groups, &out.Groups, s); err != nil { + return err + } + if err := Convert_v1alpha1_ClaimOrExpression_To_apiserver_ClaimOrExpression(&in.UID, &out.UID, s); err != nil { + return err + } + out.Extra = *(*[]apiserver.ExtraMapping)(unsafe.Pointer(&in.Extra)) + return nil +} + +// Convert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings is an autogenerated conversion function. 
+func Convert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings(in *ClaimMappings, out *apiserver.ClaimMappings, s conversion.Scope) error { + return autoConvert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings(in, out, s) +} + +func autoConvert_apiserver_ClaimMappings_To_v1alpha1_ClaimMappings(in *apiserver.ClaimMappings, out *ClaimMappings, s conversion.Scope) error { + if err := Convert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression(&in.Username, &out.Username, s); err != nil { + return err + } + if err := Convert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression(&in.Groups, &out.Groups, s); err != nil { + return err + } + if err := Convert_apiserver_ClaimOrExpression_To_v1alpha1_ClaimOrExpression(&in.UID, &out.UID, s); err != nil { + return err + } + out.Extra = *(*[]ExtraMapping)(unsafe.Pointer(&in.Extra)) + return nil +} + +// Convert_apiserver_ClaimMappings_To_v1alpha1_ClaimMappings is an autogenerated conversion function. +func Convert_apiserver_ClaimMappings_To_v1alpha1_ClaimMappings(in *apiserver.ClaimMappings, out *ClaimMappings, s conversion.Scope) error { + return autoConvert_apiserver_ClaimMappings_To_v1alpha1_ClaimMappings(in, out, s) +} + +func autoConvert_v1alpha1_ClaimOrExpression_To_apiserver_ClaimOrExpression(in *ClaimOrExpression, out *apiserver.ClaimOrExpression, s conversion.Scope) error { + out.Claim = in.Claim + out.Expression = in.Expression + return nil +} + +// Convert_v1alpha1_ClaimOrExpression_To_apiserver_ClaimOrExpression is an autogenerated conversion function. +func Convert_v1alpha1_ClaimOrExpression_To_apiserver_ClaimOrExpression(in *ClaimOrExpression, out *apiserver.ClaimOrExpression, s conversion.Scope) error { + return autoConvert_v1alpha1_ClaimOrExpression_To_apiserver_ClaimOrExpression(in, out, s) +} + +func autoConvert_apiserver_ClaimOrExpression_To_v1alpha1_ClaimOrExpression(in *apiserver.ClaimOrExpression, out *ClaimOrExpression, s conversion.Scope) error { + out.Claim = in.Claim + out.Expression = in.Expression + return nil +} + +// Convert_apiserver_ClaimOrExpression_To_v1alpha1_ClaimOrExpression is an autogenerated conversion function. +func Convert_apiserver_ClaimOrExpression_To_v1alpha1_ClaimOrExpression(in *apiserver.ClaimOrExpression, out *ClaimOrExpression, s conversion.Scope) error { + return autoConvert_apiserver_ClaimOrExpression_To_v1alpha1_ClaimOrExpression(in, out, s) +} + +func autoConvert_v1alpha1_ClaimValidationRule_To_apiserver_ClaimValidationRule(in *ClaimValidationRule, out *apiserver.ClaimValidationRule, s conversion.Scope) error { + out.Claim = in.Claim + out.RequiredValue = in.RequiredValue + out.Expression = in.Expression + out.Message = in.Message + return nil +} + +// Convert_v1alpha1_ClaimValidationRule_To_apiserver_ClaimValidationRule is an autogenerated conversion function. +func Convert_v1alpha1_ClaimValidationRule_To_apiserver_ClaimValidationRule(in *ClaimValidationRule, out *apiserver.ClaimValidationRule, s conversion.Scope) error { + return autoConvert_v1alpha1_ClaimValidationRule_To_apiserver_ClaimValidationRule(in, out, s) +} + +func autoConvert_apiserver_ClaimValidationRule_To_v1alpha1_ClaimValidationRule(in *apiserver.ClaimValidationRule, out *ClaimValidationRule, s conversion.Scope) error { + out.Claim = in.Claim + out.RequiredValue = in.RequiredValue + out.Expression = in.Expression + out.Message = in.Message + return nil +} + +// Convert_apiserver_ClaimValidationRule_To_v1alpha1_ClaimValidationRule is an autogenerated conversion function. 
+func Convert_apiserver_ClaimValidationRule_To_v1alpha1_ClaimValidationRule(in *apiserver.ClaimValidationRule, out *ClaimValidationRule, s conversion.Scope) error { + return autoConvert_apiserver_ClaimValidationRule_To_v1alpha1_ClaimValidationRule(in, out, s) +} + func autoConvert_v1alpha1_Connection_To_apiserver_Connection(in *Connection, out *apiserver.Connection, s conversion.Scope) error { out.ProxyProtocol = apiserver.ProtocolType(in.ProxyProtocol) out.Transport = (*apiserver.Transport)(unsafe.Pointer(in.Transport)) @@ -266,6 +556,110 @@ func Convert_apiserver_EgressSelectorConfiguration_To_v1alpha1_EgressSelectorCon return autoConvert_apiserver_EgressSelectorConfiguration_To_v1alpha1_EgressSelectorConfiguration(in, out, s) } +func autoConvert_v1alpha1_ExtraMapping_To_apiserver_ExtraMapping(in *ExtraMapping, out *apiserver.ExtraMapping, s conversion.Scope) error { + out.Key = in.Key + out.ValueExpression = in.ValueExpression + return nil +} + +// Convert_v1alpha1_ExtraMapping_To_apiserver_ExtraMapping is an autogenerated conversion function. +func Convert_v1alpha1_ExtraMapping_To_apiserver_ExtraMapping(in *ExtraMapping, out *apiserver.ExtraMapping, s conversion.Scope) error { + return autoConvert_v1alpha1_ExtraMapping_To_apiserver_ExtraMapping(in, out, s) +} + +func autoConvert_apiserver_ExtraMapping_To_v1alpha1_ExtraMapping(in *apiserver.ExtraMapping, out *ExtraMapping, s conversion.Scope) error { + out.Key = in.Key + out.ValueExpression = in.ValueExpression + return nil +} + +// Convert_apiserver_ExtraMapping_To_v1alpha1_ExtraMapping is an autogenerated conversion function. +func Convert_apiserver_ExtraMapping_To_v1alpha1_ExtraMapping(in *apiserver.ExtraMapping, out *ExtraMapping, s conversion.Scope) error { + return autoConvert_apiserver_ExtraMapping_To_v1alpha1_ExtraMapping(in, out, s) +} + +func autoConvert_v1alpha1_Issuer_To_apiserver_Issuer(in *Issuer, out *apiserver.Issuer, s conversion.Scope) error { + out.URL = in.URL + out.CertificateAuthority = in.CertificateAuthority + out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences)) + return nil +} + +// Convert_v1alpha1_Issuer_To_apiserver_Issuer is an autogenerated conversion function. +func Convert_v1alpha1_Issuer_To_apiserver_Issuer(in *Issuer, out *apiserver.Issuer, s conversion.Scope) error { + return autoConvert_v1alpha1_Issuer_To_apiserver_Issuer(in, out, s) +} + +func autoConvert_apiserver_Issuer_To_v1alpha1_Issuer(in *apiserver.Issuer, out *Issuer, s conversion.Scope) error { + out.URL = in.URL + out.CertificateAuthority = in.CertificateAuthority + out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences)) + return nil +} + +// Convert_apiserver_Issuer_To_v1alpha1_Issuer is an autogenerated conversion function. 
+func Convert_apiserver_Issuer_To_v1alpha1_Issuer(in *apiserver.Issuer, out *Issuer, s conversion.Scope) error { + return autoConvert_apiserver_Issuer_To_v1alpha1_Issuer(in, out, s) +} + +func autoConvert_v1alpha1_JWTAuthenticator_To_apiserver_JWTAuthenticator(in *JWTAuthenticator, out *apiserver.JWTAuthenticator, s conversion.Scope) error { + if err := Convert_v1alpha1_Issuer_To_apiserver_Issuer(&in.Issuer, &out.Issuer, s); err != nil { + return err + } + out.ClaimValidationRules = *(*[]apiserver.ClaimValidationRule)(unsafe.Pointer(&in.ClaimValidationRules)) + if err := Convert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings(&in.ClaimMappings, &out.ClaimMappings, s); err != nil { + return err + } + out.UserValidationRules = *(*[]apiserver.UserValidationRule)(unsafe.Pointer(&in.UserValidationRules)) + return nil +} + +// Convert_v1alpha1_JWTAuthenticator_To_apiserver_JWTAuthenticator is an autogenerated conversion function. +func Convert_v1alpha1_JWTAuthenticator_To_apiserver_JWTAuthenticator(in *JWTAuthenticator, out *apiserver.JWTAuthenticator, s conversion.Scope) error { + return autoConvert_v1alpha1_JWTAuthenticator_To_apiserver_JWTAuthenticator(in, out, s) +} + +func autoConvert_apiserver_JWTAuthenticator_To_v1alpha1_JWTAuthenticator(in *apiserver.JWTAuthenticator, out *JWTAuthenticator, s conversion.Scope) error { + if err := Convert_apiserver_Issuer_To_v1alpha1_Issuer(&in.Issuer, &out.Issuer, s); err != nil { + return err + } + out.ClaimValidationRules = *(*[]ClaimValidationRule)(unsafe.Pointer(&in.ClaimValidationRules)) + if err := Convert_apiserver_ClaimMappings_To_v1alpha1_ClaimMappings(&in.ClaimMappings, &out.ClaimMappings, s); err != nil { + return err + } + out.UserValidationRules = *(*[]UserValidationRule)(unsafe.Pointer(&in.UserValidationRules)) + return nil +} + +// Convert_apiserver_JWTAuthenticator_To_v1alpha1_JWTAuthenticator is an autogenerated conversion function. +func Convert_apiserver_JWTAuthenticator_To_v1alpha1_JWTAuthenticator(in *apiserver.JWTAuthenticator, out *JWTAuthenticator, s conversion.Scope) error { + return autoConvert_apiserver_JWTAuthenticator_To_v1alpha1_JWTAuthenticator(in, out, s) +} + +func autoConvert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(in *PrefixedClaimOrExpression, out *apiserver.PrefixedClaimOrExpression, s conversion.Scope) error { + out.Claim = in.Claim + out.Prefix = (*string)(unsafe.Pointer(in.Prefix)) + out.Expression = in.Expression + return nil +} + +// Convert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression is an autogenerated conversion function. +func Convert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(in *PrefixedClaimOrExpression, out *apiserver.PrefixedClaimOrExpression, s conversion.Scope) error { + return autoConvert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(in, out, s) +} + +func autoConvert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression(in *apiserver.PrefixedClaimOrExpression, out *PrefixedClaimOrExpression, s conversion.Scope) error { + out.Claim = in.Claim + out.Prefix = (*string)(unsafe.Pointer(in.Prefix)) + out.Expression = in.Expression + return nil +} + +// Convert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression is an autogenerated conversion function. 
+func Convert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression(in *apiserver.PrefixedClaimOrExpression, out *PrefixedClaimOrExpression, s conversion.Scope) error { + return autoConvert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression(in, out, s) +} + func autoConvert_v1alpha1_TCPTransport_To_apiserver_TCPTransport(in *TCPTransport, out *apiserver.TCPTransport, s conversion.Scope) error { out.URL = in.URL out.TLSConfig = (*apiserver.TLSConfig)(unsafe.Pointer(in.TLSConfig)) @@ -373,3 +767,105 @@ func autoConvert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(in *apiserver.U func Convert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(in *apiserver.UDSTransport, out *UDSTransport, s conversion.Scope) error { return autoConvert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(in, out, s) } + +func autoConvert_v1alpha1_UserValidationRule_To_apiserver_UserValidationRule(in *UserValidationRule, out *apiserver.UserValidationRule, s conversion.Scope) error { + out.Expression = in.Expression + out.Message = in.Message + return nil +} + +// Convert_v1alpha1_UserValidationRule_To_apiserver_UserValidationRule is an autogenerated conversion function. +func Convert_v1alpha1_UserValidationRule_To_apiserver_UserValidationRule(in *UserValidationRule, out *apiserver.UserValidationRule, s conversion.Scope) error { + return autoConvert_v1alpha1_UserValidationRule_To_apiserver_UserValidationRule(in, out, s) +} + +func autoConvert_apiserver_UserValidationRule_To_v1alpha1_UserValidationRule(in *apiserver.UserValidationRule, out *UserValidationRule, s conversion.Scope) error { + out.Expression = in.Expression + out.Message = in.Message + return nil +} + +// Convert_apiserver_UserValidationRule_To_v1alpha1_UserValidationRule is an autogenerated conversion function. +func Convert_apiserver_UserValidationRule_To_v1alpha1_UserValidationRule(in *apiserver.UserValidationRule, out *UserValidationRule, s conversion.Scope) error { + return autoConvert_apiserver_UserValidationRule_To_v1alpha1_UserValidationRule(in, out, s) +} + +func autoConvert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in *WebhookConfiguration, out *apiserver.WebhookConfiguration, s conversion.Scope) error { + out.AuthorizedTTL = in.AuthorizedTTL + out.UnauthorizedTTL = in.UnauthorizedTTL + out.Timeout = in.Timeout + out.SubjectAccessReviewVersion = in.SubjectAccessReviewVersion + out.MatchConditionSubjectAccessReviewVersion = in.MatchConditionSubjectAccessReviewVersion + out.FailurePolicy = in.FailurePolicy + if err := Convert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(&in.ConnectionInfo, &out.ConnectionInfo, s); err != nil { + return err + } + out.MatchConditions = *(*[]apiserver.WebhookMatchCondition)(unsafe.Pointer(&in.MatchConditions)) + return nil +} + +// Convert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration is an autogenerated conversion function. 
+func Convert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in *WebhookConfiguration, out *apiserver.WebhookConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in, out, s) +} + +func autoConvert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration(in *apiserver.WebhookConfiguration, out *WebhookConfiguration, s conversion.Scope) error { + out.AuthorizedTTL = in.AuthorizedTTL + out.UnauthorizedTTL = in.UnauthorizedTTL + out.Timeout = in.Timeout + out.SubjectAccessReviewVersion = in.SubjectAccessReviewVersion + out.MatchConditionSubjectAccessReviewVersion = in.MatchConditionSubjectAccessReviewVersion + out.FailurePolicy = in.FailurePolicy + if err := Convert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(&in.ConnectionInfo, &out.ConnectionInfo, s); err != nil { + return err + } + out.MatchConditions = *(*[]WebhookMatchCondition)(unsafe.Pointer(&in.MatchConditions)) + return nil +} + +// Convert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration is an autogenerated conversion function. +func Convert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration(in *apiserver.WebhookConfiguration, out *WebhookConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in *WebhookConnectionInfo, out *apiserver.WebhookConnectionInfo, s conversion.Scope) error { + out.Type = in.Type + out.KubeConfigFile = (*string)(unsafe.Pointer(in.KubeConfigFile)) + return nil +} + +// Convert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo is an autogenerated conversion function. +func Convert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in *WebhookConnectionInfo, out *apiserver.WebhookConnectionInfo, s conversion.Scope) error { + return autoConvert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in, out, s) +} + +func autoConvert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(in *apiserver.WebhookConnectionInfo, out *WebhookConnectionInfo, s conversion.Scope) error { + out.Type = in.Type + out.KubeConfigFile = (*string)(unsafe.Pointer(in.KubeConfigFile)) + return nil +} + +// Convert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo is an autogenerated conversion function. +func Convert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(in *apiserver.WebhookConnectionInfo, out *WebhookConnectionInfo, s conversion.Scope) error { + return autoConvert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(in, out, s) +} + +func autoConvert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in *WebhookMatchCondition, out *apiserver.WebhookMatchCondition, s conversion.Scope) error { + out.Expression = in.Expression + return nil +} + +// Convert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition is an autogenerated conversion function. 
+func Convert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in *WebhookMatchCondition, out *apiserver.WebhookMatchCondition, s conversion.Scope) error { + return autoConvert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in, out, s) +} + +func autoConvert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition(in *apiserver.WebhookMatchCondition, out *WebhookMatchCondition, s conversion.Scope) error { + out.Expression = in.Expression + return nil +} + +// Convert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition is an autogenerated conversion function. +func Convert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition(in *apiserver.WebhookMatchCondition, out *WebhookMatchCondition, s conversion.Scope) error { + return autoConvert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition(in, out, s) +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go index f8ac34035..932af6127 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go @@ -78,6 +78,147 @@ func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.JWT != nil { + in, out := &in.JWT, &out.JWT + *out = make([]JWTAuthenticator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfiguration. +func (in *AuthenticationConfiguration) DeepCopy() *AuthenticationConfiguration { + if in == nil { + return nil + } + out := new(AuthenticationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuthenticationConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizationConfiguration) DeepCopyInto(out *AuthorizationConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Authorizers != nil { + in, out := &in.Authorizers, &out.Authorizers + *out = make([]AuthorizerConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationConfiguration. +func (in *AuthorizationConfiguration) DeepCopy() *AuthorizationConfiguration { + if in == nil { + return nil + } + out := new(AuthorizationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuthorizationConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthorizerConfiguration) DeepCopyInto(out *AuthorizerConfiguration) { + *out = *in + if in.Webhook != nil { + in, out := &in.Webhook, &out.Webhook + *out = new(WebhookConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizerConfiguration. +func (in *AuthorizerConfiguration) DeepCopy() *AuthorizerConfiguration { + if in == nil { + return nil + } + out := new(AuthorizerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClaimMappings) DeepCopyInto(out *ClaimMappings) { + *out = *in + in.Username.DeepCopyInto(&out.Username) + in.Groups.DeepCopyInto(&out.Groups) + out.UID = in.UID + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make([]ExtraMapping, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimMappings. +func (in *ClaimMappings) DeepCopy() *ClaimMappings { + if in == nil { + return nil + } + out := new(ClaimMappings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClaimOrExpression) DeepCopyInto(out *ClaimOrExpression) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimOrExpression. +func (in *ClaimOrExpression) DeepCopy() *ClaimOrExpression { + if in == nil { + return nil + } + out := new(ClaimOrExpression) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClaimValidationRule) DeepCopyInto(out *ClaimValidationRule) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimValidationRule. +func (in *ClaimValidationRule) DeepCopy() *ClaimValidationRule { + if in == nil { + return nil + } + out := new(ClaimValidationRule) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Connection) DeepCopyInto(out *Connection) { *out = *in @@ -148,6 +289,92 @@ func (in *EgressSelectorConfiguration) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtraMapping) DeepCopyInto(out *ExtraMapping) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraMapping. +func (in *ExtraMapping) DeepCopy() *ExtraMapping { + if in == nil { + return nil + } + out := new(ExtraMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Issuer) DeepCopyInto(out *Issuer) { + *out = *in + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Issuer. 
+func (in *Issuer) DeepCopy() *Issuer { + if in == nil { + return nil + } + out := new(Issuer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JWTAuthenticator) DeepCopyInto(out *JWTAuthenticator) { + *out = *in + in.Issuer.DeepCopyInto(&out.Issuer) + if in.ClaimValidationRules != nil { + in, out := &in.ClaimValidationRules, &out.ClaimValidationRules + *out = make([]ClaimValidationRule, len(*in)) + copy(*out, *in) + } + in.ClaimMappings.DeepCopyInto(&out.ClaimMappings) + if in.UserValidationRules != nil { + in, out := &in.UserValidationRules, &out.UserValidationRules + *out = make([]UserValidationRule, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JWTAuthenticator. +func (in *JWTAuthenticator) DeepCopy() *JWTAuthenticator { + if in == nil { + return nil + } + out := new(JWTAuthenticator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrefixedClaimOrExpression) DeepCopyInto(out *PrefixedClaimOrExpression) { + *out = *in + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixedClaimOrExpression. +func (in *PrefixedClaimOrExpression) DeepCopy() *PrefixedClaimOrExpression { + if in == nil { + return nil + } + out := new(PrefixedClaimOrExpression) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TCPTransport) DeepCopyInto(out *TCPTransport) { *out = *in @@ -252,3 +479,81 @@ func (in *UDSTransport) DeepCopy() *UDSTransport { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserValidationRule) DeepCopyInto(out *UserValidationRule) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserValidationRule. +func (in *UserValidationRule) DeepCopy() *UserValidationRule { + if in == nil { + return nil + } + out := new(UserValidationRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookConfiguration) DeepCopyInto(out *WebhookConfiguration) { + *out = *in + out.AuthorizedTTL = in.AuthorizedTTL + out.UnauthorizedTTL = in.UnauthorizedTTL + out.Timeout = in.Timeout + in.ConnectionInfo.DeepCopyInto(&out.ConnectionInfo) + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]WebhookMatchCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConfiguration. +func (in *WebhookConfiguration) DeepCopy() *WebhookConfiguration { + if in == nil { + return nil + } + out := new(WebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebhookConnectionInfo) DeepCopyInto(out *WebhookConnectionInfo) { + *out = *in + if in.KubeConfigFile != nil { + in, out := &in.KubeConfigFile, &out.KubeConfigFile + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConnectionInfo. +func (in *WebhookConnectionInfo) DeepCopy() *WebhookConnectionInfo { + if in == nil { + return nil + } + out := new(WebhookConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookMatchCondition) DeepCopyInto(out *WebhookMatchCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookMatchCondition. +func (in *WebhookMatchCondition) DeepCopy() *WebhookMatchCondition { + if in == nil { + return nil + } + out := new(WebhookMatchCondition) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go index 5070cb91b..fc76be0fb 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go @@ -29,5 +29,15 @@ import ( // Public to allow building arbitrary schemes. // All generated defaulters are covering - they call all nested defaulters. func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&AuthorizationConfiguration{}, func(obj interface{}) { SetObjectDefaults_AuthorizationConfiguration(obj.(*AuthorizationConfiguration)) }) return nil } + +func SetObjectDefaults_AuthorizationConfiguration(in *AuthorizationConfiguration) { + for i := range in.Authorizers { + a := &in.Authorizers[i] + if a.Webhook != nil { + SetDefaults_WebhookConfiguration(a.Webhook) + } + } +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/validation/validation.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/validation/validation.go new file mode 100644 index 000000000..843324085 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/validation/validation.go @@ -0,0 +1,630 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package validation + +import ( + "errors" + "fmt" + "net/url" + "os" + "path/filepath" + "strings" + "time" + + v1 "k8s.io/api/authorization/v1" + "k8s.io/api/authorization/v1beta1" + "k8s.io/apimachinery/pkg/util/sets" + utilvalidation "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + api "k8s.io/apiserver/pkg/apis/apiserver" + authenticationcel "k8s.io/apiserver/pkg/authentication/cel" + authorizationcel "k8s.io/apiserver/pkg/authorization/cel" + "k8s.io/apiserver/pkg/cel" + "k8s.io/apiserver/pkg/cel/environment" + "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/client-go/util/cert" +) + +const ( + atLeastOneRequiredErrFmt = "at least one %s is required" +) + +var ( + root = field.NewPath("jwt") +) + +// ValidateAuthenticationConfiguration validates a given AuthenticationConfiguration. +func ValidateAuthenticationConfiguration(c *api.AuthenticationConfiguration) field.ErrorList { + var allErrs field.ErrorList + + // This stricter validation is solely based on what the current implementation supports. + // TODO(aramase): when StructuredAuthenticationConfiguration feature gate is added and wired up, + // relax this check to allow 0 authenticators. This will allow us to support the case where + // API server is initially configured with no authenticators and then authenticators are added + // later via dynamic config. + if len(c.JWT) == 0 { + allErrs = append(allErrs, field.Required(root, fmt.Sprintf(atLeastOneRequiredErrFmt, root))) + return allErrs + } + + // This stricter validation is because the --oidc-* flag option is singular. + // TODO(aramase): when StructuredAuthenticationConfiguration feature gate is added and wired up, + // remove the 1 authenticator limit check and add set the limit to 64. + if len(c.JWT) > 1 { + allErrs = append(allErrs, field.TooMany(root, len(c.JWT), 1)) + return allErrs + } + + // TODO(aramase): right now we only support a single JWT authenticator as + // this is wired to the --oidc-* flags. When StructuredAuthenticationConfiguration + // feature gate is added and wired up, we will remove the 1 authenticator limit + // check and add validation for duplicate issuers. + for i, a := range c.JWT { + fldPath := root.Index(i) + _, errs := validateJWTAuthenticator(a, fldPath, utilfeature.DefaultFeatureGate.Enabled(features.StructuredAuthenticationConfiguration)) + allErrs = append(allErrs, errs...) + } + + return allErrs +} + +// CompileAndValidateJWTAuthenticator validates a given JWTAuthenticator and returns a CELMapper with the compiled +// CEL expressions for claim mappings and validation rules. +// This is exported for use in oidc package. +func CompileAndValidateJWTAuthenticator(authenticator api.JWTAuthenticator) (authenticationcel.CELMapper, field.ErrorList) { + return validateJWTAuthenticator(authenticator, nil, utilfeature.DefaultFeatureGate.Enabled(features.StructuredAuthenticationConfiguration)) +} + +func validateJWTAuthenticator(authenticator api.JWTAuthenticator, fldPath *field.Path, structuredAuthnFeatureEnabled bool) (authenticationcel.CELMapper, field.ErrorList) { + var allErrs field.ErrorList + + compiler := authenticationcel.NewCompiler(environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion())) + mapper := &authenticationcel.CELMapper{} + + allErrs = append(allErrs, validateIssuer(authenticator.Issuer, fldPath.Child("issuer"))...) 
+ allErrs = append(allErrs, validateClaimValidationRules(compiler, mapper, authenticator.ClaimValidationRules, fldPath.Child("claimValidationRules"), structuredAuthnFeatureEnabled)...) + allErrs = append(allErrs, validateClaimMappings(compiler, mapper, authenticator.ClaimMappings, fldPath.Child("claimMappings"), structuredAuthnFeatureEnabled)...) + allErrs = append(allErrs, validateUserValidationRules(compiler, mapper, authenticator.UserValidationRules, fldPath.Child("userValidationRules"), structuredAuthnFeatureEnabled)...) + + return *mapper, allErrs +} + +func validateIssuer(issuer api.Issuer, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + allErrs = append(allErrs, validateURL(issuer.URL, fldPath.Child("url"))...) + allErrs = append(allErrs, validateAudiences(issuer.Audiences, fldPath.Child("audiences"))...) + allErrs = append(allErrs, validateCertificateAuthority(issuer.CertificateAuthority, fldPath.Child("certificateAuthority"))...) + + return allErrs +} + +func validateURL(issuerURL string, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + if len(issuerURL) == 0 { + allErrs = append(allErrs, field.Required(fldPath, "URL is required")) + return allErrs + } + + u, err := url.Parse(issuerURL) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, issuerURL, err.Error())) + return allErrs + } + if u.Scheme != "https" { + allErrs = append(allErrs, field.Invalid(fldPath, issuerURL, "URL scheme must be https")) + } + if u.User != nil { + allErrs = append(allErrs, field.Invalid(fldPath, issuerURL, "URL must not contain a username or password")) + } + if len(u.RawQuery) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath, issuerURL, "URL must not contain a query")) + } + if len(u.Fragment) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath, issuerURL, "URL must not contain a fragment")) + } + + return allErrs +} + +func validateAudiences(audiences []string, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + if len(audiences) == 0 { + allErrs = append(allErrs, field.Required(fldPath, fmt.Sprintf(atLeastOneRequiredErrFmt, fldPath))) + return allErrs + } + // This stricter validation is because the --oidc-client-id flag option is singular. + // This will be removed when we support multiple audiences with the StructuredAuthenticationConfiguration feature gate. 
+ if len(audiences) > 1 { + allErrs = append(allErrs, field.TooMany(fldPath, len(audiences), 1)) + return allErrs + } + + for i, audience := range audiences { + fldPath := fldPath.Index(i) + if len(audience) == 0 { + allErrs = append(allErrs, field.Required(fldPath, "audience can't be empty")) + } + } + + return allErrs +} + +func validateCertificateAuthority(certificateAuthority string, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + if len(certificateAuthority) == 0 { + return allErrs + } + _, err := cert.NewPoolFromBytes([]byte(certificateAuthority)) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, "", err.Error())) + } + + return allErrs +} + +func validateClaimValidationRules(compiler authenticationcel.Compiler, celMapper *authenticationcel.CELMapper, rules []api.ClaimValidationRule, fldPath *field.Path, structuredAuthnFeatureEnabled bool) field.ErrorList { + var allErrs field.ErrorList + + seenClaims := sets.NewString() + seenExpressions := sets.NewString() + var compilationResults []authenticationcel.CompilationResult + + for i, rule := range rules { + fldPath := fldPath.Index(i) + + if len(rule.Expression) > 0 && !structuredAuthnFeatureEnabled { + allErrs = append(allErrs, field.Invalid(fldPath.Child("expression"), rule.Expression, "expression is not supported when StructuredAuthenticationConfiguration feature gate is disabled")) + } + + switch { + case len(rule.Claim) > 0 && len(rule.Expression) > 0: + allErrs = append(allErrs, field.Invalid(fldPath, rule.Claim, "claim and expression can't both be set")) + case len(rule.Claim) == 0 && len(rule.Expression) == 0: + allErrs = append(allErrs, field.Required(fldPath, "claim or expression is required")) + case len(rule.Claim) > 0: + if len(rule.Message) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("message"), rule.Message, "message can't be set when claim is set")) + } + if seenClaims.Has(rule.Claim) { + allErrs = append(allErrs, field.Duplicate(fldPath.Child("claim"), rule.Claim)) + } + seenClaims.Insert(rule.Claim) + case len(rule.Expression) > 0: + if len(rule.RequiredValue) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("requiredValue"), rule.RequiredValue, "requiredValue can't be set when expression is set")) + } + if seenExpressions.Has(rule.Expression) { + allErrs = append(allErrs, field.Duplicate(fldPath.Child("expression"), rule.Expression)) + continue + } + seenExpressions.Insert(rule.Expression) + + compilationResult, err := compileClaimsCELExpression(compiler, &authenticationcel.ClaimValidationCondition{ + Expression: rule.Expression, + }, fldPath.Child("expression")) + + if err != nil { + allErrs = append(allErrs, err) + continue + } + if compilationResult != nil { + compilationResults = append(compilationResults, *compilationResult) + } + } + } + + if structuredAuthnFeatureEnabled && len(compilationResults) > 0 { + celMapper.ClaimValidationRules = authenticationcel.NewClaimsMapper(compilationResults) + } + + return allErrs +} + +func validateClaimMappings(compiler authenticationcel.Compiler, celMapper *authenticationcel.CELMapper, m api.ClaimMappings, fldPath *field.Path, structuredAuthnFeatureEnabled bool) field.ErrorList { + var allErrs field.ErrorList + + if !structuredAuthnFeatureEnabled { + if len(m.Username.Expression) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("username").Child("expression"), m.Username.Expression, "expression is not supported when StructuredAuthenticationConfiguration feature gate is disabled")) + } + if 
len(m.Groups.Expression) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("groups").Child("expression"), m.Groups.Expression, "expression is not supported when StructuredAuthenticationConfiguration feature gate is disabled")) + } + if len(m.UID.Claim) > 0 || len(m.UID.Expression) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), "", "uid claim mapping is not supported when StructuredAuthenticationConfiguration feature gate is disabled")) + } + if len(m.Extra) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("extra"), "", "extra claim mapping is not supported when StructuredAuthenticationConfiguration feature gate is disabled")) + } + } + + compilationResult, err := validatePrefixClaimOrExpression(compiler, m.Username, fldPath.Child("username"), true, structuredAuthnFeatureEnabled) + if err != nil { + allErrs = append(allErrs, err...) + } else if compilationResult != nil && structuredAuthnFeatureEnabled { + celMapper.Username = authenticationcel.NewClaimsMapper([]authenticationcel.CompilationResult{*compilationResult}) + } + + compilationResult, err = validatePrefixClaimOrExpression(compiler, m.Groups, fldPath.Child("groups"), false, structuredAuthnFeatureEnabled) + if err != nil { + allErrs = append(allErrs, err...) + } else if compilationResult != nil && structuredAuthnFeatureEnabled { + celMapper.Groups = authenticationcel.NewClaimsMapper([]authenticationcel.CompilationResult{*compilationResult}) + } + + switch { + case len(m.UID.Claim) > 0 && len(m.UID.Expression) > 0: + allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), "", "claim and expression can't both be set")) + case len(m.UID.Expression) > 0: + compilationResult, err := compileClaimsCELExpression(compiler, &authenticationcel.ClaimMappingExpression{ + Expression: m.UID.Expression, + }, fldPath.Child("uid").Child("expression")) + + if err != nil { + allErrs = append(allErrs, err) + } else if structuredAuthnFeatureEnabled && compilationResult != nil { + celMapper.UID = authenticationcel.NewClaimsMapper([]authenticationcel.CompilationResult{*compilationResult}) + } + } + + var extraCompilationResults []authenticationcel.CompilationResult + seenExtraKeys := sets.NewString() + + for i, mapping := range m.Extra { + fldPath := fldPath.Child("extra").Index(i) + // Key should be namespaced to the authenticator or authenticator/authorizer pair making use of them. + // For instance: "example.org/foo" instead of "foo". + // xref: https://github.com/kubernetes/kubernetes/blob/3825e206cb162a7ad7431a5bdf6a065ae8422cf7/staging/src/k8s.io/apiserver/pkg/authentication/user/user.go#L31-L41 + // IsDomainPrefixedPath checks for non-empty key and that the key is prefixed with a domain name. + allErrs = append(allErrs, utilvalidation.IsDomainPrefixedPath(fldPath.Child("key"), mapping.Key)...) 
+ if mapping.Key != strings.ToLower(mapping.Key) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), mapping.Key, "key must be lowercase")) + } + if seenExtraKeys.Has(mapping.Key) { + allErrs = append(allErrs, field.Duplicate(fldPath.Child("key"), mapping.Key)) + continue + } + seenExtraKeys.Insert(mapping.Key) + + if len(mapping.ValueExpression) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("valueExpression"), "valueExpression is required")) + continue + } + + compilationResult, err := compileClaimsCELExpression(compiler, &authenticationcel.ExtraMappingExpression{ + Key: mapping.Key, + Expression: mapping.ValueExpression, + }, fldPath.Child("valueExpression")) + + if err != nil { + allErrs = append(allErrs, err) + continue + } + + if compilationResult != nil { + extraCompilationResults = append(extraCompilationResults, *compilationResult) + } + } + + if structuredAuthnFeatureEnabled && len(extraCompilationResults) > 0 { + celMapper.Extra = authenticationcel.NewClaimsMapper(extraCompilationResults) + } + + return allErrs +} + +func validatePrefixClaimOrExpression(compiler authenticationcel.Compiler, mapping api.PrefixedClaimOrExpression, fldPath *field.Path, claimOrExpressionRequired, structuredAuthnFeatureEnabled bool) (*authenticationcel.CompilationResult, field.ErrorList) { + var allErrs field.ErrorList + + var compilationResult *authenticationcel.CompilationResult + switch { + case len(mapping.Expression) > 0 && len(mapping.Claim) > 0: + allErrs = append(allErrs, field.Invalid(fldPath, "", "claim and expression can't both be set")) + case len(mapping.Expression) == 0 && len(mapping.Claim) == 0 && claimOrExpressionRequired: + allErrs = append(allErrs, field.Required(fldPath, "claim or expression is required")) + case len(mapping.Expression) > 0: + var err *field.Error + + if mapping.Prefix != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("prefix"), *mapping.Prefix, "prefix can't be set when expression is set")) + } + compilationResult, err = compileClaimsCELExpression(compiler, &authenticationcel.ClaimMappingExpression{ + Expression: mapping.Expression, + }, fldPath.Child("expression")) + + if err != nil { + allErrs = append(allErrs, err) + } + + case len(mapping.Claim) > 0: + if mapping.Prefix == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("prefix"), "prefix is required when claim is set. 
It can be set to an empty string to disable prefixing")) + } + } + + return compilationResult, allErrs +} + +func validateUserValidationRules(compiler authenticationcel.Compiler, celMapper *authenticationcel.CELMapper, rules []api.UserValidationRule, fldPath *field.Path, structuredAuthnFeatureEnabled bool) field.ErrorList { + var allErrs field.ErrorList + var compilationResults []authenticationcel.CompilationResult + + if len(rules) > 0 && !structuredAuthnFeatureEnabled { + allErrs = append(allErrs, field.Invalid(fldPath, "", "user validation rules are not supported when StructuredAuthenticationConfiguration feature gate is disabled")) + } + + seenExpressions := sets.NewString() + for i, rule := range rules { + fldPath := fldPath.Index(i) + + if len(rule.Expression) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("expression"), "expression is required")) + continue + } + + if seenExpressions.Has(rule.Expression) { + allErrs = append(allErrs, field.Duplicate(fldPath.Child("expression"), rule.Expression)) + continue + } + seenExpressions.Insert(rule.Expression) + + compilationResult, err := compileUserCELExpression(compiler, &authenticationcel.UserValidationCondition{ + Expression: rule.Expression, + Message: rule.Message, + }, fldPath.Child("expression")) + + if err != nil { + allErrs = append(allErrs, err) + continue + } + + if compilationResult != nil { + compilationResults = append(compilationResults, *compilationResult) + } + } + + if structuredAuthnFeatureEnabled && len(compilationResults) > 0 { + celMapper.UserValidationRules = authenticationcel.NewUserMapper(compilationResults) + } + + return allErrs +} + +func compileClaimsCELExpression(compiler authenticationcel.Compiler, expression authenticationcel.ExpressionAccessor, fldPath *field.Path) (*authenticationcel.CompilationResult, *field.Error) { + compilationResult, err := compiler.CompileClaimsExpression(expression) + if err != nil { + return nil, convertCELErrorToValidationError(fldPath, expression, err) + } + return &compilationResult, nil +} + +func compileUserCELExpression(compiler authenticationcel.Compiler, expression authenticationcel.ExpressionAccessor, fldPath *field.Path) (*authenticationcel.CompilationResult, *field.Error) { + compilationResult, err := compiler.CompileUserExpression(expression) + if err != nil { + return nil, convertCELErrorToValidationError(fldPath, expression, err) + } + return &compilationResult, nil +} + +// ValidateAuthorizationConfiguration validates a given AuthorizationConfiguration. 
+func ValidateAuthorizationConfiguration(fldPath *field.Path, c *api.AuthorizationConfiguration, knownTypes sets.String, repeatableTypes sets.String) field.ErrorList { + allErrs := field.ErrorList{} + + if len(c.Authorizers) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("authorizers"), "at least one authorization mode must be defined")) + } + + seenAuthorizerTypes := sets.NewString() + seenAuthorizerNames := sets.NewString() + for i, a := range c.Authorizers { + fldPath := fldPath.Child("authorizers").Index(i) + aType := string(a.Type) + if aType == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("type"), "")) + continue + } + if !knownTypes.Has(aType) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("type"), aType, knownTypes.List())) + continue + } + if seenAuthorizerTypes.Has(aType) && !repeatableTypes.Has(aType) { + allErrs = append(allErrs, field.Duplicate(fldPath.Child("type"), aType)) + continue + } + seenAuthorizerTypes.Insert(aType) + + if len(a.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) + } else if seenAuthorizerNames.Has(a.Name) { + allErrs = append(allErrs, field.Duplicate(fldPath.Child("name"), a.Name)) + } else if errs := utilvalidation.IsDNS1123Subdomain(a.Name); len(errs) != 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), a.Name, fmt.Sprintf("authorizer name is invalid: %s", strings.Join(errs, ", ")))) + } + seenAuthorizerNames.Insert(a.Name) + + switch a.Type { + case api.TypeWebhook: + if a.Webhook == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("webhook"), "required when type=Webhook")) + continue + } + allErrs = append(allErrs, ValidateWebhookConfiguration(fldPath, a.Webhook)...) + default: + if a.Webhook != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("webhook"), "non-null", "may only be specified when type=Webhook")) + } + } + } + + return allErrs +} + +func ValidateWebhookConfiguration(fldPath *field.Path, c *api.WebhookConfiguration) field.ErrorList { + allErrs := field.ErrorList{} + + if c.Timeout.Duration == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("timeout"), "")) + } else if c.Timeout.Duration > 30*time.Second || c.Timeout.Duration < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("timeout"), c.Timeout.Duration.String(), "must be > 0s and <= 30s")) + } + + if c.AuthorizedTTL.Duration == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("authorizedTTL"), "")) + } else if c.AuthorizedTTL.Duration < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("authorizedTTL"), c.AuthorizedTTL.Duration.String(), "must be > 0s")) + } + + if c.UnauthorizedTTL.Duration == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("unauthorizedTTL"), "")) + } else if c.UnauthorizedTTL.Duration < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("unauthorizedTTL"), c.UnauthorizedTTL.Duration.String(), "must be > 0s")) + } + + switch c.SubjectAccessReviewVersion { + case "": + allErrs = append(allErrs, field.Required(fldPath.Child("subjectAccessReviewVersion"), "")) + case "v1": + _ = &v1.SubjectAccessReview{} + case "v1beta1": + _ = &v1beta1.SubjectAccessReview{} + default: + allErrs = append(allErrs, field.NotSupported(fldPath.Child("subjectAccessReviewVersion"), c.SubjectAccessReviewVersion, []string{"v1", "v1beta1"})) + } + + switch c.MatchConditionSubjectAccessReviewVersion { + case "": + if len(c.MatchConditions) > 0 { + allErrs = append(allErrs, 
field.Required(fldPath.Child("matchConditionSubjectAccessReviewVersion"), "required if match conditions are specified")) + } + case "v1": + _ = &v1.SubjectAccessReview{} + default: + allErrs = append(allErrs, field.NotSupported(fldPath.Child("matchConditionSubjectAccessReviewVersion"), c.MatchConditionSubjectAccessReviewVersion, []string{"v1"})) + } + + switch c.FailurePolicy { + case "": + allErrs = append(allErrs, field.Required(fldPath.Child("failurePolicy"), "")) + case api.FailurePolicyNoOpinion, api.FailurePolicyDeny: + default: + allErrs = append(allErrs, field.NotSupported(fldPath.Child("failurePolicy"), c.FailurePolicy, []string{"NoOpinion", "Deny"})) + } + + switch c.ConnectionInfo.Type { + case "": + allErrs = append(allErrs, field.Required(fldPath.Child("connectionInfo", "type"), "")) + case api.AuthorizationWebhookConnectionInfoTypeInCluster: + if c.ConnectionInfo.KubeConfigFile != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("connectionInfo", "kubeConfigFile"), *c.ConnectionInfo.KubeConfigFile, "can only be set when type=KubeConfigFile")) + } + case api.AuthorizationWebhookConnectionInfoTypeKubeConfigFile: + if c.ConnectionInfo.KubeConfigFile == nil || *c.ConnectionInfo.KubeConfigFile == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("connectionInfo", "kubeConfigFile"), "")) + } else if !filepath.IsAbs(*c.ConnectionInfo.KubeConfigFile) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("connectionInfo", "kubeConfigFile"), *c.ConnectionInfo.KubeConfigFile, "must be an absolute path")) + } else if info, err := os.Stat(*c.ConnectionInfo.KubeConfigFile); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("connectionInfo", "kubeConfigFile"), *c.ConnectionInfo.KubeConfigFile, fmt.Sprintf("error loading file: %v", err))) + } else if !info.Mode().IsRegular() { + allErrs = append(allErrs, field.Invalid(fldPath.Child("connectionInfo", "kubeConfigFile"), *c.ConnectionInfo.KubeConfigFile, "must be a regular file")) + } + default: + allErrs = append(allErrs, field.NotSupported(fldPath.Child("connectionInfo", "type"), c.ConnectionInfo, []string{api.AuthorizationWebhookConnectionInfoTypeInCluster, api.AuthorizationWebhookConnectionInfoTypeKubeConfigFile})) + } + + _, errs := compileMatchConditions(c.MatchConditions, fldPath, utilfeature.DefaultFeatureGate.Enabled(features.StructuredAuthorizationConfiguration)) + allErrs = append(allErrs, errs...) + + return allErrs +} + +// ValidateAndCompileMatchConditions validates a given webhook's matchConditions. +// This is exported for use in authz package. 
+func ValidateAndCompileMatchConditions(matchConditions []api.WebhookMatchCondition) (*authorizationcel.CELMatcher, field.ErrorList) { + return compileMatchConditions(matchConditions, nil, utilfeature.DefaultFeatureGate.Enabled(features.StructuredAuthorizationConfiguration)) +} + +func compileMatchConditions(matchConditions []api.WebhookMatchCondition, fldPath *field.Path, structuredAuthzFeatureEnabled bool) (*authorizationcel.CELMatcher, field.ErrorList) { + var allErrs field.ErrorList + // should fail when match conditions are used without feature enabled + if len(matchConditions) > 0 && !structuredAuthzFeatureEnabled { + allErrs = append(allErrs, field.Invalid(fldPath.Child("matchConditions"), "", "matchConditions are not supported when StructuredAuthorizationConfiguration feature gate is disabled")) + } + if len(matchConditions) > 64 { + allErrs = append(allErrs, field.TooMany(fldPath.Child("matchConditions"), len(matchConditions), 64)) + return nil, allErrs + } + + compiler := authorizationcel.NewCompiler(environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion())) + seenExpressions := sets.NewString() + var compilationResults []authorizationcel.CompilationResult + + for i, condition := range matchConditions { + fldPath := fldPath.Child("matchConditions").Index(i).Child("expression") + if len(strings.TrimSpace(condition.Expression)) == 0 { + allErrs = append(allErrs, field.Required(fldPath, "")) + continue + } + if seenExpressions.Has(condition.Expression) { + allErrs = append(allErrs, field.Duplicate(fldPath, condition.Expression)) + continue + } + seenExpressions.Insert(condition.Expression) + compilationResult, err := compileMatchConditionsExpression(fldPath, compiler, condition.Expression) + if err != nil { + allErrs = append(allErrs, err) + continue + } + compilationResults = append(compilationResults, compilationResult) + } + if len(compilationResults) == 0 { + return nil, allErrs + } + return &authorizationcel.CELMatcher{ + CompilationResults: compilationResults, + }, allErrs +} + +func compileMatchConditionsExpression(fldPath *field.Path, compiler authorizationcel.Compiler, expression string) (authorizationcel.CompilationResult, *field.Error) { + authzExpression := &authorizationcel.SubjectAccessReviewMatchCondition{ + Expression: expression, + } + compilationResult, err := compiler.CompileCELExpression(authzExpression) + if err != nil { + return compilationResult, convertCELErrorToValidationError(fldPath, authzExpression, err) + } + return compilationResult, nil +} + +func convertCELErrorToValidationError(fldPath *field.Path, expression authorizationcel.ExpressionAccessor, err error) *field.Error { + var celErr *cel.Error + if errors.As(err, &celErr) { + switch celErr.Type { + case cel.ErrorTypeRequired: + return field.Required(fldPath, celErr.Detail) + case cel.ErrorTypeInvalid: + return field.Invalid(fldPath, expression.GetExpression(), celErr.Detail) + default: + return field.InternalError(fldPath, celErr) + } + } + return field.InternalError(fldPath, fmt.Errorf("error is not cel error: %w", err)) +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go index 40c8b4a6e..77e5c3142 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go @@ -78,6 +78,147 @@ func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration return out } +// DeepCopyInto is an 
autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.JWT != nil { + in, out := &in.JWT, &out.JWT + *out = make([]JWTAuthenticator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfiguration. +func (in *AuthenticationConfiguration) DeepCopy() *AuthenticationConfiguration { + if in == nil { + return nil + } + out := new(AuthenticationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuthenticationConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizationConfiguration) DeepCopyInto(out *AuthorizationConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Authorizers != nil { + in, out := &in.Authorizers, &out.Authorizers + *out = make([]AuthorizerConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationConfiguration. +func (in *AuthorizationConfiguration) DeepCopy() *AuthorizationConfiguration { + if in == nil { + return nil + } + out := new(AuthorizationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuthorizationConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizerConfiguration) DeepCopyInto(out *AuthorizerConfiguration) { + *out = *in + if in.Webhook != nil { + in, out := &in.Webhook, &out.Webhook + *out = new(WebhookConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizerConfiguration. +func (in *AuthorizerConfiguration) DeepCopy() *AuthorizerConfiguration { + if in == nil { + return nil + } + out := new(AuthorizerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClaimMappings) DeepCopyInto(out *ClaimMappings) { + *out = *in + in.Username.DeepCopyInto(&out.Username) + in.Groups.DeepCopyInto(&out.Groups) + out.UID = in.UID + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make([]ExtraMapping, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimMappings. +func (in *ClaimMappings) DeepCopy() *ClaimMappings { + if in == nil { + return nil + } + out := new(ClaimMappings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClaimOrExpression) DeepCopyInto(out *ClaimOrExpression) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimOrExpression. +func (in *ClaimOrExpression) DeepCopy() *ClaimOrExpression { + if in == nil { + return nil + } + out := new(ClaimOrExpression) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClaimValidationRule) DeepCopyInto(out *ClaimValidationRule) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimValidationRule. +func (in *ClaimValidationRule) DeepCopy() *ClaimValidationRule { + if in == nil { + return nil + } + out := new(ClaimValidationRule) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Connection) DeepCopyInto(out *Connection) { *out = *in @@ -148,6 +289,92 @@ func (in *EgressSelectorConfiguration) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtraMapping) DeepCopyInto(out *ExtraMapping) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraMapping. +func (in *ExtraMapping) DeepCopy() *ExtraMapping { + if in == nil { + return nil + } + out := new(ExtraMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Issuer) DeepCopyInto(out *Issuer) { + *out = *in + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Issuer. +func (in *Issuer) DeepCopy() *Issuer { + if in == nil { + return nil + } + out := new(Issuer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JWTAuthenticator) DeepCopyInto(out *JWTAuthenticator) { + *out = *in + in.Issuer.DeepCopyInto(&out.Issuer) + if in.ClaimValidationRules != nil { + in, out := &in.ClaimValidationRules, &out.ClaimValidationRules + *out = make([]ClaimValidationRule, len(*in)) + copy(*out, *in) + } + in.ClaimMappings.DeepCopyInto(&out.ClaimMappings) + if in.UserValidationRules != nil { + in, out := &in.UserValidationRules, &out.UserValidationRules + *out = make([]UserValidationRule, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JWTAuthenticator. +func (in *JWTAuthenticator) DeepCopy() *JWTAuthenticator { + if in == nil { + return nil + } + out := new(JWTAuthenticator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrefixedClaimOrExpression) DeepCopyInto(out *PrefixedClaimOrExpression) { + *out = *in + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixedClaimOrExpression. 
+func (in *PrefixedClaimOrExpression) DeepCopy() *PrefixedClaimOrExpression { + if in == nil { + return nil + } + out := new(PrefixedClaimOrExpression) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TCPTransport) DeepCopyInto(out *TCPTransport) { *out = *in @@ -252,3 +479,81 @@ func (in *UDSTransport) DeepCopy() *UDSTransport { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserValidationRule) DeepCopyInto(out *UserValidationRule) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserValidationRule. +func (in *UserValidationRule) DeepCopy() *UserValidationRule { + if in == nil { + return nil + } + out := new(UserValidationRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookConfiguration) DeepCopyInto(out *WebhookConfiguration) { + *out = *in + out.AuthorizedTTL = in.AuthorizedTTL + out.UnauthorizedTTL = in.UnauthorizedTTL + out.Timeout = in.Timeout + in.ConnectionInfo.DeepCopyInto(&out.ConnectionInfo) + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]WebhookMatchCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConfiguration. +func (in *WebhookConfiguration) DeepCopy() *WebhookConfiguration { + if in == nil { + return nil + } + out := new(WebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookConnectionInfo) DeepCopyInto(out *WebhookConnectionInfo) { + *out = *in + if in.KubeConfigFile != nil { + in, out := &in.KubeConfigFile, &out.KubeConfigFile + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConnectionInfo. +func (in *WebhookConnectionInfo) DeepCopy() *WebhookConnectionInfo { + if in == nil { + return nil + } + out := new(WebhookConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookMatchCondition) DeepCopyInto(out *WebhookMatchCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookMatchCondition. +func (in *WebhookMatchCondition) DeepCopy() *WebhookMatchCondition { + if in == nil { + return nil + } + out := new(WebhookMatchCondition) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/types.go b/vendor/k8s.io/apiserver/pkg/apis/audit/types.go index f369b2229..17a398ed8 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/types.go @@ -235,10 +235,10 @@ type PolicyRule struct { Namespaces []string // NonResourceURLs is a set of URL paths that should be audited. - // *s are allowed, but only as the full, final step in the path. + // `*`s are allowed, but only as the full, final step in the path. 
// Examples: - // "/metrics" - Log requests for apiserver metrics - // "/healthz*" - Log all health checks + // `/metrics` - Log requests for apiserver metrics + // `/healthz*` - Log all health checks // +optional NonResourceURLs []string @@ -269,11 +269,11 @@ type GroupResources struct { // Resources is a list of resources this rule applies to. // // For example: - // 'pods' matches pods. - // 'pods/log' matches the log subresource of pods. - // '*' matches all resources and their subresources. - // 'pods/*' matches all subresources of pods. - // '*/scale' matches all scale subresources. + // - `pods` matches pods. + // - `pods/log` matches the log subresource of pods. + // - `*` matches all resources and their subresources. + // - `pods/*` matches all subresources of pods. + // - `*/scale` matches all scale subresources. // // If wildcard is present, the validation rule will ensure resources do not // overlap with each other. diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto index 8cdb12cdf..13c41e54c 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto @@ -129,11 +129,11 @@ message GroupResources { // Resources is a list of resources this rule applies to. // // For example: - // 'pods' matches pods. - // 'pods/log' matches the log subresource of pods. - // '*' matches all resources and their subresources. - // 'pods/*' matches all subresources of pods. - // '*/scale' matches all scale subresources. + // - `pods` matches pods. + // - `pods/log` matches the log subresource of pods. + // - `*` matches all resources and their subresources. + // - `pods/*` matches all subresources of pods. + // - `*/scale` matches all scale subresources. // // If wildcard is present, the validation rule will ensure resources do not // overlap with each other. @@ -248,10 +248,10 @@ message PolicyRule { repeated string namespaces = 6; // NonResourceURLs is a set of URL paths that should be audited. - // *s are allowed, but only as the full, final step in the path. + // `*`s are allowed, but only as the full, final step in the path. // Examples: - // "/metrics" - Log requests for apiserver metrics - // "/healthz*" - Log all health checks + // - `/metrics` - Log requests for apiserver metrics + // - `/healthz*` - Log all health checks // +optional repeated string nonResourceURLs = 7; diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/types.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/types.go index 27f4729ea..151c56c68 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/types.go @@ -229,10 +229,10 @@ type PolicyRule struct { Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,6,rep,name=namespaces"` // NonResourceURLs is a set of URL paths that should be audited. - // *s are allowed, but only as the full, final step in the path. + // `*`s are allowed, but only as the full, final step in the path. // Examples: - // "/metrics" - Log requests for apiserver metrics - // "/healthz*" - Log all health checks + // - `/metrics` - Log requests for apiserver metrics + // - `/healthz*` - Log all health checks // +optional NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,7,rep,name=nonResourceURLs"` @@ -263,11 +263,11 @@ type GroupResources struct { // Resources is a list of resources this rule applies to. // // For example: - // 'pods' matches pods. 
- // 'pods/log' matches the log subresource of pods. - // '*' matches all resources and their subresources. - // 'pods/*' matches all subresources of pods. - // '*/scale' matches all scale subresources. + // - `pods` matches pods. + // - `pods/log` matches the log subresource of pods. + // - `*` matches all resources and their subresources. + // - `pods/*` matches all subresources of pods. + // - `*/scale` matches all scale subresources. // // If wildcard is present, the validation rule will ensure resources do not // overlap with each other. diff --git a/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go b/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go index b037371e3..aca968de6 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go +++ b/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go @@ -19,11 +19,11 @@ package bootstrap import ( coordinationv1 "k8s.io/api/coordination/v1" corev1 "k8s.io/api/core/v1" - flowcontrol "k8s.io/api/flowcontrol/v1beta3" + flowcontrol "k8s.io/api/flowcontrol/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/apiserver/pkg/authentication/user" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) // The objects that define an apiserver's initial behavior. The @@ -90,8 +90,8 @@ var ( flowcontrol.PriorityLevelConfigurationSpec{ Type: flowcontrol.PriorityLevelEnablementExempt, Exempt: &flowcontrol.ExemptPriorityLevelConfiguration{ - NominalConcurrencyShares: pointer.Int32(0), - LendablePercent: pointer.Int32(0), + NominalConcurrencyShares: ptr.To(int32(0)), + LendablePercent: ptr.To(int32(0)), }, }, ) @@ -100,8 +100,8 @@ var ( flowcontrol.PriorityLevelConfigurationSpec{ Type: flowcontrol.PriorityLevelEnablementLimited, Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ - NominalConcurrencyShares: 5, - LendablePercent: pointer.Int32(0), + NominalConcurrencyShares: ptr.To(int32(5)), + LendablePercent: ptr.To(int32(0)), LimitResponse: flowcontrol.LimitResponse{ Type: flowcontrol.LimitResponseTypeReject, }, @@ -173,8 +173,8 @@ var ( flowcontrol.PriorityLevelConfigurationSpec{ Type: flowcontrol.PriorityLevelEnablementLimited, Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ - NominalConcurrencyShares: 30, - LendablePercent: pointer.Int32(33), + NominalConcurrencyShares: ptr.To(int32(30)), + LendablePercent: ptr.To(int32(33)), LimitResponse: flowcontrol.LimitResponse{ Type: flowcontrol.LimitResponseTypeQueue, Queuing: &flowcontrol.QueuingConfiguration{ @@ -190,8 +190,8 @@ var ( flowcontrol.PriorityLevelConfigurationSpec{ Type: flowcontrol.PriorityLevelEnablementLimited, Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ - NominalConcurrencyShares: 40, - LendablePercent: pointer.Int32(25), + NominalConcurrencyShares: ptr.To(int32(40)), + LendablePercent: ptr.To(int32(25)), LimitResponse: flowcontrol.LimitResponse{ Type: flowcontrol.LimitResponseTypeQueue, Queuing: &flowcontrol.QueuingConfiguration{ @@ -208,8 +208,8 @@ var ( flowcontrol.PriorityLevelConfigurationSpec{ Type: flowcontrol.PriorityLevelEnablementLimited, Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ - NominalConcurrencyShares: 10, - LendablePercent: pointer.Int32(0), + NominalConcurrencyShares: ptr.To(int32(10)), + LendablePercent: ptr.To(int32(0)), LimitResponse: flowcontrol.LimitResponse{ Type: flowcontrol.LimitResponseTypeQueue, Queuing: &flowcontrol.QueuingConfiguration{ @@ -226,8 +226,8 @@ var ( flowcontrol.PriorityLevelConfigurationSpec{ Type: 
flowcontrol.PriorityLevelEnablementLimited, Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ - NominalConcurrencyShares: 40, - LendablePercent: pointer.Int32(50), + NominalConcurrencyShares: ptr.To(int32(40)), + LendablePercent: ptr.To(int32(50)), LimitResponse: flowcontrol.LimitResponse{ Type: flowcontrol.LimitResponseTypeQueue, Queuing: &flowcontrol.QueuingConfiguration{ @@ -244,8 +244,8 @@ var ( flowcontrol.PriorityLevelConfigurationSpec{ Type: flowcontrol.PriorityLevelEnablementLimited, Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ - NominalConcurrencyShares: 100, - LendablePercent: pointer.Int32(90), + NominalConcurrencyShares: ptr.To(int32(100)), + LendablePercent: ptr.To(int32(90)), LimitResponse: flowcontrol.LimitResponse{ Type: flowcontrol.LimitResponseTypeQueue, Queuing: &flowcontrol.QueuingConfiguration{ @@ -262,8 +262,8 @@ var ( flowcontrol.PriorityLevelConfigurationSpec{ Type: flowcontrol.PriorityLevelEnablementLimited, Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ - NominalConcurrencyShares: 20, - LendablePercent: pointer.Int32(50), + NominalConcurrencyShares: ptr.To(int32(20)), + LendablePercent: ptr.To(int32(50)), LimitResponse: flowcontrol.LimitResponse{ Type: flowcontrol.LimitResponseTypeQueue, Queuing: &flowcontrol.QueuingConfiguration{ diff --git a/vendor/k8s.io/apiserver/pkg/authentication/cel/compile.go b/vendor/k8s.io/apiserver/pkg/authentication/cel/compile.go new file mode 100644 index 000000000..3bcff5e90 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/cel/compile.go @@ -0,0 +1,154 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cel + +import ( + "fmt" + + "github.com/google/cel-go/cel" + + "k8s.io/apimachinery/pkg/util/version" + apiservercel "k8s.io/apiserver/pkg/cel" + "k8s.io/apiserver/pkg/cel/environment" +) + +const ( + claimsVarName = "claims" + userVarName = "user" +) + +// compiler implements the Compiler interface. +type compiler struct { + // varEnvs is a map of CEL environments, keyed by the name of the CEL variable. + // The CEL variable is available to the expression. + // We have 2 environments, one for claims and one for user. + varEnvs map[string]*environment.EnvSet +} + +// NewCompiler returns a new Compiler. +func NewCompiler(env *environment.EnvSet) Compiler { + return &compiler{ + varEnvs: mustBuildEnvs(env), + } +} + +// CompileClaimsExpression compiles the given expressionAccessor into a CEL program that can be evaluated. +// The claims CEL variable is available to the expression. +func (c compiler) CompileClaimsExpression(expressionAccessor ExpressionAccessor) (CompilationResult, error) { + return c.compile(expressionAccessor, claimsVarName) +} + +// CompileUserExpression compiles the given expressionAccessor into a CEL program that can be evaluated. +// The user CEL variable is available to the expression. 
+func (c compiler) CompileUserExpression(expressionAccessor ExpressionAccessor) (CompilationResult, error) { + return c.compile(expressionAccessor, userVarName) +} + +func (c compiler) compile(expressionAccessor ExpressionAccessor, envVarName string) (CompilationResult, error) { + resultError := func(errorString string, errType apiservercel.ErrorType) (CompilationResult, error) { + return CompilationResult{}, &apiservercel.Error{ + Type: errType, + Detail: errorString, + } + } + + env, err := c.varEnvs[envVarName].Env(environment.StoredExpressions) + if err != nil { + return resultError(fmt.Sprintf("unexpected error loading CEL environment: %v", err), apiservercel.ErrorTypeInternal) + } + + ast, issues := env.Compile(expressionAccessor.GetExpression()) + if issues != nil { + return resultError("compilation failed: "+issues.String(), apiservercel.ErrorTypeInvalid) + } + + found := false + returnTypes := expressionAccessor.ReturnTypes() + for _, returnType := range returnTypes { + if ast.OutputType() == returnType || cel.AnyType == returnType { + found = true + break + } + } + if !found { + var reason string + if len(returnTypes) == 1 { + reason = fmt.Sprintf("must evaluate to %v", returnTypes[0].String()) + } else { + reason = fmt.Sprintf("must evaluate to one of %v", returnTypes) + } + + return resultError(reason, apiservercel.ErrorTypeInvalid) + } + + if _, err = cel.AstToCheckedExpr(ast); err != nil { + // should be impossible since env.Compile returned no issues + return resultError("unexpected compilation error: "+err.Error(), apiservercel.ErrorTypeInternal) + } + prog, err := env.Program(ast) + if err != nil { + return resultError("program instantiation failed: "+err.Error(), apiservercel.ErrorTypeInternal) + } + + return CompilationResult{ + Program: prog, + ExpressionAccessor: expressionAccessor, + }, nil +} + +func buildUserType() *apiservercel.DeclType { + field := func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField { + return apiservercel.NewDeclField(name, declType, required, nil, nil) + } + fields := func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField { + result := make(map[string]*apiservercel.DeclField, len(fields)) + for _, f := range fields { + result[f.Name] = f + } + return result + } + + return apiservercel.NewObjectType("kubernetes.UserInfo", fields( + field("username", apiservercel.StringType, false), + field("uid", apiservercel.StringType, false), + field("groups", apiservercel.NewListType(apiservercel.StringType, -1), false), + field("extra", apiservercel.NewMapType(apiservercel.StringType, apiservercel.NewListType(apiservercel.StringType, -1), -1), false), + )) +} + +func mustBuildEnvs(baseEnv *environment.EnvSet) map[string]*environment.EnvSet { + buildEnvSet := func(envOpts []cel.EnvOption, declTypes []*apiservercel.DeclType) *environment.EnvSet { + env, err := baseEnv.Extend(environment.VersionedOptions{ + IntroducedVersion: version.MajorMinor(1, 0), + EnvOptions: envOpts, + DeclTypes: declTypes, + }) + if err != nil { + panic(fmt.Sprintf("environment misconfigured: %v", err)) + } + return env + } + + userType := buildUserType() + claimsType := apiservercel.NewMapType(apiservercel.StringType, apiservercel.AnyType, -1) + + envs := make(map[string]*environment.EnvSet, 2) // build two environments, one for claims and one for user + envs[claimsVarName] = buildEnvSet([]cel.EnvOption{cel.Variable(claimsVarName, claimsType.CelType())}, []*apiservercel.DeclType{claimsType}) + envs[userVarName] = 
buildEnvSet([]cel.EnvOption{cel.Variable(userVarName, userType.CelType())}, []*apiservercel.DeclType{userType}) + + return envs +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/cel/interface.go b/vendor/k8s.io/apiserver/pkg/authentication/cel/interface.go new file mode 100644 index 000000000..7ec0c9af6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/cel/interface.go @@ -0,0 +1,147 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cel contains the CEL related interfaces and structs for authentication. +package cel + +import ( + "context" + + celgo "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/types/ref" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +// ExpressionAccessor is an interface that provides access to a CEL expression. +type ExpressionAccessor interface { + GetExpression() string + ReturnTypes() []*celgo.Type +} + +// CompilationResult represents a compiled validations expression. +type CompilationResult struct { + Program celgo.Program + ExpressionAccessor ExpressionAccessor +} + +// EvaluationResult contains the minimal required fields and metadata of a cel evaluation +type EvaluationResult struct { + EvalResult ref.Val + ExpressionAccessor ExpressionAccessor +} + +// Compiler provides a CEL expression compiler configured with the desired authentication related CEL variables. +type Compiler interface { + CompileClaimsExpression(expressionAccessor ExpressionAccessor) (CompilationResult, error) + CompileUserExpression(expressionAccessor ExpressionAccessor) (CompilationResult, error) +} + +// ClaimsMapper provides a CEL expression mapper configured with the claims CEL variable. +type ClaimsMapper interface { + // EvalClaimMapping evaluates the given claim mapping expression and returns a EvaluationResult. + // This is used for username, groups and uid claim mapping that contains a single expression. + EvalClaimMapping(ctx context.Context, claims *unstructured.Unstructured) (EvaluationResult, error) + // EvalClaimMappings evaluates the given expressions and returns a list of EvaluationResult. + // This is used for extra claim mapping and claim validation that contains a list of expressions. + EvalClaimMappings(ctx context.Context, claims *unstructured.Unstructured) ([]EvaluationResult, error) +} + +// UserMapper provides a CEL expression mapper configured with the user CEL variable. +type UserMapper interface { + // EvalUser evaluates the given user expressions and returns a list of EvaluationResult. + // This is used for user validation that contains a list of expressions. + EvalUser(ctx context.Context, userInfo *unstructured.Unstructured) ([]EvaluationResult, error) +} + +var _ ExpressionAccessor = &ClaimMappingExpression{} + +// ClaimMappingExpression is a CEL expression that maps a claim. +type ClaimMappingExpression struct { + Expression string +} + +// GetExpression returns the CEL expression. 
+func (v *ClaimMappingExpression) GetExpression() string { + return v.Expression +} + +// ReturnTypes returns the CEL expression return types. +func (v *ClaimMappingExpression) ReturnTypes() []*celgo.Type { + // return types is only used for validation. The claims variable that's available + // to the claim mapping expressions is a map[string]interface{}, so we can't + // really know what the return type is during compilation. Strict type checking + // is done during evaluation. + return []*celgo.Type{celgo.AnyType} +} + +var _ ExpressionAccessor = &ClaimValidationCondition{} + +// ClaimValidationCondition is a CEL expression that validates a claim. +type ClaimValidationCondition struct { + Expression string + Message string +} + +// GetExpression returns the CEL expression. +func (v *ClaimValidationCondition) GetExpression() string { + return v.Expression +} + +// ReturnTypes returns the CEL expression return types. +func (v *ClaimValidationCondition) ReturnTypes() []*celgo.Type { + return []*celgo.Type{celgo.BoolType} +} + +var _ ExpressionAccessor = &ExtraMappingExpression{} + +// ExtraMappingExpression is a CEL expression that maps an extra to a list of values. +type ExtraMappingExpression struct { + Key string + Expression string +} + +// GetExpression returns the CEL expression. +func (v *ExtraMappingExpression) GetExpression() string { + return v.Expression +} + +// ReturnTypes returns the CEL expression return types. +func (v *ExtraMappingExpression) ReturnTypes() []*celgo.Type { + // return types is only used for validation. The claims variable that's available + // to the claim mapping expressions is a map[string]interface{}, so we can't + // really know what the return type is during compilation. Strict type checking + // is done during evaluation. + return []*celgo.Type{celgo.AnyType} +} + +var _ ExpressionAccessor = &UserValidationCondition{} + +// UserValidationCondition is a CEL expression that validates a User. +type UserValidationCondition struct { + Expression string + Message string +} + +// GetExpression returns the CEL expression. +func (v *UserValidationCondition) GetExpression() string { + return v.Expression +} + +// ReturnTypes returns the CEL expression return types. +func (v *UserValidationCondition) ReturnTypes() []*celgo.Type { + return []*celgo.Type{celgo.BoolType} +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/cel/mapper.go b/vendor/k8s.io/apiserver/pkg/authentication/cel/mapper.go new file mode 100644 index 000000000..ab308bb7f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/cel/mapper.go @@ -0,0 +1,97 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cel + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +var _ ClaimsMapper = &mapper{} +var _ UserMapper = &mapper{} + +// mapper implements the ClaimsMapper and UserMapper interface. 
+type mapper struct { + compilationResults []CompilationResult +} + +// CELMapper is a struct that holds the compiled expressions for +// username, groups, uid, extra, claimValidation and userValidation +type CELMapper struct { + Username ClaimsMapper + Groups ClaimsMapper + UID ClaimsMapper + Extra ClaimsMapper + ClaimValidationRules ClaimsMapper + UserValidationRules UserMapper +} + +// NewClaimsMapper returns a new ClaimsMapper. +func NewClaimsMapper(compilationResults []CompilationResult) ClaimsMapper { + return &mapper{ + compilationResults: compilationResults, + } +} + +// NewUserMapper returns a new UserMapper. +func NewUserMapper(compilationResults []CompilationResult) UserMapper { + return &mapper{ + compilationResults: compilationResults, + } +} + +// EvalClaimMapping evaluates the given claim mapping expression and returns a EvaluationResult. +func (m *mapper) EvalClaimMapping(ctx context.Context, claims *unstructured.Unstructured) (EvaluationResult, error) { + results, err := m.eval(ctx, map[string]interface{}{claimsVarName: claims.Object}) + if err != nil { + return EvaluationResult{}, err + } + if len(results) != 1 { + return EvaluationResult{}, fmt.Errorf("expected 1 evaluation result, got %d", len(results)) + } + return results[0], nil +} + +// EvalClaimMappings evaluates the given expressions and returns a list of EvaluationResult. +func (m *mapper) EvalClaimMappings(ctx context.Context, claims *unstructured.Unstructured) ([]EvaluationResult, error) { + return m.eval(ctx, map[string]interface{}{claimsVarName: claims.Object}) +} + +// EvalUser evaluates the given user expressions and returns a list of EvaluationResult. +func (m *mapper) EvalUser(ctx context.Context, userInfo *unstructured.Unstructured) ([]EvaluationResult, error) { + return m.eval(ctx, map[string]interface{}{userVarName: userInfo.Object}) +} + +func (m *mapper) eval(ctx context.Context, input map[string]interface{}) ([]EvaluationResult, error) { + evaluations := make([]EvaluationResult, len(m.compilationResults)) + + for i, compilationResult := range m.compilationResults { + var evaluation = &evaluations[i] + evaluation.ExpressionAccessor = compilationResult.ExpressionAccessor + + evalResult, _, err := compilationResult.Program.ContextEval(ctx, input) + if err != nil { + return nil, fmt.Errorf("expression '%s' resulted in error: %w", compilationResult.ExpressionAccessor.GetExpression(), err) + } + + evaluation.EvalResult = evalResult + } + + return evaluations, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go index 63010aadc..d67c53547 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go @@ -148,6 +148,33 @@ func (a *Authenticator) AuthenticateRequest(req *http.Request) (*authenticator.R } } + /* + kubernetes mutual (2-way) x509 between client and apiserver: + + 1. apiserver sending its apiserver certificate along with its publickey to client + 2. client verifies the apiserver certificate sent against its cluster certificate authority data + 3. client sending its client certificate along with its public key to the apiserver + >4. 
apiserver verifies the client certificate sent against its cluster certificate authority data + + description: + here, with this function, + client certificate and pub key sent during the handshake process + are verified by apiserver against its cluster certificate authority data + + normal args related to this stage: + --client-ca-file string If set, any request presenting a client certificate signed by + one of the authorities in the client-ca-file is authenticated with an identity + corresponding to the CommonName of the client certificate. + + (retrievable from "kube-apiserver --help" command) + (suggested by @deads2k) + + see also: + - for the step 1, see: staging/src/k8s.io/apiserver/pkg/server/options/serving.go + - for the step 2, see: staging/src/k8s.io/client-go/transport/transport.go + - for the step 3, see: staging/src/k8s.io/client-go/transport/transport.go + */ + remaining := req.TLS.PeerCertificates[0].NotAfter.Sub(time.Now()) clientCertificateExpirationHistogram.WithContext(req.Context()).Observe(remaining.Seconds()) chains, err := req.TLS.PeerCertificates[0].Verify(optsCopy) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go b/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go index f0dc07676..c55fe5d2e 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go @@ -36,12 +36,21 @@ const ( ServiceAccountUsernameSeparator = ":" ServiceAccountGroupPrefix = "system:serviceaccounts:" AllServiceAccountsGroup = "system:serviceaccounts" + // CredentialIDKey is the key used in a user's "extra" to specify the unique + // identifier for this identity document). + CredentialIDKey = "authentication.kubernetes.io/credential-id" // PodNameKey is the key used in a user's "extra" to specify the pod name of // the authenticating request. PodNameKey = "authentication.kubernetes.io/pod-name" // PodUIDKey is the key used in a user's "extra" to specify the pod UID of // the authenticating request. PodUIDKey = "authentication.kubernetes.io/pod-uid" + // NodeNameKey is the key used in a user's "extra" to specify the node name of + // the authenticating request. + NodeNameKey = "authentication.kubernetes.io/node-name" + // NodeUIDKey is the key used in a user's "extra" to specify the node UID of + // the authenticating request. + NodeUIDKey = "authentication.kubernetes.io/node-uid" ) // MakeUsername generates a username from the given namespace and ServiceAccount name. 
@@ -119,6 +128,8 @@ func UserInfo(namespace, name, uid string) user.Info { type ServiceAccountInfo struct { Name, Namespace, UID string PodName, PodUID string + CredentialID string + NodeName, NodeUID string } func (sa *ServiceAccountInfo) UserInfo() user.Info { @@ -127,15 +138,43 @@ func (sa *ServiceAccountInfo) UserInfo() user.Info { UID: sa.UID, Groups: MakeGroupNames(sa.Namespace), } + if sa.PodName != "" && sa.PodUID != "" { - info.Extra = map[string][]string{ - PodNameKey: {sa.PodName}, - PodUIDKey: {sa.PodUID}, + if info.Extra == nil { + info.Extra = make(map[string][]string) + } + info.Extra[PodNameKey] = []string{sa.PodName} + info.Extra[PodUIDKey] = []string{sa.PodUID} + } + if sa.CredentialID != "" { + if info.Extra == nil { + info.Extra = make(map[string][]string) + } + info.Extra[CredentialIDKey] = []string{sa.CredentialID} + } + if sa.NodeName != "" { + if info.Extra == nil { + info.Extra = make(map[string][]string) + } + info.Extra[NodeNameKey] = []string{sa.NodeName} + // node UID is optional and will only be set if the node name is set + if sa.NodeUID != "" { + info.Extra[NodeUIDKey] = []string{sa.NodeUID} } } + return info } +// CredentialIDForJTI converts a given JTI string into a credential identifier for use in a +// users 'extra' info. +func CredentialIDForJTI(jti string) string { + if len(jti) == 0 { + return "" + } + return "JTI=" + jti +} + // IsServiceAccountToken returns true if the secret is a valid api token for the service account func IsServiceAccountToken(secret *v1.Secret, sa *v1.ServiceAccount) bool { if secret.Type != v1.SecretTypeServiceAccountToken { diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go index d1ead25db..a8355ee61 100644 --- a/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go +++ b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go @@ -54,6 +54,7 @@ func (c DelegatingAuthorizerConfig) New() (authorizer.Authorizer, error) { c.AllowCacheTTL, c.DenyCacheTTL, *c.WebhookRetryBackoff, + authorizer.DecisionNoOpinion, webhook.AuthorizerMetrics{ RecordRequestTotal: RecordRequestTotal, RecordRequestLatency: RecordRequestLatency, diff --git a/vendor/k8s.io/apiserver/pkg/authorization/cel/compile.go b/vendor/k8s.io/apiserver/pkg/authorization/cel/compile.go new file mode 100644 index 000000000..0d9293dd7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authorization/cel/compile.go @@ -0,0 +1,214 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cel + +import ( + "fmt" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/types/ref" + + authorizationv1 "k8s.io/api/authorization/v1" + "k8s.io/apimachinery/pkg/util/version" + apiservercel "k8s.io/apiserver/pkg/cel" + "k8s.io/apiserver/pkg/cel/environment" +) + +const ( + subjectAccessReviewRequestVarName = "request" +) + +// CompilationResult represents a compiled authorization cel expression. 
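
Tying the serviceaccount changes above together, a small sketch (not part of this patch) of how the new pod, credential-id and node extra keys surface on a token identity; every field value here is hypothetical.

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/authentication/serviceaccount"
)

func main() {
	sa := &serviceaccount.ServiceAccountInfo{
		Name:      "default",
		Namespace: "kube-system",
		UID:       "b6a2b4c0-0000-0000-0000-000000000000",
		PodName:   "coredns-0",
		PodUID:    "3d6f8a10-0000-0000-0000-000000000000",
		// CredentialIDForJTI prefixes the token's JTI claim with "JTI=".
		CredentialID: serviceaccount.CredentialIDForJTI("8e1aff1d-example"),
		NodeName:     "node-1",
	}
	info := sa.UserInfo()
	// Username is system:serviceaccount:kube-system:default; Extra now carries
	// the pod, credential-id and node keys defined above.
	fmt.Println(info.GetName())
	for k, v := range info.GetExtra() {
		fmt.Println(k, v)
	}
}
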
+type CompilationResult struct { + Program cel.Program + ExpressionAccessor ExpressionAccessor +} + +// EvaluationResult contains the minimal required fields and metadata of a cel evaluation +type EvaluationResult struct { + EvalResult ref.Val + ExpressionAccessor ExpressionAccessor +} + +// Compiler is an interface for compiling CEL expressions with the desired environment mode. +type Compiler interface { + CompileCELExpression(expressionAccessor ExpressionAccessor) (CompilationResult, error) +} + +type compiler struct { + envSet *environment.EnvSet +} + +// NewCompiler returns a new Compiler. +func NewCompiler(env *environment.EnvSet) Compiler { + return &compiler{ + envSet: mustBuildEnv(env), + } +} + +func (c compiler) CompileCELExpression(expressionAccessor ExpressionAccessor) (CompilationResult, error) { + resultError := func(errorString string, errType apiservercel.ErrorType) (CompilationResult, error) { + err := &apiservercel.Error{ + Type: errType, + Detail: errorString, + } + return CompilationResult{ + ExpressionAccessor: expressionAccessor, + }, err + } + env, err := c.envSet.Env(environment.StoredExpressions) + if err != nil { + return resultError(fmt.Sprintf("unexpected error loading CEL environment: %v", err), apiservercel.ErrorTypeInternal) + } + ast, issues := env.Compile(expressionAccessor.GetExpression()) + if issues != nil { + return resultError("compilation failed: "+issues.String(), apiservercel.ErrorTypeInvalid) + } + found := false + returnTypes := expressionAccessor.ReturnTypes() + for _, returnType := range returnTypes { + if ast.OutputType() == returnType { + found = true + break + } + } + if !found { + var reason string + if len(returnTypes) == 1 { + reason = fmt.Sprintf("must evaluate to %v but got %v", returnTypes[0].String(), ast.OutputType()) + } else { + reason = fmt.Sprintf("must evaluate to one of %v", returnTypes) + } + + return resultError(reason, apiservercel.ErrorTypeInvalid) + } + _, err = cel.AstToCheckedExpr(ast) + if err != nil { + // should be impossible since env.Compile returned no issues + return resultError("unexpected compilation error: "+err.Error(), apiservercel.ErrorTypeInternal) + } + prog, err := env.Program(ast) + if err != nil { + return resultError("program instantiation failed: "+err.Error(), apiservercel.ErrorTypeInternal) + } + return CompilationResult{ + Program: prog, + ExpressionAccessor: expressionAccessor, + }, nil +} + +func mustBuildEnv(baseEnv *environment.EnvSet) *environment.EnvSet { + field := func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField { + return apiservercel.NewDeclField(name, declType, required, nil, nil) + } + fields := func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField { + result := make(map[string]*apiservercel.DeclField, len(fields)) + for _, f := range fields { + result[f.Name] = f + } + return result + } + subjectAccessReviewSpecRequestType := buildRequestType(field, fields) + extended, err := baseEnv.Extend( + environment.VersionedOptions{ + // we record this as 1.0 since it was available in the + // first version that supported this feature + IntroducedVersion: version.MajorMinor(1, 0), + EnvOptions: []cel.EnvOption{ + cel.Variable(subjectAccessReviewRequestVarName, subjectAccessReviewSpecRequestType.CelType()), + }, + DeclTypes: []*apiservercel.DeclType{ + subjectAccessReviewSpecRequestType, + }, + }, + ) + if err != nil { + panic(fmt.Sprintf("environment misconfigured: %v", err)) + } + + return extended +} + +// buildRequestType generates 
a DeclType for SubjectAccessReviewSpec. +// if attributes are added here, also add to convertObjectToUnstructured. +func buildRequestType(field func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField, fields func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField) *apiservercel.DeclType { + resourceAttributesType := buildResourceAttributesType(field, fields) + nonResourceAttributesType := buildNonResourceAttributesType(field, fields) + return apiservercel.NewObjectType("kubernetes.SubjectAccessReviewSpec", fields( + field("resourceAttributes", resourceAttributesType, false), + field("nonResourceAttributes", nonResourceAttributesType, false), + field("user", apiservercel.StringType, false), + field("groups", apiservercel.NewListType(apiservercel.StringType, -1), false), + field("extra", apiservercel.NewMapType(apiservercel.StringType, apiservercel.NewListType(apiservercel.StringType, -1), -1), false), + field("uid", apiservercel.StringType, false), + )) +} + +// buildResourceAttributesType generates a DeclType for ResourceAttributes. +// if attributes are added here, also add to convertObjectToUnstructured. +func buildResourceAttributesType(field func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField, fields func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField) *apiservercel.DeclType { + return apiservercel.NewObjectType("kubernetes.ResourceAttributes", fields( + field("namespace", apiservercel.StringType, false), + field("verb", apiservercel.StringType, false), + field("group", apiservercel.StringType, false), + field("version", apiservercel.StringType, false), + field("resource", apiservercel.StringType, false), + field("subresource", apiservercel.StringType, false), + field("name", apiservercel.StringType, false), + )) +} + +// buildNonResourceAttributesType generates a DeclType for NonResourceAttributes. +// if attributes are added here, also add to convertObjectToUnstructured. 
+func buildNonResourceAttributesType(field func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField, fields func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField) *apiservercel.DeclType { + return apiservercel.NewObjectType("kubernetes.NonResourceAttributes", fields( + field("path", apiservercel.StringType, false), + field("verb", apiservercel.StringType, false), + )) +} + +func convertObjectToUnstructured(obj *authorizationv1.SubjectAccessReviewSpec) map[string]interface{} { + // Construct version containing every SubjectAccessReview user and string attribute field, even omitempty ones, for evaluation by CEL + extra := obj.Extra + if extra == nil { + extra = map[string]authorizationv1.ExtraValue{} + } + ret := map[string]interface{}{ + "user": obj.User, + "groups": obj.Groups, + "uid": string(obj.UID), + "extra": extra, + } + if obj.ResourceAttributes != nil { + ret["resourceAttributes"] = map[string]string{ + "namespace": obj.ResourceAttributes.Namespace, + "verb": obj.ResourceAttributes.Verb, + "group": obj.ResourceAttributes.Group, + "version": obj.ResourceAttributes.Version, + "resource": obj.ResourceAttributes.Resource, + "subresource": obj.ResourceAttributes.Subresource, + "name": obj.ResourceAttributes.Name, + } + } + if obj.NonResourceAttributes != nil { + ret["nonResourceAttributes"] = map[string]string{ + "verb": obj.NonResourceAttributes.Verb, + "path": obj.NonResourceAttributes.Path, + } + } + return ret +} diff --git a/vendor/k8s.io/apiserver/pkg/authorization/cel/interface.go b/vendor/k8s.io/apiserver/pkg/authorization/cel/interface.go new file mode 100644 index 000000000..82166830c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authorization/cel/interface.go @@ -0,0 +1,41 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cel + +import ( + celgo "github.com/google/cel-go/cel" +) + +type ExpressionAccessor interface { + GetExpression() string + ReturnTypes() []*celgo.Type +} + +var _ ExpressionAccessor = &SubjectAccessReviewMatchCondition{} + +// SubjectAccessReviewMatchCondition is a CEL expression that maps a SubjectAccessReview request to a list of values. +type SubjectAccessReviewMatchCondition struct { + Expression string +} + +func (v *SubjectAccessReviewMatchCondition) GetExpression() string { + return v.Expression +} + +func (v *SubjectAccessReviewMatchCondition) ReturnTypes() []*celgo.Type { + return []*celgo.Type{celgo.BoolType} +} diff --git a/vendor/k8s.io/apiserver/pkg/authorization/cel/matcher.go b/vendor/k8s.io/apiserver/pkg/authorization/cel/matcher.go new file mode 100644 index 000000000..30ce5b69c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authorization/cel/matcher.go @@ -0,0 +1,66 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cel + +import ( + "context" + "fmt" + + celgo "github.com/google/cel-go/cel" + + authorizationv1 "k8s.io/api/authorization/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" +) + +type CELMatcher struct { + CompilationResults []CompilationResult +} + +// eval evaluates the given SubjectAccessReview against all cel matchCondition expression +func (c *CELMatcher) Eval(ctx context.Context, r *authorizationv1.SubjectAccessReview) (bool, error) { + var evalErrors []error + va := map[string]interface{}{ + "request": convertObjectToUnstructured(&r.Spec), + } + for _, compilationResult := range c.CompilationResults { + evalResult, _, err := compilationResult.Program.ContextEval(ctx, va) + if err != nil { + evalErrors = append(evalErrors, fmt.Errorf("cel evaluation error: expression '%v' resulted in error: %w", compilationResult.ExpressionAccessor.GetExpression(), err)) + continue + } + if evalResult.Type() != celgo.BoolType { + evalErrors = append(evalErrors, fmt.Errorf("cel evaluation error: expression '%v' eval result type should be bool but got %W", compilationResult.ExpressionAccessor.GetExpression(), evalResult.Type())) + continue + } + match, ok := evalResult.Value().(bool) + if !ok { + evalErrors = append(evalErrors, fmt.Errorf("cel evaluation error: expression '%v' eval result value should be bool but got %W", compilationResult.ExpressionAccessor.GetExpression(), evalResult.Value())) + continue + } + // If at least one matchCondition successfully evaluates to FALSE, + // return early + if !match { + return false, nil + } + } + // if there is any error, return + if len(evalErrors) > 0 { + return false, utilerrors.NewAggregate(evalErrors) + } + // return ALL matchConditions evaluate to TRUE successfully without error + return true, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/cel/common/adaptor.go b/vendor/k8s.io/apiserver/pkg/cel/common/adaptor.go index c28d6ce51..dd94e282f 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/common/adaptor.go +++ b/vendor/k8s.io/apiserver/pkg/cel/common/adaptor.go @@ -56,12 +56,27 @@ type Schema interface { // Validations contains OpenAPI validation that the CEL library uses. type Validations interface { + Pattern() string + Minimum() *float64 + IsExclusiveMinimum() bool + Maximum() *float64 + IsExclusiveMaximum() bool + MultipleOf() *float64 + MinItems() *int64 MaxItems() *int64 + MinLength() *int64 MaxLength() *int64 + MinProperties() *int64 MaxProperties() *int64 Required() []string Enum() []any Nullable() bool + UniqueItems() bool + + AllOf() []Schema + OneOf() []Schema + AnyOf() []Schema + Not() Schema } // KubeExtensions contains Kubernetes-specific extensions to the OpenAPI schema. @@ -71,6 +86,16 @@ type KubeExtensions interface { IsXPreserveUnknownFields() bool XListType() string XListMapKeys() []string + XMapType() string + XValidations() []ValidationRule +} + +// ValidationRule represents a single x-kubernetes-validations rule. 
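
Putting the authorization CEL compiler, the request variable, and the matcher together, a sketch (not part of this patch) of compiling and evaluating a single matchCondition; it assumes the one-argument MustBaseEnvSet signature vendored in this snapshot, and the expression and SubjectAccessReview values are illustrative.

package main

import (
	"context"
	"fmt"

	authorizationv1 "k8s.io/api/authorization/v1"
	authorizationcel "k8s.io/apiserver/pkg/authorization/cel"
	"k8s.io/apiserver/pkg/cel/environment"
)

func main() {
	compiler := authorizationcel.NewCompiler(environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion()))
	result, err := compiler.CompileCELExpression(&authorizationcel.SubjectAccessReviewMatchCondition{
		// Only let this authorizer see resource requests for the "prod" namespace.
		Expression: "has(request.resourceAttributes) && request.resourceAttributes.namespace == 'prod'",
	})
	if err != nil {
		panic(err)
	}

	matcher := &authorizationcel.CELMatcher{CompilationResults: []authorizationcel.CompilationResult{result}}
	match, err := matcher.Eval(context.Background(), &authorizationv1.SubjectAccessReview{
		Spec: authorizationv1.SubjectAccessReviewSpec{
			User: "jane",
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				Verb: "get", Resource: "pods", Namespace: "prod",
			},
		},
	})
	fmt.Println(match, err) // true <nil>
}
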
+type ValidationRule interface { + Rule() string + Message() string + MessageExpression() string + FieldPath() string } // SchemaOrBool contains either a schema or a boolean indicating if the object diff --git a/vendor/k8s.io/apiserver/pkg/cel/common/equality.go b/vendor/k8s.io/apiserver/pkg/cel/common/equality.go new file mode 100644 index 000000000..9289637a3 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/cel/common/equality.go @@ -0,0 +1,334 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "reflect" + "time" +) + +// CorrelatedObject represents a node in a tree of objects that are being +// validated. It is used to keep track of the old value of an object during +// traversal of the new value. It is also used to cache the results of +// DeepEqual comparisons between the old and new values of objects. +// +// All receiver functions support being called on `nil` to support ergonomic +// recursive descent. The nil `CorrelatedObject` represents an uncorrelatable +// node in the tree. +// +// CorrelatedObject is not thread-safe. It is the responsibility of the caller +// to handle concurrency, if any. +type CorrelatedObject struct { + // Currently correlated old value during traversal of the schema/object + OldValue interface{} + + // Value being validated + Value interface{} + + // Schema used for validation of this value. The schema is also used + // to determine how to correlate the old object. + Schema Schema + + // Duration spent on ratcheting validation for this object and all of its + // children. + Duration *time.Duration + + // Scratch space below, may change during validation + + // Cached comparison result of DeepEqual of `value` and `thunk.oldValue` + comparisonResult *bool + + // Cached map representation of a map-type list, or nil if not map-type list + mapList MapList + + // Children spawned by a call to `Validate` on this object + // key is either a string or an index, depending upon whether `value` is + // a map or a list, respectively. + // + // The list of children may be incomplete depending upon if the internal + // logic of kube-openapi's SchemaValidator short-circuited before + // reaching all of the children. + // + // It should be expected to have an entry for either all of the children, or + // none of them. 
+ children map[interface{}]*CorrelatedObject +} + +func NewCorrelatedObject(new, old interface{}, schema Schema) *CorrelatedObject { + d := time.Duration(0) + return &CorrelatedObject{ + OldValue: old, + Value: new, + Schema: schema, + Duration: &d, + } +} + +// If OldValue or Value is not a list, or the index is out of bounds of the +// Value list, returns nil +// If oldValue is a list, this considers the x-list-type to decide how to +// correlate old values: +// +// If listType is map, creates a map representation of the list using the designated +// map-keys, caches it for future calls, and returns the map value, or nil if +// the correlated key is not in the old map +// +// Otherwise, if the list type is not correlatable this funcion returns nil. +func (r *CorrelatedObject) correlateOldValueForChildAtNewIndex(index int) interface{} { + oldAsList, ok := r.OldValue.([]interface{}) + if !ok { + return nil + } + + asList, ok := r.Value.([]interface{}) + if !ok { + return nil + } else if len(asList) <= index { + // Cannot correlate out of bounds index + return nil + } + + listType := r.Schema.XListType() + switch listType { + case "map": + // Look up keys for this index in current object + currentElement := asList[index] + + oldList := r.mapList + if oldList == nil { + oldList = MakeMapList(r.Schema, oldAsList) + r.mapList = oldList + } + return oldList.Get(currentElement) + + case "set": + // Are sets correlatable? Only if the old value equals the current value. + // We might be able to support this, but do not currently see a lot + // of value + // (would allow you to add/remove items from sets with ratcheting but not change them) + return nil + case "": + fallthrough + case "atomic": + // Atomic lists are the default are not correlatable by item + // Ratcheting is not available on a per-index basis + return nil + default: + // Unrecognized list type. Assume non-correlatable. + return nil + } +} + +// CachedDeepEqual is equivalent to reflect.DeepEqual, but caches the +// results in the tree of ratchetInvocationScratch objects on the way: +// +// For objects and arrays, this function will make a best effort to make +// use of past DeepEqual checks performed by this Node's children, if available. +// +// If a lazy computation could not be found for all children possibly due +// to validation logic short circuiting and skipping the children, then +// this function simply defers to reflect.DeepEqual. +func (r *CorrelatedObject) CachedDeepEqual() (res bool) { + start := time.Now() + defer func() { + if r != nil && r.Duration != nil { + *r.Duration += time.Since(start) + } + }() + + if r == nil { + // Uncorrelatable node is not considered equal to its old value + return false + } else if r.comparisonResult != nil { + return *r.comparisonResult + } + + defer func() { + r.comparisonResult = &res + }() + + if r.Value == nil && r.OldValue == nil { + return true + } else if r.Value == nil || r.OldValue == nil { + return false + } + + oldAsArray, oldIsArray := r.OldValue.([]interface{}) + newAsArray, newIsArray := r.Value.([]interface{}) + + oldAsMap, oldIsMap := r.OldValue.(map[string]interface{}) + newAsMap, newIsMap := r.Value.(map[string]interface{}) + + // If old and new are not the same type, they are not equal + if (oldIsArray != newIsArray) || oldIsMap != newIsMap { + return false + } + + // Objects are known to be same type of (map, slice, or primitive) + switch { + case oldIsArray: + // Both arrays case. 
oldIsArray == newIsArray + if len(oldAsArray) != len(newAsArray) { + return false + } + + for i := range newAsArray { + child := r.Index(i) + if child == nil { + if r.mapList == nil { + // Treat non-correlatable array as a unit with reflect.DeepEqual + return reflect.DeepEqual(oldAsArray, newAsArray) + } + + // If array is correlatable, but old not found. Just short circuit + // comparison + return false + + } else if !child.CachedDeepEqual() { + // If one child is not equal the entire object is not equal + return false + } + } + + return true + case oldIsMap: + // Both maps case. oldIsMap == newIsMap + if len(oldAsMap) != len(newAsMap) { + return false + } + + for k := range newAsMap { + child := r.Key(k) + if child == nil { + // Un-correlatable child due to key change. + // Objects are not equal. + return false + } else if !child.CachedDeepEqual() { + // If one child is not equal the entire object is not equal + return false + } + } + + return true + + default: + // Primitive: use reflect.DeepEqual + return reflect.DeepEqual(r.OldValue, r.Value) + } +} + +// Key returns the child of the receiver with the given name. +// Returns nil if the given name is does not exist in the new object, or its +// value is not correlatable to an old value. +// If receiver is nil or if the new value is not an object/map, returns nil. +func (r *CorrelatedObject) Key(field string) *CorrelatedObject { + start := time.Now() + defer func() { + if r != nil && r.Duration != nil { + *r.Duration += time.Since(start) + } + }() + + if r == nil || r.Schema == nil { + return nil + } else if existing, exists := r.children[field]; exists { + return existing + } + + // Find correlated old value + oldAsMap, okOld := r.OldValue.(map[string]interface{}) + newAsMap, okNew := r.Value.(map[string]interface{}) + if !okOld || !okNew { + return nil + } + + oldValueForField, okOld := oldAsMap[field] + newValueForField, okNew := newAsMap[field] + if !okOld || !okNew { + return nil + } + + var propertySchema Schema + if prop, exists := r.Schema.Properties()[field]; exists { + propertySchema = prop + } else if addP := r.Schema.AdditionalProperties(); addP != nil && addP.Schema() != nil { + propertySchema = addP.Schema() + } else { + return nil + } + + if r.children == nil { + r.children = make(map[interface{}]*CorrelatedObject, len(newAsMap)) + } + + res := &CorrelatedObject{ + OldValue: oldValueForField, + Value: newValueForField, + Schema: propertySchema, + Duration: r.Duration, + } + r.children[field] = res + return res +} + +// Index returns the child of the receiver at the given index. +// Returns nil if the given index is out of bounds, or its value is not +// correlatable to an old value. +// If receiver is nil or if the new value is not an array, returns nil. 
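
To make the correlation logic above concrete, a sketch (not part of this patch) that correlates two versions of a simple object through the openapi.Schema adaptor vendored later in this series; the schema and object values are hypothetical.

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/cel/common"
	"k8s.io/apiserver/pkg/cel/openapi"
	"k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
	objSchema := &spec.Schema{SchemaProps: spec.SchemaProps{
		Type: []string{"object"},
		Properties: map[string]spec.Schema{
			"image":    {SchemaProps: spec.SchemaProps{Type: []string{"string"}}},
			"replicas": {SchemaProps: spec.SchemaProps{Type: []string{"integer"}}},
		},
	}}

	oldObj := map[string]interface{}{"image": "nginx:1.24", "replicas": int64(3)}
	newObj := map[string]interface{}{"image": "nginx:1.24", "replicas": int64(5)}

	root := common.NewCorrelatedObject(newObj, oldObj, &openapi.Schema{Schema: objSchema})
	// Unchanged field: the cached comparison reports equality, so ratcheting
	// could skip re-validating it.
	fmt.Println(root.Key("image").CachedDeepEqual()) // true
	// Changed field: not equal, so it must be validated as new input.
	fmt.Println(root.Key("replicas").CachedDeepEqual()) // false
}
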
+func (r *CorrelatedObject) Index(i int) *CorrelatedObject { + start := time.Now() + defer func() { + if r != nil && r.Duration != nil { + *r.Duration += time.Since(start) + } + }() + + if r == nil || r.Schema == nil { + return nil + } else if existing, exists := r.children[i]; exists { + return existing + } + + asList, ok := r.Value.([]interface{}) + if !ok || len(asList) <= i { + return nil + } + + oldValueForIndex := r.correlateOldValueForChildAtNewIndex(i) + if oldValueForIndex == nil { + return nil + } + var itemSchema Schema + if i := r.Schema.Items(); i != nil { + itemSchema = i + } else { + return nil + } + + if r.children == nil { + r.children = make(map[interface{}]*CorrelatedObject, len(asList)) + } + + res := &CorrelatedObject{ + OldValue: oldValueForIndex, + Value: asList[i], + Schema: itemSchema, + Duration: r.Duration, + } + r.children[i] = res + return res +} diff --git a/vendor/k8s.io/apiserver/pkg/cel/common/schemas.go b/vendor/k8s.io/apiserver/pkg/cel/common/schemas.go index 3fdd3a6c8..19392babe 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/common/schemas.go +++ b/vendor/k8s.io/apiserver/pkg/cel/common/schemas.go @@ -165,7 +165,11 @@ func SchemaDeclType(s Schema, isResourceRoot bool) *apiservercel.DeclType { // unicode code point can be up to 4 bytes long) strWithMaxLength.MaxElements = zeroIfNegative(*s.MaxLength()) * 4 } else { - strWithMaxLength.MaxElements = estimateMaxStringLengthPerRequest(s) + if len(s.Enum()) > 0 { + strWithMaxLength.MaxElements = estimateMaxStringEnumLength(s) + } else { + strWithMaxLength.MaxElements = estimateMaxStringLengthPerRequest(s) + } } return strWithMaxLength case "boolean": @@ -239,6 +243,19 @@ func estimateMaxStringLengthPerRequest(s Schema) int64 { } } +// estimateMaxStringLengthPerRequest estimates the maximum string length (in characters) +// that has a set of enum values. +// The result of the estimation is the length of the longest possible value. +func estimateMaxStringEnumLength(s Schema) int64 { + var maxLength int64 + for _, v := range s.Enum() { + if s, ok := v.(string); ok && int64(len(s)) > maxLength { + maxLength = int64(len(s)) + } + } + return maxLength +} + // estimateMaxArrayItemsPerRequest estimates the maximum number of array items with // the provided minimum serialized size that can fit into a single request. func estimateMaxArrayItemsFromMinSize(minSize int64) int64 { diff --git a/vendor/k8s.io/apiserver/pkg/cel/common/values.go b/vendor/k8s.io/apiserver/pkg/cel/common/values.go index d9034a80f..c8279f013 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/common/values.go +++ b/vendor/k8s.io/apiserver/pkg/cel/common/values.go @@ -84,18 +84,22 @@ func UnstructuredToVal(unstructured interface{}, schema Schema) ref.Val { }, } } - // A object with x-kubernetes-preserve-unknown-fields but no properties or additionalProperties is treated - // as an empty object. - if schema.IsXPreserveUnknownFields() { - return &unstructuredMap{ - value: m, - schema: schema, - propSchema: func(key string) (Schema, bool) { - return nil, false - }, - } + + // properties and additionalProperties are mutual exclusive, but nothing prevents the situation + // where both are missing. + // An object that (1) has no properties (2) has no additionalProperties or additionalProperties == false + // is treated as an empty object. + // An object that has additionalProperties == true is treated as an unstructured map. + // An object that has x-kubernetes-preserve-unknown-field extension set is treated as an unstructured map. 
+ // Empty object vs unstructured map is differentiated by unstructuredMap implementation with the set schema. + // The resulting result remains the same. + return &unstructuredMap{ + value: m, + schema: schema, + propSchema: func(key string) (Schema, bool) { + return nil, false + }, } - return types.NewErr("invalid object type, expected either Properties or AdditionalProperties with Allows=true and non-empty Schema") } if schema.Type() == "array" { diff --git a/vendor/k8s.io/apiserver/pkg/cel/environment/base.go b/vendor/k8s.io/apiserver/pkg/cel/environment/base.go index ed0d34041..76a0bccee 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/environment/base.go +++ b/vendor/k8s.io/apiserver/pkg/cel/environment/base.go @@ -22,7 +22,9 @@ import ( "sync" "github.com/google/cel-go/cel" + "github.com/google/cel-go/checker" "github.com/google/cel-go/ext" + "github.com/google/cel-go/interpreter" "golang.org/x/sync/singleflight" "k8s.io/apimachinery/pkg/util/version" @@ -41,7 +43,7 @@ import ( // desirable because it means that CEL expressions are portable across a wider range // of Kubernetes versions. func DefaultCompatibilityVersion() *version.Version { - return version.MajorMinor(1, 27) + return version.MajorMinor(1, 28) } var baseOpts = []VersionedOptions{ @@ -57,7 +59,6 @@ var baseOpts = []VersionedOptions{ cel.EagerlyValidateDeclarations(true), cel.DefaultUTCTimeZone(true), - ext.Strings(ext.StringsVersion(0)), library.URLs(), library.Regex(), library.Lists(), @@ -81,7 +82,47 @@ var baseOpts = []VersionedOptions{ library.Quantity(), }, }, - // TODO: switch to ext.Strings version 2 once format() is fixed to work with HomogeneousAggregateLiterals. + // add the new validator in 1.29 + { + IntroducedVersion: version.MajorMinor(1, 29), + EnvOptions: []cel.EnvOption{ + cel.ASTValidators( + cel.ValidateDurationLiterals(), + cel.ValidateTimestampLiterals(), + cel.ValidateRegexLiterals(), + cel.ValidateHomogeneousAggregateLiterals(), + ), + }, + }, + // String library + { + IntroducedVersion: version.MajorMinor(1, 0), + RemovedVersion: version.MajorMinor(1, 29), + EnvOptions: []cel.EnvOption{ + ext.Strings(ext.StringsVersion(0)), + }, + }, + { + IntroducedVersion: version.MajorMinor(1, 29), + EnvOptions: []cel.EnvOption{ + ext.Strings(ext.StringsVersion(2)), + }, + }, + // Set library + { + IntroducedVersion: version.MajorMinor(1, 29), + EnvOptions: []cel.EnvOption{ + ext.Sets(), + // cel-go v0.17.7 introduced CostEstimatorOptions. + // Previous the presence has a cost of 0 but cel fixed it to 1. We still set to 0 here to avoid breaking changes. + cel.CostEstimatorOptions(checker.PresenceTestHasCost(false)), + }, + ProgramOptions: []cel.ProgramOption{ + // cel-go v0.17.7 introduced CostTrackerOptions. + // Previous the presence has a cost of 0 but cel fixed it to 1. We still set to 0 here to avoid breaking changes. + cel.CostTrackerOptions(interpreter.PresenceTestHasCost(false)), + }, + }, } // MustBaseEnvSet returns the common CEL base environments for Kubernetes for Version, or panics diff --git a/vendor/k8s.io/apiserver/pkg/cel/lazy/lazy.go b/vendor/k8s.io/apiserver/pkg/cel/lazy/lazy.go index 1742deb0a..16183050d 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/lazy/lazy.go +++ b/vendor/k8s.io/apiserver/pkg/cel/lazy/lazy.go @@ -35,7 +35,7 @@ var _ traits.Mapper = (*MapValue)(nil) // MapValue is a map that lazily evaluate its value when a field is first accessed. // The map value is not designed to be thread-safe. 
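
To see the version gating above in action, a sketch (not part of this patch) that compiles a Sets-library expression against the 1.28 and 1.29 base environments; it assumes the one-argument MustBaseEnvSet signature vendored here and uses NewExpressions, the compatibility-gated environment type.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/version"
	"k8s.io/apiserver/pkg/cel/environment"
)

func main() {
	const expr = "sets.contains([1, 2, 3], [2])"
	for _, compat := range []*version.Version{version.MajorMinor(1, 28), version.MajorMinor(1, 29)} {
		env, err := environment.MustBaseEnvSet(compat).Env(environment.NewExpressions)
		if err != nil {
			panic(err)
		}
		_, issues := env.Compile(expr)
		ok := issues == nil || issues.Err() == nil
		// The Sets library is introduced at 1.29, so the 1.28 environment is
		// expected to reject the expression as an undeclared reference.
		fmt.Printf("compatibility %s: compiles=%v\n", compat, ok)
	}
}
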
type MapValue struct { - typeValue *types.TypeValue + typeValue *types.Type // values are previously evaluated values obtained from callbacks values map[string]ref.Val diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/authz.go b/vendor/k8s.io/apiserver/pkg/cel/library/authz.go index 00f0200e8..df4bf0807 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/authz.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/authz.go @@ -202,6 +202,10 @@ var authzLib = &authz{} type authz struct{} +func (*authz) LibraryName() string { + return "k8s.authz" +} + var authzLibraryDecls = map[string][]cel.FunctionOpt{ "path": { cel.MemberOverload("authorizer_path", []*cel.Type{AuthorizerType, cel.StringType}, PathCheckType, @@ -578,7 +582,7 @@ type decisionVal struct { // any object type that has receiver functions but does not expose any fields to // CEL. type receiverOnlyObjectVal struct { - typeValue *types.TypeValue + typeValue *types.Type } // receiverOnlyVal returns a receiverOnlyObjectVal for the given type. diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/cost.go b/vendor/k8s.io/apiserver/pkg/cel/library/cost.go index 3d1b3fbb2..d18c138ec 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/cost.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/cost.go @@ -101,8 +101,8 @@ func (l *CostEstimator) EstimateCallCost(function, overloadId string, target *ch // If the list contains strings or bytes, add the cost of traversing all the strings/bytes as a way // of estimating the additional comparison cost. if elNode := l.listElementNode(*target); elNode != nil { - t := elNode.Type().GetPrimitive() - if t == exprpb.Type_STRING || t == exprpb.Type_BYTES { + k := elNode.Type().Kind() + if k == types.StringKind || k == types.BytesKind { sz := l.sizeEstimate(elNode) elCost = elCost.Add(sz.MultiplyByCostFactor(common.StringTraversalCostFactor)) } @@ -247,7 +247,8 @@ func (l *CostEstimator) sizeEstimate(t checker.AstNode) checker.SizeEstimate { } func (l *CostEstimator) listElementNode(list checker.AstNode) checker.AstNode { - if lt := list.Type().GetListType(); lt != nil { + if params := list.Type().Parameters(); len(params) > 0 { + lt := params[0] nodePath := list.Path() if nodePath != nil { // Provide path if we have it so that a OpenAPIv3 maxLength validation can be looked up, if it exists @@ -255,10 +256,10 @@ func (l *CostEstimator) listElementNode(list checker.AstNode) checker.AstNode { path := make([]string, len(nodePath)+1) copy(path, nodePath) path[len(nodePath)] = "@items" - return &itemsNode{path: path, t: lt.GetElemType(), expr: nil} + return &itemsNode{path: path, t: lt, expr: nil} } else { // Provide just the type if no path is available so that worst case size can be looked up based on type. 
- return &itemsNode{t: lt.GetElemType(), expr: nil} + return &itemsNode{t: lt, expr: nil} } } return nil @@ -273,7 +274,7 @@ func (l *CostEstimator) EstimateSize(element checker.AstNode) *checker.SizeEstim type itemsNode struct { path []string - t *exprpb.Type + t *types.Type expr *exprpb.Expr } @@ -281,7 +282,7 @@ func (i *itemsNode) Path() []string { return i.path } -func (i *itemsNode) Type() *exprpb.Type { +func (i *itemsNode) Type() *types.Type { return i.t } diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/lists.go b/vendor/k8s.io/apiserver/pkg/cel/library/lists.go index fe51dc87f..327ec93d6 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/lists.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/lists.go @@ -95,6 +95,10 @@ var listsLib = &lists{} type lists struct{} +func (*lists) LibraryName() string { + return "k8s.lists" +} + var paramA = cel.TypeParamType("A") // CEL typeParams can be used to constraint to a specific trait (e.g. traits.ComparableType) if the 1st operand is the type to constrain. diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go b/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go index 49e3dae7c..b4ac91c8a 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go @@ -22,6 +22,7 @@ import ( "github.com/google/cel-go/cel" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" + "k8s.io/apimachinery/pkg/api/resource" apiservercel "k8s.io/apiserver/pkg/cel" ) @@ -141,6 +142,10 @@ var quantityLib = &quantity{} type quantity struct{} +func (*quantity) LibraryName() string { + return "k8s.quantity" +} + var quantityLibraryDecls = map[string][]cel.FunctionOpt{ "quantity": { cel.Overload("string_to_quantity", []*cel.Type{cel.StringType}, apiservercel.QuantityType, cel.UnaryBinding((stringToQuantity))), diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/regex.go b/vendor/k8s.io/apiserver/pkg/cel/library/regex.go index 17fb3d44c..147a40f9b 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/regex.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/regex.go @@ -51,6 +51,10 @@ var regexLib = ®ex{} type regex struct{} +func (*regex) LibraryName() string { + return "k8s.regex" +} + var regexLibraryDecls = map[string][]cel.FunctionOpt{ "find": { cel.MemberOverload("string_find_string", []*cel.Type{cel.StringType, cel.StringType}, cel.StringType, diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/test.go b/vendor/k8s.io/apiserver/pkg/cel/library/test.go index 95446f63c..dcbc058a1 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/test.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/test.go @@ -37,6 +37,10 @@ type testLib struct { version uint32 } +func (*testLib) LibraryName() string { + return "k8s.test" +} + type TestOption func(*testLib) *testLib func TestVersion(version uint32) func(lib *testLib) *testLib { diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/urls.go b/vendor/k8s.io/apiserver/pkg/cel/library/urls.go index 7be054ece..8f4ba85af 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/urls.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/urls.go @@ -112,6 +112,10 @@ var urlsLib = &urls{} type urls struct{} +func (*urls) LibraryName() string { + return "k8s.urls" +} + var urlLibraryDecls = map[string][]cel.FunctionOpt{ "url": { cel.Overload("string_to_url", []*cel.Type{cel.StringType}, apiservercel.URLType, diff --git a/vendor/k8s.io/apiserver/pkg/cel/openapi/adaptor.go b/vendor/k8s.io/apiserver/pkg/cel/openapi/adaptor.go index 0e2cc6e2b..bc7b0d8c9 
100644 --- a/vendor/k8s.io/apiserver/pkg/cel/openapi/adaptor.go +++ b/vendor/k8s.io/apiserver/pkg/cel/openapi/adaptor.go @@ -54,6 +54,10 @@ func (s *Schema) Format() string { return s.Schema.Format } +func (s *Schema) Pattern() string { + return s.Schema.Pattern +} + func (s *Schema) Items() common.Schema { if s.Schema.Items == nil || s.Schema.Items.Schema == nil { return nil @@ -86,14 +90,50 @@ func (s *Schema) Default() any { return s.Schema.Default } +func (s *Schema) Minimum() *float64 { + return s.Schema.Minimum +} + +func (s *Schema) IsExclusiveMinimum() bool { + return s.Schema.ExclusiveMinimum +} + +func (s *Schema) Maximum() *float64 { + return s.Schema.Maximum +} + +func (s *Schema) IsExclusiveMaximum() bool { + return s.Schema.ExclusiveMaximum +} + +func (s *Schema) MultipleOf() *float64 { + return s.Schema.MultipleOf +} + +func (s *Schema) UniqueItems() bool { + return s.Schema.UniqueItems +} + +func (s *Schema) MinItems() *int64 { + return s.Schema.MinItems +} + func (s *Schema) MaxItems() *int64 { return s.Schema.MaxItems } +func (s *Schema) MinLength() *int64 { + return s.Schema.MinLength +} + func (s *Schema) MaxLength() *int64 { return s.Schema.MaxLength } +func (s *Schema) MinProperties() *int64 { + return s.Schema.MinProperties +} + func (s *Schema) MaxProperties() *int64 { return s.Schema.MaxProperties } @@ -110,6 +150,40 @@ func (s *Schema) Nullable() bool { return s.Schema.Nullable } +func (s *Schema) AllOf() []common.Schema { + var res []common.Schema + for _, nestedSchema := range s.Schema.AllOf { + nestedSchema := nestedSchema + res = append(res, &Schema{&nestedSchema}) + } + return res +} + +func (s *Schema) AnyOf() []common.Schema { + var res []common.Schema + for _, nestedSchema := range s.Schema.AnyOf { + nestedSchema := nestedSchema + res = append(res, &Schema{&nestedSchema}) + } + return res +} + +func (s *Schema) OneOf() []common.Schema { + var res []common.Schema + for _, nestedSchema := range s.Schema.OneOf { + nestedSchema := nestedSchema + res = append(res, &Schema{&nestedSchema}) + } + return res +} + +func (s *Schema) Not() common.Schema { + if s.Schema.Not == nil { + return nil + } + return &Schema{s.Schema.Not} +} + func (s *Schema) IsXIntOrString() bool { return isXIntOrString(s.Schema) } @@ -126,10 +200,18 @@ func (s *Schema) XListType() string { return getXListType(s.Schema) } +func (s *Schema) XMapType() string { + return getXMapType(s.Schema) +} + func (s *Schema) XListMapKeys() []string { return getXListMapKeys(s.Schema) } +func (s *Schema) XValidations() []common.ValidationRule { + return getXValidations(s.Schema) +} + func (s *Schema) WithTypeAndObjectMeta() common.Schema { return &Schema{common.WithTypeAndObjectMeta(s.Schema)} } diff --git a/vendor/k8s.io/apiserver/pkg/cel/openapi/extensions.go b/vendor/k8s.io/apiserver/pkg/cel/openapi/extensions.go index 6a2f83032..3bb3bccf0 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/openapi/extensions.go +++ b/vendor/k8s.io/apiserver/pkg/cel/openapi/extensions.go @@ -18,6 +18,7 @@ package openapi import ( "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apiserver/pkg/cel/common" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -47,6 +48,11 @@ func getXListType(schema *spec.Schema) string { return s } +func getXMapType(schema *spec.Schema) string { + s, _ := schema.Extensions.GetString(extMapType) + return s +} + func getXListMapKeys(schema *spec.Schema) []string { mapKeys, ok := schema.Extensions.GetStringSlice(extListMapKeys) if !ok { @@ -55,8 +61,47 @@ func getXListMapKeys(schema *spec.Schema) []string { 
return mapKeys } +type ValidationRule struct { + RuleField string `json:"rule"` + MessageField string `json:"message"` + MessageExpressionField string `json:"messageExpression"` + PathField string `json:"fieldPath"` +} + +func (v ValidationRule) Rule() string { + return v.RuleField +} + +func (v ValidationRule) Message() string { + return v.MessageField +} + +func (v ValidationRule) FieldPath() string { + return v.PathField +} + +func (v ValidationRule) MessageExpression() string { + return v.MessageExpressionField +} + +// TODO: simplify +func getXValidations(schema *spec.Schema) []common.ValidationRule { + var rules []ValidationRule + err := schema.Extensions.GetObject(extValidations, &rules) + if err != nil { + return nil + } + results := make([]common.ValidationRule, len(rules)) + for i, rule := range rules { + results[i] = rule + } + return results +} + const extIntOrString = "x-kubernetes-int-or-string" const extEmbeddedResource = "x-kubernetes-embedded-resource" const extPreserveUnknownFields = "x-kubernetes-preserve-unknown-fields" const extListType = "x-kubernetes-list-type" +const extMapType = "x-kubernetes-map-type" const extListMapKeys = "x-kubernetes-list-map-keys" +const extValidations = "x-kubernetes-validations" diff --git a/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/combined.go b/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/combined.go new file mode 100644 index 000000000..eb3c37635 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/combined.go @@ -0,0 +1,45 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resolver + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// Combine combines the DefinitionsSchemaResolver with a secondary schema resolver. +// The resulting schema resolver uses the DefinitionsSchemaResolver for a GVK that DefinitionsSchemaResolver knows, +// and the secondary otherwise. +func (d *DefinitionsSchemaResolver) Combine(secondary SchemaResolver) SchemaResolver { + return &combinedSchemaResolver{definitions: d, secondary: secondary} +} + +type combinedSchemaResolver struct { + definitions *DefinitionsSchemaResolver + secondary SchemaResolver +} + +// ResolveSchema takes a GroupVersionKind (GVK) and returns the OpenAPI schema +// identified by the GVK. +// If the DefinitionsSchemaResolver knows the gvk, the DefinitionsSchemaResolver handles the resolution, +// otherwise, the secondary does. 
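
A short sketch (not part of this patch) of the new x-kubernetes-validations and x-kubernetes-map-type plumbing: a hand-built spec.Schema carrying the extensions, read back through the openapi adaptor; the rule and message are illustrative.

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/cel/openapi"
	"k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
	s := &spec.Schema{
		VendorExtensible: spec.VendorExtensible{Extensions: spec.Extensions{
			"x-kubernetes-map-type": "atomic",
			"x-kubernetes-validations": []interface{}{
				map[string]interface{}{
					"rule":    "self.replicas <= self.maxReplicas",
					"message": "replicas may not exceed maxReplicas",
				},
			},
		}},
	}

	adapted := &openapi.Schema{Schema: s}
	fmt.Println(adapted.XMapType()) // atomic
	for _, rule := range adapted.XValidations() {
		fmt.Println(rule.Rule(), "//", rule.Message())
	}
}
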
+func (r *combinedSchemaResolver) ResolveSchema(gvk schema.GroupVersionKind) (*spec.Schema, error) { + if _, ok := r.definitions.gvkToRef[gvk]; ok { + return r.definitions.ResolveSchema(gvk) + } + return r.secondary.ResolveSchema(gvk) +} diff --git a/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/definitions.go b/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/definitions.go index df7357f77..12b353b0b 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/definitions.go +++ b/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/definitions.go @@ -29,40 +29,39 @@ import ( // DefinitionsSchemaResolver resolves the schema of a built-in type // by looking up the OpenAPI definitions. type DefinitionsSchemaResolver struct { - defs map[string]common.OpenAPIDefinition - gvkToSchema map[schema.GroupVersionKind]*spec.Schema + defs map[string]common.OpenAPIDefinition + gvkToRef map[schema.GroupVersionKind]string } // NewDefinitionsSchemaResolver creates a new DefinitionsSchemaResolver. // An example working setup: -// scheme = "k8s.io/client-go/kubernetes/scheme".Scheme // getDefinitions = "k8s.io/kubernetes/pkg/generated/openapi".GetOpenAPIDefinitions -func NewDefinitionsSchemaResolver(scheme *runtime.Scheme, getDefinitions common.GetOpenAPIDefinitions) *DefinitionsSchemaResolver { - gvkToSchema := make(map[schema.GroupVersionKind]*spec.Schema) - namer := openapi.NewDefinitionNamer(scheme) +// scheme = "k8s.io/client-go/kubernetes/scheme".Scheme +func NewDefinitionsSchemaResolver(getDefinitions common.GetOpenAPIDefinitions, schemes ...*runtime.Scheme) *DefinitionsSchemaResolver { + gvkToRef := make(map[schema.GroupVersionKind]string) + namer := openapi.NewDefinitionNamer(schemes...) defs := getDefinitions(func(path string) spec.Ref { return spec.MustCreateRef(path) }) - for name, def := range defs { + for name := range defs { _, e := namer.GetDefinitionName(name) gvks := extensionsToGVKs(e) - s := def.Schema // map value not addressable, make copy for _, gvk := range gvks { - gvkToSchema[gvk] = &s + gvkToRef[gvk] = name } } return &DefinitionsSchemaResolver{ - gvkToSchema: gvkToSchema, - defs: defs, + gvkToRef: gvkToRef, + defs: defs, } } func (d *DefinitionsSchemaResolver) ResolveSchema(gvk schema.GroupVersionKind) (*spec.Schema, error) { - s, ok := d.gvkToSchema[gvk] + ref, ok := d.gvkToRef[gvk] if !ok { return nil, fmt.Errorf("cannot resolve %v: %w", gvk, ErrSchemaNotFound) } - s, err := populateRefs(func(ref string) (*spec.Schema, bool) { + s, err := PopulateRefs(func(ref string) (*spec.Schema, bool) { // find the schema by the ref string, and return a deep copy def, ok := d.defs[ref] if !ok { @@ -70,7 +69,7 @@ func (d *DefinitionsSchemaResolver) ResolveSchema(gvk schema.GroupVersionKind) ( } s := def.Schema return &s, true - }, s) + }, ref) if err != nil { return nil, err } diff --git a/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/discovery.go b/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/discovery.go index 53cbc7054..9c6cefce8 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/discovery.go +++ b/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/discovery.go @@ -53,34 +53,34 @@ func (r *ClientDiscoveryResolver) ResolveSchema(gvk schema.GroupVersionKind) (*s if err != nil { return nil, err } - s, err := resolveType(resp, gvk) + ref, err := resolveRef(resp, gvk) if err != nil { return nil, err } - s, err = populateRefs(func(ref string) (*spec.Schema, bool) { + s, err := PopulateRefs(func(ref string) (*spec.Schema, bool) { s, ok := 
resp.Components.Schemas[strings.TrimPrefix(ref, refPrefix)] return s, ok - }, s) + }, ref) if err != nil { return nil, err } return s, nil } -func resolveType(resp *schemaResponse, gvk schema.GroupVersionKind) (*spec.Schema, error) { - for _, s := range resp.Components.Schemas { +func resolveRef(resp *schemaResponse, gvk schema.GroupVersionKind) (string, error) { + for ref, s := range resp.Components.Schemas { var gvks []schema.GroupVersionKind err := s.Extensions.GetObject(extGVK, &gvks) if err != nil { - return nil, err + return "", err } for _, g := range gvks { if g == gvk { - return s, nil + return ref, nil } } } - return nil, fmt.Errorf("cannot resolve group version kind %q: %w", gvk, ErrSchemaNotFound) + return "", fmt.Errorf("cannot resolve group version kind %q: %w", gvk, ErrSchemaNotFound) } func resourcePathFromGV(gv schema.GroupVersion) string { diff --git a/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/refs.go b/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/refs.go index 49321bab4..56e2a4bbd 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/refs.go +++ b/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/refs.go @@ -19,19 +19,41 @@ package resolver import ( "fmt" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kube-openapi/pkg/validation/spec" ) -// populateRefs recursively replaces Refs in the schema with the referred one. +// PopulateRefs recursively replaces Refs in the schema with the referred one. // schemaOf is the callback to find the corresponding schema by the ref. // This function will not mutate the original schema. If the schema needs to be // mutated, a copy will be returned, otherwise it returns the original schema. -func populateRefs(schemaOf func(ref string) (*spec.Schema, bool), schema *spec.Schema) (*spec.Schema, error) { +func PopulateRefs(schemaOf func(ref string) (*spec.Schema, bool), rootRef string) (*spec.Schema, error) { + visitedRefs := sets.New[string]() + rootSchema, ok := schemaOf(rootRef) + visitedRefs.Insert(rootRef) + if !ok { + return nil, fmt.Errorf("internal error: cannot resolve Ref for root schema %q: %w", rootRef, ErrSchemaNotFound) + } + return populateRefs(schemaOf, visitedRefs, rootSchema) +} + +func populateRefs(schemaOf func(ref string) (*spec.Schema, bool), visited sets.Set[string], schema *spec.Schema) (*spec.Schema, error) { result := *schema changed := false ref, isRef := refOf(schema) if isRef { + if visited.Has(ref) { + return &spec.Schema{ + // for circular ref, return an empty object as placeholder + SchemaProps: spec.SchemaProps{Type: []string{"object"}}, + }, nil + } + visited.Insert(ref) + // restore visited state at the end of the recursion. + defer func() { + visited.Delete(ref) + }() // replace the whole schema with the referred one. 
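
For the ref-population change above, a sketch (not part of this patch) showing the cycle protection on a hypothetical self-referential Node definition; it assumes refOf resolves a schema's $ref to the same string the definitions map is keyed by.

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apiserver/pkg/cel/openapi/resolver"
	"k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
	// A self-referential "Node" definition: next points back at Node.
	defs := map[string]*spec.Schema{
		"Node": {SchemaProps: spec.SchemaProps{
			Type: []string{"object"},
			Properties: map[string]spec.Schema{
				"value": {SchemaProps: spec.SchemaProps{Type: []string{"string"}}},
				"next":  {SchemaProps: spec.SchemaProps{Ref: spec.MustCreateRef("Node")}},
			},
		}},
	}
	schemaOf := func(ref string) (*spec.Schema, bool) {
		s, ok := defs[ref]
		return s, ok
	}

	resolved, err := resolver.PopulateRefs(schemaOf, "Node")
	if err != nil {
		panic(err)
	}
	// The circular $ref is replaced with an empty object placeholder rather
	// than recursing forever.
	next, _ := json.Marshal(resolved.Properties["next"])
	fmt.Println(string(next))
}
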
resolved, ok := schemaOf(ref) if !ok { @@ -44,7 +66,7 @@ func populateRefs(schemaOf func(ref string) (*spec.Schema, bool), schema *spec.S props := make(map[string]spec.Schema, len(schema.Properties)) propsChanged := false for name, prop := range result.Properties { - populated, err := populateRefs(schemaOf, &prop) + populated, err := populateRefs(schemaOf, visited, &prop) if err != nil { return nil, err } @@ -58,7 +80,7 @@ func populateRefs(schemaOf func(ref string) (*spec.Schema, bool), schema *spec.S result.Properties = props } if result.AdditionalProperties != nil && result.AdditionalProperties.Schema != nil { - populated, err := populateRefs(schemaOf, result.AdditionalProperties.Schema) + populated, err := populateRefs(schemaOf, visited, result.AdditionalProperties.Schema) if err != nil { return nil, err } @@ -69,7 +91,7 @@ func populateRefs(schemaOf func(ref string) (*spec.Schema, bool), schema *spec.S } // schema is a list, populate its items if result.Items != nil && result.Items.Schema != nil { - populated, err := populateRefs(schemaOf, result.Items.Schema) + populated, err := populateRefs(schemaOf, visited, result.Items.Schema) if err != nil { return nil, err } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/handler.go b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/handler.go index 61a7fd70d..254a22348 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/handler.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/handler.go @@ -229,7 +229,6 @@ func (rdm *resourceDiscoveryManager) AddGroupVersion(source Source, groupName st } func (rdm *resourceDiscoveryManager) addGroupVersionLocked(source Source, groupName string, value apidiscoveryv2beta1.APIVersionDiscovery) { - klog.Infof("Adding GroupVersion %s %s to ResourceManager", groupName, value.Version) if rdm.apiGroups == nil { rdm.apiGroups = make(map[groupKey]*apidiscoveryv2beta1.APIGroupDiscovery) @@ -273,6 +272,7 @@ func (rdm *resourceDiscoveryManager) addGroupVersionLocked(source Source, groupN } rdm.apiGroups[key] = group } + klog.Infof("Adding GroupVersion %s %s to ResourceManager", groupName, value.Version) gv := metav1.GroupVersion{Group: groupName, Version: value.Version} gvKey := groupVersionKey{ diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go index 4803975a7..a6d293a15 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go @@ -164,7 +164,7 @@ func WithImpersonation(handler http.Handler, a authorizer.Authorizer, s runtime. req = req.WithContext(request.WithUser(ctx, newUser)) oldUser, _ := request.UserFrom(ctx) - httplog.LogOf(req, w).Addf("%v is acting as %v", oldUser, newUser) + httplog.LogOf(req, w).Addf("%v is impersonating %v", userString(oldUser), userString(newUser)) ae := audit.AuditEventFrom(ctx) audit.LogImpersonatedUser(ae, newUser) @@ -183,6 +183,24 @@ func WithImpersonation(handler http.Handler, a authorizer.Authorizer, s runtime. 
}) } +func userString(u user.Info) string { + if u == nil { + return "" + } + b := strings.Builder{} + if name := u.GetName(); name == "" { + b.WriteString("") + } else { + b.WriteString(name) + } + if groups := u.GetGroups(); len(groups) > 0 { + b.WriteString("[") + b.WriteString(strings.Join(groups, ",")) + b.WriteString("]") + } + return b.String() +} + func unescapeExtraKey(encodedKey string) string { key, err := url.PathUnescape(encodedKey) // Decode %-encoded bytes. if err != nil { diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/traces.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/traces.go index 67a1790c5..1ecf59d45 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/traces.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/traces.go @@ -20,6 +20,7 @@ import ( "net/http" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.opentelemetry.io/otel/trace" tracing "k8s.io/component-base/tracing" @@ -32,7 +33,15 @@ func WithTracing(handler http.Handler, tp trace.TracerProvider) http.Handler { otelhttp.WithPublicEndpoint(), otelhttp.WithTracerProvider(tp), } + wrappedHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Add the http.target attribute to the otelhttp span + // Workaround for https://github.com/open-telemetry/opentelemetry-go-contrib/issues/3743 + if r.URL != nil { + trace.SpanFromContext(r.Context()).SetAttributes(semconv.HTTPTarget(r.URL.RequestURI())) + } + handler.ServeHTTP(w, r) + }) // With Noop TracerProvider, the otelhttp still handles context propagation. // See https://github.com/open-telemetry/opentelemetry-go/tree/main/example/passthrough - return otelhttp.NewHandler(handler, "KubernetesAPI", opts...) + return otelhttp.NewHandler(wrappedHandler, "KubernetesAPI", opts...) } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go index c110964fc..d3b501cf5 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go @@ -267,7 +267,7 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatc } requestInfo, _ := request.RequestInfoFrom(ctx) metrics.RecordLongRunning(req, requestInfo, metrics.APIServerComponent, func() { - serveWatch(watcher, scope, outputMediaType, req, w, timeout) + serveWatch(watcher, scope, outputMediaType, req, w, timeout, metrics.CleanListScope(ctx, &opts)) }) return } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go index 7f8556369..2c2d3e482 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go @@ -77,6 +77,96 @@ func (lazy *lazyAccept) String() string { return "unknown" } +// lazyAPIGroup implements String() string and it will +// lazily get Group from request info. +type lazyAPIGroup struct { + req *http.Request +} + +func (lazy *lazyAPIGroup) String() string { + if lazy.req != nil { + ctx := lazy.req.Context() + requestInfo, ok := apirequest.RequestInfoFrom(ctx) + if ok { + return requestInfo.APIGroup + } + } + + return "unknown" +} + +// lazyAPIVersion implements String() string and it will +// lazily get Group from request info. 
+type lazyAPIVersion struct { + req *http.Request +} + +func (lazy *lazyAPIVersion) String() string { + if lazy.req != nil { + ctx := lazy.req.Context() + requestInfo, ok := apirequest.RequestInfoFrom(ctx) + if ok { + return requestInfo.APIVersion + } + } + + return "unknown" +} + +// lazyName implements String() string and it will +// lazily get Group from request info. +type lazyName struct { + req *http.Request +} + +func (lazy *lazyName) String() string { + if lazy.req != nil { + ctx := lazy.req.Context() + requestInfo, ok := apirequest.RequestInfoFrom(ctx) + if ok { + return requestInfo.Name + } + } + + return "unknown" +} + +// lazySubresource implements String() string and it will +// lazily get Group from request info. +type lazySubresource struct { + req *http.Request +} + +func (lazy *lazySubresource) String() string { + if lazy.req != nil { + ctx := lazy.req.Context() + requestInfo, ok := apirequest.RequestInfoFrom(ctx) + if ok { + return requestInfo.Subresource + } + } + + return "unknown" +} + +// lazyNamespace implements String() string and it will +// lazily get Group from request info. +type lazyNamespace struct { + req *http.Request +} + +func (lazy *lazyNamespace) String() string { + if lazy.req != nil { + ctx := lazy.req.Context() + requestInfo, ok := apirequest.RequestInfoFrom(ctx) + if ok { + return requestInfo.Namespace + } + } + + return "unknown" +} + // lazyAuditID implements Stringer interface to lazily retrieve // the audit ID associated with the request. type lazyAuditID struct { diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/metrics/metrics.go index cf3205a9a..57766924c 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/metrics/metrics.go @@ -18,7 +18,10 @@ package metrics import ( "context" + "sync" + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" ) type RequestBodyVerb string @@ -35,8 +38,8 @@ var ( RequestBodySizes = metrics.NewHistogramVec( &metrics.HistogramOpts{ Subsystem: "apiserver", - Name: "request_body_sizes", - Help: "Apiserver request body sizes broken out by size.", + Name: "request_body_size_bytes", + Help: "Apiserver request body size in bytes broken out by resource and verb.", // we use 0.05 KB as the smallest bucket with 0.1 KB increments up to the // apiserver limit. Buckets: metrics.LinearBuckets(50000, 100000, 31), @@ -46,6 +49,15 @@ var ( ) ) +var registerMetrics sync.Once + +// Register all metrics. 
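+// Register is safe to call from multiple call sites: the sync.Once guard below
+// registers the collector with the legacy registry exactly once.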
+func Register() { + registerMetrics.Do(func() { + legacyregistry.MustRegister(RequestBodySizes) + }) +} + func RecordRequestBodySize(ctx context.Context, resource string, verb RequestBodyVerb, size int) { RequestBodySizes.WithContext(ctx).WithLabelValues(resource, string(verb)).Observe(float64(size)) } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go index 4780c59fd..348b1092d 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go @@ -18,8 +18,11 @@ package handlers import ( "context" + "encoding/json" "fmt" + "io" "net/http" + "reflect" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -29,48 +32,228 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/watch" "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + "k8s.io/apiserver/pkg/endpoints/metrics" endpointsrequest "k8s.io/apiserver/pkg/endpoints/request" + + klog "k8s.io/klog/v2" ) -// transformObject takes the object as returned by storage and ensures it is in -// the client's desired form, as well as ensuring any API level fields like self-link -// are properly set. -func transformObject(ctx context.Context, obj runtime.Object, opts interface{}, mediaType negotiation.MediaTypeOptions, scope *RequestScope, req *http.Request) (runtime.Object, error) { +// watchEmbeddedEncoder performs encoding of the embedded object. +// +// NOTE: watchEmbeddedEncoder is NOT thread-safe. +type watchEmbeddedEncoder struct { + encoder runtime.Encoder + + ctx context.Context + + // target, if non-nil, configures transformation type. + // The other options are ignored if target is nil. + target *schema.GroupVersionKind + tableOptions *metav1.TableOptions + scope *RequestScope + + // identifier of the encoder, computed lazily + identifier runtime.Identifier +} + +func newWatchEmbeddedEncoder(ctx context.Context, encoder runtime.Encoder, target *schema.GroupVersionKind, tableOptions *metav1.TableOptions, scope *RequestScope) *watchEmbeddedEncoder { + return &watchEmbeddedEncoder{ + encoder: encoder, + ctx: ctx, + target: target, + tableOptions: tableOptions, + scope: scope, + } +} + +// Encode implements runtime.Encoder interface. +func (e *watchEmbeddedEncoder) Encode(obj runtime.Object, w io.Writer) error { if co, ok := obj.(runtime.CacheableObject); ok { - if mediaType.Convert != nil { - // Non-nil mediaType.Convert means that some conversion of the object - // has to happen. Currently conversion may potentially modify the - // object or assume something about it (e.g. asTable operates on - // reflection, which won't work for any wrapper). - // To ensure it will work correctly, let's operate on base objects - // and not cache it for now. - // - // TODO: Long-term, transformObject should be changed so that it - // implements runtime.Encoder interface. 
- return doTransformObject(ctx, co.GetObject(), opts, mediaType, scope, req) - } + return co.CacheEncode(e.Identifier(), e.doEncode, w) } - return doTransformObject(ctx, obj, opts, mediaType, scope, req) + return e.doEncode(obj, w) } -func doTransformObject(ctx context.Context, obj runtime.Object, opts interface{}, mediaType negotiation.MediaTypeOptions, scope *RequestScope, req *http.Request) (runtime.Object, error) { - if _, ok := obj.(*metav1.Status); ok { - return obj, nil +func (e *watchEmbeddedEncoder) doEncode(obj runtime.Object, w io.Writer) error { + result, err := doTransformObject(e.ctx, obj, e.tableOptions, e.target, e.scope) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to transform object %v: %v", reflect.TypeOf(obj), err)) + result = obj } - // ensure that for empty lists we don't return items. - // This is safe to modify without deep-copying the object, as - // List objects themselves are never cached. - if meta.IsListType(obj) && meta.LenList(obj) == 0 { - if err := meta.SetList(obj, []runtime.Object{}); err != nil { - return nil, err - } + // When we are tranforming to a table, use the original table options when + // we should print headers only on the first object - headers should be + // omitted on subsequent events. + if e.tableOptions != nil && !e.tableOptions.NoHeaders { + e.tableOptions.NoHeaders = true + // With options change, we should recompute the identifier. + // Clearing this will trigger lazy recompute when needed. + e.identifier = "" } - switch target := mediaType.Convert; { + return e.encoder.Encode(result, w) +} + +// Identifier implements runtime.Encoder interface. +func (e *watchEmbeddedEncoder) Identifier() runtime.Identifier { + if e.identifier == "" { + e.identifier = e.embeddedIdentifier() + } + return e.identifier +} + +type watchEmbeddedEncoderIdentifier struct { + Name string `json:"name,omitempty"` + Encoder string `json:"encoder,omitempty"` + Target string `json:"target,omitempty"` + Options metav1.TableOptions `json:"options,omitempty"` + NoHeaders bool `json:"noHeaders,omitempty"` +} + +func (e *watchEmbeddedEncoder) embeddedIdentifier() runtime.Identifier { + if e.target == nil { + // If no conversion is performed, we effective only use + // the embedded identifier. + return e.encoder.Identifier() + } + identifier := watchEmbeddedEncoderIdentifier{ + Name: "watch-embedded", + Encoder: string(e.encoder.Identifier()), + Target: e.target.String(), + } + if e.target.Kind == "Table" && e.tableOptions != nil { + identifier.Options = *e.tableOptions + identifier.NoHeaders = e.tableOptions.NoHeaders + } + + result, err := json.Marshal(identifier) + if err != nil { + klog.Fatalf("Failed marshaling identifier for watchEmbeddedEncoder: %v", err) + } + return runtime.Identifier(result) +} + +// watchEncoder performs encoding of the watch events. +// +// NOTE: watchEncoder is NOT thread-safe. 
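+// Each Encode call serializes the event's object with the embedded encoder,
+// wraps those bytes in a metav1.WatchEvent, and writes the framed result to
+// the framer; per-event-type identifiers are cached so CacheableObjects can
+// reuse previously computed serializations (see Encode/doEncode below).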
+type watchEncoder struct { + ctx context.Context + kind schema.GroupVersionKind + embeddedEncoder runtime.Encoder + encoder runtime.Encoder + framer io.Writer + + buffer runtime.Splice + eventBuffer runtime.Splice + + currentEmbeddedIdentifier runtime.Identifier + identifiers map[watch.EventType]runtime.Identifier +} + +func newWatchEncoder(ctx context.Context, kind schema.GroupVersionKind, embeddedEncoder runtime.Encoder, encoder runtime.Encoder, framer io.Writer) *watchEncoder { + return &watchEncoder{ + ctx: ctx, + kind: kind, + embeddedEncoder: embeddedEncoder, + encoder: encoder, + framer: framer, + buffer: runtime.NewSpliceBuffer(), + eventBuffer: runtime.NewSpliceBuffer(), + } +} + +// Encode encodes a given watch event. +// NOTE: if events object is implementing the CacheableObject interface, +// +// the serialized version is cached in that object [not the event itself]. +func (e *watchEncoder) Encode(event watch.Event) error { + encodeFunc := func(obj runtime.Object, w io.Writer) error { + return e.doEncode(obj, event, w) + } + if co, ok := event.Object.(runtime.CacheableObject); ok { + return co.CacheEncode(e.identifier(event.Type), encodeFunc, e.framer) + } + return encodeFunc(event.Object, e.framer) +} + +func (e *watchEncoder) doEncode(obj runtime.Object, event watch.Event, w io.Writer) error { + defer e.buffer.Reset() + + if err := e.embeddedEncoder.Encode(obj, e.buffer); err != nil { + return fmt.Errorf("unable to encode watch object %T: %v", obj, err) + } + + // ContentType is not required here because we are defaulting to the serializer type. + outEvent := &metav1.WatchEvent{ + Type: string(event.Type), + Object: runtime.RawExtension{Raw: e.buffer.Bytes()}, + } + metrics.WatchEventsSizes.WithContext(e.ctx).WithLabelValues(e.kind.Group, e.kind.Version, e.kind.Kind).Observe(float64(len(outEvent.Object.Raw))) + + defer e.eventBuffer.Reset() + if err := e.encoder.Encode(outEvent, e.eventBuffer); err != nil { + return fmt.Errorf("unable to encode watch object %T: %v (%#v)", outEvent, err, e) + } + + _, err := w.Write(e.eventBuffer.Bytes()) + return err +} + +type watchEncoderIdentifier struct { + Name string `json:"name,omitempty"` + EmbeddedEncoder string `json:"embeddedEncoder,omitempty"` + Encoder string `json:"encoder,omitempty"` + EventType string `json:"eventType,omitempty"` +} + +func (e *watchEncoder) identifier(eventType watch.EventType) runtime.Identifier { + // We need to take into account that in embeddedEncoder includes table + // transformer, then its identifier is dynamic. As a result, whenever + // the identifier of embeddedEncoder changes, we need to invalidate the + // whole identifiers cache. + // TODO(wojtek-t): Can we optimize it somehow? + if e.currentEmbeddedIdentifier != e.embeddedEncoder.Identifier() { + e.currentEmbeddedIdentifier = e.embeddedEncoder.Identifier() + e.identifiers = map[watch.EventType]runtime.Identifier{} + } + if _, ok := e.identifiers[eventType]; !ok { + e.identifiers[eventType] = e.typeIdentifier(eventType) + } + return e.identifiers[eventType] +} + +func (e *watchEncoder) typeIdentifier(eventType watch.EventType) runtime.Identifier { + // The eventType is a non-standard pattern. This is coming from the fact + // that we're effectively serializing the whole watch event, but storing + // it in serializations of the Object within the watch event. 
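+	// The computed identifier is a small JSON document of the form
+	//   {"name":"watch","embeddedEncoder":"...","encoder":"...","eventType":"ADDED"}
+	// (the encoder values here are placeholders for the wrapped encoders' identifiers).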
+ identifier := watchEncoderIdentifier{ + Name: "watch", + EmbeddedEncoder: string(e.embeddedEncoder.Identifier()), + Encoder: string(e.encoder.Identifier()), + EventType: string(eventType), + } + + result, err := json.Marshal(identifier) + if err != nil { + klog.Fatalf("Failed marshaling identifier for watchEncoder: %v", err) + } + return runtime.Identifier(result) +} + +// doTransformResponseObject is used for handling all requests, including watch. +func doTransformObject(ctx context.Context, obj runtime.Object, opts interface{}, target *schema.GroupVersionKind, scope *RequestScope) (runtime.Object, error) { + if _, ok := obj.(*metav1.Status); ok { + return obj, nil + } + + switch { case target == nil: + // If we ever change that from a no-op, the identifier of + // the watchEmbeddedEncoder has to be adjusted accordingly. return obj, nil case target.Kind == "PartialObjectMetadata": @@ -128,6 +311,7 @@ func targetEncodingForTransform(scope *RequestScope, mediaType negotiation.Media // transformResponseObject takes an object loaded from storage and performs any necessary transformations. // Will write the complete response object. +// transformResponseObject is used only for handling non-streaming requests. func transformResponseObject(ctx context.Context, scope *RequestScope, req *http.Request, w http.ResponseWriter, statusCode int, mediaType negotiation.MediaTypeOptions, result runtime.Object) { options, err := optionsForTransform(mediaType, req) if err != nil { @@ -135,9 +319,19 @@ func transformResponseObject(ctx context.Context, scope *RequestScope, req *http return } + // ensure that for empty lists we don't return items. + // This is safe to modify without deep-copying the object, as + // List objects themselves are never cached. + if meta.IsListType(result) && meta.LenList(result) == 0 { + if err := meta.SetList(result, []runtime.Object{}); err != nil { + scope.err(err, w, req) + return + } + } + var obj runtime.Object do := func() { - obj, err = transformObject(ctx, result, options, mediaType, scope, req) + obj, err = doTransformObject(ctx, result, options, mediaType.Convert, scope) } endpointsrequest.TrackTransformResponseObjectLatency(ctx, do) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/trace_util.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/trace_util.go index 7d273d622..760c9bf40 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/trace_util.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/trace_util.go @@ -27,6 +27,11 @@ func traceFields(req *http.Request) []attribute.KeyValue { attribute.Stringer("accept", &lazyAccept{req: req}), attribute.Stringer("audit-id", &lazyAuditID{req: req}), attribute.Stringer("client", &lazyClientIP{req: req}), + attribute.Stringer("api-group", &lazyAPIGroup{req: req}), + attribute.Stringer("api-version", &lazyAPIVersion{req: req}), + attribute.Stringer("name", &lazyName{req: req}), + attribute.Stringer("subresource", &lazySubresource{req: req}), + attribute.Stringer("namespace", &lazyNamespace{req: req}), attribute.String("protocol", req.Proto), attribute.Stringer("resource", &lazyResource{req: req}), attribute.Stringer("scope", &lazyScope{req: req}), diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go index 79cb11ca6..6e86b79be 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go @@ -19,9 +19,7 @@ package handlers import ( "bytes" "fmt" - "io" "net/http" - 
"reflect" "time" "golang.org/x/net/websocket" @@ -29,13 +27,15 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer/streaming" "k8s.io/apimachinery/pkg/util/httpstream/wsstream" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" "k8s.io/apiserver/pkg/endpoints/metrics" apirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/storage" + utilfeature "k8s.io/apiserver/pkg/util/feature" ) // nothing will ever be sent down this channel @@ -63,7 +63,7 @@ func (w *realTimeoutFactory) TimeoutCh() (<-chan time.Time, func() bool) { // serveWatch will serve a watch response. // TODO: the functionality in this method and in WatchServer.Serve is not cleanly decoupled. -func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions negotiation.MediaTypeOptions, req *http.Request, w http.ResponseWriter, timeout time.Duration) { +func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions negotiation.MediaTypeOptions, req *http.Request, w http.ResponseWriter, timeout time.Duration, metricsScope string) { defer watcher.Stop() options, err := optionsForTransform(mediaTypeOptions, req) @@ -92,6 +92,8 @@ func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions n mediaType += ";stream=watch" } + ctx := req.Context() + // locate the appropriate embedded encoder based on the transform var embeddedEncoder runtime.Encoder contentKind, contentSerializer, transform := targetEncodingForTransform(scope, mediaTypeOptions, req) @@ -106,13 +108,41 @@ func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions n embeddedEncoder = scope.Serializer.EncoderForVersion(serializer.Serializer, contentKind.GroupVersion()) } + var memoryAllocator runtime.MemoryAllocator + + if encoderWithAllocator, supportsAllocator := embeddedEncoder.(runtime.EncoderWithAllocator); supportsAllocator { + // don't put the allocator inside the embeddedEncodeFn as that would allocate memory on every call. + // instead, we allocate the buffer for the entire watch session and release it when we close the connection. + memoryAllocator = runtime.AllocatorPool.Get().(*runtime.Allocator) + defer runtime.AllocatorPool.Put(memoryAllocator) + embeddedEncoder = runtime.NewEncoderWithAllocator(encoderWithAllocator, memoryAllocator) + } + var tableOptions *metav1.TableOptions + if options != nil { + if passedOptions, ok := options.(*metav1.TableOptions); ok { + tableOptions = passedOptions + } else { + scope.err(fmt.Errorf("unexpected options type: %T", options), w, req) + return + } + } + embeddedEncoder = newWatchEmbeddedEncoder(ctx, embeddedEncoder, mediaTypeOptions.Convert, tableOptions, scope) + + if encoderWithAllocator, supportsAllocator := encoder.(runtime.EncoderWithAllocator); supportsAllocator { + if memoryAllocator == nil { + // don't put the allocator inside the embeddedEncodeFn as that would allocate memory on every call. + // instead, we allocate the buffer for the entire watch session and release it when we close the connection. 
+ memoryAllocator = runtime.AllocatorPool.Get().(*runtime.Allocator) + defer runtime.AllocatorPool.Put(memoryAllocator) + } + encoder = runtime.NewEncoderWithAllocator(encoderWithAllocator, memoryAllocator) + } + var serverShuttingDownCh <-chan struct{} if signals := apirequest.ServerShutdownSignalFrom(req.Context()); signals != nil { serverShuttingDownCh = signals.ShuttingDown() } - ctx := req.Context() - server := &WatchServer{ Watching: watcher, Scope: scope, @@ -123,23 +153,10 @@ func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions n Encoder: encoder, EmbeddedEncoder: embeddedEncoder, - Fixup: func(obj runtime.Object) runtime.Object { - result, err := transformObject(ctx, obj, options, mediaTypeOptions, scope, req) - if err != nil { - utilruntime.HandleError(fmt.Errorf("failed to transform object %v: %v", reflect.TypeOf(obj), err)) - return obj - } - // When we are transformed to a table, use the table options as the state for whether we - // should print headers - on watch, we only want to print table headers on the first object - // and omit them on subsequent events. - if tableOptions, ok := options.(*metav1.TableOptions); ok { - tableOptions.NoHeaders = true - } - return result - }, - TimeoutFactory: &realTimeoutFactory{timeout}, ServerShuttingDownCh: serverShuttingDownCh, + + metricsScope: metricsScope, } server.ServeHTTP(w, req) @@ -160,11 +177,11 @@ type WatchServer struct { Encoder runtime.Encoder // used to encode the nested object in the watch stream EmbeddedEncoder runtime.Encoder - // used to correct the object before we send it to the serializer - Fixup func(runtime.Object) runtime.Object TimeoutFactory TimeoutFactory ServerShuttingDownCh <-chan struct{} + + metricsScope string } // ServeHTTP serves a series of encoded events via HTTP with Transfer-Encoding: chunked @@ -195,17 +212,6 @@ func (s *WatchServer) ServeHTTP(w http.ResponseWriter, req *http.Request) { return } - var e streaming.Encoder - var memoryAllocator runtime.MemoryAllocator - - if encoder, supportsAllocator := s.Encoder.(runtime.EncoderWithAllocator); supportsAllocator { - memoryAllocator = runtime.AllocatorPool.Get().(*runtime.Allocator) - defer runtime.AllocatorPool.Put(memoryAllocator) - e = streaming.NewEncoderWithAllocator(framer, encoder, memoryAllocator) - } else { - e = streaming.NewEncoder(framer, s.Encoder) - } - // ensure the connection times out timeoutCh, cleanup := s.TimeoutFactory.TimeoutCh() defer cleanup() @@ -216,26 +222,10 @@ func (s *WatchServer) ServeHTTP(w http.ResponseWriter, req *http.Request) { w.WriteHeader(http.StatusOK) flusher.Flush() - var unknown runtime.Unknown - internalEvent := &metav1.InternalEvent{} - outEvent := &metav1.WatchEvent{} - buf := runtime.NewSpliceBuffer() + watchEncoder := newWatchEncoder(req.Context(), kind, s.EmbeddedEncoder, s.Encoder, framer) ch := s.Watching.ResultChan() done := req.Context().Done() - embeddedEncodeFn := s.EmbeddedEncoder.Encode - if encoder, supportsAllocator := s.EmbeddedEncoder.(runtime.EncoderWithAllocator); supportsAllocator { - if memoryAllocator == nil { - // don't put the allocator inside the embeddedEncodeFn as that would allocate memory on every call. - // instead, we allocate the buffer for the entire watch session and release it when we close the connection. 
- memoryAllocator = runtime.AllocatorPool.Get().(*runtime.Allocator) - defer runtime.AllocatorPool.Put(memoryAllocator) - } - embeddedEncodeFn = func(obj runtime.Object, w io.Writer) error { - return encoder.EncodeWithAllocator(obj, w, memoryAllocator) - } - } - for { select { case <-s.ServerShuttingDownCh: @@ -257,42 +247,20 @@ func (s *WatchServer) ServeHTTP(w http.ResponseWriter, req *http.Request) { return } metrics.WatchEvents.WithContext(req.Context()).WithLabelValues(kind.Group, kind.Version, kind.Kind).Inc() + isWatchListLatencyRecordingRequired := shouldRecordWatchListLatency(event) - obj := s.Fixup(event.Object) - if err := embeddedEncodeFn(obj, buf); err != nil { - // unexpected error - utilruntime.HandleError(fmt.Errorf("unable to encode watch object %T: %v", obj, err)) - return - } - - // ContentType is not required here because we are defaulting to the serializer - // type - unknown.Raw = buf.Bytes() - event.Object = &unknown - metrics.WatchEventsSizes.WithContext(req.Context()).WithLabelValues(kind.Group, kind.Version, kind.Kind).Observe(float64(len(unknown.Raw))) - - *outEvent = metav1.WatchEvent{} - - // create the external type directly and encode it. Clients will only recognize the serialization we provide. - // The internal event is being reused, not reallocated so its just a few extra assignments to do it this way - // and we get the benefit of using conversion functions which already have to stay in sync - *internalEvent = metav1.InternalEvent(event) - err := metav1.Convert_v1_InternalEvent_To_v1_WatchEvent(internalEvent, outEvent, nil) - if err != nil { - utilruntime.HandleError(fmt.Errorf("unable to convert watch object: %v", err)) - // client disconnect. - return - } - if err := e.Encode(outEvent); err != nil { - utilruntime.HandleError(fmt.Errorf("unable to encode watch object %T: %v (%#v)", outEvent, err, e)) + if err := watchEncoder.Encode(event); err != nil { + utilruntime.HandleError(err) // client disconnect. return } + if len(ch) == 0 { flusher.Flush() } - - buf.Reset() + if isWatchListLatencyRecordingRequired { + metrics.RecordWatchListLatency(req.Context(), s.Scope.Resource, s.metricsScope) + } } } } @@ -326,10 +294,10 @@ func (s *WatchServer) HandleWS(ws *websocket.Conn) { // End of results. 
return } - obj := s.Fixup(event.Object) - if err := s.EmbeddedEncoder.Encode(obj, buf); err != nil { + + if err := s.EmbeddedEncoder.Encode(event.Object, buf); err != nil { // unexpected error - utilruntime.HandleError(fmt.Errorf("unable to encode watch object %T: %v", obj, err)) + utilruntime.HandleError(fmt.Errorf("unable to encode watch object %T: %v", event.Object, err)) return } @@ -371,3 +339,19 @@ func (s *WatchServer) HandleWS(ws *websocket.Conn) { } } } + +func shouldRecordWatchListLatency(event watch.Event) bool { + if event.Type != watch.Bookmark || !utilfeature.DefaultFeatureGate.Enabled(features.WatchList) { + return false + } + // as of today the initial-events-end annotation is added only to a single event + // by the watch cache and only when certain conditions are met + // + // for more please read https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list + hasAnnotation, err := storage.HasInitialEventsEndBookmarkAnnotation(event.Object) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to determine if the obj has the required annotation for measuring watchlist latency, obj %T: %v", event.Object, err)) + return false + } + return hasAnnotation +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/installer.go b/vendor/k8s.io/apiserver/pkg/endpoints/installer.go index 042bd802f..ffd4a7dcb 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/installer.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/installer.go @@ -796,7 +796,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag } route := ws.GET(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). Operation("read"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). Returns(http.StatusOK, "OK", producedObject). @@ -817,7 +817,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag handler = utilwarning.AddWarningsHandler(handler, warnings) route := ws.GET(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). Operation("list"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), allMediaTypes...)...). Returns(http.StatusOK, "OK", versionedList). @@ -850,7 +850,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag handler = utilwarning.AddWarningsHandler(handler, warnings) route := ws.PUT(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). Operation("replace"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). Returns(http.StatusOK, "OK", producedObject). 
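The reworded "pretty" parameter documentation in these route registrations describes existing behavior: responses are indented by default when the User-Agent looks like a browser, curl, or wget, and ?pretty=false turns that off. A minimal standalone probe of that behavior, assuming a reachable apiserver and a bearer token (both placeholders):

package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Placeholders: point these at a reachable apiserver and a valid bearer token.
	const server = "https://127.0.0.1:6443"
	const token = "REPLACE_ME"

	client := &http.Client{Transport: &http.Transport{
		// Demo only: do not skip certificate verification in real clients.
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}}

	// With a curl-like User-Agent the body comes back indented by default;
	// pretty=false (or a non-CLI User-Agent) yields compact JSON.
	req, err := http.NewRequest("GET", server+"/api/v1/namespaces?pretty=false", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+token)
	req.Header.Set("User-Agent", "curl/8.4.0")

	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}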
@@ -879,7 +879,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag handler = utilwarning.AddWarningsHandler(handler, warnings) route := ws.PATCH(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). Consumes(supportedTypes...). Operation("patch"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). @@ -909,7 +909,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag } route := ws.POST(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). Operation("create"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). Returns(http.StatusOK, "OK", producedObject). @@ -938,7 +938,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag handler = utilwarning.AddWarningsHandler(handler, warnings) route := ws.DELETE(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). Operation("delete"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). Writes(deleteReturnType). @@ -962,7 +962,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag handler = utilwarning.AddWarningsHandler(handler, warnings) route := ws.DELETE(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). Operation("deletecollection"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). Writes(versionedStatus). @@ -990,7 +990,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag handler = utilwarning.AddWarningsHandler(handler, warnings) route := ws.GET(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). Operation("watch"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(allMediaTypes...). Returns(http.StatusOK, "OK", versionedWatchEvent). @@ -1011,7 +1011,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag handler = utilwarning.AddWarningsHandler(handler, warnings) route := ws.GET(action.Path).To(handler). 
Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). Operation("watch"+namespaced+kind+strings.Title(subresource)+"List"+operationSuffix). Produces(allMediaTypes...). Returns(http.StatusOK, "OK", versionedWatchEvent). diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go index ba2aed69d..48fc951ad 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go @@ -18,6 +18,7 @@ package metrics import ( "context" + "fmt" "net/http" "net/url" "strconv" @@ -26,8 +27,12 @@ import ( "time" restful "github.com/emicklei/go-restful/v3" + + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" utilsets "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/audit" "k8s.io/apiserver/pkg/authentication/user" @@ -280,6 +285,17 @@ var ( []string{"code_path"}, ) + watchListLatencies = compbasemetrics.NewHistogramVec( + &compbasemetrics.HistogramOpts{ + Subsystem: APIServerComponent, + Name: "watch_list_duration_seconds", + Help: "Response latency distribution in seconds for watch list requests broken by group, version, resource and scope.", + Buckets: []float64{0.05, 0.1, 0.2, 0.4, 0.6, 0.8, 1.0, 2, 4, 6, 8, 10, 15, 20, 30, 45, 60}, + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"group", "version", "resource", "scope"}, + ) + metrics = []resettableCollector{ deprecatedRequestGauge, requestCounter, @@ -300,6 +316,7 @@ var ( requestAbortsTotal, requestPostTimeoutTotal, requestTimestampComparisonDuration, + watchListLatencies, } // these are the valid request methods which we report in our metrics. Any other request methods @@ -511,6 +528,18 @@ func RecordLongRunning(req *http.Request, requestInfo *request.RequestInfo, comp fn() } +// RecordWatchListLatency simply records response latency for watch list requests. +func RecordWatchListLatency(ctx context.Context, gvr schema.GroupVersionResource, metricsScope string) { + requestReceivedTimestamp, ok := request.ReceivedTimestampFrom(ctx) + if !ok { + utilruntime.HandleError(fmt.Errorf("unable to measure watchlist latency because no received ts found in the ctx, gvr: %s", gvr)) + return + } + elapsedSeconds := time.Since(requestReceivedTimestamp).Seconds() + + watchListLatencies.WithContext(ctx).WithLabelValues(gvr.Group, gvr.Version, gvr.Resource, metricsScope).Observe(elapsedSeconds) +} + // MonitorRequest handles standard transformations for client and the reported verb and then invokes Monitor to record // a request. verb must be uppercase to be backwards compatible with existing monitoring tooling. func MonitorRequest(req *http.Request, verb, group, version, resource, subresource, scope, component string, deprecated bool, removedRelease string, httpCode, respSize int, elapsed time.Duration) { @@ -621,6 +650,26 @@ func CleanScope(requestInfo *request.RequestInfo) string { return "" } +// CleanListScope computes the request scope for metrics. +// +// Note that normally we would use CleanScope for computation. 
+// But due to the same reasons mentioned in determineRequestNamespaceAndName we cannot. +func CleanListScope(ctx context.Context, opts *metainternalversion.ListOptions) string { + namespace, name := determineRequestNamespaceAndName(ctx, opts) + if len(name) > 0 { + return "resource" + } + if len(namespace) > 0 { + return "namespace" + } + if requestInfo, ok := request.RequestInfoFrom(ctx); ok { + if requestInfo.IsResourceRequest { + return "cluster" + } + } + return "" +} + // CanonicalVerb distinguishes LISTs from GETs (and HEADs). It assumes verb is // UPPERCASE. func CanonicalVerb(verb string, scope string) string { @@ -655,6 +704,30 @@ func CleanVerb(verb string, request *http.Request, requestInfo *request.RequestI return reportedVerb } +// determineRequestNamespaceAndName computes name and namespace for the given requests +// +// note that the logic of this function was copy&pasted from cacher.go +// after an unsuccessful attempt of moving it to RequestInfo +// +// see: https://github.com/kubernetes/kubernetes/pull/120520 +func determineRequestNamespaceAndName(ctx context.Context, opts *metainternalversion.ListOptions) (namespace, name string) { + if requestNamespace, ok := request.NamespaceFrom(ctx); ok && len(requestNamespace) > 0 { + namespace = requestNamespace + } else if opts != nil && opts.FieldSelector != nil { + if selectorNamespace, ok := opts.FieldSelector.RequiresExactMatch("metadata.namespace"); ok { + namespace = selectorNamespace + } + } + if requestInfo, ok := request.RequestInfoFrom(ctx); ok && requestInfo != nil && len(requestInfo.Name) > 0 { + name = requestInfo.Name + } else if opts != nil && opts.FieldSelector != nil { + if selectorName, ok := opts.FieldSelector.RequiresExactMatch("metadata.name"); ok { + name = selectorName + } + } + return +} + // cleanVerb additionally ensures that unknown verbs don't clog up the metrics. func cleanVerb(verb, suggestedVerb string, request *http.Request, requestInfo *request.RequestInfo) string { // CanonicalVerb (being an input for this function) doesn't handle correctly the diff --git a/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiserver/pkg/features/kube_features.go index 2276e2daf..f059cef9b 100644 --- a/vendor/k8s.io/apiserver/pkg/features/kube_features.go +++ b/vendor/k8s.io/apiserver/pkg/features/kube_features.go @@ -54,6 +54,7 @@ const ( // owner: @smarterclayton // alpha: v1.8 // beta: v1.9 + // stable: 1.29 // // Allow API clients to retrieve resource lists in chunks rather than // all at once. @@ -62,6 +63,7 @@ const ( // owner: @MikeSpreitzer @yue9944882 // alpha: v1.18 // beta: v1.20 + // stable: 1.29 // // Enables managing request concurrency with prioritization and fairness at each server. // The FeatureGate was introduced in release 1.15 but the feature @@ -99,6 +101,7 @@ const ( // kep: https://kep.k8s.io/2876 // alpha: v1.23 // beta: v1.25 + // stable: v1.29 // // Enables expression validation for Custom Resource CustomResourceValidationExpressions featuregate.Feature = "CustomResourceValidationExpressions" @@ -121,6 +124,7 @@ const ( // kep: https://kep.k8s.io/3299 // alpha: v1.25 // beta: v1.27 + // stable: v1.29 // // Enables KMS v2 API for encryption at rest. KMSv2 featuregate.Feature = "KMSv2" @@ -128,6 +132,7 @@ const ( // owner: @enj // kep: https://kep.k8s.io/3299 // beta: v1.28 + // stable: v1.29 // // Enables the use of derived encryption keys with KMS v2. KMSv2KDF featuregate.Feature = "KMSv2KDF" @@ -141,18 +146,10 @@ const ( // in the spec returned from kube-apiserver. 
OpenAPIEnums featuregate.Feature = "OpenAPIEnums" - // owner: @jefftree - // kep: https://kep.k8s.io/2896 - // alpha: v1.23 - // beta: v1.24 - // stable: v1.27 - // - // Enables kubernetes to publish OpenAPI v3 - OpenAPIV3 featuregate.Feature = "OpenAPIV3" - // owner: @caesarxuchao // alpha: v1.15 // beta: v1.16 + // stable: 1.29 // // Allow apiservers to show a count of remaining items in the response // to a chunking list request. @@ -214,6 +211,20 @@ const ( // document. StorageVersionHash featuregate.Feature = "StorageVersionHash" + // owner: @aramase, @enj, @nabokihms + // kep: https://kep.k8s.io/3331 + // alpha: v1.29 + // + // Enables Structured Authentication Configuration + StructuredAuthenticationConfiguration featuregate.Feature = "StructuredAuthenticationConfiguration" + + // owner: @palnabarun + // kep: https://kep.k8s.io/3221 + // alpha: v1.29 + // + // Enables Structured Authorization Configuration + StructuredAuthorizationConfiguration featuregate.Feature = "StructuredAuthorizationConfiguration" + // owner: @wojtek-t // alpha: v1.15 // beta: v1.16 @@ -241,6 +252,14 @@ const ( // // Allow the API server to serve consistent lists from cache ConsistentListFromCache featuregate.Feature = "ConsistentListFromCache" + + // owner: @tkashem + // beta: v1.29 + // + // Allow Priority & Fairness in the API server to use a zero value for + // the 'nominalConcurrencyShares' field of the 'limited' section of a + // priority level. + ZeroLimitedNominalConcurrencyShares featuregate.Feature = "ZeroLimitedNominalConcurrencyShares" ) func init() { @@ -256,9 +275,9 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS AdmissionWebhookMatchConditions: {Default: true, PreRelease: featuregate.Beta}, - APIListChunking: {Default: true, PreRelease: featuregate.Beta}, + APIListChunking: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 - APIPriorityAndFairness: {Default: true, PreRelease: featuregate.Beta}, + APIPriorityAndFairness: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 APIResponseCompression: {Default: true, PreRelease: featuregate.Beta}, @@ -268,21 +287,19 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS ValidatingAdmissionPolicy: {Default: false, PreRelease: featuregate.Beta}, - CustomResourceValidationExpressions: {Default: true, PreRelease: featuregate.Beta}, + CustomResourceValidationExpressions: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 EfficientWatchResumption: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, - KMSv1: {Default: true, PreRelease: featuregate.Deprecated}, + KMSv1: {Default: false, PreRelease: featuregate.Deprecated}, - KMSv2: {Default: true, PreRelease: featuregate.Beta}, + KMSv2: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 - KMSv2KDF: {Default: false, PreRelease: featuregate.Beta}, // default and lock to true in 1.29, remove in 1.31 + KMSv2KDF: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 OpenAPIEnums: {Default: true, PreRelease: featuregate.Beta}, - OpenAPIV3: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 - - RemainingItemCount: {Default: true, PreRelease: featuregate.Beta}, + RemainingItemCount: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 RemoveSelfLink: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, @@ 
-294,7 +311,11 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS StorageVersionHash: {Default: true, PreRelease: featuregate.Beta}, - UnauthenticatedHTTP2DOSMitigation: {Default: false, PreRelease: featuregate.Beta}, + StructuredAuthenticationConfiguration: {Default: false, PreRelease: featuregate.Alpha}, + + StructuredAuthorizationConfiguration: {Default: false, PreRelease: featuregate.Alpha}, + + UnauthenticatedHTTP2DOSMitigation: {Default: true, PreRelease: featuregate.Beta}, WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, @@ -303,4 +324,6 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS WatchList: {Default: false, PreRelease: featuregate.Alpha}, ConsistentListFromCache: {Default: false, PreRelease: featuregate.Alpha}, + + ZeroLimitedNominalConcurrencyShares: {Default: false, PreRelease: featuregate.Beta}, } diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go index 3983c92d0..3c974f398 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go @@ -44,7 +44,7 @@ func StorageWithCacher() generic.StorageDecorator { triggerFuncs storage.IndexerFuncs, indexers *cache.Indexers) (storage.Interface, factory.DestroyFunc, error) { - s, d, err := generic.NewRawStorage(storageConfig, newFunc) + s, d, err := generic.NewRawStorage(storageConfig, newFunc, newListFunc, resourcePrefix) if err != nil { return s, d, err } diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go b/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go index 715aa1047..4c2b2fc0e 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go @@ -47,12 +47,12 @@ func UndecoratedStorage( getAttrsFunc storage.AttrFunc, trigger storage.IndexerFuncs, indexers *cache.Indexers) (storage.Interface, factory.DestroyFunc, error) { - return NewRawStorage(config, newFunc) + return NewRawStorage(config, newFunc, newListFunc, resourcePrefix) } // NewRawStorage creates the low level kv storage. This is a work-around for current // two layer of same storage interface. // TODO: Once cacher is enabled on all registries (event registry is special), we will remove this method. -func NewRawStorage(config *storagebackend.ConfigForResource, newFunc func() runtime.Object) (storage.Interface, factory.DestroyFunc, error) { - return factory.Create(*config, newFunc) +func NewRawStorage(config *storagebackend.ConfigForResource, newFunc, newListFunc func() runtime.Object, resourcePrefix string) (storage.Interface, factory.DestroyFunc, error) { + return factory.Create(*config, newFunc, newListFunc, resourcePrefix) } diff --git a/vendor/k8s.io/apiserver/pkg/server/config.go b/vendor/k8s.io/apiserver/pkg/server/config.go index d678f52df..beff08f14 100644 --- a/vendor/k8s.io/apiserver/pkg/server/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/config.go @@ -78,6 +78,7 @@ import ( "k8s.io/component-base/tracing" "k8s.io/klog/v2" openapicommon "k8s.io/kube-openapi/pkg/common" + "k8s.io/kube-openapi/pkg/spec3" "k8s.io/kube-openapi/pkg/validation/spec" "k8s.io/utils/clock" utilsnet "k8s.io/utils/net" @@ -194,7 +195,7 @@ type Config struct { // OpenAPIConfig will be used in generating OpenAPI spec. This is nil by default. 
Use DefaultOpenAPIConfig for "working" defaults. OpenAPIConfig *openapicommon.Config // OpenAPIV3Config will be used in generating OpenAPI V3 spec. This is nil by default. Use DefaultOpenAPIV3Config for "working" defaults. - OpenAPIV3Config *openapicommon.Config + OpenAPIV3Config *openapicommon.OpenAPIV3Config // SkipOpenAPIInstallation avoids installing the OpenAPI handler if set to true. SkipOpenAPIInstallation bool @@ -482,8 +483,23 @@ func DefaultOpenAPIConfig(getDefinitions openapicommon.GetOpenAPIDefinitions, de } // DefaultOpenAPIV3Config provides the default OpenAPIV3Config used to build the OpenAPI V3 spec -func DefaultOpenAPIV3Config(getDefinitions openapicommon.GetOpenAPIDefinitions, defNamer *apiopenapi.DefinitionNamer) *openapicommon.Config { - defaultConfig := DefaultOpenAPIConfig(getDefinitions, defNamer) +func DefaultOpenAPIV3Config(getDefinitions openapicommon.GetOpenAPIDefinitions, defNamer *apiopenapi.DefinitionNamer) *openapicommon.OpenAPIV3Config { + defaultConfig := &openapicommon.OpenAPIV3Config{ + IgnorePrefixes: []string{}, + Info: &spec.Info{ + InfoProps: spec.InfoProps{ + Title: "Generic API Server", + }, + }, + DefaultResponse: &spec3.Response{ + ResponseProps: spec3.ResponseProps{ + Description: "Default Response.", + }, + }, + GetOperationIDAndTags: apiopenapi.GetOperationIDAndTags, + GetDefinitionName: defNamer.GetDefinitionName, + GetDefinitions: getDefinitions, + } defaultConfig.Definitions = getDefinitions(func(name string) spec.Ref { defName, _ := defaultConfig.GetDefinitionName(name) return spec.MustCreateRef("#/components/schemas/" + openapicommon.EscapeJsonPointer(defName)) @@ -608,6 +624,45 @@ func completeOpenAPI(config *openapicommon.Config, version *version.Info) { } } +func completeOpenAPIV3(config *openapicommon.OpenAPIV3Config, version *version.Info) { + if config == nil { + return + } + if config.SecuritySchemes != nil { + // Setup OpenAPI security: all APIs will have the same authentication for now. + config.DefaultSecurity = []map[string][]string{} + keys := []string{} + for k := range config.SecuritySchemes { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + config.DefaultSecurity = append(config.DefaultSecurity, map[string][]string{k: {}}) + } + if config.CommonResponses == nil { + config.CommonResponses = map[int]*spec3.Response{} + } + if _, exists := config.CommonResponses[http.StatusUnauthorized]; !exists { + config.CommonResponses[http.StatusUnauthorized] = &spec3.Response{ + ResponseProps: spec3.ResponseProps{ + Description: "Unauthorized", + }, + } + } + } + // make sure we populate info, and info.version, if not manually set + if config.Info == nil { + config.Info = &spec.Info{} + } + if config.Info.Version == "" { + if version != nil { + config.Info.Version = strings.Split(version.String(), "-")[0] + } else { + config.Info.Version = "unversioned" + } + } +} + // DrainedNotify returns a lifecycle signal of genericapiserver already drained while shutting down. 
func (c *Config) DrainedNotify() <-chan struct{} { return c.lifecycleSignals.InFlightRequestsDrained.Signaled() @@ -633,7 +688,7 @@ func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedCo } completeOpenAPI(c.OpenAPIConfig, c.Version) - completeOpenAPI(c.OpenAPIV3Config, c.Version) + completeOpenAPIV3(c.OpenAPIV3Config, c.Version) if c.DiscoveryAddresses == nil { c.DiscoveryAddresses = discovery.DefaultAddresses{DefaultAddress: c.ExternalAddress} @@ -669,6 +724,12 @@ func (c *RecommendedConfig) Complete() CompletedConfig { return c.Config.Complete(c.SharedInformerFactory) } +var allowedMediaTypes = []string{ + runtime.ContentTypeJSON, + runtime.ContentTypeYAML, + runtime.ContentTypeProtobuf, +} + // New creates a new server which logically combines the handling chain with the passed server. // name is used to differentiate for logging. The handler chain in particular can be difficult as it starts delegating. // delegationTarget may not be nil. @@ -676,6 +737,18 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G if c.Serializer == nil { return nil, fmt.Errorf("Genericapiserver.New() called with config.Serializer == nil") } + for _, info := range c.Serializer.SupportedMediaTypes() { + var ok bool + for _, mt := range allowedMediaTypes { + if info.MediaType == mt { + ok = true + break + } + } + if !ok { + return nil, fmt.Errorf("refusing to create new apiserver %q with support for media type %q (allowed media types are: %s)", name, info.MediaType, strings.Join(allowedMediaTypes, ", ")) + } + } if c.LoopbackClientConfig == nil { return nil, fmt.Errorf("Genericapiserver.New() called with config.LoopbackClientConfig == nil") } @@ -915,7 +988,7 @@ func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler { requestWorkEstimator := flowcontrolrequest.NewWorkEstimator( c.StorageObjectCountTracker.Get, c.FlowControl.GetInterestedWatchCount, workEstimatorCfg, c.FlowControl.GetMaxSeats) handler = filterlatency.TrackCompleted(handler) - handler = genericfilters.WithPriorityAndFairness(handler, c.LongRunningFunc, c.FlowControl, requestWorkEstimator) + handler = genericfilters.WithPriorityAndFairness(handler, c.LongRunningFunc, c.FlowControl, requestWorkEstimator, c.RequestTimeout/4) handler = filterlatency.TrackStarted(handler, c.TracerProvider, "priorityandfairness") } else { handler = genericfilters.WithMaxInFlightLimit(handler, c.MaxRequestsInFlight, c.MaxMutatingRequestsInFlight, c.LongRunningFunc) @@ -994,14 +1067,10 @@ func installAPI(s *GenericAPIServer, c *Config) { if c.EnableMetrics { if c.EnableProfiling { routes.MetricsWithReset{}.Install(s.Handler.NonGoRestfulMux) - if utilfeature.DefaultFeatureGate.Enabled(features.ComponentSLIs) { - slis.SLIMetricsWithReset{}.Install(s.Handler.NonGoRestfulMux) - } + slis.SLIMetricsWithReset{}.Install(s.Handler.NonGoRestfulMux) } else { routes.DefaultMetrics{}.Install(s.Handler.NonGoRestfulMux) - if utilfeature.DefaultFeatureGate.Enabled(features.ComponentSLIs) { - slis.SLIMetrics{}.Install(s.Handler.NonGoRestfulMux) - } + slis.SLIMetrics{}.Install(s.Handler.NonGoRestfulMux) } } @@ -1015,7 +1084,7 @@ func installAPI(s *GenericAPIServer, c *Config) { s.Handler.GoRestfulContainer.Add(s.DiscoveryGroupManager.WebService()) } } - if c.FlowControl != nil && utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIPriorityAndFairness) { + if c.FlowControl != nil { c.FlowControl.Install(s.Handler.NonGoRestfulMux) } } diff --git 
a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go index 6dbed6a65..75bc49e99 100644 --- a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go @@ -21,7 +21,7 @@ import ( "context" "crypto/x509" "fmt" - "io/ioutil" + "os" "sync/atomic" "time" @@ -98,7 +98,7 @@ func (c *DynamicFileCAContent) AddListener(listener Listener) { // loadCABundle determines the next set of content for the file. func (c *DynamicFileCAContent) loadCABundle() error { - caBundle, err := ioutil.ReadFile(c.filename) + caBundle, err := os.ReadFile(c.filename) if err != nil { return err } diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go index 36c4d4586..62aef4992 100644 --- a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go @@ -20,7 +20,7 @@ import ( "context" "crypto/tls" "fmt" - "io/ioutil" + "os" "sync/atomic" "time" @@ -80,11 +80,11 @@ func (c *DynamicCertKeyPairContent) AddListener(listener Listener) { // loadCertKeyPair determines the next set of content for the file. func (c *DynamicCertKeyPairContent) loadCertKeyPair() error { - cert, err := ioutil.ReadFile(c.certFile) + cert, err := os.ReadFile(c.certFile) if err != nil { return err } - key, err := ioutil.ReadFile(c.keyFile) + key, err := os.ReadFile(c.keyFile) if err != nil { return err } diff --git a/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go b/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go index 2df786e13..ce9a3691a 100644 --- a/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go @@ -18,7 +18,7 @@ package egressselector import ( "fmt" - "io/ioutil" + "os" "strings" "k8s.io/apimachinery/pkg/runtime" @@ -51,7 +51,7 @@ func ReadEgressSelectorConfiguration(configFilePath string) (*apiserver.EgressSe return nil, nil } // a file was provided, so we just read it. 
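// (ioutil.ReadFile has been a deprecated thin wrapper around os.ReadFile since Go 1.16,
// so the replacement below does not change behavior.)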
- data, err := ioutil.ReadFile(configFilePath) + data, err := os.ReadFile(configFilePath) if err != nil { return nil, fmt.Errorf("unable to read egress selector configuration from %q [%v]", configFilePath, err) } diff --git a/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go b/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go index 0936d6ef4..a38ef6464 100644 --- a/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go +++ b/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go @@ -22,10 +22,10 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" "net" "net/http" "net/url" + "os" "strings" "time" @@ -277,7 +277,7 @@ func getTLSConfig(t *apiserver.TLSConfig) (*tls.Config, error) { } certPool := x509.NewCertPool() if caCert != "" { - certBytes, err := ioutil.ReadFile(caCert) + certBytes, err := os.ReadFile(caCert) if err != nil { return nil, fmt.Errorf("failed to read cert file %s, got %v", caCert, err) } diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go b/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go index 6b3987781..1e91b9a31 100644 --- a/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go +++ b/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go @@ -26,7 +26,7 @@ import ( "sync/atomic" "time" - flowcontrol "k8s.io/api/flowcontrol/v1beta3" + flowcontrol "k8s.io/api/flowcontrol/v1" apitypes "k8s.io/apimachinery/pkg/types" epmetrics "k8s.io/apiserver/pkg/endpoints/metrics" apirequest "k8s.io/apiserver/pkg/endpoints/request" @@ -35,6 +35,7 @@ import ( fcmetrics "k8s.io/apiserver/pkg/util/flowcontrol/metrics" flowcontrolrequest "k8s.io/apiserver/pkg/util/flowcontrol/request" "k8s.io/klog/v2" + utilsclock "k8s.io/utils/clock" ) // PriorityAndFairnessClassification identifies the results of @@ -78,6 +79,10 @@ type priorityAndFairnessHandler struct { // the purpose of computing RetryAfter header to avoid system // overload. droppedRequests utilflowcontrol.DroppedRequestsTracker + + // newReqWaitCtxFn creates a derived context with a deadline + // of how long a given request can wait in its queue. + newReqWaitCtxFn func(context.Context) (context.Context, context.CancelFunc) } func (h *priorityAndFairnessHandler) Handle(w http.ResponseWriter, r *http.Request) { @@ -240,8 +245,9 @@ func (h *priorityAndFairnessHandler) Handle(w http.ResponseWriter, r *http.Reque resultCh <- err }() - // We create handleCtx with explicit cancelation function. - // The reason for it is that Handle() underneath may start additional goroutine + // We create handleCtx with an adjusted deadline, for two reasons. + // One is to limit the time the request waits before its execution starts. + // The other reason for it is that Handle() underneath may start additional goroutine // that is blocked on context cancellation. However, from APF point of view, // we don't want to wait until the whole watch request is processed (which is // when it context is actually cancelled) - we want to unblock the goroutine as @@ -249,7 +255,7 @@ func (h *priorityAndFairnessHandler) Handle(w http.ResponseWriter, r *http.Reque // // Note that we explicitly do NOT call the actuall handler using that context // to avoid cancelling request too early. 
- handleCtx, handleCtxCancel := context.WithCancel(ctx) + handleCtx, handleCtxCancel := h.newReqWaitCtxFn(ctx) defer handleCtxCancel() // Note that Handle will return irrespective of whether the request @@ -286,7 +292,11 @@ func (h *priorityAndFairnessHandler) Handle(w http.ResponseWriter, r *http.Reque h.handler.ServeHTTP(w, r) } - h.fcIfc.Handle(ctx, digest, noteFn, estimateWork, queueNote, execute) + func() { + handleCtx, cancelFn := h.newReqWaitCtxFn(ctx) + defer cancelFn() + h.fcIfc.Handle(handleCtx, digest, noteFn, estimateWork, queueNote, execute) + }() } if !served { @@ -309,6 +319,7 @@ func WithPriorityAndFairness( longRunningRequestCheck apirequest.LongRunningRequestCheck, fcIfc utilflowcontrol.Interface, workEstimator flowcontrolrequest.WorkEstimatorFunc, + defaultRequestWaitLimit time.Duration, ) http.Handler { if fcIfc == nil { klog.Warningf("priority and fairness support not found, skipping") @@ -322,12 +333,18 @@ func WithPriorityAndFairness( waitingMark.mutatingObserver = fcmetrics.GetWaitingMutatingConcurrency() }) + clock := &utilsclock.RealClock{} + newReqWaitCtxFn := func(ctx context.Context) (context.Context, context.CancelFunc) { + return getRequestWaitContext(ctx, defaultRequestWaitLimit, clock) + } + priorityAndFairnessHandler := &priorityAndFairnessHandler{ handler: handler, longRunningRequestCheck: longRunningRequestCheck, fcIfc: fcIfc, workEstimator: workEstimator, droppedRequests: utilflowcontrol.NewDroppedRequestsTracker(), + newReqWaitCtxFn: newReqWaitCtxFn, } return http.HandlerFunc(priorityAndFairnessHandler.Handle) } @@ -356,3 +373,48 @@ func tooManyRequests(req *http.Request, w http.ResponseWriter, retryAfter string w.Header().Set("Retry-After", retryAfter) http.Error(w, "Too many requests, please try again later.", http.StatusTooManyRequests) } + +// getRequestWaitContext returns a new context with a deadline of how +// long the request is allowed to wait before it is removed from its +// queue and rejected. +// The context.CancelFunc returned must never be nil and the caller is +// responsible for calling the CancelFunc function for cleanup. +// - ctx: the context associated with the request (it may or may +// not have a deadline). +// - defaultRequestWaitLimit: the default wait duration that is used +// if the request context does not have any deadline. +// (a) initialization of a watch or +// (b) a request whose context has no deadline +// +// clock comes in handy for testing the function +func getRequestWaitContext(ctx context.Context, defaultRequestWaitLimit time.Duration, clock utilsclock.PassiveClock) (context.Context, context.CancelFunc) { + if ctx.Err() != nil { + return ctx, func() {} + } + + reqArrivedAt := clock.Now() + if reqReceivedTimestamp, ok := apirequest.ReceivedTimestampFrom(ctx); ok { + reqArrivedAt = reqReceivedTimestamp + } + + // a) we will allow the request to wait in the queue for one + // fourth of the time of its allotted deadline. 
+ // b) if the request context does not have any deadline + // then we default to 'defaultRequestWaitLimit' + // in any case, the wait limit for any request must not + // exceed the hard limit of 1m + // + // request has deadline: + // wait-limit = min(remaining deadline / 4, 1m) + // request has no deadline: + // wait-limit = min(defaultRequestWaitLimit, 1m) + thisReqWaitLimit := defaultRequestWaitLimit + if deadline, ok := ctx.Deadline(); ok { + thisReqWaitLimit = deadline.Sub(reqArrivedAt) / 4 + } + if thisReqWaitLimit > time.Minute { + thisReqWaitLimit = time.Minute + } + + return context.WithDeadline(ctx, reqArrivedAt.Add(thisReqWaitLimit)) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go b/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go index 665f20beb..450c7d4f6 100644 --- a/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go +++ b/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go @@ -158,7 +158,7 @@ type GenericAPIServer struct { openAPIConfig *openapicommon.Config // Enable swagger and/or OpenAPI V3 if these configs are non-nil. - openAPIV3Config *openapicommon.Config + openAPIV3Config *openapicommon.OpenAPIV3Config // SkipOpenAPIInstallation indicates not to install the OpenAPI handler // during PrepareRun. @@ -430,11 +430,9 @@ func (s *GenericAPIServer) PrepareRun() preparedGenericAPIServer { } if s.openAPIV3Config != nil && !s.skipOpenAPIInstallation { - if utilfeature.DefaultFeatureGate.Enabled(features.OpenAPIV3) { - s.OpenAPIV3VersionedService = routes.OpenAPI{ - Config: s.openAPIV3Config, - }.InstallV3(s.Handler.GoRestfulContainer, s.Handler.NonGoRestfulMux) - } + s.OpenAPIV3VersionedService = routes.OpenAPI{ + V3Config: s.openAPIV3Config, + }.InstallV3(s.Handler.GoRestfulContainer, s.Handler.NonGoRestfulMux) } s.installHealthz() diff --git a/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go b/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go index 4d6248f83..a4b4c5899 100644 --- a/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go +++ b/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go @@ -205,7 +205,6 @@ func StatusIsNot(statuses ...int) StacktracePred { func (rl *respLogger) Addf(format string, data ...interface{}) { rl.mutex.Lock() defer rl.mutex.Unlock() - rl.addedInfo.WriteString("\n") rl.addedInfo.WriteString(fmt.Sprintf(format, data...)) } diff --git a/vendor/k8s.io/apiserver/pkg/server/options/api_enablement.go b/vendor/k8s.io/apiserver/pkg/server/options/api_enablement.go index 13968b4e7..6ab58bab2 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/api_enablement.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/api_enablement.go @@ -42,6 +42,9 @@ func NewAPIEnablementOptions() *APIEnablementOptions { // AddFlags adds flags for a specific APIServer to the specified FlagSet func (s *APIEnablementOptions) AddFlags(fs *pflag.FlagSet) { + if s == nil { + return + } fs.Var(&s.RuntimeConfig, "runtime-config", ""+ "A set of key=value pairs that enable or disable built-in APIs. 
Supported options are:\n"+ "v1=true|false for the core API group\n"+ @@ -87,7 +90,6 @@ func (s *APIEnablementOptions) Validate(registries ...GroupRegistry) []error { // ApplyTo override MergedResourceConfig with defaults and registry func (s *APIEnablementOptions) ApplyTo(c *server.Config, defaultResourceConfig *serverstore.ResourceConfig, registry resourceconfig.GroupVersionRegistry) error { - if s == nil { return nil } diff --git a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go index fcb41942f..5b3da51fa 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go @@ -105,10 +105,36 @@ const ( kmsReloadHealthCheckName = "kms-providers" ) +var codecs serializer.CodecFactory + +// this atomic bool allows us to swap enablement of the KMSv2KDF feature in tests +// as the feature gate is now locked to true starting with v1.29 +// Note: it cannot be set by an end user +var kdfDisabled atomic.Bool + +// this function should only be called in tests to swap enablement of the KMSv2KDF feature +func SetKDFForTests(b bool) func() { + kdfDisabled.Store(!b) + return func() { + kdfDisabled.Store(false) + } +} + +// this function should be used to determine enablement of the KMSv2KDF feature +// instead of getting it from DefaultFeatureGate as the feature gate is now locked +// to true starting with v1.29 +func GetKDF() bool { + return !kdfDisabled.Load() +} + func init() { - metrics.RegisterMetrics() - storagevalue.RegisterMetrics() + configScheme := runtime.NewScheme() + utilruntime.Must(apiserverconfig.AddToScheme(configScheme)) + utilruntime.Must(apiserverconfigv1.AddToScheme(configScheme)) + codecs = serializer.NewCodecFactory(configScheme) envelopemetrics.RegisterMetrics() + storagevalue.RegisterMetrics() + metrics.RegisterMetrics() } type kmsPluginHealthzResponse struct { @@ -131,6 +157,8 @@ type kmsv2PluginProbe struct { service kmsservice.Service lastResponse *kmsPluginHealthzResponse l *sync.Mutex + apiServerID string + version string } type kmsHealthChecker []healthz.HealthChecker @@ -184,13 +212,13 @@ type EncryptionConfiguration struct { // It may launch multiple go routines whose lifecycle is controlled by ctx. // In case of an error, the caller is responsible for canceling ctx to clean up any go routines that may have been launched. // If reload is true, or KMS v2 plugins are used with no KMS v1 plugins, the returned slice of health checkers will always be of length 1. -func LoadEncryptionConfig(ctx context.Context, filepath string, reload bool) (*EncryptionConfiguration, error) { +func LoadEncryptionConfig(ctx context.Context, filepath string, reload bool, apiServerID string) (*EncryptionConfiguration, error) { config, contentHash, err := loadConfig(filepath, reload) if err != nil { return nil, fmt.Errorf("error while parsing file: %w", err) } - transformers, kmsHealthChecks, kmsUsed, err := getTransformerOverridesAndKMSPluginHealthzCheckers(ctx, config) + transformers, kmsHealthChecks, kmsUsed, err := getTransformerOverridesAndKMSPluginHealthzCheckers(ctx, config, apiServerID) if err != nil { return nil, fmt.Errorf("error while building transformers: %w", err) } @@ -215,9 +243,9 @@ func LoadEncryptionConfig(ctx context.Context, filepath string, reload bool) (*E // getTransformerOverridesAndKMSPluginHealthzCheckers creates the set of transformers and KMS healthz checks based on the given config. 
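For illustration, the SetKDFForTests/GetKDF pair introduced above follows a small reusable pattern: a package-level atomic.Bool that only tests flip, with a restore function returned for deferred cleanup. This standalone sketch (Go 1.19+ for sync/atomic.Bool) uses hypothetical names and is not the vendored code.

package main

import (
	"fmt"
	"sync/atomic"
)

// Zero value is false, so the feature reads as enabled unless a test disables it.
var featureDisabled atomic.Bool

// setForTests flips the toggle and returns a cleanup func for the test to defer.
func setForTests(enabled bool) func() {
	featureDisabled.Store(!enabled)
	return func() { featureDisabled.Store(false) }
}

func enabled() bool { return !featureDisabled.Load() }

func main() {
	restore := setForTests(false)
	fmt.Println(enabled()) // false while the test override is active
	restore()
	fmt.Println(enabled()) // true again after cleanup
}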
// It may launch multiple go routines whose lifecycle is controlled by ctx. // In case of an error, the caller is responsible for canceling ctx to clean up any go routines that may have been launched. -func getTransformerOverridesAndKMSPluginHealthzCheckers(ctx context.Context, config *apiserverconfig.EncryptionConfiguration) (map[schema.GroupResource]storagevalue.Transformer, []healthz.HealthChecker, *kmsState, error) { +func getTransformerOverridesAndKMSPluginHealthzCheckers(ctx context.Context, config *apiserverconfig.EncryptionConfiguration, apiServerID string) (map[schema.GroupResource]storagevalue.Transformer, []healthz.HealthChecker, *kmsState, error) { var kmsHealthChecks []healthz.HealthChecker - transformers, probes, kmsUsed, err := getTransformerOverridesAndKMSPluginProbes(ctx, config) + transformers, probes, kmsUsed, err := getTransformerOverridesAndKMSPluginProbes(ctx, config, apiServerID) if err != nil { return nil, nil, nil, err } @@ -236,7 +264,7 @@ type healthChecker interface { // getTransformerOverridesAndKMSPluginProbes creates the set of transformers and KMS probes based on the given config. // It may launch multiple go routines whose lifecycle is controlled by ctx. // In case of an error, the caller is responsible for canceling ctx to clean up any go routines that may have been launched. -func getTransformerOverridesAndKMSPluginProbes(ctx context.Context, config *apiserverconfig.EncryptionConfiguration) (map[schema.GroupResource]storagevalue.Transformer, []healthChecker, *kmsState, error) { +func getTransformerOverridesAndKMSPluginProbes(ctx context.Context, config *apiserverconfig.EncryptionConfiguration, apiServerID string) (map[schema.GroupResource]storagevalue.Transformer, []healthChecker, *kmsState, error) { resourceToPrefixTransformer := map[schema.GroupResource][]storagevalue.PrefixTransformer{} var probes []healthChecker var kmsUsed kmsState @@ -245,7 +273,7 @@ func getTransformerOverridesAndKMSPluginProbes(ctx context.Context, config *apis for _, resourceConfig := range config.Resources { resourceConfig := resourceConfig - transformers, p, used, err := prefixTransformersAndProbes(ctx, resourceConfig) + transformers, p, used, err := prefixTransformersAndProbes(ctx, resourceConfig, apiServerID) if err != nil { return nil, nil, nil, err } @@ -362,7 +390,7 @@ func (h *kmsv2PluginProbe) rotateDEKOnKeyIDChange(ctx context.Context, statusKey // this gate can only change during tests, but the check is cheap enough to always make // this allows us to easily exercise both modes without restarting the API server // TODO integration test that this dynamically takes effect - useSeed := utilfeature.DefaultFeatureGate.Enabled(features.KMSv2KDF) + useSeed := GetKDF() stateUseSeed := state.EncryptedObject.EncryptedDEKSourceType == kmstypes.EncryptedDEKSourceType_HKDF_SHA256_XNONCE_AES_GCM_SEED // state is valid and status keyID is unchanged from when we generated this DEK/seed so there is no need to rotate it @@ -447,15 +475,23 @@ func (h *kmsv2PluginProbe) isKMSv2ProviderHealthyAndMaybeRotateDEK(ctx context.C if response.Healthz != "ok" { errs = append(errs, fmt.Errorf("got unexpected healthz status: %s", response.Healthz)) } - if response.Version != envelopekmsv2.KMSAPIVersion { - errs = append(errs, fmt.Errorf("expected KMSv2 API version %s, got %s", envelopekmsv2.KMSAPIVersion, response.Version)) + if response.Version != envelopekmsv2.KMSAPIVersionv2 && response.Version != envelopekmsv2.KMSAPIVersionv2beta1 { + errs = append(errs, fmt.Errorf("expected KMSv2 API version 
%s, got %s", envelopekmsv2.KMSAPIVersionv2, response.Version)) + } else { + // set version for the first status response + if len(h.version) == 0 { + h.version = response.Version + } + if h.version != response.Version { + errs = append(errs, fmt.Errorf("KMSv2 API version should not change after the initial status response version %s, got %s", h.version, response.Version)) + } } if errCode, err := envelopekmsv2.ValidateKeyID(response.KeyID); err != nil { envelopemetrics.RecordInvalidKeyIDFromStatus(h.name, string(errCode)) errs = append(errs, fmt.Errorf("got invalid KMSv2 KeyID hash %q: %w", envelopekmsv2.GetHashIfNotEmpty(response.KeyID), err)) } else { - envelopemetrics.RecordKeyIDFromStatus(h.name, response.KeyID) + envelopemetrics.RecordKeyIDFromStatus(h.name, response.KeyID, h.apiServerID) // unconditionally append as we filter out nil errors below errs = append(errs, h.rotateDEKOnKeyIDChange(ctx, response.KeyID, string(uuid.NewUUID()))) } @@ -468,6 +504,24 @@ func (h *kmsv2PluginProbe) isKMSv2ProviderHealthyAndMaybeRotateDEK(ctx context.C // loadConfig parses the encryption configuration file at filepath and returns the parsed config and hash of the file. func loadConfig(filepath string, reload bool) (*apiserverconfig.EncryptionConfiguration, string, error) { + data, contentHash, err := loadDataAndHash(filepath) + if err != nil { + return nil, "", fmt.Errorf("error while loading file: %w", err) + } + + configObj, gvk, err := codecs.UniversalDecoder().Decode(data, nil, nil) + if err != nil { + return nil, "", fmt.Errorf("error decoding encryption provider configuration file %q: %w", filepath, err) + } + config, ok := configObj.(*apiserverconfig.EncryptionConfiguration) + if !ok { + return nil, "", fmt.Errorf("got unexpected config type: %v", gvk) + } + + return config, contentHash, validation.ValidateEncryptionConfiguration(config, reload).ToAggregate() +} + +func loadDataAndHash(filepath string) ([]byte, string, error) { f, err := os.Open(filepath) if err != nil { return nil, "", fmt.Errorf("error opening encryption provider configuration file %q: %w", filepath, err) @@ -482,27 +536,20 @@ func loadConfig(filepath string, reload bool) (*apiserverconfig.EncryptionConfig return nil, "", fmt.Errorf("encryption provider configuration file %q is empty", filepath) } - scheme := runtime.NewScheme() - codecs := serializer.NewCodecFactory(scheme) - utilruntime.Must(apiserverconfig.AddToScheme(scheme)) - utilruntime.Must(apiserverconfigv1.AddToScheme(scheme)) - - configObj, gvk, err := codecs.UniversalDecoder().Decode(data, nil, nil) - if err != nil { - return nil, "", fmt.Errorf("error decoding encryption provider configuration file %q: %w", filepath, err) - } - config, ok := configObj.(*apiserverconfig.EncryptionConfiguration) - if !ok { - return nil, "", fmt.Errorf("got unexpected config type: %v", gvk) - } + return data, computeEncryptionConfigHash(data), nil +} - return config, computeEncryptionConfigHash(data), validation.ValidateEncryptionConfiguration(config, reload).ToAggregate() +// GetEncryptionConfigHash reads the encryption configuration file at filepath and returns the hash of the file. +// It does not attempt to decode or load the config, and serves as a cheap check to determine if the file has changed. +func GetEncryptionConfigHash(filepath string) (string, error) { + _, contentHash, err := loadDataAndHash(filepath) + return contentHash, err } // prefixTransformersAndProbes creates the set of transformers and KMS probes based on the given resource config. 
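The new loadDataAndHash/GetEncryptionConfigHash path above boils down to hashing the raw file bytes so the reload controller can detect changes without decoding the configuration. A minimal standalone sketch of that idea follows; the fileHash helper, the path, and the sha256:%x encoding are assumptions of this sketch, not the vendored implementation.

package main

import (
	"crypto/sha256"
	"fmt"
	"os"
)

// fileHash returns a digest of the file contents; comparing digests is a cheap
// way to decide whether a full parse and reload is needed.
func fileHash(path string) (string, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return "", err
	}
	sum := sha256.Sum256(data)
	return fmt.Sprintf("sha256:%x", sum[:]), nil
}

func main() {
	// Illustrative path only.
	h, err := fileHash("/etc/kubernetes/encryption-config.yaml")
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Println(h)
}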
// It may launch multiple go routines whose lifecycle is controlled by ctx. // In case of an error, the caller is responsible for canceling ctx to clean up any go routines that may have been launched. -func prefixTransformersAndProbes(ctx context.Context, config apiserverconfig.ResourceConfiguration) ([]storagevalue.PrefixTransformer, []healthChecker, *kmsState, error) { +func prefixTransformersAndProbes(ctx context.Context, config apiserverconfig.ResourceConfiguration, apiServerID string) ([]storagevalue.PrefixTransformer, []healthChecker, *kmsState, error) { var transformers []storagevalue.PrefixTransformer var probes []healthChecker var kmsUsed kmsState @@ -530,7 +577,7 @@ func prefixTransformersAndProbes(ctx context.Context, config apiserverconfig.Res transformer, transformerErr = secretboxPrefixTransformer(provider.Secretbox) case provider.KMS != nil: - transformer, probe, used, transformerErr = kmsPrefixTransformer(ctx, provider.KMS) + transformer, probe, used, transformerErr = kmsPrefixTransformer(ctx, provider.KMS, apiServerID) if transformerErr == nil { probes = append(probes, probe) kmsUsed.accumulate(used) @@ -689,7 +736,7 @@ func (s *kmsState) accumulate(other *kmsState) { // kmsPrefixTransformer creates a KMS transformer and probe based on the given KMS config. // It may launch multiple go routines whose lifecycle is controlled by ctx. // In case of an error, the caller is responsible for canceling ctx to clean up any go routines that may have been launched. -func kmsPrefixTransformer(ctx context.Context, config *apiserverconfig.KMSConfiguration) (storagevalue.PrefixTransformer, healthChecker, *kmsState, error) { +func kmsPrefixTransformer(ctx context.Context, config *apiserverconfig.KMSConfiguration, apiServerID string) (storagevalue.PrefixTransformer, healthChecker, *kmsState, error) { kmsName := config.Name switch config.APIVersion { case kmsAPIVersionV1: @@ -735,14 +782,14 @@ func kmsPrefixTransformer(ctx context.Context, config *apiserverconfig.KMSConfig service: envelopeService, l: &sync.Mutex{}, lastResponse: &kmsPluginHealthzResponse{}, + apiServerID: apiServerID, } // initialize state so that Load always works probe.state.Store(&envelopekmsv2.State{}) primeAndProbeKMSv2(ctx, probe, kmsName) - transformer := storagevalue.PrefixTransformer{ - Transformer: envelopekmsv2.NewEnvelopeTransformer(envelopeService, kmsName, probe.getCurrentState), + Transformer: envelopekmsv2.NewEnvelopeTransformer(envelopeService, kmsName, probe.getCurrentState, apiServerID), Prefix: []byte(kmsTransformerPrefixV2 + kmsName + ":"), } diff --git a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller.go b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller.go index 94782ccba..cde6a379e 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller.go @@ -20,9 +20,9 @@ import ( "context" "fmt" "net/http" + "sync" "time" - "github.com/fsnotify/fsnotify" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/server/healthz" @@ -35,8 +35,11 @@ import ( // workqueueKey is the dummy key used to process change in encryption config file. const workqueueKey = "key" -// DynamicKMSEncryptionConfigContent which can dynamically handle changes in encryption config file. 
-type DynamicKMSEncryptionConfigContent struct { +// EncryptionConfigFileChangePollDuration is exposed so that integration tests can crank up the reload speed. +var EncryptionConfigFileChangePollDuration = time.Minute + +// DynamicEncryptionConfigContent which can dynamically handle changes in encryption config file. +type DynamicEncryptionConfigContent struct { name string // filePath is the path of the file to read. @@ -50,6 +53,17 @@ type DynamicKMSEncryptionConfigContent struct { // dynamicTransformers updates the transformers when encryption config file changes. dynamicTransformers *encryptionconfig.DynamicTransformers + + // identity of the api server + apiServerID string + + // can be swapped during testing + getEncryptionConfigHash func(ctx context.Context, filepath string) (string, error) + loadEncryptionConfig func(ctx context.Context, filepath string, reload bool, apiServerID string) (*encryptionconfig.EncryptionConfiguration, error) +} + +func init() { + metrics.RegisterMetrics() } // NewDynamicEncryptionConfiguration returns controller that dynamically reacts to changes in encryption config file. @@ -57,94 +71,73 @@ func NewDynamicEncryptionConfiguration( name, filePath string, dynamicTransformers *encryptionconfig.DynamicTransformers, configContentHash string, -) *DynamicKMSEncryptionConfigContent { - encryptionConfig := &DynamicKMSEncryptionConfigContent{ + apiServerID string, +) *DynamicEncryptionConfigContent { + return &DynamicEncryptionConfigContent{ name: name, filePath: filePath, lastLoadedEncryptionConfigHash: configContentHash, dynamicTransformers: dynamicTransformers, queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name), + apiServerID: apiServerID, + getEncryptionConfigHash: func(_ context.Context, filepath string) (string, error) { + return encryptionconfig.GetEncryptionConfigHash(filepath) + }, + loadEncryptionConfig: encryptionconfig.LoadEncryptionConfig, } - encryptionConfig.queue.Add(workqueueKey) // to avoid missing any file changes that occur in between the initial load and Run - - return encryptionConfig } -// Run starts the controller and blocks until stopCh is closed. -func (d *DynamicKMSEncryptionConfigContent) Run(ctx context.Context) { +// Run starts the controller and blocks until ctx is canceled. +func (d *DynamicEncryptionConfigContent) Run(ctx context.Context) { defer utilruntime.HandleCrash() - defer d.queue.ShutDown() klog.InfoS("Starting controller", "name", d.name) defer klog.InfoS("Shutting down controller", "name", d.name) - // start worker for processing content - go wait.UntilWithContext(ctx, d.runWorker, time.Second) + var wg sync.WaitGroup - // start the loop that watches the encryption config file until stopCh is closed. - go wait.UntilWithContext(ctx, func(ctx context.Context) { - if err := d.watchEncryptionConfigFile(ctx); err != nil { - // if there is an error while setting up or handling the watches, this will ensure that we will process the config file. 
- defer d.queue.Add(workqueueKey) - klog.ErrorS(err, "Failed to watch encryption config file, will retry later") - } - }, time.Second) - - <-ctx.Done() -} - -func (d *DynamicKMSEncryptionConfigContent) watchEncryptionConfigFile(ctx context.Context) error { - watcher, err := fsnotify.NewWatcher() - if err != nil { - return fmt.Errorf("error creating fsnotify watcher: %w", err) - } - defer watcher.Close() - - if err = watcher.Add(d.filePath); err != nil { - return fmt.Errorf("error adding watch for file %s: %w", d.filePath, err) - } - - for { - select { - case event := <-watcher.Events: - if err := d.handleWatchEvent(event, watcher); err != nil { - return err - } - case err := <-watcher.Errors: - return fmt.Errorf("received fsnotify error: %w", err) - case <-ctx.Done(): - return nil - } - } -} - -func (d *DynamicKMSEncryptionConfigContent) handleWatchEvent(event fsnotify.Event, watcher *fsnotify.Watcher) error { - // This should be executed after restarting the watch (if applicable) to ensure no file event will be missing. - defer d.queue.Add(workqueueKey) + wg.Add(1) + go func() { + defer utilruntime.HandleCrash() + defer wg.Done() + defer d.queue.ShutDown() + <-ctx.Done() + }() - // return if file has not been removed or renamed. - if event.Op&(fsnotify.Remove|fsnotify.Rename) == 0 { - return nil - } + wg.Add(1) + go func() { + defer utilruntime.HandleCrash() + defer wg.Done() + d.runWorker(ctx) + }() - if err := watcher.Remove(d.filePath); err != nil { - klog.V(2).InfoS("Failed to remove file watch, it may have been deleted", "file", d.filePath, "err", err) - } - if err := watcher.Add(d.filePath); err != nil { - return fmt.Errorf("error adding watch for file %s: %w", d.filePath, err) - } + // this function polls changes in the encryption config file by placing a dummy key in the queue. + // the 'runWorker' function then picks up this dummy key and processes the changes. + // the goroutine terminates when 'ctx' is canceled. + _ = wait.PollUntilContextCancel( + ctx, + EncryptionConfigFileChangePollDuration, + true, + func(ctx context.Context) (bool, error) { + // add dummy item to the queue to trigger file content processing. + d.queue.Add(workqueueKey) + + // return false to continue polling. + return false, nil + }, + ) - return nil + wg.Wait() } // runWorker to process file content -func (d *DynamicKMSEncryptionConfigContent) runWorker(ctx context.Context) { +func (d *DynamicEncryptionConfigContent) runWorker(ctx context.Context) { for d.processNextWorkItem(ctx) { } } // processNextWorkItem processes file content when there is a message in the queue. -func (d *DynamicKMSEncryptionConfigContent) processNextWorkItem(serverCtx context.Context) bool { +func (d *DynamicEncryptionConfigContent) processNextWorkItem(serverCtx context.Context) bool { // key here is dummy item in the queue to trigger file content processing. 
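The controller change above swaps the fsnotify watch for a fixed-interval poll that enqueues a dummy key, with a single worker doing the actual hash check and reload. A rough, standard-library-only approximation of that loop is sketched below; a buffered channel stands in for the rate-limiting workqueue and all names are illustrative.

package main

import (
	"context"
	"crypto/sha256"
	"fmt"
	"os"
	"time"
)

func run(ctx context.Context, path string, interval time.Duration) {
	work := make(chan struct{}, 1) // stands in for the workqueue
	lastHash := ""

	// Poller: enqueue a dummy item at a fixed interval instead of watching the file.
	go func() {
		t := time.NewTicker(interval)
		defer t.Stop()
		for {
			select {
			case <-ctx.Done():
				close(work)
				return
			case <-t.C:
				select {
				case work <- struct{}{}:
				default: // an item is already queued; skip
				}
			}
		}
	}()

	// Worker: reload only when the content hash actually changed.
	for range work {
		data, err := os.ReadFile(path)
		if err != nil {
			fmt.Println("read failed:", err)
			continue
		}
		sum := sha256.Sum256(data)
		h := fmt.Sprintf("sha256:%x", sum[:])
		if h == lastHash {
			continue // nothing changed since the last successful load
		}
		fmt.Println("config changed, reloading; new hash:", h)
		lastHash = h
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	run(ctx, "/etc/kubernetes/encryption-config.yaml", time.Second) // illustrative path and interval
}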
key, quit := d.queue.Get() if quit { @@ -152,6 +145,12 @@ func (d *DynamicKMSEncryptionConfigContent) processNextWorkItem(serverCtx contex } defer d.queue.Done(key) + d.processWorkItem(serverCtx, key) + + return true +} + +func (d *DynamicEncryptionConfigContent) processWorkItem(serverCtx context.Context, workqueueKey interface{}) { var ( updatedEffectiveConfig bool err error @@ -172,32 +171,32 @@ func (d *DynamicKMSEncryptionConfigContent) processNextWorkItem(serverCtx contex } if updatedEffectiveConfig && err == nil { - metrics.RecordEncryptionConfigAutomaticReloadSuccess() + metrics.RecordEncryptionConfigAutomaticReloadSuccess(d.apiServerID) } if err != nil { - metrics.RecordEncryptionConfigAutomaticReloadFailure() + metrics.RecordEncryptionConfigAutomaticReloadFailure(d.apiServerID) utilruntime.HandleError(fmt.Errorf("error processing encryption config file %s: %v", d.filePath, err)) // add dummy item back to the queue to trigger file content processing. - d.queue.AddRateLimited(key) + d.queue.AddRateLimited(workqueueKey) } }() encryptionConfiguration, configChanged, err = d.processEncryptionConfig(ctx) if err != nil { - return true + return } if !configChanged { - return true + return } if len(encryptionConfiguration.HealthChecks) != 1 { err = fmt.Errorf("unexpected number of healthz checks: %d. Should have only one", len(encryptionConfiguration.HealthChecks)) - return true + return } // get healthz checks for all new KMS plugins. if err = d.validateNewTransformersHealth(ctx, encryptionConfiguration.HealthChecks[0], encryptionConfiguration.KMSCloseGracePeriod); err != nil { - return true + return } // update transformers. @@ -214,30 +213,44 @@ func (d *DynamicKMSEncryptionConfigContent) processNextWorkItem(serverCtx contex klog.V(2).InfoS("Loaded new kms encryption config content", "name", d.name) updatedEffectiveConfig = true - return true } // loadEncryptionConfig processes the next set of content from the file. -func (d *DynamicKMSEncryptionConfigContent) processEncryptionConfig(ctx context.Context) ( - encryptionConfiguration *encryptionconfig.EncryptionConfiguration, +func (d *DynamicEncryptionConfigContent) processEncryptionConfig(ctx context.Context) ( + _ *encryptionconfig.EncryptionConfiguration, configChanged bool, - err error, + _ error, ) { - // this code path will only execute if reload=true. So passing true explicitly. - encryptionConfiguration, err = encryptionconfig.LoadEncryptionConfig(ctx, d.filePath, true) + contentHash, err := d.getEncryptionConfigHash(ctx, d.filePath) if err != nil { return nil, false, err } // check if encryptionConfig is different from the current. Do nothing if they are the same. + if contentHash == d.lastLoadedEncryptionConfigHash { + klog.V(4).InfoS("Encryption config has not changed (before load)", "name", d.name) + return nil, false, nil + } + + // this code path will only execute if reload=true. So passing true explicitly. + encryptionConfiguration, err := d.loadEncryptionConfig(ctx, d.filePath, true, d.apiServerID) + if err != nil { + return nil, false, err + } + + // check if encryptionConfig is different from the current (again to avoid TOCTOU). Do nothing if they are the same. 
if encryptionConfiguration.EncryptionFileContentHash == d.lastLoadedEncryptionConfigHash { - klog.V(4).InfoS("Encryption config has not changed", "name", d.name) + klog.V(4).InfoS("Encryption config has not changed (after load)", "name", d.name) return nil, false, nil } + return encryptionConfiguration, true, nil } -func (d *DynamicKMSEncryptionConfigContent) validateNewTransformersHealth( +// minKMSPluginCloseGracePeriod can be lowered in unit tests to make the health check poll faster +var minKMSPluginCloseGracePeriod = 10 * time.Second + +func (d *DynamicEncryptionConfigContent) validateNewTransformersHealth( ctx context.Context, kmsPluginHealthzCheck healthz.HealthChecker, kmsPluginCloseGracePeriod time.Duration, @@ -245,8 +258,8 @@ func (d *DynamicKMSEncryptionConfigContent) validateNewTransformersHealth( // test if new transformers are healthy var healthCheckError error - if kmsPluginCloseGracePeriod < 10*time.Second { - kmsPluginCloseGracePeriod = 10 * time.Second + if kmsPluginCloseGracePeriod < minKMSPluginCloseGracePeriod { + kmsPluginCloseGracePeriod = minKMSPluginCloseGracePeriod } // really make sure that the immediate check does not hang diff --git a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/metrics/metrics.go index 799b584cf..70414035f 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/metrics/metrics.go @@ -17,6 +17,9 @@ limitations under the License. package metrics import ( + "crypto/sha256" + "fmt" + "hash" "sync" "k8s.io/component-base/metrics" @@ -29,24 +32,26 @@ const ( ) var ( - encryptionConfigAutomaticReloadFailureTotal = metrics.NewCounter( + encryptionConfigAutomaticReloadFailureTotal = metrics.NewCounterVec( &metrics.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "automatic_reload_failures_total", - Help: "Total number of failed automatic reloads of encryption configuration.", + Help: "Total number of failed automatic reloads of encryption configuration split by apiserver identity.", StabilityLevel: metrics.ALPHA, }, + []string{"apiserver_id_hash"}, ) - encryptionConfigAutomaticReloadSuccessTotal = metrics.NewCounter( + encryptionConfigAutomaticReloadSuccessTotal = metrics.NewCounterVec( &metrics.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "automatic_reload_success_total", - Help: "Total number of successful automatic reloads of encryption configuration.", + Help: "Total number of successful automatic reloads of encryption configuration split by apiserver identity.", StabilityLevel: metrics.ALPHA, }, + []string{"apiserver_id_hash"}, ) encryptionConfigAutomaticReloadLastTimestampSeconds = metrics.NewGaugeVec( @@ -54,33 +59,53 @@ var ( Namespace: namespace, Subsystem: subsystem, Name: "automatic_reload_last_timestamp_seconds", - Help: "Timestamp of the last successful or failed automatic reload of encryption configuration.", + Help: "Timestamp of the last successful or failed automatic reload of encryption configuration split by apiserver identity.", StabilityLevel: metrics.ALPHA, }, - []string{"status"}, + []string{"status", "apiserver_id_hash"}, ) ) var registerMetrics sync.Once +var hashPool *sync.Pool func RegisterMetrics() { registerMetrics.Do(func() { + hashPool = &sync.Pool{ + New: func() interface{} { + return sha256.New() + }, + } legacyregistry.MustRegister(encryptionConfigAutomaticReloadFailureTotal) 
legacyregistry.MustRegister(encryptionConfigAutomaticReloadSuccessTotal) legacyregistry.MustRegister(encryptionConfigAutomaticReloadLastTimestampSeconds) }) } -func RecordEncryptionConfigAutomaticReloadFailure() { - encryptionConfigAutomaticReloadFailureTotal.Inc() - recordEncryptionConfigAutomaticReloadTimestamp("failure") +func RecordEncryptionConfigAutomaticReloadFailure(apiServerID string) { + apiServerIDHash := getHash(apiServerID) + encryptionConfigAutomaticReloadFailureTotal.WithLabelValues(apiServerIDHash).Inc() + recordEncryptionConfigAutomaticReloadTimestamp("failure", apiServerIDHash) } -func RecordEncryptionConfigAutomaticReloadSuccess() { - encryptionConfigAutomaticReloadSuccessTotal.Inc() - recordEncryptionConfigAutomaticReloadTimestamp("success") +func RecordEncryptionConfigAutomaticReloadSuccess(apiServerID string) { + apiServerIDHash := getHash(apiServerID) + encryptionConfigAutomaticReloadSuccessTotal.WithLabelValues(apiServerIDHash).Inc() + recordEncryptionConfigAutomaticReloadTimestamp("success", apiServerIDHash) } -func recordEncryptionConfigAutomaticReloadTimestamp(result string) { - encryptionConfigAutomaticReloadLastTimestampSeconds.WithLabelValues(result).SetToCurrentTime() +func recordEncryptionConfigAutomaticReloadTimestamp(result, apiServerIDHash string) { + encryptionConfigAutomaticReloadLastTimestampSeconds.WithLabelValues(result, apiServerIDHash).SetToCurrentTime() +} + +func getHash(data string) string { + if len(data) == 0 { + return "" + } + h := hashPool.Get().(hash.Hash) + h.Reset() + h.Write([]byte(data)) + dataHash := fmt.Sprintf("sha256:%x", h.Sum(nil)) + hashPool.Put(h) + return dataHash } diff --git a/vendor/k8s.io/apiserver/pkg/server/options/etcd.go b/vendor/k8s.io/apiserver/pkg/server/options/etcd.go index 57e9c1a9f..a1fc3168c 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/etcd.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/etcd.go @@ -26,6 +26,7 @@ import ( "github.com/spf13/pflag" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" @@ -44,8 +45,6 @@ import ( ) type EtcdOptions struct { - // The value of Paging on StorageConfig will be overridden by the - // calculated feature gate value. 
StorageConfig storagebackend.Config EncryptionProviderConfigFilepath string EncryptionProviderConfigAutomaticReload bool @@ -87,6 +86,12 @@ func NewEtcdOptions(backendConfig *storagebackend.Config) *EtcdOptions { return options } +var storageMediaTypes = sets.New( + runtime.ContentTypeJSON, + runtime.ContentTypeYAML, + runtime.ContentTypeProtobuf, +) + func (s *EtcdOptions) Validate() []error { if s == nil { return nil @@ -120,6 +125,10 @@ func (s *EtcdOptions) Validate() []error { allErrors = append(allErrors, fmt.Errorf("--encryption-provider-config-automatic-reload must be set with --encryption-provider-config")) } + if s.DefaultStorageMediaType != "" && !storageMediaTypes.Has(s.DefaultStorageMediaType) { + allErrors = append(allErrors, fmt.Errorf("--storage-media-type %q invalid, allowed values: %s", s.DefaultStorageMediaType, strings.Join(sets.List(storageMediaTypes), ", "))) + } + return allErrors } @@ -294,7 +303,7 @@ func (s *EtcdOptions) maybeApplyResourceTransformers(c *server.Config) (err erro } }() - encryptionConfiguration, err := encryptionconfig.LoadEncryptionConfig(ctxTransformers, s.EncryptionProviderConfigFilepath, s.EncryptionProviderConfigAutomaticReload) + encryptionConfiguration, err := encryptionconfig.LoadEncryptionConfig(ctxTransformers, s.EncryptionProviderConfigFilepath, s.EncryptionProviderConfigAutomaticReload, c.APIServerID) if err != nil { return err } @@ -318,6 +327,7 @@ func (s *EtcdOptions) maybeApplyResourceTransformers(c *server.Config) (err erro s.EncryptionProviderConfigFilepath, dynamicTransformers, encryptionConfiguration.EncryptionFileContentHash, + c.APIServerID, ) go dynamicEncryptionConfigController.Run(ctxServer) @@ -331,18 +341,23 @@ func (s *EtcdOptions) maybeApplyResourceTransformers(c *server.Config) (err erro c.ResourceTransformers = dynamicTransformers if !s.SkipHealthEndpoints { - c.AddHealthChecks(dynamicTransformers) + addHealthChecksWithoutLivez(c, dynamicTransformers) } } else { c.ResourceTransformers = encryptionconfig.StaticTransformers(encryptionConfiguration.Transformers) if !s.SkipHealthEndpoints { - c.AddHealthChecks(encryptionConfiguration.HealthChecks...) + addHealthChecksWithoutLivez(c, encryptionConfiguration.HealthChecks...) } } return nil } +func addHealthChecksWithoutLivez(c *server.Config, healthChecks ...healthz.HealthChecker) { + c.HealthzChecks = append(c.HealthzChecks, healthChecks...) + c.ReadyzChecks = append(c.ReadyzChecks, healthChecks...) +} + func (s *EtcdOptions) addEtcdHealthEndpoint(c *server.Config) error { healthCheck, err := storagefactory.CreateHealthCheck(s.StorageConfig, c.DrainedNotify()) if err != nil { diff --git a/vendor/k8s.io/apiserver/pkg/server/options/feature.go b/vendor/k8s.io/apiserver/pkg/server/options/feature.go index 35596fba6..f01195560 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/feature.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/feature.go @@ -17,16 +17,22 @@ limitations under the License. 
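Stepping back to the --storage-media-type validation added in etcd.go above: it is a plain allow-list check. A standalone sketch with the standard library follows; the k8s.io sets helper is replaced by a map, and the three media type literals are assumptions of this sketch.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// Allowed values for the flag; the literals mirror the usual Kubernetes
// content types and are assumptions of this sketch.
var allowedMediaTypes = map[string]bool{
	"application/json":                    true,
	"application/yaml":                    true,
	"application/vnd.kubernetes.protobuf": true,
}

func validateMediaType(v string) error {
	if v == "" || allowedMediaTypes[v] {
		return nil // empty means "use the default"
	}
	keys := make([]string, 0, len(allowedMediaTypes))
	for k := range allowedMediaTypes {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return fmt.Errorf("--storage-media-type %q invalid, allowed values: %s", v, strings.Join(keys, ", "))
}

func main() {
	fmt.Println(validateMediaType("application/json")) // <nil>
	fmt.Println(validateMediaType("text/plain"))       // error listing the allowed values
}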
package options import ( + "fmt" + "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apiserver/pkg/server" + utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" ) type FeatureOptions struct { EnableProfiling bool DebugSocketPath string EnableContentionProfiling bool + EnablePriorityAndFairness bool } func NewFeatureOptions() *FeatureOptions { @@ -36,6 +42,7 @@ func NewFeatureOptions() *FeatureOptions { EnableProfiling: defaults.EnableProfiling, DebugSocketPath: defaults.DebugSocketPath, EnableContentionProfiling: defaults.EnableContentionProfiling, + EnablePriorityAndFairness: true, } } @@ -50,9 +57,11 @@ func (o *FeatureOptions) AddFlags(fs *pflag.FlagSet) { "Enable block profiling, if profiling is enabled") fs.StringVar(&o.DebugSocketPath, "debug-socket-path", o.DebugSocketPath, "Use an unprotected (no authn/authz) unix-domain socket for profiling with the given path") + fs.BoolVar(&o.EnablePriorityAndFairness, "enable-priority-and-fairness", o.EnablePriorityAndFairness, ""+ + "If true, replace the max-in-flight handler with an enhanced one that queues and dispatches with priority and fairness") } -func (o *FeatureOptions) ApplyTo(c *server.Config) error { +func (o *FeatureOptions) ApplyTo(c *server.Config, clientset kubernetes.Interface, informers informers.SharedInformerFactory) error { if o == nil { return nil } @@ -61,6 +70,18 @@ func (o *FeatureOptions) ApplyTo(c *server.Config) error { c.DebugSocketPath = o.DebugSocketPath c.EnableContentionProfiling = o.EnableContentionProfiling + if o.EnablePriorityAndFairness { + if c.MaxRequestsInFlight+c.MaxMutatingRequestsInFlight <= 0 { + return fmt.Errorf("invalid configuration: MaxRequestsInFlight=%d and MaxMutatingRequestsInFlight=%d; they must add up to something positive", c.MaxRequestsInFlight, c.MaxMutatingRequestsInFlight) + + } + c.FlowControl = utilflowcontrol.New( + informers, + clientset.FlowcontrolV1(), + c.MaxRequestsInFlight+c.MaxMutatingRequestsInFlight, + ) + } + return nil } diff --git a/vendor/k8s.io/apiserver/pkg/server/options/recommended.go b/vendor/k8s.io/apiserver/pkg/server/options/recommended.go index 69f8fb515..eb7e67b36 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/recommended.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/recommended.go @@ -17,20 +17,15 @@ limitations under the License. package options import ( - "fmt" - "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/admission" - "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/storage/storagebackend" "k8s.io/apiserver/pkg/util/feature" - utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/component-base/featuregate" - "k8s.io/klog/v2" ) // RecommendedOptions contains the recommended options for running an API server. 
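The FeatureOptions changes above move the priority-and-fairness switch onto the options struct (enabled by default), bind it to a flag, and guard against a non-positive in-flight budget before constructing the flow controller. A trimmed-down standalone sketch of that wiring follows, using the standard flag package in place of spf13/pflag; the struct and function names are illustrative.

package main

import (
	"flag"
	"fmt"
)

type featureOptions struct {
	EnablePriorityAndFairness bool
}

func (o *featureOptions) addFlags(fs *flag.FlagSet) {
	fs.BoolVar(&o.EnablePriorityAndFairness, "enable-priority-and-fairness", o.EnablePriorityAndFairness,
		"If true, replace the max-in-flight handler with one that queues and dispatches with priority and fairness")
}

// applyTo mirrors the guard shown above: the two in-flight limits must add up
// to something positive before the flow controller is constructed.
func (o *featureOptions) applyTo(maxRequests, maxMutating int) error {
	if !o.EnablePriorityAndFairness {
		return nil
	}
	if maxRequests+maxMutating <= 0 {
		return fmt.Errorf("invalid configuration: MaxRequestsInFlight=%d and MaxMutatingRequestsInFlight=%d; they must add up to something positive", maxRequests, maxMutating)
	}
	// ... construct the flow controller here ...
	return nil
}

func main() {
	opts := &featureOptions{EnablePriorityAndFairness: true} // enabled by default
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	opts.addFlags(fs)
	_ = fs.Parse([]string{"--enable-priority-and-fairness=true"})
	fmt.Println(opts.applyTo(400, 200)) // <nil>
	fmt.Println(opts.applyTo(0, 0))     // error
}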
@@ -122,17 +117,17 @@ func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error { if err := o.Audit.ApplyTo(&config.Config); err != nil { return err } - if err := o.Features.ApplyTo(&config.Config); err != nil { - return err - } if err := o.CoreAPI.ApplyTo(config); err != nil { return err } - initializers, err := o.ExtraAdmissionInitializers(config) + kubeClient, err := kubernetes.NewForConfig(config.ClientConfig) if err != nil { return err } - kubeClient, err := kubernetes.NewForConfig(config.ClientConfig) + if err := o.Features.ApplyTo(&config.Config, kubeClient, config.SharedInformerFactory); err != nil { + return err + } + initializers, err := o.ExtraAdmissionInitializers(config) if err != nil { return err } @@ -144,22 +139,6 @@ func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error { initializers...); err != nil { return err } - if feature.DefaultFeatureGate.Enabled(features.APIPriorityAndFairness) { - if config.ClientConfig != nil { - if config.MaxRequestsInFlight+config.MaxMutatingRequestsInFlight <= 0 { - return fmt.Errorf("invalid configuration: MaxRequestsInFlight=%d and MaxMutatingRequestsInFlight=%d; they must add up to something positive", config.MaxRequestsInFlight, config.MaxMutatingRequestsInFlight) - - } - config.FlowControl = utilflowcontrol.New( - config.SharedInformerFactory, - kubernetes.NewForConfigOrDie(config.ClientConfig).FlowcontrolV1beta3(), - config.MaxRequestsInFlight+config.MaxMutatingRequestsInFlight, - config.RequestTimeout/4, - ) - } else { - klog.Warningf("Neither kubeconfig is provided nor service-account is mounted, so APIPriorityAndFairness will be disabled") - } - } return nil } diff --git a/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go b/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go index f9d574d5d..1373d8a4d 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go @@ -62,8 +62,7 @@ type ServerRunOptions struct { // decoded in a write request. 0 means no limit. // We intentionally did not add a flag for this option. Users of the // apiserver library can wire it to a flag. - MaxRequestBodyBytes int64 - EnablePriorityAndFairness bool + MaxRequestBodyBytes int64 // ShutdownSendRetryAfter dictates when to initiate shutdown of the HTTP // Server during the graceful termination of the apiserver. If true, we wait @@ -104,7 +103,6 @@ func NewServerRunOptions() *ServerRunOptions { ShutdownWatchTerminationGracePeriod: defaults.ShutdownWatchTerminationGracePeriod, JSONPatchMaxCopyBytes: defaults.JSONPatchMaxCopyBytes, MaxRequestBodyBytes: defaults.MaxRequestBodyBytes, - EnablePriorityAndFairness: true, ShutdownSendRetryAfter: false, } } @@ -325,9 +323,6 @@ func (s *ServerRunOptions) AddUniversalFlags(fs *pflag.FlagSet) { "handler, which picks a randomized value above this number as the connection timeout, "+ "to spread out load.") - fs.BoolVar(&s.EnablePriorityAndFairness, "enable-priority-and-fairness", s.EnablePriorityAndFairness, ""+ - "If true and the APIPriorityAndFairness feature gate is enabled, replace the max-in-flight handler with an enhanced one that queues and dispatches with priority and fairness") - fs.DurationVar(&s.ShutdownDelayDuration, "shutdown-delay-duration", s.ShutdownDelayDuration, ""+ "Time to delay the termination. During that time the server keeps serving requests normally. 
The endpoints /healthz and /livez "+ "will return success, but /readyz immediately returns failure. Graceful termination starts after this delay "+ diff --git a/vendor/k8s.io/apiserver/pkg/server/options/serving.go b/vendor/k8s.io/apiserver/pkg/server/options/serving.go index efda02ef7..842ab7ee0 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/serving.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/serving.go @@ -260,7 +260,39 @@ func (s *SecureServingOptions) ApplyTo(config **server.SecureServingInfo) error c := *config serverCertFile, serverKeyFile := s.ServerCert.CertKey.CertFile, s.ServerCert.CertKey.KeyFile - // load main cert + // load main cert *original description until 2023-08-18* + + /* + kubernetes mutual (2-way) x509 between client and apiserver: + + >1. apiserver sending its apiserver certificate along with its publickey to client + 2. client verifies the apiserver certificate sent against its cluster certificate authority data + 3. client sending its client certificate along with its public key to the apiserver + 4. apiserver verifies the client certificate sent against its cluster certificate authority data + + description: + here, with this block, + apiserver certificate and pub key data (along with priv key)get loaded into server.SecureServingInfo + for client to later in the step 2 verify the apiserver certificate during the handshake + when making a request + + normal args related to this stage: + --tls-cert-file string File containing the default x509 Certificate for HTTPS. + (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and + --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate + and key are generated for the public address and saved to the directory specified by + --cert-dir + --tls-private-key-file string File containing the default x509 private key matching --tls-cert-file. + + (retrievable from "kube-apiserver --help" command) + (suggested by @deads2k) + + see also: + - for the step 2, see: staging/src/k8s.io/client-go/transport/transport.go + - for the step 3, see: staging/src/k8s.io/client-go/transport/transport.go + - for the step 4, see: staging/src/k8s.io/apiserver/pkg/authentication/request/x509/x509.go + */ + if len(serverCertFile) != 0 || len(serverKeyFile) != 0 { var err error c.Cert, err = dynamiccertificates.NewDynamicServingContentFromFiles("serving-cert", serverCertFile, serverKeyFile) diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/metrics.go b/vendor/k8s.io/apiserver/pkg/server/routes/metrics.go index ad1eb2835..8fd4d5599 100644 --- a/vendor/k8s.io/apiserver/pkg/server/routes/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/server/routes/metrics.go @@ -17,6 +17,7 @@ limitations under the License. package routes import ( + handlersmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics" apimetrics "k8s.io/apiserver/pkg/endpoints/metrics" "k8s.io/apiserver/pkg/server/mux" cachermetrics "k8s.io/apiserver/pkg/storage/cacher/metrics" @@ -52,4 +53,5 @@ func register() { etcd3metrics.Register() flowcontrolmetrics.Register() peerproxymetrics.Register() + handlersmetrics.Register() } diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go b/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go index 2819d1576..12c8b1ad9 100644 --- a/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go +++ b/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go @@ -32,7 +32,8 @@ import ( // OpenAPI installs spec endpoints for each web service. 
type OpenAPI struct { - Config *common.Config + Config *common.Config + V3Config *common.OpenAPIV3Config } // Install adds the SwaggerUI webservice to the given mux. @@ -65,7 +66,7 @@ func (oa OpenAPI) InstallV3(c *restful.Container, mux *mux.PathRecorderMux) *han } for gv, ws := range grouped { - spec, err := builder3.BuildOpenAPISpecFromRoutes(restfuladapter.AdaptWebServices(ws), oa.Config) + spec, err := builder3.BuildOpenAPISpecFromRoutes(restfuladapter.AdaptWebServices(ws), oa.V3Config) if err != nil { klog.Errorf("Failed to build OpenAPI v3 for group %s, %q", gv, err) diff --git a/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go b/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go index be4d0390d..0dc50cea6 100644 --- a/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go +++ b/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go @@ -19,15 +19,13 @@ package storage import ( "crypto/tls" "crypto/x509" - "io/ioutil" + "os" "strings" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/storage/storagebackend" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog/v2" ) @@ -114,8 +112,6 @@ type groupResourceOverrides struct { // decoderDecoratorFn is optional and may wrap the provided decoders (can add new decoders). The order of // returned decoders will be priority for attempt to decode. decoderDecoratorFn func([]runtime.Decoder) []runtime.Decoder - // disablePaging will prevent paging on the provided resource. - disablePaging bool } // Apply overrides the provided config and options if the override has a value in that position @@ -139,9 +135,6 @@ func (o groupResourceOverrides) Apply(config *storagebackend.Config, options *St if o.decoderDecoratorFn != nil { options.DecoderDecoratorFn = o.decoderDecoratorFn } - if o.disablePaging { - config.Paging = false - } } var _ StorageFactory = &DefaultStorageFactory{} @@ -156,7 +149,6 @@ func NewDefaultStorageFactory( resourceConfig APIResourceConfigSource, specialDefaultResourcePrefixes map[schema.GroupResource]string, ) *DefaultStorageFactory { - config.Paging = utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) if len(defaultMediaType) == 0 { defaultMediaType = runtime.ContentTypeJSON } @@ -185,14 +177,6 @@ func (s *DefaultStorageFactory) SetEtcdPrefix(groupResource schema.GroupResource s.Overrides[groupResource] = overrides } -// SetDisableAPIListChunking allows a specific resource to disable paging at the storage layer, to prevent -// exposure of key names in continuations. This may be overridden by feature gates. -func (s *DefaultStorageFactory) SetDisableAPIListChunking(groupResource schema.GroupResource) { - overrides := s.Overrides[groupResource] - overrides.disablePaging = true - s.Overrides[groupResource] = overrides -} - // SetResourceEtcdPrefix sets the prefix for a resource, but not the base-dir. You'll end up in `etcdPrefix/resourceEtcdPrefix`. 
func (s *DefaultStorageFactory) SetResourceEtcdPrefix(groupResource schema.GroupResource, prefix string) { overrides := s.Overrides[groupResource] @@ -337,7 +321,7 @@ func backends(storageConfig storagebackend.Config, grOverrides map[schema.GroupR } } if len(storageConfig.Transport.TrustedCAFile) > 0 { - if caCert, err := ioutil.ReadFile(storageConfig.Transport.TrustedCAFile); err != nil { + if caCert, err := os.ReadFile(storageConfig.Transport.TrustedCAFile); err != nil { klog.Errorf("failed to read ca file while getting backends: %s", err) } else { caPool := x509.NewCertPool() diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/cache_watcher.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/cache_watcher.go index 478d2151d..595fd5036 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/cache_watcher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/cache_watcher.go @@ -22,7 +22,6 @@ import ( "sync" "time" - "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -177,7 +176,6 @@ func (c *cacheWatcher) add(event *watchCacheEvent, timer *time.Timer) bool { // This means that we couldn't send event to that watcher. // Since we don't want to block on it infinitely, // we simply terminate it. - klog.V(1).Infof("Forcing %v watcher close due to unresponsiveness: %v. len(c.input) = %v, len(c.result) = %v", c.groupResource.String(), c.identifier, len(c.input), len(c.result)) metrics.TerminatedWatchersCounter.WithLabelValues(c.groupResource.String()).Inc() // This means that we couldn't send event to that watcher. // Since we don't want to block on it infinitely, we simply terminate it. @@ -365,17 +363,10 @@ func (c *cacheWatcher) convertToWatchEvent(event *watchCacheEvent) *watch.Event if event.Type == watch.Bookmark { e := &watch.Event{Type: watch.Bookmark, Object: event.Object.DeepCopyObject()} if !c.wasBookmarkAfterRvSent() { - objMeta, err := meta.Accessor(e.Object) - if err != nil { + if err := storage.AnnotateInitialEventsEndBookmark(e.Object); err != nil { utilruntime.HandleError(fmt.Errorf("error while accessing object's metadata gr: %v, identifier: %v, obj: %#v, err: %v", c.groupResource, c.identifier, e.Object, err)) return nil } - objAnnotations := objMeta.GetAnnotations() - if objAnnotations == nil { - objAnnotations = map[string]string{} - } - objAnnotations["k8s.io/initial-events-end"] = "true" - objMeta.SetAnnotations(objAnnotations) } return e } diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go index 0796f591d..4f4080441 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go @@ -21,7 +21,6 @@ import ( "fmt" "net/http" "reflect" - "strconv" "sync" "time" @@ -113,11 +112,8 @@ func (wm watchersMap) addWatcher(w *cacheWatcher, number int) { wm[number] = w } -func (wm watchersMap) deleteWatcher(number int, done func(*cacheWatcher)) { - if watcher, ok := wm[number]; ok { - delete(wm, number) - done(watcher) - } +func (wm watchersMap) deleteWatcher(number int) { + delete(wm, number) } func (wm watchersMap) terminateAll(done func(*cacheWatcher)) { @@ -148,14 +144,14 @@ func (i *indexedWatchers) addWatcher(w *cacheWatcher, number int, scope namespac } } -func (i *indexedWatchers) deleteWatcher(number int, scope namespacedName, value string, supported bool, done func(*cacheWatcher)) { +func (i *indexedWatchers) deleteWatcher(number int, 
scope namespacedName, value string, supported bool) { if supported { - i.valueWatchers[value].deleteWatcher(number, done) + i.valueWatchers[value].deleteWatcher(number) if len(i.valueWatchers[value]) == 0 { delete(i.valueWatchers, value) } } else { - i.allWatchers[scope].deleteWatcher(number, done) + i.allWatchers[scope].deleteWatcher(number) if len(i.allWatchers[scope]) == 0 { delete(i.allWatchers, scope) } @@ -725,15 +721,14 @@ func shouldDelegateList(opts storage.ListOptions) bool { resourceVersion := opts.ResourceVersion pred := opts.Predicate match := opts.ResourceVersionMatch - pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) consistentListFromCacheEnabled := utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) // Serve consistent reads from storage if ConsistentListFromCache is disabled consistentReadFromStorage := resourceVersion == "" && !consistentListFromCacheEnabled // Watch cache doesn't support continuations, so serve them from etcd. - hasContinuation := pagingEnabled && len(pred.Continue) > 0 + hasContinuation := len(pred.Continue) > 0 // Serve paginated requests about revision "0" from watch cache to avoid overwhelming etcd. - hasLimit := pagingEnabled && pred.Limit > 0 && resourceVersion != "0" + hasLimit := pred.Limit > 0 && resourceVersion != "0" // Watch cache only supports ResourceVersionMatchNotOlderThan (default). unsupportedMatch := match != "" && match != metav1.ResourceVersionMatchNotOlderThan @@ -773,7 +768,7 @@ func (c *Cacher) GetList(ctx context.Context, key string, opts storage.ListOptio return c.storage.GetList(ctx, key, opts, listObj) } if listRV == 0 && utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) { - listRV, err = c.getCurrentResourceVersionFromStorage(ctx) + listRV, err = storage.GetCurrentResourceVersionFromStorage(ctx, c.storage, c.newListFunc, c.resourcePrefix, c.objectType.String()) if err != nil { return err } @@ -1225,7 +1220,8 @@ func forgetWatcher(c *Cacher, w *cacheWatcher, index int, scope namespacedName, // It's possible that the watcher is already not in the structure (e.g. in case of // simultaneous Stop() and terminateAllWatchers(), but it is safe to call stopLocked() // on a watcher multiple times. - c.watchers.deleteWatcher(index, scope, triggerValue, triggerSupported, c.stopWatcherLocked) + c.watchers.deleteWatcher(index, scope, triggerValue, triggerSupported) + c.stopWatcherLocked(w) } } @@ -1249,48 +1245,12 @@ func (c *Cacher) LastSyncResourceVersion() (uint64, error) { return c.versioner.ParseResourceVersion(resourceVersion) } -// getCurrentResourceVersionFromStorage gets the current resource version from the underlying storage engine. 
-// this method issues an empty list request and reads only the ResourceVersion from the object metadata -func (c *Cacher) getCurrentResourceVersionFromStorage(ctx context.Context) (uint64, error) { - if c.newListFunc == nil { - return 0, fmt.Errorf("newListFunction wasn't provided for %v", c.objectType) - } - emptyList := c.newListFunc() - pred := storage.SelectionPredicate{ - Label: labels.Everything(), - Field: fields.Everything(), - Limit: 1, // just in case we actually hit something - } - - err := c.storage.GetList(ctx, c.resourcePrefix, storage.ListOptions{Predicate: pred}, emptyList) - if err != nil { - return 0, err - } - emptyListAccessor, err := meta.ListAccessor(emptyList) - if err != nil { - return 0, err - } - if emptyListAccessor == nil { - return 0, fmt.Errorf("unable to extract a list accessor from %T", emptyList) - } - - currentResourceVersion, err := strconv.Atoi(emptyListAccessor.GetResourceVersion()) - if err != nil { - return 0, err - } - - if currentResourceVersion == 0 { - return 0, fmt.Errorf("the current resource version must be greater than 0") - } - return uint64(currentResourceVersion), nil -} - // getBookmarkAfterResourceVersionLockedFunc returns a function that // spits a ResourceVersion after which the bookmark event will be delivered. // // The returned function must be called under the watchCache lock. func (c *Cacher) getBookmarkAfterResourceVersionLockedFunc(ctx context.Context, parsedResourceVersion uint64, opts storage.ListOptions) (func() uint64, error) { - if opts.SendInitialEvents == nil || *opts.SendInitialEvents == false || !opts.Predicate.AllowWatchBookmarks { + if opts.SendInitialEvents == nil || !*opts.SendInitialEvents || !opts.Predicate.AllowWatchBookmarks { return func() uint64 { return 0 }, nil } return c.getCommonResourceVersionLockedFunc(ctx, parsedResourceVersion, opts) @@ -1305,7 +1265,7 @@ func (c *Cacher) getBookmarkAfterResourceVersionLockedFunc(ctx context.Context, // // The returned function must be called under the watchCache lock. func (c *Cacher) getStartResourceVersionForWatchLockedFunc(ctx context.Context, parsedWatchResourceVersion uint64, opts storage.ListOptions) (func() uint64, error) { - if opts.SendInitialEvents == nil || *opts.SendInitialEvents == true { + if opts.SendInitialEvents == nil || *opts.SendInitialEvents { return func() uint64 { return parsedWatchResourceVersion }, nil } return c.getCommonResourceVersionLockedFunc(ctx, parsedWatchResourceVersion, opts) @@ -1318,7 +1278,7 @@ func (c *Cacher) getStartResourceVersionForWatchLockedFunc(ctx context.Context, func (c *Cacher) getCommonResourceVersionLockedFunc(ctx context.Context, parsedWatchResourceVersion uint64, opts storage.ListOptions) (func() uint64, error) { switch { case len(opts.ResourceVersion) == 0: - rv, err := c.getCurrentResourceVersionFromStorage(ctx) + rv, err := storage.GetCurrentResourceVersionFromStorage(ctx, c.storage, c.newListFunc, c.resourcePrefix, c.objectType.String()) if err != nil { return nil, err } @@ -1336,7 +1296,7 @@ func (c *Cacher) getCommonResourceVersionLockedFunc(ctx context.Context, parsedW // Additionally, it instructs the caller whether it should ask for // all events from the cache (full state) or not. 
func (c *Cacher) waitUntilWatchCacheFreshAndForceAllEvents(ctx context.Context, requestedWatchRV uint64, opts storage.ListOptions) (bool, error) { - if opts.SendInitialEvents != nil && *opts.SendInitialEvents == true { + if opts.SendInitialEvents != nil && *opts.SendInitialEvents { err := c.watchCache.waitUntilFreshAndBlock(ctx, requestedWatchRV) defer c.watchCache.RUnlock() return err == nil, err diff --git a/vendor/k8s.io/apiserver/pkg/storage/errors.go b/vendor/k8s.io/apiserver/pkg/storage/errors.go index ed4f4d0d0..5f29097c5 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/errors.go +++ b/vendor/k8s.io/apiserver/pkg/storage/errors.go @@ -17,13 +17,16 @@ limitations under the License. package storage import ( + "errors" "fmt" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation/field" ) +var ErrResourceVersionSetOnCreate = errors.New("resourceVersion should not be set on objects to be created") + const ( ErrCodeKeyNotFound int = iota + 1 ErrCodeKeyExists @@ -176,7 +179,7 @@ var tooLargeResourceVersionCauseMsg = "Too large resource version" // NewTooLargeResourceVersionError returns a timeout error with the given retrySeconds for a request for // a minimum resource version that is larger than the largest currently available resource version for a requested resource. func NewTooLargeResourceVersionError(minimumResourceVersion, currentRevision uint64, retrySeconds int) error { - err := errors.NewTimeoutError(fmt.Sprintf("Too large resource version: %d, current: %d", minimumResourceVersion, currentRevision), retrySeconds) + err := apierrors.NewTimeoutError(fmt.Sprintf("Too large resource version: %d, current: %d", minimumResourceVersion, currentRevision), retrySeconds) err.ErrStatus.Details.Causes = []metav1.StatusCause{ { Type: metav1.CauseTypeResourceVersionTooLarge, @@ -188,8 +191,8 @@ func NewTooLargeResourceVersionError(minimumResourceVersion, currentRevision uin // IsTooLargeResourceVersion returns true if the error is a TooLargeResourceVersion error. func IsTooLargeResourceVersion(err error) bool { - if !errors.IsTimeout(err) { + if !apierrors.IsTimeout(err) { return false } - return errors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge) + return apierrors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge) } diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/event.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/event.go index 3e5bfb1c6..e7644ddfa 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/event.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/event.go @@ -30,6 +30,17 @@ type event struct { isDeleted bool isCreated bool isProgressNotify bool + // isInitialEventsEndBookmark helps us keep track + // of whether we have sent an annotated bookmark event. + // + // when this variable is set to true, + // a special annotation will be added + // to the bookmark event. + // + // note that we decided to extend the event + // struct field to eliminate contention + // between startWatching and processEvent + isInitialEventsEndBookmark bool } // parseKV converts a KeyValue retrieved from an initial sync() listing to a synthetic isCreated event. 
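Editor's note: the cache_watcher and event changes above centralize the "k8s.io/initial-events-end" annotation that marks the synthetic bookmark closing the initial event stream. As a rough illustration of what a consumer of such a watch stream could do with that marker, here is a minimal Go sketch; the helper names and the channel wiring are assumptions made for the example, while the annotation key, watch.Bookmark and meta.Accessor come from the code in this patch.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/watch"
)

// isInitialEventsEnd reports whether a watch event is the synthetic bookmark
// that signals "the initial list has been fully replayed". The annotation key
// matches the one written by the bookmark-annotation helper in this patch.
func isInitialEventsEnd(ev watch.Event) bool {
	if ev.Type != watch.Bookmark {
		return false
	}
	objMeta, err := meta.Accessor(ev.Object)
	if err != nil {
		return false
	}
	return objMeta.GetAnnotations()["k8s.io/initial-events-end"] == "true"
}

// consume switches from "syncing" to "streaming" once the annotated bookmark
// arrives; in real use the channel would come from watch.Interface.ResultChan().
func consume(results <-chan watch.Event) {
	synced := false
	for ev := range results {
		if !synced && isInitialEventsEnd(ev) {
			synced = true
			fmt.Println("initial state received, streaming further updates")
			continue
		}
		fmt.Printf("event: %s\n", ev.Type)
	}
}

func main() {
	events := make(chan watch.Event)
	close(events) // placeholder stream; a real caller would feed ResultChan() events here
	consume(events)
}

On the server side the annotation is written via storage.AnnotateInitialEventsEndBookmark (introduced further down in this patch), so a consumer only ever needs the read-side check sketched here.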
diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go index ac023d55d..fadc87d53 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go @@ -69,7 +69,7 @@ var ( objectCounts = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ Name: "apiserver_storage_objects", - Help: "Number of stored objects at the time of last check split by kind.", + Help: "Number of stored objects at the time of last check split by kind. In case of a fetching error, the value will be -1.", StabilityLevel: compbasemetrics.STABLE, }, []string{"resource"}, @@ -228,7 +228,7 @@ func UpdateEtcdDbSize(ep string, size int64) { // SetStorageMonitorGetter sets monitor getter to allow monitoring etcd stats. func SetStorageMonitorGetter(getter func() ([]Monitor, error)) { - storageMonitor.monitorGetter = getter + storageMonitor.setGetter(getter) } // UpdateLeaseObjectCount sets the etcd_lease_object_counts metric. @@ -258,9 +258,22 @@ type StorageMetrics struct { type monitorCollector struct { compbasemetrics.BaseStableCollector + mutex sync.Mutex monitorGetter func() ([]Monitor, error) } +func (m *monitorCollector) setGetter(monitorGetter func() ([]Monitor, error)) { + m.mutex.Lock() + defer m.mutex.Unlock() + m.monitorGetter = monitorGetter +} + +func (m *monitorCollector) getGetter() func() ([]Monitor, error) { + m.mutex.Lock() + defer m.mutex.Unlock() + return m.monitorGetter +} + // DescribeWithStability implements compbasemetrics.StableColletor func (c *monitorCollector) DescribeWithStability(ch chan<- *compbasemetrics.Desc) { ch <- storageSizeDescription @@ -268,7 +281,7 @@ func (c *monitorCollector) DescribeWithStability(ch chan<- *compbasemetrics.Desc // CollectWithStability implements compbasemetrics.StableColletor func (c *monitorCollector) CollectWithStability(ch chan<- compbasemetrics.Metric) { - monitors, err := c.monitorGetter() + monitors, err := c.getGetter()() if err != nil { return } diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go index 737415223..1e5e40d62 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go @@ -32,19 +32,15 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/watch" "k8s.io/apiserver/pkg/audit" - "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/etcd3/metrics" "k8s.io/apiserver/pkg/storage/value" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/component-base/tracing" "k8s.io/klog/v2" ) @@ -81,7 +77,6 @@ type store struct { groupResource schema.GroupResource groupResourceString string watcher *watcher - pagingEnabled bool leaseManager *leaseManager } @@ -100,11 +95,11 @@ type objState struct { } // New returns an etcd3 implementation of storage.Interface. 
-func New(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, groupResource schema.GroupResource, transformer value.Transformer, pagingEnabled bool, leaseManagerConfig LeaseManagerConfig) storage.Interface { - return newStore(c, codec, newFunc, prefix, groupResource, transformer, pagingEnabled, leaseManagerConfig) +func New(c *clientv3.Client, codec runtime.Codec, newFunc, newListFunc func() runtime.Object, prefix, resourcePrefix string, groupResource schema.GroupResource, transformer value.Transformer, leaseManagerConfig LeaseManagerConfig) storage.Interface { + return newStore(c, codec, newFunc, newListFunc, prefix, resourcePrefix, groupResource, transformer, leaseManagerConfig) } -func newStore(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, groupResource schema.GroupResource, transformer value.Transformer, pagingEnabled bool, leaseManagerConfig LeaseManagerConfig) *store { +func newStore(c *clientv3.Client, codec runtime.Codec, newFunc, newListFunc func() runtime.Object, prefix, resourcePrefix string, groupResource schema.GroupResource, transformer value.Transformer, leaseManagerConfig LeaseManagerConfig) *store { versioner := storage.APIObjectVersioner{} // for compatibility with etcd2 impl. // no-op for default prefix of '/registry'. @@ -114,19 +109,36 @@ func newStore(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Ob // Ensure the pathPrefix ends in "/" here to simplify key concatenation later. pathPrefix += "/" } - result := &store{ + + w := &watcher{ + client: c, + codec: codec, + newFunc: newFunc, + groupResource: groupResource, + versioner: versioner, + transformer: transformer, + } + if newFunc == nil { + w.objectType = "" + } else { + w.objectType = reflect.TypeOf(newFunc()).String() + } + s := &store{ client: c, codec: codec, versioner: versioner, transformer: transformer, - pagingEnabled: pagingEnabled, pathPrefix: pathPrefix, groupResource: groupResource, groupResourceString: groupResource.String(), - watcher: newWatcher(c, codec, groupResource, newFunc, versioner), + watcher: w, leaseManager: newDefaultLeaseManager(c, leaseManagerConfig), } - return result + + w.getCurrentStorageRV = func(ctx context.Context) (uint64, error) { + return storage.GetCurrentResourceVersionFromStorage(ctx, s, newListFunc, resourcePrefix, w.objectType) + } + return s } // Versioner implements storage.Interface.Versioner. 
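Editor's note: the newStore change above drops the separate newWatcher constructor and wires the watcher inline, injecting a getCurrentStorageRV closure so the watcher can ask the store for the latest revision without holding a reference to it. A minimal sketch of that late-binding pattern, using placeholder types rather than the real etcd3 store and watcher, might look like this:

package main

import (
	"context"
	"fmt"
)

// watcher and store stand in for the etcd3 watcher and store wired together
// in newStore above; the fields are placeholders, not the real types.
type watcher struct {
	// getCurrentStorageRV is injected by the owning store so the watcher can
	// ask for the latest revision without holding a reference to the store.
	getCurrentStorageRV func(ctx context.Context) (uint64, error)
}

type store struct {
	currentRV uint64 // stands in for a consistent revision read against etcd
	watcher   *watcher
}

func newStore() *store {
	w := &watcher{}
	s := &store{currentRV: 42, watcher: w}
	// Late-bind the closure once both values exist, mirroring how newStore
	// assigns w.getCurrentStorageRV at the end of construction.
	w.getCurrentStorageRV = func(ctx context.Context) (uint64, error) {
		return s.currentRV, nil
	}
	return s
}

func main() {
	s := newStore()
	rv, _ := s.watcher.getCurrentStorageRV(context.Background())
	fmt.Println("current storage revision:", rv)
}

The apparent benefit of this shape is that the closure is created only after both objects exist, so neither constructor needs the other's full type, which is what allows the revision lookup itself to live in the generic storage package.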
@@ -185,7 +197,7 @@ func (s *store) Create(ctx context.Context, key string, obj, out runtime.Object, ) defer span.End(500 * time.Millisecond) if version, err := s.versioner.ObjectResourceVersion(obj); err == nil && version != 0 { - return errors.New("resourceVersion should not be set on objects to be created") + return storage.ErrResourceVersionSetOnCreate } if err := s.versioner.PrepareObjectForStorage(obj); err != nil { return fmt.Errorf("PrepareObjectForStorage failed: %v", err) @@ -258,15 +270,7 @@ func (s *store) Delete( func (s *store) conditionalDelete( ctx context.Context, key string, out runtime.Object, v reflect.Value, preconditions *storage.Preconditions, validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error { - getCurrentState := func() (*objState, error) { - startTime := time.Now() - getResp, err := s.client.KV.Get(ctx, key) - metrics.RecordEtcdRequest("get", s.groupResourceString, err, startTime) - if err != nil { - return nil, err - } - return s.getState(ctx, getResp, key, v, false) - } + getCurrentState := s.getCurrentState(ctx, key, v, false) var origState *objState var err error @@ -394,15 +398,7 @@ func (s *store) GuaranteedUpdate( return fmt.Errorf("unable to convert output object to pointer: %v", err) } - getCurrentState := func() (*objState, error) { - startTime := time.Now() - getResp, err := s.client.KV.Get(ctx, preparedKey) - metrics.RecordEtcdRequest("get", s.groupResourceString, err, startTime) - if err != nil { - return nil, err - } - return s.getState(ctx, getResp, preparedKey, v, ignoreNotFound) - } + getCurrentState := s.getCurrentState(ctx, preparedKey, v, ignoreNotFound) var origState *objState var origStateIsCurrent bool @@ -594,17 +590,13 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption if err != nil { return err } - recursive := opts.Recursive - resourceVersion := opts.ResourceVersion - match := opts.ResourceVersionMatch - pred := opts.Predicate - ctx, span := tracing.Start(ctx, fmt.Sprintf("List(recursive=%v) etcd3", recursive), + ctx, span := tracing.Start(ctx, fmt.Sprintf("List(recursive=%v) etcd3", opts.Recursive), attribute.String("audit-id", audit.GetAuditIDTruncated(ctx)), attribute.String("key", key), - attribute.String("resourceVersion", resourceVersion), - attribute.String("resourceVersionMatch", string(match)), - attribute.Int("limit", int(pred.Limit)), - attribute.String("continue", pred.Continue)) + attribute.String("resourceVersion", opts.ResourceVersion), + attribute.String("resourceVersionMatch", string(opts.ResourceVersionMatch)), + attribute.Int("limit", int(opts.Predicate.Limit)), + attribute.String("continue", opts.Predicate.Continue)) defer span.End(500 * time.Millisecond) listPtr, err := meta.GetItemsPtr(listObj) if err != nil { @@ -619,97 +611,68 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption // get children "directories". e.g. if we have key "/a", "/a/b", "/ab", getting keys // with prefix "/a" will return all three, while with prefix "/a/" will return only // "/a/b" which is the correct answer. 
- if recursive && !strings.HasSuffix(preparedKey, "/") { + if opts.Recursive && !strings.HasSuffix(preparedKey, "/") { preparedKey += "/" } keyPrefix := preparedKey // set the appropriate clientv3 options to filter the returned data set var limitOption *clientv3.OpOption - limit := pred.Limit + limit := opts.Predicate.Limit var paging bool options := make([]clientv3.OpOption, 0, 4) - if s.pagingEnabled && pred.Limit > 0 { + if opts.Predicate.Limit > 0 { paging = true options = append(options, clientv3.WithLimit(limit)) limitOption = &options[len(options)-1] } - newItemFunc := getNewItemFunc(listObj, v) - - var fromRV *uint64 - if len(resourceVersion) > 0 { - parsedRV, err := s.versioner.ParseResourceVersion(resourceVersion) - if err != nil { - return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err)) - } - fromRV = &parsedRV + if opts.Recursive { + rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix) + options = append(options, clientv3.WithRange(rangeEnd)) } - var returnedRV, continueRV, withRev int64 + newItemFunc := getNewItemFunc(listObj, v) + + var continueRV, withRev int64 var continueKey string switch { - case recursive && s.pagingEnabled && len(pred.Continue) > 0: - continueKey, continueRV, err = storage.DecodeContinue(pred.Continue, keyPrefix) + case opts.Recursive && len(opts.Predicate.Continue) > 0: + continueKey, continueRV, err = storage.DecodeContinue(opts.Predicate.Continue, keyPrefix) if err != nil { return apierrors.NewBadRequest(fmt.Sprintf("invalid continue token: %v", err)) } - if len(resourceVersion) > 0 && resourceVersion != "0" { + if len(opts.ResourceVersion) > 0 && opts.ResourceVersion != "0" { return apierrors.NewBadRequest("specifying resource version is not allowed when using continue") } - - rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix) - options = append(options, clientv3.WithRange(rangeEnd)) preparedKey = continueKey - // If continueRV > 0, the LIST request needs a specific resource version. // continueRV==0 is invalid. // If continueRV < 0, the request is for the latest resource version. if continueRV > 0 { withRev = continueRV - returnedRV = continueRV } - case recursive && s.pagingEnabled && pred.Limit > 0: - if fromRV != nil { - switch match { - case metav1.ResourceVersionMatchNotOlderThan: - // The not older than constraint is checked after we get a response from etcd, - // and returnedRV is then set to the revision we get from the etcd response. - case metav1.ResourceVersionMatchExact: - returnedRV = int64(*fromRV) - withRev = returnedRV - case "": // legacy case - if *fromRV > 0 { - returnedRV = int64(*fromRV) - withRev = returnedRV - } - default: - return fmt.Errorf("unknown ResourceVersionMatch value: %v", match) - } + case len(opts.ResourceVersion) > 0: + parsedRV, err := s.versioner.ParseResourceVersion(opts.ResourceVersion) + if err != nil { + return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err)) } - - rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix) - options = append(options, clientv3.WithRange(rangeEnd)) - default: - if fromRV != nil { - switch match { - case metav1.ResourceVersionMatchNotOlderThan: - // The not older than constraint is checked after we get a response from etcd, - // and returnedRV is then set to the revision we get from the etcd response. 
- case metav1.ResourceVersionMatchExact: - returnedRV = int64(*fromRV) - withRev = returnedRV - case "": // legacy case - default: - return fmt.Errorf("unknown ResourceVersionMatch value: %v", match) + switch opts.ResourceVersionMatch { + case metav1.ResourceVersionMatchNotOlderThan: + // The not older than constraint is checked after we get a response from etcd, + // and returnedRV is then set to the revision we get from the etcd response. + case metav1.ResourceVersionMatchExact: + withRev = int64(parsedRV) + case "": // legacy case + if opts.Recursive && opts.Predicate.Limit > 0 && parsedRV > 0 { + withRev = int64(parsedRV) } - } - - if recursive { - options = append(options, clientv3.WithPrefix()) + default: + return fmt.Errorf("unknown ResourceVersionMatch value: %v", opts.ResourceVersionMatch) } } + if withRev != 0 { options = append(options, clientv3.WithRev(withRev)) } @@ -728,7 +691,7 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption }() metricsOp := "get" - if recursive { + if opts.Recursive { metricsOp = "list" } @@ -737,10 +700,10 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption getResp, err = s.client.KV.Get(ctx, preparedKey, options...) metrics.RecordEtcdRequest(metricsOp, s.groupResourceString, err, startTime) if err != nil { - return interpretListError(err, len(pred.Continue) > 0, continueKey, keyPrefix) + return interpretListError(err, len(opts.Predicate.Continue) > 0, continueKey, keyPrefix) } numFetched += len(getResp.Kvs) - if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil { + if err = s.validateMinimumResourceVersion(opts.ResourceVersion, uint64(getResp.Header.Revision)); err != nil { return err } hasMore = getResp.More @@ -748,10 +711,15 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption if len(getResp.Kvs) == 0 && getResp.More { return fmt.Errorf("no results were found, but etcd indicated there were more values remaining") } + // indicate to the client which resource version was returned, and use the same resource version for subsequent requests. 
+ if withRev == 0 { + withRev = getResp.Header.Revision + options = append(options, clientv3.WithRev(withRev)) + } // avoid small allocations for the result slice, since this can be called in many // different contexts and we don't know how significantly the result will be filtered - if pred.Empty() { + if opts.Predicate.Empty() { growSlice(v, len(getResp.Kvs)) } else { growSlice(v, 2048, len(getResp.Kvs)) @@ -759,7 +727,7 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption // take items from the response until the bucket is full, filtering as we go for i, kv := range getResp.Kvs { - if paging && int64(v.Len()) >= pred.Limit { + if paging && int64(v.Len()) >= opts.Predicate.Limit { hasMore = true break } @@ -770,7 +738,7 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption return storage.NewInternalErrorf("unable to transform key %q: %v", kv.Key, err) } - if err := appendListItem(v, data, uint64(kv.ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil { + if err := appendListItem(v, data, uint64(kv.ModRevision), opts.Predicate, s.codec, s.versioner, newItemFunc); err != nil { recordDecodeError(s.groupResourceString, string(kv.Key)) return err } @@ -780,17 +748,12 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption getResp.Kvs[i] = nil } - // indicate to the client which resource version was returned - if returnedRV == 0 { - returnedRV = getResp.Header.Revision - } - // no more results remain or we didn't request paging if !hasMore || !paging { break } // we're paging but we have filled our bucket - if int64(v.Len()) >= pred.Limit { + if int64(v.Len()) >= opts.Predicate.Limit { break } @@ -804,11 +767,8 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption *limitOption = clientv3.WithLimit(limit) } preparedKey = string(lastKey) + "\x00" - if withRev == 0 { - withRev = returnedRV - options = append(options, clientv3.WithRev(withRev)) - } } + if v.IsNil() { // Ensure that we never return a nil Items pointer in the result for consistency. v.Set(reflect.MakeSlice(v.Type(), 0, 0)) @@ -818,7 +778,7 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption // we never return a key that the client wouldn't be allowed to see if hasMore { // we want to start immediately after the last key - next, err := storage.EncodeContinue(string(lastKey)+"\x00", keyPrefix, returnedRV) + next, err := storage.EncodeContinue(string(lastKey)+"\x00", keyPrefix, withRev) if err != nil { return err } @@ -826,17 +786,15 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption // getResp.Count counts in objects that do not match the pred. // Instead of returning inaccurate count for non-empty selectors, we return nil. // Only set remainingItemCount if the predicate is empty. 
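Editor's note: the GetList rework in this hunk replaces the separate returnedRV bookkeeping with a single withRev: the first page's response header fixes the revision, every later page is issued with clientv3.WithRev(withRev), and the continue token is encoded against that same revision. The sketch below shows the same revision-pinned pagination pattern against a bare etcd client; the endpoint, key prefix and page size are made up for the example, and this is not the apiserver's actual GetList.

package main

import (
	"context"
	"fmt"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// listAtOneRevision pages through every key under prefix while pinning all
// pages to the revision of the first response, so the caller gets a single
// consistent snapshot rather than a mix of revisions.
func listAtOneRevision(ctx context.Context, cli *clientv3.Client, prefix string, pageSize int64) ([][]byte, int64, error) {
	opts := []clientv3.OpOption{
		clientv3.WithLimit(pageSize),
		clientv3.WithRange(clientv3.GetPrefixRangeEnd(prefix)),
	}
	var (
		values [][]byte
		rev    int64 // 0 until the first page tells us the snapshot revision
		key    = prefix
	)
	for {
		resp, err := cli.KV.Get(ctx, key, opts...)
		if err != nil {
			return nil, 0, err
		}
		if rev == 0 {
			// Pin every following page (and any continue token handed back
			// to a client) to the revision of this first response.
			rev = resp.Header.Revision
			opts = append(opts, clientv3.WithRev(rev))
		}
		for _, kv := range resp.Kvs {
			values = append(values, kv.Value)
			// Start the next page just past the last key we have seen.
			key = string(kv.Key) + "\x00"
		}
		if !resp.More || len(resp.Kvs) == 0 {
			return values, rev, nil
		}
	}
}

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		panic(err)
	}
	defer cli.Close()
	vals, rev, err := listAtOneRevision(context.Background(), cli, "/registry/example/", 500)
	fmt.Println(len(vals), rev, err)
}

Pinning the revision this way is what allows the continue token to promise "resume exactly where the snapshot left off", which the hunk below relies on when it encodes withRev into the token.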
- if utilfeature.DefaultFeatureGate.Enabled(features.RemainingItemCount) { - if pred.Empty() { - c := int64(getResp.Count - pred.Limit) - remainingItemCount = &c - } + if opts.Predicate.Empty() { + c := int64(getResp.Count - opts.Predicate.Limit) + remainingItemCount = &c } - return s.versioner.UpdateList(listObj, uint64(returnedRV), next, remainingItemCount) + return s.versioner.UpdateList(listObj, uint64(withRev), next, remainingItemCount) } // no continuation - return s.versioner.UpdateList(listObj, uint64(returnedRV), "", nil) + return s.versioner.UpdateList(listObj, uint64(withRev), "", nil) } // growSlice takes a slice value and grows its capacity up @@ -871,18 +829,7 @@ func growSlice(v reflect.Value, maxCapacity int, sizes ...int) { } // Watch implements storage.Interface.Watch. -// TODO(#115478): In order to graduate the WatchList feature to beta, the etcd3 implementation must/should also support it. func (s *store) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) { - // it is safe to skip SendInitialEvents if the request is backward compatible - // see https://github.com/kubernetes/kubernetes/blob/267eb25e60955fe8e438c6311412e7cf7d028acb/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go#L260 - compatibility := opts.Predicate.AllowWatchBookmarks == false && (opts.ResourceVersion == "" || opts.ResourceVersion == "0") - if opts.SendInitialEvents != nil && !compatibility { - return nil, apierrors.NewInvalid( - schema.GroupKind{Group: s.groupResource.Group, Kind: s.groupResource.Resource}, - "", - field.ErrorList{field.Forbidden(field.NewPath("sendInitialEvents"), "for watch is unsupported by an etcd cluster")}, - ) - } preparedKey, err := s.prepareKey(key) if err != nil { return nil, err @@ -891,7 +838,7 @@ func (s *store) Watch(ctx context.Context, key string, opts storage.ListOptions) if err != nil { return nil, err } - return s.watcher.Watch(s.watchContext(ctx), preparedKey, int64(rev), opts.Recursive, opts.ProgressNotify, s.transformer, opts.Predicate) + return s.watcher.Watch(s.watchContext(ctx), preparedKey, int64(rev), opts) } func (s *store) watchContext(ctx context.Context) context.Context { @@ -905,6 +852,18 @@ func (s *store) watchContext(ctx context.Context) context.Context { return clientv3.WithRequireLeader(ctx) } +func (s *store) getCurrentState(ctx context.Context, key string, v reflect.Value, ignoreNotFound bool) func() (*objState, error) { + return func() (*objState, error) { + startTime := time.Now() + getResp, err := s.client.KV.Get(ctx, key) + metrics.RecordEtcdRequest("get", s.groupResourceString, err, startTime) + if err != nil { + return nil, err + } + return s.getState(ctx, getResp, key, v, ignoreNotFound) + } +} + func (s *store) getState(ctx context.Context, getResp *clientv3.GetResponse, key string, v reflect.Value, ignoreNotFound bool) (*objState, error) { state := &objState{ meta: &storage.ResponseMeta{}, diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go index d4929bd9d..85acf44f8 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go @@ -18,27 +18,29 @@ package etcd3 import ( "context" + "errors" "fmt" "os" - "reflect" "strconv" "strings" "sync" + "time" + clientv3 "go.etcd.io/etcd/client/v3" grpccodes "google.golang.org/grpc/codes" grpcstatus "google.golang.org/grpc/status" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" 
"k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" + "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/etcd3/metrics" "k8s.io/apiserver/pkg/storage/value" + utilfeature "k8s.io/apiserver/pkg/util/feature" utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" - - clientv3 "go.etcd.io/etcd/client/v3" - "k8s.io/klog/v2" ) @@ -48,6 +50,9 @@ const ( outgoingBufSize = 100 ) +// defaultWatcherMaxLimit is used to facilitate construction tests +var defaultWatcherMaxLimit int64 = maxLimit + // fatalOnDecodeError is used during testing to panic the server if watcher encounters a decoding error var fatalOnDecodeError = false @@ -63,18 +68,19 @@ func TestOnlySetFatalOnDecodeError(b bool) { } type watcher struct { - client *clientv3.Client - codec runtime.Codec - newFunc func() runtime.Object - objectType string - groupResource schema.GroupResource - versioner storage.Versioner + client *clientv3.Client + codec runtime.Codec + newFunc func() runtime.Object + objectType string + groupResource schema.GroupResource + versioner storage.Versioner + transformer value.Transformer + getCurrentStorageRV func(context.Context) (uint64, error) } // watchChan implements watch.Interface. type watchChan struct { watcher *watcher - transformer value.Transformer key string initialRev int64 recursive bool @@ -87,35 +93,26 @@ type watchChan struct { errChan chan error } -func newWatcher(client *clientv3.Client, codec runtime.Codec, groupResource schema.GroupResource, newFunc func() runtime.Object, versioner storage.Versioner) *watcher { - res := &watcher{ - client: client, - codec: codec, - groupResource: groupResource, - newFunc: newFunc, - versioner: versioner, - } - if newFunc == nil { - res.objectType = "" - } else { - res.objectType = reflect.TypeOf(newFunc()).String() - } - return res -} - // Watch watches on a key and returns a watch.Interface that transfers relevant notifications. // If rev is zero, it will return the existing object(s) and then start watching from // the maximum revision+1 from returned objects. // If rev is non-zero, it will watch events happened after given revision. -// If recursive is false, it watches on given key. -// If recursive is true, it watches any children and directories under the key, excluding the root key itself. -// pred must be non-nil. Only if pred matches the change, it will be returned. -func (w *watcher) Watch(ctx context.Context, key string, rev int64, recursive, progressNotify bool, transformer value.Transformer, pred storage.SelectionPredicate) (watch.Interface, error) { - if recursive && !strings.HasSuffix(key, "/") { +// If opts.Recursive is false, it watches on given key. +// If opts.Recursive is true, it watches any children and directories under the key, excluding the root key itself. +// pred must be non-nil. Only if opts.Predicate matches the change, it will be returned. 
+func (w *watcher) Watch(ctx context.Context, key string, rev int64, opts storage.ListOptions) (watch.Interface, error) { + if opts.Recursive && !strings.HasSuffix(key, "/") { key += "/" } - wc := w.createWatchChan(ctx, key, rev, recursive, progressNotify, transformer, pred) - go wc.run() + if opts.ProgressNotify && w.newFunc == nil { + return nil, apierrors.NewInternalError(errors.New("progressNotify for watch is unsupported by the etcd storage because no newFunc was provided")) + } + startWatchRV, err := w.getStartWatchResourceVersion(ctx, rev, opts) + if err != nil { + return nil, err + } + wc := w.createWatchChan(ctx, key, startWatchRV, opts.Recursive, opts.ProgressNotify, opts.Predicate) + go wc.run(isInitialEventsEndBookmarkRequired(opts), areInitialEventsRequired(rev, opts)) // For etcd watch we don't have an easy way to answer whether the watch // has already caught up. So in the initial version (given that watchcache @@ -127,10 +124,9 @@ func (w *watcher) Watch(ctx context.Context, key string, rev int64, recursive, p return wc, nil } -func (w *watcher) createWatchChan(ctx context.Context, key string, rev int64, recursive, progressNotify bool, transformer value.Transformer, pred storage.SelectionPredicate) *watchChan { +func (w *watcher) createWatchChan(ctx context.Context, key string, rev int64, recursive, progressNotify bool, pred storage.SelectionPredicate) *watchChan { wc := &watchChan{ watcher: w, - transformer: transformer, key: key, initialRev: rev, recursive: recursive, @@ -148,6 +144,62 @@ func (w *watcher) createWatchChan(ctx context.Context, key string, rev int64, re return wc } +// getStartWatchResourceVersion returns a ResourceVersion +// the watch will be started from. +// Depending on the input parameters the semantics of the returned ResourceVersion are: +// - start at Exact (return resourceVersion) +// - start at Most Recent (return an RV from etcd) +func (w *watcher) getStartWatchResourceVersion(ctx context.Context, resourceVersion int64, opts storage.ListOptions) (int64, error) { + if resourceVersion > 0 { + return resourceVersion, nil + } + if !utilfeature.DefaultFeatureGate.Enabled(features.WatchList) { + return 0, nil + } + if opts.SendInitialEvents == nil || *opts.SendInitialEvents { + // note that when opts.SendInitialEvents=true + // we will be issuing a consistent LIST request + // against etcd followed by the special bookmark event + return 0, nil + } + // at this point the clients is interested + // only in getting a stream of events + // starting at the MostRecent point in time (RV) + currentStorageRV, err := w.getCurrentStorageRV(ctx) + if err != nil { + return 0, err + } + // currentStorageRV is taken from resp.Header.Revision (int64) + // and cast to uint64, so it is safe to do reverse + // at some point we should unify the interface but that + // would require changing Versioner.UpdateList + return int64(currentStorageRV), nil +} + +// isInitialEventsEndBookmarkRequired since there is no way to directly set +// opts.ProgressNotify from the API and the etcd3 impl doesn't support +// notification for external clients we simply return initialEventsEndBookmarkRequired +// to only send the bookmark event after the initial list call. 
+// +// see: https://github.com/kubernetes/kubernetes/issues/120348 +func isInitialEventsEndBookmarkRequired(opts storage.ListOptions) bool { + if !utilfeature.DefaultFeatureGate.Enabled(features.WatchList) { + return false + } + return opts.SendInitialEvents != nil && *opts.SendInitialEvents && opts.Predicate.AllowWatchBookmarks +} + +// areInitialEventsRequired returns true if all events from the etcd should be returned. +func areInitialEventsRequired(resourceVersion int64, opts storage.ListOptions) bool { + if opts.SendInitialEvents == nil && resourceVersion == 0 { + return true // legacy case + } + if !utilfeature.DefaultFeatureGate.Enabled(features.WatchList) { + return false + } + return opts.SendInitialEvents != nil && *opts.SendInitialEvents +} + type etcdError interface { Code() grpccodes.Code Error() string @@ -173,9 +225,9 @@ func isCancelError(err error) bool { return false } -func (wc *watchChan) run() { +func (wc *watchChan) run(initialEventsEndBookmarkRequired, forceInitialEvents bool) { watchClosedCh := make(chan struct{}) - go wc.startWatching(watchClosedCh) + go wc.startWatching(watchClosedCh, initialEventsEndBookmarkRequired, forceInitialEvents) var resultChanWG sync.WaitGroup resultChanWG.Add(1) @@ -225,17 +277,58 @@ func (wc *watchChan) RequestWatchProgress() error { func (wc *watchChan) sync() error { opts := []clientv3.OpOption{} if wc.recursive { - opts = append(opts, clientv3.WithPrefix()) + opts = append(opts, clientv3.WithLimit(defaultWatcherMaxLimit)) + rangeEnd := clientv3.GetPrefixRangeEnd(wc.key) + opts = append(opts, clientv3.WithRange(rangeEnd)) } - getResp, err := wc.watcher.client.Get(wc.ctx, wc.key, opts...) - if err != nil { - return err + + var err error + var lastKey []byte + var withRev int64 + var getResp *clientv3.GetResponse + + metricsOp := "get" + if wc.recursive { + metricsOp = "list" } - wc.initialRev = getResp.Header.Revision - for _, kv := range getResp.Kvs { - wc.sendEvent(parseKV(kv)) + + preparedKey := wc.key + + for { + startTime := time.Now() + getResp, err = wc.watcher.client.KV.Get(wc.ctx, preparedKey, opts...) + metrics.RecordEtcdRequest(metricsOp, wc.watcher.groupResource.String(), err, startTime) + if err != nil { + return interpretListError(err, true, preparedKey, wc.key) + } + + if len(getResp.Kvs) == 0 && getResp.More { + return fmt.Errorf("no results were found, but etcd indicated there were more values remaining") + } + + // send items from the response until no more results + for i, kv := range getResp.Kvs { + lastKey = kv.Key + wc.sendEvent(parseKV(kv)) + // free kv early. Long lists can take O(seconds) to decode. + getResp.Kvs[i] = nil + } + + if withRev == 0 { + wc.initialRev = getResp.Header.Revision + } + + // no more results remain + if !getResp.More { + return nil + } + + preparedKey = string(lastKey) + "\x00" + if withRev == 0 { + withRev = getResp.Header.Revision + opts = append(opts, clientv3.WithRev(withRev)) + } } - return nil } func logWatchChannelErr(err error) { @@ -253,14 +346,44 @@ func logWatchChannelErr(err error) { // startWatching does: // - get current objects if initialRev=0; set initialRev to current rev // - watch on given key and send events to process. -func (wc *watchChan) startWatching(watchClosedCh chan struct{}) { - if wc.initialRev == 0 { +// +// initialEventsEndBookmarkSent helps us keep track +// of whether we have sent an annotated bookmark event. 
+// +// it's important to note that we don't +// need to track the actual RV because +// we only send the bookmark event +// after the initial list call. +// +// when this variable is set to false, +// it means we don't have any specific +// preferences for delivering bookmark events. +func (wc *watchChan) startWatching(watchClosedCh chan struct{}, initialEventsEndBookmarkRequired, forceInitialEvents bool) { + if wc.initialRev > 0 && forceInitialEvents { + currentStorageRV, err := wc.watcher.getCurrentStorageRV(wc.ctx) + if err != nil { + wc.sendError(err) + return + } + if uint64(wc.initialRev) > currentStorageRV { + wc.sendError(storage.NewTooLargeResourceVersionError(uint64(wc.initialRev), currentStorageRV, int(wait.Jitter(1*time.Second, 3).Seconds()))) + return + } + } + if forceInitialEvents { if err := wc.sync(); err != nil { klog.Errorf("failed to sync with latest state: %v", err) wc.sendError(err) return } } + if initialEventsEndBookmarkRequired { + wc.sendEvent(func() *event { + e := progressNotifyEvent(wc.initialRev) + e.isInitialEventsEndBookmark = true + return e + }()) + } opts := []clientv3.OpOption{clientv3.WithRev(wc.initialRev + 1), clientv3.WithPrevKV()} if wc.recursive { opts = append(opts, clientv3.WithPrefix()) @@ -352,14 +475,17 @@ func (wc *watchChan) transform(e *event) (res *watch.Event) { switch { case e.isProgressNotify: - if wc.watcher.newFunc == nil { - return nil - } object := wc.watcher.newFunc() if err := wc.watcher.versioner.UpdateObject(object, uint64(e.rev)); err != nil { klog.Errorf("failed to propagate object version: %v", err) return nil } + if e.isInitialEventsEndBookmark { + if err := storage.AnnotateInitialEventsEndBookmark(object); err != nil { + wc.sendError(fmt.Errorf("error while accessing object's metadata gr: %v, type: %v, obj: %#v, err: %v", wc.watcher.groupResource, wc.watcher.objectType, object, err)) + return nil + } + } res = &watch.Event{ Type: watch.Bookmark, Object: object, @@ -447,7 +573,7 @@ func (wc *watchChan) prepareObjs(e *event) (curObj runtime.Object, oldObj runtim } if !e.isDeleted { - data, _, err := wc.transformer.TransformFromStorage(wc.ctx, e.value, authenticatedDataString(e.key)) + data, _, err := wc.watcher.transformer.TransformFromStorage(wc.ctx, e.value, authenticatedDataString(e.key)) if err != nil { return nil, nil, err } @@ -462,7 +588,7 @@ func (wc *watchChan) prepareObjs(e *event) (curObj runtime.Object, oldObj runtim // we need the object only to compute whether it was filtered out // before). if len(e.prevValue) > 0 && (e.isDeleted || !wc.acceptAll()) { - data, _, err := wc.transformer.TransformFromStorage(wc.ctx, e.prevValue, authenticatedDataString(e.key)) + data, _, err := wc.watcher.transformer.TransformFromStorage(wc.ctx, e.prevValue, authenticatedDataString(e.key)) if err != nil { return nil, nil, err } diff --git a/vendor/k8s.io/apiserver/pkg/storage/interfaces.go b/vendor/k8s.io/apiserver/pkg/storage/interfaces.go index 76123fde8..548966080 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/interfaces.go +++ b/vendor/k8s.io/apiserver/pkg/storage/interfaces.go @@ -282,6 +282,19 @@ type ListOptions struct { Recursive bool // ProgressNotify determines whether storage-originated bookmark (progress notify) events should // be delivered to the users. The option is ignored for non-watch requests. + // + // Firstly, note that this field is different from the Predicate.AllowWatchBookmarks field. + // Secondly, this field is intended for internal clients only such as the watch cache. 
+ // + // This means that external clients do not have the ability to set this field directly. + // For example by setting the allowWatchBookmarks query parameter. + // + // The motivation for this approach is the fact that the frequency + // of bookmark events from a storage like etcd might be very high. + // As the number of watch requests increases, the server load would also increase. + // + // Furthermore, the server is not obligated to provide bookmark events at all, + // as described in https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/956-watch-bookmark#proposal ProgressNotify bool // SendInitialEvents, when set together with Watch option, // begin the watch stream with synthetic init events to build the diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go index 47534c978..93b1e707f 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go @@ -62,11 +62,6 @@ type Config struct { Prefix string // Transport holds all connection related info, i.e. equal TransportConfig means equal servers we talk to. Transport TransportConfig - // Paging indicates whether the server implementation should allow paging (if it is - // supported). This is generally configured by feature gating, or by a specific - // resource type not wishing to allow paging, and is not intended for end users to - // set. - Paging bool Codec runtime.Codec // EncodeVersioner is the same groupVersioner used to build the @@ -115,7 +110,6 @@ func (config *Config) ForResource(resource schema.GroupResource) *ConfigForResou func NewDefaultConfig(prefix string, codec runtime.Codec) *Config { return &Config{ - Paging: true, Prefix: prefix, Codec: codec, CompactionInterval: DefaultCompactInterval, diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go index 94cb3710b..2aab5c76d 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go @@ -419,7 +419,7 @@ func startCompactorOnce(c storagebackend.TransportConfig, interval time.Duration }, nil } -func newETCD3Storage(c storagebackend.ConfigForResource, newFunc func() runtime.Object) (storage.Interface, DestroyFunc, error) { +func newETCD3Storage(c storagebackend.ConfigForResource, newFunc, newListFunc func() runtime.Object, resourcePrefix string) (storage.Interface, DestroyFunc, error) { stopCompactor, err := startCompactorOnce(c.Transport, c.CompactionInterval) if err != nil { return nil, nil, err @@ -454,7 +454,7 @@ func newETCD3Storage(c storagebackend.ConfigForResource, newFunc func() runtime. 
if transformer == nil { transformer = identity.NewEncryptCheckTransformer() } - return etcd3.New(client, c.Codec, newFunc, c.Prefix, c.GroupResource, transformer, c.Paging, c.LeaseManagerConfig), destroyFunc, nil + return etcd3.New(client, c.Codec, newFunc, newListFunc, c.Prefix, resourcePrefix, c.GroupResource, transformer, c.LeaseManagerConfig), destroyFunc, nil } // startDBSizeMonitorPerEndpoint starts a loop to monitor etcd database size and update the diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go index 1a60c9290..2bf3727e8 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go @@ -30,12 +30,12 @@ import ( type DestroyFunc func() // Create creates a storage backend based on given config. -func Create(c storagebackend.ConfigForResource, newFunc func() runtime.Object) (storage.Interface, DestroyFunc, error) { +func Create(c storagebackend.ConfigForResource, newFunc, newListFunc func() runtime.Object, resourcePrefix string) (storage.Interface, DestroyFunc, error) { switch c.Type { case storagebackend.StorageTypeETCD2: return nil, nil, fmt.Errorf("%s is no longer a supported storage backend", c.Type) case storagebackend.StorageTypeUnset, storagebackend.StorageTypeETCD3: - return newETCD3Storage(c, newFunc) + return newETCD3Storage(c, newFunc, newListFunc, resourcePrefix) default: return nil, nil, fmt.Errorf("unknown storage type: %s", c.Type) } diff --git a/vendor/k8s.io/apiserver/pkg/storage/util.go b/vendor/k8s.io/apiserver/pkg/storage/util.go index 9da8d9713..6d5fb36d2 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/util.go +++ b/vendor/k8s.io/apiserver/pkg/storage/util.go @@ -17,14 +17,25 @@ limitations under the License. package storage import ( + "context" "fmt" + "strconv" "sync/atomic" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/validation/path" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" ) +const ( + // initialEventsAnnotationKey the name of the key + // under which an annotation marking the end of list stream + // is kept. + initialEventsAnnotationKey = "k8s.io/initial-events-end" +) + type SimpleUpdateFunc func(runtime.Object) (runtime.Object, error) // SimpleUpdateFunc converts SimpleUpdateFunc into UpdateFunc @@ -79,3 +90,72 @@ func (hwm *HighWaterMark) Update(current int64) bool { } } } + +// GetCurrentResourceVersionFromStorage gets the current resource version from the underlying storage engine. 
+// This method issues an empty list request and reads only the ResourceVersion from the object metadata +func GetCurrentResourceVersionFromStorage(ctx context.Context, storage Interface, newListFunc func() runtime.Object, resourcePrefix, objectType string) (uint64, error) { + if storage == nil { + return 0, fmt.Errorf("storage wasn't provided for %s", objectType) + } + if newListFunc == nil { + return 0, fmt.Errorf("newListFunction wasn't provided for %s", objectType) + } + emptyList := newListFunc() + pred := SelectionPredicate{ + Label: labels.Everything(), + Field: fields.Everything(), + Limit: 1, // just in case we actually hit something + } + + err := storage.GetList(ctx, resourcePrefix, ListOptions{Predicate: pred}, emptyList) + if err != nil { + return 0, err + } + emptyListAccessor, err := meta.ListAccessor(emptyList) + if err != nil { + return 0, err + } + if emptyListAccessor == nil { + return 0, fmt.Errorf("unable to extract a list accessor from %T", emptyList) + } + + currentResourceVersion, err := strconv.Atoi(emptyListAccessor.GetResourceVersion()) + if err != nil { + return 0, err + } + + if currentResourceVersion == 0 { + return 0, fmt.Errorf("the current resource version must be greater than 0") + } + return uint64(currentResourceVersion), nil +} + +// AnnotateInitialEventsEndBookmark adds a special annotation to the given object +// which indicates that the initial events have been sent. +// +// Note that this function assumes that the obj's annotation +// field is a reference type (i.e. a map). +func AnnotateInitialEventsEndBookmark(obj runtime.Object) error { + objMeta, err := meta.Accessor(obj) + if err != nil { + return err + } + objAnnotations := objMeta.GetAnnotations() + if objAnnotations == nil { + objAnnotations = map[string]string{} + } + objAnnotations[initialEventsAnnotationKey] = "true" + objMeta.SetAnnotations(objAnnotations) + return nil +} + +// HasInitialEventsEndBookmarkAnnotation checks the presence of the +// special annotation which marks that the initial events have been sent. 
+func HasInitialEventsEndBookmarkAnnotation(obj runtime.Object) (bool, error) { + objMeta, err := meta.Accessor(obj) + if err != nil { + return false, err + } + objAnnotations := objMeta.GetAnnotations() + return objAnnotations[initialEventsAnnotationKey] == "true", nil +} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/cache.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/cache.go index c677f54b5..bc7f04b9c 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/cache.go +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/cache.go @@ -26,6 +26,7 @@ import ( utilcache "k8s.io/apimachinery/pkg/util/cache" "k8s.io/apiserver/pkg/storage/value" + "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics" "k8s.io/utils/clock" ) @@ -38,10 +39,13 @@ type simpleCache struct { ttl time.Duration // hashPool is a per cache pool of hash.Hash (to avoid allocations from building the Hash) // SHA-256 is used to prevent collisions - hashPool *sync.Pool + hashPool *sync.Pool + providerName string + mu sync.Mutex // guards call to set + recordCacheSize func(providerName string, size int) // for unit tests } -func newSimpleCache(clock clock.Clock, ttl time.Duration) *simpleCache { +func newSimpleCache(clock clock.Clock, ttl time.Duration, providerName string) *simpleCache { cache := utilcache.NewExpiringWithClock(clock) cache.AllowExpiredGet = true // for a given key, the value (the decryptTransformer) is always the same return &simpleCache{ @@ -52,6 +56,8 @@ func newSimpleCache(clock clock.Clock, ttl time.Duration) *simpleCache { return sha256.New() }, }, + providerName: providerName, + recordCacheSize: metrics.RecordDekSourceCacheSize, } } @@ -66,6 +72,8 @@ func (c *simpleCache) get(key []byte) value.Read { // set caches the record for the key func (c *simpleCache) set(key []byte, transformer value.Read) { + c.mu.Lock() + defer c.mu.Unlock() if len(key) == 0 { panic("key must not be empty") } @@ -73,6 +81,8 @@ func (c *simpleCache) set(key []byte, transformer value.Read) { panic("transformer must not be nil") } c.cache.Set(c.keyFunc(key), transformer, c.ttl) + // Add metrics for cache size + c.recordCacheSize(c.providerName, c.cache.Len()) } // keyFunc generates a string key by hashing the inputs. diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/envelope.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/envelope.go index 45d5db58b..a20b10fc3 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/envelope.go +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/envelope.go @@ -28,6 +28,7 @@ import ( "unsafe" "github.com/gogo/protobuf/proto" + "go.opentelemetry.io/otel/attribute" "golang.org/x/crypto/cryptobyte" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -39,21 +40,22 @@ import ( aestransformer "k8s.io/apiserver/pkg/storage/value/encrypt/aes" kmstypes "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2" "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics" + "k8s.io/component-base/tracing" "k8s.io/klog/v2" kmsservice "k8s.io/kms/pkg/service" "k8s.io/utils/clock" ) -// TODO integration test with old AES GCM data recorded and new KDF data recorded - func init() { value.RegisterMetrics() metrics.RegisterMetrics() } const ( - // KMSAPIVersion is the version of the KMS API. - KMSAPIVersion = "v2beta1" + // KMSAPIVersionv2 is a version of the KMS API. 
+ KMSAPIVersionv2 = "v2" + // KMSAPIVersionv2beta1 is a version of the KMS API. + KMSAPIVersionv2beta1 = "v2beta1" // annotationsMaxSize is the maximum size of the annotations. annotationsMaxSize = 32 * 1024 // 32 kB // KeyIDMaxSize is the maximum size of the keyID. @@ -112,32 +114,51 @@ type envelopeTransformer struct { stateFunc StateFunc // cache is a thread-safe expiring lru cache which caches decrypted DEKs indexed by their encrypted form. - cache *simpleCache + cache *simpleCache + apiServerID string } // NewEnvelopeTransformer returns a transformer which implements a KEK-DEK based envelope encryption scheme. // It uses envelopeService to encrypt and decrypt DEKs. Respective DEKs (in encrypted form) are prepended to // the data items they encrypt. -func NewEnvelopeTransformer(envelopeService kmsservice.Service, providerName string, stateFunc StateFunc) value.Transformer { - return newEnvelopeTransformerWithClock(envelopeService, providerName, stateFunc, cacheTTL, clock.RealClock{}) +func NewEnvelopeTransformer(envelopeService kmsservice.Service, providerName string, stateFunc StateFunc, apiServerID string) value.Transformer { + return newEnvelopeTransformerWithClock(envelopeService, providerName, stateFunc, apiServerID, cacheTTL, clock.RealClock{}) } -func newEnvelopeTransformerWithClock(envelopeService kmsservice.Service, providerName string, stateFunc StateFunc, cacheTTL time.Duration, clock clock.Clock) value.Transformer { +func newEnvelopeTransformerWithClock(envelopeService kmsservice.Service, providerName string, stateFunc StateFunc, apiServerID string, cacheTTL time.Duration, clock clock.Clock) value.Transformer { return &envelopeTransformer{ envelopeService: envelopeService, providerName: providerName, stateFunc: stateFunc, - cache: newSimpleCache(clock, cacheTTL), + cache: newSimpleCache(clock, cacheTTL, providerName), + apiServerID: apiServerID, } } // TransformFromStorage decrypts data encrypted by this transformer using envelope encryption. func (t *envelopeTransformer) TransformFromStorage(ctx context.Context, data []byte, dataCtx value.Context) ([]byte, bool, error) { + ctx, span := tracing.Start(ctx, "TransformFromStorage with envelopeTransformer", + attribute.String("transformer.provider.name", t.providerName), + // The service.instance_id of the apiserver is already available in the trace + /* + { + "key": "service.instance.id", + "type": "string", + "value": "apiserver-zsteyir5lyrtdcmqqmd5kzze6m" + } + */ + ) + defer span.End(500 * time.Millisecond) + + span.AddEvent("About to decode encrypted object") // Deserialize the EncryptedObject from the data. 
encryptedObject, err := t.doDecode(data) if err != nil { + span.AddEvent("Decoding encrypted object failed") + span.RecordError(err) return nil, false, err } + span.AddEvent("Decoded encrypted object") useSeed := encryptedObject.EncryptedDEKSourceType == kmstypes.EncryptedDEKSourceType_HKDF_SHA256_XNONCE_AES_GCM_SEED @@ -158,6 +179,7 @@ func (t *envelopeTransformer) TransformFromStorage(ctx context.Context, data []b // fallback to the envelope service if we do not have the transformer locally if transformer == nil { + span.AddEvent("About to decrypt DEK using remote service") value.RecordCacheMiss() requestInfo := getRequestInfoFromContext(ctx) @@ -172,21 +194,28 @@ func (t *envelopeTransformer) TransformFromStorage(ctx context.Context, data []b Annotations: encryptedObject.Annotations, }) if err != nil { + span.AddEvent("DEK decryption failed") + span.RecordError(err) return nil, false, fmt.Errorf("failed to decrypt DEK, error: %w", err) } + span.AddEvent("DEK decryption succeeded") transformer, err = t.addTransformerForDecryption(encryptedObjectCacheKey, key, useSeed) if err != nil { return nil, false, err } } - metrics.RecordKeyID(metrics.FromStorageLabel, t.providerName, encryptedObject.KeyID) + metrics.RecordKeyID(metrics.FromStorageLabel, t.providerName, encryptedObject.KeyID, t.apiServerID) + span.AddEvent("About to decrypt data using DEK") out, stale, err := transformer.TransformFromStorage(ctx, encryptedObject.EncryptedData, dataCtx) if err != nil { + span.AddEvent("Data decryption failed") + span.RecordError(err) return nil, false, err } + span.AddEvent("Data decryption succeeded") // data is considered stale if the key ID does not match our current write transformer return out, stale || @@ -197,6 +226,19 @@ func (t *envelopeTransformer) TransformFromStorage(ctx context.Context, data []b // TransformToStorage encrypts data to be written to disk using envelope encryption. func (t *envelopeTransformer) TransformToStorage(ctx context.Context, data []byte, dataCtx value.Context) ([]byte, error) { + ctx, span := tracing.Start(ctx, "TransformToStorage with envelopeTransformer", + attribute.String("transformer.provider.name", t.providerName), + // The service.instance_id of the apiserver is already available in the trace + /* + { + "key": "service.instance.id", + "type": "string", + "value": "apiserver-zsteyir5lyrtdcmqqmd5kzze6m" + } + */ + ) + defer span.End(500 * time.Millisecond) + state, err := t.stateFunc() if err != nil { return nil, err @@ -208,7 +250,6 @@ func (t *envelopeTransformer) TransformToStorage(ctx context.Context, data []byt // this prevents a cache miss every time the DEK rotates // this has the side benefit of causing the cache to perform a GC // TODO see if we can do this inside the stateFunc control loop - // TODO(aramase): Add metrics for cache size. 
t.cache.set(state.CacheKey, state.Transformer) requestInfo := getRequestInfoFromContext(ctx) @@ -216,18 +257,31 @@ func (t *envelopeTransformer) TransformToStorage(ctx context.Context, data []byt "group", requestInfo.APIGroup, "version", requestInfo.APIVersion, "resource", requestInfo.Resource, "subresource", requestInfo.Subresource, "verb", requestInfo.Verb, "namespace", requestInfo.Namespace, "name", requestInfo.Name) + span.AddEvent("About to encrypt data using DEK") result, err := state.Transformer.TransformToStorage(ctx, data, dataCtx) if err != nil { + span.AddEvent("Data encryption failed") + span.RecordError(err) return nil, err } + span.AddEvent("Data encryption succeeded") - metrics.RecordKeyID(metrics.ToStorageLabel, t.providerName, state.EncryptedObject.KeyID) + metrics.RecordKeyID(metrics.ToStorageLabel, t.providerName, state.EncryptedObject.KeyID, t.apiServerID) encObjectCopy := state.EncryptedObject encObjectCopy.EncryptedData = result + span.AddEvent("About to encode encrypted object") // Serialize the EncryptedObject to a byte array. - return t.doEncode(&encObjectCopy) + out, err := t.doEncode(&encObjectCopy) + if err != nil { + span.AddEvent("Encoding encrypted object failed") + span.RecordError(err) + return nil, err + } + span.AddEvent("Encoded encrypted object") + + return out, nil } // addTransformerForDecryption inserts a new transformer to the Envelope cache of DEKs for future reads. @@ -250,7 +304,6 @@ func (t *envelopeTransformer) addTransformerForDecryption(cacheKey []byte, key [ if err != nil { return nil, err } - // TODO(aramase): Add metrics for cache size. t.cache.set(cacheKey, transformer) return transformer, nil } diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2/api.pb.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2/api.pb.go index 811c8f67d..25150288f 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2/api.pb.go +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2/api.pb.go @@ -71,11 +71,20 @@ type EncryptedObject struct { // EncryptedData is the encrypted data. EncryptedData []byte `protobuf:"bytes,1,opt,name=encryptedData,proto3" json:"encryptedData,omitempty"` // KeyID is the KMS key ID used for encryption operations. + // keyID must satisfy the following constraints: + // 1. The keyID is not empty. + // 2. The size of keyID is less than 1 kB. KeyID string `protobuf:"bytes,2,opt,name=keyID,proto3" json:"keyID,omitempty"` // EncryptedDEKSource is the ciphertext of the source of the DEK used to encrypt the data stored in encryptedData. // encryptedDEKSourceType defines the process of using the plaintext of this field to determine the aforementioned DEK. + // encryptedDEKSource must satisfy the following constraints: + // 1. The encrypted DEK source is not empty. + // 2. The size of encrypted DEK source is less than 1 kB. EncryptedDEKSource []byte `protobuf:"bytes,3,opt,name=encryptedDEKSource,proto3" json:"encryptedDEKSource,omitempty"` // Annotations is additional metadata that was provided by the KMS plugin. + // Annotations must satisfy the following constraints: + // 1. Annotation key must be a fully qualified domain name that conforms to the definition in DNS (RFC 1123). + // 2. The size of annotations keys + values is less than 32 kB. 
Annotations map[string][]byte `protobuf:"bytes,4,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // encryptedDEKSourceType defines the process of using the plaintext of encryptedDEKSource to determine the DEK. EncryptedDEKSourceType EncryptedDEKSourceType `protobuf:"varint,5,opt,name=encryptedDEKSourceType,proto3,enum=v2.EncryptedDEKSourceType" json:"encryptedDEKSourceType,omitempty"` diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2/api.proto b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2/api.proto index ec1eb2680..674b3f4a9 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2/api.proto +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2/api.proto @@ -26,13 +26,22 @@ message EncryptedObject { bytes encryptedData = 1; // KeyID is the KMS key ID used for encryption operations. + // keyID must satisfy the following constraints: + // 1. The keyID is not empty. + // 2. The size of keyID is less than 1 kB. string keyID = 2; // EncryptedDEKSource is the ciphertext of the source of the DEK used to encrypt the data stored in encryptedData. // encryptedDEKSourceType defines the process of using the plaintext of this field to determine the aforementioned DEK. + // encryptedDEKSource must satisfy the following constraints: + // 1. The encrypted DEK source is not empty. + // 2. The size of encrypted DEK source is less than 1 kB. bytes encryptedDEKSource = 3; // Annotations is additional metadata that was provided by the KMS plugin. + // Annotations must satisfy the following constraints: + // 1. Annotation key must be a fully qualified domain name that conforms to the definition in DNS (RFC 1123). + // 2. The size of annotations keys + values is less than 32 kB. map annotations = 4; // encryptedDEKSourceType defines the process of using the plaintext of encryptedDEKSource to determine the DEK. diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics/metrics.go index ff3903805..63723648c 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics/metrics.go @@ -44,6 +44,7 @@ type metricLabels struct { transformationType string providerName string keyIDHash string + apiServerIDHash string } /* @@ -107,21 +108,21 @@ var ( // keyIDHashTotal is the number of times a keyID is used // e.g. apiserver_envelope_encryption_key_id_hash_total counter - // apiserver_envelope_encryption_key_id_hash_total{key_id_hash="sha256", + // apiserver_envelope_encryption_key_id_hash_total{apiserver_id_hash="sha256",key_id_hash="sha256", // provider_name="providerName",transformation_type="from_storage"} 1 KeyIDHashTotal = metrics.NewCounterVec( &metrics.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "key_id_hash_total", - Help: "Number of times a keyID is used split by transformation type and provider.", + Help: "Number of times a keyID is used split by transformation type, provider, and apiserver identity.", StabilityLevel: metrics.ALPHA, }, - []string{"transformation_type", "provider_name", "key_id_hash"}, + []string{"transformation_type", "provider_name", "key_id_hash", "apiserver_id_hash"}, ) // keyIDHashLastTimestampSeconds is the last time in seconds when a keyID was used - // e.g. 
apiserver_envelope_encryption_key_id_hash_last_timestamp_seconds{key_id_hash="sha256", provider_name="providerName",transformation_type="from_storage"} 1.674865558833728e+09 + // e.g. apiserver_envelope_encryption_key_id_hash_last_timestamp_seconds{apiserver_id_hash="sha256",key_id_hash="sha256", provider_name="providerName",transformation_type="from_storage"} 1.674865558833728e+09 KeyIDHashLastTimestampSeconds = metrics.NewGaugeVec( &metrics.GaugeOpts{ Namespace: namespace, @@ -130,11 +131,11 @@ var ( Help: "The last time in seconds when a keyID was used.", StabilityLevel: metrics.ALPHA, }, - []string{"transformation_type", "provider_name", "key_id_hash"}, + []string{"transformation_type", "provider_name", "key_id_hash", "apiserver_id_hash"}, ) // keyIDHashStatusLastTimestampSeconds is the last time in seconds when a keyID was returned by the Status RPC call. - // e.g. apiserver_envelope_encryption_key_id_hash_status_last_timestamp_seconds{key_id_hash="sha256", provider_name="providerName"} 1.674865558833728e+09 + // e.g. apiserver_envelope_encryption_key_id_hash_status_last_timestamp_seconds{apiserver_id_hash="sha256",key_id_hash="sha256", provider_name="providerName"} 1.674865558833728e+09 KeyIDHashStatusLastTimestampSeconds = metrics.NewGaugeVec( &metrics.GaugeOpts{ Namespace: namespace, @@ -143,7 +144,7 @@ var ( Help: "The last time in seconds when a keyID was returned by the Status RPC call.", StabilityLevel: metrics.ALPHA, }, - []string{"provider_name", "key_id_hash"}, + []string{"provider_name", "key_id_hash", "apiserver_id_hash"}, ) InvalidKeyIDFromStatusTotal = metrics.NewCounterVec( @@ -156,6 +157,17 @@ var ( }, []string{"provider_name", "error"}, ) + + DekSourceCacheSize = metrics.NewGaugeVec( + &metrics.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "dek_source_cache_size", + Help: "Number of records in data encryption key (DEK) source cache. 
On a restart, this value is an approximation of the number of decrypt RPC calls the server will make to the KMS plugin.", + StabilityLevel: metrics.ALPHA, + }, + []string{"provider_name"}, + ) ) var registerMetricsFunc sync.Once @@ -171,19 +183,19 @@ func registerLRUMetrics() { keyIDHashTotalMetricLabels = lru.NewWithEvictionFunc(cacheSize, func(key lru.Key, _ interface{}) { item := key.(metricLabels) - if deleted := KeyIDHashTotal.DeleteLabelValues(item.transformationType, item.providerName, item.keyIDHash); deleted { + if deleted := KeyIDHashTotal.DeleteLabelValues(item.transformationType, item.providerName, item.keyIDHash, item.apiServerIDHash); deleted { klog.InfoS("Deleted keyIDHashTotalMetricLabels", "transformationType", item.transformationType, - "providerName", item.providerName, "keyIDHash", item.keyIDHash) + "providerName", item.providerName, "keyIDHash", item.keyIDHash, "apiServerIDHash", item.apiServerIDHash) } - if deleted := KeyIDHashLastTimestampSeconds.DeleteLabelValues(item.transformationType, item.providerName, item.keyIDHash); deleted { + if deleted := KeyIDHashLastTimestampSeconds.DeleteLabelValues(item.transformationType, item.providerName, item.keyIDHash, item.apiServerIDHash); deleted { klog.InfoS("Deleted keyIDHashLastTimestampSecondsMetricLabels", "transformationType", item.transformationType, - "providerName", item.providerName, "keyIDHash", item.keyIDHash) + "providerName", item.providerName, "keyIDHash", item.keyIDHash, "apiServerIDHash", item.apiServerIDHash) } }) keyIDHashStatusLastTimestampSecondsMetricLabels = lru.NewWithEvictionFunc(cacheSize, func(key lru.Key, _ interface{}) { item := key.(metricLabels) - if deleted := KeyIDHashStatusLastTimestampSeconds.DeleteLabelValues(item.providerName, item.keyIDHash); deleted { - klog.InfoS("Deleted keyIDHashStatusLastTimestampSecondsMetricLabels", "providerName", item.providerName, "keyIDHash", item.keyIDHash) + if deleted := KeyIDHashStatusLastTimestampSeconds.DeleteLabelValues(item.providerName, item.keyIDHash, item.apiServerIDHash); deleted { + klog.InfoS("Deleted keyIDHashStatusLastTimestampSecondsMetricLabels", "providerName", item.providerName, "keyIDHash", item.keyIDHash, "apiServerIDHash", item.apiServerIDHash) } }) } @@ -197,6 +209,7 @@ func RegisterMetrics() { } legacyregistry.MustRegister(dekCacheFillPercent) legacyregistry.MustRegister(dekCacheInterArrivals) + legacyregistry.MustRegister(DekSourceCacheSize) legacyregistry.MustRegister(KeyIDHashTotal) legacyregistry.MustRegister(KeyIDHashLastTimestampSeconds) legacyregistry.MustRegister(KeyIDHashStatusLastTimestampSeconds) @@ -206,22 +219,22 @@ func RegisterMetrics() { } // RecordKeyID records total count and last time in seconds when a KeyID was used for TransformFromStorage and TransformToStorage operations -func RecordKeyID(transformationType, providerName, keyID string) { +func RecordKeyID(transformationType, providerName, keyID, apiServerID string) { lockRecordKeyID.Lock() defer lockRecordKeyID.Unlock() - keyIDHash := addLabelToCache(keyIDHashTotalMetricLabels, transformationType, providerName, keyID) - KeyIDHashTotal.WithLabelValues(transformationType, providerName, keyIDHash).Inc() - KeyIDHashLastTimestampSeconds.WithLabelValues(transformationType, providerName, keyIDHash).SetToCurrentTime() + keyIDHash, apiServerIDHash := addLabelToCache(keyIDHashTotalMetricLabels, transformationType, providerName, keyID, apiServerID) + KeyIDHashTotal.WithLabelValues(transformationType, providerName, keyIDHash, apiServerIDHash).Inc() + 
KeyIDHashLastTimestampSeconds.WithLabelValues(transformationType, providerName, keyIDHash, apiServerIDHash).SetToCurrentTime() } // RecordKeyIDFromStatus records last time in seconds when a KeyID was returned by the Status RPC call. -func RecordKeyIDFromStatus(providerName, keyID string) { +func RecordKeyIDFromStatus(providerName, keyID, apiServerID string) { lockRecordKeyIDStatus.Lock() defer lockRecordKeyIDStatus.Unlock() - keyIDHash := addLabelToCache(keyIDHashStatusLastTimestampSecondsMetricLabels, "", providerName, keyID) - KeyIDHashStatusLastTimestampSeconds.WithLabelValues(providerName, keyIDHash).SetToCurrentTime() + keyIDHash, apiServerIDHash := addLabelToCache(keyIDHashStatusLastTimestampSecondsMetricLabels, "", providerName, keyID, apiServerID) + KeyIDHashStatusLastTimestampSeconds.WithLabelValues(providerName, keyIDHash, apiServerIDHash).SetToCurrentTime() } func RecordInvalidKeyIDFromStatus(providerName, errCode string) { @@ -255,6 +268,10 @@ func RecordDekCacheFillPercent(percent float64) { dekCacheFillPercent.Set(percent) } +func RecordDekSourceCacheSize(providerName string, size int) { + DekSourceCacheSize.WithLabelValues(providerName).Set(float64(size)) +} + // RecordKMSOperationLatency records the latency of KMS operation. func RecordKMSOperationLatency(providerName, methodName string, duration time.Duration, err error) { KMSOperationsLatencyMetric.WithLabelValues(providerName, methodName, getErrorCode(err)).Observe(duration.Seconds()) @@ -281,24 +298,25 @@ func getErrorCode(err error) string { } func getHash(data string) string { + if len(data) == 0 { + return "" + } h := hashPool.Get().(hash.Hash) h.Reset() h.Write([]byte(data)) - result := fmt.Sprintf("sha256:%x", h.Sum(nil)) + dataHash := fmt.Sprintf("sha256:%x", h.Sum(nil)) hashPool.Put(h) - return result + return dataHash } -func addLabelToCache(c *lru.Cache, transformationType, providerName, keyID string) string { - keyIDHash := "" - // only get hash if the keyID is not empty - if len(keyID) > 0 { - keyIDHash = getHash(keyID) - } +func addLabelToCache(c *lru.Cache, transformationType, providerName, keyID, apiServerID string) (string, string) { + keyIDHash := getHash(keyID) + apiServerIDHash := getHash(apiServerID) c.Add(metricLabels{ transformationType: transformationType, providerName: providerName, keyIDHash: keyIDHash, + apiServerIDHash: apiServerIDHash, }, nil) // value is irrelevant, this is a set and not a map - return keyIDHash + return keyIDHash, apiServerIDHash } diff --git a/vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go b/vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go index ffc1a0e40..9a6b2a28e 100644 --- a/vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go +++ b/vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go @@ -19,7 +19,7 @@ package apihelpers import ( "sort" - flowcontrol "k8s.io/api/flowcontrol/v1beta3" + flowcontrol "k8s.io/api/flowcontrol/v1" ) // SetFlowSchemaCondition sets conditions. 
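Note on the kmsv2 metrics changes above: RecordKeyID and RecordKeyIDFromStatus now take the apiserver identity as an additional argument, and a new per-provider gauge (dek_source_cache_size) reports the size of the DEK source cache. The following is a minimal sketch of a caller against the updated surface, assuming the vendored package path shown in the diff; the provider name, key ID, and apiserver ID literals are made-up example values, not values taken from this patch.

package main

import (
	kmsmetrics "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics"
)

func main() {
	// Register the envelope encryption metrics, including the new DekSourceCacheSize gauge.
	kmsmetrics.RegisterMetrics()

	// RecordKeyID now carries the apiserver identity as its fourth argument. Both the key ID
	// and the apiserver ID are reduced to sha256 hashes before being used as label values,
	// so the raw identifiers never appear on the exported series.
	// "kms-provider", "key-1", and "apiserver-abc" are hypothetical example values.
	kmsmetrics.RecordKeyID(kmsmetrics.FromStorageLabel, "kms-provider", "key-1", "apiserver-abc")
	kmsmetrics.RecordKeyIDFromStatus("kms-provider", "key-1", "apiserver-abc")

	// New gauge: number of DEK source entries currently cached for this provider.
	kmsmetrics.RecordDekSourceCacheSize("kms-provider", 3)
}

As the eviction functions in registerLRUMetrics above show, once a (transformation type, provider, key ID hash, apiserver ID hash) tuple ages out of the label LRU, the corresponding series are deleted with the same four label values.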
diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go index 708bf2cde..d40cae509 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go @@ -50,10 +50,10 @@ import ( "k8s.io/klog/v2" "k8s.io/utils/clock" - flowcontrol "k8s.io/api/flowcontrol/v1beta3" - flowcontrolapplyconfiguration "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" - flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3" - flowcontrollister "k8s.io/client-go/listers/flowcontrol/v1beta3" + flowcontrol "k8s.io/api/flowcontrol/v1" + flowcontrolapplyconfiguration "k8s.io/client-go/applyconfigurations/flowcontrol/v1" + flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1" + flowcontrollister "k8s.io/client-go/listers/flowcontrol/v1" ) const timeFmt = "2006-01-02T15:04:05.999" @@ -143,16 +143,13 @@ type configController struct { fsLister flowcontrollister.FlowSchemaLister fsInformerSynced cache.InformerSynced - flowcontrolClient flowcontrolclient.FlowcontrolV1beta3Interface + flowcontrolClient flowcontrolclient.FlowcontrolV1Interface // serverConcurrencyLimit is the limit on the server's total // number of non-exempt requests being served at once. This comes // from server configuration. serverConcurrencyLimit int - // requestWaitLimit comes from server configuration. - requestWaitLimit time.Duration - // watchTracker implements the necessary WatchTracker interface. WatchTracker @@ -263,9 +260,15 @@ type seatDemandStats struct { } func (stats *seatDemandStats) update(obs fq.IntegratorResults) { + stats.highWatermark = obs.Max + if obs.Duration <= 0 { + return + } + if math.IsNaN(obs.Deviation) { + obs.Deviation = 0 + } stats.avg = obs.Average stats.stdDev = obs.Deviation - stats.highWatermark = obs.Max envelope := obs.Average + obs.Deviation stats.smoothed = math.Max(envelope, seatDemandSmoothingCoefficient*stats.smoothed+(1-seatDemandSmoothingCoefficient)*envelope) } @@ -281,19 +284,18 @@ func newTestableController(config TestableConfig) *configController { asFieldManager: config.AsFieldManager, foundToDangling: config.FoundToDangling, serverConcurrencyLimit: config.ServerConcurrencyLimit, - requestWaitLimit: config.RequestWaitLimit, flowcontrolClient: config.FlowcontrolClient, priorityLevelStates: make(map[string]*priorityLevelState), WatchTracker: NewWatchTracker(), MaxSeatsTracker: NewMaxSeatsTracker(), } - klog.V(2).Infof("NewTestableController %q with serverConcurrencyLimit=%d, requestWaitLimit=%s, name=%s, asFieldManager=%q", cfgCtlr.name, cfgCtlr.serverConcurrencyLimit, cfgCtlr.requestWaitLimit, cfgCtlr.name, cfgCtlr.asFieldManager) + klog.V(2).Infof("NewTestableController %q with serverConcurrencyLimit=%d, name=%s, asFieldManager=%q", cfgCtlr.name, cfgCtlr.serverConcurrencyLimit, cfgCtlr.name, cfgCtlr.asFieldManager) // Start with longish delay because conflicts will be between // different processes, so take some time to go away. 
cfgCtlr.configQueue = workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 8*time.Hour), "priority_and_fairness_config_queue") // ensure the data structure reflects the mandatory config cfgCtlr.lockAndDigestConfigObjects(nil, nil) - fci := config.InformerFactory.Flowcontrol().V1beta3() + fci := config.InformerFactory.Flowcontrol().V1() pli := fci.PriorityLevelConfigurations() fsi := fci.FlowSchemas() cfgCtlr.plLister = pli.Lister() @@ -427,7 +429,7 @@ func (cfgCtlr *configController) updateBorrowingLocked(setCompleters bool, plSta plState := plStates[plName] if setCompleters { qsCompleter, err := queueSetCompleterForPL(cfgCtlr.queueSetFactory, plState.queues, - plState.pl, cfgCtlr.requestWaitLimit, plState.reqsGaugePair, plState.execSeatsObs, + plState.pl, plState.reqsGaugePair, plState.execSeatsObs, metrics.NewUnionGauge(plState.seatDemandIntegrator, plState.seatDemandRatioedGauge)) if err != nil { klog.ErrorS(err, "Inconceivable! Configuration error in existing priority level", "pl", plState.pl) @@ -651,10 +653,10 @@ func (cfgCtlr *configController) lockAndDigestConfigObjects(newPLs []*flowcontro // Supply missing mandatory PriorityLevelConfiguration objects if !meal.haveExemptPL { - meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationExempt, cfgCtlr.requestWaitLimit) + meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationExempt) } if !meal.haveCatchAllPL { - meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationCatchAll, cfgCtlr.requestWaitLimit) + meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationCatchAll) } meal.finishQueueSetReconfigsLocked() @@ -686,7 +688,7 @@ func (meal *cfgMeal) digestNewPLsLocked(newPLs []*flowcontrol.PriorityLevelConfi } } qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, state.queues, - pl, meal.cfgCtlr.requestWaitLimit, state.reqsGaugePair, state.execSeatsObs, + pl, state.reqsGaugePair, state.execSeatsObs, metrics.NewUnionGauge(state.seatDemandIntegrator, state.seatDemandRatioedGauge)) if err != nil { klog.Warningf("Ignoring PriorityLevelConfiguration object %s because its spec (%s) is broken: %s", pl.Name, fcfmt.Fmt(pl.Spec), err) @@ -700,7 +702,7 @@ func (meal *cfgMeal) digestNewPLsLocked(newPLs []*flowcontrol.PriorityLevelConfi state.quiescing = false } nominalConcurrencyShares, _, _ := plSpecCommons(state.pl) - meal.shareSum += float64(nominalConcurrencyShares) + meal.shareSum += float64(*nominalConcurrencyShares) meal.haveExemptPL = meal.haveExemptPL || pl.Name == flowcontrol.PriorityLevelConfigurationNameExempt meal.haveCatchAllPL = meal.haveCatchAllPL || pl.Name == flowcontrol.PriorityLevelConfigurationNameCatchAll } @@ -792,7 +794,7 @@ func (meal *cfgMeal) processOldPLsLocked() { } var err error plState.qsCompleter, err = queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, plState.queues, - plState.pl, meal.cfgCtlr.requestWaitLimit, plState.reqsGaugePair, plState.execSeatsObs, + plState.pl, plState.reqsGaugePair, plState.execSeatsObs, metrics.NewUnionGauge(plState.seatDemandIntegrator, plState.seatDemandRatioedGauge)) if err != nil { // This can not happen because queueSetCompleterForPL already approved this config @@ -805,7 +807,7 @@ func (meal *cfgMeal) processOldPLsLocked() { // allocation determined by all the share values in the // regular way. 
nominalConcurrencyShares, _, _ := plSpecCommons(plState.pl) - meal.shareSum += float64(nominalConcurrencyShares) + meal.shareSum += float64(*nominalConcurrencyShares) meal.haveExemptPL = meal.haveExemptPL || plName == flowcontrol.PriorityLevelConfigurationNameExempt meal.haveCatchAllPL = meal.haveCatchAllPL || plName == flowcontrol.PriorityLevelConfigurationNameCatchAll meal.newPLStates[plName] = plState @@ -821,7 +823,7 @@ func (meal *cfgMeal) finishQueueSetReconfigsLocked() { // The use of math.Ceil here means that the results might sum // to a little more than serverConcurrencyLimit but the // difference will be negligible. - concurrencyLimit := int(math.Ceil(float64(meal.cfgCtlr.serverConcurrencyLimit) * float64(nominalConcurrencyShares) / meal.shareSum)) + concurrencyLimit := int(math.Ceil(float64(meal.cfgCtlr.serverConcurrencyLimit) * float64(*nominalConcurrencyShares) / meal.shareSum)) var lendableCL, borrowingCL int if lendablePercent != nil { lendableCL = int(math.Round(float64(concurrencyLimit) * float64(*lendablePercent) / 100)) @@ -874,7 +876,7 @@ func (meal *cfgMeal) finishQueueSetReconfigsLocked() { // queueSetCompleterForPL returns an appropriate QueueSetCompleter for the // given priority level configuration. Returns nil and an error if the given // object is malformed in a way that is a problem for this package. -func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flowcontrol.PriorityLevelConfiguration, requestWaitLimit time.Duration, reqsIntPair metrics.RatioedGaugePair, execSeatsObs metrics.RatioedGauge, seatDemandGauge metrics.Gauge) (fq.QueueSetCompleter, error) { +func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flowcontrol.PriorityLevelConfiguration, reqsIntPair metrics.RatioedGaugePair, execSeatsObs metrics.RatioedGauge, seatDemandGauge metrics.Gauge) (fq.QueueSetCompleter, error) { if (pl.Spec.Type == flowcontrol.PriorityLevelEnablementLimited) != (pl.Spec.Limited != nil) { return nil, errors.New("broken union structure at the top, for Limited") } @@ -896,7 +898,6 @@ func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flow DesiredNumQueues: int(qcAPI.Queues), QueueLengthLimit: int(qcAPI.QueueLengthLimit), HandSize: int(qcAPI.HandSize), - RequestWaitLimit: requestWaitLimit, } } } else { @@ -950,16 +951,15 @@ func (meal *cfgMeal) presyncFlowSchemaStatus(fs *flowcontrol.FlowSchema, isDangl // imaginePL adds a priority level based on one of the mandatory ones // that does not actually exist (right now) as a real API object. 
-func (meal *cfgMeal) imaginePL(proto *flowcontrol.PriorityLevelConfiguration, requestWaitLimit time.Duration) { +func (meal *cfgMeal) imaginePL(proto *flowcontrol.PriorityLevelConfiguration) { klog.V(3).Infof("No %s PriorityLevelConfiguration found, imagining one", proto.Name) labelValues := []string{proto.Name} reqsGaugePair := metrics.RatioedGaugeVecPhasedElementPair(meal.cfgCtlr.reqsGaugeVec, 1, 1, labelValues) execSeatsObs := meal.cfgCtlr.execSeatsGaugeVec.NewForLabelValuesSafe(0, 1, labelValues) seatDemandIntegrator := fq.NewNamedIntegrator(meal.cfgCtlr.clock, proto.Name) seatDemandRatioedGauge := metrics.ApiserverSeatDemands.NewForLabelValuesSafe(0, 1, []string{proto.Name}) - qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, nil, proto, - requestWaitLimit, reqsGaugePair, execSeatsObs, - metrics.NewUnionGauge(seatDemandIntegrator, seatDemandRatioedGauge)) + qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, nil, proto, reqsGaugePair, + execSeatsObs, metrics.NewUnionGauge(seatDemandIntegrator, seatDemandRatioedGauge)) if err != nil { // This can not happen because proto is one of the mandatory // objects and these are not erroneous @@ -974,7 +974,7 @@ func (meal *cfgMeal) imaginePL(proto *flowcontrol.PriorityLevelConfiguration, re seatDemandRatioedGauge: seatDemandRatioedGauge, } nominalConcurrencyShares, _, _ := plSpecCommons(proto) - meal.shareSum += float64(nominalConcurrencyShares) + meal.shareSum += float64(*nominalConcurrencyShares) } // startRequest classifies and, if appropriate, enqueues the request. @@ -1112,7 +1112,7 @@ func relDiff(x, y float64) float64 { } // plSpecCommons returns the (NominalConcurrencyShares, LendablePercent, BorrowingLimitPercent) of the given priority level config -func plSpecCommons(pl *flowcontrol.PriorityLevelConfiguration) (int32, *int32, *int32) { +func plSpecCommons(pl *flowcontrol.PriorityLevelConfiguration) (*int32, *int32, *int32) { if limiter := pl.Spec.Limited; limiter != nil { return limiter.NominalConcurrencyShares, limiter.LendablePercent, limiter.BorrowingLimitPercent } @@ -1121,5 +1121,5 @@ func plSpecCommons(pl *flowcontrol.PriorityLevelConfiguration) (int32, *int32, * if limiter.NominalConcurrencyShares != nil { nominalConcurrencyShares = *limiter.NominalConcurrencyShares } - return nominalConcurrencyShares, limiter.LendablePercent, nil + return &nominalConcurrencyShares, limiter.LendablePercent, nil } diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go index 76782623a..2a4bf10f7 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go @@ -33,8 +33,8 @@ import ( "k8s.io/klog/v2" "k8s.io/utils/clock" - flowcontrol "k8s.io/api/flowcontrol/v1beta3" - flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3" + flowcontrol "k8s.io/api/flowcontrol/v1" + flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1" ) // ConfigConsumerAsFieldManager is how the config consuminng @@ -88,9 +88,8 @@ type Interface interface { // New creates a new instance to implement API priority and fairness func New( informerFactory kubeinformers.SharedInformerFactory, - flowcontrolClient flowcontrolclient.FlowcontrolV1beta3Interface, + flowcontrolClient flowcontrolclient.FlowcontrolV1Interface, serverConcurrencyLimit int, - requestWaitLimit time.Duration, ) Interface { clk := eventclock.Real{} return NewTestable(TestableConfig{ @@ 
-101,7 +100,6 @@ func New( InformerFactory: informerFactory, FlowcontrolClient: flowcontrolClient, ServerConcurrencyLimit: serverConcurrencyLimit, - RequestWaitLimit: requestWaitLimit, ReqsGaugeVec: metrics.PriorityLevelConcurrencyGaugeVec, ExecSeatsGaugeVec: metrics.PriorityLevelExecutionSeatsGaugeVec, QueueSetFactory: fqs.NewQueueSetFactory(clk), @@ -134,14 +132,11 @@ type TestableConfig struct { InformerFactory kubeinformers.SharedInformerFactory // FlowcontrolClient to use for manipulating config objects - FlowcontrolClient flowcontrolclient.FlowcontrolV1beta3Interface + FlowcontrolClient flowcontrolclient.FlowcontrolV1Interface // ServerConcurrencyLimit for the controller to enforce ServerConcurrencyLimit int - // RequestWaitLimit configured on the server - RequestWaitLimit time.Duration - // GaugeVec for metrics about requests, broken down by phase and priority_level ReqsGaugeVec metrics.RatioedGaugeVec diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go index 013fd41e0..3b0ad1638 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go @@ -18,7 +18,6 @@ package fairqueuing import ( "context" - "time" "k8s.io/apiserver/pkg/util/flowcontrol/debug" "k8s.io/apiserver/pkg/util/flowcontrol/metrics" @@ -117,7 +116,7 @@ type QueuingConfig struct { // DesiredNumQueues is the number of queues that the API says // should exist now. This may be non-positive, in which case - // QueueLengthLimit, HandSize, and RequestWaitLimit are ignored. + // QueueLengthLimit, and HandSize are ignored. // A value of zero means to respect the ConcurrencyLimit of the DispatchingConfig. // A negative value means to always dispatch immediately upon arrival // (i.e., the requests are "exempt" from limitation). @@ -129,10 +128,6 @@ type QueuingConfig struct { // HandSize is a parameter of shuffle sharding. Upon arrival of a request, a queue is chosen by randomly // dealing a "hand" of this many queues and then picking one of minimum length. HandSize int - - // RequestWaitLimit is the maximum amount of time that a request may wait in a queue. - // If, by the end of that time, the request has not been dispatched then it is rejected. - RequestWaitLimit time.Duration } // DispatchingConfig defines the configuration of the dispatching aspect of a QueueSet. diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/promise.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/promise.go index d3bda40aa..79d19d136 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/promise.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/promise.go @@ -17,12 +17,13 @@ limitations under the License. package promise import ( + "context" "sync" ) // promise implements the WriteOnce interface. type promise struct { - doneCh <-chan struct{} + doneCtx context.Context doneVal interface{} setCh chan struct{} onceler sync.Once @@ -35,12 +36,12 @@ var _ WriteOnce = &promise{} // // If `initial` is non-nil then that value is Set at creation time. // -// If a `Get` is waiting soon after `doneCh` becomes selectable (which -// never happens for the nil channel) then `Set(doneVal)` effectively -// happens at that time. 
-func NewWriteOnce(initial interface{}, doneCh <-chan struct{}, doneVal interface{}) WriteOnce { +// If a `Get` is waiting soon after the channel associated with the +// `doneCtx` becomes selectable (which never happens for the nil +// channel) then `Set(doneVal)` effectively happens at that time. +func NewWriteOnce(initial interface{}, doneCtx context.Context, doneVal interface{}) WriteOnce { p := &promise{ - doneCh: doneCh, + doneCtx: doneCtx, doneVal: doneVal, setCh: make(chan struct{}), } @@ -53,7 +54,7 @@ func NewWriteOnce(initial interface{}, doneCh <-chan struct{}, doneVal interface func (p *promise) Get() interface{} { select { case <-p.setCh: - case <-p.doneCh: + case <-p.doneCtx.Done(): p.Set(p.doneVal) } return p.value diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go index aa54a9ccf..b675bb545 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go @@ -53,7 +53,7 @@ type queueSetFactory struct { // - whose Set method is invoked with the queueSet locked, and // - whose Get method is invoked with the queueSet not locked. // The parameters are the same as for `promise.NewWriteOnce`. -type promiseFactory func(initial interface{}, doneCh <-chan struct{}, doneVal interface{}) promise.WriteOnce +type promiseFactory func(initial interface{}, doneCtx context.Context, doneVal interface{}) promise.WriteOnce // promiseFactoryFactory returns the promiseFactory to use for the given queueSet type promiseFactoryFactory func(*queueSet) promiseFactory @@ -272,7 +272,6 @@ func (qs *queueSet) setConfiguration(ctx context.Context, qCfg fq.QueuingConfig, } else { qCfg.QueueLengthLimit = qs.qCfg.QueueLengthLimit qCfg.HandSize = qs.qCfg.HandSize - qCfg.RequestWaitLimit = qs.qCfg.RequestWaitLimit } qs.qCfg = qCfg @@ -300,9 +299,6 @@ const ( // Serve this one decisionExecute requestDecision = iota - // Reject this one due to APF queuing considerations - decisionReject - // This one's context timed out / was canceled decisionCancel ) @@ -337,11 +333,10 @@ func (qs *queueSet) StartRequest(ctx context.Context, workEstimate *fqrequest.Wo // ======================================================================== // Step 1: // 1) Start with shuffle sharding, to pick a queue. 
- // 2) Reject old requests that have been waiting too long - // 3) Reject current request if there is not enough concurrency shares and + // 2) Reject current request if there is not enough concurrency shares and // we are at max queue length - // 4) If not rejected, create a request and enqueue - req = qs.timeoutOldRequestsAndRejectOrEnqueueLocked(ctx, workEstimate, hashValue, flowDistinguisher, fsName, descr1, descr2, queueNoteFn) + // 3) If not rejected, create a request and enqueue + req = qs.shuffleShardAndRejectOrEnqueueLocked(ctx, workEstimate, hashValue, flowDistinguisher, fsName, descr1, descr2, queueNoteFn) // req == nil means that the request was rejected - no remaining // concurrency shares and at max queue length already if req == nil { @@ -422,13 +417,7 @@ func (req *request) wait() (bool, bool) { } req.waitStarted = true switch decisionAny { - case decisionReject: - klog.V(5).Infof("QS(%s): request %#+v %#+v timed out after being enqueued\n", qs.qCfg.Name, req.descr1, req.descr2) - qs.totRequestsRejected++ - qs.totRequestsTimedout++ - metrics.AddReject(req.ctx, qs.qCfg.Name, req.fsName, "time-out") - return false, qs.isIdleLocked() - case decisionCancel: + case decisionCancel: // handle in code following this switch case decisionExecute: klog.V(5).Infof("QS(%s): Dispatching request %#+v %#+v from its queue", qs.qCfg.Name, req.descr1, req.descr2) return true, false @@ -438,7 +427,7 @@ func (req *request) wait() (bool, bool) { } // TODO(aaron-prindle) add metrics for this case klog.V(5).Infof("QS(%s): Ejecting request %#+v %#+v from its queue", qs.qCfg.Name, req.descr1, req.descr2) - // remove the request from the queue as it has timed out + // remove the request from the queue as its queue wait time has exceeded queue := req.queue if req.removeFromQueueLocked() != nil { defer qs.boundNextDispatchLocked(queue) @@ -446,8 +435,9 @@ func (req *request) wait() (bool, bool) { qs.totSeatsWaiting -= req.MaxSeats() qs.totRequestsRejected++ qs.totRequestsCancelled++ - metrics.AddReject(req.ctx, qs.qCfg.Name, req.fsName, "cancelled") + metrics.AddReject(req.ctx, qs.qCfg.Name, req.fsName, "time-out") metrics.AddRequestsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -1) + metrics.AddSeatsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -req.MaxSeats()) req.NoteQueued(false) qs.reqsGaugePair.RequestsWaiting.Add(-1) qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting)) @@ -555,25 +545,19 @@ func (qs *queueSet) getVirtualTimeRatioLocked() float64 { return math.Min(float64(seatsRequested), float64(qs.dCfg.ConcurrencyLimit)) / float64(activeQueues) } -// timeoutOldRequestsAndRejectOrEnqueueLocked encapsulates the logic required +// shuffleShardAndRejectOrEnqueueLocked encapsulates the logic required // to validate and enqueue a request for the queueSet/QueueSet: // 1) Start with shuffle sharding, to pick a queue. 
-// 2) Reject old requests that have been waiting too long -// 3) Reject current request if there is not enough concurrency shares and +// 2) Reject current request if there is not enough concurrency shares and // we are at max queue length -// 4) If not rejected, create a request and enqueue +// 3) If not rejected, create a request and enqueue // returns the enqueud request on a successful enqueue // returns nil in the case that there is no available concurrency or // the queuelengthlimit has been reached -func (qs *queueSet) timeoutOldRequestsAndRejectOrEnqueueLocked(ctx context.Context, workEstimate *fqrequest.WorkEstimate, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn fq.QueueNoteFn) *request { +func (qs *queueSet) shuffleShardAndRejectOrEnqueueLocked(ctx context.Context, workEstimate *fqrequest.WorkEstimate, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn fq.QueueNoteFn) *request { // Start with the shuffle sharding, to pick a queue. queueIdx := qs.shuffleShardLocked(hashValue, descr1, descr2) queue := qs.queues[queueIdx] - // The next step is the logic to reject requests that have been waiting too long - qs.removeTimedOutRequestsFromQueueToBoundLocked(queue, fsName) - // NOTE: currently timeout is only checked for each new request. This means that there can be - // requests that are in the queue longer than the timeout if there are no new requests - // We prefer the simplicity over the promptness, at least for now. defer qs.boundNextDispatchLocked(queue) @@ -583,7 +567,7 @@ func (qs *queueSet) timeoutOldRequestsAndRejectOrEnqueueLocked(ctx context.Conte fsName: fsName, flowDistinguisher: flowDistinguisher, ctx: ctx, - decision: qs.promiseFactory(nil, ctx.Done(), decisionCancel), + decision: qs.promiseFactory(nil, ctx, decisionCancel), arrivalTime: qs.clock.Now(), arrivalR: qs.currentR, queue: queue, @@ -632,43 +616,6 @@ func (qs *queueSet) shuffleShardLocked(hashValue uint64, descr1, descr2 interfac return bestQueueIdx } -// removeTimedOutRequestsFromQueueToBoundLocked rejects old requests that have been enqueued -// past the requestWaitLimit -func (qs *queueSet) removeTimedOutRequestsFromQueueToBoundLocked(queue *queue, fsName string) { - timeoutCount := 0 - disqueueSeats := 0 - now := qs.clock.Now() - reqs := queue.requestsWaiting - // reqs are sorted oldest -> newest - // can short circuit loop (break) if oldest requests are not timing out - // as newer requests also will not have timed out - - // now - requestWaitLimit = arrivalLimit - arrivalLimit := now.Add(-qs.qCfg.RequestWaitLimit) - reqs.Walk(func(req *request) bool { - if arrivalLimit.After(req.arrivalTime) { - if req.decision.Set(decisionReject) && req.removeFromQueueLocked() != nil { - timeoutCount++ - disqueueSeats += req.MaxSeats() - req.NoteQueued(false) - metrics.AddRequestsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -1) - } - // we need to check if the next request has timed out. - return true - } - // since reqs are sorted oldest -> newest, we are done here. - return false - }) - - // remove timed out requests from queue - if timeoutCount > 0 { - qs.totRequestsWaiting -= timeoutCount - qs.totSeatsWaiting -= disqueueSeats - qs.reqsGaugePair.RequestsWaiting.Add(float64(-timeoutCount)) - qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting)) - } -} - // rejectOrEnqueueToBoundLocked rejects or enqueues the newly arrived // request, which has been assigned to a queue. 
If up against the // queue length limit and the concurrency limit then returns false. @@ -702,6 +649,7 @@ func (qs *queueSet) enqueueToBoundLocked(request *request) { qs.totRequestsWaiting++ qs.totSeatsWaiting += request.MaxSeats() metrics.AddRequestsInQueues(request.ctx, qs.qCfg.Name, request.fsName, 1) + metrics.AddSeatsInQueues(request.ctx, qs.qCfg.Name, request.fsName, request.MaxSeats()) request.NoteQueued(true) qs.reqsGaugePair.RequestsWaiting.Add(1) qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting)) @@ -722,7 +670,7 @@ func (qs *queueSet) dispatchSansQueueLocked(ctx context.Context, workEstimate *f flowDistinguisher: flowDistinguisher, ctx: ctx, startTime: now, - decision: qs.promiseFactory(decisionExecute, ctx.Done(), decisionCancel), + decision: qs.promiseFactory(decisionExecute, ctx, decisionCancel), arrivalTime: now, arrivalR: qs.currentR, descr1: descr1, @@ -760,6 +708,7 @@ func (qs *queueSet) dispatchLocked() bool { qs.totRequestsWaiting-- qs.totSeatsWaiting -= request.MaxSeats() metrics.AddRequestsInQueues(request.ctx, qs.qCfg.Name, request.fsName, -1) + metrics.AddSeatsInQueues(request.ctx, qs.qCfg.Name, request.fsName, -request.MaxSeats()) request.NoteQueued(false) qs.reqsGaugePair.RequestsWaiting.Add(-1) defer qs.boundNextDispatchLocked(queue) diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/format/formatting.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/format/formatting.go index 494442373..ced0eac31 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/format/formatting.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/format/formatting.go @@ -21,7 +21,7 @@ import ( "encoding/json" "fmt" - flowcontrol "k8s.io/api/flowcontrol/v1beta3" + flowcontrol "k8s.io/api/flowcontrol/v1" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/endpoints/request" ) @@ -93,7 +93,7 @@ func FmtPriorityLevelConfiguration(pl *flowcontrol.PriorityLevelConfiguration) s return "nil" } var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("&flowcontrolv1beta3.PriorityLevelConfiguration{ObjectMeta: %#+v, Spec: ", + buf.WriteString(fmt.Sprintf("&flowcontrolv1.PriorityLevelConfiguration{ObjectMeta: %#+v, Spec: ", pl.ObjectMeta)) BufferPriorityLevelConfigurationSpec(&buf, &pl.Spec) buf.WriteString(fmt.Sprintf(", Status: %#+v}", pl.Status)) @@ -111,7 +111,7 @@ func FmtPriorityLevelConfigurationSpec(plSpec *flowcontrol.PriorityLevelConfigur // BufferPriorityLevelConfigurationSpec writes a golang source // expression for the given value to the given buffer func BufferPriorityLevelConfigurationSpec(buf *bytes.Buffer, plSpec *flowcontrol.PriorityLevelConfigurationSpec) { - buf.WriteString(fmt.Sprintf("flowcontrolv1beta3.PriorityLevelConfigurationSpec{Type: %#v", plSpec.Type)) + buf.WriteString(fmt.Sprintf("flowcontrolv1.PriorityLevelConfigurationSpec{Type: %#v", plSpec.Type)) if plSpec.Limited != nil { buf.WriteString(fmt.Sprintf(", Limited: &flowcontrol.LimitedPriorityLevelConfiguration{NominalConcurrencyShares:%d, LimitResponse:flowcontrol.LimitResponse{Type:%#v", plSpec.Limited.NominalConcurrencyShares, plSpec.Limited.LimitResponse.Type)) if plSpec.Limited.LimitResponse.Queuing != nil { @@ -128,7 +128,7 @@ func FmtFlowSchema(fs *flowcontrol.FlowSchema) string { return "nil" } var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("&flowcontrolv1beta3.FlowSchema{ObjectMeta: %#+v, Spec: ", + buf.WriteString(fmt.Sprintf("&flowcontrolv1.FlowSchema{ObjectMeta: %#+v, Spec: ", fs.ObjectMeta)) BufferFlowSchemaSpec(&buf, &fs.Spec) buf.WriteString(fmt.Sprintf(", 
Status: %#+v}", fs.Status)) @@ -146,7 +146,7 @@ func FmtFlowSchemaSpec(fsSpec *flowcontrol.FlowSchemaSpec) string { // BufferFlowSchemaSpec writes a golang source expression for the // given value to the given buffer func BufferFlowSchemaSpec(buf *bytes.Buffer, fsSpec *flowcontrol.FlowSchemaSpec) { - buf.WriteString(fmt.Sprintf("flowcontrolv1beta3.FlowSchemaSpec{PriorityLevelConfiguration: %#+v, MatchingPrecedence: %d, DistinguisherMethod: ", + buf.WriteString(fmt.Sprintf("flowcontrolv1.FlowSchemaSpec{PriorityLevelConfiguration: %#+v, MatchingPrecedence: %d, DistinguisherMethod: ", fsSpec.PriorityLevelConfiguration, fsSpec.MatchingPrecedence)) if fsSpec.DistinguisherMethod == nil { @@ -166,7 +166,7 @@ func BufferFlowSchemaSpec(buf *bytes.Buffer, fsSpec *flowcontrol.FlowSchemaSpec) // FmtPolicyRulesWithSubjects produces a golang source expression of the value. func FmtPolicyRulesWithSubjects(rule flowcontrol.PolicyRulesWithSubjects) string { - return "flowcontrolv1beta3.PolicyRulesWithSubjects" + FmtPolicyRulesWithSubjectsSlim(rule) + return "flowcontrolv1.PolicyRulesWithSubjects" + FmtPolicyRulesWithSubjectsSlim(rule) } // FmtPolicyRulesWithSubjectsSlim produces a golang source expression @@ -182,7 +182,7 @@ func FmtPolicyRulesWithSubjectsSlim(rule flowcontrol.PolicyRulesWithSubjects) st // expression for the given value to the given buffer but excludes the // leading type name func BufferFmtPolicyRulesWithSubjectsSlim(buf *bytes.Buffer, rule flowcontrol.PolicyRulesWithSubjects) { - buf.WriteString("{Subjects: []flowcontrolv1beta3.Subject{") + buf.WriteString("{Subjects: []flowcontrolv1.Subject{") for jdx, subj := range rule.Subjects { if jdx > 0 { buf.WriteString(", ") diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go index 54af4415c..9fe7b15a0 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go @@ -210,6 +210,16 @@ var ( }, []string{priorityLevel, flowSchema}, ) + apiserverCurrentInqueueSeats = compbasemetrics.NewGaugeVec( + &compbasemetrics.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "current_inqueue_seats", + Help: "Number of seats currently pending in queues of the API Priority and Fairness subsystem", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{priorityLevel, flowSchema}, + ) apiserverRequestQueueLength = compbasemetrics.NewHistogramVec( &compbasemetrics.HistogramOpts{ Namespace: namespace, @@ -455,6 +465,7 @@ var ( apiserverNextSBounds, apiserverNextDiscountedSBounds, apiserverCurrentInqueueRequests, + apiserverCurrentInqueueSeats, apiserverRequestQueueLength, apiserverRequestConcurrencyLimit, apiserverRequestConcurrencyInUse, @@ -518,6 +529,11 @@ func AddRequestsInQueues(ctx context.Context, priorityLevel, flowSchema string, apiserverCurrentInqueueRequests.WithLabelValues(priorityLevel, flowSchema).Add(float64(delta)) } +// AddSeatsInQueues adds the given delta to the gauge of the # of seats in the queues of the specified flowSchema and priorityLevel +func AddSeatsInQueues(ctx context.Context, priorityLevel, flowSchema string, delta int) { + apiserverCurrentInqueueSeats.WithLabelValues(priorityLevel, flowSchema).Add(float64(delta)) +} + // AddRequestsExecuting adds the given delta to the gauge of executing requests of the given flowSchema and priorityLevel func AddRequestsExecuting(ctx context.Context, priorityLevel, flowSchema string, delta int) { 
apiserverCurrentExecutingRequests.WithLabelValues(priorityLevel, flowSchema).Add(float64(delta)) diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go index 8d20867d6..6b941cb7f 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go @@ -117,8 +117,7 @@ func (e *listWorkEstimator) estimate(r *http.Request, flowSchemaName, priorityLe } limit := numStored - if utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) && listOptions.Limit > 0 && - listOptions.Limit < numStored { + if listOptions.Limit > 0 && listOptions.Limit < numStored { limit = listOptions.Limit } @@ -165,15 +164,14 @@ func key(requestInfo *apirequest.RequestInfo) string { func shouldListFromStorage(query url.Values, opts *metav1.ListOptions) bool { resourceVersion := opts.ResourceVersion match := opts.ResourceVersionMatch - pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) consistentListFromCacheEnabled := utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) // Serve consistent reads from storage if ConsistentListFromCache is disabled consistentReadFromStorage := resourceVersion == "" && !consistentListFromCacheEnabled // Watch cache doesn't support continuations, so serve them from etcd. - hasContinuation := pagingEnabled && len(opts.Continue) > 0 + hasContinuation := len(opts.Continue) > 0 // Serve paginated requests about revision "0" from watch cache to avoid overwhelming etcd. - hasLimit := pagingEnabled && opts.Limit > 0 && resourceVersion != "0" + hasLimit := opts.Limit > 0 && resourceVersion != "0" // Watch cache only supports ResourceVersionMatchNotOlderThan (default). 
unsupportedMatch := match != "" && match != metav1.ResourceVersionMatchNotOlderThan diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/rule.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/rule.go index a404d3286..03c18b8e1 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/rule.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/rule.go @@ -19,7 +19,7 @@ package flowcontrol import ( "strings" - flowcontrol "k8s.io/api/flowcontrol/v1beta3" + flowcontrol "k8s.io/api/flowcontrol/v1" "k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/endpoints/request" diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go b/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go index 95e4060bd..0816b45a1 100644 --- a/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go @@ -18,9 +18,9 @@ package webhook import ( "fmt" - "io/ioutil" "net" "net/http" + "os" "strconv" "strings" "time" @@ -233,7 +233,7 @@ func restConfigFromKubeconfig(configAuthInfo *clientcmdapi.AuthInfo) (*rest.Conf config.BearerToken = configAuthInfo.Token config.BearerTokenFile = configAuthInfo.TokenFile } else if len(configAuthInfo.TokenFile) > 0 { - tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile) + tokenBytes, err := os.ReadFile(configAuthInfo.TokenFile) if err != nil { return nil, err } diff --git a/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go index 191b37318..29ee0e84d 100644 --- a/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go +++ b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go @@ -31,8 +31,13 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/cache" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/apis/apiserver" + apiservervalidation "k8s.io/apiserver/pkg/apis/apiserver/validation" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" + authorizationcel "k8s.io/apiserver/pkg/authorization/cel" + "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/apiserver/pkg/util/webhook" "k8s.io/client-go/kubernetes/scheme" authorizationv1client "k8s.io/client-go/kubernetes/typed/authorization/v1" @@ -66,11 +71,12 @@ type WebhookAuthorizer struct { retryBackoff wait.Backoff decisionOnError authorizer.Decision metrics AuthorizerMetrics + celMatcher *authorizationcel.CELMatcher } // NewFromInterface creates a WebhookAuthorizer using the given subjectAccessReview client -func NewFromInterface(subjectAccessReview authorizationv1client.AuthorizationV1Interface, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff, metrics AuthorizerMetrics) (*WebhookAuthorizer, error) { - return newWithBackoff(&subjectAccessReviewV1Client{subjectAccessReview.RESTClient()}, authorizedTTL, unauthorizedTTL, retryBackoff, metrics) +func NewFromInterface(subjectAccessReview authorizationv1client.AuthorizationV1Interface, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff, decisionOnError authorizer.Decision, metrics AuthorizerMetrics) (*WebhookAuthorizer, error) { + return newWithBackoff(&subjectAccessReviewV1Client{subjectAccessReview.RESTClient()}, authorizedTTL, unauthorizedTTL, retryBackoff, decisionOnError, nil, metrics) } // New creates a new WebhookAuthorizer from the provided kubeconfig file. 
@@ -92,27 +98,33 @@ func NewFromInterface(subjectAccessReview authorizationv1client.AuthorizationV1I // // For additional HTTP configuration, refer to the kubeconfig documentation // https://kubernetes.io/docs/user-guide/kubeconfig-file/. -func New(config *rest.Config, version string, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff) (*WebhookAuthorizer, error) { +func New(config *rest.Config, version string, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff, decisionOnError authorizer.Decision, matchConditions []apiserver.WebhookMatchCondition) (*WebhookAuthorizer, error) { subjectAccessReview, err := subjectAccessReviewInterfaceFromConfig(config, version, retryBackoff) if err != nil { return nil, err } - return newWithBackoff(subjectAccessReview, authorizedTTL, unauthorizedTTL, retryBackoff, AuthorizerMetrics{ + return newWithBackoff(subjectAccessReview, authorizedTTL, unauthorizedTTL, retryBackoff, decisionOnError, matchConditions, AuthorizerMetrics{ RecordRequestTotal: noopMetrics{}.RecordRequestTotal, RecordRequestLatency: noopMetrics{}.RecordRequestLatency, }) } // newWithBackoff allows tests to skip the sleep. -func newWithBackoff(subjectAccessReview subjectAccessReviewer, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff, metrics AuthorizerMetrics) (*WebhookAuthorizer, error) { +func newWithBackoff(subjectAccessReview subjectAccessReviewer, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff, decisionOnError authorizer.Decision, matchConditions []apiserver.WebhookMatchCondition, metrics AuthorizerMetrics) (*WebhookAuthorizer, error) { + // compile all expressions once in validation and save the results to be used for eval later + cm, fieldErr := apiservervalidation.ValidateAndCompileMatchConditions(matchConditions) + if err := fieldErr.ToAggregate(); err != nil { + return nil, err + } return &WebhookAuthorizer{ subjectAccessReview: subjectAccessReview, responseCache: cache.NewLRUExpireCache(8192), authorizedTTL: authorizedTTL, unauthorizedTTL: unauthorizedTTL, retryBackoff: retryBackoff, - decisionOnError: authorizer.DecisionNoOpinion, + decisionOnError: decisionOnError, metrics: metrics, + celMatcher: cm, }, nil } @@ -190,6 +202,24 @@ func (w *WebhookAuthorizer) Authorize(ctx context.Context, attr authorizer.Attri Verb: attr.GetVerb(), } } + // skipping match when feature is not enabled + if utilfeature.DefaultFeatureGate.Enabled(features.StructuredAuthorizationConfiguration) { + // Process Match Conditions before calling the webhook + matches, err := w.match(ctx, r) + // If at least one matchCondition evaluates to an error (but none are FALSE): + // If failurePolicy=Deny, then the webhook rejects the request + // If failurePolicy=NoOpinion, then the error is ignored and the webhook is skipped + if err != nil { + return w.decisionOnError, "", err + } + // If at least one matchCondition successfully evaluates to FALSE, + // then the webhook is skipped. + if !matches { + return authorizer.DecisionNoOpinion, "", nil + } + } + // If all evaluated successfully and ALL matchConditions evaluate to TRUE, + // then the webhook is called. 
key, err := json.Marshal(r.Spec) if err != nil { return w.decisionOnError, "", err @@ -256,6 +286,18 @@ func (w *WebhookAuthorizer) RulesFor(user user.Info, namespace string) ([]author return resourceRules, nonResourceRules, incomplete, fmt.Errorf("webhook authorizer does not support user rule resolution") } +// Match is used to evaluate the SubjectAccessReviewSpec against +// the authorizer's matchConditions in the form of cel expressions +// to return match or no match found, which then is used to +// determine if the webhook should be skipped. +func (w *WebhookAuthorizer) match(ctx context.Context, r *authorizationv1.SubjectAccessReview) (bool, error) { + // A nil celMatcher or zero saved CompilationResults matches all requests. + if w.celMatcher == nil || w.celMatcher.CompilationResults == nil { + return true, nil + } + return w.celMatcher.Eval(ctx, r) +} + func convertToSARExtra(extra map[string][]string) map[string]authorizationv1.ExtraValue { if extra == nil { return nil diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go new file mode 100644 index 000000000..5aa686782 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go @@ -0,0 +1,79 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ClusterTrustBundleProjectionApplyConfiguration represents an declarative configuration of the ClusterTrustBundleProjection type for use +// with apply. +type ClusterTrustBundleProjectionApplyConfiguration struct { + Name *string `json:"name,omitempty"` + SignerName *string `json:"signerName,omitempty"` + LabelSelector *v1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"` + Optional *bool `json:"optional,omitempty"` + Path *string `json:"path,omitempty"` +} + +// ClusterTrustBundleProjectionApplyConfiguration constructs an declarative configuration of the ClusterTrustBundleProjection type for use with +// apply. +func ClusterTrustBundleProjection() *ClusterTrustBundleProjectionApplyConfiguration { + return &ClusterTrustBundleProjectionApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ClusterTrustBundleProjectionApplyConfiguration) WithName(value string) *ClusterTrustBundleProjectionApplyConfiguration { + b.Name = &value + return b +} + +// WithSignerName sets the SignerName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the SignerName field is set to the value of the last call. +func (b *ClusterTrustBundleProjectionApplyConfiguration) WithSignerName(value string) *ClusterTrustBundleProjectionApplyConfiguration { + b.SignerName = &value + return b +} + +// WithLabelSelector sets the LabelSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LabelSelector field is set to the value of the last call. +func (b *ClusterTrustBundleProjectionApplyConfiguration) WithLabelSelector(value *v1.LabelSelectorApplyConfiguration) *ClusterTrustBundleProjectionApplyConfiguration { + b.LabelSelector = value + return b +} + +// WithOptional sets the Optional field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Optional field is set to the value of the last call. +func (b *ClusterTrustBundleProjectionApplyConfiguration) WithOptional(value bool) *ClusterTrustBundleProjectionApplyConfiguration { + b.Optional = &value + return b +} + +// WithPath sets the Path field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Path field is set to the value of the last call. +func (b *ClusterTrustBundleProjectionApplyConfiguration) WithPath(value string) *ClusterTrustBundleProjectionApplyConfiguration { + b.Path = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go index 6e373dd4e..e4ae9c49f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go @@ -24,6 +24,7 @@ type LifecycleHandlerApplyConfiguration struct { Exec *ExecActionApplyConfiguration `json:"exec,omitempty"` HTTPGet *HTTPGetActionApplyConfiguration `json:"httpGet,omitempty"` TCPSocket *TCPSocketActionApplyConfiguration `json:"tcpSocket,omitempty"` + Sleep *SleepActionApplyConfiguration `json:"sleep,omitempty"` } // LifecycleHandlerApplyConfiguration constructs an declarative configuration of the LifecycleHandler type for use with @@ -55,3 +56,11 @@ func (b *LifecycleHandlerApplyConfiguration) WithTCPSocket(value *TCPSocketActio b.TCPSocket = value return b } + +// WithSleep sets the Sleep field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Sleep field is set to the value of the last call. +func (b *LifecycleHandlerApplyConfiguration) WithSleep(value *SleepActionApplyConfiguration) *LifecycleHandlerApplyConfiguration { + b.Sleep = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go index 64d27bdad..a48dac681 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go @@ -18,11 +18,16 @@ limitations under the License. 
package v1 +import ( + v1 "k8s.io/api/core/v1" +) + // LoadBalancerIngressApplyConfiguration represents an declarative configuration of the LoadBalancerIngress type for use // with apply. type LoadBalancerIngressApplyConfiguration struct { IP *string `json:"ip,omitempty"` Hostname *string `json:"hostname,omitempty"` + IPMode *v1.LoadBalancerIPMode `json:"ipMode,omitempty"` Ports []PortStatusApplyConfiguration `json:"ports,omitempty"` } @@ -48,6 +53,14 @@ func (b *LoadBalancerIngressApplyConfiguration) WithHostname(value string) *Load return b } +// WithIPMode sets the IPMode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IPMode field is set to the value of the last call. +func (b *LoadBalancerIngressApplyConfiguration) WithIPMode(value v1.LoadBalancerIPMode) *LoadBalancerIngressApplyConfiguration { + b.IPMode = &value + return b +} + // WithPorts adds the given value to the Ports field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Ports field. diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go new file mode 100644 index 000000000..4ff1d040c --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ModifyVolumeStatusApplyConfiguration represents an declarative configuration of the ModifyVolumeStatus type for use +// with apply. +type ModifyVolumeStatusApplyConfiguration struct { + TargetVolumeAttributesClassName *string `json:"targetVolumeAttributesClassName,omitempty"` + Status *v1.PersistentVolumeClaimModifyVolumeStatus `json:"status,omitempty"` +} + +// ModifyVolumeStatusApplyConfiguration constructs an declarative configuration of the ModifyVolumeStatus type for use with +// apply. +func ModifyVolumeStatus() *ModifyVolumeStatusApplyConfiguration { + return &ModifyVolumeStatusApplyConfiguration{} +} + +// WithTargetVolumeAttributesClassName sets the TargetVolumeAttributesClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TargetVolumeAttributesClassName field is set to the value of the last call. 
+func (b *ModifyVolumeStatusApplyConfiguration) WithTargetVolumeAttributesClassName(value string) *ModifyVolumeStatusApplyConfiguration { + b.TargetVolumeAttributesClassName = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ModifyVolumeStatusApplyConfiguration) WithStatus(value v1.PersistentVolumeClaimModifyVolumeStatus) *ModifyVolumeStatusApplyConfiguration { + b.Status = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go index f324584ab..4db12fadb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go @@ -26,14 +26,15 @@ import ( // PersistentVolumeClaimSpecApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimSpec type for use // with apply. type PersistentVolumeClaimSpecApplyConfiguration struct { - AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` - Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` - Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` - VolumeName *string `json:"volumeName,omitempty"` - StorageClassName *string `json:"storageClassName,omitempty"` - VolumeMode *v1.PersistentVolumeMode `json:"volumeMode,omitempty"` - DataSource *TypedLocalObjectReferenceApplyConfiguration `json:"dataSource,omitempty"` - DataSourceRef *TypedObjectReferenceApplyConfiguration `json:"dataSourceRef,omitempty"` + AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` + Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + Resources *VolumeResourceRequirementsApplyConfiguration `json:"resources,omitempty"` + VolumeName *string `json:"volumeName,omitempty"` + StorageClassName *string `json:"storageClassName,omitempty"` + VolumeMode *v1.PersistentVolumeMode `json:"volumeMode,omitempty"` + DataSource *TypedLocalObjectReferenceApplyConfiguration `json:"dataSource,omitempty"` + DataSourceRef *TypedObjectReferenceApplyConfiguration `json:"dataSourceRef,omitempty"` + VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty"` } // PersistentVolumeClaimSpecApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimSpec type for use with @@ -63,7 +64,7 @@ func (b *PersistentVolumeClaimSpecApplyConfiguration) WithSelector(value *metav1 // WithResources sets the Resources field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Resources field is set to the value of the last call. 
-func (b *PersistentVolumeClaimSpecApplyConfiguration) WithResources(value *ResourceRequirementsApplyConfiguration) *PersistentVolumeClaimSpecApplyConfiguration { +func (b *PersistentVolumeClaimSpecApplyConfiguration) WithResources(value *VolumeResourceRequirementsApplyConfiguration) *PersistentVolumeClaimSpecApplyConfiguration { b.Resources = value return b } @@ -107,3 +108,11 @@ func (b *PersistentVolumeClaimSpecApplyConfiguration) WithDataSourceRef(value *T b.DataSourceRef = value return b } + +// WithVolumeAttributesClassName sets the VolumeAttributesClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VolumeAttributesClassName field is set to the value of the last call. +func (b *PersistentVolumeClaimSpecApplyConfiguration) WithVolumeAttributesClassName(value string) *PersistentVolumeClaimSpecApplyConfiguration { + b.VolumeAttributesClassName = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go index c29b2a9a1..1f6d5ae32 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go @@ -25,12 +25,14 @@ import ( // PersistentVolumeClaimStatusApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimStatus type for use // with apply. type PersistentVolumeClaimStatusApplyConfiguration struct { - Phase *v1.PersistentVolumeClaimPhase `json:"phase,omitempty"` - AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` - Capacity *v1.ResourceList `json:"capacity,omitempty"` - Conditions []PersistentVolumeClaimConditionApplyConfiguration `json:"conditions,omitempty"` - AllocatedResources *v1.ResourceList `json:"allocatedResources,omitempty"` - AllocatedResourceStatuses map[v1.ResourceName]v1.ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty"` + Phase *v1.PersistentVolumeClaimPhase `json:"phase,omitempty"` + AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` + Capacity *v1.ResourceList `json:"capacity,omitempty"` + Conditions []PersistentVolumeClaimConditionApplyConfiguration `json:"conditions,omitempty"` + AllocatedResources *v1.ResourceList `json:"allocatedResources,omitempty"` + AllocatedResourceStatuses map[v1.ResourceName]v1.ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty"` + CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty"` + ModifyVolumeStatus *ModifyVolumeStatusApplyConfiguration `json:"modifyVolumeStatus,omitempty"` } // PersistentVolumeClaimStatusApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimStatus type for use with @@ -99,3 +101,19 @@ func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResourceSta } return b } + +// WithCurrentVolumeAttributesClassName sets the CurrentVolumeAttributesClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CurrentVolumeAttributesClassName field is set to the value of the last call. 
+func (b *PersistentVolumeClaimStatusApplyConfiguration) WithCurrentVolumeAttributesClassName(value string) *PersistentVolumeClaimStatusApplyConfiguration { + b.CurrentVolumeAttributesClassName = &value + return b +} + +// WithModifyVolumeStatus sets the ModifyVolumeStatus field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ModifyVolumeStatus field is set to the value of the last call. +func (b *PersistentVolumeClaimStatusApplyConfiguration) WithModifyVolumeStatus(value *ModifyVolumeStatusApplyConfiguration) *PersistentVolumeClaimStatusApplyConfiguration { + b.ModifyVolumeStatus = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go index b3a72b1c3..8a30dab64 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go @@ -34,6 +34,7 @@ type PersistentVolumeSpecApplyConfiguration struct { MountOptions []string `json:"mountOptions,omitempty"` VolumeMode *v1.PersistentVolumeMode `json:"volumeMode,omitempty"` NodeAffinity *VolumeNodeAffinityApplyConfiguration `json:"nodeAffinity,omitempty"` + VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty"` } // PersistentVolumeSpecApplyConfiguration constructs an declarative configuration of the PersistentVolumeSpec type for use with @@ -285,3 +286,11 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithNodeAffinity(value *VolumeN b.NodeAffinity = value return b } + +// WithVolumeAttributesClassName sets the VolumeAttributesClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VolumeAttributesClassName field is set to the value of the last call. +func (b *PersistentVolumeSpecApplyConfiguration) WithVolumeAttributesClassName(value string) *PersistentVolumeSpecApplyConfiguration { + b.VolumeAttributesClassName = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go index 7d2492203..ac1eab3d8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go @@ -29,6 +29,8 @@ type PodAffinityTermApplyConfiguration struct { Namespaces []string `json:"namespaces,omitempty"` TopologyKey *string `json:"topologyKey,omitempty"` NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + MatchLabelKeys []string `json:"matchLabelKeys,omitempty"` + MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty"` } // PodAffinityTermApplyConfiguration constructs an declarative configuration of the PodAffinityTerm type for use with @@ -70,3 +72,23 @@ func (b *PodAffinityTermApplyConfiguration) WithNamespaceSelector(value *v1.Labe b.NamespaceSelector = value return b } + +// WithMatchLabelKeys adds the given value to the MatchLabelKeys field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the MatchLabelKeys field. +func (b *PodAffinityTermApplyConfiguration) WithMatchLabelKeys(values ...string) *PodAffinityTermApplyConfiguration { + for i := range values { + b.MatchLabelKeys = append(b.MatchLabelKeys, values[i]) + } + return b +} + +// WithMismatchLabelKeys adds the given value to the MismatchLabelKeys field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MismatchLabelKeys field. +func (b *PodAffinityTermApplyConfiguration) WithMismatchLabelKeys(values ...string) *PodAffinityTermApplyConfiguration { + for i := range values { + b.MismatchLabelKeys = append(b.MismatchLabelKeys, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedcsidriver.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go similarity index 51% rename from vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedcsidriver.go rename to vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go index 27b49bf15..8b3284536 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedcsidriver.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go @@ -16,24 +16,24 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1beta1 +package v1 -// AllowedCSIDriverApplyConfiguration represents an declarative configuration of the AllowedCSIDriver type for use +// SleepActionApplyConfiguration represents an declarative configuration of the SleepAction type for use // with apply. -type AllowedCSIDriverApplyConfiguration struct { - Name *string `json:"name,omitempty"` +type SleepActionApplyConfiguration struct { + Seconds *int64 `json:"seconds,omitempty"` } -// AllowedCSIDriverApplyConfiguration constructs an declarative configuration of the AllowedCSIDriver type for use with +// SleepActionApplyConfiguration constructs an declarative configuration of the SleepAction type for use with // apply. -func AllowedCSIDriver() *AllowedCSIDriverApplyConfiguration { - return &AllowedCSIDriverApplyConfiguration{} +func SleepAction() *SleepActionApplyConfiguration { + return &SleepActionApplyConfiguration{} } -// WithName sets the Name field in the declarative configuration to the given value +// WithSeconds sets the Seconds field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *AllowedCSIDriverApplyConfiguration) WithName(value string) *AllowedCSIDriverApplyConfiguration { - b.Name = &value +// If called multiple times, the Seconds field is set to the value of the last call. 
+func (b *SleepActionApplyConfiguration) WithSeconds(value int64) *SleepActionApplyConfiguration { + b.Seconds = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go index 8d16ea79e..a2ef0a994 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go @@ -25,6 +25,7 @@ type VolumeProjectionApplyConfiguration struct { DownwardAPI *DownwardAPIProjectionApplyConfiguration `json:"downwardAPI,omitempty"` ConfigMap *ConfigMapProjectionApplyConfiguration `json:"configMap,omitempty"` ServiceAccountToken *ServiceAccountTokenProjectionApplyConfiguration `json:"serviceAccountToken,omitempty"` + ClusterTrustBundle *ClusterTrustBundleProjectionApplyConfiguration `json:"clusterTrustBundle,omitempty"` } // VolumeProjectionApplyConfiguration constructs an declarative configuration of the VolumeProjection type for use with @@ -64,3 +65,11 @@ func (b *VolumeProjectionApplyConfiguration) WithServiceAccountToken(value *Serv b.ServiceAccountToken = value return b } + +// WithClusterTrustBundle sets the ClusterTrustBundle field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterTrustBundle field is set to the value of the last call. +func (b *VolumeProjectionApplyConfiguration) WithClusterTrustBundle(value *ClusterTrustBundleProjectionApplyConfiguration) *VolumeProjectionApplyConfiguration { + b.ClusterTrustBundle = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go new file mode 100644 index 000000000..89ad1da8b --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// VolumeResourceRequirementsApplyConfiguration represents an declarative configuration of the VolumeResourceRequirements type for use +// with apply. +type VolumeResourceRequirementsApplyConfiguration struct { + Limits *v1.ResourceList `json:"limits,omitempty"` + Requests *v1.ResourceList `json:"requests,omitempty"` +} + +// VolumeResourceRequirementsApplyConfiguration constructs an declarative configuration of the VolumeResourceRequirements type for use with +// apply. 
+func VolumeResourceRequirements() *VolumeResourceRequirementsApplyConfiguration { + return &VolumeResourceRequirementsApplyConfiguration{} +} + +// WithLimits sets the Limits field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Limits field is set to the value of the last call. +func (b *VolumeResourceRequirementsApplyConfiguration) WithLimits(value v1.ResourceList) *VolumeResourceRequirementsApplyConfiguration { + b.Limits = &value + return b +} + +// WithRequests sets the Requests field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Requests field is set to the value of the last call. +func (b *VolumeResourceRequirementsApplyConfiguration) WithRequests(value v1.ResourceList) *VolumeResourceRequirementsApplyConfiguration { + b.Requests = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/exemptprioritylevelconfiguration.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go index 3535d7478..cd21214f5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/exemptprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go similarity index 87% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowdistinguishermethod.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go index 507f8e9ab..d9c8a79cc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowdistinguishermethod.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go @@ -16,16 +16,16 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/api/flowcontrol/v1" ) // FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use // with apply. 
type FlowDistinguisherMethodApplyConfiguration struct { - Type *v1alpha1.FlowDistinguisherMethodType `json:"type,omitempty"` + Type *v1.FlowDistinguisherMethodType `json:"type,omitempty"` } // FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with @@ -37,7 +37,7 @@ func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value v1alpha1.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { +func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value v1.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go similarity index 94% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschema.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go index 20251d08b..8809fafba 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschema.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + apiflowcontrolv1 "k8s.io/api/flowcontrol/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -42,7 +42,7 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration { b := &FlowSchemaApplyConfiguration{} b.WithName(name) b.WithKind("FlowSchema") - b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1alpha1") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1") return b } @@ -57,27 +57,27 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractFlowSchema(flowSchema *flowcontrolv1alpha1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { +func ExtractFlowSchema(flowSchema *apiflowcontrolv1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { return extractFlowSchema(flowSchema, fieldManager, "") } // ExtractFlowSchemaStatus is the same as ExtractFlowSchema except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractFlowSchemaStatus(flowSchema *flowcontrolv1alpha1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { +func ExtractFlowSchemaStatus(flowSchema *apiflowcontrolv1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { return extractFlowSchema(flowSchema, fieldManager, "status") } -func extractFlowSchema(flowSchema *flowcontrolv1alpha1.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) { +func extractFlowSchema(flowSchema *apiflowcontrolv1.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) { b := &FlowSchemaApplyConfiguration{} - err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1alpha1.FlowSchema"), fieldManager, b, subresource) + err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1.FlowSchema"), fieldManager, b, subresource) if err != nil { return nil, err } b.WithName(flowSchema.Name) b.WithKind("FlowSchema") - b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1alpha1") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1") return b, nil } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go similarity index 81% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemacondition.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go index 31f5dc13e..808ab09a5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemacondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go @@ -16,21 +16,21 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/flowcontrol/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use // with apply. type FlowSchemaConditionApplyConfiguration struct { - Type *v1alpha1.FlowSchemaConditionType `json:"type,omitempty"` - Status *v1alpha1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *v1.FlowSchemaConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with @@ -42,7 +42,7 @@ func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1alpha1.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1alpha1.FlowSche // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1alpha1.ConditionStatus) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *FlowSchemaConditionApplyConfiguration { b.Status = &value return b } @@ -58,7 +58,7 @@ func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1alpha1.Condit // WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LastTransitionTime field is set to the value of the last call. -func (b *FlowSchemaConditionApplyConfiguration) WithLastTransitionTime(value v1.Time) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *FlowSchemaConditionApplyConfiguration { b.LastTransitionTime = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemaspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemaspec.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go index fd5fc0ae9..2785f5baf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemaspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemastatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemastatus.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go index db2dacf13..7c61360a5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemastatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use // with apply. 
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/groupsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go similarity index 98% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/groupsubject.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go index 0421f3f59..92a03d862 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/groupsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go similarity index 90% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitedprioritylevelconfiguration.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go index 10660e81a..c19f09703 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitedprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go @@ -16,12 +16,12 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use // with apply. type LimitedPriorityLevelConfigurationApplyConfiguration struct { - AssuredConcurrencyShares *int32 `json:"assuredConcurrencyShares,omitempty"` + NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` LimitResponse *LimitResponseApplyConfiguration `json:"limitResponse,omitempty"` LendablePercent *int32 `json:"lendablePercent,omitempty"` BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"` @@ -33,11 +33,11 @@ func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApply return &LimitedPriorityLevelConfigurationApplyConfiguration{} } -// WithAssuredConcurrencyShares sets the AssuredConcurrencyShares field in the declarative configuration to the given value +// WithNominalConcurrencyShares sets the NominalConcurrencyShares field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AssuredConcurrencyShares field is set to the value of the last call. -func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithAssuredConcurrencyShares(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { - b.AssuredConcurrencyShares = &value +// If called multiple times, the NominalConcurrencyShares field is set to the value of the last call. 
+func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithNominalConcurrencyShares(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.NominalConcurrencyShares = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go similarity index 88% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitresponse.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go index 5edaa025c..03ff6d910 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitresponse.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go @@ -16,16 +16,16 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/api/flowcontrol/v1" ) // LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use // with apply. type LimitResponseApplyConfiguration struct { - Type *v1alpha1.LimitResponseType `json:"type,omitempty"` + Type *v1.LimitResponseType `json:"type,omitempty"` Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` } @@ -38,7 +38,7 @@ func LimitResponse() *LimitResponseApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *LimitResponseApplyConfiguration) WithType(value v1alpha1.LimitResponseType) *LimitResponseApplyConfiguration { +func (b *LimitResponseApplyConfiguration) WithType(value v1.LimitResponseType) *LimitResponseApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/nonresourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/nonresourcepolicyrule.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go index b1f09f530..d9f8c2ecc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/nonresourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use // with apply. 
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/policyruleswithsubjects.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/policyruleswithsubjects.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go index 841104064..b193efa8b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/policyruleswithsubjects.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go similarity index 94% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfiguration.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go index a40db75dc..e8a1b97c9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + apiflowcontrolv1 "k8s.io/api/flowcontrol/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -42,7 +42,7 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon b := &PriorityLevelConfigurationApplyConfiguration{} b.WithName(name) b.WithKind("PriorityLevelConfiguration") - b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1alpha1") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1") return b } @@ -57,27 +57,27 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { +func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *apiflowcontrolv1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "") } // ExtractPriorityLevelConfigurationStatus is the same as ExtractPriorityLevelConfiguration except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { +func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *apiflowcontrolv1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "status") } -func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) { +func extractPriorityLevelConfiguration(priorityLevelConfiguration *apiflowcontrolv1.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) { b := &PriorityLevelConfigurationApplyConfiguration{} - err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfiguration"), fieldManager, b, subresource) + err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1.PriorityLevelConfiguration"), fieldManager, b, subresource) if err != nil { return nil, err } b.WithName(priorityLevelConfiguration.Name) b.WithKind("PriorityLevelConfiguration") - b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1alpha1") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1") return b, nil } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go similarity index 81% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationcondition.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go index bd91b80f2..6ce588c8d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go @@ -16,21 +16,21 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/flowcontrol/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use // with apply. 
type PriorityLevelConfigurationConditionApplyConfiguration struct { - Type *v1alpha1.PriorityLevelConfigurationConditionType `json:"type,omitempty"` - Status *v1alpha1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *v1.PriorityLevelConfigurationConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with @@ -42,7 +42,7 @@ func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionA // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v1alpha1.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v1.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value v1alpha1.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { b.Status = &value return b } @@ -58,7 +58,7 @@ func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value // WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LastTransitionTime field is set to the value of the last call. 
-func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithLastTransitionTime(value v1.Time) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *PriorityLevelConfigurationConditionApplyConfiguration { b.LastTransitionTime = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationreference.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go similarity index 98% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationreference.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go index b477c04df..0638aee8b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go similarity index 92% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationspec.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go index ade920a75..5d8874959 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go @@ -16,16 +16,16 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/api/flowcontrol/v1" ) // PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use // with apply. type PriorityLevelConfigurationSpecApplyConfiguration struct { - Type *v1alpha1.PriorityLevelEnablement `json:"type,omitempty"` + Type *v1.PriorityLevelEnablement `json:"type,omitempty"` Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"` Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } @@ -39,7 +39,7 @@ func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfig // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value v1alpha1.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { +func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value v1.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationstatus.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go index eb3ef3d61..322871edc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/queuingconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/queuingconfiguration.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go index 0fccc3f08..69fd2c23c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/queuingconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/resourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/resourcepolicyrule.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go index d2c6f4eed..0991ce944 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/resourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use // with apply. 
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/serviceaccountsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/serviceaccountsubject.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go index 270b5225e..55787ca76 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/serviceaccountsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go similarity index 92% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/subject.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go index 83c09d644..f02b03bdc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go @@ -16,16 +16,16 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/api/flowcontrol/v1" ) // SubjectApplyConfiguration represents an declarative configuration of the Subject type for use // with apply. type SubjectApplyConfiguration struct { - Kind *v1alpha1.SubjectKind `json:"kind,omitempty"` + Kind *v1.SubjectKind `json:"kind,omitempty"` User *UserSubjectApplyConfiguration `json:"user,omitempty"` Group *GroupSubjectApplyConfiguration `json:"group,omitempty"` ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` @@ -40,7 +40,7 @@ func Subject() *SubjectApplyConfiguration { // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *SubjectApplyConfiguration) WithKind(value v1alpha1.SubjectKind) *SubjectApplyConfiguration { +func (b *SubjectApplyConfiguration) WithKind(value v1.SubjectKind) *SubjectApplyConfiguration { b.Kind = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/usersubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go similarity index 98% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/usersubject.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go index a762c249e..2d17c111c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/usersubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use // with apply. 
diff --git a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go index 3ed553662..2ceb26221 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go +++ b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go @@ -1013,7 +1013,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1075,7 +1074,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: data type: namedType: __untyped_atomic_ - default: {} - name: kind type: scalar: string @@ -1114,7 +1112,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1227,11 +1224,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastUpdateTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1343,7 +1338,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1455,7 +1449,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1586,7 +1579,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: data type: namedType: __untyped_atomic_ - default: {} - name: kind type: scalar: string @@ -1625,11 +1617,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastUpdateTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1768,7 +1758,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1899,7 +1888,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: data type: namedType: __untyped_atomic_ - default: {} - name: kind type: scalar: string @@ -1938,7 +1926,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -2051,11 +2038,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastUpdateTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -2167,7 +2152,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -2279,7 +2263,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -2606,7 +2589,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -2860,7 +2842,6 @@ var schemaYAML = 
typed.YAMLObject(`types: - name: currentAverageValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: name type: scalar: string @@ -2904,7 +2885,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: currentValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: metricName type: scalar: string @@ -2939,7 +2919,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -3067,7 +3046,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: targetValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: io.k8s.api.autoscaling.v2beta1.ObjectMetricStatus map: fields: @@ -3077,7 +3055,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: currentValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: metricName type: scalar: string @@ -3102,14 +3079,12 @@ var schemaYAML = typed.YAMLObject(`types: - name: targetAverageValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: io.k8s.api.autoscaling.v2beta1.PodsMetricStatus map: fields: - name: currentAverageValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: metricName type: scalar: string @@ -3139,7 +3114,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: currentAverageValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: name type: scalar: string @@ -3276,7 +3250,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -3591,11 +3564,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastProbeTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -3876,11 +3847,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastUpdateTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -4002,11 +3971,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastUpdateTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -4404,6 +4371,25 @@ var schemaYAML = typed.YAMLObject(`types: - name: timeoutSeconds type: scalar: numeric +- name: io.k8s.api.core.v1.ClusterTrustBundleProjection + map: + fields: + - name: labelSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: name + type: + scalar: string + - name: optional + type: + scalar: boolean + - name: path + type: + scalar: string + default: "" + - name: signerName + type: + scalar: string - name: io.k8s.api.core.v1.ComponentCondition map: fields: @@ -4716,7 +4702,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: startedAt type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: io.k8s.api.core.v1.ContainerStateTerminated map: fields: @@ -4730,7 +4715,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: finishedAt type: namedType: 
io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -4743,7 +4727,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: startedAt type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: io.k8s.api.core.v1.ContainerStateWaiting map: fields: @@ -5099,11 +5082,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: eventTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - default: {} - name: firstTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: involvedObject type: namedType: io.k8s.api.core.v1.ObjectReference @@ -5114,7 +5095,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -5155,7 +5135,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastObservedTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - default: {} - name: io.k8s.api.core.v1.EventSource map: fields: @@ -5338,7 +5317,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: port type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - default: {} - name: scheme type: scalar: string @@ -5497,6 +5475,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: httpGet type: namedType: io.k8s.api.core.v1.HTTPGetAction + - name: sleep + type: + namedType: io.k8s.api.core.v1.SleepAction - name: tcpSocket type: namedType: io.k8s.api.core.v1.TCPSocketAction @@ -5567,6 +5548,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: ip type: scalar: string + - name: ipMode + type: + scalar: string - name: ports type: list: @@ -5599,6 +5583,16 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: io.k8s.api.core.v1.ModifyVolumeStatus + map: + fields: + - name: status + type: + scalar: string + default: "" + - name: targetVolumeAttributesClassName + type: + scalar: string - name: io.k8s.api.core.v1.NFSVolumeSource map: fields: @@ -5640,7 +5634,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -5728,11 +5721,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastHeartbeatTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -6036,11 +6027,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastProbeTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -6072,7 +6061,7 @@ var schemaYAML = typed.YAMLObject(`types: namedType: io.k8s.api.core.v1.TypedObjectReference - name: resources type: - namedType: io.k8s.api.core.v1.ResourceRequirements + namedType: io.k8s.api.core.v1.VolumeResourceRequirements default: {} - name: selector type: @@ -6080,6 +6069,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: storageClassName type: scalar: string + - name: volumeAttributesClassName + type: + scalar: string - name: volumeMode type: scalar: string @@ -6119,6 +6111,12 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - type + - name: currentVolumeAttributesClassName + type: + scalar: string + - name: modifyVolumeStatus + type: + namedType: 
io.k8s.api.core.v1.ModifyVolumeStatus - name: phase type: scalar: string @@ -6239,6 +6237,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: storageos type: namedType: io.k8s.api.core.v1.StorageOSPersistentVolumeSource + - name: volumeAttributesClassName + type: + scalar: string - name: volumeMode type: scalar: string @@ -6312,6 +6313,18 @@ var schemaYAML = typed.YAMLObject(`types: - name: labelSelector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: matchLabelKeys + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: mismatchLabelKeys + type: + list: + elementType: + scalar: string + elementRelationship: atomic - name: namespaceSelector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector @@ -6346,11 +6359,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastProbeTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -6960,7 +6971,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -7036,7 +7046,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: divisor type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: resource type: scalar: string @@ -7459,7 +7468,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: targetPort type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - default: {} - name: io.k8s.api.core.v1.ServiceSpec map: fields: @@ -7562,6 +7570,13 @@ var schemaYAML = typed.YAMLObject(`types: - name: clientIP type: namedType: io.k8s.api.core.v1.ClientIPConfig +- name: io.k8s.api.core.v1.SleepAction + map: + fields: + - name: seconds + type: + scalar: numeric + default: 0 - name: io.k8s.api.core.v1.StorageOSPersistentVolumeSource map: fields: @@ -7618,7 +7633,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: port type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - default: {} - name: io.k8s.api.core.v1.Taint map: fields: @@ -7879,6 +7893,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: io.k8s.api.core.v1.VolumeProjection map: fields: + - name: clusterTrustBundle + type: + namedType: io.k8s.api.core.v1.ClusterTrustBundleProjection - name: configMap type: namedType: io.k8s.api.core.v1.ConfigMapProjection @@ -7891,6 +7908,19 @@ var schemaYAML = typed.YAMLObject(`types: - name: serviceAccountToken type: namedType: io.k8s.api.core.v1.ServiceAccountTokenProjection +- name: io.k8s.api.core.v1.VolumeResourceRequirements + map: + fields: + - name: limits + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: requests + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - name: io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource map: fields: @@ -8156,11 +8186,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: deprecatedFirstTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: deprecatedLastTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: deprecatedSource type: namedType: io.k8s.api.core.v1.EventSource @@ -8168,7 +8196,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: eventTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - default: {} - name: kind type: scalar: 
string @@ -8211,7 +8238,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastObservedTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - default: {} - name: io.k8s.api.events.v1beta1.Event map: fields: @@ -8227,11 +8253,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: deprecatedFirstTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: deprecatedLastTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: deprecatedSource type: namedType: io.k8s.api.core.v1.EventSource @@ -8239,7 +8263,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: eventTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - default: {} - name: kind type: scalar: string @@ -8282,7 +8305,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastObservedTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - default: {} - name: io.k8s.api.extensions.v1beta1.DaemonSet map: fields: @@ -8310,7 +8332,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -8426,11 +8447,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastUpdateTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -8586,7 +8605,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: servicePort type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - default: {} - name: io.k8s.api.extensions.v1beta1.IngressLoadBalancerIngress map: fields: @@ -8797,7 +8815,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -8879,7 +8896,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: maxUnavailable type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString -- name: io.k8s.api.flowcontrol.v1alpha1.ExemptPriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1.ExemptPriorityLevelConfiguration map: fields: - name: lendablePercent @@ -8888,14 +8905,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: nominalConcurrencyShares type: scalar: numeric -- name: io.k8s.api.flowcontrol.v1alpha1.FlowDistinguisherMethod +- name: io.k8s.api.flowcontrol.v1.FlowDistinguisherMethod map: fields: - name: type type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchema +- name: io.k8s.api.flowcontrol.v1.FlowSchema map: fields: - name: apiVersion @@ -8910,19 +8927,18 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaSpec + namedType: io.k8s.api.flowcontrol.v1.FlowSchemaSpec default: {} - name: status type: - namedType: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaStatus + namedType: io.k8s.api.flowcontrol.v1.FlowSchemaStatus default: {} -- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaCondition +- name: io.k8s.api.flowcontrol.v1.FlowSchemaCondition map: fields: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -8935,50 +8951,50 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaSpec +- name: io.k8s.api.flowcontrol.v1.FlowSchemaSpec map: 
fields: - name: distinguisherMethod type: - namedType: io.k8s.api.flowcontrol.v1alpha1.FlowDistinguisherMethod + namedType: io.k8s.api.flowcontrol.v1.FlowDistinguisherMethod - name: matchingPrecedence type: scalar: numeric default: 0 - name: priorityLevelConfiguration type: - namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationReference + namedType: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationReference default: {} - name: rules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.PolicyRulesWithSubjects + namedType: io.k8s.api.flowcontrol.v1.PolicyRulesWithSubjects elementRelationship: atomic -- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaStatus +- name: io.k8s.api.flowcontrol.v1.FlowSchemaStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaCondition + namedType: io.k8s.api.flowcontrol.v1.FlowSchemaCondition elementRelationship: associative keys: - type -- name: io.k8s.api.flowcontrol.v1alpha1.GroupSubject +- name: io.k8s.api.flowcontrol.v1.GroupSubject map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1alpha1.LimitResponse +- name: io.k8s.api.flowcontrol.v1.LimitResponse map: fields: - name: queuing type: - namedType: io.k8s.api.flowcontrol.v1alpha1.QueuingConfiguration + namedType: io.k8s.api.flowcontrol.v1.QueuingConfiguration - name: type type: scalar: string @@ -8988,13 +9004,9 @@ var schemaYAML = typed.YAMLObject(`types: fields: - fieldName: queuing discriminatorValue: Queuing -- name: io.k8s.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1.LimitedPriorityLevelConfiguration map: fields: - - name: assuredConcurrencyShares - type: - scalar: numeric - default: 0 - name: borrowingLimitPercent type: scalar: numeric @@ -9003,9 +9015,12 @@ var schemaYAML = typed.YAMLObject(`types: scalar: numeric - name: limitResponse type: - namedType: io.k8s.api.flowcontrol.v1alpha1.LimitResponse + namedType: io.k8s.api.flowcontrol.v1.LimitResponse default: {} -- name: io.k8s.api.flowcontrol.v1alpha1.NonResourcePolicyRule + - name: nominalConcurrencyShares + type: + scalar: numeric +- name: io.k8s.api.flowcontrol.v1.NonResourcePolicyRule map: fields: - name: nonResourceURLs @@ -9020,28 +9035,28 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.flowcontrol.v1alpha1.PolicyRulesWithSubjects +- name: io.k8s.api.flowcontrol.v1.PolicyRulesWithSubjects map: fields: - name: nonResourceRules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.NonResourcePolicyRule + namedType: io.k8s.api.flowcontrol.v1.NonResourcePolicyRule elementRelationship: atomic - name: resourceRules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.ResourcePolicyRule + namedType: io.k8s.api.flowcontrol.v1.ResourcePolicyRule elementRelationship: atomic - name: subjects type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.Subject + namedType: io.k8s.api.flowcontrol.v1.Subject elementRelationship: atomic -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1.PriorityLevelConfiguration map: fields: - name: apiVersion @@ -9056,19 +9071,18 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationSpec + namedType: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationSpec default: {} - 
name: status type: - namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationStatus + namedType: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationStatus default: {} -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationCondition +- name: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationCondition map: fields: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -9081,22 +9095,22 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationReference +- name: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationReference map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationSpec +- name: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationSpec map: fields: - name: exempt type: - namedType: io.k8s.api.flowcontrol.v1alpha1.ExemptPriorityLevelConfiguration + namedType: io.k8s.api.flowcontrol.v1.ExemptPriorityLevelConfiguration - name: limited type: - namedType: io.k8s.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration + namedType: io.k8s.api.flowcontrol.v1.LimitedPriorityLevelConfiguration - name: type type: scalar: string @@ -9108,18 +9122,18 @@ var schemaYAML = typed.YAMLObject(`types: discriminatorValue: Exempt - fieldName: limited discriminatorValue: Limited -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationStatus +- name: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationCondition + namedType: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationCondition elementRelationship: associative keys: - type -- name: io.k8s.api.flowcontrol.v1alpha1.QueuingConfiguration +- name: io.k8s.api.flowcontrol.v1.QueuingConfiguration map: fields: - name: handSize @@ -9134,7 +9148,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: numeric default: 0 -- name: io.k8s.api.flowcontrol.v1alpha1.ResourcePolicyRule +- name: io.k8s.api.flowcontrol.v1.ResourcePolicyRule map: fields: - name: apiGroups @@ -9164,7 +9178,7 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.flowcontrol.v1alpha1.ServiceAccountSubject +- name: io.k8s.api.flowcontrol.v1.ServiceAccountSubject map: fields: - name: name @@ -9175,22 +9189,22 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1alpha1.Subject +- name: io.k8s.api.flowcontrol.v1.Subject map: fields: - name: group type: - namedType: io.k8s.api.flowcontrol.v1alpha1.GroupSubject + namedType: io.k8s.api.flowcontrol.v1.GroupSubject - name: kind type: scalar: string default: "" - name: serviceAccount type: - namedType: io.k8s.api.flowcontrol.v1alpha1.ServiceAccountSubject + namedType: io.k8s.api.flowcontrol.v1.ServiceAccountSubject - name: user type: - namedType: io.k8s.api.flowcontrol.v1alpha1.UserSubject + namedType: io.k8s.api.flowcontrol.v1.UserSubject unions: - discriminator: kind fields: @@ -9200,7 +9214,7 @@ var schemaYAML = typed.YAMLObject(`types: discriminatorValue: ServiceAccount - fieldName: user discriminatorValue: User -- name: io.k8s.api.flowcontrol.v1alpha1.UserSubject +- name: io.k8s.api.flowcontrol.v1.UserSubject map: fields: - name: name @@ -9250,7 +9264,6 @@ var schemaYAML = typed.YAMLObject(`types: 
- name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -9396,7 +9409,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -9578,7 +9590,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -9724,7 +9735,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -9906,7 +9916,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -10052,7 +10061,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -10564,7 +10572,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: number type: scalar: numeric -- name: io.k8s.api.networking.v1alpha1.ClusterCIDR +- name: io.k8s.api.networking.v1alpha1.IPAddress map: fields: - name: apiVersion @@ -10579,27 +10587,30 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.networking.v1alpha1.ClusterCIDRSpec + namedType: io.k8s.api.networking.v1alpha1.IPAddressSpec default: {} -- name: io.k8s.api.networking.v1alpha1.ClusterCIDRSpec +- name: io.k8s.api.networking.v1alpha1.IPAddressSpec map: fields: - - name: ipv4 + - name: parentRef + type: + namedType: io.k8s.api.networking.v1alpha1.ParentReference +- name: io.k8s.api.networking.v1alpha1.ParentReference + map: + fields: + - name: group type: scalar: string - default: "" - - name: ipv6 + - name: name type: scalar: string - default: "" - - name: nodeSelector + - name: namespace type: - namedType: io.k8s.api.core.v1.NodeSelector - - name: perNodeHostBits + scalar: string + - name: resource type: - scalar: numeric - default: 0 -- name: io.k8s.api.networking.v1alpha1.IPAddress + scalar: string +- name: io.k8s.api.networking.v1alpha1.ServiceCIDR map: fields: - name: apiVersion @@ -10614,32 +10625,32 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.networking.v1alpha1.IPAddressSpec + namedType: io.k8s.api.networking.v1alpha1.ServiceCIDRSpec default: {} -- name: io.k8s.api.networking.v1alpha1.IPAddressSpec + - name: status + type: + namedType: io.k8s.api.networking.v1alpha1.ServiceCIDRStatus + default: {} +- name: io.k8s.api.networking.v1alpha1.ServiceCIDRSpec map: fields: - - name: parentRef + - name: cidrs type: - namedType: io.k8s.api.networking.v1alpha1.ParentReference -- name: io.k8s.api.networking.v1alpha1.ParentReference + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.networking.v1alpha1.ServiceCIDRStatus map: fields: - - name: group - type: - scalar: string - - name: name - type: - scalar: string - - name: namespace - type: - scalar: string - - name: resource - type: - scalar: string - - name: uid + - name: conditions type: - scalar: string + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type - name: io.k8s.api.networking.v1beta1.HTTPIngressPath map: fields: @@ 
-10695,7 +10706,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: servicePort type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - default: {} - name: io.k8s.api.networking.v1beta1.IngressClass map: fields: @@ -11061,29 +11071,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: observedGeneration type: scalar: numeric -- name: io.k8s.api.policy.v1beta1.AllowedCSIDriver - map: - fields: - - name: name - type: - scalar: string - default: "" -- name: io.k8s.api.policy.v1beta1.AllowedFlexVolume - map: - fields: - - name: driver - type: - scalar: string - default: "" -- name: io.k8s.api.policy.v1beta1.AllowedHostPath - map: - fields: - - name: pathPrefix - type: - scalar: string - - name: readOnly - type: - scalar: boolean - name: io.k8s.api.policy.v1beta1.Eviction map: fields: @@ -11100,40 +11087,6 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} -- name: io.k8s.api.policy.v1beta1.FSGroupStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.policy.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string -- name: io.k8s.api.policy.v1beta1.HostPortRange - map: - fields: - - name: max - type: - scalar: numeric - default: 0 - - name: min - type: - scalar: numeric - default: 0 -- name: io.k8s.api.policy.v1beta1.IDRange - map: - fields: - - name: max - type: - scalar: numeric - default: 0 - - name: min - type: - scalar: numeric - default: 0 - name: io.k8s.api.policy.v1beta1.PodDisruptionBudget map: fields: @@ -11205,195 +11158,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: observedGeneration type: scalar: numeric -- name: io.k8s.api.policy.v1beta1.PodSecurityPolicy - map: - fields: - - name: apiVersion - type: - scalar: string - - name: kind - type: - scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: spec - type: - namedType: io.k8s.api.policy.v1beta1.PodSecurityPolicySpec - default: {} -- name: io.k8s.api.policy.v1beta1.PodSecurityPolicySpec - map: - fields: - - name: allowPrivilegeEscalation - type: - scalar: boolean - - name: allowedCSIDrivers - type: - list: - elementType: - namedType: io.k8s.api.policy.v1beta1.AllowedCSIDriver - elementRelationship: atomic - - name: allowedCapabilities - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: allowedFlexVolumes - type: - list: - elementType: - namedType: io.k8s.api.policy.v1beta1.AllowedFlexVolume - elementRelationship: atomic - - name: allowedHostPaths - type: - list: - elementType: - namedType: io.k8s.api.policy.v1beta1.AllowedHostPath - elementRelationship: atomic - - name: allowedProcMountTypes - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: allowedUnsafeSysctls - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: defaultAddCapabilities - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: defaultAllowPrivilegeEscalation - type: - scalar: boolean - - name: forbiddenSysctls - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: fsGroup - type: - namedType: io.k8s.api.policy.v1beta1.FSGroupStrategyOptions - default: {} - - name: hostIPC - type: - scalar: boolean - - name: hostNetwork - type: - scalar: boolean - - name: hostPID - type: - scalar: boolean - - name: hostPorts - type: - list: - elementType: - namedType: 
io.k8s.api.policy.v1beta1.HostPortRange - elementRelationship: atomic - - name: privileged - type: - scalar: boolean - - name: readOnlyRootFilesystem - type: - scalar: boolean - - name: requiredDropCapabilities - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: runAsGroup - type: - namedType: io.k8s.api.policy.v1beta1.RunAsGroupStrategyOptions - - name: runAsUser - type: - namedType: io.k8s.api.policy.v1beta1.RunAsUserStrategyOptions - default: {} - - name: runtimeClass - type: - namedType: io.k8s.api.policy.v1beta1.RuntimeClassStrategyOptions - - name: seLinux - type: - namedType: io.k8s.api.policy.v1beta1.SELinuxStrategyOptions - default: {} - - name: supplementalGroups - type: - namedType: io.k8s.api.policy.v1beta1.SupplementalGroupsStrategyOptions - default: {} - - name: volumes - type: - list: - elementType: - scalar: string - elementRelationship: atomic -- name: io.k8s.api.policy.v1beta1.RunAsGroupStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.policy.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string - default: "" -- name: io.k8s.api.policy.v1beta1.RunAsUserStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.policy.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string - default: "" -- name: io.k8s.api.policy.v1beta1.RuntimeClassStrategyOptions - map: - fields: - - name: allowedRuntimeClassNames - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: defaultRuntimeClassName - type: - scalar: string -- name: io.k8s.api.policy.v1beta1.SELinuxStrategyOptions - map: - fields: - - name: rule - type: - scalar: string - default: "" - - name: seLinuxOptions - type: - namedType: io.k8s.api.core.v1.SELinuxOptions -- name: io.k8s.api.policy.v1beta1.SupplementalGroupsStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.policy.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string - name: io.k8s.api.rbac.v1.AggregationRule map: fields: @@ -11923,7 +11687,7 @@ var schemaYAML = typed.YAMLObject(`types: list: elementType: scalar: string - elementRelationship: associative + elementRelationship: atomic - name: selectedNode type: scalar: string @@ -12002,7 +11766,7 @@ var schemaYAML = typed.YAMLObject(`types: list: elementType: scalar: string - elementRelationship: associative + elementRelationship: atomic - name: io.k8s.api.resource.v1alpha2.ResourceClaimSpec map: fields: @@ -12440,7 +12204,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: time type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: io.k8s.api.storage.v1.VolumeNodeResources map: fields: @@ -12536,6 +12299,28 @@ var schemaYAML = typed.YAMLObject(`types: - name: detachError type: namedType: io.k8s.api.storage.v1alpha1.VolumeError +- name: io.k8s.api.storage.v1alpha1.VolumeAttributesClass + map: + fields: + - name: apiVersion + type: + scalar: string + - name: driverName + type: + scalar: string + default: "" + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: parameters + type: + map: + elementType: + scalar: string - name: io.k8s.api.storage.v1alpha1.VolumeError map: fields: @@ -12545,7 +12330,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: time type: namedType: 
io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: io.k8s.api.storage.v1beta1.CSIDriver map: fields: @@ -12795,7 +12579,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: time type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: io.k8s.api.storage.v1beta1.VolumeNodeResources map: fields: @@ -12810,7 +12593,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -12938,7 +12720,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: creationTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: deletionGracePeriodSeconds type: scalar: numeric diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/unstructured.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/unstructured.go index 8a58d9e87..a206bd326 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/unstructured.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/unstructured.go @@ -125,7 +125,7 @@ func (e *extractor) extractUnstructured(object *unstructured.Unstructured, field return nil, fmt.Errorf("failed to fetch the objectType: %v", err) } result := &unstructured.Unstructured{} - err = managedfields.ExtractInto(object, *objectType, fieldManager, result, subresource) + err = managedfields.ExtractInto(object, *objectType, fieldManager, result, subresource) //nolint:forbidigo if err != nil { return nil, fmt.Errorf("failed calling ExtractInto for unstructured: %v", err) } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidrspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidrspec.go deleted file mode 100644 index 8d5fa406b..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidrspec.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/client-go/applyconfigurations/core/v1" -) - -// ClusterCIDRSpecApplyConfiguration represents an declarative configuration of the ClusterCIDRSpec type for use -// with apply. -type ClusterCIDRSpecApplyConfiguration struct { - NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"` - PerNodeHostBits *int32 `json:"perNodeHostBits,omitempty"` - IPv4 *string `json:"ipv4,omitempty"` - IPv6 *string `json:"ipv6,omitempty"` -} - -// ClusterCIDRSpecApplyConfiguration constructs an declarative configuration of the ClusterCIDRSpec type for use with -// apply. -func ClusterCIDRSpec() *ClusterCIDRSpecApplyConfiguration { - return &ClusterCIDRSpecApplyConfiguration{} -} - -// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the NodeSelector field is set to the value of the last call. -func (b *ClusterCIDRSpecApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *ClusterCIDRSpecApplyConfiguration { - b.NodeSelector = value - return b -} - -// WithPerNodeHostBits sets the PerNodeHostBits field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the PerNodeHostBits field is set to the value of the last call. -func (b *ClusterCIDRSpecApplyConfiguration) WithPerNodeHostBits(value int32) *ClusterCIDRSpecApplyConfiguration { - b.PerNodeHostBits = &value - return b -} - -// WithIPv4 sets the IPv4 field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IPv4 field is set to the value of the last call. -func (b *ClusterCIDRSpecApplyConfiguration) WithIPv4(value string) *ClusterCIDRSpecApplyConfiguration { - b.IPv4 = &value - return b -} - -// WithIPv6 sets the IPv6 field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IPv6 field is set to the value of the last call. -func (b *ClusterCIDRSpecApplyConfiguration) WithIPv6(value string) *ClusterCIDRSpecApplyConfiguration { - b.IPv6 = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go index 14b10b19f..ce1049709 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go @@ -18,18 +18,13 @@ limitations under the License. package v1alpha1 -import ( - types "k8s.io/apimachinery/pkg/types" -) - // ParentReferenceApplyConfiguration represents an declarative configuration of the ParentReference type for use // with apply. type ParentReferenceApplyConfiguration struct { - Group *string `json:"group,omitempty"` - Resource *string `json:"resource,omitempty"` - Namespace *string `json:"namespace,omitempty"` - Name *string `json:"name,omitempty"` - UID *types.UID `json:"uid,omitempty"` + Group *string `json:"group,omitempty"` + Resource *string `json:"resource,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` } // ParentReferenceApplyConfiguration constructs an declarative configuration of the ParentReference type for use with @@ -69,11 +64,3 @@ func (b *ParentReferenceApplyConfiguration) WithName(value string) *ParentRefere b.Name = &value return b } - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. 
-func (b *ParentReferenceApplyConfiguration) WithUID(value types.UID) *ParentReferenceApplyConfiguration { - b.UID = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidr.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go similarity index 68% rename from vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidr.go rename to vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go index ad0eae919..f6d0a91e0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidr.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go @@ -27,55 +27,56 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterCIDRApplyConfiguration represents an declarative configuration of the ClusterCIDR type for use +// ServiceCIDRApplyConfiguration represents an declarative configuration of the ServiceCIDR type for use // with apply. -type ClusterCIDRApplyConfiguration struct { +type ServiceCIDRApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ClusterCIDRSpecApplyConfiguration `json:"spec,omitempty"` + Spec *ServiceCIDRSpecApplyConfiguration `json:"spec,omitempty"` + Status *ServiceCIDRStatusApplyConfiguration `json:"status,omitempty"` } -// ClusterCIDR constructs an declarative configuration of the ClusterCIDR type for use with +// ServiceCIDR constructs an declarative configuration of the ServiceCIDR type for use with // apply. -func ClusterCIDR(name string) *ClusterCIDRApplyConfiguration { - b := &ClusterCIDRApplyConfiguration{} +func ServiceCIDR(name string) *ServiceCIDRApplyConfiguration { + b := &ServiceCIDRApplyConfiguration{} b.WithName(name) - b.WithKind("ClusterCIDR") + b.WithKind("ServiceCIDR") b.WithAPIVersion("networking.k8s.io/v1alpha1") return b } -// ExtractClusterCIDR extracts the applied configuration owned by fieldManager from -// clusterCIDR. If no managedFields are found in clusterCIDR for fieldManager, a -// ClusterCIDRApplyConfiguration is returned with only the Name, Namespace (if applicable), +// ExtractServiceCIDR extracts the applied configuration owned by fieldManager from +// serviceCIDR. If no managedFields are found in serviceCIDR for fieldManager, a +// ServiceCIDRApplyConfiguration is returned with only the Name, Namespace (if applicable), // APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. -// clusterCIDR must be a unmodified ClusterCIDR API object that was retrieved from the Kubernetes API. -// ExtractClusterCIDR provides a way to perform a extract/modify-in-place/apply workflow. +// serviceCIDR must be a unmodified ServiceCIDR API object that was retrieved from the Kubernetes API. +// ExtractServiceCIDR provides a way to perform a extract/modify-in-place/apply workflow. // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractClusterCIDR(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string) (*ClusterCIDRApplyConfiguration, error) { - return extractClusterCIDR(clusterCIDR, fieldManager, "") +func ExtractServiceCIDR(serviceCIDR *networkingv1alpha1.ServiceCIDR, fieldManager string) (*ServiceCIDRApplyConfiguration, error) { + return extractServiceCIDR(serviceCIDR, fieldManager, "") } -// ExtractClusterCIDRStatus is the same as ExtractClusterCIDR except +// ExtractServiceCIDRStatus is the same as ExtractServiceCIDR except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractClusterCIDRStatus(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string) (*ClusterCIDRApplyConfiguration, error) { - return extractClusterCIDR(clusterCIDR, fieldManager, "status") +func ExtractServiceCIDRStatus(serviceCIDR *networkingv1alpha1.ServiceCIDR, fieldManager string) (*ServiceCIDRApplyConfiguration, error) { + return extractServiceCIDR(serviceCIDR, fieldManager, "status") } -func extractClusterCIDR(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string, subresource string) (*ClusterCIDRApplyConfiguration, error) { - b := &ClusterCIDRApplyConfiguration{} - err := managedfields.ExtractInto(clusterCIDR, internal.Parser().Type("io.k8s.api.networking.v1alpha1.ClusterCIDR"), fieldManager, b, subresource) +func extractServiceCIDR(serviceCIDR *networkingv1alpha1.ServiceCIDR, fieldManager string, subresource string) (*ServiceCIDRApplyConfiguration, error) { + b := &ServiceCIDRApplyConfiguration{} + err := managedfields.ExtractInto(serviceCIDR, internal.Parser().Type("io.k8s.api.networking.v1alpha1.ServiceCIDR"), fieldManager, b, subresource) if err != nil { return nil, err } - b.WithName(clusterCIDR.Name) + b.WithName(serviceCIDR.Name) - b.WithKind("ClusterCIDR") + b.WithKind("ServiceCIDR") b.WithAPIVersion("networking.k8s.io/v1alpha1") return b, nil } @@ -83,7 +84,7 @@ func extractClusterCIDR(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManage // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithKind(value string) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithKind(value string) *ServiceCIDRApplyConfiguration { b.Kind = &value return b } @@ -91,7 +92,7 @@ func (b *ClusterCIDRApplyConfiguration) WithKind(value string) *ClusterCIDRApply // WithAPIVersion sets the APIVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithAPIVersion(value string) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithAPIVersion(value string) *ServiceCIDRApplyConfiguration { b.APIVersion = &value return b } @@ -99,7 +100,7 @@ func (b *ClusterCIDRApplyConfiguration) WithAPIVersion(value string) *ClusterCID // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. 
-func (b *ClusterCIDRApplyConfiguration) WithName(value string) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithName(value string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Name = &value return b @@ -108,7 +109,7 @@ func (b *ClusterCIDRApplyConfiguration) WithName(value string) *ClusterCIDRApply // WithGenerateName sets the GenerateName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GenerateName field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithGenerateName(value string) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithGenerateName(value string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.GenerateName = &value return b @@ -117,7 +118,7 @@ func (b *ClusterCIDRApplyConfiguration) WithGenerateName(value string) *ClusterC // WithNamespace sets the Namespace field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Namespace field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithNamespace(value string) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithNamespace(value string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Namespace = &value return b @@ -126,7 +127,7 @@ func (b *ClusterCIDRApplyConfiguration) WithNamespace(value string) *ClusterCIDR // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithUID(value types.UID) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithUID(value types.UID) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.UID = &value return b @@ -135,7 +136,7 @@ func (b *ClusterCIDRApplyConfiguration) WithUID(value types.UID) *ClusterCIDRApp // WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithResourceVersion(value string) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithResourceVersion(value string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.ResourceVersion = &value return b @@ -144,7 +145,7 @@ func (b *ClusterCIDRApplyConfiguration) WithResourceVersion(value string) *Clust // WithGeneration sets the Generation field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Generation field is set to the value of the last call. 
-func (b *ClusterCIDRApplyConfiguration) WithGeneration(value int64) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithGeneration(value int64) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Generation = &value return b @@ -153,7 +154,7 @@ func (b *ClusterCIDRApplyConfiguration) WithGeneration(value int64) *ClusterCIDR // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.CreationTimestamp = &value return b @@ -162,7 +163,7 @@ func (b *ClusterCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionTimestamp = &value return b @@ -171,7 +172,7 @@ func (b *ClusterCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionGracePeriodSeconds = &value return b @@ -181,7 +182,7 @@ func (b *ClusterCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Labels field, // overwriting an existing map entries in Labels field with the same key. -func (b *ClusterCIDRApplyConfiguration) WithLabels(entries map[string]string) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithLabels(entries map[string]string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Labels == nil && len(entries) > 0 { b.Labels = make(map[string]string, len(entries)) @@ -196,7 +197,7 @@ func (b *ClusterCIDRApplyConfiguration) WithLabels(entries map[string]string) *C // and returns the receiver, so that objects can be build by chaining "With" function invocations. 
// If called multiple times, the entries provided by each call will be put on the Annotations field, // overwriting an existing map entries in Annotations field with the same key. -func (b *ClusterCIDRApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Annotations == nil && len(entries) > 0 { b.Annotations = make(map[string]string, len(entries)) @@ -210,7 +211,7 @@ func (b *ClusterCIDRApplyConfiguration) WithAnnotations(entries map[string]strin // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ClusterCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { @@ -224,7 +225,7 @@ func (b *ClusterCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR // WithFinalizers adds the given value to the Finalizers field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Finalizers field. -func (b *ClusterCIDRApplyConfiguration) WithFinalizers(values ...string) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithFinalizers(values ...string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { b.Finalizers = append(b.Finalizers, values[i]) @@ -232,7 +233,7 @@ func (b *ClusterCIDRApplyConfiguration) WithFinalizers(values ...string) *Cluste return b } -func (b *ClusterCIDRApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { +func (b *ServiceCIDRApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} } @@ -241,7 +242,15 @@ func (b *ClusterCIDRApplyConfiguration) ensureObjectMetaApplyConfigurationExists // WithSpec sets the Spec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Spec field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithSpec(value *ClusterCIDRSpecApplyConfiguration) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithSpec(value *ServiceCIDRSpecApplyConfiguration) *ServiceCIDRApplyConfiguration { b.Spec = value return b } + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *ServiceCIDRApplyConfiguration) WithStatus(value *ServiceCIDRStatusApplyConfiguration) *ServiceCIDRApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go new file mode 100644 index 000000000..302d69194 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go @@ -0,0 +1,41 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ServiceCIDRSpecApplyConfiguration represents an declarative configuration of the ServiceCIDRSpec type for use +// with apply. +type ServiceCIDRSpecApplyConfiguration struct { + CIDRs []string `json:"cidrs,omitempty"` +} + +// ServiceCIDRSpecApplyConfiguration constructs an declarative configuration of the ServiceCIDRSpec type for use with +// apply. +func ServiceCIDRSpec() *ServiceCIDRSpecApplyConfiguration { + return &ServiceCIDRSpecApplyConfiguration{} +} + +// WithCIDRs adds the given value to the CIDRs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the CIDRs field. +func (b *ServiceCIDRSpecApplyConfiguration) WithCIDRs(values ...string) *ServiceCIDRSpecApplyConfiguration { + for i := range values { + b.CIDRs = append(b.CIDRs, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go new file mode 100644 index 000000000..5afc549a6 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ServiceCIDRStatusApplyConfiguration represents an declarative configuration of the ServiceCIDRStatus type for use +// with apply. 
+type ServiceCIDRStatusApplyConfiguration struct { + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// ServiceCIDRStatusApplyConfiguration constructs an declarative configuration of the ServiceCIDRStatus type for use with +// apply. +func ServiceCIDRStatus() *ServiceCIDRStatusApplyConfiguration { + return &ServiceCIDRStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ServiceCIDRStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *ServiceCIDRStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedflexvolume.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedflexvolume.go deleted file mode 100644 index 30c3724cf..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedflexvolume.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// AllowedFlexVolumeApplyConfiguration represents an declarative configuration of the AllowedFlexVolume type for use -// with apply. -type AllowedFlexVolumeApplyConfiguration struct { - Driver *string `json:"driver,omitempty"` -} - -// AllowedFlexVolumeApplyConfiguration constructs an declarative configuration of the AllowedFlexVolume type for use with -// apply. -func AllowedFlexVolume() *AllowedFlexVolumeApplyConfiguration { - return &AllowedFlexVolumeApplyConfiguration{} -} - -// WithDriver sets the Driver field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Driver field is set to the value of the last call. -func (b *AllowedFlexVolumeApplyConfiguration) WithDriver(value string) *AllowedFlexVolumeApplyConfiguration { - b.Driver = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedhostpath.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedhostpath.go deleted file mode 100644 index 493815d8d..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedhostpath.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// AllowedHostPathApplyConfiguration represents an declarative configuration of the AllowedHostPath type for use -// with apply. -type AllowedHostPathApplyConfiguration struct { - PathPrefix *string `json:"pathPrefix,omitempty"` - ReadOnly *bool `json:"readOnly,omitempty"` -} - -// AllowedHostPathApplyConfiguration constructs an declarative configuration of the AllowedHostPath type for use with -// apply. -func AllowedHostPath() *AllowedHostPathApplyConfiguration { - return &AllowedHostPathApplyConfiguration{} -} - -// WithPathPrefix sets the PathPrefix field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the PathPrefix field is set to the value of the last call. -func (b *AllowedHostPathApplyConfiguration) WithPathPrefix(value string) *AllowedHostPathApplyConfiguration { - b.PathPrefix = &value - return b -} - -// WithReadOnly sets the ReadOnly field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ReadOnly field is set to the value of the last call. -func (b *AllowedHostPathApplyConfiguration) WithReadOnly(value bool) *AllowedHostPathApplyConfiguration { - b.ReadOnly = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/fsgroupstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/fsgroupstrategyoptions.go deleted file mode 100644 index 06803b439..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/fsgroupstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/policy/v1beta1" -) - -// FSGroupStrategyOptionsApplyConfiguration represents an declarative configuration of the FSGroupStrategyOptions type for use -// with apply. -type FSGroupStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.FSGroupStrategyType `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// FSGroupStrategyOptionsApplyConfiguration constructs an declarative configuration of the FSGroupStrategyOptions type for use with -// apply. 
-func FSGroupStrategyOptions() *FSGroupStrategyOptionsApplyConfiguration { - return &FSGroupStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *FSGroupStrategyOptionsApplyConfiguration) WithRule(value v1beta1.FSGroupStrategyType) *FSGroupStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *FSGroupStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *FSGroupStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/hostportrange.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/hostportrange.go deleted file mode 100644 index 7c7968813..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/hostportrange.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// HostPortRangeApplyConfiguration represents an declarative configuration of the HostPortRange type for use -// with apply. -type HostPortRangeApplyConfiguration struct { - Min *int32 `json:"min,omitempty"` - Max *int32 `json:"max,omitempty"` -} - -// HostPortRangeApplyConfiguration constructs an declarative configuration of the HostPortRange type for use with -// apply. -func HostPortRange() *HostPortRangeApplyConfiguration { - return &HostPortRangeApplyConfiguration{} -} - -// WithMin sets the Min field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Min field is set to the value of the last call. -func (b *HostPortRangeApplyConfiguration) WithMin(value int32) *HostPortRangeApplyConfiguration { - b.Min = &value - return b -} - -// WithMax sets the Max field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Max field is set to the value of the last call. 
-func (b *HostPortRangeApplyConfiguration) WithMax(value int32) *HostPortRangeApplyConfiguration { - b.Max = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/idrange.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/idrange.go deleted file mode 100644 index af46f7658..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/idrange.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// IDRangeApplyConfiguration represents an declarative configuration of the IDRange type for use -// with apply. -type IDRangeApplyConfiguration struct { - Min *int64 `json:"min,omitempty"` - Max *int64 `json:"max,omitempty"` -} - -// IDRangeApplyConfiguration constructs an declarative configuration of the IDRange type for use with -// apply. -func IDRange() *IDRangeApplyConfiguration { - return &IDRangeApplyConfiguration{} -} - -// WithMin sets the Min field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Min field is set to the value of the last call. -func (b *IDRangeApplyConfiguration) WithMin(value int64) *IDRangeApplyConfiguration { - b.Min = &value - return b -} - -// WithMax sets the Max field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Max field is set to the value of the last call. -func (b *IDRangeApplyConfiguration) WithMax(value int64) *IDRangeApplyConfiguration { - b.Max = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicyspec.go deleted file mode 100644 index bf951cf56..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicyspec.go +++ /dev/null @@ -1,285 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/policy/v1beta1" -) - -// PodSecurityPolicySpecApplyConfiguration represents an declarative configuration of the PodSecurityPolicySpec type for use -// with apply. 
-type PodSecurityPolicySpecApplyConfiguration struct { - Privileged *bool `json:"privileged,omitempty"` - DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty"` - RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty"` - AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty"` - Volumes []v1beta1.FSType `json:"volumes,omitempty"` - HostNetwork *bool `json:"hostNetwork,omitempty"` - HostPorts []HostPortRangeApplyConfiguration `json:"hostPorts,omitempty"` - HostPID *bool `json:"hostPID,omitempty"` - HostIPC *bool `json:"hostIPC,omitempty"` - SELinux *SELinuxStrategyOptionsApplyConfiguration `json:"seLinux,omitempty"` - RunAsUser *RunAsUserStrategyOptionsApplyConfiguration `json:"runAsUser,omitempty"` - RunAsGroup *RunAsGroupStrategyOptionsApplyConfiguration `json:"runAsGroup,omitempty"` - SupplementalGroups *SupplementalGroupsStrategyOptionsApplyConfiguration `json:"supplementalGroups,omitempty"` - FSGroup *FSGroupStrategyOptionsApplyConfiguration `json:"fsGroup,omitempty"` - ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty"` - DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty"` - AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty"` - AllowedHostPaths []AllowedHostPathApplyConfiguration `json:"allowedHostPaths,omitempty"` - AllowedFlexVolumes []AllowedFlexVolumeApplyConfiguration `json:"allowedFlexVolumes,omitempty"` - AllowedCSIDrivers []AllowedCSIDriverApplyConfiguration `json:"allowedCSIDrivers,omitempty"` - AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty"` - ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty"` - AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty"` - RuntimeClass *RuntimeClassStrategyOptionsApplyConfiguration `json:"runtimeClass,omitempty"` -} - -// PodSecurityPolicySpecApplyConfiguration constructs an declarative configuration of the PodSecurityPolicySpec type for use with -// apply. -func PodSecurityPolicySpec() *PodSecurityPolicySpecApplyConfiguration { - return &PodSecurityPolicySpecApplyConfiguration{} -} - -// WithPrivileged sets the Privileged field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Privileged field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithPrivileged(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.Privileged = &value - return b -} - -// WithDefaultAddCapabilities adds the given value to the DefaultAddCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the DefaultAddCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithDefaultAddCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.DefaultAddCapabilities = append(b.DefaultAddCapabilities, values[i]) - } - return b -} - -// WithRequiredDropCapabilities adds the given value to the RequiredDropCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
-// If called multiple times, values provided by each call will be appended to the RequiredDropCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRequiredDropCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.RequiredDropCapabilities = append(b.RequiredDropCapabilities, values[i]) - } - return b -} - -// WithAllowedCapabilities adds the given value to the AllowedCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedCapabilities = append(b.AllowedCapabilities, values[i]) - } - return b -} - -// WithVolumes adds the given value to the Volumes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Volumes field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithVolumes(values ...v1beta1.FSType) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.Volumes = append(b.Volumes, values[i]) - } - return b -} - -// WithHostNetwork sets the HostNetwork field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostNetwork field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostNetwork(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostNetwork = &value - return b -} - -// WithHostPorts adds the given value to the HostPorts field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the HostPorts field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostPorts(values ...*HostPortRangeApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithHostPorts") - } - b.HostPorts = append(b.HostPorts, *values[i]) - } - return b -} - -// WithHostPID sets the HostPID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostPID field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostPID(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostPID = &value - return b -} - -// WithHostIPC sets the HostIPC field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostIPC field is set to the value of the last call. 
-func (b *PodSecurityPolicySpecApplyConfiguration) WithHostIPC(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostIPC = &value - return b -} - -// WithSELinux sets the SELinux field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SELinux field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithSELinux(value *SELinuxStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.SELinux = value - return b -} - -// WithRunAsUser sets the RunAsUser field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RunAsUser field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRunAsUser(value *RunAsUserStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RunAsUser = value - return b -} - -// WithRunAsGroup sets the RunAsGroup field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RunAsGroup field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRunAsGroup(value *RunAsGroupStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RunAsGroup = value - return b -} - -// WithSupplementalGroups sets the SupplementalGroups field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SupplementalGroups field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithSupplementalGroups(value *SupplementalGroupsStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.SupplementalGroups = value - return b -} - -// WithFSGroup sets the FSGroup field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the FSGroup field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithFSGroup(value *FSGroupStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.FSGroup = value - return b -} - -// WithReadOnlyRootFilesystem sets the ReadOnlyRootFilesystem field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ReadOnlyRootFilesystem field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithReadOnlyRootFilesystem(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.ReadOnlyRootFilesystem = &value - return b -} - -// WithDefaultAllowPrivilegeEscalation sets the DefaultAllowPrivilegeEscalation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DefaultAllowPrivilegeEscalation field is set to the value of the last call. 
-func (b *PodSecurityPolicySpecApplyConfiguration) WithDefaultAllowPrivilegeEscalation(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.DefaultAllowPrivilegeEscalation = &value - return b -} - -// WithAllowPrivilegeEscalation sets the AllowPrivilegeEscalation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AllowPrivilegeEscalation field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowPrivilegeEscalation(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.AllowPrivilegeEscalation = &value - return b -} - -// WithAllowedHostPaths adds the given value to the AllowedHostPaths field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedHostPaths field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedHostPaths(values ...*AllowedHostPathApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedHostPaths") - } - b.AllowedHostPaths = append(b.AllowedHostPaths, *values[i]) - } - return b -} - -// WithAllowedFlexVolumes adds the given value to the AllowedFlexVolumes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedFlexVolumes field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedFlexVolumes(values ...*AllowedFlexVolumeApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedFlexVolumes") - } - b.AllowedFlexVolumes = append(b.AllowedFlexVolumes, *values[i]) - } - return b -} - -// WithAllowedCSIDrivers adds the given value to the AllowedCSIDrivers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedCSIDrivers field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedCSIDrivers(values ...*AllowedCSIDriverApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedCSIDrivers") - } - b.AllowedCSIDrivers = append(b.AllowedCSIDrivers, *values[i]) - } - return b -} - -// WithAllowedUnsafeSysctls adds the given value to the AllowedUnsafeSysctls field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedUnsafeSysctls field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedUnsafeSysctls(values ...string) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedUnsafeSysctls = append(b.AllowedUnsafeSysctls, values[i]) - } - return b -} - -// WithForbiddenSysctls adds the given value to the ForbiddenSysctls field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
-// If called multiple times, values provided by each call will be appended to the ForbiddenSysctls field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithForbiddenSysctls(values ...string) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.ForbiddenSysctls = append(b.ForbiddenSysctls, values[i]) - } - return b -} - -// WithAllowedProcMountTypes adds the given value to the AllowedProcMountTypes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedProcMountTypes field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedProcMountTypes(values ...v1.ProcMountType) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedProcMountTypes = append(b.AllowedProcMountTypes, values[i]) - } - return b -} - -// WithRuntimeClass sets the RuntimeClass field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RuntimeClass field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRuntimeClass(value *RuntimeClassStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RuntimeClass = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasgroupstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasgroupstrategyoptions.go deleted file mode 100644 index fcfcfbe6b..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasgroupstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/policy/v1beta1" -) - -// RunAsGroupStrategyOptionsApplyConfiguration represents an declarative configuration of the RunAsGroupStrategyOptions type for use -// with apply. -type RunAsGroupStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.RunAsGroupStrategy `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// RunAsGroupStrategyOptionsApplyConfiguration constructs an declarative configuration of the RunAsGroupStrategyOptions type for use with -// apply. -func RunAsGroupStrategyOptions() *RunAsGroupStrategyOptionsApplyConfiguration { - return &RunAsGroupStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. 
-func (b *RunAsGroupStrategyOptionsApplyConfiguration) WithRule(value v1beta1.RunAsGroupStrategy) *RunAsGroupStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *RunAsGroupStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *RunAsGroupStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasuserstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasuserstrategyoptions.go deleted file mode 100644 index a6d6ee58e..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasuserstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/policy/v1beta1" -) - -// RunAsUserStrategyOptionsApplyConfiguration represents an declarative configuration of the RunAsUserStrategyOptions type for use -// with apply. -type RunAsUserStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.RunAsUserStrategy `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// RunAsUserStrategyOptionsApplyConfiguration constructs an declarative configuration of the RunAsUserStrategyOptions type for use with -// apply. -func RunAsUserStrategyOptions() *RunAsUserStrategyOptionsApplyConfiguration { - return &RunAsUserStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *RunAsUserStrategyOptionsApplyConfiguration) WithRule(value v1beta1.RunAsUserStrategy) *RunAsUserStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. 
-func (b *RunAsUserStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *RunAsUserStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runtimeclassstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runtimeclassstrategyoptions.go deleted file mode 100644 index c19a7ce61..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runtimeclassstrategyoptions.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// RuntimeClassStrategyOptionsApplyConfiguration represents an declarative configuration of the RuntimeClassStrategyOptions type for use -// with apply. -type RuntimeClassStrategyOptionsApplyConfiguration struct { - AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames,omitempty"` - DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty"` -} - -// RuntimeClassStrategyOptionsApplyConfiguration constructs an declarative configuration of the RuntimeClassStrategyOptions type for use with -// apply. -func RuntimeClassStrategyOptions() *RuntimeClassStrategyOptionsApplyConfiguration { - return &RuntimeClassStrategyOptionsApplyConfiguration{} -} - -// WithAllowedRuntimeClassNames adds the given value to the AllowedRuntimeClassNames field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedRuntimeClassNames field. -func (b *RuntimeClassStrategyOptionsApplyConfiguration) WithAllowedRuntimeClassNames(values ...string) *RuntimeClassStrategyOptionsApplyConfiguration { - for i := range values { - b.AllowedRuntimeClassNames = append(b.AllowedRuntimeClassNames, values[i]) - } - return b -} - -// WithDefaultRuntimeClassName sets the DefaultRuntimeClassName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DefaultRuntimeClassName field is set to the value of the last call. -func (b *RuntimeClassStrategyOptionsApplyConfiguration) WithDefaultRuntimeClassName(value string) *RuntimeClassStrategyOptionsApplyConfiguration { - b.DefaultRuntimeClassName = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/selinuxstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/selinuxstrategyoptions.go deleted file mode 100644 index de7ede618..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/selinuxstrategyoptions.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/policy/v1beta1" - v1 "k8s.io/client-go/applyconfigurations/core/v1" -) - -// SELinuxStrategyOptionsApplyConfiguration represents an declarative configuration of the SELinuxStrategyOptions type for use -// with apply. -type SELinuxStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.SELinuxStrategy `json:"rule,omitempty"` - SELinuxOptions *v1.SELinuxOptionsApplyConfiguration `json:"seLinuxOptions,omitempty"` -} - -// SELinuxStrategyOptionsApplyConfiguration constructs an declarative configuration of the SELinuxStrategyOptions type for use with -// apply. -func SELinuxStrategyOptions() *SELinuxStrategyOptionsApplyConfiguration { - return &SELinuxStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *SELinuxStrategyOptionsApplyConfiguration) WithRule(value v1beta1.SELinuxStrategy) *SELinuxStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithSELinuxOptions sets the SELinuxOptions field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SELinuxOptions field is set to the value of the last call. -func (b *SELinuxStrategyOptionsApplyConfiguration) WithSELinuxOptions(value *v1.SELinuxOptionsApplyConfiguration) *SELinuxStrategyOptionsApplyConfiguration { - b.SELinuxOptions = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/supplementalgroupsstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/supplementalgroupsstrategyoptions.go deleted file mode 100644 index 9e4a9bb2c..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/supplementalgroupsstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. 
- -package v1beta1 - -import ( - v1beta1 "k8s.io/api/policy/v1beta1" -) - -// SupplementalGroupsStrategyOptionsApplyConfiguration represents an declarative configuration of the SupplementalGroupsStrategyOptions type for use -// with apply. -type SupplementalGroupsStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.SupplementalGroupsStrategyType `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// SupplementalGroupsStrategyOptionsApplyConfiguration constructs an declarative configuration of the SupplementalGroupsStrategyOptions type for use with -// apply. -func SupplementalGroupsStrategyOptions() *SupplementalGroupsStrategyOptionsApplyConfiguration { - return &SupplementalGroupsStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *SupplementalGroupsStrategyOptionsApplyConfiguration) WithRule(value v1beta1.SupplementalGroupsStrategyType) *SupplementalGroupsStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *SupplementalGroupsStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *SupplementalGroupsStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go similarity index 59% rename from vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicy.go rename to vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go index 46cfc4de1..9d4c47625 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1beta1 +package v1alpha1 import ( - policyv1beta1 "k8s.io/api/policy/v1beta1" + v1alpha1 "k8s.io/api/storage/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,63 +27,64 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodSecurityPolicyApplyConfiguration represents an declarative configuration of the PodSecurityPolicy type for use +// VolumeAttributesClassApplyConfiguration represents an declarative configuration of the VolumeAttributesClass type for use // with apply. 
-type PodSecurityPolicyApplyConfiguration struct { +type VolumeAttributesClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PodSecurityPolicySpecApplyConfiguration `json:"spec,omitempty"` + DriverName *string `json:"driverName,omitempty"` + Parameters map[string]string `json:"parameters,omitempty"` } -// PodSecurityPolicy constructs an declarative configuration of the PodSecurityPolicy type for use with +// VolumeAttributesClass constructs an declarative configuration of the VolumeAttributesClass type for use with // apply. -func PodSecurityPolicy(name string) *PodSecurityPolicyApplyConfiguration { - b := &PodSecurityPolicyApplyConfiguration{} +func VolumeAttributesClass(name string) *VolumeAttributesClassApplyConfiguration { + b := &VolumeAttributesClassApplyConfiguration{} b.WithName(name) - b.WithKind("PodSecurityPolicy") - b.WithAPIVersion("policy/v1beta1") + b.WithKind("VolumeAttributesClass") + b.WithAPIVersion("storage.k8s.io/v1alpha1") return b } -// ExtractPodSecurityPolicy extracts the applied configuration owned by fieldManager from -// podSecurityPolicy. If no managedFields are found in podSecurityPolicy for fieldManager, a -// PodSecurityPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), +// ExtractVolumeAttributesClass extracts the applied configuration owned by fieldManager from +// volumeAttributesClass. If no managedFields are found in volumeAttributesClass for fieldManager, a +// VolumeAttributesClassApplyConfiguration is returned with only the Name, Namespace (if applicable), // APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. -// podSecurityPolicy must be a unmodified PodSecurityPolicy API object that was retrieved from the Kubernetes API. -// ExtractPodSecurityPolicy provides a way to perform a extract/modify-in-place/apply workflow. +// volumeAttributesClass must be a unmodified VolumeAttributesClass API object that was retrieved from the Kubernetes API. +// ExtractVolumeAttributesClass provides a way to perform a extract/modify-in-place/apply workflow. // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPodSecurityPolicy(podSecurityPolicy *policyv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) { - return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "") +func ExtractVolumeAttributesClass(volumeAttributesClass *v1alpha1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { + return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "") } -// ExtractPodSecurityPolicyStatus is the same as ExtractPodSecurityPolicy except +// ExtractVolumeAttributesClassStatus is the same as ExtractVolumeAttributesClass except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractPodSecurityPolicyStatus(podSecurityPolicy *policyv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) { - return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "status") +func ExtractVolumeAttributesClassStatus(volumeAttributesClass *v1alpha1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { + return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "status") } -func extractPodSecurityPolicy(podSecurityPolicy *policyv1beta1.PodSecurityPolicy, fieldManager string, subresource string) (*PodSecurityPolicyApplyConfiguration, error) { - b := &PodSecurityPolicyApplyConfiguration{} - err := managedfields.ExtractInto(podSecurityPolicy, internal.Parser().Type("io.k8s.api.policy.v1beta1.PodSecurityPolicy"), fieldManager, b, subresource) +func extractVolumeAttributesClass(volumeAttributesClass *v1alpha1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) { + b := &VolumeAttributesClassApplyConfiguration{} + err := managedfields.ExtractInto(volumeAttributesClass, internal.Parser().Type("io.k8s.api.storage.v1alpha1.VolumeAttributesClass"), fieldManager, b, subresource) if err != nil { return nil, err } - b.WithName(podSecurityPolicy.Name) + b.WithName(volumeAttributesClass.Name) - b.WithKind("PodSecurityPolicy") - b.WithAPIVersion("policy/v1beta1") + b.WithKind("VolumeAttributesClass") + b.WithAPIVersion("storage.k8s.io/v1alpha1") return b, nil } // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithKind(value string) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithKind(value string) *VolumeAttributesClassApplyConfiguration { b.Kind = &value return b } @@ -91,7 +92,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithKind(value string) *PodSecurit // WithAPIVersion sets the APIVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithAPIVersion(value string) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithAPIVersion(value string) *VolumeAttributesClassApplyConfiguration { b.APIVersion = &value return b } @@ -99,7 +100,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithAPIVersion(value string) *PodS // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. 
-func (b *PodSecurityPolicyApplyConfiguration) WithName(value string) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithName(value string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Name = &value return b @@ -108,7 +109,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithName(value string) *PodSecurit // WithGenerateName sets the GenerateName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GenerateName field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithGenerateName(value string) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithGenerateName(value string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.GenerateName = &value return b @@ -117,7 +118,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithGenerateName(value string) *Po // WithNamespace sets the Namespace field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Namespace field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithNamespace(value string) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithNamespace(value string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Namespace = &value return b @@ -126,7 +127,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithNamespace(value string) *PodSe // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithUID(value types.UID) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithUID(value types.UID) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.UID = &value return b @@ -135,7 +136,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithUID(value types.UID) *PodSecur // WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithResourceVersion(value string) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithResourceVersion(value string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.ResourceVersion = &value return b @@ -144,7 +145,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithResourceVersion(value string) // WithGeneration sets the Generation field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Generation field is set to the value of the last call. 
-func (b *PodSecurityPolicyApplyConfiguration) WithGeneration(value int64) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithGeneration(value int64) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Generation = &value return b @@ -153,7 +154,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithGeneration(value int64) *PodSe // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.CreationTimestamp = &value return b @@ -162,7 +163,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithCreationTimestamp(value metav1 // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionTimestamp = &value return b @@ -171,7 +172,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithDeletionTimestamp(value metav1 // WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionGracePeriodSeconds = &value return b @@ -181,7 +182,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(val // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Labels field, // overwriting an existing map entries in Labels field with the same key. 
-func (b *PodSecurityPolicyApplyConfiguration) WithLabels(entries map[string]string) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithLabels(entries map[string]string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Labels == nil && len(entries) > 0 { b.Labels = make(map[string]string, len(entries)) @@ -196,7 +197,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithLabels(entries map[string]stri // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Annotations field, // overwriting an existing map entries in Annotations field with the same key. -func (b *PodSecurityPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithAnnotations(entries map[string]string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Annotations == nil && len(entries) > 0 { b.Annotations = make(map[string]string, len(entries)) @@ -210,7 +211,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithAnnotations(entries map[string // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *PodSecurityPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { @@ -224,7 +225,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithOwnerReferences(values ...*v1. // WithFinalizers adds the given value to the Finalizers field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Finalizers field. -func (b *PodSecurityPolicyApplyConfiguration) WithFinalizers(values ...string) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithFinalizers(values ...string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { b.Finalizers = append(b.Finalizers, values[i]) @@ -232,16 +233,30 @@ func (b *PodSecurityPolicyApplyConfiguration) WithFinalizers(values ...string) * return b } -func (b *PodSecurityPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { +func (b *VolumeAttributesClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} } } -// WithSpec sets the Spec field in the declarative configuration to the given value +// WithDriverName sets the DriverName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Spec field is set to the value of the last call. 
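// ----------------------------------------------------------------------------
// Editorial note, not part of the vendored patch: this file rename replaces the
// PodSecurityPolicy builder with VolumeAttributesClass, which is used like the
// other generated apply configurations. WithDriverName and WithParameters
// appear just below in this hunk; the typed
// StorageV1alpha1().VolumeAttributesClasses().Apply call and the driver name /
// parameter values are assumptions for illustration only.
package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1"
	"k8s.io/client-go/kubernetes"
)

func applyVolumeAttributesClass(ctx context.Context, cs kubernetes.Interface) error {
	// Build the class declaratively: a CSI driver name plus driver-specific
	// mutable parameters.
	vac := storagev1alpha1.VolumeAttributesClass("example-gold").
		WithDriverName("disk.csi.example.com").
		WithParameters(map[string]string{"iops": "5000", "throughput": "250"})

	_, err := cs.StorageV1alpha1().VolumeAttributesClasses().
		Apply(ctx, vac, metav1.ApplyOptions{FieldManager: "example-manager"})
	return err
}
// ----------------------------------------------------------------------------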
-func (b *PodSecurityPolicyApplyConfiguration) WithSpec(value *PodSecurityPolicySpecApplyConfiguration) *PodSecurityPolicyApplyConfiguration { - b.Spec = value +// If called multiple times, the DriverName field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithDriverName(value string) *VolumeAttributesClassApplyConfiguration { + b.DriverName = &value + return b +} + +// WithParameters puts the entries into the Parameters field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Parameters field, +// overwriting an existing map entries in Parameters field with the same key. +func (b *VolumeAttributesClassApplyConfiguration) WithParameters(entries map[string]string) *VolumeAttributesClassApplyConfiguration { + if b.Parameters == nil && len(entries) > 0 { + b.Parameters = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Parameters[k] = v + } return b } diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go index 102ce49bf..df0e0f997 100644 --- a/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -19,6 +19,7 @@ package discovery import ( "context" "encoding/json" + goerrors "errors" "fmt" "mime" "net/http" @@ -419,6 +420,12 @@ func (e *ErrGroupDiscoveryFailed) Error() string { return fmt.Sprintf("unable to retrieve the complete list of server APIs: %s", strings.Join(groups, ", ")) } +// Is makes it possible for the callers to use `errors.Is(` helper on errors wrapped with ErrGroupDiscoveryFailed error. +func (e *ErrGroupDiscoveryFailed) Is(target error) bool { + _, ok := target.(*ErrGroupDiscoveryFailed) + return ok +} + // IsGroupDiscoveryFailedError returns true if the provided error indicates the server was unable to discover // a complete list of APIs for the client to use. func IsGroupDiscoveryFailedError(err error) bool { @@ -426,6 +433,16 @@ func IsGroupDiscoveryFailedError(err error) bool { return err != nil && ok } +// GroupDiscoveryFailedErrorGroups returns true if the error is an ErrGroupDiscoveryFailed error, +// along with the map of group versions that failed discovery. 
+func GroupDiscoveryFailedErrorGroups(err error) (map[schema.GroupVersion]error, bool) { + var groupDiscoveryError *ErrGroupDiscoveryFailed + if err != nil && goerrors.As(err, &groupDiscoveryError) { + return groupDiscoveryError.Groups, true + } + return nil, false +} + func ServerGroupsAndResources(d DiscoveryInterface) ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { var sgs *metav1.APIGroupList var resources []*metav1.APIResourceList @@ -637,16 +654,7 @@ func (d *DiscoveryClient) ServerVersion() (*version.Info, error) { func (d *DiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { data, err := d.restClient.Get().AbsPath("/openapi/v2").SetHeader("Accept", openAPIV2mimePb).Do(context.TODO()).Raw() if err != nil { - if errors.IsForbidden(err) || errors.IsNotFound(err) || errors.IsNotAcceptable(err) { - // single endpoint not found/registered in old server, try to fetch old endpoint - // TODO: remove this when kubectl/client-go don't work with 1.9 server - data, err = d.restClient.Get().AbsPath("/swagger-2.0.0.pb-v1").Do(context.TODO()).Raw() - if err != nil { - return nil, err - } - } else { - return nil, err - } + return nil, err } document := &openapi_v2.Document{} err = proto.Unmarshal(data, document) diff --git a/vendor/k8s.io/client-go/informers/factory.go b/vendor/k8s.io/client-go/informers/factory.go index 7dd0ae635..9fc86441a 100644 --- a/vendor/k8s.io/client-go/informers/factory.go +++ b/vendor/k8s.io/client-go/informers/factory.go @@ -60,6 +60,7 @@ type sharedInformerFactory struct { lock sync.Mutex defaultResync time.Duration customResync map[reflect.Type]time.Duration + transform cache.TransformFunc informers map[reflect.Type]cache.SharedIndexInformer // startedInformers is used for tracking which informers have been started. @@ -98,6 +99,14 @@ func WithNamespace(namespace string) SharedInformerOption { } } +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + // NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. func NewSharedInformerFactory(client kubernetes.Interface, defaultResync time.Duration) SharedInformerFactory { return NewSharedInformerFactoryWithOptions(client, defaultResync) @@ -202,6 +211,7 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal } informer = newFunc(f.client, resyncPeriod) + informer.SetTransform(f.transform) f.informers[informerType] = informer return informer diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/interface.go b/vendor/k8s.io/client-go/informers/flowcontrol/interface.go index 1d3ca09ef..48dd9a8a1 100644 --- a/vendor/k8s.io/client-go/informers/flowcontrol/interface.go +++ b/vendor/k8s.io/client-go/informers/flowcontrol/interface.go @@ -19,7 +19,7 @@ limitations under the License. package flowcontrol import ( - v1alpha1 "k8s.io/client-go/informers/flowcontrol/v1alpha1" + v1 "k8s.io/client-go/informers/flowcontrol/v1" v1beta1 "k8s.io/client-go/informers/flowcontrol/v1beta1" v1beta2 "k8s.io/client-go/informers/flowcontrol/v1beta2" v1beta3 "k8s.io/client-go/informers/flowcontrol/v1beta3" @@ -28,8 +28,8 @@ import ( // Interface provides access to each of this group's versions. type Interface interface { - // V1alpha1 provides access to shared informers for resources in V1alpha1. 
- V1alpha1() v1alpha1.Interface + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface // V1beta2 provides access to shared informers for resources in V1beta2. @@ -49,9 +49,9 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } -// V1alpha1 returns a new v1alpha1.Interface. -func (g *group) V1alpha1() v1alpha1.Interface { - return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) } // V1beta1 returns a new v1beta1.Interface. diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/flowschema.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1/flowschema.go similarity index 78% rename from vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/flowschema.go rename to vendor/k8s.io/client-go/informers/flowcontrol/v1/flowschema.go index 9a4a90448..30c41b189 100644 --- a/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/flowschema.go +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1/flowschema.go @@ -16,19 +16,19 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( "context" time "time" - flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/flowcontrol/v1alpha1" + v1 "k8s.io/client-go/listers/flowcontrol/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // FlowSchemas. 
type FlowSchemaInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.FlowSchemaLister + Lister() v1.FlowSchemaLister } type flowSchemaInformer struct { @@ -57,20 +57,20 @@ func NewFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Durati func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.FlowcontrolV1alpha1().FlowSchemas().List(context.TODO(), options) + return client.FlowcontrolV1().FlowSchemas().List(context.TODO(), options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.FlowcontrolV1alpha1().FlowSchemas().Watch(context.TODO(), options) + return client.FlowcontrolV1().FlowSchemas().Watch(context.TODO(), options) }, }, - &flowcontrolv1alpha1.FlowSchema{}, + &flowcontrolv1.FlowSchema{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *flowSchemaInformer) defaultInformer(client kubernetes.Interface, resync } func (f *flowSchemaInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&flowcontrolv1alpha1.FlowSchema{}, f.defaultInformer) + return f.factory.InformerFor(&flowcontrolv1.FlowSchema{}, f.defaultInformer) } -func (f *flowSchemaInformer) Lister() v1alpha1.FlowSchemaLister { - return v1alpha1.NewFlowSchemaLister(f.Informer().GetIndexer()) +func (f *flowSchemaInformer) Lister() v1.FlowSchemaLister { + return v1.NewFlowSchemaLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1/interface.go similarity index 99% rename from vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/interface.go rename to vendor/k8s.io/client-go/informers/flowcontrol/v1/interface.go index 7097c0058..3de934900 100644 --- a/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/interface.go +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1/interface.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( internalinterfaces "k8s.io/client-go/informers/internalinterfaces" diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1/prioritylevelconfiguration.go similarity index 76% rename from vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/prioritylevelconfiguration.go rename to vendor/k8s.io/client-go/informers/flowcontrol/v1/prioritylevelconfiguration.go index b81f5c9c3..7092c2572 100644 --- a/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1/prioritylevelconfiguration.go @@ -16,19 +16,19 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. 
-package v1alpha1 +package v1 import ( "context" time "time" - flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/flowcontrol/v1alpha1" + v1 "k8s.io/client-go/listers/flowcontrol/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PriorityLevelConfigurations. type PriorityLevelConfigurationInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.PriorityLevelConfigurationLister + Lister() v1.PriorityLevelConfigurationLister } type priorityLevelConfigurationInformer struct { @@ -57,20 +57,20 @@ func NewPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPe func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.FlowcontrolV1alpha1().PriorityLevelConfigurations().List(context.TODO(), options) + return client.FlowcontrolV1().PriorityLevelConfigurations().List(context.TODO(), options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.FlowcontrolV1alpha1().PriorityLevelConfigurations().Watch(context.TODO(), options) + return client.FlowcontrolV1().PriorityLevelConfigurations().Watch(context.TODO(), options) }, }, - &flowcontrolv1alpha1.PriorityLevelConfiguration{}, + &flowcontrolv1.PriorityLevelConfiguration{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *priorityLevelConfigurationInformer) defaultInformer(client kubernetes.I } func (f *priorityLevelConfigurationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&flowcontrolv1alpha1.PriorityLevelConfiguration{}, f.defaultInformer) + return f.factory.InformerFor(&flowcontrolv1.PriorityLevelConfiguration{}, f.defaultInformer) } -func (f *priorityLevelConfigurationInformer) Lister() v1alpha1.PriorityLevelConfigurationLister { - return v1alpha1.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) +func (f *priorityLevelConfigurationInformer) Lister() v1.PriorityLevelConfigurationLister { + return v1.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/generic.go b/vendor/k8s.io/client-go/informers/generic.go index 5495239b2..680768815 100644 --- a/vendor/k8s.io/client-go/informers/generic.go +++ b/vendor/k8s.io/client-go/informers/generic.go @@ -45,7 +45,7 @@ import ( eventsv1 "k8s.io/api/events/v1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" v1beta3 "k8s.io/api/flowcontrol/v1beta3" @@ -259,11 
+259,11 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case extensionsv1beta1.SchemeGroupVersion.WithResource("replicasets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().ReplicaSets().Informer()}, nil - // Group=flowcontrol.apiserver.k8s.io, Version=v1alpha1 - case flowcontrolv1alpha1.SchemeGroupVersion.WithResource("flowschemas"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1alpha1().FlowSchemas().Informer()}, nil - case flowcontrolv1alpha1.SchemeGroupVersion.WithResource("prioritylevelconfigurations"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1alpha1().PriorityLevelConfigurations().Informer()}, nil + // Group=flowcontrol.apiserver.k8s.io, Version=v1 + case flowcontrolv1.SchemeGroupVersion.WithResource("flowschemas"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1().FlowSchemas().Informer()}, nil + case flowcontrolv1.SchemeGroupVersion.WithResource("prioritylevelconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1().PriorityLevelConfigurations().Informer()}, nil // Group=flowcontrol.apiserver.k8s.io, Version=v1beta1 case flowcontrolv1beta1.SchemeGroupVersion.WithResource("flowschemas"): @@ -296,10 +296,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1().NetworkPolicies().Informer()}, nil // Group=networking.k8s.io, Version=v1alpha1 - case networkingv1alpha1.SchemeGroupVersion.WithResource("clustercidrs"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().ClusterCIDRs().Informer()}, nil case networkingv1alpha1.SchemeGroupVersion.WithResource("ipaddresses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().IPAddresses().Informer()}, nil + case networkingv1alpha1.SchemeGroupVersion.WithResource("servicecidrs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().ServiceCIDRs().Informer()}, nil // Group=networking.k8s.io, Version=v1beta1 case networkingv1beta1.SchemeGroupVersion.WithResource("ingresses"): @@ -326,8 +326,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource // Group=policy, Version=v1beta1 case policyv1beta1.SchemeGroupVersion.WithResource("poddisruptionbudgets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Policy().V1beta1().PodDisruptionBudgets().Informer()}, nil - case policyv1beta1.SchemeGroupVersion.WithResource("podsecuritypolicies"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Policy().V1beta1().PodSecurityPolicies().Informer()}, nil // Group=rbac.authorization.k8s.io, Version=v1 case rbacv1.SchemeGroupVersion.WithResource("clusterroles"): @@ -398,6 +396,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1alpha1().CSIStorageCapacities().Informer()}, nil case storagev1alpha1.SchemeGroupVersion.WithResource("volumeattachments"): return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1alpha1().VolumeAttachments().Informer()}, nil + case 
storagev1alpha1.SchemeGroupVersion.WithResource("volumeattributesclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1alpha1().VolumeAttributesClasses().Informer()}, nil // Group=storage.k8s.io, Version=v1beta1 case storagev1beta1.SchemeGroupVersion.WithResource("csidrivers"): diff --git a/vendor/k8s.io/client-go/informers/networking/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/networking/v1alpha1/interface.go index 07e7d208c..ae9883b55 100644 --- a/vendor/k8s.io/client-go/informers/networking/v1alpha1/interface.go +++ b/vendor/k8s.io/client-go/informers/networking/v1alpha1/interface.go @@ -24,10 +24,10 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { - // ClusterCIDRs returns a ClusterCIDRInformer. - ClusterCIDRs() ClusterCIDRInformer // IPAddresses returns a IPAddressInformer. IPAddresses() IPAddressInformer + // ServiceCIDRs returns a ServiceCIDRInformer. + ServiceCIDRs() ServiceCIDRInformer } type version struct { @@ -41,12 +41,12 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } -// ClusterCIDRs returns a ClusterCIDRInformer. -func (v *version) ClusterCIDRs() ClusterCIDRInformer { - return &clusterCIDRInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} - // IPAddresses returns a IPAddressInformer. func (v *version) IPAddresses() IPAddressInformer { return &iPAddressInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } + +// ServiceCIDRs returns a ServiceCIDRInformer. +func (v *version) ServiceCIDRs() ServiceCIDRInformer { + return &serviceCIDRInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/networking/v1alpha1/clustercidr.go b/vendor/k8s.io/client-go/informers/networking/v1alpha1/servicecidr.go similarity index 69% rename from vendor/k8s.io/client-go/informers/networking/v1alpha1/clustercidr.go rename to vendor/k8s.io/client-go/informers/networking/v1alpha1/servicecidr.go index cefd0f8a1..57e602143 100644 --- a/vendor/k8s.io/client-go/informers/networking/v1alpha1/clustercidr.go +++ b/vendor/k8s.io/client-go/informers/networking/v1alpha1/servicecidr.go @@ -32,58 +32,58 @@ import ( cache "k8s.io/client-go/tools/cache" ) -// ClusterCIDRInformer provides access to a shared informer and lister for -// ClusterCIDRs. -type ClusterCIDRInformer interface { +// ServiceCIDRInformer provides access to a shared informer and lister for +// ServiceCIDRs. +type ServiceCIDRInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.ClusterCIDRLister + Lister() v1alpha1.ServiceCIDRLister } -type clusterCIDRInformer struct { +type serviceCIDRInformer struct { factory internalinterfaces.SharedInformerFactory tweakListOptions internalinterfaces.TweakListOptionsFunc } -// NewClusterCIDRInformer constructs a new informer for ClusterCIDR type. +// NewServiceCIDRInformer constructs a new informer for ServiceCIDR type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. 
-func NewClusterCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredClusterCIDRInformer(client, resyncPeriod, indexers, nil) +func NewServiceCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredServiceCIDRInformer(client, resyncPeriod, indexers, nil) } -// NewFilteredClusterCIDRInformer constructs a new informer for ClusterCIDR type. +// NewFilteredServiceCIDRInformer constructs a new informer for ServiceCIDR type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. -func NewFilteredClusterCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { +func NewFilteredServiceCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.NetworkingV1alpha1().ClusterCIDRs().List(context.TODO(), options) + return client.NetworkingV1alpha1().ServiceCIDRs().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.NetworkingV1alpha1().ClusterCIDRs().Watch(context.TODO(), options) + return client.NetworkingV1alpha1().ServiceCIDRs().Watch(context.TODO(), options) }, }, - &networkingv1alpha1.ClusterCIDR{}, + &networkingv1alpha1.ServiceCIDR{}, resyncPeriod, indexers, ) } -func (f *clusterCIDRInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredClusterCIDRInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +func (f *serviceCIDRInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredServiceCIDRInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } -func (f *clusterCIDRInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&networkingv1alpha1.ClusterCIDR{}, f.defaultInformer) +func (f *serviceCIDRInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&networkingv1alpha1.ServiceCIDR{}, f.defaultInformer) } -func (f *clusterCIDRInformer) Lister() v1alpha1.ClusterCIDRLister { - return v1alpha1.NewClusterCIDRLister(f.Informer().GetIndexer()) +func (f *serviceCIDRInformer) Lister() v1alpha1.ServiceCIDRLister { + return v1alpha1.NewServiceCIDRLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/policy/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/policy/v1beta1/interface.go index a6c1825d2..055c8adc5 100644 --- a/vendor/k8s.io/client-go/informers/policy/v1beta1/interface.go +++ b/vendor/k8s.io/client-go/informers/policy/v1beta1/interface.go @@ -26,8 +26,6 @@ import ( type Interface interface { // PodDisruptionBudgets returns a PodDisruptionBudgetInformer. 
PodDisruptionBudgets() PodDisruptionBudgetInformer - // PodSecurityPolicies returns a PodSecurityPolicyInformer. - PodSecurityPolicies() PodSecurityPolicyInformer } type version struct { @@ -45,8 +43,3 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList func (v *version) PodDisruptionBudgets() PodDisruptionBudgetInformer { return &podDisruptionBudgetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } - -// PodSecurityPolicies returns a PodSecurityPolicyInformer. -func (v *version) PodSecurityPolicies() PodSecurityPolicyInformer { - return &podSecurityPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} diff --git a/vendor/k8s.io/client-go/informers/policy/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/informers/policy/v1beta1/podsecuritypolicy.go deleted file mode 100644 index b87d23434..000000000 --- a/vendor/k8s.io/client-go/informers/policy/v1beta1/podsecuritypolicy.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "context" - time "time" - - policyv1beta1 "k8s.io/api/policy/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/policy/v1beta1" - cache "k8s.io/client-go/tools/cache" -) - -// PodSecurityPolicyInformer provides access to a shared informer and lister for -// PodSecurityPolicies. -type PodSecurityPolicyInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1beta1.PodSecurityPolicyLister -} - -type podSecurityPolicyInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// NewPodSecurityPolicyInformer constructs a new informer for PodSecurityPolicy type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewPodSecurityPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredPodSecurityPolicyInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredPodSecurityPolicyInformer constructs a new informer for PodSecurityPolicy type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredPodSecurityPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.PolicyV1beta1().PodSecurityPolicies().List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.PolicyV1beta1().PodSecurityPolicies().Watch(context.TODO(), options) - }, - }, - &policyv1beta1.PodSecurityPolicy{}, - resyncPeriod, - indexers, - ) -} - -func (f *podSecurityPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredPodSecurityPolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *podSecurityPolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&policyv1beta1.PodSecurityPolicy{}, f.defaultInformer) -} - -func (f *podSecurityPolicyInformer) Lister() v1beta1.PodSecurityPolicyLister { - return v1beta1.NewPodSecurityPolicyLister(f.Informer().GetIndexer()) -} diff --git a/vendor/k8s.io/client-go/informers/storage/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/storage/v1alpha1/interface.go index 033d3b10a..bda3b1add 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1alpha1/interface.go +++ b/vendor/k8s.io/client-go/informers/storage/v1alpha1/interface.go @@ -28,6 +28,8 @@ type Interface interface { CSIStorageCapacities() CSIStorageCapacityInformer // VolumeAttachments returns a VolumeAttachmentInformer. VolumeAttachments() VolumeAttachmentInformer + // VolumeAttributesClasses returns a VolumeAttributesClassInformer. + VolumeAttributesClasses() VolumeAttributesClassInformer } type version struct { @@ -50,3 +52,8 @@ func (v *version) CSIStorageCapacities() CSIStorageCapacityInformer { func (v *version) VolumeAttachments() VolumeAttachmentInformer { return &volumeAttachmentInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } + +// VolumeAttributesClasses returns a VolumeAttributesClassInformer. +func (v *version) VolumeAttributesClasses() VolumeAttributesClassInformer { + return &volumeAttributesClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattributesclass.go new file mode 100644 index 000000000..5e62e2f42 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattributesclass.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "context" + time "time" + + storagev1alpha1 "k8s.io/api/storage/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/storage/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// VolumeAttributesClassInformer provides access to a shared informer and lister for +// VolumeAttributesClasses. +type VolumeAttributesClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.VolumeAttributesClassLister +} + +type volumeAttributesClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewVolumeAttributesClassInformer constructs a new informer for VolumeAttributesClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewVolumeAttributesClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredVolumeAttributesClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredVolumeAttributesClassInformer constructs a new informer for VolumeAttributesClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredVolumeAttributesClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1alpha1().VolumeAttributesClasses().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1alpha1().VolumeAttributesClasses().Watch(context.TODO(), options) + }, + }, + &storagev1alpha1.VolumeAttributesClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *volumeAttributesClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredVolumeAttributesClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *volumeAttributesClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&storagev1alpha1.VolumeAttributesClass{}, f.defaultInformer) +} + +func (f *volumeAttributesClassInformer) Lister() v1alpha1.VolumeAttributesClassLister { + return v1alpha1.NewVolumeAttributesClassLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index 6345f2fb6..a0095d086 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -52,7 +52,7 @@ import ( eventsv1 "k8s.io/client-go/kubernetes/typed/events/v1" eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" extensionsv1beta1 
"k8s.io/client-go/kubernetes/typed/extensions/v1beta1" - flowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1" + flowcontrolv1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1" flowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" flowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2" flowcontrolv1beta3 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3" @@ -109,7 +109,7 @@ type Interface interface { EventsV1() eventsv1.EventsV1Interface EventsV1beta1() eventsv1beta1.EventsV1beta1Interface ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface - FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha1Interface + FlowcontrolV1() flowcontrolv1.FlowcontrolV1Interface FlowcontrolV1beta1() flowcontrolv1beta1.FlowcontrolV1beta1Interface FlowcontrolV1beta2() flowcontrolv1beta2.FlowcontrolV1beta2Interface FlowcontrolV1beta3() flowcontrolv1beta3.FlowcontrolV1beta3Interface @@ -165,7 +165,7 @@ type Clientset struct { eventsV1 *eventsv1.EventsV1Client eventsV1beta1 *eventsv1beta1.EventsV1beta1Client extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client - flowcontrolV1alpha1 *flowcontrolv1alpha1.FlowcontrolV1alpha1Client + flowcontrolV1 *flowcontrolv1.FlowcontrolV1Client flowcontrolV1beta1 *flowcontrolv1beta1.FlowcontrolV1beta1Client flowcontrolV1beta2 *flowcontrolv1beta2.FlowcontrolV1beta2Client flowcontrolV1beta3 *flowcontrolv1beta3.FlowcontrolV1beta3Client @@ -334,9 +334,9 @@ func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Inter return c.extensionsV1beta1 } -// FlowcontrolV1alpha1 retrieves the FlowcontrolV1alpha1Client -func (c *Clientset) FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha1Interface { - return c.flowcontrolV1alpha1 +// FlowcontrolV1 retrieves the FlowcontrolV1Client +func (c *Clientset) FlowcontrolV1() flowcontrolv1.FlowcontrolV1Interface { + return c.flowcontrolV1 } // FlowcontrolV1beta1 retrieves the FlowcontrolV1beta1Client @@ -604,7 +604,7 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } - cs.flowcontrolV1alpha1, err = flowcontrolv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + cs.flowcontrolV1, err = flowcontrolv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } @@ -742,7 +742,7 @@ func New(c rest.Interface) *Clientset { cs.eventsV1 = eventsv1.New(c) cs.eventsV1beta1 = eventsv1beta1.New(c) cs.extensionsV1beta1 = extensionsv1beta1.New(c) - cs.flowcontrolV1alpha1 = flowcontrolv1alpha1.New(c) + cs.flowcontrolV1 = flowcontrolv1.New(c) cs.flowcontrolV1beta1 = flowcontrolv1beta1.New(c) cs.flowcontrolV1beta2 = flowcontrolv1beta2.New(c) cs.flowcontrolV1beta3 = flowcontrolv1beta3.New(c) diff --git a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go index 25de81caa..fc529873f 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go @@ -82,8 +82,8 @@ import ( fakeeventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1/fake" extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" fakeextensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake" - flowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1" - fakeflowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake" + flowcontrolv1 
"k8s.io/client-go/kubernetes/typed/flowcontrol/v1" + fakeflowcontrolv1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake" flowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" fakeflowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake" flowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2" @@ -324,9 +324,9 @@ func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Inter return &fakeextensionsv1beta1.FakeExtensionsV1beta1{Fake: &c.Fake} } -// FlowcontrolV1alpha1 retrieves the FlowcontrolV1alpha1Client -func (c *Clientset) FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha1Interface { - return &fakeflowcontrolv1alpha1.FakeFlowcontrolV1alpha1{Fake: &c.Fake} +// FlowcontrolV1 retrieves the FlowcontrolV1Client +func (c *Clientset) FlowcontrolV1() flowcontrolv1.FlowcontrolV1Interface { + return &fakeflowcontrolv1.FakeFlowcontrolV1{Fake: &c.Fake} } // FlowcontrolV1beta1 retrieves the FlowcontrolV1beta1Client diff --git a/vendor/k8s.io/client-go/kubernetes/fake/register.go b/vendor/k8s.io/client-go/kubernetes/fake/register.go index 009184185..6b80d6833 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/register.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/register.go @@ -48,7 +48,7 @@ import ( eventsv1 "k8s.io/api/events/v1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" @@ -110,7 +110,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ eventsv1.AddToScheme, eventsv1beta1.AddToScheme, extensionsv1beta1.AddToScheme, - flowcontrolv1alpha1.AddToScheme, + flowcontrolv1.AddToScheme, flowcontrolv1beta1.AddToScheme, flowcontrolv1beta2.AddToScheme, flowcontrolv1beta3.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index 64d3ce2a7..f44055fbf 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -48,7 +48,7 @@ import ( eventsv1 "k8s.io/api/events/v1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" @@ -110,7 +110,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ eventsv1.AddToScheme, eventsv1beta1.AddToScheme, extensionsv1beta1.AddToScheme, - flowcontrolv1alpha1.AddToScheme, + flowcontrolv1.AddToScheme, flowcontrolv1beta1.AddToScheme, flowcontrolv1beta2.AddToScheme, flowcontrolv1beta3.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/doc.go similarity index 97% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/doc.go index df51baa4d..3af5d054f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/doc.go @@ -17,4 +17,4 @@ limitations under the License. 
// Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. -package v1alpha1 +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/doc.go similarity index 100% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/doc.go diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowcontrol_client.go similarity index 72% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowcontrol_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowcontrol_client.go index 72d5eff0c..d15f4b242 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowcontrol_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowcontrol_client.go @@ -19,26 +19,26 @@ limitations under the License. package fake import ( - v1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1" + v1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1" rest "k8s.io/client-go/rest" testing "k8s.io/client-go/testing" ) -type FakeFlowcontrolV1alpha1 struct { +type FakeFlowcontrolV1 struct { *testing.Fake } -func (c *FakeFlowcontrolV1alpha1) FlowSchemas() v1alpha1.FlowSchemaInterface { +func (c *FakeFlowcontrolV1) FlowSchemas() v1.FlowSchemaInterface { return &FakeFlowSchemas{c} } -func (c *FakeFlowcontrolV1alpha1) PriorityLevelConfigurations() v1alpha1.PriorityLevelConfigurationInterface { +func (c *FakeFlowcontrolV1) PriorityLevelConfigurations() v1.PriorityLevelConfigurationInterface { return &FakePriorityLevelConfigurations{c} } // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. 
-func (c *FakeFlowcontrolV1alpha1) RESTClient() rest.Interface { +func (c *FakeFlowcontrolV1) RESTClient() rest.Interface { var ret *rest.RESTClient return ret } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go similarity index 67% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go index f36763889..922a60d89 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go @@ -23,38 +23,38 @@ import ( json "encoding/json" "fmt" - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/flowcontrol/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1alpha1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1" + flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" testing "k8s.io/client-go/testing" ) // FakeFlowSchemas implements FlowSchemaInterface type FakeFlowSchemas struct { - Fake *FakeFlowcontrolV1alpha1 + Fake *FakeFlowcontrolV1 } -var flowschemasResource = v1alpha1.SchemeGroupVersion.WithResource("flowschemas") +var flowschemasResource = v1.SchemeGroupVersion.WithResource("flowschemas") -var flowschemasKind = v1alpha1.SchemeGroupVersion.WithKind("FlowSchema") +var flowschemasKind = v1.SchemeGroupVersion.WithKind("FlowSchema") // Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.FlowSchema, err error) { +func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.FlowSchema, err error) { obj, err := c.Fake. - Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1alpha1.FlowSchema{}) + Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1.FlowSchema{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.FlowSchema), err + return obj.(*v1.FlowSchema), err } // List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. -func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.FlowSchemaList, err error) { +func (c *FakeFlowSchemas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.FlowSchemaList, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1alpha1.FlowSchemaList{}) + Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1.FlowSchemaList{}) if obj == nil { return nil, err } @@ -63,8 +63,8 @@ func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result if label == nil { label = labels.Everything() } - list := &v1alpha1.FlowSchemaList{ListMeta: obj.(*v1alpha1.FlowSchemaList).ListMeta} - for _, item := range obj.(*v1alpha1.FlowSchemaList).Items { + list := &v1.FlowSchemaList{ListMeta: obj.(*v1.FlowSchemaList).ListMeta} + for _, item := range obj.(*v1.FlowSchemaList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -73,69 +73,69 @@ func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result } // Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *FakeFlowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeFlowSchemas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(flowschemasResource, opts)) } // Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.CreateOptions) (result *v1alpha1.FlowSchema, err error) { +func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.CreateOptions) (result *v1.FlowSchema, err error) { obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1alpha1.FlowSchema{}) + Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1.FlowSchema{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.FlowSchema), err + return obj.(*v1.FlowSchema), err } // Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (result *v1alpha1.FlowSchema, err error) { +func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1alpha1.FlowSchema{}) + Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1.FlowSchema{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.FlowSchema), err + return obj.(*v1.FlowSchema), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (*v1alpha1.FlowSchema, error) { +func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error) { obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1alpha1.FlowSchema{}) + Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1.FlowSchema{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.FlowSchema), err + return obj.(*v1.FlowSchema), err } // Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(flowschemasResource, name, opts), &v1alpha1.FlowSchema{}) + Invokes(testing.NewRootDeleteActionWithOptions(flowschemasResource, name, opts), &v1.FlowSchema{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(flowschemasResource, listOpts) - _, err := c.Fake.Invokes(action, &v1alpha1.FlowSchemaList{}) + _, err := c.Fake.Invokes(action, &v1.FlowSchemaList{}) return err } // Patch applies the patch and returns the patched flowSchema. -func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.FlowSchema, err error) { +func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FlowSchema, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1alpha1.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1.FlowSchema{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.FlowSchema), err + return obj.(*v1.FlowSchema), err } // Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1alpha1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.FlowSchema, err error) { +func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) { if flowSchema == nil { return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") } @@ -148,16 +148,16 @@ func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1al return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data), &v1alpha1.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data), &v1.FlowSchema{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.FlowSchema), err + return obj.(*v1.FlowSchema), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1alpha1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.FlowSchema, err error) { +func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) { if flowSchema == nil { return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") } @@ -170,9 +170,9 @@ func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontr return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data, "status"), &v1.FlowSchema{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.FlowSchema), err + return obj.(*v1.FlowSchema), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go similarity index 64% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go index 6512c36f6..27d958674 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go @@ -23,38 +23,38 @@ import ( json "encoding/json" "fmt" - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/flowcontrol/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1alpha1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1" + flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" testing "k8s.io/client-go/testing" ) // FakePriorityLevelConfigurations implements PriorityLevelConfigurationInterface type FakePriorityLevelConfigurations struct { - Fake *FakeFlowcontrolV1alpha1 + Fake *FakeFlowcontrolV1 } -var prioritylevelconfigurationsResource = v1alpha1.SchemeGroupVersion.WithResource("prioritylevelconfigurations") +var prioritylevelconfigurationsResource = v1.SchemeGroupVersion.WithResource("prioritylevelconfigurations") -var prioritylevelconfigurationsKind = v1alpha1.SchemeGroupVersion.WithKind("PriorityLevelConfiguration") +var prioritylevelconfigurationsKind = v1.SchemeGroupVersion.WithKind("PriorityLevelConfiguration") // Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityLevelConfiguration, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1alpha1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1.PriorityLevelConfiguration{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.PriorityLevelConfiguration), err + return obj.(*v1.PriorityLevelConfiguration), err } // List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PriorityLevelConfigurationList, err error) { +func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityLevelConfigurationList, err error) { obj, err := c.Fake. - Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1alpha1.PriorityLevelConfigurationList{}) + Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1.PriorityLevelConfigurationList{}) if obj == nil { return nil, err } @@ -63,8 +63,8 @@ func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.List if label == nil { label = labels.Everything() } - list := &v1alpha1.PriorityLevelConfigurationList{ListMeta: obj.(*v1alpha1.PriorityLevelConfigurationList).ListMeta} - for _, item := range obj.(*v1alpha1.PriorityLevelConfigurationList).Items { + list := &v1.PriorityLevelConfigurationList{ListMeta: obj.(*v1.PriorityLevelConfigurationList).ListMeta} + for _, item := range obj.(*v1.PriorityLevelConfigurationList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -73,69 +73,69 @@ func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.List } // Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(prioritylevelconfigurationsResource, opts)) } // Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.CreateOptions) (result *v1.PriorityLevelConfiguration, err error) { obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1alpha1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1.PriorityLevelConfiguration{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.PriorityLevelConfiguration), err + return obj.(*v1.PriorityLevelConfiguration), err } // Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. 
-func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1alpha1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1.PriorityLevelConfiguration{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.PriorityLevelConfiguration), err + return obj.(*v1.PriorityLevelConfiguration), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1alpha1.PriorityLevelConfiguration, error) { +func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1alpha1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1.PriorityLevelConfiguration{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.PriorityLevelConfiguration), err + return obj.(*v1.PriorityLevelConfiguration), err } // Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(prioritylevelconfigurationsResource, name, opts), &v1alpha1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootDeleteActionWithOptions(prioritylevelconfigurationsResource, name, opts), &v1.PriorityLevelConfiguration{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(prioritylevelconfigurationsResource, listOpts) - _, err := c.Fake.Invokes(action, &v1alpha1.PriorityLevelConfigurationList{}) + _, err := c.Fake.Invokes(action, &v1.PriorityLevelConfigurationList{}) return err } // Patch applies the patch and returns the patched priorityLevelConfiguration. 
-func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityLevelConfiguration, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1alpha1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1.PriorityLevelConfiguration{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.PriorityLevelConfiguration), err + return obj.(*v1.PriorityLevelConfiguration), err } // Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. -func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) { if priorityLevelConfiguration == nil { return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") } @@ -148,16 +148,16 @@ func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLev return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data), &v1alpha1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data), &v1.PriorityLevelConfiguration{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.PriorityLevelConfiguration), err + return obj.(*v1.PriorityLevelConfiguration), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) { if priorityLevelConfiguration == nil { return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") } @@ -170,9 +170,9 @@ func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, prior return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, "status"), &v1.PriorityLevelConfiguration{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.PriorityLevelConfiguration), err + return obj.(*v1.PriorityLevelConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go similarity index 64% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go index c6f2d9405..3d7d93ef1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go @@ -16,39 +16,39 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( "net/http" - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/api/flowcontrol/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) -type FlowcontrolV1alpha1Interface interface { +type FlowcontrolV1Interface interface { RESTClient() rest.Interface FlowSchemasGetter PriorityLevelConfigurationsGetter } -// FlowcontrolV1alpha1Client is used to interact with features provided by the flowcontrol.apiserver.k8s.io group. -type FlowcontrolV1alpha1Client struct { +// FlowcontrolV1Client is used to interact with features provided by the flowcontrol.apiserver.k8s.io group. +type FlowcontrolV1Client struct { restClient rest.Interface } -func (c *FlowcontrolV1alpha1Client) FlowSchemas() FlowSchemaInterface { +func (c *FlowcontrolV1Client) FlowSchemas() FlowSchemaInterface { return newFlowSchemas(c) } -func (c *FlowcontrolV1alpha1Client) PriorityLevelConfigurations() PriorityLevelConfigurationInterface { +func (c *FlowcontrolV1Client) PriorityLevelConfigurations() PriorityLevelConfigurationInterface { return newPriorityLevelConfigurations(c) } -// NewForConfig creates a new FlowcontrolV1alpha1Client for the given config. +// NewForConfig creates a new FlowcontrolV1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(c *rest.Config) (*FlowcontrolV1alpha1Client, error) { +func NewForConfig(c *rest.Config) (*FlowcontrolV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -60,9 +60,9 @@ func NewForConfig(c *rest.Config) (*FlowcontrolV1alpha1Client, error) { return NewForConfigAndClient(&config, httpClient) } -// NewForConfigAndClient creates a new FlowcontrolV1alpha1Client for the given config and http client. +// NewForConfigAndClient creates a new FlowcontrolV1Client for the given config and http client. // Note the http client provided takes precedence over the configured transport values. 
-func NewForConfigAndClient(c *rest.Config, h *http.Client) (*FlowcontrolV1alpha1Client, error) { +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*FlowcontrolV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -71,12 +71,12 @@ func NewForConfigAndClient(c *rest.Config, h *http.Client) (*FlowcontrolV1alpha1 if err != nil { return nil, err } - return &FlowcontrolV1alpha1Client{client}, nil + return &FlowcontrolV1Client{client}, nil } -// NewForConfigOrDie creates a new FlowcontrolV1alpha1Client for the given config and +// NewForConfigOrDie creates a new FlowcontrolV1Client for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *FlowcontrolV1alpha1Client { +func NewForConfigOrDie(c *rest.Config) *FlowcontrolV1Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -84,13 +84,13 @@ func NewForConfigOrDie(c *rest.Config) *FlowcontrolV1alpha1Client { return client } -// New creates a new FlowcontrolV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *FlowcontrolV1alpha1Client { - return &FlowcontrolV1alpha1Client{c} +// New creates a new FlowcontrolV1Client for the given RESTClient. +func New(c rest.Interface) *FlowcontrolV1Client { + return &FlowcontrolV1Client{c} } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() @@ -104,7 +104,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *FlowcontrolV1alpha1Client) RESTClient() rest.Interface { +func (c *FlowcontrolV1Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go similarity index 69% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go index 95baf8251..bd36c5e6a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( "context" @@ -24,11 +24,11 @@ import ( "fmt" "time" - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/flowcontrol/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1alpha1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1" + flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -41,17 +41,17 @@ type FlowSchemasGetter interface { // FlowSchemaInterface has methods to work with FlowSchema resources. 
type FlowSchemaInterface interface { - Create(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.CreateOptions) (*v1alpha1.FlowSchema, error) - Update(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (*v1alpha1.FlowSchema, error) - UpdateStatus(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (*v1alpha1.FlowSchema, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.FlowSchema, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.FlowSchemaList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.FlowSchema, err error) - Apply(ctx context.Context, flowSchema *flowcontrolv1alpha1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.FlowSchema, err error) - ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1alpha1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.FlowSchema, err error) + Create(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.CreateOptions) (*v1.FlowSchema, error) + Update(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error) + UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.FlowSchema, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.FlowSchemaList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FlowSchema, err error) + Apply(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) + ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) FlowSchemaExpansion } @@ -61,15 +61,15 @@ type flowSchemas struct { } // newFlowSchemas returns a FlowSchemas -func newFlowSchemas(c *FlowcontrolV1alpha1Client) *flowSchemas { +func newFlowSchemas(c *FlowcontrolV1Client) *flowSchemas { return &flowSchemas{ client: c.RESTClient(), } } // Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.FlowSchema, err error) { - result = &v1alpha1.FlowSchema{} +func (c *flowSchemas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.FlowSchema, err error) { + result = &v1.FlowSchema{} err = c.client.Get(). Resource("flowschemas"). Name(name). @@ -80,12 +80,12 @@ func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOption } // List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. 
-func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.FlowSchemaList, err error) { +func (c *flowSchemas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.FlowSchemaList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.FlowSchemaList{} + result = &v1.FlowSchemaList{} err = c.client.Get(). Resource("flowschemas"). VersionedParams(&opts, scheme.ParameterCodec). @@ -96,7 +96,7 @@ func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1 } // Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *flowSchemas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -110,8 +110,8 @@ func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Int } // Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.CreateOptions) (result *v1alpha1.FlowSchema, err error) { - result = &v1alpha1.FlowSchema{} +func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.CreateOptions) (result *v1.FlowSchema, err error) { + result = &v1.FlowSchema{} err = c.client.Post(). Resource("flowschemas"). VersionedParams(&opts, scheme.ParameterCodec). @@ -122,8 +122,8 @@ func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1alpha1.FlowSchem } // Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (result *v1alpha1.FlowSchema, err error) { - result = &v1alpha1.FlowSchema{} +func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) { + result = &v1.FlowSchema{} err = c.client.Put(). Resource("flowschemas"). Name(flowSchema.Name). @@ -136,8 +136,8 @@ func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1alpha1.FlowSchem // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (result *v1alpha1.FlowSchema, err error) { - result = &v1alpha1.FlowSchema{} +func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) { + result = &v1.FlowSchema{} err = c.client.Put(). Resource("flowschemas"). Name(flowSchema.Name). @@ -150,7 +150,7 @@ func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1alpha1.Flo } // Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *flowSchemas) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { return c.client.Delete(). Resource("flowschemas"). Name(name). 
@@ -160,7 +160,7 @@ func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOpt } // DeleteCollection deletes a collection of objects. -func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *flowSchemas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { var timeout time.Duration if listOpts.TimeoutSeconds != nil { timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second @@ -175,8 +175,8 @@ func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOption } // Patch applies the patch and returns the patched flowSchema. -func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.FlowSchema, err error) { - result = &v1alpha1.FlowSchema{} +func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FlowSchema, err error) { + result = &v1.FlowSchema{} err = c.client.Patch(pt). Resource("flowschemas"). Name(name). @@ -189,7 +189,7 @@ func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType } // Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1alpha1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.FlowSchema, err error) { +func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) { if flowSchema == nil { return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") } @@ -202,7 +202,7 @@ func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1alpha1 if name == nil { return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } - result = &v1alpha1.FlowSchema{} + result = &v1.FlowSchema{} err = c.client.Patch(types.ApplyPatchType). Resource("flowschemas"). Name(*name). @@ -215,7 +215,7 @@ func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1alpha1 // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1alpha1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.FlowSchema, err error) { +func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) { if flowSchema == nil { return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") } @@ -230,7 +230,7 @@ func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1 return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } - result = &v1alpha1.FlowSchema{} + result = &v1.FlowSchema{} err = c.client.Patch(types.ApplyPatchType). Resource("flowschemas"). Name(*name). 
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/generated_expansion.go similarity index 97% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/generated_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/generated_expansion.go index 065b5e6b4..990677388 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/generated_expansion.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1 type FlowSchemaExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go similarity index 69% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go index 327b727c1..797fe9403 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( "context" @@ -24,11 +24,11 @@ import ( "fmt" "time" - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/flowcontrol/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1alpha1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1" + flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -41,17 +41,17 @@ type PriorityLevelConfigurationsGetter interface { // PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources. 
type PriorityLevelConfigurationInterface interface { - Create(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1alpha1.PriorityLevelConfiguration, error) - Update(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1alpha1.PriorityLevelConfiguration, error) - UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1alpha1.PriorityLevelConfiguration, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PriorityLevelConfiguration, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PriorityLevelConfigurationList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) - Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) - ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) + Create(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.CreateOptions) (*v1.PriorityLevelConfiguration, error) + Update(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error) + UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PriorityLevelConfiguration, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.PriorityLevelConfigurationList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityLevelConfiguration, err error) + Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) + ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) PriorityLevelConfigurationExpansion } @@ -61,15 +61,15 @@ type priorityLevelConfigurations struct { } // newPriorityLevelConfigurations returns a PriorityLevelConfigurations -func newPriorityLevelConfigurations(c *FlowcontrolV1alpha1Client) *priorityLevelConfigurations { +func newPriorityLevelConfigurations(c *FlowcontrolV1Client) *priorityLevelConfigurations { return &priorityLevelConfigurations{ client: c.RESTClient(), } } // Get takes name of the 
priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { - result = &v1alpha1.PriorityLevelConfiguration{} +func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityLevelConfiguration, err error) { + result = &v1.PriorityLevelConfiguration{} err = c.client.Get(). Resource("prioritylevelconfigurations"). Name(name). @@ -80,12 +80,12 @@ func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, opti } // List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PriorityLevelConfigurationList, err error) { +func (c *priorityLevelConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityLevelConfigurationList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.PriorityLevelConfigurationList{} + result = &v1.PriorityLevelConfigurationList{} err = c.client.Get(). Resource("prioritylevelconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). @@ -96,7 +96,7 @@ func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOpti } // Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -110,8 +110,8 @@ func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOpt } // Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { - result = &v1alpha1.PriorityLevelConfiguration{} +func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.CreateOptions) (result *v1.PriorityLevelConfiguration, err error) { + result = &v1.PriorityLevelConfiguration{} err = c.client.Post(). Resource("prioritylevelconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). @@ -122,8 +122,8 @@ func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelC } // Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. 
-func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { - result = &v1alpha1.PriorityLevelConfiguration{} +func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) { + result = &v1.PriorityLevelConfiguration{} err = c.client.Put(). Resource("prioritylevelconfigurations"). Name(priorityLevelConfiguration.Name). @@ -136,8 +136,8 @@ func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelC // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { - result = &v1alpha1.PriorityLevelConfiguration{} +func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) { + result = &v1.PriorityLevelConfiguration{} err = c.client.Put(). Resource("prioritylevelconfigurations"). Name(priorityLevelConfiguration.Name). @@ -150,7 +150,7 @@ func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priority } // Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { return c.client.Delete(). Resource("prioritylevelconfigurations"). Name(name). @@ -160,7 +160,7 @@ func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, o } // DeleteCollection deletes a collection of objects. -func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { var timeout time.Duration if listOpts.TimeoutSeconds != nil { timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second @@ -175,8 +175,8 @@ func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts } // Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) { - result = &v1alpha1.PriorityLevelConfiguration{} +func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityLevelConfiguration, err error) { + result = &v1.PriorityLevelConfiguration{} err = c.client.Patch(pt). Resource("prioritylevelconfigurations"). Name(name). 
@@ -189,7 +189,7 @@ func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt } // Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. -func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) { if priorityLevelConfiguration == nil { return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") } @@ -202,7 +202,7 @@ func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelCo if name == nil { return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } - result = &v1alpha1.PriorityLevelConfiguration{} + result = &v1.PriorityLevelConfiguration{} err = c.client.Patch(types.ApplyPatchType). Resource("prioritylevelconfigurations"). Name(*name). @@ -215,7 +215,7 @@ func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelCo // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) { if priorityLevelConfiguration == nil { return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") } @@ -230,7 +230,7 @@ func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityL return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } - result = &v1alpha1.PriorityLevelConfiguration{} + result = &v1.PriorityLevelConfiguration{} err = c.client.Patch(types.ApplyPatchType). Resource("prioritylevelconfigurations"). Name(*name). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/clustercidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/clustercidr.go deleted file mode 100644 index 9df76351d..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/clustercidr.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha1 "k8s.io/api/networking/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ClusterCIDRsGetter has a method to return a ClusterCIDRInterface. -// A group's client should implement this interface. -type ClusterCIDRsGetter interface { - ClusterCIDRs() ClusterCIDRInterface -} - -// ClusterCIDRInterface has methods to work with ClusterCIDR resources. -type ClusterCIDRInterface interface { - Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (*v1alpha1.ClusterCIDR, error) - Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (*v1alpha1.ClusterCIDR, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterCIDR, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterCIDRList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error) - Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error) - ClusterCIDRExpansion -} - -// clusterCIDRs implements ClusterCIDRInterface -type clusterCIDRs struct { - client rest.Interface -} - -// newClusterCIDRs returns a ClusterCIDRs -func newClusterCIDRs(c *NetworkingV1alpha1Client) *clusterCIDRs { - return &clusterCIDRs{ - client: c.RESTClient(), - } -} - -// Get takes name of the clusterCIDR, and returns the corresponding clusterCIDR object, and an error if there is any. -func (c *clusterCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterCIDR, err error) { - result = &v1alpha1.ClusterCIDR{} - err = c.client.Get(). - Resource("clustercidrs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterCIDRs that match those selectors. -func (c *clusterCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterCIDRList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ClusterCIDRList{} - err = c.client.Get(). - Resource("clustercidrs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterCIDRs. -func (c *clusterCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clustercidrs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterCIDR and creates it. 
Returns the server's representation of the clusterCIDR, and an error, if there is any. -func (c *clusterCIDRs) Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (result *v1alpha1.ClusterCIDR, err error) { - result = &v1alpha1.ClusterCIDR{} - err = c.client.Post(). - Resource("clustercidrs"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterCIDR). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterCIDR and updates it. Returns the server's representation of the clusterCIDR, and an error, if there is any. -func (c *clusterCIDRs) Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (result *v1alpha1.ClusterCIDR, err error) { - result = &v1alpha1.ClusterCIDR{} - err = c.client.Put(). - Resource("clustercidrs"). - Name(clusterCIDR.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterCIDR). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterCIDR and deletes it. Returns an error if one occurs. -func (c *clusterCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clustercidrs"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clustercidrs"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterCIDR. -func (c *clusterCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error) { - result = &v1alpha1.ClusterCIDR{} - err = c.client.Patch(pt). - Resource("clustercidrs"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterCIDR. -func (c *clusterCIDRs) Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error) { - if clusterCIDR == nil { - return nil, fmt.Errorf("clusterCIDR provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterCIDR) - if err != nil { - return nil, err - } - name := clusterCIDR.Name - if name == nil { - return nil, fmt.Errorf("clusterCIDR.Name must be provided to Apply") - } - result = &v1alpha1.ClusterCIDR{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clustercidrs"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_clustercidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_clustercidr.go deleted file mode 100644 index 592e9fc63..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_clustercidr.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha1 "k8s.io/api/networking/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" - testing "k8s.io/client-go/testing" -) - -// FakeClusterCIDRs implements ClusterCIDRInterface -type FakeClusterCIDRs struct { - Fake *FakeNetworkingV1alpha1 -} - -var clustercidrsResource = v1alpha1.SchemeGroupVersion.WithResource("clustercidrs") - -var clustercidrsKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterCIDR") - -// Get takes name of the clusterCIDR, and returns the corresponding clusterCIDR object, and an error if there is any. -func (c *FakeClusterCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterCIDR, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clustercidrsResource, name), &v1alpha1.ClusterCIDR{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterCIDR), err -} - -// List takes label and field selectors, and returns the list of ClusterCIDRs that match those selectors. -func (c *FakeClusterCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterCIDRList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(clustercidrsResource, clustercidrsKind, opts), &v1alpha1.ClusterCIDRList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.ClusterCIDRList{ListMeta: obj.(*v1alpha1.ClusterCIDRList).ListMeta} - for _, item := range obj.(*v1alpha1.ClusterCIDRList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusterCIDRs. -func (c *FakeClusterCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clustercidrsResource, opts)) -} - -// Create takes the representation of a clusterCIDR and creates it. Returns the server's representation of the clusterCIDR, and an error, if there is any. -func (c *FakeClusterCIDRs) Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (result *v1alpha1.ClusterCIDR, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clustercidrsResource, clusterCIDR), &v1alpha1.ClusterCIDR{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterCIDR), err -} - -// Update takes the representation of a clusterCIDR and updates it. Returns the server's representation of the clusterCIDR, and an error, if there is any. 
-func (c *FakeClusterCIDRs) Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (result *v1alpha1.ClusterCIDR, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(clustercidrsResource, clusterCIDR), &v1alpha1.ClusterCIDR{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterCIDR), err -} - -// Delete takes name of the clusterCIDR and deletes it. Returns an error if one occurs. -func (c *FakeClusterCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(clustercidrsResource, name, opts), &v1alpha1.ClusterCIDR{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeClusterCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clustercidrsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.ClusterCIDRList{}) - return err -} - -// Patch applies the patch and returns the patched clusterCIDR. -func (c *FakeClusterCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clustercidrsResource, name, pt, data, subresources...), &v1alpha1.ClusterCIDR{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterCIDR), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterCIDR. -func (c *FakeClusterCIDRs) Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error) { - if clusterCIDR == nil { - return nil, fmt.Errorf("clusterCIDR provided to Apply must not be nil") - } - data, err := json.Marshal(clusterCIDR) - if err != nil { - return nil, err - } - name := clusterCIDR.Name - if name == nil { - return nil, fmt.Errorf("clusterCIDR.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clustercidrsResource, *name, types.ApplyPatchType, data), &v1alpha1.ClusterCIDR{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterCIDR), err -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go index 2d063836b..80ad184bb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go @@ -28,14 +28,14 @@ type FakeNetworkingV1alpha1 struct { *testing.Fake } -func (c *FakeNetworkingV1alpha1) ClusterCIDRs() v1alpha1.ClusterCIDRInterface { - return &FakeClusterCIDRs{c} -} - func (c *FakeNetworkingV1alpha1) IPAddresses() v1alpha1.IPAddressInterface { return &FakeIPAddresses{c} } +func (c *FakeNetworkingV1alpha1) ServiceCIDRs() v1alpha1.ServiceCIDRInterface { + return &FakeServiceCIDRs{c} +} + // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. 
func (c *FakeNetworkingV1alpha1) RESTClient() rest.Interface { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go new file mode 100644 index 000000000..653ef631a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go @@ -0,0 +1,178 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1alpha1 "k8s.io/api/networking/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + testing "k8s.io/client-go/testing" +) + +// FakeServiceCIDRs implements ServiceCIDRInterface +type FakeServiceCIDRs struct { + Fake *FakeNetworkingV1alpha1 +} + +var servicecidrsResource = v1alpha1.SchemeGroupVersion.WithResource("servicecidrs") + +var servicecidrsKind = v1alpha1.SchemeGroupVersion.WithKind("ServiceCIDR") + +// Get takes name of the serviceCIDR, and returns the corresponding serviceCIDR object, and an error if there is any. +func (c *FakeServiceCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ServiceCIDR, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(servicecidrsResource, name), &v1alpha1.ServiceCIDR{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ServiceCIDR), err +} + +// List takes label and field selectors, and returns the list of ServiceCIDRs that match those selectors. +func (c *FakeServiceCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ServiceCIDRList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(servicecidrsResource, servicecidrsKind, opts), &v1alpha1.ServiceCIDRList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ServiceCIDRList{ListMeta: obj.(*v1alpha1.ServiceCIDRList).ListMeta} + for _, item := range obj.(*v1alpha1.ServiceCIDRList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested serviceCIDRs. +func (c *FakeServiceCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(servicecidrsResource, opts)) +} + +// Create takes the representation of a serviceCIDR and creates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. 
+func (c *FakeServiceCIDRs) Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (result *v1alpha1.ServiceCIDR, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(servicecidrsResource, serviceCIDR), &v1alpha1.ServiceCIDR{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ServiceCIDR), err +} + +// Update takes the representation of a serviceCIDR and updates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. +func (c *FakeServiceCIDRs) Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(servicecidrsResource, serviceCIDR), &v1alpha1.ServiceCIDR{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ServiceCIDR), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeServiceCIDRs) UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(servicecidrsResource, "status", serviceCIDR), &v1alpha1.ServiceCIDR{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ServiceCIDR), err +} + +// Delete takes name of the serviceCIDR and deletes it. Returns an error if one occurs. +func (c *FakeServiceCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(servicecidrsResource, name, opts), &v1alpha1.ServiceCIDR{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeServiceCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(servicecidrsResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.ServiceCIDRList{}) + return err +} + +// Patch applies the patch and returns the patched serviceCIDR. +func (c *FakeServiceCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(servicecidrsResource, name, pt, data, subresources...), &v1alpha1.ServiceCIDR{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ServiceCIDR), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied serviceCIDR. +func (c *FakeServiceCIDRs) Apply(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) { + if serviceCIDR == nil { + return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") + } + data, err := json.Marshal(serviceCIDR) + if err != nil { + return nil, err + } + name := serviceCIDR.Name + if name == nil { + return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(servicecidrsResource, *name, types.ApplyPatchType, data), &v1alpha1.ServiceCIDR{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ServiceCIDR), err +} + +// ApplyStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeServiceCIDRs) ApplyStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) { + if serviceCIDR == nil { + return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") + } + data, err := json.Marshal(serviceCIDR) + if err != nil { + return nil, err + } + name := serviceCIDR.Name + if name == nil { + return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(servicecidrsResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.ServiceCIDR{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ServiceCIDR), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go index 9c2979d6c..df12a463d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go @@ -18,6 +18,6 @@ limitations under the License. package v1alpha1 -type ClusterCIDRExpansion interface{} - type IPAddressExpansion interface{} + +type ServiceCIDRExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go index 884c846f5..c730e6246 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go @@ -28,8 +28,8 @@ import ( type NetworkingV1alpha1Interface interface { RESTClient() rest.Interface - ClusterCIDRsGetter IPAddressesGetter + ServiceCIDRsGetter } // NetworkingV1alpha1Client is used to interact with features provided by the networking.k8s.io group. @@ -37,14 +37,14 @@ type NetworkingV1alpha1Client struct { restClient rest.Interface } -func (c *NetworkingV1alpha1Client) ClusterCIDRs() ClusterCIDRInterface { - return newClusterCIDRs(c) -} - func (c *NetworkingV1alpha1Client) IPAddresses() IPAddressInterface { return newIPAddresses(c) } +func (c *NetworkingV1alpha1Client) ServiceCIDRs() ServiceCIDRInterface { + return newServiceCIDRs(c) +} + // NewForConfig creates a new NetworkingV1alpha1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go new file mode 100644 index 000000000..100f290a1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go @@ -0,0 +1,243 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha1 "k8s.io/api/networking/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ServiceCIDRsGetter has a method to return a ServiceCIDRInterface. +// A group's client should implement this interface. +type ServiceCIDRsGetter interface { + ServiceCIDRs() ServiceCIDRInterface +} + +// ServiceCIDRInterface has methods to work with ServiceCIDR resources. +type ServiceCIDRInterface interface { + Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (*v1alpha1.ServiceCIDR, error) + Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error) + UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ServiceCIDR, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ServiceCIDRList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error) + Apply(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) + ApplyStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) + ServiceCIDRExpansion +} + +// serviceCIDRs implements ServiceCIDRInterface +type serviceCIDRs struct { + client rest.Interface +} + +// newServiceCIDRs returns a ServiceCIDRs +func newServiceCIDRs(c *NetworkingV1alpha1Client) *serviceCIDRs { + return &serviceCIDRs{ + client: c.RESTClient(), + } +} + +// Get takes name of the serviceCIDR, and returns the corresponding serviceCIDR object, and an error if there is any. +func (c *serviceCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ServiceCIDR, err error) { + result = &v1alpha1.ServiceCIDR{} + err = c.client.Get(). + Resource("servicecidrs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ServiceCIDRs that match those selectors. +func (c *serviceCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ServiceCIDRList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ServiceCIDRList{} + err = c.client.Get(). + Resource("servicecidrs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested serviceCIDRs. 
+func (c *serviceCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("servicecidrs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a serviceCIDR and creates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. +func (c *serviceCIDRs) Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (result *v1alpha1.ServiceCIDR, err error) { + result = &v1alpha1.ServiceCIDR{} + err = c.client.Post(). + Resource("servicecidrs"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(serviceCIDR). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a serviceCIDR and updates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. +func (c *serviceCIDRs) Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) { + result = &v1alpha1.ServiceCIDR{} + err = c.client.Put(). + Resource("servicecidrs"). + Name(serviceCIDR.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(serviceCIDR). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *serviceCIDRs) UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) { + result = &v1alpha1.ServiceCIDR{} + err = c.client.Put(). + Resource("servicecidrs"). + Name(serviceCIDR.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(serviceCIDR). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the serviceCIDR and deletes it. Returns an error if one occurs. +func (c *serviceCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("servicecidrs"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *serviceCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("servicecidrs"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched serviceCIDR. +func (c *serviceCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error) { + result = &v1alpha1.ServiceCIDR{} + err = c.client.Patch(pt). + Resource("servicecidrs"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied serviceCIDR. 
+func (c *serviceCIDRs) Apply(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) { + if serviceCIDR == nil { + return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(serviceCIDR) + if err != nil { + return nil, err + } + name := serviceCIDR.Name + if name == nil { + return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") + } + result = &v1alpha1.ServiceCIDR{} + err = c.client.Patch(types.ApplyPatchType). + Resource("servicecidrs"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *serviceCIDRs) ApplyStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) { + if serviceCIDR == nil { + return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(serviceCIDR) + if err != nil { + return nil, err + } + + name := serviceCIDR.Name + if name == nil { + return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") + } + + result = &v1alpha1.ServiceCIDR{} + err = c.client.Patch(types.ApplyPatchType). + Resource("servicecidrs"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go deleted file mode 100644 index ade1aab7f..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/policy/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - policyv1beta1 "k8s.io/client-go/applyconfigurations/policy/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakePodSecurityPolicies implements PodSecurityPolicyInterface -type FakePodSecurityPolicies struct { - Fake *FakePolicyV1beta1 -} - -var podsecuritypoliciesResource = v1beta1.SchemeGroupVersion.WithResource("podsecuritypolicies") - -var podsecuritypoliciesKind = v1beta1.SchemeGroupVersion.WithKind("PodSecurityPolicy") - -// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. 
-func (c *FakePodSecurityPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(podsecuritypoliciesResource, name), &v1beta1.PodSecurityPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodSecurityPolicy), err -} - -// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. -func (c *FakePodSecurityPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(podsecuritypoliciesResource, podsecuritypoliciesKind, opts), &v1beta1.PodSecurityPolicyList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.PodSecurityPolicyList{ListMeta: obj.(*v1beta1.PodSecurityPolicyList).ListMeta} - for _, item := range obj.(*v1beta1.PodSecurityPolicyList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested podSecurityPolicies. -func (c *FakePodSecurityPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(podsecuritypoliciesResource, opts)) -} - -// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *FakePodSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (result *v1beta1.PodSecurityPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodSecurityPolicy), err -} - -// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *FakePodSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (result *v1beta1.PodSecurityPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodSecurityPolicy), err -} - -// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. -func (c *FakePodSecurityPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(podsecuritypoliciesResource, name, opts), &v1beta1.PodSecurityPolicy{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePodSecurityPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(podsecuritypoliciesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.PodSecurityPolicyList{}) - return err -} - -// Patch applies the patch and returns the patched podSecurityPolicy. 
-func (c *FakePodSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(podsecuritypoliciesResource, name, pt, data, subresources...), &v1beta1.PodSecurityPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodSecurityPolicy), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podSecurityPolicy. -func (c *FakePodSecurityPolicies) Apply(ctx context.Context, podSecurityPolicy *policyv1beta1.PodSecurityPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodSecurityPolicy, err error) { - if podSecurityPolicy == nil { - return nil, fmt.Errorf("podSecurityPolicy provided to Apply must not be nil") - } - data, err := json.Marshal(podSecurityPolicy) - if err != nil { - return nil, err - } - name := podSecurityPolicy.Name - if name == nil { - return nil, fmt.Errorf("podSecurityPolicy.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(podsecuritypoliciesResource, *name, types.ApplyPatchType, data), &v1beta1.PodSecurityPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodSecurityPolicy), err -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go index 9c780bf1f..90670b113 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go @@ -36,10 +36,6 @@ func (c *FakePolicyV1beta1) PodDisruptionBudgets(namespace string) v1beta1.PodDi return &FakePodDisruptionBudgets{c, namespace} } -func (c *FakePolicyV1beta1) PodSecurityPolicies() v1beta1.PodSecurityPolicyInterface { - return &FakePodSecurityPolicies{c} -} - // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. func (c *FakePolicyV1beta1) RESTClient() rest.Interface { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go index 078c16d5c..6fce70c4e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go @@ -19,5 +19,3 @@ limitations under the License. package v1beta1 type PodDisruptionBudgetExpansion interface{} - -type PodSecurityPolicyExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go deleted file mode 100644 index 944b61de4..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1beta1 "k8s.io/api/policy/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - policyv1beta1 "k8s.io/client-go/applyconfigurations/policy/v1beta1" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface. -// A group's client should implement this interface. -type PodSecurityPoliciesGetter interface { - PodSecurityPolicies() PodSecurityPolicyInterface -} - -// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. -type PodSecurityPolicyInterface interface { - Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (*v1beta1.PodSecurityPolicy, error) - Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (*v1beta1.PodSecurityPolicy, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) - Apply(ctx context.Context, podSecurityPolicy *policyv1beta1.PodSecurityPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodSecurityPolicy, err error) - PodSecurityPolicyExpansion -} - -// podSecurityPolicies implements PodSecurityPolicyInterface -type podSecurityPolicies struct { - client rest.Interface -} - -// newPodSecurityPolicies returns a PodSecurityPolicies -func newPodSecurityPolicies(c *PolicyV1beta1Client) *podSecurityPolicies { - return &podSecurityPolicies{ - client: c.RESTClient(), - } -} - -// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. -func (c *podSecurityPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. -func (c *podSecurityPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.PodSecurityPolicyList{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podSecurityPolicies. 
-func (c *podSecurityPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Post(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSecurityPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Put(). - Resource("podsecuritypolicies"). - Name(podSecurityPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSecurityPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. -func (c *podSecurityPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("podsecuritypolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podSecurityPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("podsecuritypolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podSecurityPolicy. -func (c *podSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Patch(pt). - Resource("podsecuritypolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podSecurityPolicy. 
-func (c *podSecurityPolicies) Apply(ctx context.Context, podSecurityPolicy *policyv1beta1.PodSecurityPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodSecurityPolicy, err error) { - if podSecurityPolicy == nil { - return nil, fmt.Errorf("podSecurityPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podSecurityPolicy) - if err != nil { - return nil, err - } - name := podSecurityPolicy.Name - if name == nil { - return nil, fmt.Errorf("podSecurityPolicy.Name must be provided to Apply") - } - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("podsecuritypolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go index 5b65c9c0a..fdb509321 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go @@ -30,7 +30,6 @@ type PolicyV1beta1Interface interface { RESTClient() rest.Interface EvictionsGetter PodDisruptionBudgetsGetter - PodSecurityPoliciesGetter } // PolicyV1beta1Client is used to interact with features provided by the policy group. @@ -46,10 +45,6 @@ func (c *PolicyV1beta1Client) PodDisruptionBudgets(namespace string) PodDisrupti return newPodDisruptionBudgets(c, namespace) } -func (c *PolicyV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface { - return newPodSecurityPolicies(c) -} - // NewForConfig creates a new PolicyV1beta1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go index c26190aa0..0e078f348 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go @@ -36,6 +36,10 @@ func (c *FakeStorageV1alpha1) VolumeAttachments() v1alpha1.VolumeAttachmentInter return &FakeVolumeAttachments{c} } +func (c *FakeStorageV1alpha1) VolumeAttributesClasses() v1alpha1.VolumeAttributesClassInterface { + return &FakeVolumeAttributesClasses{c} +} + // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. func (c *FakeStorageV1alpha1) RESTClient() rest.Interface { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go new file mode 100644 index 000000000..d25263df4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go @@ -0,0 +1,145 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1alpha1 "k8s.io/api/storage/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" + testing "k8s.io/client-go/testing" +) + +// FakeVolumeAttributesClasses implements VolumeAttributesClassInterface +type FakeVolumeAttributesClasses struct { + Fake *FakeStorageV1alpha1 +} + +var volumeattributesclassesResource = v1alpha1.SchemeGroupVersion.WithResource("volumeattributesclasses") + +var volumeattributesclassesKind = v1alpha1.SchemeGroupVersion.WithKind("VolumeAttributesClass") + +// Get takes name of the volumeAttributesClass, and returns the corresponding volumeAttributesClass object, and an error if there is any. +func (c *FakeVolumeAttributesClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttributesClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(volumeattributesclassesResource, name), &v1alpha1.VolumeAttributesClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeAttributesClass), err +} + +// List takes label and field selectors, and returns the list of VolumeAttributesClasses that match those selectors. +func (c *FakeVolumeAttributesClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttributesClassList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(volumeattributesclassesResource, volumeattributesclassesKind, opts), &v1alpha1.VolumeAttributesClassList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.VolumeAttributesClassList{ListMeta: obj.(*v1alpha1.VolumeAttributesClassList).ListMeta} + for _, item := range obj.(*v1alpha1.VolumeAttributesClassList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested volumeAttributesClasses. +func (c *FakeVolumeAttributesClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(volumeattributesclassesResource, opts)) +} + +// Create takes the representation of a volumeAttributesClass and creates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. +func (c *FakeVolumeAttributesClasses) Create(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.CreateOptions) (result *v1alpha1.VolumeAttributesClass, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootCreateAction(volumeattributesclassesResource, volumeAttributesClass), &v1alpha1.VolumeAttributesClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeAttributesClass), err +} + +// Update takes the representation of a volumeAttributesClass and updates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. +func (c *FakeVolumeAttributesClasses) Update(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttributesClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(volumeattributesclassesResource, volumeAttributesClass), &v1alpha1.VolumeAttributesClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeAttributesClass), err +} + +// Delete takes name of the volumeAttributesClass and deletes it. Returns an error if one occurs. +func (c *FakeVolumeAttributesClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(volumeattributesclassesResource, name, opts), &v1alpha1.VolumeAttributesClass{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeVolumeAttributesClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(volumeattributesclassesResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.VolumeAttributesClassList{}) + return err +} + +// Patch applies the patch and returns the patched volumeAttributesClass. +func (c *FakeVolumeAttributesClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttributesClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(volumeattributesclassesResource, name, pt, data, subresources...), &v1alpha1.VolumeAttributesClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeAttributesClass), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttributesClass. +func (c *FakeVolumeAttributesClasses) Apply(ctx context.Context, volumeAttributesClass *storagev1alpha1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttributesClass, err error) { + if volumeAttributesClass == nil { + return nil, fmt.Errorf("volumeAttributesClass provided to Apply must not be nil") + } + data, err := json.Marshal(volumeAttributesClass) + if err != nil { + return nil, err + } + name := volumeAttributesClass.Name + if name == nil { + return nil, fmt.Errorf("volumeAttributesClass.Name must be provided to Apply") + } + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(volumeattributesclassesResource, *name, types.ApplyPatchType, data), &v1alpha1.VolumeAttributesClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeAttributesClass), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go index 0f51c85f9..436e910f2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go @@ -21,3 +21,5 @@ package v1alpha1 type CSIStorageCapacityExpansion interface{} type VolumeAttachmentExpansion interface{} + +type VolumeAttributesClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go index c9bf11d76..63e3fc243 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go @@ -30,6 +30,7 @@ type StorageV1alpha1Interface interface { RESTClient() rest.Interface CSIStorageCapacitiesGetter VolumeAttachmentsGetter + VolumeAttributesClassesGetter } // StorageV1alpha1Client is used to interact with features provided by the storage.k8s.io group. @@ -45,6 +46,10 @@ func (c *StorageV1alpha1Client) VolumeAttachments() VolumeAttachmentInterface { return newVolumeAttachments(c) } +func (c *StorageV1alpha1Client) VolumeAttributesClasses() VolumeAttributesClassInterface { + return newVolumeAttributesClasses(c) +} + // NewForConfig creates a new StorageV1alpha1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go new file mode 100644 index 000000000..6633a4dc1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go @@ -0,0 +1,197 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha1 "k8s.io/api/storage/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// VolumeAttributesClassesGetter has a method to return a VolumeAttributesClassInterface. +// A group's client should implement this interface. 
+type VolumeAttributesClassesGetter interface { + VolumeAttributesClasses() VolumeAttributesClassInterface +} + +// VolumeAttributesClassInterface has methods to work with VolumeAttributesClass resources. +type VolumeAttributesClassInterface interface { + Create(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.CreateOptions) (*v1alpha1.VolumeAttributesClass, error) + Update(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.UpdateOptions) (*v1alpha1.VolumeAttributesClass, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.VolumeAttributesClass, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.VolumeAttributesClassList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttributesClass, err error) + Apply(ctx context.Context, volumeAttributesClass *storagev1alpha1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttributesClass, err error) + VolumeAttributesClassExpansion +} + +// volumeAttributesClasses implements VolumeAttributesClassInterface +type volumeAttributesClasses struct { + client rest.Interface +} + +// newVolumeAttributesClasses returns a VolumeAttributesClasses +func newVolumeAttributesClasses(c *StorageV1alpha1Client) *volumeAttributesClasses { + return &volumeAttributesClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the volumeAttributesClass, and returns the corresponding volumeAttributesClass object, and an error if there is any. +func (c *volumeAttributesClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttributesClass, err error) { + result = &v1alpha1.VolumeAttributesClass{} + err = c.client.Get(). + Resource("volumeattributesclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VolumeAttributesClasses that match those selectors. +func (c *volumeAttributesClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttributesClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.VolumeAttributesClassList{} + err = c.client.Get(). + Resource("volumeattributesclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested volumeAttributesClasses. +func (c *volumeAttributesClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("volumeattributesclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a volumeAttributesClass and creates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. 
+func (c *volumeAttributesClasses) Create(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.CreateOptions) (result *v1alpha1.VolumeAttributesClass, err error) { + result = &v1alpha1.VolumeAttributesClass{} + err = c.client.Post(). + Resource("volumeattributesclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(volumeAttributesClass). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a volumeAttributesClass and updates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. +func (c *volumeAttributesClasses) Update(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttributesClass, err error) { + result = &v1alpha1.VolumeAttributesClass{} + err = c.client.Put(). + Resource("volumeattributesclasses"). + Name(volumeAttributesClass.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(volumeAttributesClass). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the volumeAttributesClass and deletes it. Returns an error if one occurs. +func (c *volumeAttributesClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("volumeattributesclasses"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *volumeAttributesClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("volumeattributesclasses"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched volumeAttributesClass. +func (c *volumeAttributesClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttributesClass, err error) { + result = &v1alpha1.VolumeAttributesClass{} + err = c.client.Patch(pt). + Resource("volumeattributesclasses"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttributesClass. +func (c *volumeAttributesClasses) Apply(ctx context.Context, volumeAttributesClass *storagev1alpha1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttributesClass, err error) { + if volumeAttributesClass == nil { + return nil, fmt.Errorf("volumeAttributesClass provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(volumeAttributesClass) + if err != nil { + return nil, err + } + name := volumeAttributesClass.Name + if name == nil { + return nil, fmt.Errorf("volumeAttributesClass.Name must be provided to Apply") + } + result = &v1alpha1.VolumeAttributesClass{} + err = c.client.Patch(types.ApplyPatchType). + Resource("volumeattributesclasses"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1/expansion_generated.go similarity index 98% rename from vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/expansion_generated.go rename to vendor/k8s.io/client-go/listers/flowcontrol/v1/expansion_generated.go index 3e7405168..70b5eb5b1 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1/expansion_generated.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1alpha1 +package v1 // FlowSchemaListerExpansion allows custom methods to be added to // FlowSchemaLister. diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/flowschema.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go similarity index 79% rename from vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/flowschema.go rename to vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go index c8a595cd2..43ccd4e5f 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/flowschema.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/api/flowcontrol/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" @@ -30,10 +30,10 @@ import ( type FlowSchemaLister interface { // List lists all FlowSchemas in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.FlowSchema, err error) + List(selector labels.Selector) (ret []*v1.FlowSchema, err error) // Get retrieves the FlowSchema from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.FlowSchema, error) + Get(name string) (*v1.FlowSchema, error) FlowSchemaListerExpansion } @@ -48,21 +48,21 @@ func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister { } // List lists all FlowSchemas in the indexer. -func (s *flowSchemaLister) List(selector labels.Selector) (ret []*v1alpha1.FlowSchema, err error) { +func (s *flowSchemaLister) List(selector labels.Selector) (ret []*v1.FlowSchema, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.FlowSchema)) + ret = append(ret, m.(*v1.FlowSchema)) }) return ret, err } // Get retrieves the FlowSchema from the index for a given name. 
-func (s *flowSchemaLister) Get(name string) (*v1alpha1.FlowSchema, error) { +func (s *flowSchemaLister) Get(name string) (*v1.FlowSchema, error) { obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("flowschema"), name) + return nil, errors.NewNotFound(v1.Resource("flowschema"), name) } - return obj.(*v1alpha1.FlowSchema), nil + return obj.(*v1.FlowSchema), nil } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go similarity index 79% rename from vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/prioritylevelconfiguration.go rename to vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go index daa4ff31d..61189b9cf 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/api/flowcontrol/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" @@ -30,10 +30,10 @@ import ( type PriorityLevelConfigurationLister interface { // List lists all PriorityLevelConfigurations in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.PriorityLevelConfiguration, err error) + List(selector labels.Selector) (ret []*v1.PriorityLevelConfiguration, err error) // Get retrieves the PriorityLevelConfiguration from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.PriorityLevelConfiguration, error) + Get(name string) (*v1.PriorityLevelConfiguration, error) PriorityLevelConfigurationListerExpansion } @@ -48,21 +48,21 @@ func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelCon } // List lists all PriorityLevelConfigurations in the indexer. -func (s *priorityLevelConfigurationLister) List(selector labels.Selector) (ret []*v1alpha1.PriorityLevelConfiguration, err error) { +func (s *priorityLevelConfigurationLister) List(selector labels.Selector) (ret []*v1.PriorityLevelConfiguration, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.PriorityLevelConfiguration)) + ret = append(ret, m.(*v1.PriorityLevelConfiguration)) }) return ret, err } // Get retrieves the PriorityLevelConfiguration from the index for a given name. 
-func (s *priorityLevelConfigurationLister) Get(name string) (*v1alpha1.PriorityLevelConfiguration, error) { +func (s *priorityLevelConfigurationLister) Get(name string) (*v1.PriorityLevelConfiguration, error) { obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("prioritylevelconfiguration"), name) + return nil, errors.NewNotFound(v1.Resource("prioritylevelconfiguration"), name) } - return obj.(*v1alpha1.PriorityLevelConfiguration), nil + return obj.(*v1.PriorityLevelConfiguration), nil } diff --git a/vendor/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go index d57b71b00..fc7316521 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go @@ -18,10 +18,10 @@ limitations under the License. package v1alpha1 -// ClusterCIDRListerExpansion allows custom methods to be added to -// ClusterCIDRLister. -type ClusterCIDRListerExpansion interface{} - // IPAddressListerExpansion allows custom methods to be added to // IPAddressLister. type IPAddressListerExpansion interface{} + +// ServiceCIDRListerExpansion allows custom methods to be added to +// ServiceCIDRLister. +type ServiceCIDRListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/networking/v1alpha1/clustercidr.go b/vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go similarity index 54% rename from vendor/k8s.io/client-go/listers/networking/v1alpha1/clustercidr.go rename to vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go index dca9d7bf0..8bc2b10e6 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1alpha1/clustercidr.go +++ b/vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go @@ -25,44 +25,44 @@ import ( "k8s.io/client-go/tools/cache" ) -// ClusterCIDRLister helps list ClusterCIDRs. +// ServiceCIDRLister helps list ServiceCIDRs. // All objects returned here must be treated as read-only. -type ClusterCIDRLister interface { - // List lists all ClusterCIDRs in the indexer. +type ServiceCIDRLister interface { + // List lists all ServiceCIDRs in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ClusterCIDR, err error) - // Get retrieves the ClusterCIDR from the index for a given name. + List(selector labels.Selector) (ret []*v1alpha1.ServiceCIDR, err error) + // Get retrieves the ServiceCIDR from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.ClusterCIDR, error) - ClusterCIDRListerExpansion + Get(name string) (*v1alpha1.ServiceCIDR, error) + ServiceCIDRListerExpansion } -// clusterCIDRLister implements the ClusterCIDRLister interface. -type clusterCIDRLister struct { +// serviceCIDRLister implements the ServiceCIDRLister interface. +type serviceCIDRLister struct { indexer cache.Indexer } -// NewClusterCIDRLister returns a new ClusterCIDRLister. -func NewClusterCIDRLister(indexer cache.Indexer) ClusterCIDRLister { - return &clusterCIDRLister{indexer: indexer} +// NewServiceCIDRLister returns a new ServiceCIDRLister. +func NewServiceCIDRLister(indexer cache.Indexer) ServiceCIDRLister { + return &serviceCIDRLister{indexer: indexer} } -// List lists all ClusterCIDRs in the indexer. 
-func (s *clusterCIDRLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterCIDR, err error) { +// List lists all ServiceCIDRs in the indexer. +func (s *serviceCIDRLister) List(selector labels.Selector) (ret []*v1alpha1.ServiceCIDR, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ClusterCIDR)) + ret = append(ret, m.(*v1alpha1.ServiceCIDR)) }) return ret, err } -// Get retrieves the ClusterCIDR from the index for a given name. -func (s *clusterCIDRLister) Get(name string) (*v1alpha1.ClusterCIDR, error) { +// Get retrieves the ServiceCIDR from the index for a given name. +func (s *serviceCIDRLister) Get(name string) (*v1alpha1.ServiceCIDR, error) { obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("clustercidr"), name) + return nil, errors.NewNotFound(v1alpha1.Resource("servicecidr"), name) } - return obj.(*v1alpha1.ClusterCIDR), nil + return obj.(*v1alpha1.ServiceCIDR), nil } diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/expansion_generated.go index 9a005f20b..eba5e2f72 100644 --- a/vendor/k8s.io/client-go/listers/policy/v1beta1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/expansion_generated.go @@ -25,7 +25,3 @@ type EvictionListerExpansion interface{} // EvictionNamespaceListerExpansion allows custom methods to be added to // EvictionNamespaceLister. type EvictionNamespaceListerExpansion interface{} - -// PodSecurityPolicyListerExpansion allows custom methods to be added to -// PodSecurityPolicyLister. -type PodSecurityPolicyListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/podsecuritypolicy.go deleted file mode 100644 index 7e73161b2..000000000 --- a/vendor/k8s.io/client-go/listers/policy/v1beta1/podsecuritypolicy.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/policy/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// PodSecurityPolicyLister helps list PodSecurityPolicies. -// All objects returned here must be treated as read-only. -type PodSecurityPolicyLister interface { - // List lists all PodSecurityPolicies in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.PodSecurityPolicy, err error) - // Get retrieves the PodSecurityPolicy from the index for a given name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.PodSecurityPolicy, error) - PodSecurityPolicyListerExpansion -} - -// podSecurityPolicyLister implements the PodSecurityPolicyLister interface. 
-type podSecurityPolicyLister struct { - indexer cache.Indexer -} - -// NewPodSecurityPolicyLister returns a new PodSecurityPolicyLister. -func NewPodSecurityPolicyLister(indexer cache.Indexer) PodSecurityPolicyLister { - return &podSecurityPolicyLister{indexer: indexer} -} - -// List lists all PodSecurityPolicies in the indexer. -func (s *podSecurityPolicyLister) List(selector labels.Selector) (ret []*v1beta1.PodSecurityPolicy, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.PodSecurityPolicy)) - }) - return ret, err -} - -// Get retrieves the PodSecurityPolicy from the index for a given name. -func (s *podSecurityPolicyLister) Get(name string) (*v1beta1.PodSecurityPolicy, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("podsecuritypolicy"), name) - } - return obj.(*v1beta1.PodSecurityPolicy), nil -} diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go index edefe6d05..327fb6e31 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go @@ -29,3 +29,7 @@ type CSIStorageCapacityNamespaceListerExpansion interface{} // VolumeAttachmentListerExpansion allows custom methods to be added to // VolumeAttachmentLister. type VolumeAttachmentListerExpansion interface{} + +// VolumeAttributesClassListerExpansion allows custom methods to be added to +// VolumeAttributesClassLister. +type VolumeAttributesClassListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go new file mode 100644 index 000000000..f30b4a89b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/storage/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// VolumeAttributesClassLister helps list VolumeAttributesClasses. +// All objects returned here must be treated as read-only. +type VolumeAttributesClassLister interface { + // List lists all VolumeAttributesClasses in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.VolumeAttributesClass, err error) + // Get retrieves the VolumeAttributesClass from the index for a given name. + // Objects returned here must be treated as read-only. 
+ Get(name string) (*v1alpha1.VolumeAttributesClass, error) + VolumeAttributesClassListerExpansion +} + +// volumeAttributesClassLister implements the VolumeAttributesClassLister interface. +type volumeAttributesClassLister struct { + indexer cache.Indexer +} + +// NewVolumeAttributesClassLister returns a new VolumeAttributesClassLister. +func NewVolumeAttributesClassLister(indexer cache.Indexer) VolumeAttributesClassLister { + return &volumeAttributesClassLister{indexer: indexer} +} + +// List lists all VolumeAttributesClasses in the indexer. +func (s *volumeAttributesClassLister) List(selector labels.Selector) (ret []*v1alpha1.VolumeAttributesClass, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.VolumeAttributesClass)) + }) + return ret, err +} + +// Get retrieves the VolumeAttributesClass from the index for a given name. +func (s *volumeAttributesClassLister) Get(name string) (*v1alpha1.VolumeAttributesClass, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("volumeattributesclass"), name) + } + return obj.(*v1alpha1.VolumeAttributesClass), nil +} diff --git a/vendor/k8s.io/client-go/restmapper/shortcut.go b/vendor/k8s.io/client-go/restmapper/shortcut.go index 7ab3cd46f..ca517a01d 100644 --- a/vendor/k8s.io/client-go/restmapper/shortcut.go +++ b/vendor/k8s.io/client-go/restmapper/shortcut.go @@ -17,6 +17,7 @@ limitations under the License. package restmapper import ( + "fmt" "strings" "k8s.io/klog/v2" @@ -32,13 +33,15 @@ type shortcutExpander struct { RESTMapper meta.RESTMapper discoveryClient discovery.DiscoveryInterface + + warningHandler func(string) } var _ meta.ResettableRESTMapper = shortcutExpander{} // NewShortcutExpander wraps a restmapper in a layer that expands shortcuts found via discovery -func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface) meta.RESTMapper { - return shortcutExpander{RESTMapper: delegate, discoveryClient: client} +func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface, warningHandler func(string)) meta.RESTMapper { + return shortcutExpander{RESTMapper: delegate, discoveryClient: client, warningHandler: warningHandler} } // KindFor fulfills meta.RESTMapper @@ -145,16 +148,37 @@ func (e shortcutExpander) expandResourceShortcut(resource schema.GroupVersionRes } } + found := false + var rsc schema.GroupVersionResource + warnedAmbiguousShortcut := make(map[schema.GroupResource]bool) for _, item := range shortcutResources { if len(resource.Group) != 0 && resource.Group != item.ShortForm.Group { continue } if resource.Resource == item.ShortForm.Resource { - resource.Resource = item.LongForm.Resource - resource.Group = item.LongForm.Group - return resource + if found { + if item.LongForm.Group == rsc.Group && item.LongForm.Resource == rsc.Resource { + // It is common and acceptable that group/resource has multiple + // versions registered in cluster. This does not introduce ambiguity + // in terms of shortname usage. 
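The warning handler invoked just below is whatever the caller supplied to NewShortcutExpander, whose signature gained a third argument in this hunk. A rough caller-side sketch, assuming kubeconfig-based setup; the stderr prefix and the "deploy" lookup are illustrative choices, not code from this repository:

package main

import (
    "fmt"
    "os"

    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/discovery"
    "k8s.io/client-go/restmapper"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    dc, err := discovery.NewDiscoveryClientForConfig(config)
    if err != nil {
        panic(err)
    }
    groups, err := restmapper.GetAPIGroupResources(dc)
    if err != nil {
        panic(err)
    }
    delegate := restmapper.NewDiscoveryRESTMapper(groups)

    // The warning handler is the new third argument; here it just surfaces
    // ambiguous short names on stderr.
    mapper := restmapper.NewShortcutExpander(delegate, dc, func(msg string) {
        fmt.Fprintln(os.Stderr, "Warning:", msg)
    })

    gvr, err := mapper.ResourceFor(schema.GroupVersionResource{Resource: "deploy"})
    fmt.Println(gvr, err)
}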
+ continue + } + if !warnedAmbiguousShortcut[item.LongForm] { + if e.warningHandler != nil { + e.warningHandler(fmt.Sprintf("short name %q could also match lower priority resource %s", resource.Resource, item.LongForm.String())) + } + warnedAmbiguousShortcut[item.LongForm] = true + } + continue + } + rsc.Resource = item.LongForm.Resource + rsc.Group = item.LongForm.Group + found = true } } + if found { + return rsc + } // we didn't find exact match so match on group prefixing. This allows autoscal to match autoscaling if len(resource.Group) == 0 { diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index 45eaff528..c1ea13de5 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -334,12 +334,9 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { return nil } if err != nil { - if !apierrors.IsInvalid(err) { - return err - } - klog.Warning("the watch-list feature is not supported by the server, falling back to the previous LIST/WATCH semantic") + klog.Warningf("The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = %v", err) fallbackToList = true - // Ensure that we won't accidentally pass some garbage down the watch. + // ensure that we won't accidentally pass some garbage down the watch. w = nil } } @@ -351,6 +348,8 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { } } + klog.V(2).Infof("Caches populated for %v from %s", r.typeDescription, r.name) + resyncerrc := make(chan error, 1) cancelCh := make(chan struct{}) defer close(cancelCh) @@ -395,6 +394,11 @@ func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc // give the stopCh a chance to stop the loop, even in case of continue statements further down on errors select { case <-stopCh: + // we can only end up here when the stopCh + // was closed after a successful watchlist or list request + if w != nil { + w.Stop() + } return nil default: } @@ -670,6 +674,12 @@ func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) { // "k8s.io/initial-events-end" bookmark. initTrace.Step("Objects streamed", trace.Field{Key: "count", Value: len(temporaryStore.List())}) r.setIsLastSyncResourceVersionUnavailable(false) + + // we utilize the temporaryStore to ensure independence from the current store implementation. + // as of today, the store is implemented as a queue and will be drained by the higher-level + // component as soon as it finishes replacing the content. 
+ checkWatchListConsistencyIfRequested(stopCh, r.name, resourceVersion, r.listerWatcher, temporaryStore) + if err = r.store.Replace(temporaryStore.List(), resourceVersion); err != nil { return nil, fmt.Errorf("unable to sync watch-list result: %v", err) } @@ -762,7 +772,7 @@ loop: } case watch.Bookmark: // A `Bookmark` means watch has synced here, just update the resourceVersion - if _, ok := meta.GetAnnotations()["k8s.io/initial-events-end"]; ok { + if meta.GetAnnotations()["k8s.io/initial-events-end"] == "true" { if exitOnInitialEventsEndBookmark != nil { *exitOnInitialEventsEndBookmark = true } diff --git a/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go b/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go new file mode 100644 index 000000000..aa3027d71 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go @@ -0,0 +1,119 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + "os" + "sort" + "strconv" + "time" + + "github.com/google/go-cmp/cmp" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" +) + +var dataConsistencyDetectionEnabled = false + +func init() { + dataConsistencyDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_WATCHLIST_INCONSISTENCY_DETECTOR")) +} + +// checkWatchListConsistencyIfRequested performs a data consistency check only when +// the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup. +// +// The consistency check is meant to be enforced only in the CI, not in production. +// The check ensures that data retrieved by the watch-list api call +// is exactly the same as data received by the standard list api call. +// +// Note that this function will panic when data inconsistency is detected. +// This is intentional because we want to catch it in the CI. +func checkWatchListConsistencyIfRequested(stopCh <-chan struct{}, identity string, lastSyncedResourceVersion string, listerWatcher Lister, store Store) { + if !dataConsistencyDetectionEnabled { + return + } + checkWatchListConsistency(stopCh, identity, lastSyncedResourceVersion, listerWatcher, store) +} + +// checkWatchListConsistency exists solely for testing purposes. +// we cannot use checkWatchListConsistencyIfRequested because +// it is guarded by an environmental variable. +// we cannot manipulate the environmental variable because +// it will affect other tests in this package. 
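The detector is toggled by an environment variable that the package reads once in init, so it has to be present in the process environment before the binary starts rather than being set from inside a test. A tiny standalone sketch of the same gate pattern (names local to this sketch, not client-go API):

package main

import (
    "fmt"
    "os"
    "strconv"
)

// Read once at startup, mirroring the init() above; flipping the variable
// later in the same process has no effect.
var detectorEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_WATCHLIST_INCONSISTENCY_DETECTOR"))

func main() {
    // Run as: KUBE_WATCHLIST_INCONSISTENCY_DETECTOR=true go run .
    fmt.Println("watch-list consistency checks enabled:", detectorEnabled)
}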
+func checkWatchListConsistency(stopCh <-chan struct{}, identity string, lastSyncedResourceVersion string, listerWatcher Lister, store Store) { + klog.Warningf("%s: data consistency check for the watch-list feature is enabled, this will result in an additional call to the API server.", identity) + opts := metav1.ListOptions{ + ResourceVersion: lastSyncedResourceVersion, + ResourceVersionMatch: metav1.ResourceVersionMatchExact, + } + var list runtime.Object + err := wait.PollUntilContextCancel(wait.ContextForChannel(stopCh), time.Second, true, func(_ context.Context) (done bool, err error) { + list, err = listerWatcher.List(opts) + if err != nil { + // the consistency check will only be enabled in the CI + // and LIST calls in general will be retired by the client-go library + // if we fail simply log and retry + klog.Errorf("failed to list data from the server, retrying until stopCh is closed, err: %v", err) + return false, nil + } + return true, nil + }) + if err != nil { + klog.Errorf("failed to list data from the server, the watch-list consistency check won't be performed, stopCh was closed, err: %v", err) + return + } + + rawListItems, err := meta.ExtractListWithAlloc(list) + if err != nil { + panic(err) // this should never happen + } + + listItems := toMetaObjectSliceOrDie(rawListItems) + storeItems := toMetaObjectSliceOrDie(store.List()) + + sort.Sort(byUID(listItems)) + sort.Sort(byUID(storeItems)) + + if !cmp.Equal(listItems, storeItems) { + klog.Infof("%s: data received by the new watch-list api call is different than received by the standard list api call, diff: %v", identity, cmp.Diff(listItems, storeItems)) + msg := "data inconsistency detected for the watch-list feature, panicking!" + panic(msg) + } +} + +type byUID []metav1.Object + +func (a byUID) Len() int { return len(a) } +func (a byUID) Less(i, j int) bool { return a[i].GetUID() < a[j].GetUID() } +func (a byUID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func toMetaObjectSliceOrDie[T any](s []T) []metav1.Object { + result := make([]metav1.Object, len(s)) + for i, v := range s { + m, err := meta.Accessor(v) + if err != nil { + panic(err) + } + result[i] = m + } + return result +} diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index be8694ddb..b3f37431d 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -334,11 +334,9 @@ func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool }, stopCh) if err != nil { - klog.V(2).Infof("stop requested") return false } - klog.V(4).Infof("caches populated") return true } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go b/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go index 10744156b..0fc2fd0a0 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go @@ -49,12 +49,12 @@ type InClusterConfig interface { Possible() bool } -// NewNonInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name +// NewNonInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed context name func NewNonInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides) ClientConfig { return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: 
overrides}} } -// NewInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name and the fallback auth reader +// NewInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed context name and the fallback auth reader func NewInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides, fallbackReader io.Reader) ClientConfig { return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}, fallbackReader: fallbackReader} } diff --git a/vendor/k8s.io/client-go/tools/events/event_broadcaster.go b/vendor/k8s.io/client-go/tools/events/event_broadcaster.go index e3000bf6e..e0164f301 100644 --- a/vendor/k8s.io/client-go/tools/events/event_broadcaster.go +++ b/vendor/k8s.io/client-go/tools/events/event_broadcaster.go @@ -81,27 +81,27 @@ type EventSinkImpl struct { } // Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (e *EventSinkImpl) Create(event *eventsv1.Event) (*eventsv1.Event, error) { +func (e *EventSinkImpl) Create(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) { if event.Namespace == "" { return nil, fmt.Errorf("can't create an event with empty namespace") } - return e.Interface.Events(event.Namespace).Create(context.TODO(), event, metav1.CreateOptions{}) + return e.Interface.Events(event.Namespace).Create(ctx, event, metav1.CreateOptions{}) } // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (e *EventSinkImpl) Update(event *eventsv1.Event) (*eventsv1.Event, error) { +func (e *EventSinkImpl) Update(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) { if event.Namespace == "" { return nil, fmt.Errorf("can't update an event with empty namespace") } - return e.Interface.Events(event.Namespace).Update(context.TODO(), event, metav1.UpdateOptions{}) + return e.Interface.Events(event.Namespace).Update(ctx, event, metav1.UpdateOptions{}) } // Patch applies the patch and returns the patched event, and an error, if there is any. -func (e *EventSinkImpl) Patch(event *eventsv1.Event, data []byte) (*eventsv1.Event, error) { +func (e *EventSinkImpl) Patch(ctx context.Context, event *eventsv1.Event, data []byte) (*eventsv1.Event, error) { if event.Namespace == "" { return nil, fmt.Errorf("can't patch an event with empty namespace") } - return e.Interface.Events(event.Namespace).Patch(context.TODO(), event.Name, types.StrategicMergePatchType, data, metav1.PatchOptions{}) + return e.Interface.Events(event.Namespace).Patch(ctx, event.Name, types.StrategicMergePatchType, data, metav1.PatchOptions{}) } // NewBroadcaster Creates a new event broadcaster. 
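Because every EventSink method now takes a context, any custom sink has to adopt the new signatures to keep compiling. A minimal print-only sink that satisfies the updated interface (purely illustrative, not part of the vendored code):

package main

import (
    "context"
    "fmt"

    eventsv1 "k8s.io/api/events/v1"
    "k8s.io/client-go/tools/events"
)

// logSink shows the shape of the updated interface; it just prints instead of
// calling the API server.
type logSink struct{}

var _ events.EventSink = logSink{}

func (logSink) Create(ctx context.Context, e *eventsv1.Event) (*eventsv1.Event, error) {
    fmt.Println("create:", e.Reason)
    return e, nil
}

func (logSink) Update(ctx context.Context, e *eventsv1.Event) (*eventsv1.Event, error) {
    fmt.Println("update:", e.Reason)
    return e, nil
}

func (logSink) Patch(ctx context.Context, e *eventsv1.Event, data []byte) (*eventsv1.Event, error) {
    fmt.Println("patch:", e.Reason)
    return e, nil
}

func main() {
    var sink events.EventSink = logSink{}
    _, _ = sink.Create(context.Background(), &eventsv1.Event{Reason: "Pulled"})
}

The compile-time "var _ events.EventSink" assertion is the quickest way to catch sinks still written against the old, context-free signatures.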
@@ -124,13 +124,13 @@ func (e *eventBroadcasterImpl) Shutdown() { } // refreshExistingEventSeries refresh events TTL -func (e *eventBroadcasterImpl) refreshExistingEventSeries() { +func (e *eventBroadcasterImpl) refreshExistingEventSeries(ctx context.Context) { // TODO: Investigate whether lock contention won't be a problem e.mu.Lock() defer e.mu.Unlock() for isomorphicKey, event := range e.eventCache { if event.Series != nil { - if recordedEvent, retry := recordEvent(e.sink, event); !retry { + if recordedEvent, retry := recordEvent(ctx, e.sink, event); !retry { if recordedEvent != nil { e.eventCache[isomorphicKey] = recordedEvent } @@ -142,7 +142,7 @@ func (e *eventBroadcasterImpl) refreshExistingEventSeries() { // finishSeries checks if a series has ended and either: // - write final count to the apiserver // - delete a singleton event (i.e. series field is nil) from the cache -func (e *eventBroadcasterImpl) finishSeries() { +func (e *eventBroadcasterImpl) finishSeries(ctx context.Context) { // TODO: Investigate whether lock contention won't be a problem e.mu.Lock() defer e.mu.Unlock() @@ -150,7 +150,7 @@ func (e *eventBroadcasterImpl) finishSeries() { eventSerie := event.Series if eventSerie != nil { if eventSerie.LastObservedTime.Time.Before(time.Now().Add(-finishTime)) { - if _, retry := recordEvent(e.sink, event); !retry { + if _, retry := recordEvent(ctx, e.sink, event); !retry { delete(e.eventCache, isomorphicKey) } } @@ -161,13 +161,13 @@ func (e *eventBroadcasterImpl) finishSeries() { } // NewRecorder returns an EventRecorder that records events with the given event source. -func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorder { +func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorderLogger { hostname, _ := os.Hostname() reportingInstance := reportingController + "-" + hostname - return &recorderImpl{scheme, reportingController, reportingInstance, e.Broadcaster, clock.RealClock{}} + return &recorderImplLogger{recorderImpl: &recorderImpl{scheme, reportingController, reportingInstance, e.Broadcaster, clock.RealClock{}}, logger: klog.Background()} } -func (e *eventBroadcasterImpl) recordToSink(event *eventsv1.Event, clock clock.Clock) { +func (e *eventBroadcasterImpl) recordToSink(ctx context.Context, event *eventsv1.Event, clock clock.Clock) { // Make a copy before modification, because there could be multiple listeners. eventCopy := event.DeepCopy() go func() { @@ -197,7 +197,7 @@ func (e *eventBroadcasterImpl) recordToSink(event *eventsv1.Event, clock clock.C }() if evToRecord != nil { // TODO: Add a metric counting the number of recording attempts - e.attemptRecording(evToRecord) + e.attemptRecording(ctx, evToRecord) // We don't want the new recorded Event to be reflected in the // client's cache because server-side mutations could mess with the // aggregation mechanism used by the client. 
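Taken together with StartRecordingToSinkWithContext, which this file's diff adds further down, the context-aware wiring on the caller side looks roughly like the sketch below; the clientset construction, component name, and node object are assumptions for illustration:

package main

import (
    "context"
    "time"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/client-go/tools/events"
)

func main() {
    config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    clientset := kubernetes.NewForConfigOrDie(config)

    // Cancelling the context stops the recording goroutines.
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    broadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: clientset.EventsV1()})
    if err := broadcaster.StartRecordingToSinkWithContext(ctx); err != nil {
        panic(err)
    }
    defer broadcaster.Shutdown()

    recorder := broadcaster.NewRecorder(scheme.Scheme, "blob-csi-driver") // component name is an assumption
    node := &corev1.Node{}
    node.Name = "worker-0"
    recorder.Eventf(node, nil, corev1.EventTypeNormal, "Checked", "HealthCheck", "node %s looks healthy", node.Name)

    // Recording is asynchronous; give the broadcaster a moment to flush
    // before the deferred cancel shuts it down.
    time.Sleep(time.Second)
}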
@@ -205,40 +205,45 @@ func (e *eventBroadcasterImpl) recordToSink(event *eventsv1.Event, clock clock.C }() } -func (e *eventBroadcasterImpl) attemptRecording(event *eventsv1.Event) *eventsv1.Event { +func (e *eventBroadcasterImpl) attemptRecording(ctx context.Context, event *eventsv1.Event) { tries := 0 for { - if recordedEvent, retry := recordEvent(e.sink, event); !retry { - return recordedEvent + if _, retry := recordEvent(ctx, e.sink, event); !retry { + return } tries++ if tries >= maxTriesPerEvent { - klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) - return nil + klog.FromContext(ctx).Error(nil, "Unable to write event (retry limit exceeded!)", "event", event) + return } // Randomize sleep so that various clients won't all be - // synced up if the master goes down. - time.Sleep(wait.Jitter(e.sleepDuration, 0.25)) + // synced up if the master goes down. Give up when + // the context is canceled. + select { + case <-ctx.Done(): + return + case <-time.After(wait.Jitter(e.sleepDuration, 0.25)): + } } } -func recordEvent(sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool) { +func recordEvent(ctx context.Context, sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool) { var newEvent *eventsv1.Event var err error isEventSeries := event.Series != nil if isEventSeries { patch, patchBytesErr := createPatchBytesForSeries(event) if patchBytesErr != nil { - klog.Errorf("Unable to calculate diff, no merge is possible: %v", patchBytesErr) + klog.FromContext(ctx).Error(patchBytesErr, "Unable to calculate diff, no merge is possible") return nil, false } - newEvent, err = sink.Patch(event, patch) + newEvent, err = sink.Patch(ctx, event, patch) } // Update can fail because the event may have been removed and it no longer exists. if !isEventSeries || (isEventSeries && util.IsKeyNotFoundError(err)) { // Making sure that ResourceVersion is empty on creation event.ResourceVersion = "" - newEvent, err = sink.Create(event) + newEvent, err = sink.Create(ctx, event) } if err == nil { return newEvent, false @@ -248,7 +253,7 @@ func recordEvent(sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool) switch err.(type) { case *restclient.RequestConstructionError: // We will construct the request the same next time, so don't keep trying. - klog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) + klog.FromContext(ctx).Error(err, "Unable to construct event (will not retry!)", "event", event) return nil, false case *errors.StatusError: if errors.IsAlreadyExists(err) { @@ -260,9 +265,9 @@ func recordEvent(sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool) if isEventSeries { return nil, true } - klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) + klog.FromContext(ctx).V(5).Info("Server rejected event (will not retry!)", "event", event, "err", err) } else { - klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) + klog.FromContext(ctx).Error(err, "Server rejected event (will not retry!)", "event", event) } return nil, false case *errors.UnexpectedObjectError: @@ -271,7 +276,7 @@ func recordEvent(sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool) default: // This case includes actual http transport errors. Go ahead and retry. 
} - klog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err) + klog.FromContext(ctx).Error(err, "Unable to write event (may retry after sleeping)") return nil, true } @@ -307,21 +312,31 @@ func getKey(event *eventsv1.Event) eventKey { // StartStructuredLogging starts sending events received from this EventBroadcaster to the structured logging function. // The return value can be ignored or used to stop recording, if desired. // TODO: this function should also return an error. +// +// Deprecated: use StartLogging instead. func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) func() { - stopWatcher, err := e.StartEventWatcher( + logger := klog.Background().V(int(verbosity)) + stopWatcher, err := e.StartLogging(logger) + if err != nil { + logger.Error(err, "Failed to start event watcher") + return func() {} + } + return stopWatcher +} + +// StartLogging starts sending events received from this EventBroadcaster to the structured logger. +// To adjust verbosity, use the logger's V method (i.e. pass `logger.V(3)` instead of `logger`). +// The returned function can be ignored or used to stop recording, if desired. +func (e *eventBroadcasterImpl) StartLogging(logger klog.Logger) (func(), error) { + return e.StartEventWatcher( func(obj runtime.Object) { event, ok := obj.(*eventsv1.Event) if !ok { - klog.Errorf("unexpected type, expected eventsv1.Event") + logger.Error(nil, "unexpected type, expected eventsv1.Event") return } - klog.V(verbosity).InfoS("Event occurred", "object", klog.KRef(event.Regarding.Namespace, event.Regarding.Name), "kind", event.Regarding.Kind, "apiVersion", event.Regarding.APIVersion, "type", event.Type, "reason", event.Reason, "action", event.Action, "note", event.Note) + logger.Info("Event occurred", "object", klog.KRef(event.Regarding.Namespace, event.Regarding.Name), "kind", event.Regarding.Kind, "apiVersion", event.Regarding.APIVersion, "type", event.Type, "reason", event.Reason, "action", event.Action, "note", event.Note) }) - if err != nil { - klog.Errorf("failed to start event watcher: '%v'", err) - return func() {} - } - return stopWatcher } // StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function. @@ -329,7 +344,6 @@ func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) func func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(event runtime.Object)) (func(), error) { watcher, err := e.Watch() if err != nil { - klog.Errorf("Unable start event watcher: '%v' (will not retry!)", err) return nil, err } go func() { @@ -345,37 +359,42 @@ func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(event runtime return watcher.Stop, nil } -func (e *eventBroadcasterImpl) startRecordingEvents(stopCh <-chan struct{}) error { +func (e *eventBroadcasterImpl) startRecordingEvents(ctx context.Context) error { eventHandler := func(obj runtime.Object) { event, ok := obj.(*eventsv1.Event) if !ok { - klog.Errorf("unexpected type, expected eventsv1.Event") + klog.FromContext(ctx).Error(nil, "unexpected type, expected eventsv1.Event") return } - e.recordToSink(event, clock.RealClock{}) + e.recordToSink(ctx, event, clock.RealClock{}) } stopWatcher, err := e.StartEventWatcher(eventHandler) if err != nil { return err } go func() { - <-stopCh + <-ctx.Done() stopWatcher() }() return nil } // StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. 
+// Deprecated: use StartRecordingToSinkWithContext instead. func (e *eventBroadcasterImpl) StartRecordingToSink(stopCh <-chan struct{}) { - go wait.Until(e.refreshExistingEventSeries, refreshTime, stopCh) - go wait.Until(e.finishSeries, finishTime, stopCh) - err := e.startRecordingEvents(stopCh) + err := e.StartRecordingToSinkWithContext(wait.ContextForChannel(stopCh)) if err != nil { - klog.Errorf("unexpected type, expected eventsv1.Event") - return + klog.Background().Error(err, "Failed to start recording to sink") } } +// StartRecordingToSinkWithContext starts sending events received from the specified eventBroadcaster to the given sink. +func (e *eventBroadcasterImpl) StartRecordingToSinkWithContext(ctx context.Context) error { + go wait.UntilWithContext(ctx, e.refreshExistingEventSeries, refreshTime) + go wait.UntilWithContext(ctx, e.finishSeries, finishTime) + return e.startRecordingEvents(ctx) +} + type eventBroadcasterAdapterImpl struct { coreClient typedv1core.EventsGetter coreBroadcaster record.EventBroadcaster @@ -409,14 +428,14 @@ func (e *eventBroadcasterAdapterImpl) StartRecordingToSink(stopCh <-chan struct{ } } -func (e *eventBroadcasterAdapterImpl) NewRecorder(name string) EventRecorder { +func (e *eventBroadcasterAdapterImpl) NewRecorder(name string) EventRecorderLogger { if e.eventsv1Broadcaster != nil && e.eventsv1Client != nil { return e.eventsv1Broadcaster.NewRecorder(scheme.Scheme, name) } return record.NewEventRecorderAdapter(e.DeprecatedNewLegacyRecorder(name)) } -func (e *eventBroadcasterAdapterImpl) DeprecatedNewLegacyRecorder(name string) record.EventRecorder { +func (e *eventBroadcasterAdapterImpl) DeprecatedNewLegacyRecorder(name string) record.EventRecorderLogger { return e.coreBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: name}) } diff --git a/vendor/k8s.io/client-go/tools/events/event_recorder.go b/vendor/k8s.io/client-go/tools/events/event_recorder.go index 17d053271..654317884 100644 --- a/vendor/k8s.io/client-go/tools/events/event_recorder.go +++ b/vendor/k8s.io/client-go/tools/events/event_recorder.go @@ -40,12 +40,33 @@ type recorderImpl struct { clock clock.Clock } +var _ EventRecorder = &recorderImpl{} + func (recorder *recorderImpl) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) { + recorder.eventf(klog.Background(), regarding, related, eventtype, reason, action, note, args...) +} + +type recorderImplLogger struct { + *recorderImpl + logger klog.Logger +} + +var _ EventRecorderLogger = &recorderImplLogger{} + +func (recorder *recorderImplLogger) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) { + recorder.eventf(recorder.logger, regarding, related, eventtype, reason, action, note, args...) +} + +func (recorder *recorderImplLogger) WithLogger(logger klog.Logger) EventRecorderLogger { + return &recorderImplLogger{recorderImpl: recorder.recorderImpl, logger: logger} +} + +func (recorder *recorderImpl) eventf(logger klog.Logger, regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) { timestamp := metav1.MicroTime{Time: time.Now()} message := fmt.Sprintf(note, args...) refRegarding, err := reference.GetReference(recorder.scheme, regarding) if err != nil { - klog.Errorf("Could not construct reference to: '%#v' due to: '%v'. 
Will not report event: '%v' '%v' '%v'", regarding, err, eventtype, reason, message) + logger.Error(err, "Could not construct reference, will not report event", "object", regarding, "eventType", eventtype, "reason", reason, "message", message) return } @@ -53,11 +74,11 @@ func (recorder *recorderImpl) Eventf(regarding runtime.Object, related runtime.O if related != nil { refRelated, err = reference.GetReference(recorder.scheme, related) if err != nil { - klog.V(9).Infof("Could not construct reference to: '%#v' due to: '%v'.", related, err) + logger.V(9).Info("Could not construct reference", "object", related, "err", err) } } if !util.ValidateEventType(eventtype) { - klog.Errorf("Unsupported event type: '%v'", eventtype) + logger.Error(nil, "Unsupported event type", "eventType", eventtype) return } event := recorder.makeEvent(refRegarding, refRelated, timestamp, eventtype, reason, message, recorder.reportingController, recorder.reportingInstance, action) diff --git a/vendor/k8s.io/client-go/tools/events/fake.go b/vendor/k8s.io/client-go/tools/events/fake.go index d572e0d3e..e26826d6c 100644 --- a/vendor/k8s.io/client-go/tools/events/fake.go +++ b/vendor/k8s.io/client-go/tools/events/fake.go @@ -20,6 +20,7 @@ import ( "fmt" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" ) // FakeRecorder is used as a fake during tests. It is thread safe. It is usable @@ -29,6 +30,8 @@ type FakeRecorder struct { Events chan string } +var _ EventRecorderLogger = &FakeRecorder{} + // Eventf emits an event func (f *FakeRecorder) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) { if f.Events != nil { @@ -36,6 +39,10 @@ func (f *FakeRecorder) Eventf(regarding runtime.Object, related runtime.Object, } } +func (f *FakeRecorder) WithLogger(logger klog.Logger) EventRecorderLogger { + return f +} + // NewFakeRecorder creates new fake event recorder with event channel with // buffer of given size. func NewFakeRecorder(bufferSize int) *FakeRecorder { diff --git a/vendor/k8s.io/client-go/tools/events/interfaces.go b/vendor/k8s.io/client-go/tools/events/interfaces.go index 20f8ca05d..bb6109f62 100644 --- a/vendor/k8s.io/client-go/tools/events/interfaces.go +++ b/vendor/k8s.io/client-go/tools/events/interfaces.go @@ -17,39 +17,30 @@ limitations under the License. package events import ( + "context" + eventsv1 "k8s.io/api/events/v1" "k8s.io/apimachinery/pkg/runtime" + internalevents "k8s.io/client-go/tools/internal/events" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" ) -// EventRecorder knows how to record events on behalf of an EventSource. -type EventRecorder interface { - // Eventf constructs an event from the given information and puts it in the queue for sending. - // 'regarding' is the object this event is about. Event will make a reference-- or you may also - // pass a reference to the object directly. - // 'related' is the secondary object for more complex actions. E.g. when regarding object triggers - // a creation or deletion of related object. - // 'type' of this event, and can be one of Normal, Warning. New types could be added in future - // 'reason' is the reason this event is generated. 'reason' should be short and unique; it - // should be in UpperCamelCase format (starting with a capital letter). "reason" will be used - // to automate handling of events, so imagine people writing switch statements to handle them. - // You want to make that easy. 
- // 'action' explains what happened with regarding/what action did the ReportingController - // (ReportingController is a type of a Controller reporting an Event, e.g. k8s.io/node-controller, k8s.io/kubelet.) - // take in regarding's name; it should be in UpperCamelCase format (starting with a capital letter). - // 'note' is intended to be human readable. - Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) -} +type EventRecorder = internalevents.EventRecorder +type EventRecorderLogger = internalevents.EventRecorderLogger // EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log. type EventBroadcaster interface { // StartRecordingToSink starts sending events received from the specified eventBroadcaster. + // Deprecated: use StartRecordingToSinkWithContext instead. StartRecordingToSink(stopCh <-chan struct{}) + // StartRecordingToSink starts sending events received from the specified eventBroadcaster. + StartRecordingToSinkWithContext(ctx context.Context) error + // NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster // with the event source set to the given event source. - NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorder + NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorderLogger // StartEventWatcher enables you to watch for emitted events without usage // of StartRecordingToSink. This lets you also process events in a custom way (e.g. in tests). @@ -59,8 +50,14 @@ type EventBroadcaster interface { // StartStructuredLogging starts sending events received from this EventBroadcaster to the structured // logging function. The return value can be ignored or used to stop recording, if desired. + // Deprecated: use StartLogging instead. StartStructuredLogging(verbosity klog.Level) func() + // StartLogging starts sending events received from this EventBroadcaster to the structured logger. + // To adjust verbosity, use the logger's V method (i.e. pass `logger.V(3)` instead of `logger`). + // The returned function can be ignored or used to stop recording, if desired. + StartLogging(logger klog.Logger) (func(), error) + // Shutdown shuts down the broadcaster Shutdown() } @@ -70,9 +67,9 @@ type EventBroadcaster interface { // It is assumed that EventSink will return the same sorts of errors as // client-go's REST client. type EventSink interface { - Create(event *eventsv1.Event) (*eventsv1.Event, error) - Update(event *eventsv1.Event) (*eventsv1.Event, error) - Patch(oldEvent *eventsv1.Event, data []byte) (*eventsv1.Event, error) + Create(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) + Update(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) + Patch(ctx context.Context, oldEvent *eventsv1.Event, data []byte) (*eventsv1.Event, error) } // EventBroadcasterAdapter is a auxiliary interface to simplify migration to @@ -85,10 +82,10 @@ type EventBroadcasterAdapter interface { StartRecordingToSink(stopCh <-chan struct{}) // NewRecorder creates a new Event Recorder with specified name. - NewRecorder(name string) EventRecorder + NewRecorder(name string) EventRecorderLogger // DeprecatedNewLegacyRecorder creates a legacy Event Recorder with specific name. - DeprecatedNewLegacyRecorder(name string) record.EventRecorder + DeprecatedNewLegacyRecorder(name string) record.EventRecorderLogger // Shutdown shuts down the broadcaster. 
Shutdown() diff --git a/vendor/k8s.io/client-go/tools/internal/events/interfaces.go b/vendor/k8s.io/client-go/tools/internal/events/interfaces.go new file mode 100644 index 000000000..be6261b53 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/internal/events/interfaces.go @@ -0,0 +1,59 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package internal is needed to break an import cycle: record.EventRecorderAdapter +// needs this interface definition to implement it, but event.NewEventBroadcasterAdapter +// needs record.NewBroadcaster. Therefore this interface cannot be in event/interfaces.go. +package internal + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" +) + +// EventRecorder knows how to record events on behalf of an EventSource. +type EventRecorder interface { + // Eventf constructs an event from the given information and puts it in the queue for sending. + // 'regarding' is the object this event is about. Event will make a reference-- or you may also + // pass a reference to the object directly. + // 'related' is the secondary object for more complex actions. E.g. when regarding object triggers + // a creation or deletion of related object. + // 'type' of this event, and can be one of Normal, Warning. New types could be added in future + // 'reason' is the reason this event is generated. 'reason' should be short and unique; it + // should be in UpperCamelCase format (starting with a capital letter). "reason" will be used + // to automate handling of events, so imagine people writing switch statements to handle them. + // You want to make that easy. + // 'action' explains what happened with regarding/what action did the ReportingController + // (ReportingController is a type of a Controller reporting an Event, e.g. k8s.io/node-controller, k8s.io/kubelet.) + // take in regarding's name; it should be in UpperCamelCase format (starting with a capital letter). + // 'note' is intended to be human readable. + Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) +} + +// EventRecorderLogger extends EventRecorder such that a logger can +// be set for methods in EventRecorder. Normally, those methods +// uses the global default logger to record errors and debug messages. +// If that is not desired, use WithLogger to provide a logger instance. +type EventRecorderLogger interface { + EventRecorder + + // WithLogger replaces the context used for logging. This is a cheap call + // and meant to be used for contextual logging: + // recorder := ... + // logger := klog.FromContext(ctx) + // recorder.WithLogger(logger).Eventf(...) 
+ WithLogger(logger klog.Logger) EventRecorderLogger +} diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go index f176167dc..d1511696d 100644 --- a/vendor/k8s.io/client-go/tools/record/event.go +++ b/vendor/k8s.io/client-go/tools/record/event.go @@ -29,6 +29,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/watch" restclient "k8s.io/client-go/rest" + internalevents "k8s.io/client-go/tools/internal/events" "k8s.io/client-go/tools/record/util" ref "k8s.io/client-go/tools/reference" "k8s.io/klog/v2" @@ -110,6 +111,21 @@ type EventRecorder interface { AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) } +// EventRecorderLogger extends EventRecorder such that a logger can +// be set for methods in EventRecorder. Normally, those methods +// uses the global default logger to record errors and debug messages. +// If that is not desired, use WithLogger to provide a logger instance. +type EventRecorderLogger interface { + EventRecorder + + // WithLogger replaces the context used for logging. This is a cheap call + // and meant to be used for contextual logging: + // recorder := ... + // logger := klog.FromContext(ctx) + // recorder.WithLogger(logger).Eventf(...) + WithLogger(logger klog.Logger) EventRecorderLogger +} + // EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log. type EventBroadcaster interface { // StartEventWatcher starts sending events received from this EventBroadcaster to the given @@ -131,7 +147,7 @@ type EventBroadcaster interface { // NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster // with the event source set to the given event source. - NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder + NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorderLogger // Shutdown shuts down the broadcaster. Once the broadcaster is shut // down, it will only try to record an event in a sink once before @@ -142,12 +158,14 @@ type EventBroadcaster interface { // EventRecorderAdapter is a wrapper around a "k8s.io/client-go/tools/record".EventRecorder // implementing the new "k8s.io/client-go/tools/events".EventRecorder interface. type EventRecorderAdapter struct { - recorder EventRecorder + recorder EventRecorderLogger } +var _ internalevents.EventRecorder = &EventRecorderAdapter{} + // NewEventRecorderAdapter returns an adapter implementing the new // "k8s.io/client-go/tools/events".EventRecorder interface. -func NewEventRecorderAdapter(recorder EventRecorder) *EventRecorderAdapter { +func NewEventRecorderAdapter(recorder EventRecorderLogger) *EventRecorderAdapter { return &EventRecorderAdapter{ recorder: recorder, } @@ -158,28 +176,76 @@ func (a *EventRecorderAdapter) Eventf(regarding, _ runtime.Object, eventtype, re a.recorder.Eventf(regarding, eventtype, reason, note, args...) } +func (a *EventRecorderAdapter) WithLogger(logger klog.Logger) internalevents.EventRecorderLogger { + return &EventRecorderAdapter{ + recorder: a.recorder.WithLogger(logger), + } +} + // Creates a new event broadcaster. 
-func NewBroadcaster() EventBroadcaster { - return newEventBroadcaster(watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration) +func NewBroadcaster(opts ...BroadcasterOption) EventBroadcaster { + c := config{ + sleepDuration: defaultSleepDuration, + } + for _, opt := range opts { + opt(&c) + } + eventBroadcaster := &eventBroadcasterImpl{ + Broadcaster: watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), + sleepDuration: c.sleepDuration, + options: c.CorrelatorOptions, + } + ctx := c.Context + if ctx == nil { + ctx = context.Background() + } else { + // Calling Shutdown is not required when a context was provided: + // when the context is canceled, this goroutine will shut down + // the broadcaster. + go func() { + <-ctx.Done() + eventBroadcaster.Broadcaster.Shutdown() + }() + } + eventBroadcaster.cancelationCtx, eventBroadcaster.cancel = context.WithCancel(ctx) + return eventBroadcaster } func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster { - return newEventBroadcaster(watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), sleepDuration) + return NewBroadcaster(WithSleepDuration(sleepDuration)) } func NewBroadcasterWithCorrelatorOptions(options CorrelatorOptions) EventBroadcaster { - eventBroadcaster := newEventBroadcaster(watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration) - eventBroadcaster.options = options - return eventBroadcaster + return NewBroadcaster(WithCorrelatorOptions(options)) } -func newEventBroadcaster(broadcaster *watch.Broadcaster, sleepDuration time.Duration) *eventBroadcasterImpl { - eventBroadcaster := &eventBroadcasterImpl{ - Broadcaster: broadcaster, - sleepDuration: sleepDuration, +func WithCorrelatorOptions(options CorrelatorOptions) BroadcasterOption { + return func(c *config) { + c.CorrelatorOptions = options + } +} + +// WithContext sets a context for the broadcaster. Canceling the context will +// shut down the broadcaster, Shutdown doesn't need to be called. The context +// can also be used to provide a logger. 
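A caller-side sketch of the new functional options for the legacy record broadcaster; the pod and component names are invented, and the short sleep only exists to let the asynchronous watcher print before the program exits:

package main

import (
    "context"
    "time"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/tools/record"
)

func main() {
    // Cancelling the context now shuts the broadcaster down; an explicit
    // Shutdown call is not required when WithContext is used.
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    broadcaster := record.NewBroadcaster(record.WithContext(ctx))
    watcher := broadcaster.StartStructuredLogging(0)
    defer watcher.Stop()

    recorder := broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "example"})
    pod := &corev1.Pod{}
    pod.Name, pod.Namespace = "demo", "default"
    recorder.Event(pod, corev1.EventTypeNormal, "Created", "created for illustration")

    // Delivery is asynchronous; pause briefly so the log line can appear
    // before the program exits.
    time.Sleep(100 * time.Millisecond)
}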
+func WithContext(ctx context.Context) BroadcasterOption { + return func(c *config) { + c.Context = ctx } - eventBroadcaster.cancelationCtx, eventBroadcaster.cancel = context.WithCancel(context.Background()) - return eventBroadcaster +} + +func WithSleepDuration(sleepDuration time.Duration) BroadcasterOption { + return func(c *config) { + c.sleepDuration = sleepDuration + } +} + +type BroadcasterOption func(*config) + +type config struct { + CorrelatorOptions + context.Context + sleepDuration time.Duration } type eventBroadcasterImpl struct { @@ -220,12 +286,12 @@ func (e *eventBroadcasterImpl) recordToSink(sink EventSink, event *v1.Event, eve } tries := 0 for { - if recordEvent(sink, result.Event, result.Patch, result.Event.Count > 1, eventCorrelator) { + if recordEvent(e.cancelationCtx, sink, result.Event, result.Patch, result.Event.Count > 1, eventCorrelator) { break } tries++ if tries >= maxTriesPerEvent { - klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) + klog.FromContext(e.cancelationCtx).Error(nil, "Unable to write event (retry limit exceeded!)", "event", event) break } @@ -237,7 +303,7 @@ func (e *eventBroadcasterImpl) recordToSink(sink EventSink, event *v1.Event, eve } select { case <-e.cancelationCtx.Done(): - klog.Errorf("Unable to write event '%#v' (broadcaster is shut down)", event) + klog.FromContext(e.cancelationCtx).Error(nil, "Unable to write event (broadcaster is shut down)", "event", event) return case <-time.After(delay): } @@ -248,7 +314,7 @@ func (e *eventBroadcasterImpl) recordToSink(sink EventSink, event *v1.Event, eve // was successfully recorded or discarded, false if it should be retried. // If updateExistingEvent is false, it creates a new event, otherwise it updates // existing event. -func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool { +func recordEvent(ctx context.Context, sink EventSink, event *v1.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool { var newEvent *v1.Event var err error if updateExistingEvent { @@ -271,13 +337,13 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv switch err.(type) { case *restclient.RequestConstructionError: // We will construct the request the same next time, so don't keep trying. - klog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) + klog.FromContext(ctx).Error(err, "Unable to construct event (will not retry!)", "event", event) return true case *errors.StatusError: if errors.IsAlreadyExists(err) || errors.HasStatusCause(err, v1.NamespaceTerminatingCause) { - klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) + klog.FromContext(ctx).V(5).Info("Server rejected event (will not retry!)", "event", event, "err", err) } else { - klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) + klog.FromContext(ctx).Error(err, "Server rejected event (will not retry!)", "event", event) } return true case *errors.UnexpectedObjectError: @@ -286,7 +352,7 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv default: // This case includes actual http transport errors. Go ahead and retry. 
} - klog.Errorf("Unable to write event: '%#v': '%v'(may retry after sleeping)", event, err) + klog.FromContext(ctx).Error(err, "Unable to write event (may retry after sleeping)", "event", event) return false } @@ -299,12 +365,15 @@ func (e *eventBroadcasterImpl) StartLogging(logf func(format string, args ...int }) } -// StartStructuredLogging starts sending events received from this EventBroadcaster to the structured logging function. +// StartStructuredLogging starts sending events received from this EventBroadcaster to a structured logger. +// The logger is retrieved from a context if the broadcaster was constructed with a context, otherwise +// the global default is used. // The return value can be ignored or used to stop recording, if desired. func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) watch.Interface { + loggerV := klog.FromContext(e.cancelationCtx).V(int(verbosity)) return e.StartEventWatcher( func(e *v1.Event) { - klog.V(verbosity).InfoS("Event occurred", "object", klog.KRef(e.InvolvedObject.Namespace, e.InvolvedObject.Name), "fieldPath", e.InvolvedObject.FieldPath, "kind", e.InvolvedObject.Kind, "apiVersion", e.InvolvedObject.APIVersion, "type", e.Type, "reason", e.Reason, "message", e.Message) + loggerV.Info("Event occurred", "object", klog.KRef(e.InvolvedObject.Namespace, e.InvolvedObject.Name), "fieldPath", e.InvolvedObject.FieldPath, "kind", e.InvolvedObject.Kind, "apiVersion", e.InvolvedObject.APIVersion, "type", e.Type, "reason", e.Reason, "message", e.Message) }) } @@ -313,26 +382,32 @@ func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) watc func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface { watcher, err := e.Watch() if err != nil { - klog.Errorf("Unable start event watcher: '%v' (will not retry!)", err) + klog.FromContext(e.cancelationCtx).Error(err, "Unable start event watcher (will not retry!)") } go func() { defer utilruntime.HandleCrash() - for watchEvent := range watcher.ResultChan() { - event, ok := watchEvent.Object.(*v1.Event) - if !ok { - // This is all local, so there's no reason this should - // ever happen. - continue + for { + select { + case <-e.cancelationCtx.Done(): + watcher.Stop() + return + case watchEvent := <-watcher.ResultChan(): + event, ok := watchEvent.Object.(*v1.Event) + if !ok { + // This is all local, so there's no reason this should + // ever happen. + continue + } + eventHandler(event) } - eventHandler(event) } }() return watcher } // NewRecorder returns an EventRecorder that records events with the given event source. 
-func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder { - return &recorderImpl{scheme, source, e.Broadcaster, clock.RealClock{}} +func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorderLogger { + return &recorderImplLogger{recorderImpl: &recorderImpl{scheme, source, e.Broadcaster, clock.RealClock{}}, logger: klog.Background()} } type recorderImpl struct { @@ -342,15 +417,17 @@ type recorderImpl struct { clock clock.PassiveClock } -func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations map[string]string, eventtype, reason, message string) { +var _ EventRecorder = &recorderImpl{} + +func (recorder *recorderImpl) generateEvent(logger klog.Logger, object runtime.Object, annotations map[string]string, eventtype, reason, message string) { ref, err := ref.GetReference(recorder.scheme, object) if err != nil { - klog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) + logger.Error(err, "Could not construct reference, will not report event", "object", object, "eventType", eventtype, "reason", reason, "message", message) return } if !util.ValidateEventType(eventtype) { - klog.Errorf("Unsupported event type: '%v'", eventtype) + logger.Error(nil, "Unsupported event type", "eventType", eventtype) return } @@ -367,16 +444,16 @@ func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations m // outgoing events anyway). sent, err := recorder.ActionOrDrop(watch.Added, event) if err != nil { - klog.Errorf("unable to record event: %v (will not retry!)", err) + logger.Error(err, "Unable to record event (will not retry!)") return } if !sent { - klog.Errorf("unable to record event: too many queued events, dropped event %#v", event) + logger.Error(nil, "Unable to record event: too many queued events, dropped event", "event", event) } } func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) { - recorder.generateEvent(object, nil, eventtype, reason, message) + recorder.generateEvent(klog.Background(), object, nil, eventtype, reason, message) } func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { @@ -384,7 +461,7 @@ func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, m } func (recorder *recorderImpl) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { - recorder.generateEvent(object, annotations, eventtype, reason, fmt.Sprintf(messageFmt, args...)) + recorder.generateEvent(klog.Background(), object, annotations, eventtype, reason, fmt.Sprintf(messageFmt, args...)) } func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, annotations map[string]string, eventtype, reason, message string) *v1.Event { @@ -408,3 +485,26 @@ func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, annotations map Type: eventtype, } } + +type recorderImplLogger struct { + *recorderImpl + logger klog.Logger +} + +var _ EventRecorderLogger = &recorderImplLogger{} + +func (recorder recorderImplLogger) Event(object runtime.Object, eventtype, reason, message string) { + recorder.recorderImpl.generateEvent(recorder.logger, object, nil, eventtype, reason, message) +} + +func (recorder recorderImplLogger) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { 
+ recorder.Event(object, eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder recorderImplLogger) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.generateEvent(recorder.logger, object, annotations, eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder recorderImplLogger) WithLogger(logger klog.Logger) EventRecorderLogger { + return recorderImplLogger{recorderImpl: recorder.recorderImpl, logger: logger} +} diff --git a/vendor/k8s.io/client-go/tools/record/fake.go b/vendor/k8s.io/client-go/tools/record/fake.go index fda4ad8ff..67eac4817 100644 --- a/vendor/k8s.io/client-go/tools/record/fake.go +++ b/vendor/k8s.io/client-go/tools/record/fake.go @@ -20,6 +20,7 @@ import ( "fmt" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" ) // FakeRecorder is used as a fake during tests. It is thread safe. It is usable @@ -31,6 +32,8 @@ type FakeRecorder struct { IncludeObject bool } +var _ EventRecorderLogger = &FakeRecorder{} + func objectString(object runtime.Object, includeObject bool) string { if !includeObject { return "" @@ -68,6 +71,10 @@ func (f *FakeRecorder) AnnotatedEventf(object runtime.Object, annotations map[st f.writeEvent(object, annotations, eventtype, reason, messageFmt, args...) } +func (f *FakeRecorder) WithLogger(logger klog.Logger) EventRecorderLogger { + return f +} + // NewFakeRecorder creates new fake event recorder with event channel with // buffer of given size. func NewFakeRecorder(bufferSize int) *FakeRecorder { diff --git a/vendor/k8s.io/client-go/tools/remotecommand/fallback.go b/vendor/k8s.io/client-go/tools/remotecommand/fallback.go new file mode 100644 index 000000000..4846cdb55 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/fallback.go @@ -0,0 +1,57 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "context" +) + +var _ Executor = &fallbackExecutor{} + +type fallbackExecutor struct { + primary Executor + secondary Executor + shouldFallback func(error) bool +} + +// NewFallbackExecutor creates an Executor that first attempts to use the +// WebSocketExecutor, falling back to the legacy SPDYExecutor if the initial +// websocket "StreamWithContext" call fails. +// func NewFallbackExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) { +func NewFallbackExecutor(primary, secondary Executor, shouldFallback func(error) bool) (Executor, error) { + return &fallbackExecutor{ + primary: primary, + secondary: secondary, + shouldFallback: shouldFallback, + }, nil +} + +// Stream is deprecated. Please use "StreamWithContext". 
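A minimal sketch of how a caller outside this vendor tree might obtain the contextual-logging recorder introduced above and re-scope it per call site with WithLogger. The helper name newRecorder and the component name "example-controller" are illustrative, not part of this patch.

package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog/v2"
)

// newRecorder wires a broadcaster to the API server event sink and returns a
// recorder whose events are also logged through the supplied klog.Logger.
func newRecorder(client kubernetes.Interface, logger klog.Logger) record.EventRecorderLogger {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: client.CoreV1().Events("")})
	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "example-controller"})
	// WithLogger returns a copy of the recorder bound to a per-reconcile logger.
	return recorder.WithLogger(logger)
}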
+func (f *fallbackExecutor) Stream(options StreamOptions) error { + return f.StreamWithContext(context.Background(), options) +} + +// StreamWithContext initially attempts to call "StreamWithContext" using the +// primary executor, falling back to calling the secondary executor if the +// initial primary call to upgrade to a websocket connection fails. +func (f *fallbackExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error { + err := f.primary.StreamWithContext(ctx, options) + if f.shouldFallback(err) { + return f.secondary.StreamWithContext(ctx, options) + } + return err +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go index 662a3cb4a..1ae67729b 100644 --- a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go +++ b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go @@ -18,17 +18,10 @@ package remotecommand import ( "context" - "fmt" "io" "net/http" - "net/url" - - "k8s.io/klog/v2" "k8s.io/apimachinery/pkg/util/httpstream" - "k8s.io/apimachinery/pkg/util/remotecommand" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/transport/spdy" ) // StreamOptions holds information pertaining to the current streaming session: @@ -63,120 +56,3 @@ type streamCreator interface { type streamProtocolHandler interface { stream(conn streamCreator) error } - -// streamExecutor handles transporting standard shell streams over an httpstream connection. -type streamExecutor struct { - upgrader spdy.Upgrader - transport http.RoundTripper - - method string - url *url.URL - protocols []string -} - -// NewSPDYExecutor connects to the provided server and upgrades the connection to -// multiplexed bidirectional streams. -func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) { - wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config) - if err != nil { - return nil, err - } - return NewSPDYExecutorForTransports(wrapper, upgradeRoundTripper, method, url) -} - -// NewSPDYExecutorForTransports connects to the provided server using the given transport, -// upgrades the response using the given upgrader to multiplexed bidirectional streams. -func NewSPDYExecutorForTransports(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) { - return NewSPDYExecutorForProtocols( - transport, upgrader, method, url, - remotecommand.StreamProtocolV4Name, - remotecommand.StreamProtocolV3Name, - remotecommand.StreamProtocolV2Name, - remotecommand.StreamProtocolV1Name, - ) -} - -// NewSPDYExecutorForProtocols connects to the provided server and upgrades the connection to -// multiplexed bidirectional streams using only the provided protocols. Exposed for testing, most -// callers should use NewSPDYExecutor or NewSPDYExecutorForTransports. -func NewSPDYExecutorForProtocols(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL, protocols ...string) (Executor, error) { - return &streamExecutor{ - upgrader: upgrader, - transport: transport, - method: method, - url: url, - protocols: protocols, - }, nil -} - -// Stream opens a protocol streamer to the server and streams until a client closes -// the connection or the server disconnects. -func (e *streamExecutor) Stream(options StreamOptions) error { - return e.StreamWithContext(context.Background(), options) -} - -// newConnectionAndStream creates a new SPDY connection and a stream protocol handler upon it. 
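A usage sketch for the fallback executor above, combining the new websocket executor with the SPDY executor. The helper name execWithFallback, the HTTP methods, and the fallback predicate are assumptions for illustration; callers can use any error check they prefer.

package main

import (
	"context"
	"errors"
	"net/url"
	"os"

	"k8s.io/apimachinery/pkg/util/httpstream"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
)

// execWithFallback tries a websocket StreamWithContext first and falls back
// to SPDY when the upgrade itself fails.
func execWithFallback(config *restclient.Config, execURL *url.URL) error {
	wsExec, err := remotecommand.NewWebSocketExecutor(config, "GET", execURL.String())
	if err != nil {
		return err
	}
	spdyExec, err := remotecommand.NewSPDYExecutor(config, "POST", execURL)
	if err != nil {
		return err
	}
	exec, err := remotecommand.NewFallbackExecutor(wsExec, spdyExec, func(err error) bool {
		// Assumed predicate: only an upgrade failure triggers the SPDY fallback.
		var upgradeErr *httpstream.UpgradeFailureError
		return errors.As(err, &upgradeErr)
	})
	if err != nil {
		return err
	}
	return exec.StreamWithContext(context.Background(), remotecommand.StreamOptions{
		Stdout: os.Stdout,
		Stderr: os.Stderr,
	})
}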
-func (e *streamExecutor) newConnectionAndStream(ctx context.Context, options StreamOptions) (httpstream.Connection, streamProtocolHandler, error) { - req, err := http.NewRequestWithContext(ctx, e.method, e.url.String(), nil) - if err != nil { - return nil, nil, fmt.Errorf("error creating request: %v", err) - } - - conn, protocol, err := spdy.Negotiate( - e.upgrader, - &http.Client{Transport: e.transport}, - req, - e.protocols..., - ) - if err != nil { - return nil, nil, err - } - - var streamer streamProtocolHandler - - switch protocol { - case remotecommand.StreamProtocolV4Name: - streamer = newStreamProtocolV4(options) - case remotecommand.StreamProtocolV3Name: - streamer = newStreamProtocolV3(options) - case remotecommand.StreamProtocolV2Name: - streamer = newStreamProtocolV2(options) - case "": - klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name) - fallthrough - case remotecommand.StreamProtocolV1Name: - streamer = newStreamProtocolV1(options) - } - - return conn, streamer, nil -} - -// StreamWithContext opens a protocol streamer to the server and streams until a client closes -// the connection or the server disconnects or the context is done. -func (e *streamExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error { - conn, streamer, err := e.newConnectionAndStream(ctx, options) - if err != nil { - return err - } - defer conn.Close() - - panicChan := make(chan any, 1) - errorChan := make(chan error, 1) - go func() { - defer func() { - if p := recover(); p != nil { - panicChan <- p - } - }() - errorChan <- streamer.stream(conn) - }() - - select { - case p := <-panicChan: - panic(p) - case err := <-errorChan: - return err - case <-ctx.Done(): - return ctx.Err() - } -} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/spdy.go b/vendor/k8s.io/client-go/tools/remotecommand/spdy.go new file mode 100644 index 000000000..c2bfcf8a6 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/spdy.go @@ -0,0 +1,171 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/apimachinery/pkg/util/remotecommand" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/transport/spdy" + "k8s.io/klog/v2" +) + +// spdyStreamExecutor handles transporting standard shell streams over an httpstream connection. +type spdyStreamExecutor struct { + upgrader spdy.Upgrader + transport http.RoundTripper + + method string + url *url.URL + protocols []string + rejectRedirects bool // if true, receiving redirect from upstream is an error +} + +// NewSPDYExecutor connects to the provided server and upgrades the connection to +// multiplexed bidirectional streams. 
+func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) { + wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config) + if err != nil { + return nil, err + } + return NewSPDYExecutorForTransports(wrapper, upgradeRoundTripper, method, url) +} + +// NewSPDYExecutorRejectRedirects returns an Executor that will upgrade the future +// connection to a SPDY bi-directional streaming connection when calling "Stream" (deprecated) +// or "StreamWithContext" (preferred). Additionally, if the upstream server returns a redirect +// during the attempted upgrade in these "Stream" calls, an error is returned. +func NewSPDYExecutorRejectRedirects(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) { + executor, err := NewSPDYExecutorForTransports(transport, upgrader, method, url) + if err != nil { + return nil, err + } + spdyExecutor := executor.(*spdyStreamExecutor) + spdyExecutor.rejectRedirects = true + return spdyExecutor, nil +} + +// NewSPDYExecutorForTransports connects to the provided server using the given transport, +// upgrades the response using the given upgrader to multiplexed bidirectional streams. +func NewSPDYExecutorForTransports(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) { + return NewSPDYExecutorForProtocols( + transport, upgrader, method, url, + remotecommand.StreamProtocolV5Name, + remotecommand.StreamProtocolV4Name, + remotecommand.StreamProtocolV3Name, + remotecommand.StreamProtocolV2Name, + remotecommand.StreamProtocolV1Name, + ) +} + +// NewSPDYExecutorForProtocols connects to the provided server and upgrades the connection to +// multiplexed bidirectional streams using only the provided protocols. Exposed for testing, most +// callers should use NewSPDYExecutor or NewSPDYExecutorForTransports. +func NewSPDYExecutorForProtocols(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL, protocols ...string) (Executor, error) { + return &spdyStreamExecutor{ + upgrader: upgrader, + transport: transport, + method: method, + url: url, + protocols: protocols, + }, nil +} + +// Stream opens a protocol streamer to the server and streams until a client closes +// the connection or the server disconnects. +func (e *spdyStreamExecutor) Stream(options StreamOptions) error { + return e.StreamWithContext(context.Background(), options) +} + +// newConnectionAndStream creates a new SPDY connection and a stream protocol handler upon it. 
+func (e *spdyStreamExecutor) newConnectionAndStream(ctx context.Context, options StreamOptions) (httpstream.Connection, streamProtocolHandler, error) { + req, err := http.NewRequestWithContext(ctx, e.method, e.url.String(), nil) + if err != nil { + return nil, nil, fmt.Errorf("error creating request: %v", err) + } + + client := http.Client{Transport: e.transport} + if e.rejectRedirects { + client.CheckRedirect = func(req *http.Request, via []*http.Request) error { + return fmt.Errorf("redirect not allowed") + } + } + conn, protocol, err := spdy.Negotiate( + e.upgrader, + &client, + req, + e.protocols..., + ) + if err != nil { + return nil, nil, err + } + + var streamer streamProtocolHandler + + switch protocol { + case remotecommand.StreamProtocolV5Name: + streamer = newStreamProtocolV5(options) + case remotecommand.StreamProtocolV4Name: + streamer = newStreamProtocolV4(options) + case remotecommand.StreamProtocolV3Name: + streamer = newStreamProtocolV3(options) + case remotecommand.StreamProtocolV2Name: + streamer = newStreamProtocolV2(options) + case "": + klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name) + fallthrough + case remotecommand.StreamProtocolV1Name: + streamer = newStreamProtocolV1(options) + } + + return conn, streamer, nil +} + +// StreamWithContext opens a protocol streamer to the server and streams until a client closes +// the connection or the server disconnects or the context is done. +func (e *spdyStreamExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error { + conn, streamer, err := e.newConnectionAndStream(ctx, options) + if err != nil { + return err + } + defer conn.Close() + + panicChan := make(chan any, 1) + errorChan := make(chan error, 1) + go func() { + defer func() { + if p := recover(); p != nil { + panicChan <- p + } + }() + errorChan <- streamer.stream(conn) + }() + + select { + case p := <-panicChan: + panic(p) + case err := <-errorChan: + return err + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v5.go b/vendor/k8s.io/client-go/tools/remotecommand/v5.go new file mode 100644 index 000000000..4da7bfb13 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/v5.go @@ -0,0 +1,35 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +// streamProtocolV5 add support for V5 of the remote command subprotocol. +// For the streamProtocolHandler, this version is the same as V4. 
+type streamProtocolV5 struct { + *streamProtocolV4 +} + +var _ streamProtocolHandler = &streamProtocolV5{} + +func newStreamProtocolV5(options StreamOptions) streamProtocolHandler { + return &streamProtocolV5{ + streamProtocolV4: newStreamProtocolV4(options).(*streamProtocolV4), + } +} + +func (p *streamProtocolV5) stream(conn streamCreator) error { + return p.streamProtocolV4.stream(conn) +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/websocket.go b/vendor/k8s.io/client-go/tools/remotecommand/websocket.go new file mode 100644 index 000000000..a60986dec --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/websocket.go @@ -0,0 +1,502 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "sync" + "time" + + gwebsocket "github.com/gorilla/websocket" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/apimachinery/pkg/util/remotecommand" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/transport/websocket" + "k8s.io/klog/v2" +) + +// writeDeadline defines the time that a write to the websocket connection +// must complete by, otherwise an i/o timeout occurs. The writeDeadline +// has nothing to do with a response from the other websocket connection +// endpoint; only that the message was successfully processed by the +// local websocket connection. The typical write deadline within the websocket +// library is one second. +const writeDeadline = 2 * time.Second + +var ( + _ Executor = &wsStreamExecutor{} + _ streamCreator = &wsStreamCreator{} + _ httpstream.Stream = &stream{} + + streamType2streamID = map[string]byte{ + v1.StreamTypeStdin: remotecommand.StreamStdIn, + v1.StreamTypeStdout: remotecommand.StreamStdOut, + v1.StreamTypeStderr: remotecommand.StreamStdErr, + v1.StreamTypeError: remotecommand.StreamErr, + v1.StreamTypeResize: remotecommand.StreamResize, + } +) + +const ( + // pingPeriod defines how often a heartbeat "ping" message is sent. + pingPeriod = 5 * time.Second + // pingReadDeadline defines the time waiting for a response heartbeat + // "pong" message before a timeout error occurs for websocket reading. + // This duration must always be greater than the "pingPeriod". By defining + // this deadline in terms of the ping period, we are essentially saying + // we can drop "X-1" (e.g. 3-1=2) pings before firing the timeout. + pingReadDeadline = (pingPeriod * 3) + (1 * time.Second) +) + +// wsStreamExecutor handles transporting standard shell streams over an httpstream connection. +type wsStreamExecutor struct { + transport http.RoundTripper + upgrader websocket.ConnectionHolder + method string + url string + // requested protocols in priority order (e.g. v5.channel.k8s.io before v4.channel.k8s.io). + protocols []string + // selected protocol from the handshake process; could be empty string if handshake fails. 
+ negotiated string + // period defines how often a "ping" heartbeat message is sent to the other endpoint. + heartbeatPeriod time.Duration + // deadline defines the amount of time before "pong" response must be received. + heartbeatDeadline time.Duration +} + +func NewWebSocketExecutor(config *restclient.Config, method, url string) (Executor, error) { + // Only supports V5 protocol for correct version skew functionality. + // Previous api servers will proxy upgrade requests to legacy websocket + // servers on container runtimes which support V1-V4. These legacy + // websocket servers will not handle the new CLOSE signal. + return NewWebSocketExecutorForProtocols(config, method, url, remotecommand.StreamProtocolV5Name) +} + +// NewWebSocketExecutorForProtocols allows to execute commands via a WebSocket connection. +func NewWebSocketExecutorForProtocols(config *restclient.Config, method, url string, protocols ...string) (Executor, error) { + transport, upgrader, err := websocket.RoundTripperFor(config) + if err != nil { + return nil, fmt.Errorf("error creating websocket transports: %v", err) + } + return &wsStreamExecutor{ + transport: transport, + upgrader: upgrader, + method: method, + url: url, + protocols: protocols, + heartbeatPeriod: pingPeriod, + heartbeatDeadline: pingReadDeadline, + }, nil +} + +// Deprecated: use StreamWithContext instead to avoid possible resource leaks. +// See https://github.com/kubernetes/kubernetes/pull/103177 for details. +func (e *wsStreamExecutor) Stream(options StreamOptions) error { + return e.StreamWithContext(context.Background(), options) +} + +// StreamWithContext upgrades an HTTPRequest to a WebSocket connection, and starts the various +// goroutines to implement the necessary streams over the connection. The "options" parameter +// defines which streams are requested. Returns an error if one occurred. This method is NOT +// safe to run concurrently with the same executor (because of the state stored in the upgrader). +func (e *wsStreamExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error { + req, err := http.NewRequestWithContext(ctx, e.method, e.url, nil) + if err != nil { + return err + } + conn, err := websocket.Negotiate(e.transport, e.upgrader, req, e.protocols...) + if err != nil { + return err + } + if conn == nil { + panic(fmt.Errorf("websocket connection is nil")) + } + defer conn.Close() + e.negotiated = conn.Subprotocol() + klog.V(4).Infof("The subprotocol is %s", e.negotiated) + + var streamer streamProtocolHandler + switch e.negotiated { + case remotecommand.StreamProtocolV5Name: + streamer = newStreamProtocolV5(options) + case remotecommand.StreamProtocolV4Name: + streamer = newStreamProtocolV4(options) + case remotecommand.StreamProtocolV3Name: + streamer = newStreamProtocolV3(options) + case remotecommand.StreamProtocolV2Name: + streamer = newStreamProtocolV2(options) + case "": + klog.V(4).Infof("The server did not negotiate a streaming protocol version. 
Falling back to %s", remotecommand.StreamProtocolV1Name) + fallthrough + case remotecommand.StreamProtocolV1Name: + streamer = newStreamProtocolV1(options) + } + + panicChan := make(chan any, 1) + errorChan := make(chan error, 1) + go func() { + defer func() { + if p := recover(); p != nil { + panicChan <- p + } + }() + creator := newWSStreamCreator(conn) + go creator.readDemuxLoop( + e.upgrader.DataBufferSize(), + e.heartbeatPeriod, + e.heartbeatDeadline, + ) + errorChan <- streamer.stream(creator) + }() + + select { + case p := <-panicChan: + panic(p) + case err := <-errorChan: + return err + case <-ctx.Done(): + return ctx.Err() + } +} + +type wsStreamCreator struct { + conn *gwebsocket.Conn + // Protects writing to websocket connection; reading is lock-free + connWriteLock sync.Mutex + // map of stream id to stream; multiple streams read/write the connection + streams map[byte]*stream + streamsMu sync.Mutex +} + +func newWSStreamCreator(conn *gwebsocket.Conn) *wsStreamCreator { + return &wsStreamCreator{ + conn: conn, + streams: map[byte]*stream{}, + } +} + +func (c *wsStreamCreator) getStream(id byte) *stream { + c.streamsMu.Lock() + defer c.streamsMu.Unlock() + return c.streams[id] +} + +func (c *wsStreamCreator) setStream(id byte, s *stream) { + c.streamsMu.Lock() + defer c.streamsMu.Unlock() + c.streams[id] = s +} + +// CreateStream uses id from passed headers to create a stream over "c.conn" connection. +// Returns a Stream structure or nil and an error if one occurred. +func (c *wsStreamCreator) CreateStream(headers http.Header) (httpstream.Stream, error) { + streamType := headers.Get(v1.StreamType) + id, ok := streamType2streamID[streamType] + if !ok { + return nil, fmt.Errorf("unknown stream type: %s", streamType) + } + if s := c.getStream(id); s != nil { + return nil, fmt.Errorf("duplicate stream for type %s", streamType) + } + reader, writer := io.Pipe() + s := &stream{ + headers: headers, + readPipe: reader, + writePipe: writer, + conn: c.conn, + connWriteLock: &c.connWriteLock, + id: id, + } + c.setStream(id, s) + return s, nil +} + +// readDemuxLoop is the lock-free reading processor for this endpoint of the websocket +// connection. This loop reads the connection, and demultiplexes the data +// into one of the individual stream pipes (by checking the stream id). This +// loop can *not* be run concurrently, because there can only be one websocket +// connection reader at a time (a read mutex would provide no benefit). +func (c *wsStreamCreator) readDemuxLoop(bufferSize int, period time.Duration, deadline time.Duration) { + // Initialize and start the ping/pong heartbeat. + h := newHeartbeat(c.conn, period, deadline) + // Set initial timeout for websocket connection reading. + if err := c.conn.SetReadDeadline(time.Now().Add(deadline)); err != nil { + klog.Errorf("Websocket initial setting read deadline failed %v", err) + return + } + go h.start() + // Buffer size must correspond to the same size allocated + // for the read buffer during websocket client creation. A + // difference can cause incomplete connection reads. + readBuffer := make([]byte, bufferSize) + for { + // NextReader() only returns data messages (BinaryMessage or Text + // Message). Even though this call will never return control frames + // such as ping, pong, or close, this call is necessary for these + // message types to be processed. There can only be one reader + // at a time, so this reader loop must *not* be run concurrently; + // there is no lock for reading. 
Calling "NextReader()" before the + // current reader has been processed will close the current reader. + // If the heartbeat read deadline times out, this "NextReader()" will + // return an i/o error, and error handling will clean up. + messageType, r, err := c.conn.NextReader() + if err != nil { + websocketErr, ok := err.(*gwebsocket.CloseError) + if ok && websocketErr.Code == gwebsocket.CloseNormalClosure { + err = nil // readers will get io.EOF as it's a normal closure + } else { + err = fmt.Errorf("next reader: %w", err) + } + c.closeAllStreamReaders(err) + return + } + // All remote command protocols send/receive only binary data messages. + if messageType != gwebsocket.BinaryMessage { + c.closeAllStreamReaders(fmt.Errorf("unexpected message type: %d", messageType)) + return + } + // It's ok to read just a single byte because the underlying library wraps the actual + // connection with a buffered reader anyway. + _, err = io.ReadFull(r, readBuffer[:1]) + if err != nil { + c.closeAllStreamReaders(fmt.Errorf("read stream id: %w", err)) + return + } + streamID := readBuffer[0] + s := c.getStream(streamID) + if s == nil { + klog.Errorf("Unknown stream id %d, discarding message", streamID) + continue + } + for { + nr, errRead := r.Read(readBuffer) + if nr > 0 { + // Write the data to the stream's pipe. This can block. + _, errWrite := s.writePipe.Write(readBuffer[:nr]) + if errWrite != nil { + // Pipe must have been closed by the stream user. + // Nothing to do, discard the message. + break + } + } + if errRead != nil { + if errRead == io.EOF { + break + } + c.closeAllStreamReaders(fmt.Errorf("read message: %w", err)) + return + } + } + } +} + +// closeAllStreamReaders closes readers in all streams. +// This unblocks all stream.Read() calls. +func (c *wsStreamCreator) closeAllStreamReaders(err error) { + c.streamsMu.Lock() + defer c.streamsMu.Unlock() + for _, s := range c.streams { + // Closing writePipe unblocks all readPipe.Read() callers and prevents any future writes. + _ = s.writePipe.CloseWithError(err) + } +} + +type stream struct { + headers http.Header + readPipe *io.PipeReader + writePipe *io.PipeWriter + // conn is used for writing directly into the connection. + // Is nil after Close() / Reset() to prevent future writes. + conn *gwebsocket.Conn + // connWriteLock protects conn against concurrent write operations. There must be a single writer and a single reader only. + // The mutex is shared across all streams because the underlying connection is shared. + connWriteLock *sync.Mutex + id byte +} + +func (s *stream) Read(p []byte) (n int, err error) { + return s.readPipe.Read(p) +} + +// Write writes directly to the underlying WebSocket connection. +func (s *stream) Write(p []byte) (n int, err error) { + klog.V(4).Infof("Write() on stream %d", s.id) + defer klog.V(4).Infof("Write() done on stream %d", s.id) + s.connWriteLock.Lock() + defer s.connWriteLock.Unlock() + if s.conn == nil { + return 0, fmt.Errorf("write on closed stream %d", s.id) + } + err = s.conn.SetWriteDeadline(time.Now().Add(writeDeadline)) + if err != nil { + klog.V(7).Infof("Websocket setting write deadline failed %v", err) + return 0, err + } + // Message writer buffers the message data, so we don't need to do that ourselves. + // Just write id and the data as two separate writes to avoid allocating an intermediate buffer. 
+ w, err := s.conn.NextWriter(gwebsocket.BinaryMessage) + if err != nil { + return 0, err + } + defer func() { + if w != nil { + w.Close() + } + }() + _, err = w.Write([]byte{s.id}) + if err != nil { + return 0, err + } + n, err = w.Write(p) + if err != nil { + return n, err + } + err = w.Close() + w = nil + return n, err +} + +// Close half-closes the stream, indicating this side is finished with the stream. +func (s *stream) Close() error { + klog.V(4).Infof("Close() on stream %d", s.id) + defer klog.V(4).Infof("Close() done on stream %d", s.id) + s.connWriteLock.Lock() + defer s.connWriteLock.Unlock() + if s.conn == nil { + return fmt.Errorf("Close() on already closed stream %d", s.id) + } + // Communicate the CLOSE stream signal to the other websocket endpoint. + err := s.conn.WriteMessage(gwebsocket.BinaryMessage, []byte{remotecommand.StreamClose, s.id}) + s.conn = nil + return err +} + +func (s *stream) Reset() error { + klog.V(4).Infof("Reset() on stream %d", s.id) + defer klog.V(4).Infof("Reset() done on stream %d", s.id) + s.Close() + return s.writePipe.Close() +} + +func (s *stream) Headers() http.Header { + return s.headers +} + +func (s *stream) Identifier() uint32 { + return uint32(s.id) +} + +// heartbeat encasulates data necessary for the websocket ping/pong heartbeat. This +// heartbeat works by setting a read deadline on the websocket connection, then +// pushing this deadline into the future for every successful heartbeat. If the +// heartbeat "pong" fails to respond within the deadline, then the "NextReader()" call +// inside the "readDemuxLoop" will return an i/o error prompting a connection close +// and cleanup. +type heartbeat struct { + conn *gwebsocket.Conn + // period defines how often a "ping" heartbeat message is sent to the other endpoint + period time.Duration + // closing the "closer" channel will clean up the heartbeat timers + closer chan struct{} + // optional data to send with "ping" message + message []byte + // optionally received data message with "pong" message, same as sent with ping + pongMessage []byte +} + +// newHeartbeat creates heartbeat structure encapsulating fields necessary to +// run the websocket connection ping/pong mechanism and sets up handlers on +// the websocket connection. +func newHeartbeat(conn *gwebsocket.Conn, period time.Duration, deadline time.Duration) *heartbeat { + h := &heartbeat{ + conn: conn, + period: period, + closer: make(chan struct{}), + } + // Set up handler for receiving returned "pong" message from other endpoint + // by pushing the read deadline into the future. The "msg" received could + // be empty. + h.conn.SetPongHandler(func(msg string) error { + // Push the read deadline into the future. + klog.V(8).Infof("Pong message received (%s)--resetting read deadline", msg) + err := h.conn.SetReadDeadline(time.Now().Add(deadline)) + if err != nil { + klog.Errorf("Websocket setting read deadline failed %v", err) + return err + } + if len(msg) > 0 { + h.pongMessage = []byte(msg) + } + return nil + }) + // Set up handler to cleanup timers when this endpoint receives "Close" message. + closeHandler := h.conn.CloseHandler() + h.conn.SetCloseHandler(func(code int, text string) error { + close(h.closer) + return closeHandler(code, text) + }) + return h +} + +// setMessage is optional data sent with "ping" heartbeat. According to the websocket RFC +// this data sent with "ping" message should be returned in "pong" message. 
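The heartbeat above works by pushing the read deadline forward on every received pong, so a missed pong eventually surfaces as an i/o timeout in NextReader. A standalone sketch of the same pattern against a plain gorilla/websocket connection; the helper name keepAlive and the periods are illustrative values, not the constants used here.

package main

import (
	"time"

	gwebsocket "github.com/gorilla/websocket"
)

// keepAlive sends a ping every period and extends the read deadline whenever a
// pong arrives; if pongs stop, the connection's reader fails with a timeout.
func keepAlive(conn *gwebsocket.Conn, period, deadline time.Duration, stop <-chan struct{}) {
	conn.SetPongHandler(func(string) error {
		return conn.SetReadDeadline(time.Now().Add(deadline))
	})
	_ = conn.SetReadDeadline(time.Now().Add(deadline))
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		select {
		case <-stop:
			return
		case <-ticker.C:
			_ = conn.WriteControl(gwebsocket.PingMessage, nil, time.Now().Add(time.Second))
		}
	}
}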
+func (h *heartbeat) setMessage(msg string) { + h.message = []byte(msg) +} + +// start the heartbeat by setting up necesssary handlers and looping by sending "ping" +// message every "period" until the "closer" channel is closed. +func (h *heartbeat) start() { + // Loop to continually send "ping" message through websocket connection every "period". + t := time.NewTicker(h.period) + defer t.Stop() + for { + select { + case <-h.closer: + klog.V(8).Infof("closed channel--returning") + return + case <-t.C: + // "WriteControl" does not need to be protected by a mutex. According to + // gorilla/websockets library docs: "The Close and WriteControl methods can + // be called concurrently with all other methods." + if err := h.conn.WriteControl(gwebsocket.PingMessage, h.message, time.Now().Add(writeDeadline)); err == nil { + klog.V(8).Infof("Websocket Ping succeeeded") + } else { + klog.Errorf("Websocket Ping failed: %v", err) + if errors.Is(err, gwebsocket.ErrCloseSent) { + // we continue because c.conn.CloseChan will manage closing the connection already + continue + } else if e, ok := err.(net.Error); ok && e.Timeout() { + // Continue, in case this is a transient failure. + // c.conn.CloseChan above will tell us when the connection is + // actually closed. + // If Temporary function hadn't been deprecated, we would have used it. + // But most of temporary errors are timeout errors anyway. + continue + } + return + } + } + } +} diff --git a/vendor/k8s.io/client-go/transport/spdy/spdy.go b/vendor/k8s.io/client-go/transport/spdy/spdy.go index f50b68e5f..9fddc6c5f 100644 --- a/vendor/k8s.io/client-go/transport/spdy/spdy.go +++ b/vendor/k8s.io/client-go/transport/spdy/spdy.go @@ -43,11 +43,15 @@ func RoundTripperFor(config *restclient.Config) (http.RoundTripper, Upgrader, er if config.Proxy != nil { proxy = config.Proxy } - upgradeRoundTripper := spdy.NewRoundTripperWithConfig(spdy.RoundTripperConfig{ - TLS: tlsConfig, - Proxier: proxy, - PingPeriod: time.Second * 5, + upgradeRoundTripper, err := spdy.NewRoundTripperWithConfig(spdy.RoundTripperConfig{ + TLS: tlsConfig, + Proxier: proxy, + PingPeriod: time.Second * 5, + UpgradeTransport: nil, }) + if err != nil { + return nil, nil, err + } wrapper, err := restclient.HTTPWrappersForConfig(config, upgradeRoundTripper) if err != nil { return nil, nil, err diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go index 78060719a..4770331a0 100644 --- a/vendor/k8s.io/client-go/transport/transport.go +++ b/vendor/k8s.io/client-go/transport/transport.go @@ -96,6 +96,32 @@ func TLSConfigFor(c *Config) (*tls.Config, error) { } if c.HasCA() { + /* + kubernetes mutual (2-way) x509 between client and apiserver: + + 1. apiserver sending its apiserver certificate along with its publickey to client + >2. client verifies the apiserver certificate sent against its cluster certificate authority data + 3. client sending its client certificate along with its public key to the apiserver + 4. 
apiserver verifies the client certificate sent against its cluster certificate authority data + + description: + here, with this block, + cluster certificate authority data gets loaded into TLS before the handshake process + for client to later during the handshake verify the apiserver certificate + + normal args related to this stage: + --certificate-authority='': + Path to a cert file for the certificate authority + + (retrievable from "kubectl options" command) + (suggested by @deads2k) + + see also: + - for the step 1, see: staging/src/k8s.io/apiserver/pkg/server/options/serving.go + - for the step 3, see: a few lines below in this file + - for the step 4, see: staging/src/k8s.io/apiserver/pkg/authentication/request/x509/x509.go + */ + rootCAs, err := rootCertPool(c.TLS.CAData) if err != nil { return nil, fmt.Errorf("unable to load root certificates: %w", err) @@ -121,6 +147,35 @@ func TLSConfigFor(c *Config) (*tls.Config, error) { } if c.HasCertAuth() || c.HasCertCallback() { + + /* + kubernetes mutual (2-way) x509 between client and apiserver: + + 1. apiserver sending its apiserver certificate along with its publickey to client + 2. client verifies the apiserver certificate sent against its cluster certificate authority data + >3. client sending its client certificate along with its public key to the apiserver + 4. apiserver verifies the client certificate sent against its cluster certificate authority data + + description: + here, with this callback function, + client certificate and pub key get loaded into TLS during the handshake process + for apiserver to later in the step 4 verify the client certificate + + normal args related to this stage: + --client-certificate='': + Path to a client certificate file for TLS + --client-key='': + Path to a client key file for TLS + + (retrievable from "kubectl options" command) + (suggested by @deads2k) + + see also: + - for the step 1, see: staging/src/k8s.io/apiserver/pkg/server/options/serving.go + - for the step 2, see: a few lines above in this file + - for the step 4, see: staging/src/k8s.io/apiserver/pkg/authentication/request/x509/x509.go + */ + tlsConfig.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { // Note: static key/cert data always take precedence over cert // callback. diff --git a/vendor/k8s.io/client-go/transport/websocket/roundtripper.go b/vendor/k8s.io/client-go/transport/websocket/roundtripper.go new file mode 100644 index 000000000..010f916bc --- /dev/null +++ b/vendor/k8s.io/client-go/transport/websocket/roundtripper.go @@ -0,0 +1,163 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package websocket + +import ( + "crypto/tls" + "fmt" + "net/http" + "net/url" + + gwebsocket "github.com/gorilla/websocket" + + "k8s.io/apimachinery/pkg/util/httpstream" + utilnet "k8s.io/apimachinery/pkg/util/net" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/transport" +) + +var ( + _ utilnet.TLSClientConfigHolder = &RoundTripper{} + _ http.RoundTripper = &RoundTripper{} +) + +// ConnectionHolder defines functions for structure providing +// access to the websocket connection. +type ConnectionHolder interface { + DataBufferSize() int + Connection() *gwebsocket.Conn +} + +// RoundTripper knows how to establish a connection to a remote WebSocket endpoint and make it available for use. +// RoundTripper must not be reused. +type RoundTripper struct { + // TLSConfig holds the TLS configuration settings to use when connecting + // to the remote server. + TLSConfig *tls.Config + + // Proxier specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. + // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxier func(req *http.Request) (*url.URL, error) + + // Conn holds the WebSocket connection after a round trip. + Conn *gwebsocket.Conn +} + +// Connection returns the stored websocket connection. +func (rt *RoundTripper) Connection() *gwebsocket.Conn { + return rt.Conn +} + +// DataBufferSize returns the size of buffers for the +// websocket connection. +func (rt *RoundTripper) DataBufferSize() int { + return 32 * 1024 +} + +// TLSClientConfig implements pkg/util/net.TLSClientConfigHolder. +func (rt *RoundTripper) TLSClientConfig() *tls.Config { + return rt.TLSConfig +} + +// RoundTrip connects to the remote websocket using the headers in the request and the TLS +// configuration from the config +func (rt *RoundTripper) RoundTrip(request *http.Request) (retResp *http.Response, retErr error) { + defer func() { + if request.Body != nil { + err := request.Body.Close() + if retErr == nil { + retErr = err + } + } + }() + + // set the protocol version directly on the dialer from the header + protocolVersions := request.Header[httpstream.HeaderProtocolVersion] + delete(request.Header, httpstream.HeaderProtocolVersion) + + dialer := gwebsocket.Dialer{ + Proxy: rt.Proxier, + TLSClientConfig: rt.TLSConfig, + Subprotocols: protocolVersions, + ReadBufferSize: rt.DataBufferSize() + 1024, // add space for the protocol byte indicating which channel the data is for + WriteBufferSize: rt.DataBufferSize() + 1024, // add space for the protocol byte indicating which channel the data is for + } + switch request.URL.Scheme { + case "https": + request.URL.Scheme = "wss" + case "http": + request.URL.Scheme = "ws" + default: + return nil, fmt.Errorf("unknown url scheme: %s", request.URL.Scheme) + } + wsConn, resp, err := dialer.DialContext(request.Context(), request.URL.String(), request.Header) + if err != nil { + return nil, &httpstream.UpgradeFailureError{Cause: err} + } + + rt.Conn = wsConn + + return resp, nil +} + +// RoundTripperFor transforms the passed rest config into a wrapped roundtripper, as well +// as a pointer to the websocket RoundTripper. The websocket RoundTripper contains the +// websocket connection after RoundTrip() on the wrapper. Returns an error if there is +// a problem creating the round trippers. 
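A short sketch of how the two helpers in this file fit together: RoundTripperFor builds the wrapped transport plus a connection holder, and Negotiate performs the dial and subprotocol handshake. The helper name dialExec and the request URL are placeholders.

package main

import (
	"context"
	"net/http"

	gwebsocket "github.com/gorilla/websocket"
	utilremotecommand "k8s.io/apimachinery/pkg/util/remotecommand"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/transport/websocket"
)

// dialExec negotiates a websocket connection speaking the V5 remote-command
// subprotocol against the given URL (e.g. a pod exec endpoint).
func dialExec(ctx context.Context, config *restclient.Config, rawURL string) (*gwebsocket.Conn, error) {
	rt, holder, err := websocket.RoundTripperFor(config)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, rawURL, nil)
	if err != nil {
		return nil, err
	}
	return websocket.Negotiate(rt, holder, req, utilremotecommand.StreamProtocolV5Name)
}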
+func RoundTripperFor(config *restclient.Config) (http.RoundTripper, ConnectionHolder, error) { + transportCfg, err := config.TransportConfig() + if err != nil { + return nil, nil, err + } + tlsConfig, err := transport.TLSConfigFor(transportCfg) + if err != nil { + return nil, nil, err + } + proxy := config.Proxy + if proxy == nil { + proxy = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment) + } + + upgradeRoundTripper := &RoundTripper{ + TLSConfig: tlsConfig, + Proxier: proxy, + } + wrapper, err := transport.HTTPWrappersForConfig(transportCfg, upgradeRoundTripper) + if err != nil { + return nil, nil, err + } + return wrapper, upgradeRoundTripper, nil +} + +// Negotiate opens a connection to a remote server and attempts to negotiate +// a WebSocket connection. Upon success, it returns the negotiated connection. +// The round tripper rt must use the WebSocket round tripper wsRt - see RoundTripperFor. +func Negotiate(rt http.RoundTripper, connectionInfo ConnectionHolder, req *http.Request, protocols ...string) (*gwebsocket.Conn, error) { + req.Header[httpstream.HeaderProtocolVersion] = protocols + resp, err := rt.RoundTrip(req) + if err != nil { + return nil, err + } + err = resp.Body.Close() + if err != nil { + connectionInfo.Connection().Close() + return nil, fmt.Errorf("error closing response body: %v", err) + } + return connectionInfo.Connection(), nil +} diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go index 380c06455..a363d1afb 100644 --- a/vendor/k8s.io/client-go/util/workqueue/queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/queue.go @@ -238,8 +238,12 @@ func (q *Type) Done(item interface{}) { // ShutDown will cause q to ignore all new items added to it and // immediately instruct the worker goroutines to exit. func (q *Type) ShutDown() { - q.setDrain(false) - q.shutdown() + q.cond.L.Lock() + defer q.cond.L.Unlock() + + q.drain = false + q.shuttingDown = true + q.cond.Broadcast() } // ShutDownWithDrain will cause q to ignore all new items added to it. As soon @@ -252,53 +256,16 @@ func (q *Type) ShutDown() { // ShutDownWithDrain, as to force the queue shut down to terminate immediately // without waiting for the drainage. func (q *Type) ShutDownWithDrain() { - q.setDrain(true) - q.shutdown() - for q.isProcessing() && q.shouldDrain() { - q.waitForProcessing() - } -} - -// isProcessing indicates if there are still items on the work queue being -// processed. It's used to drain the work queue on an eventual shutdown. -func (q *Type) isProcessing() bool { - q.cond.L.Lock() - defer q.cond.L.Unlock() - return q.processing.len() != 0 -} - -// waitForProcessing waits for the worker goroutines to finish processing items -// and call Done on them. -func (q *Type) waitForProcessing() { - q.cond.L.Lock() - defer q.cond.L.Unlock() - // Ensure that we do not wait on a queue which is already empty, as that - // could result in waiting for Done to be called on items in an empty queue - // which has already been shut down, which will result in waiting - // indefinitely. 
- if q.processing.len() == 0 { - return - } - q.cond.Wait() -} - -func (q *Type) setDrain(shouldDrain bool) { - q.cond.L.Lock() - defer q.cond.L.Unlock() - q.drain = shouldDrain -} - -func (q *Type) shouldDrain() bool { q.cond.L.Lock() defer q.cond.L.Unlock() - return q.drain -} -func (q *Type) shutdown() { - q.cond.L.Lock() - defer q.cond.L.Unlock() + q.drain = true q.shuttingDown = true q.cond.Broadcast() + + for q.processing.len() != 0 && q.drain { + q.cond.Wait() + } } func (q *Type) ShuttingDown() bool { diff --git a/vendor/k8s.io/cloud-provider/cloud.go b/vendor/k8s.io/cloud-provider/cloud.go index c9a04085f..d4c11dc66 100644 --- a/vendor/k8s.io/cloud-provider/cloud.go +++ b/vendor/k8s.io/cloud-provider/cloud.go @@ -98,6 +98,8 @@ func DefaultLoadBalancerName(service *v1.Service) string { } // GetInstanceProviderID builds a ProviderID for a node in a cloud. +// Note that if the instance does not exist, we must return ("", cloudprovider.InstanceNotFound) +// cloudprovider.InstanceNotFound should NOT be returned for instances that exist but are stopped/sleeping func GetInstanceProviderID(ctx context.Context, cloud Interface, nodeName types.NodeName) (string, error) { instances, ok := cloud.Instances() if !ok { @@ -108,8 +110,11 @@ func GetInstanceProviderID(ctx context.Context, cloud Interface, nodeName types. if err == NotImplemented { return "", err } + if err == InstanceNotFound { + return "", err + } - return "", fmt.Errorf("failed to get instance ID from cloud provider: %v", err) + return "", fmt.Errorf("failed to get instance ID from cloud provider: %w", err) } return cloud.ProviderName() + "://" + instanceID, nil } @@ -285,6 +290,7 @@ type Zones interface { } // PVLabeler is an abstract, pluggable interface for fetching labels for volumes +// DEPRECATED: PVLabeler is deprecated in favor of CSI topology feature. type PVLabeler interface { GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) } diff --git a/vendor/k8s.io/cloud-provider/options/options.go b/vendor/k8s.io/cloud-provider/options/options.go index 37aeb01e1..15be5a86c 100644 --- a/vendor/k8s.io/cloud-provider/options/options.go +++ b/vendor/k8s.io/cloud-provider/options/options.go @@ -263,7 +263,7 @@ func (o *CloudControllerManagerOptions) Validate(allControllers []string, disabl if o.WebhookServing != nil { errors = append(errors, o.WebhookServing.Validate()...) - if o.WebhookServing.BindPort == o.SecureServing.BindPort { + if o.WebhookServing.BindPort == o.SecureServing.BindPort && o.WebhookServing.BindPort != 0 { errors = append(errors, fmt.Errorf("--webhook-secure-port cannot be the same value as --secure-port")) } } diff --git a/vendor/k8s.io/cloud-provider/options/webhook.go b/vendor/k8s.io/cloud-provider/options/webhook.go index 719a70102..346e4218d 100644 --- a/vendor/k8s.io/cloud-provider/options/webhook.go +++ b/vendor/k8s.io/cloud-provider/options/webhook.go @@ -122,7 +122,7 @@ func (o *WebhookServingOptions) AddFlags(fs *pflag.FlagSet) { "associated interface(s) must be reachable by the rest of the cluster, and by CLI/web "+ fmt.Sprintf("clients. If set to an unspecified address (0.0.0.0 or ::), all interfaces will be used. If unset, defaults to %v.", o.BindAddress)) - fs.IntVar(&o.BindPort, "webhook-secure-port", o.BindPort, fmt.Sprintf("Secure port to serve cloud provider webhooks. If unset, defaults to %d.", o.BindPort)) + fs.IntVar(&o.BindPort, "webhook-secure-port", o.BindPort, "Secure port to serve cloud provider webhooks. 
If 0, don't serve webhooks at all.") fs.StringVar(&o.ServerCert.CertDirectory, "webhook-cert-dir", o.ServerCert.CertDirectory, ""+ "The directory where the TLS certs are located. "+ diff --git a/vendor/k8s.io/cloud-provider/service/helpers/helper.go b/vendor/k8s.io/cloud-provider/service/helpers/helper.go index e363c7db2..fd436c3d3 100644 --- a/vendor/k8s.io/cloud-provider/service/helpers/helper.go +++ b/vendor/k8s.io/cloud-provider/service/helpers/helper.go @@ -180,5 +180,8 @@ func ingressEqual(lhs, rhs *v1.LoadBalancerIngress) bool { if lhs.Hostname != rhs.Hostname { return false } + if lhs.IPMode != rhs.IPMode { + return false + } return true } diff --git a/vendor/k8s.io/component-base/metrics/buckets.go b/vendor/k8s.io/component-base/metrics/buckets.go index 48d3093e0..27a57eb7f 100644 --- a/vendor/k8s.io/component-base/metrics/buckets.go +++ b/vendor/k8s.io/component-base/metrics/buckets.go @@ -33,6 +33,16 @@ func ExponentialBuckets(start, factor float64, count int) []float64 { return prometheus.ExponentialBuckets(start, factor, count) } +// ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is +// 'min' and the highest bucket is 'max'. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is 0 or negative, if 'min' is 0 or negative. +func ExponentialBucketsRange(min, max float64, count int) []float64 { + return prometheus.ExponentialBucketsRange(min, max, count) +} + // MergeBuckets merges buckets together func MergeBuckets(buckets ...[]float64) []float64 { result := make([]float64, 1) diff --git a/vendor/k8s.io/component-base/metrics/metric.go b/vendor/k8s.io/component-base/metrics/metric.go index 3b22d21ef..d68a98c44 100644 --- a/vendor/k8s.io/component-base/metrics/metric.go +++ b/vendor/k8s.io/component-base/metrics/metric.go @@ -166,7 +166,7 @@ func (r *lazyMetric) Create(version *semver.Version) bool { if deprecatedV != nil { dv = deprecatedV.String() } - registeredMetrics.WithLabelValues(string(sl), dv).Inc() + registeredMetricsTotal.WithLabelValues(string(sl), dv).Inc() return r.IsCreated() } diff --git a/vendor/k8s.io/component-base/metrics/options.go b/vendor/k8s.io/component-base/metrics/options.go index 7a59b7ba1..2c72cb48f 100644 --- a/vendor/k8s.io/component-base/metrics/options.go +++ b/vendor/k8s.io/component-base/metrics/options.go @@ -31,6 +31,7 @@ type Options struct { ShowHiddenMetricsForVersion string DisabledMetrics []string AllowListMapping map[string]string + AllowListMappingManifest string } // NewOptions returns default metrics options @@ -40,6 +41,10 @@ func NewOptions() *Options { // Validate validates metrics flags options. func (o *Options) Validate() []error { + if o == nil { + return nil + } + var errs []error err := validateShowHiddenMetricsVersion(parseVersion(version.Get()), o.ShowHiddenMetricsForVersion) if err != nil { @@ -77,6 +82,10 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) { "The map from metric-label to value allow-list of this label. The key's format is ,. "+ "The value's format is ,..."+ "e.g. metric1,label1='v1,v2,v3', metric1,label2='v1,v2,v3' metric2,label1='v1,v2,v3'.") + fs.StringVar(&o.AllowListMappingManifest, "allow-metric-labels-manifest", o.AllowListMappingManifest, + "The path to the manifest file that contains the allow-list mapping. "+ + "The format of the file is the same as the flag --allow-metric-labels. 
"+ + "Note that the flag --allow-metric-labels will override the manifest file.") } // Apply applies parameters into global configuration of metrics. @@ -93,6 +102,8 @@ func (o *Options) Apply() { } if o.AllowListMapping != nil { SetLabelAllowListFromCLI(o.AllowListMapping) + } else if len(o.AllowListMappingManifest) > 0 { + SetLabelAllowListFromManifest(o.AllowListMappingManifest) } } @@ -118,7 +129,7 @@ func validateAllowMetricLabel(allowListMapping map[string]string) error { for k := range allowListMapping { reg := regexp.MustCompile(metricNameRegex + `,` + labelRegex) if reg.FindString(k) != k { - return fmt.Errorf("--allow-metric-labels must has a list of kv pair with format `metricName:labelName=labelValue, labelValue,...`") + return fmt.Errorf("--allow-metric-labels must have a list of kv pair with format `metricName:labelName=labelValue, labelValue,...`") } } return nil diff --git a/vendor/k8s.io/component-base/metrics/opts.go b/vendor/k8s.io/component-base/metrics/opts.go index 49d2d40bb..30dfd2e3d 100644 --- a/vendor/k8s.io/component-base/metrics/opts.go +++ b/vendor/k8s.io/component-base/metrics/opts.go @@ -18,13 +18,18 @@ package metrics import ( "fmt" + "os" + "path/filepath" "strings" "sync" "time" "github.com/prometheus/client_golang/prometheus" + "gopkg.in/yaml.v2" + "k8s.io/apimachinery/pkg/util/sets" promext "k8s.io/component-base/metrics/prometheusextension" + "k8s.io/klog/v2" ) var ( @@ -319,6 +324,7 @@ func (allowList *MetricLabelAllowList) ConstrainToAllowedList(labelNameList, lab if allowValues, ok := allowList.labelToAllowList[name]; ok { if !allowValues.Has(value) { labelValueList[index] = "unexpected" + cardinalityEnforcementUnexpectedCategorizationsTotal.Inc() } } } @@ -329,6 +335,7 @@ func (allowList *MetricLabelAllowList) ConstrainLabelMap(labels map[string]strin if allowValues, ok := allowList.labelToAllowList[name]; ok { if !allowValues.Has(value) { labels[name] = "unexpected" + cardinalityEnforcementUnexpectedCategorizationsTotal.Inc() } } } @@ -354,3 +361,20 @@ func SetLabelAllowListFromCLI(allowListMapping map[string]string) { } } } + +func SetLabelAllowListFromManifest(manifest string) { + allowListLock.Lock() + defer allowListLock.Unlock() + allowListMapping := make(map[string]string) + data, err := os.ReadFile(filepath.Clean(manifest)) + if err != nil { + klog.Errorf("Failed to read allow list manifest: %v", err) + return + } + err = yaml.Unmarshal(data, &allowListMapping) + if err != nil { + klog.Errorf("Failed to parse allow list manifest: %v", err) + return + } + SetLabelAllowListFromCLI(allowListMapping) +} diff --git a/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go b/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go index 7907dfad1..3d464d12d 100644 --- a/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go +++ b/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go @@ -37,7 +37,7 @@ var ( Namespace: "kubernetes", Name: "healthcheck", Help: "This metric records the result of a single healthcheck.", - StabilityLevel: k8smetrics.BETA, + StabilityLevel: k8smetrics.STABLE, }, []string{"name", "type"}, ) @@ -48,7 +48,7 @@ var ( Namespace: "kubernetes", Name: "healthchecks_total", Help: "This metric records the results of all healthcheck.", - StabilityLevel: k8smetrics.BETA, + StabilityLevel: k8smetrics.STABLE, }, []string{"name", "type", "status"}, ) diff --git a/vendor/k8s.io/component-base/metrics/registry.go b/vendor/k8s.io/component-base/metrics/registry.go index 1942f9958..203813e81 100644 --- 
a/vendor/k8s.io/component-base/metrics/registry.go +++ b/vendor/k8s.io/component-base/metrics/registry.go @@ -37,7 +37,7 @@ var ( registriesLock sync.RWMutex disabledMetrics = map[string]struct{}{} - registeredMetrics = NewCounterVec( + registeredMetricsTotal = NewCounterVec( &CounterOpts{ Name: "registered_metrics_total", Help: "The count of registered metrics broken by stability level and deprecation version.", @@ -61,6 +61,14 @@ var ( StabilityLevel: BETA, }, ) + + cardinalityEnforcementUnexpectedCategorizationsTotal = NewCounter( + &CounterOpts{ + Name: "cardinality_enforcement_unexpected_categorizations_total", + Help: "The count of unexpected categorizations during cardinality enforcement.", + StabilityLevel: ALPHA, + }, + ) ) // shouldHide be used to check if a specific metric with deprecated version should be hidden @@ -379,7 +387,8 @@ func NewKubeRegistry() KubeRegistry { } func (r *kubeRegistry) RegisterMetaMetrics() { - r.MustRegister(registeredMetrics) + r.MustRegister(registeredMetricsTotal) r.MustRegister(disabledMetricsTotal) r.MustRegister(hiddenMetricsTotal) + r.MustRegister(cardinalityEnforcementUnexpectedCategorizationsTotal) } diff --git a/vendor/k8s.io/component-base/tracing/tracing.go b/vendor/k8s.io/component-base/tracing/tracing.go index 50894eb3b..bdf6f377d 100644 --- a/vendor/k8s.io/component-base/tracing/tracing.go +++ b/vendor/k8s.io/component-base/tracing/tracing.go @@ -68,6 +68,12 @@ func (s *Span) End(logThreshold time.Duration) { } } +// RecordError will record err as an exception span event for this span. +// If this span is not being recorded or err is nil then this method does nothing. +func (s *Span) RecordError(err error, attributes ...attribute.KeyValue) { + s.otelSpan.RecordError(err, trace.WithAttributes(attributes...)) +} + func attributesToFields(attributes []attribute.KeyValue) []utiltrace.Field { fields := make([]utiltrace.Field, len(attributes)) for i := range attributes { diff --git a/vendor/k8s.io/component-base/tracing/utils.go b/vendor/k8s.io/component-base/tracing/utils.go index ae894a809..72c8cf23e 100644 --- a/vendor/k8s.io/component-base/tracing/utils.go +++ b/vendor/k8s.io/component-base/tracing/utils.go @@ -25,6 +25,7 @@ import ( "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" oteltrace "go.opentelemetry.io/otel/trace" "k8s.io/client-go/transport" @@ -95,9 +96,17 @@ func WithTracing(handler http.Handler, tp oteltrace.TracerProvider, serviceName otelhttp.WithPropagators(Propagators()), otelhttp.WithTracerProvider(tp), } + wrappedHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Add the http.target attribute to the otelhttp span + // Workaround for https://github.com/open-telemetry/opentelemetry-go-contrib/issues/3743 + if r.URL != nil { + oteltrace.SpanFromContext(r.Context()).SetAttributes(semconv.HTTPTarget(r.URL.RequestURI())) + } + handler.ServeHTTP(w, r) + }) // With Noop TracerProvider, the otelhttp still handles context propagation. // See https://github.com/open-telemetry/opentelemetry-go/tree/main/example/passthrough - return otelhttp.NewHandler(handler, serviceName, opts...) + return otelhttp.NewHandler(wrappedHandler, serviceName, opts...) } // WrapperFor can be used to add tracing to a *rest.Config. 
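The tracing change above decorates the otelhttp handler so the http.target attribute is recorded on the server span. A minimal sketch of wrapping a handler, assuming WithTracing returns an http.Handler as in upstream component-base; the no-op provider still propagates trace context, and the handler/service names are illustrative.

package main

import (
	"net/http"

	oteltrace "go.opentelemetry.io/otel/trace"
	"k8s.io/component-base/tracing"
)

// newTracedHandler wraps a plain mux with the tracing middleware; spans carry
// the service name "example-service" and, with this change, an http.target attribute.
func newTracedHandler() http.Handler {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})
	return tracing.WithTracing(mux, oteltrace.NewNoopTracerProvider(), "example-service")
}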
diff --git a/vendor/k8s.io/component-helpers/node/util/sysctl/namespace.go b/vendor/k8s.io/component-helpers/node/util/sysctl/namespace.go new file mode 100644 index 000000000..7042766ad --- /dev/null +++ b/vendor/k8s.io/component-helpers/node/util/sysctl/namespace.go @@ -0,0 +1,104 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sysctl + +import ( + "strings" +) + +// Namespace represents a kernel namespace name. +type Namespace string + +const ( + // refer to https://man7.org/linux/man-pages/man7/ipc_namespaces.7.html + // the Linux IPC namespace + IPCNamespace = Namespace("IPC") + + // refer to https://man7.org/linux/man-pages/man7/network_namespaces.7.html + // the network namespace + NetNamespace = Namespace("Net") + + // the zero value if no namespace is known + UnknownNamespace = Namespace("") +) + +var nameToNamespace = map[string]Namespace{ + // kernel semaphore parameters: SEMMSL, SEMMNS, SEMOPM, and SEMMNI. + "kernel.sem": IPCNamespace, + // kernel shared memory limits include shmall, shmmax, shmmni, and shm_rmid_forced. + "kernel.shmall": IPCNamespace, + "kernel.shmmax": IPCNamespace, + "kernel.shmmni": IPCNamespace, + "kernel.shm_rmid_forced": IPCNamespace, + // make backward compatibility to know the namespace of kernel.shm* + "kernel.shm": IPCNamespace, + // kernel messages include msgmni, msgmax and msgmnb. + "kernel.msgmax": IPCNamespace, + "kernel.msgmnb": IPCNamespace, + "kernel.msgmni": IPCNamespace, + // make backward compatibility to know the namespace of kernel.msg* + "kernel.msg": IPCNamespace, +} + +var prefixToNamespace = map[string]Namespace{ + "net": NetNamespace, + // mqueue filesystem provides the necessary kernel features to enable the creation + // of a user space library that implements the POSIX message queues API. + "fs.mqueue": IPCNamespace, +} + +// namespaceOf returns the namespace of the Linux kernel for a sysctl, or +// unknownNamespace if the sysctl is not known to be namespaced. +// The second return is prefixed bool. +// It returns true if the key is prefixed with a key in the prefix map +func namespaceOf(val string) Namespace { + if ns, found := nameToNamespace[val]; found { + return ns + } + for p, ns := range prefixToNamespace { + if strings.HasPrefix(val, p+".") { + return ns + } + } + return UnknownNamespace +} + +// GetNamespace extracts information from a sysctl string. It returns: +// 1. The sysctl namespace, which can be one of the following: IPC, Net, or unknown. +// 2. sysctlOrPrefix: the prefix of the sysctl parameter until the first '*'. +// If there is no '*', it will be the original string. +// 3. 'prefixed' is set to true if the sysctl parameter contains '*' or it is in the prefixToNamespace key list, in most cases, it is a suffix *. 
+// +// For example, if the input sysctl is 'net.ipv6.neigh.*', GetNamespace will return: +// - The Net namespace +// - The sysctlOrPrefix as 'net.ipv6.neigh' +// - 'prefixed' set to true +// +// For the input sysctl 'net.ipv6.conf.all.disable_ipv6', GetNamespace will return: +// - The Net namespace +// - The sysctlOrPrefix as 'net.ipv6.conf.all.disable_ipv6' +// - 'prefixed' set to false. +func GetNamespace(sysctl string) (ns Namespace, sysctlOrPrefix string, prefixed bool) { + sysctlOrPrefix = NormalizeName(sysctl) + firstIndex := strings.IndexAny(sysctlOrPrefix, "*") + if firstIndex != -1 { + sysctlOrPrefix = sysctlOrPrefix[:firstIndex] + prefixed = true + } + ns = namespaceOf(sysctlOrPrefix) + return +} diff --git a/vendor/k8s.io/component-helpers/node/util/sysctl/sysctl.go b/vendor/k8s.io/component-helpers/node/util/sysctl/sysctl.go new file mode 100644 index 000000000..3233c7d18 --- /dev/null +++ b/vendor/k8s.io/component-helpers/node/util/sysctl/sysctl.go @@ -0,0 +1,131 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sysctl + +import ( + "os" + "path" + "strconv" + "strings" +) + +const ( + sysctlBase = "/proc/sys" + // VMOvercommitMemory refers to the sysctl variable responsible for defining + // the memory over-commit policy used by kernel. + VMOvercommitMemory = "vm/overcommit_memory" + // VMPanicOnOOM refers to the sysctl variable responsible for defining + // the OOM behavior used by kernel. + VMPanicOnOOM = "vm/panic_on_oom" + // KernelPanic refers to the sysctl variable responsible for defining + // the timeout after a panic for the kernel to reboot. + KernelPanic = "kernel/panic" + // KernelPanicOnOops refers to the sysctl variable responsible for defining + // the kernel behavior when an oops or BUG is encountered. + KernelPanicOnOops = "kernel/panic_on_oops" + // RootMaxKeys refers to the sysctl variable responsible for defining + // the maximum number of keys that the root user (UID 0 in the root user namespace) may own. + RootMaxKeys = "kernel/keys/root_maxkeys" + // RootMaxBytes refers to the sysctl variable responsible for defining + // the maximum number of bytes of data that the root user (UID 0 in the root user namespace) + // can hold in the payloads of the keys owned by root. + RootMaxBytes = "kernel/keys/root_maxbytes" + + // VMOvercommitMemoryAlways represents that kernel performs no memory over-commit handling. + VMOvercommitMemoryAlways = 1 + // VMPanicOnOOMInvokeOOMKiller represents that kernel calls the oom_killer function when OOM occurs. + VMPanicOnOOMInvokeOOMKiller = 0 + + // KernelPanicOnOopsAlways represents that kernel panics on kernel oops. + KernelPanicOnOopsAlways = 1 + // KernelPanicRebootTimeout is the timeout seconds after a panic for the kernel to reboot. + KernelPanicRebootTimeout = 10 + + // RootMaxKeysSetting is the maximum number of keys that the root user (UID 0 in the root user namespace) may own. + // Needed since docker creates a new key per container. 
+ RootMaxKeysSetting = 1000000 + // RootMaxBytesSetting is the maximum number of bytes of data that the root user (UID 0 in the root user namespace) + // can hold in the payloads of the keys owned by root. + // Allocate 25 bytes per key * number of MaxKeys. + RootMaxBytesSetting = RootMaxKeysSetting * 25 +) + +// Interface is an injectable interface for running sysctl commands. +type Interface interface { + // GetSysctl returns the value for the specified sysctl setting + GetSysctl(sysctl string) (int, error) + // SetSysctl modifies the specified sysctl flag to the new value + SetSysctl(sysctl string, newVal int) error +} + +// New returns a new Interface for accessing sysctl +func New() Interface { + return &procSysctl{} +} + +// procSysctl implements Interface by reading and writing files under /proc/sys +type procSysctl struct { +} + +// GetSysctl returns the value for the specified sysctl setting +func (*procSysctl) GetSysctl(sysctl string) (int, error) { + data, err := os.ReadFile(path.Join(sysctlBase, sysctl)) + if err != nil { + return -1, err + } + val, err := strconv.Atoi(strings.Trim(string(data), " \n")) + if err != nil { + return -1, err + } + return val, nil +} + +// SetSysctl modifies the specified sysctl flag to the new value +func (*procSysctl) SetSysctl(sysctl string, newVal int) error { + return os.WriteFile(path.Join(sysctlBase, sysctl), []byte(strconv.Itoa(newVal)), 0640) +} + +// NormalizeName can return sysctl variables in dots separator format. +// The '/' separator is also accepted in place of a '.'. +// Convert the sysctl variables to dots separator format for validation. +// More info: +// +// https://man7.org/linux/man-pages/man8/sysctl.8.html +// https://man7.org/linux/man-pages/man5/sysctl.d.5.html +func NormalizeName(val string) string { + if val == "" { + return val + } + firstSepIndex := strings.IndexAny(val, "./") + // if the first found is `.` like `net.ipv4.conf.eno2/100.rp_filter` + if firstSepIndex == -1 || val[firstSepIndex] == '.' { + return val + } + + // for `net/ipv4/conf/eno2.100/rp_filter`, swap the use of `.` and `/` + // to `net.ipv4.conf.eno2/100.rp_filter` + f := func(r rune) rune { + switch r { + case '.': + return '/' + case '/': + return '.' + } + return r + } + return strings.Map(f, val) +} diff --git a/vendor/k8s.io/controller-manager/pkg/features/kube_features.go b/vendor/k8s.io/controller-manager/pkg/features/kube_features.go index 8864be325..ab5d83885 100644 --- a/vendor/k8s.io/controller-manager/pkg/features/kube_features.go +++ b/vendor/k8s.io/controller-manager/pkg/features/kube_features.go @@ -40,6 +40,7 @@ const ( // owner: @danwinship // alpha: v1.27 + // beta: v1.29 // // Enables dual-stack values in the // `alpha.kubernetes.io/provided-node-ip` annotation @@ -62,6 +63,6 @@ func SetupCurrentKubernetesSpecificFeatureGates(featuregates featuregate.Mutable // To add a new feature, define a key for it at k8s.io/api/pkg/features and add it here. 
var cloudPublicFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ CloudControllerManagerWebhook: {Default: false, PreRelease: featuregate.Alpha}, - CloudDualStackNodeIPs: {Default: false, PreRelease: featuregate.Alpha}, + CloudDualStackNodeIPs: {Default: true, PreRelease: featuregate.Beta}, StableLoadBalancerNodeSet: {Default: true, PreRelease: featuregate.Beta}, } diff --git a/vendor/k8s.io/kms/apis/v2/api.pb.go b/vendor/k8s.io/kms/apis/v2/api.pb.go index 1b634f932..225cc3f8f 100644 --- a/vendor/k8s.io/kms/apis/v2/api.pb.go +++ b/vendor/k8s.io/kms/apis/v2/api.pb.go @@ -71,11 +71,14 @@ func (m *StatusRequest) XXX_DiscardUnknown() { var xxx_messageInfo_StatusRequest proto.InternalMessageInfo type StatusResponse struct { - // Version of the KMS plugin API. Must match the configured .resources[].providers[].kms.apiVersion + // Version of the KMS gRPC plugin API. Must equal v2 to v2beta1 (v2 is recommended, but both are equivalent). Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` // Any value other than "ok" is failing healthz. On failure, the associated API server healthz endpoint will contain this value as part of the error message. Healthz string `protobuf:"bytes,2,opt,name=healthz,proto3" json:"healthz,omitempty"` // the current write key, used to determine staleness of data updated via value.Transformer.TransformFromStorage. + // keyID must satisfy the following constraints: + // 1. The keyID is not empty. + // 2. The size of keyID is less than 1 kB. KeyId string `protobuf:"bytes,3,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -283,12 +286,21 @@ func (m *EncryptRequest) GetUid() string { type EncryptResponse struct { // The encrypted data. + // ciphertext must satisfy the following constraints: + // 1. The ciphertext is not empty. + // 2. The ciphertext is less than 1 kB. Ciphertext []byte `protobuf:"bytes,1,opt,name=ciphertext,proto3" json:"ciphertext,omitempty"` // The KMS key ID used to encrypt the data. This must always refer to the KMS KEK and not any local KEKs that may be in use. // This can be used to inform staleness of data updated via value.Transformer.TransformFromStorage. + // keyID must satisfy the following constraints: + // 1. The keyID is not empty. + // 2. The size of keyID is less than 1 kB. KeyId string `protobuf:"bytes,2,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"` // Additional metadata to be stored with the encrypted data. // This data is stored in plaintext in etcd. KMS plugin implementations are responsible for pre-encrypting any sensitive data. + // Annotations must satisfy the following constraints: + // 1. Annotation key must be a fully qualified domain name that conforms to the definition in DNS (RFC 1123). + // 2. The size of annotations keys + values is less than 32 kB. Annotations map[string][]byte `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` diff --git a/vendor/k8s.io/kms/apis/v2/api.proto b/vendor/k8s.io/kms/apis/v2/api.proto index 3c7d335e8..be4752f47 100644 --- a/vendor/k8s.io/kms/apis/v2/api.proto +++ b/vendor/k8s.io/kms/apis/v2/api.proto @@ -34,11 +34,14 @@ service KeyManagementService { message StatusRequest {} message StatusResponse { - // Version of the KMS plugin API. 
Must match the configured .resources[].providers[].kms.apiVersion + // Version of the KMS gRPC plugin API. Must equal v2 to v2beta1 (v2 is recommended, but both are equivalent). string version = 1; // Any value other than "ok" is failing healthz. On failure, the associated API server healthz endpoint will contain this value as part of the error message. string healthz = 2; // the current write key, used to determine staleness of data updated via value.Transformer.TransformFromStorage. + // keyID must satisfy the following constraints: + // 1. The keyID is not empty. + // 2. The size of keyID is less than 1 kB. string key_id = 3; } @@ -68,11 +71,20 @@ message EncryptRequest { message EncryptResponse { // The encrypted data. + // ciphertext must satisfy the following constraints: + // 1. The ciphertext is not empty. + // 2. The ciphertext is less than 1 kB. bytes ciphertext = 1; // The KMS key ID used to encrypt the data. This must always refer to the KMS KEK and not any local KEKs that may be in use. // This can be used to inform staleness of data updated via value.Transformer.TransformFromStorage. + // keyID must satisfy the following constraints: + // 1. The keyID is not empty. + // 2. The size of keyID is less than 1 kB. string key_id = 2; // Additional metadata to be stored with the encrypted data. // This data is stored in plaintext in etcd. KMS plugin implementations are responsible for pre-encrypting any sensitive data. + // Annotations must satisfy the following constraints: + // 1. Annotation key must be a fully qualified domain name that conforms to the definition in DNS (RFC 1123). + // 2. The size of annotations keys + values is less than 32 kB. map annotations = 3; } diff --git a/vendor/k8s.io/kms/pkg/service/grpc_service.go b/vendor/k8s.io/kms/pkg/service/grpc_service.go index fc463e5c4..d2d06da27 100644 --- a/vendor/k8s.io/kms/pkg/service/grpc_service.go +++ b/vendor/k8s.io/kms/pkg/service/grpc_service.go @@ -23,7 +23,6 @@ import ( "google.golang.org/grpc" - "k8s.io/klog/v2" kmsapi "k8s.io/kms/apis/v2" ) @@ -45,8 +44,6 @@ func NewGRPCService( kmsService Service, ) *GRPCService { - klog.V(4).InfoS("KMS plugin configured", "address", address, "timeout", timeout) - return &GRPCService{ addr: address, timeout: timeout, @@ -70,14 +67,12 @@ func (s *GRPCService) ListenAndServe() error { kmsapi.RegisterKeyManagementServiceServer(gs, s) - klog.V(4).InfoS("kms plugin serving", "address", s.addr) return gs.Serve(ln) } // Shutdown performs a graceful shutdown. Doesn't accept new connections and // blocks until all pending RPCs are finished. func (s *GRPCService) Shutdown() { - klog.V(4).InfoS("kms plugin shutdown", "address", s.addr) if s.server != nil { s.server.GracefulStop() } @@ -86,7 +81,6 @@ func (s *GRPCService) Shutdown() { // Close stops the server by closing all connections immediately and cancels // all active RPCs. func (s *GRPCService) Close() { - klog.V(4).InfoS("kms plugin close", "address", s.addr) if s.server != nil { s.server.Stop() } @@ -108,8 +102,6 @@ func (s *GRPCService) Status(ctx context.Context, _ *kmsapi.StatusRequest) (*kms // Decrypt sends a decryption request to specified kms service. 
func (s *GRPCService) Decrypt(ctx context.Context, req *kmsapi.DecryptRequest) (*kmsapi.DecryptResponse, error) { - klog.V(4).InfoS("decrypt request received", "id", req.Uid) - plaintext, err := s.kmsService.Decrypt(ctx, req.Uid, &DecryptRequest{ Ciphertext: req.Ciphertext, KeyID: req.KeyId, @@ -126,8 +118,6 @@ func (s *GRPCService) Decrypt(ctx context.Context, req *kmsapi.DecryptRequest) ( // Encrypt sends an encryption request to specified kms service. func (s *GRPCService) Encrypt(ctx context.Context, req *kmsapi.EncryptRequest) (*kmsapi.EncryptResponse, error) { - klog.V(4).InfoS("encrypt request received", "id", req.Uid) - encRes, err := s.kmsService.Encrypt(ctx, req.Uid, req.Plaintext) if err != nil { return nil, err diff --git a/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go b/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go index 3a8d765f1..e59844786 100644 --- a/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go @@ -156,7 +156,9 @@ func (o *openAPI) buildRequestBody(parameters []common.Parameter, consumes []str } r := &spec3.RequestBody{ RequestBodyProps: spec3.RequestBodyProps{ - Content: map[string]*spec3.MediaType{}, + Content: map[string]*spec3.MediaType{}, + Description: param.Description(), + Required: param.Required(), }, } for _, consume := range consumes { @@ -172,9 +174,9 @@ func (o *openAPI) buildRequestBody(parameters []common.Parameter, consumes []str return nil, nil } -func newOpenAPI(config *common.Config) openAPI { +func newOpenAPI(config *common.OpenAPIV3Config) openAPI { o := openAPI{ - config: common.ConvertConfigToV3(config), + config: config, spec: &spec3.OpenAPI{ Version: "3.0.0", Info: config.Info, @@ -313,12 +315,12 @@ func (o *openAPI) buildOpenAPISpec(webServices []common.RouteContainer) error { // BuildOpenAPISpec builds OpenAPI v3 spec given a list of route containers and common.Config to customize it. // // Deprecated: BuildOpenAPISpecFromRoutes should be used instead. -func BuildOpenAPISpec(webServices []*restful.WebService, config *common.Config) (*spec3.OpenAPI, error) { +func BuildOpenAPISpec(webServices []*restful.WebService, config *common.OpenAPIV3Config) (*spec3.OpenAPI, error) { return BuildOpenAPISpecFromRoutes(restfuladapter.AdaptWebServices(webServices), config) } // BuildOpenAPISpecFromRoutes builds OpenAPI v3 spec given a list of route containers and common.Config to customize it. -func BuildOpenAPISpecFromRoutes(webServices []common.RouteContainer, config *common.Config) (*spec3.OpenAPI, error) { +func BuildOpenAPISpecFromRoutes(webServices []common.RouteContainer, config *common.OpenAPIV3Config) (*spec3.OpenAPI, error) { a := newOpenAPI(config) err := a.buildOpenAPISpec(webServices) if err != nil { @@ -330,7 +332,7 @@ func BuildOpenAPISpecFromRoutes(webServices []common.RouteContainer, config *com // BuildOpenAPIDefinitionsForResource builds a partial OpenAPI spec given a sample object and common.Config to customize it. // BuildOpenAPIDefinitionsForResources returns the OpenAPI spec which includes the definitions for the // passed type names. -func BuildOpenAPIDefinitionsForResources(config *common.Config, names ...string) (map[string]*spec.Schema, error) { +func BuildOpenAPIDefinitionsForResources(config *common.OpenAPIV3Config, names ...string) (map[string]*spec.Schema, error) { o := newOpenAPI(config) // We can discard the return value of toSchema because all we care about is the side effect of calling it. 
// All the models created for this resource get added to o.swagger.Definitions diff --git a/vendor/k8s.io/kube-openapi/pkg/cached/cache.go b/vendor/k8s.io/kube-openapi/pkg/cached/cache.go index 76415b783..a66fe8a09 100644 --- a/vendor/k8s.io/kube-openapi/pkg/cached/cache.go +++ b/vendor/k8s.io/kube-openapi/pkg/cached/cache.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package cache provides a cache mechanism based on etags to lazily +// Package cached provides a cache mechanism based on etags to lazily // build, and/or cache results from expensive operation such that those // operations are not repeated unnecessarily. The operations can be // created as a tree, and replaced dynamically as needed. @@ -25,16 +25,18 @@ limitations under the License. // // This package uses a source/transform/sink model of caches to build // the dependency tree, and can be used as follows: -// - [NewSource]: A source cache that recomputes the content every time. -// - [NewStaticSource]: A source cache that always produces the +// - [Func]: A source cache that recomputes the content every time. +// - [Once]: A source cache that always produces the // same content, it is only called once. -// - [NewTransformer]: A cache that transforms data from one format to +// - [Transform]: A cache that transforms data from one format to // another. It's only refreshed when the source changes. -// - [NewMerger]: A cache that aggregates multiple caches into one. +// - [Merge]: A cache that aggregates multiple caches in a map into one. // It's only refreshed when the source changes. -// - [Replaceable]: A cache adapter that can be atomically -// replaced with a new one, and saves the previous results in case an -// error pops-up. +// - [MergeList]: A cache that aggregates multiple caches in a list into one. +// It's only refreshed when the source changes. +// - [Atomic]: A cache adapter that atomically replaces the source with a new one. +// - [LastSuccess]: A cache adapter that caches the last successful and returns +// it if the next call fails. It extends [Atomic]. // // # Etags // @@ -54,61 +56,45 @@ import ( "sync/atomic" ) -// Result is the content returned from a call to a cache. It can either -// be created with [NewResultOK] if the call was a success, or -// [NewResultErr] if the call resulted in an error. +// Value is wrapping a value behind a getter for lazy evaluation. +type Value[T any] interface { + Get() (value T, etag string, err error) +} + +// Result is wrapping T and error into a struct for cases where a tuple is more +// convenient or necessary in Golang. type Result[T any] struct { - Data T - Etag string - Err error + Value T + Etag string + Err error } -// NewResultOK creates a new [Result] for a successful operation. -func NewResultOK[T any](data T, etag string) Result[T] { - return Result[T]{ - Data: data, - Etag: etag, - } +func (r Result[T]) Get() (T, string, error) { + return r.Value, r.Etag, r.Err } -// NewResultErr creates a new [Result] when an error has happened. -func NewResultErr[T any](err error) Result[T] { - return Result[T]{ - Err: err, - } +// Func wraps a (thread-safe) function as a Value[T]. +func Func[T any](fn func() (T, string, error)) Value[T] { + return valueFunc[T](fn) } -// Result can be treated as a [Data] if necessary. 
-func (r Result[T]) Get() Result[T] { - return r +type valueFunc[T any] func() (T, string, error) + +func (c valueFunc[T]) Get() (T, string, error) { + return c() } -// Data is a cache that performs an action whose result data will be -// cached. It also returns an "etag" identifier to version the cache, so -// that the caller can know if they have the most recent version of the -// cache (and can decide to cache some operation based on that). -// -// The [NewMerger] and [NewTransformer] automatically handle -// that for you by checking if the etag is updated before calling the -// merging or transforming function. -type Data[T any] interface { - // Returns the cached data, as well as an "etag" to identify the - // version of the cache, or an error if something happened. - Get() Result[T] +// Static returns constant values. +func Static[T any](value T, etag string) Value[T] { + return Result[T]{Value: value, Etag: etag} } -// NewMerger creates a new merge cache, a cache that merges the result -// of other caches. The function only gets called if any of the -// dependency has changed. +// Merge merges a of cached values. The merge function only gets called if any of +// the dependency has changed. // // If any of the dependency returned an error before, or any of the // dependency returned an error this time, or if the mergeFn failed -// before, then the function is reran. -// -// The caches and results are mapped by K so that associated data can be -// retrieved. The map of dependencies can not be modified after -// creation, and a new merger should be created (and probably replaced -// using a [Replaceable]). +// before, then the function is run again. // // Note that this assumes there is no "partial" merge, the merge // function will remerge all the dependencies together everytime. Since @@ -118,18 +104,19 @@ type Data[T any] interface { // Also note that Golang map iteration is not stable. If the mergeFn // depends on the order iteration to be stable, it will need to // implement its own sorting or iteration order. -func NewMerger[K comparable, T, V any](mergeFn func(results map[K]Result[T]) Result[V], caches map[K]Data[T]) Data[V] { - listCaches := make([]Data[T], 0, len(caches)) - // maps from index to key +func Merge[K comparable, T, V any](mergeFn func(results map[K]Result[T]) (V, string, error), caches map[K]Value[T]) Value[V] { + list := make([]Value[T], 0, len(caches)) + + // map from index to key indexes := make(map[int]K, len(caches)) i := 0 for k := range caches { - listCaches = append(listCaches, caches[k]) + list = append(list, caches[k]) indexes[i] = k i++ } - return NewListMerger(func(results []Result[T]) Result[V] { + return MergeList(func(results []Result[T]) (V, string, error) { if len(results) != len(indexes) { panic(fmt.Errorf("invalid result length %d, expected %d", len(results), len(indexes))) } @@ -138,20 +125,11 @@ func NewMerger[K comparable, T, V any](mergeFn func(results map[K]Result[T]) Res m[indexes[i]] = results[i] } return mergeFn(m) - }, listCaches) -} - -type listMerger[T, V any] struct { - lock sync.Mutex - mergeFn func([]Result[T]) Result[V] - caches []Data[T] - cacheResults []Result[T] - result Result[V] + }, list) } -// NewListMerger creates a new merge cache that merges the results of -// other caches in list form. The function only gets called if any of -// the dependency has changed. +// MergeList merges a list of cached values. The function only gets called if +// any of the dependency has changed. 
// // The benefit of ListMerger over the basic Merger is that caches are // stored in an ordered list so the order of the cache will be @@ -165,31 +143,37 @@ type listMerger[T, V any] struct { // function will remerge all the dependencies together everytime. Since // the list of dependencies is constant, there is no way to save some // partial merge information either. -func NewListMerger[T, V any](mergeFn func(results []Result[T]) Result[V], caches []Data[T]) Data[V] { +func MergeList[T, V any](mergeFn func(results []Result[T]) (V, string, error), delegates []Value[T]) Value[V] { return &listMerger[T, V]{ - mergeFn: mergeFn, - caches: caches, + mergeFn: mergeFn, + delegates: delegates, } } +type listMerger[T, V any] struct { + lock sync.Mutex + mergeFn func([]Result[T]) (V, string, error) + delegates []Value[T] + cache []Result[T] + result Result[V] +} + func (c *listMerger[T, V]) prepareResultsLocked() []Result[T] { - cacheResults := make([]Result[T], len(c.caches)) + cacheResults := make([]Result[T], len(c.delegates)) ch := make(chan struct { int Result[T] - }, len(c.caches)) - for i := range c.caches { + }, len(c.delegates)) + for i := range c.delegates { go func(index int) { + value, etag, err := c.delegates[index].Get() ch <- struct { int Result[T] - }{ - index, - c.caches[index].Get(), - } + }{index, Result[T]{Value: value, Etag: etag, Err: err}} }(i) } - for i := 0; i < len(c.caches); i++ { + for i := 0; i < len(c.delegates); i++ { res := <-ch cacheResults[res.int] = res.Result } @@ -197,16 +181,16 @@ func (c *listMerger[T, V]) prepareResultsLocked() []Result[T] { } func (c *listMerger[T, V]) needsRunningLocked(results []Result[T]) bool { - if c.cacheResults == nil { + if c.cache == nil { return true } if c.result.Err != nil { return true } - if len(results) != len(c.cacheResults) { - panic(fmt.Errorf("invalid number of results: %v (expected %v)", len(results), len(c.cacheResults))) + if len(results) != len(c.cache) { + panic(fmt.Errorf("invalid number of results: %v (expected %v)", len(results), len(c.cache))) } - for i, oldResult := range c.cacheResults { + for i, oldResult := range c.cache { newResult := results[i] if newResult.Etag != oldResult.Etag || newResult.Err != nil || oldResult.Err != nil { return true @@ -215,98 +199,92 @@ func (c *listMerger[T, V]) needsRunningLocked(results []Result[T]) bool { return false } -func (c *listMerger[T, V]) Get() Result[V] { +func (c *listMerger[T, V]) Get() (V, string, error) { c.lock.Lock() defer c.lock.Unlock() cacheResults := c.prepareResultsLocked() if c.needsRunningLocked(cacheResults) { - c.cacheResults = cacheResults - c.result = c.mergeFn(c.cacheResults) + c.cache = cacheResults + c.result.Value, c.result.Etag, c.result.Err = c.mergeFn(c.cache) } - return c.result + return c.result.Value, c.result.Etag, c.result.Err } -// NewTransformer creates a new cache that transforms the result of -// another cache. The transformFn will only be called if the source -// cache has updated the output, otherwise, the cached result will be -// returned. +// Transform the result of another cached value. The transformFn will only be called +// if the source has updated, otherwise, the result will be returned. // // If the dependency returned an error before, or it returns an error // this time, or if the transformerFn failed before, the function is // reran. 
-func NewTransformer[T, V any](transformerFn func(Result[T]) Result[V], source Data[T]) Data[V] { - return NewListMerger(func(caches []Result[T]) Result[V] { - if len(caches) != 1 { - panic(fmt.Errorf("invalid cache for transformer cache: %v", caches)) +func Transform[T, V any](transformerFn func(T, string, error) (V, string, error), source Value[T]) Value[V] { + return MergeList(func(delegates []Result[T]) (V, string, error) { + if len(delegates) != 1 { + panic(fmt.Errorf("invalid cache for transformer cache: %v", delegates)) } - return transformerFn(caches[0]) - }, []Data[T]{source}) -} - -// NewSource creates a new cache that generates some data. This -// will always be called since we don't know the origin of the data and -// if it needs to be updated or not. sourceFn MUST be thread-safe. -func NewSource[T any](sourceFn func() Result[T]) Data[T] { - c := source[T](sourceFn) - return &c + return transformerFn(delegates[0].Value, delegates[0].Etag, delegates[0].Err) + }, []Value[T]{source}) } -type source[T any] func() Result[T] - -func (c *source[T]) Get() Result[T] { - return (*c)() -} - -// NewStaticSource creates a new cache that always generates the -// same data. This will only be called once (lazily). -func NewStaticSource[T any](staticFn func() Result[T]) Data[T] { - return &static[T]{ - fn: staticFn, +// Once calls Value[T].Get() lazily and only once, even in case of an error result. +func Once[T any](d Value[T]) Value[T] { + return &once[T]{ + data: d, } } -type static[T any] struct { +type once[T any] struct { once sync.Once - fn func() Result[T] + data Value[T] result Result[T] } -func (c *static[T]) Get() Result[T] { +func (c *once[T]) Get() (T, string, error) { c.once.Do(func() { - c.result = c.fn() + c.result.Value, c.result.Etag, c.result.Err = c.data.Get() }) - return c.result + return c.result.Value, c.result.Etag, c.result.Err } -// Replaceable is a cache that carries the result even when the cache is -// replaced. This is the type that should typically be stored in -// structs. -type Replaceable[T any] struct { - cache atomic.Pointer[Data[T]] - result atomic.Pointer[Result[T]] +// Replaceable extends the Value[T] interface with the ability to change the +// underlying Value[T] after construction. +type Replaceable[T any] interface { + Value[T] + Store(Value[T]) } -// Get retrieves the data from the underlying source. [Replaceable] -// implements the [Data] interface itself. This is a pass-through -// that calls the most recent underlying cache. If the cache fails but -// previously had returned a success, that success will be returned -// instead. If the cache fails but we never returned a success, that -// failure is returned. -func (c *Replaceable[T]) Get() Result[T] { - result := (*c.cache.Load()).Get() - - for { - cResult := c.result.Load() - if result.Err != nil && cResult != nil && cResult.Err == nil { - return *cResult - } - if c.result.CompareAndSwap(cResult, &result) { - return result +// Atomic wraps a Value[T] as an atomic value that can be replaced. It implements +// Replaceable[T]. +type Atomic[T any] struct { + value atomic.Pointer[Value[T]] +} + +var _ Replaceable[[]byte] = &Atomic[[]byte]{} + +func (x *Atomic[T]) Store(val Value[T]) { x.value.Store(&val) } +func (x *Atomic[T]) Get() (T, string, error) { return (*x.value.Load()).Get() } + +// LastSuccess calls Value[T].Get(), but hides errors by returning the last +// success if there has been any. 
+type LastSuccess[T any] struct { + Atomic[T] + success atomic.Pointer[Result[T]] +} + +var _ Replaceable[[]byte] = &LastSuccess[[]byte]{} + +func (c *LastSuccess[T]) Get() (T, string, error) { + success := c.success.Load() + value, etag, err := c.Atomic.Get() + if err == nil { + if success == nil { + c.success.CompareAndSwap(nil, &Result[T]{Value: value, Etag: etag, Err: err}) } + return value, etag, err + } + + if success != nil { + return success.Value, success.Etag, success.Err } -} -// Replace changes the cache. -func (c *Replaceable[T]) Replace(cache Data[T]) { - c.cache.Swap(&cache) + return value, etag, err } diff --git a/vendor/k8s.io/kube-openapi/pkg/common/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go index 1a6c12e17..2e15e163c 100644 --- a/vendor/k8s.io/kube-openapi/pkg/common/common.go +++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go @@ -22,7 +22,6 @@ import ( "github.com/emicklei/go-restful/v3" - "k8s.io/kube-openapi/pkg/openapiconv" "k8s.io/kube-openapi/pkg/spec3" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -172,43 +171,6 @@ type OpenAPIV3Config struct { DefaultSecurity []map[string][]string } -// ConvertConfigToV3 converts a Config object to an OpenAPIV3Config object -func ConvertConfigToV3(config *Config) *OpenAPIV3Config { - if config == nil { - return nil - } - - v3Config := &OpenAPIV3Config{ - Info: config.Info, - IgnorePrefixes: config.IgnorePrefixes, - GetDefinitions: config.GetDefinitions, - GetOperationIDAndTags: config.GetOperationIDAndTags, - GetOperationIDAndTagsFromRoute: config.GetOperationIDAndTagsFromRoute, - GetDefinitionName: config.GetDefinitionName, - Definitions: config.Definitions, - SecuritySchemes: make(spec3.SecuritySchemes), - DefaultSecurity: config.DefaultSecurity, - DefaultResponse: openapiconv.ConvertResponse(config.DefaultResponse, []string{"application/json"}), - - CommonResponses: make(map[int]*spec3.Response), - ResponseDefinitions: make(map[string]*spec3.Response), - } - - if config.SecurityDefinitions != nil { - for s, securityScheme := range *config.SecurityDefinitions { - v3Config.SecuritySchemes[s] = openapiconv.ConvertSecurityScheme(securityScheme) - } - } - for k, commonResponse := range config.CommonResponses { - v3Config.CommonResponses[k] = openapiconv.ConvertResponse(&commonResponse, []string{"application/json"}) - } - - for k, responseDefinition := range config.ResponseDefinitions { - v3Config.ResponseDefinitions[k] = openapiconv.ConvertResponse(&responseDefinition, []string{"application/json"}) - } - return v3Config -} - type typeInfo struct { name string format string diff --git a/vendor/k8s.io/kube-openapi/pkg/handler/handler.go b/vendor/k8s.io/kube-openapi/pkg/handler/handler.go index 0eb3f2360..5fc629773 100644 --- a/vendor/k8s.io/kube-openapi/pkg/handler/handler.go +++ b/vendor/k8s.io/kube-openapi/pkg/handler/handler.go @@ -30,6 +30,7 @@ import ( openapi_v2 "github.com/google/gnostic-models/openapiv2" "github.com/google/uuid" "github.com/munnerz/goautoneg" + klog "k8s.io/klog/v2" "k8s.io/kube-openapi/pkg/builder" "k8s.io/kube-openapi/pkg/cached" @@ -59,52 +60,52 @@ type timedSpec struct { // OpenAPIService is the service responsible for serving OpenAPI spec. It has // the ability to safely change the spec while serving it. 
type OpenAPIService struct { - specCache cached.Replaceable[*spec.Swagger] - jsonCache cached.Data[timedSpec] - protoCache cached.Data[timedSpec] + specCache cached.LastSuccess[*spec.Swagger] + jsonCache cached.Value[timedSpec] + protoCache cached.Value[timedSpec] } // NewOpenAPIService builds an OpenAPIService starting with the given spec. func NewOpenAPIService(swagger *spec.Swagger) *OpenAPIService { - return NewOpenAPIServiceLazy(cached.NewResultOK(swagger, uuid.New().String())) + return NewOpenAPIServiceLazy(cached.Static(swagger, uuid.New().String())) } // NewOpenAPIServiceLazy builds an OpenAPIService from lazy spec. -func NewOpenAPIServiceLazy(swagger cached.Data[*spec.Swagger]) *OpenAPIService { +func NewOpenAPIServiceLazy(swagger cached.Value[*spec.Swagger]) *OpenAPIService { o := &OpenAPIService{} o.UpdateSpecLazy(swagger) - o.jsonCache = cached.NewTransformer[*spec.Swagger](func(result cached.Result[*spec.Swagger]) cached.Result[timedSpec] { - if result.Err != nil { - return cached.NewResultErr[timedSpec](result.Err) + o.jsonCache = cached.Transform[*spec.Swagger](func(spec *spec.Swagger, etag string, err error) (timedSpec, string, error) { + if err != nil { + return timedSpec{}, "", err } - json, err := result.Data.MarshalJSON() + json, err := spec.MarshalJSON() if err != nil { - return cached.NewResultErr[timedSpec](err) + return timedSpec{}, "", err } - return cached.NewResultOK(timedSpec{spec: json, lastModified: time.Now()}, computeETag(json)) + return timedSpec{spec: json, lastModified: time.Now()}, computeETag(json), nil }, &o.specCache) - o.protoCache = cached.NewTransformer(func(result cached.Result[timedSpec]) cached.Result[timedSpec] { - if result.Err != nil { - return cached.NewResultErr[timedSpec](result.Err) + o.protoCache = cached.Transform(func(ts timedSpec, etag string, err error) (timedSpec, string, error) { + if err != nil { + return timedSpec{}, "", err } - proto, err := ToProtoBinary(result.Data.spec) + proto, err := ToProtoBinary(ts.spec) if err != nil { - return cached.NewResultErr[timedSpec](err) + return timedSpec{}, "", err } // We can re-use the same etag as json because of the Vary header. 
- return cached.NewResultOK(timedSpec{spec: proto, lastModified: result.Data.lastModified}, result.Etag) + return timedSpec{spec: proto, lastModified: ts.lastModified}, etag, nil }, o.jsonCache) return o } func (o *OpenAPIService) UpdateSpec(swagger *spec.Swagger) error { - o.UpdateSpecLazy(cached.NewResultOK(swagger, uuid.New().String())) + o.UpdateSpecLazy(cached.Static(swagger, uuid.New().String())) return nil } -func (o *OpenAPIService) UpdateSpecLazy(swagger cached.Data[*spec.Swagger]) { - o.specCache.Replace(swagger) +func (o *OpenAPIService) UpdateSpecLazy(swagger cached.Value[*spec.Swagger]) { + o.specCache.Store(swagger) } func ToProtoBinary(json []byte) ([]byte, error) { @@ -130,7 +131,7 @@ func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handl Type string SubType string ReturnedContentType string - GetDataAndEtag cached.Data[timedSpec] + GetDataAndEtag cached.Value[timedSpec] }{ {"application", subTypeJSON, "application/" + subTypeJSON, o.jsonCache}, {"application", subTypeProtobufDeprecated, "application/" + subTypeProtobuf, o.protoCache}, @@ -154,11 +155,11 @@ func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handl continue } // serve the first matching media type in the sorted clause list - result := accepts.GetDataAndEtag.Get() - if result.Err != nil { - klog.Errorf("Error in OpenAPI handler: %s", result.Err) + ts, etag, err := accepts.GetDataAndEtag.Get() + if err != nil { + klog.Errorf("Error in OpenAPI handler: %s", err) // only return a 503 if we have no older cache data to serve - if result.Data.spec == nil { + if ts.spec == nil { w.WriteHeader(http.StatusServiceUnavailable) return } @@ -167,9 +168,9 @@ func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handl w.Header().Set("Content-Type", accepts.ReturnedContentType) // ETag must be enclosed in double quotes: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag - w.Header().Set("Etag", strconv.Quote(result.Etag)) + w.Header().Set("Etag", strconv.Quote(etag)) // ServeContent will take care of caching using eTag. - http.ServeContent(w, r, servePath, result.Data.lastModified, bytes.NewReader(result.Data.spec)) + http.ServeContent(w, r, servePath, ts.lastModified, bytes.NewReader(ts.spec)) return } } diff --git a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go index 2263e2f32..fc4563488 100644 --- a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go +++ b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go @@ -33,6 +33,7 @@ import ( openapi_v3 "github.com/google/gnostic-models/openapiv3" "github.com/google/uuid" "github.com/munnerz/goautoneg" + "k8s.io/klog/v2" "k8s.io/kube-openapi/pkg/cached" "k8s.io/kube-openapi/pkg/common" @@ -73,38 +74,38 @@ type timedSpec struct { // This type is protected by the lock on OpenAPIService. 
type openAPIV3Group struct { - specCache cached.Replaceable[*spec3.OpenAPI] - pbCache cached.Data[timedSpec] - jsonCache cached.Data[timedSpec] + specCache cached.LastSuccess[*spec3.OpenAPI] + pbCache cached.Value[timedSpec] + jsonCache cached.Value[timedSpec] } func newOpenAPIV3Group() *openAPIV3Group { o := &openAPIV3Group{} - o.jsonCache = cached.NewTransformer[*spec3.OpenAPI](func(result cached.Result[*spec3.OpenAPI]) cached.Result[timedSpec] { - if result.Err != nil { - return cached.NewResultErr[timedSpec](result.Err) + o.jsonCache = cached.Transform[*spec3.OpenAPI](func(spec *spec3.OpenAPI, etag string, err error) (timedSpec, string, error) { + if err != nil { + return timedSpec{}, "", err } - json, err := json.Marshal(result.Data) + json, err := json.Marshal(spec) if err != nil { - return cached.NewResultErr[timedSpec](err) + return timedSpec{}, "", err } - return cached.NewResultOK(timedSpec{spec: json, lastModified: time.Now()}, computeETag(json)) + return timedSpec{spec: json, lastModified: time.Now()}, computeETag(json), nil }, &o.specCache) - o.pbCache = cached.NewTransformer(func(result cached.Result[timedSpec]) cached.Result[timedSpec] { - if result.Err != nil { - return cached.NewResultErr[timedSpec](result.Err) + o.pbCache = cached.Transform(func(ts timedSpec, etag string, err error) (timedSpec, string, error) { + if err != nil { + return timedSpec{}, "", err } - proto, err := ToV3ProtoBinary(result.Data.spec) + proto, err := ToV3ProtoBinary(ts.spec) if err != nil { - return cached.NewResultErr[timedSpec](err) + return timedSpec{}, "", err } - return cached.NewResultOK(timedSpec{spec: proto, lastModified: result.Data.lastModified}, result.Etag) + return timedSpec{spec: proto, lastModified: ts.lastModified}, etag, nil }, o.jsonCache) return o } -func (o *openAPIV3Group) UpdateSpec(openapi cached.Data[*spec3.OpenAPI]) { - o.specCache.Replace(openapi) +func (o *openAPIV3Group) UpdateSpec(openapi cached.Value[*spec3.OpenAPI]) { + o.specCache.Store(openapi) } // OpenAPIService is the service responsible for serving OpenAPI spec. It has @@ -114,7 +115,7 @@ type OpenAPIService struct { mutex sync.Mutex v3Schema map[string]*openAPIV3Group - discoveryCache cached.Replaceable[timedSpec] + discoveryCache cached.LastSuccess[timedSpec] } func computeETag(data []byte) string { @@ -137,20 +138,20 @@ func NewOpenAPIService() *OpenAPIService { o := &OpenAPIService{} o.v3Schema = make(map[string]*openAPIV3Group) // We're not locked because we haven't shared the structure yet. 
- o.discoveryCache.Replace(o.buildDiscoveryCacheLocked()) + o.discoveryCache.Store(o.buildDiscoveryCacheLocked()) return o } -func (o *OpenAPIService) buildDiscoveryCacheLocked() cached.Data[timedSpec] { - caches := make(map[string]cached.Data[timedSpec], len(o.v3Schema)) +func (o *OpenAPIService) buildDiscoveryCacheLocked() cached.Value[timedSpec] { + caches := make(map[string]cached.Value[timedSpec], len(o.v3Schema)) for gvName, group := range o.v3Schema { caches[gvName] = group.jsonCache } - return cached.NewMerger(func(results map[string]cached.Result[timedSpec]) cached.Result[timedSpec] { + return cached.Merge(func(results map[string]cached.Result[timedSpec]) (timedSpec, string, error) { discovery := &OpenAPIV3Discovery{Paths: make(map[string]OpenAPIV3DiscoveryGroupVersion)} for gvName, result := range results { if result.Err != nil { - return cached.NewResultErr[timedSpec](result.Err) + return timedSpec{}, "", result.Err } discovery.Paths[gvName] = OpenAPIV3DiscoveryGroupVersion{ ServerRelativeURL: constructServerRelativeURL(gvName, result.Etag), @@ -158,9 +159,9 @@ func (o *OpenAPIService) buildDiscoveryCacheLocked() cached.Data[timedSpec] { } j, err := json.Marshal(discovery) if err != nil { - return cached.NewResultErr[timedSpec](err) + return timedSpec{}, "", err } - return cached.NewResultOK(timedSpec{spec: j, lastModified: time.Now()}, computeETag(j)) + return timedSpec{spec: j, lastModified: time.Now()}, computeETag(j), nil }, caches) } @@ -171,32 +172,32 @@ func (o *OpenAPIService) getSingleGroupBytes(getType string, group string) ([]by if !ok { return nil, "", time.Now(), fmt.Errorf("Cannot find CRD group %s", group) } - result := cached.Result[timedSpec]{} switch getType { case subTypeJSON: - result = v.jsonCache.Get() + ts, etag, err := v.jsonCache.Get() + return ts.spec, etag, ts.lastModified, err case subTypeProtobuf, subTypeProtobufDeprecated: - result = v.pbCache.Get() + ts, etag, err := v.pbCache.Get() + return ts.spec, etag, ts.lastModified, err default: return nil, "", time.Now(), fmt.Errorf("Invalid accept clause %s", getType) } - return result.Data.spec, result.Etag, result.Data.lastModified, result.Err } // UpdateGroupVersionLazy adds or updates an existing group with the new cached. -func (o *OpenAPIService) UpdateGroupVersionLazy(group string, openapi cached.Data[*spec3.OpenAPI]) { +func (o *OpenAPIService) UpdateGroupVersionLazy(group string, openapi cached.Value[*spec3.OpenAPI]) { o.mutex.Lock() defer o.mutex.Unlock() if _, ok := o.v3Schema[group]; !ok { o.v3Schema[group] = newOpenAPIV3Group() // Since there is a new item, we need to re-build the cache map. - o.discoveryCache.Replace(o.buildDiscoveryCacheLocked()) + o.discoveryCache.Store(o.buildDiscoveryCacheLocked()) } o.v3Schema[group].UpdateSpec(openapi) } func (o *OpenAPIService) UpdateGroupVersion(group string, openapi *spec3.OpenAPI) { - o.UpdateGroupVersionLazy(group, cached.NewResultOK(openapi, uuid.New().String())) + o.UpdateGroupVersionLazy(group, cached.Static(openapi, uuid.New().String())) } func (o *OpenAPIService) DeleteGroupVersion(group string) { @@ -204,19 +205,19 @@ func (o *OpenAPIService) DeleteGroupVersion(group string) { defer o.mutex.Unlock() delete(o.v3Schema, group) // Rebuild the merge cache map since the items have changed. 
- o.discoveryCache.Replace(o.buildDiscoveryCacheLocked()) + o.discoveryCache.Store(o.buildDiscoveryCacheLocked()) } func (o *OpenAPIService) HandleDiscovery(w http.ResponseWriter, r *http.Request) { - result := o.discoveryCache.Get() - if result.Err != nil { - klog.Errorf("Error serving discovery: %s", result.Err) + ts, etag, err := o.discoveryCache.Get() + if err != nil { + klog.Errorf("Error serving discovery: %s", err) w.WriteHeader(http.StatusInternalServerError) return } - w.Header().Set("Etag", strconv.Quote(result.Etag)) + w.Header().Set("Etag", strconv.Quote(etag)) w.Header().Set("Content-Type", "application/json") - http.ServeContent(w, r, "/openapi/v3", result.Data.lastModified, bytes.NewReader(result.Data.spec)) + http.ServeContent(w, r, "/openapi/v3", ts.lastModified, bytes.NewReader(ts.spec)) } func (o *OpenAPIService) HandleGroupVersion(w http.ResponseWriter, r *http.Request) { diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/flags.go b/vendor/k8s.io/kube-openapi/pkg/internal/flags.go index bef603782..da5485f6a 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/flags.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/flags.go @@ -22,3 +22,4 @@ var UseOptimizedJSONUnmarshalingV3 bool = true // Used by tests to selectively disable experimental JSON marshaler var UseOptimizedJSONMarshaling bool = true +var UseOptimizedJSONMarshalingV3 bool = true diff --git a/vendor/k8s.io/kube-openapi/pkg/openapiconv/convert.go b/vendor/k8s.io/kube-openapi/pkg/openapiconv/convert.go deleted file mode 100644 index e993fe23d..000000000 --- a/vendor/k8s.io/kube-openapi/pkg/openapiconv/convert.go +++ /dev/null @@ -1,322 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package openapiconv - -import ( - "strings" - - klog "k8s.io/klog/v2" - builderutil "k8s.io/kube-openapi/pkg/builder3/util" - "k8s.io/kube-openapi/pkg/spec3" - "k8s.io/kube-openapi/pkg/validation/spec" -) - -var OpenAPIV2DefPrefix = "#/definitions/" -var OpenAPIV3DefPrefix = "#/components/schemas/" - -// ConvertV2ToV3 converts an OpenAPI V2 object into V3. -// Certain references may be shared between the V2 and V3 objects in the conversion. 
-func ConvertV2ToV3(v2Spec *spec.Swagger) *spec3.OpenAPI { - v3Spec := &spec3.OpenAPI{ - Version: "3.0.0", - Info: v2Spec.Info, - ExternalDocs: ConvertExternalDocumentation(v2Spec.ExternalDocs), - Paths: ConvertPaths(v2Spec.Paths), - Components: ConvertComponents(v2Spec.SecurityDefinitions, v2Spec.Definitions, v2Spec.Responses, v2Spec.Produces), - } - - return v3Spec -} - -func ConvertExternalDocumentation(v2ED *spec.ExternalDocumentation) *spec3.ExternalDocumentation { - if v2ED == nil { - return nil - } - return &spec3.ExternalDocumentation{ - ExternalDocumentationProps: spec3.ExternalDocumentationProps{ - Description: v2ED.Description, - URL: v2ED.URL, - }, - } -} - -func ConvertComponents(v2SecurityDefinitions spec.SecurityDefinitions, v2Definitions spec.Definitions, v2Responses map[string]spec.Response, produces []string) *spec3.Components { - components := &spec3.Components{} - - if v2Definitions != nil { - components.Schemas = make(map[string]*spec.Schema) - } - for s, schema := range v2Definitions { - components.Schemas[s] = ConvertSchema(&schema) - } - if v2SecurityDefinitions != nil { - components.SecuritySchemes = make(spec3.SecuritySchemes) - } - for s, securityScheme := range v2SecurityDefinitions { - components.SecuritySchemes[s] = ConvertSecurityScheme(securityScheme) - } - if v2Responses != nil { - components.Responses = make(map[string]*spec3.Response) - } - for r, response := range v2Responses { - components.Responses[r] = ConvertResponse(&response, produces) - } - - return components -} - -func ConvertSchema(v2Schema *spec.Schema) *spec.Schema { - if v2Schema == nil { - return nil - } - v3Schema := spec.Schema{ - VendorExtensible: v2Schema.VendorExtensible, - SchemaProps: v2Schema.SchemaProps, - SwaggerSchemaProps: v2Schema.SwaggerSchemaProps, - ExtraProps: v2Schema.ExtraProps, - } - - if refString := v2Schema.Ref.String(); refString != "" { - if idx := strings.Index(refString, OpenAPIV2DefPrefix); idx != -1 { - v3Schema.Ref = spec.MustCreateRef(OpenAPIV3DefPrefix + refString[idx+len(OpenAPIV2DefPrefix):]) - } else { - klog.Errorf("Error: Swagger V2 Ref %s does not contain #/definitions\n", refString) - } - } - - if v2Schema.Properties != nil { - v3Schema.Properties = make(map[string]spec.Schema) - for key, property := range v2Schema.Properties { - v3Schema.Properties[key] = *ConvertSchema(&property) - } - } - if v2Schema.Items != nil { - v3Schema.Items = &spec.SchemaOrArray{ - Schema: ConvertSchema(v2Schema.Items.Schema), - Schemas: ConvertSchemaList(v2Schema.Items.Schemas), - } - } - - if v2Schema.AdditionalProperties != nil { - v3Schema.AdditionalProperties = &spec.SchemaOrBool{ - Schema: ConvertSchema(v2Schema.AdditionalProperties.Schema), - Allows: v2Schema.AdditionalProperties.Allows, - } - } - if v2Schema.AdditionalItems != nil { - v3Schema.AdditionalItems = &spec.SchemaOrBool{ - Schema: ConvertSchema(v2Schema.AdditionalItems.Schema), - Allows: v2Schema.AdditionalItems.Allows, - } - } - - return builderutil.WrapRefs(&v3Schema) -} - -func ConvertSchemaList(v2SchemaList []spec.Schema) []spec.Schema { - if v2SchemaList == nil { - return nil - } - v3SchemaList := []spec.Schema{} - for _, s := range v2SchemaList { - v3SchemaList = append(v3SchemaList, *ConvertSchema(&s)) - } - return v3SchemaList -} - -func ConvertSecurityScheme(v2securityScheme *spec.SecurityScheme) *spec3.SecurityScheme { - if v2securityScheme == nil { - return nil - } - securityScheme := &spec3.SecurityScheme{ - VendorExtensible: v2securityScheme.VendorExtensible, - SecuritySchemeProps: 
spec3.SecuritySchemeProps{ - Description: v2securityScheme.Description, - Type: v2securityScheme.Type, - Name: v2securityScheme.Name, - In: v2securityScheme.In, - }, - } - - if v2securityScheme.Flow != "" { - securityScheme.Flows = make(map[string]*spec3.OAuthFlow) - securityScheme.Flows[v2securityScheme.Flow] = &spec3.OAuthFlow{ - OAuthFlowProps: spec3.OAuthFlowProps{ - AuthorizationUrl: v2securityScheme.AuthorizationURL, - TokenUrl: v2securityScheme.TokenURL, - Scopes: v2securityScheme.Scopes, - }, - } - } - return securityScheme -} - -func ConvertPaths(v2Paths *spec.Paths) *spec3.Paths { - if v2Paths == nil { - return nil - } - paths := &spec3.Paths{ - VendorExtensible: v2Paths.VendorExtensible, - } - - if v2Paths.Paths != nil { - paths.Paths = make(map[string]*spec3.Path) - } - for k, v := range v2Paths.Paths { - paths.Paths[k] = ConvertPathItem(v) - } - return paths -} - -func ConvertPathItem(v2pathItem spec.PathItem) *spec3.Path { - path := &spec3.Path{ - Refable: v2pathItem.Refable, - PathProps: spec3.PathProps{ - Get: ConvertOperation(v2pathItem.Get), - Put: ConvertOperation(v2pathItem.Put), - Post: ConvertOperation(v2pathItem.Post), - Delete: ConvertOperation(v2pathItem.Delete), - Options: ConvertOperation(v2pathItem.Options), - Head: ConvertOperation(v2pathItem.Head), - Patch: ConvertOperation(v2pathItem.Patch), - }, - VendorExtensible: v2pathItem.VendorExtensible, - } - for _, param := range v2pathItem.Parameters { - path.Parameters = append(path.Parameters, ConvertParameter(param)) - } - return path -} - -func ConvertOperation(v2Operation *spec.Operation) *spec3.Operation { - if v2Operation == nil { - return nil - } - operation := &spec3.Operation{ - VendorExtensible: v2Operation.VendorExtensible, - OperationProps: spec3.OperationProps{ - Description: v2Operation.Description, - ExternalDocs: ConvertExternalDocumentation(v2Operation.OperationProps.ExternalDocs), - Tags: v2Operation.Tags, - Summary: v2Operation.Summary, - Deprecated: v2Operation.Deprecated, - OperationId: v2Operation.ID, - }, - } - - for _, param := range v2Operation.Parameters { - if param.ParamProps.Name == "body" && param.ParamProps.Schema != nil { - operation.OperationProps.RequestBody = &spec3.RequestBody{ - RequestBodyProps: spec3.RequestBodyProps{}, - } - if v2Operation.Consumes != nil { - operation.RequestBody.Content = make(map[string]*spec3.MediaType) - } - for _, consumer := range v2Operation.Consumes { - operation.RequestBody.Content[consumer] = &spec3.MediaType{ - MediaTypeProps: spec3.MediaTypeProps{ - Schema: ConvertSchema(param.ParamProps.Schema), - }, - } - } - } else { - operation.Parameters = append(operation.Parameters, ConvertParameter(param)) - } - } - - operation.Responses = &spec3.Responses{ResponsesProps: spec3.ResponsesProps{ - Default: ConvertResponse(v2Operation.Responses.Default, v2Operation.Produces), - }, - VendorExtensible: v2Operation.Responses.VendorExtensible, - } - - if v2Operation.Responses.StatusCodeResponses != nil { - operation.Responses.StatusCodeResponses = make(map[int]*spec3.Response) - } - for k, v := range v2Operation.Responses.StatusCodeResponses { - operation.Responses.StatusCodeResponses[k] = ConvertResponse(&v, v2Operation.Produces) - } - return operation -} - -func ConvertResponse(v2Response *spec.Response, produces []string) *spec3.Response { - if v2Response == nil { - return nil - } - response := &spec3.Response{ - Refable: ConvertRefableResponse(v2Response.Refable), - VendorExtensible: v2Response.VendorExtensible, - ResponseProps: spec3.ResponseProps{ - 
Description: v2Response.Description, - }, - } - - if v2Response.Schema != nil { - if produces != nil { - response.Content = make(map[string]*spec3.MediaType) - } - for _, producer := range produces { - response.ResponseProps.Content[producer] = &spec3.MediaType{ - MediaTypeProps: spec3.MediaTypeProps{ - Schema: ConvertSchema(v2Response.Schema), - }, - } - } - } - return response -} - -func ConvertParameter(v2Param spec.Parameter) *spec3.Parameter { - param := &spec3.Parameter{ - Refable: ConvertRefableParameter(v2Param.Refable), - VendorExtensible: v2Param.VendorExtensible, - ParameterProps: spec3.ParameterProps{ - Name: v2Param.Name, - Description: v2Param.Description, - In: v2Param.In, - Required: v2Param.Required, - Schema: ConvertSchema(v2Param.Schema), - AllowEmptyValue: v2Param.AllowEmptyValue, - }, - } - // Convert SimpleSchema into Schema - if param.Schema == nil { - param.Schema = &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{v2Param.Type}, - Format: v2Param.Format, - UniqueItems: v2Param.UniqueItems, - }, - } - } - - return param -} - -func ConvertRefableParameter(refable spec.Refable) spec.Refable { - if refable.Ref.String() != "" { - return spec.Refable{Ref: spec.MustCreateRef(strings.Replace(refable.Ref.String(), "#/parameters/", "#/components/parameters/", 1))} - } - return refable -} - -func ConvertRefableResponse(refable spec.Refable) spec.Refable { - if refable.Ref.String() != "" { - return spec.Refable{Ref: spec.MustCreateRef(strings.Replace(refable.Ref.String(), "#/responses/", "#/components/responses/", 1))} - } - return refable -} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go b/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go index 699291f1d..1f62c6e77 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go @@ -32,6 +32,9 @@ type Encoding struct { // MarshalJSON is a custom marshal function that knows how to encode Encoding as JSON func (e *Encoding) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(e) + } b1, err := json.Marshal(e.EncodingProps) if err != nil { return nil, err @@ -43,6 +46,16 @@ func (e *Encoding) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (e *Encoding) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + EncodingProps encodingPropsOmitZero `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(e.Extensions) + x.EncodingProps = encodingPropsOmitZero(e.EncodingProps) + return opts.MarshalNext(enc, x) +} + func (e *Encoding) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, e) @@ -82,3 +95,11 @@ type EncodingProps struct { // AllowReserved determines whether the parameter value SHOULD allow reserved characters, as defined by RFC3986 AllowReserved bool `json:"allowReserved,omitempty"` } + +type encodingPropsOmitZero struct { + ContentType string `json:"contentType,omitempty"` + Headers map[string]*Header `json:"headers,omitempty"` + Style string `json:"style,omitempty"` + Explode bool `json:"explode,omitzero"` + AllowReserved bool `json:"allowReserved,omitzero"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/example.go b/vendor/k8s.io/kube-openapi/pkg/spec3/example.go index 03b872717..8834a92e6 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/example.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/example.go @@ -36,6 +36,9 @@ type 
Example struct { // MarshalJSON is a custom marshal function that knows how to encode RequestBody as JSON func (e *Example) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(e) + } b1, err := json.Marshal(e.Refable) if err != nil { return nil, err @@ -50,6 +53,17 @@ func (e *Example) MarshalJSON() ([]byte, error) { } return swag.ConcatJSON(b1, b2, b3), nil } +func (e *Example) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + ExampleProps `json:",inline"` + spec.Extensions + } + x.Ref = e.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(e.Extensions) + x.ExampleProps = e.ExampleProps + return opts.MarshalNext(enc, x) +} func (e *Example) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go b/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go index e79956721..f0515496e 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go @@ -39,6 +39,9 @@ type ExternalDocumentationProps struct { // MarshalJSON is a custom marshal function that knows how to encode Responses as JSON func (e *ExternalDocumentation) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(e) + } b1, err := json.Marshal(e.ExternalDocumentationProps) if err != nil { return nil, err @@ -50,6 +53,16 @@ func (e *ExternalDocumentation) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (e *ExternalDocumentation) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + ExternalDocumentationProps `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(e.Extensions) + x.ExternalDocumentationProps = e.ExternalDocumentationProps + return opts.MarshalNext(enc, x) +} + func (e *ExternalDocumentation) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, e) diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go index bc19dd48e..08b6246ce 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go @@ -35,6 +35,18 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ func(o *OpenAPI, c fuzz.Continue) { c.FuzzNoCustom(o) o.Version = "3.0.0" + for i, val := range o.SecurityRequirement { + if val == nil { + o.SecurityRequirement[i] = make(map[string][]string) + } + + for k, v := range val { + if v == nil { + val[k] = make([]string, 0) + } + } + } + }, func(r *interface{}, c fuzz.Continue) { switch c.Intn(3) { @@ -169,6 +181,21 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ c.Fuzz(&v.ResponseProps) c.Fuzz(&v.VendorExtensible) }, + func(v *Operation, c fuzz.Continue) { + c.FuzzNoCustom(v) + // Do not fuzz null values into the array. 
+ for i, val := range v.SecurityRequirement { + if val == nil { + v.SecurityRequirement[i] = make(map[string][]string) + } + + for k, v := range val { + if v == nil { + val[k] = make([]string, 0) + } + } + } + }, func(v *spec.Extensions, c fuzz.Continue) { numChildren := c.Intn(5) for i := 0; i < numChildren; i++ { diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/header.go b/vendor/k8s.io/kube-openapi/pkg/spec3/header.go index ee5a30f79..9ea30628c 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/header.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/header.go @@ -36,6 +36,9 @@ type Header struct { // MarshalJSON is a custom marshal function that knows how to encode Header as JSON func (h *Header) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(h) + } b1, err := json.Marshal(h.Refable) if err != nil { return nil, err @@ -51,6 +54,18 @@ func (h *Header) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (h *Header) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + HeaderProps headerPropsOmitZero `json:",inline"` + spec.Extensions + } + x.Ref = h.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(h.Extensions) + x.HeaderProps = headerPropsOmitZero(h.HeaderProps) + return opts.MarshalNext(enc, x) +} + func (h *Header) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, h) @@ -109,3 +124,19 @@ type HeaderProps struct { // Examples of the header Examples map[string]*Example `json:"examples,omitempty"` } + +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). 
+type headerPropsOmitZero struct { + Description string `json:"description,omitempty"` + Required bool `json:"required,omitzero"` + Deprecated bool `json:"deprecated,omitzero"` + AllowEmptyValue bool `json:"allowEmptyValue,omitzero"` + Style string `json:"style,omitempty"` + Explode bool `json:"explode,omitzero"` + AllowReserved bool `json:"allowReserved,omitzero"` + Schema *spec.Schema `json:"schema,omitzero"` + Content map[string]*MediaType `json:"content,omitempty"` + Example interface{} `json:"example,omitempty"` + Examples map[string]*Example `json:"examples,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go b/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go index d390e69bc..47eef1edb 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go @@ -35,6 +35,9 @@ type MediaType struct { // MarshalJSON is a custom marshal function that knows how to encode MediaType as JSON func (m *MediaType) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(m) + } b1, err := json.Marshal(m.MediaTypeProps) if err != nil { return nil, err @@ -46,6 +49,16 @@ func (m *MediaType) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (e *MediaType) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + MediaTypeProps mediaTypePropsOmitZero `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(e.Extensions) + x.MediaTypeProps = mediaTypePropsOmitZero(e.MediaTypeProps) + return opts.MarshalNext(enc, x) +} + func (m *MediaType) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, m) @@ -84,3 +97,10 @@ type MediaTypeProps struct { // A map between a property name and its encoding information. The key, being the property name, MUST exist in the schema as a property. 
The encoding object SHALL only apply to requestBody objects when the media type is multipart or application/x-www-form-urlencoded Encoding map[string]*Encoding `json:"encoding,omitempty"` } + +type mediaTypePropsOmitZero struct { + Schema *spec.Schema `json:"schema,omitzero"` + Example interface{} `json:"example,omitempty"` + Examples map[string]*Example `json:"examples,omitempty"` + Encoding map[string]*Encoding `json:"encoding,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go b/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go index 28230610b..f1e102547 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go @@ -35,6 +35,9 @@ type Operation struct { // MarshalJSON is a custom marshal function that knows how to encode Operation as JSON func (o *Operation) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(o) + } b1, err := json.Marshal(o.OperationProps) if err != nil { return nil, err @@ -46,6 +49,16 @@ func (o *Operation) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (o *Operation) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + spec.Extensions + OperationProps operationPropsOmitZero `json:",inline"` + } + x.Extensions = internal.SanitizeExtensions(o.Extensions) + x.OperationProps = operationPropsOmitZero(o.OperationProps) + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON hydrates this items instance with the data from JSON func (o *Operation) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { @@ -95,3 +108,17 @@ type OperationProps struct { // Servers contains an alternative server array to service this operation Servers []*Server `json:"servers,omitempty"` } + +type operationPropsOmitZero struct { + Tags []string `json:"tags,omitempty"` + Summary string `json:"summary,omitempty"` + Description string `json:"description,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"` + OperationId string `json:"operationId,omitempty"` + Parameters []*Parameter `json:"parameters,omitempty"` + RequestBody *RequestBody `json:"requestBody,omitzero"` + Responses *Responses `json:"responses,omitzero"` + Deprecated bool `json:"deprecated,omitzero"` + SecurityRequirement []map[string][]string `json:"security,omitempty"` + Servers []*Server `json:"servers,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go b/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go index 613da71a6..ada7edb63 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go @@ -36,6 +36,9 @@ type Parameter struct { // MarshalJSON is a custom marshal function that knows how to encode Parameter as JSON func (p *Parameter) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(p) + } b1, err := json.Marshal(p.Refable) if err != nil { return nil, err @@ -51,6 +54,18 @@ func (p *Parameter) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (p *Parameter) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + ParameterProps parameterPropsOmitZero `json:",inline"` + spec.Extensions + } + x.Ref = p.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(p.Extensions) + x.ParameterProps = 
parameterPropsOmitZero(p.ParameterProps) + return opts.MarshalNext(enc, x) +} + func (p *Parameter) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, p) @@ -114,3 +129,19 @@ type ParameterProps struct { // Examples of the parameter's potential value. Each example SHOULD contain a value in the correct format as specified in the parameter encoding Examples map[string]*Example `json:"examples,omitempty"` } + +type parameterPropsOmitZero struct { + Name string `json:"name,omitempty"` + In string `json:"in,omitempty"` + Description string `json:"description,omitempty"` + Required bool `json:"required,omitzero"` + Deprecated bool `json:"deprecated,omitzero"` + AllowEmptyValue bool `json:"allowEmptyValue,omitzero"` + Style string `json:"style,omitempty"` + Explode bool `json:"explode,omitzero"` + AllowReserved bool `json:"allowReserved,omitzero"` + Schema *spec.Schema `json:"schema,omitzero"` + Content map[string]*MediaType `json:"content,omitempty"` + Example interface{} `json:"example,omitempty"` + Examples map[string]*Example `json:"examples,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/path.go b/vendor/k8s.io/kube-openapi/pkg/spec3/path.go index 40d9061ac..16fbbb4dd 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/path.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/path.go @@ -35,15 +35,41 @@ type Paths struct { // MarshalJSON is a custom marshal function that knows how to encode Paths as JSON func (p *Paths) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(p.Paths) + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(p) + } + b1, err := json.Marshal(p.VendorExtensible) if err != nil { return nil, err } - b2, err := json.Marshal(p.VendorExtensible) + + pths := make(map[string]*Path) + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + pths[k] = v + } + } + b2, err := json.Marshal(pths) if err != nil { return nil, err } - return swag.ConcatJSON(b1, b2), nil + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} + +func (p *Paths) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + m := make(map[string]any, len(p.Extensions)+len(p.Paths)) + for k, v := range p.Extensions { + if internal.IsExtensionKey(k) { + m[k] = v + } + } + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + m[k] = v + } + } + return opts.MarshalNext(enc, m) } // UnmarshalJSON hydrates this items instance with the data from JSON @@ -144,6 +170,9 @@ type Path struct { // MarshalJSON is a custom marshal function that knows how to encode Path as JSON func (p *Path) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(p) + } b1, err := json.Marshal(p.Refable) if err != nil { return nil, err @@ -159,6 +188,18 @@ func (p *Path) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (p *Path) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + spec.Extensions + PathProps + } + x.Ref = p.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(p.Extensions) + x.PathProps = p.PathProps + return opts.MarshalNext(enc, x) +} + func (p *Path) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, p) diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go b/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go index 
33267ce67..6f8607e40 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go @@ -36,6 +36,9 @@ type RequestBody struct { // MarshalJSON is a custom marshal function that knows how to encode RequestBody as JSON func (r *RequestBody) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.Refable) if err != nil { return nil, err @@ -51,6 +54,18 @@ func (r *RequestBody) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (r *RequestBody) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + RequestBodyProps requestBodyPropsOmitZero `json:",inline"` + spec.Extensions + } + x.Ref = r.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(r.Extensions) + x.RequestBodyProps = requestBodyPropsOmitZero(r.RequestBodyProps) + return opts.MarshalNext(enc, x) +} + func (r *RequestBody) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, r) @@ -77,6 +92,12 @@ type RequestBodyProps struct { Required bool `json:"required,omitempty"` } +type requestBodyPropsOmitZero struct { + Description string `json:"description,omitempty"` + Content map[string]*MediaType `json:"content,omitempty"` + Required bool `json:"required,omitzero"` +} + func (r *RequestBody) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { var x struct { spec.Extensions diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/response.go b/vendor/k8s.io/kube-openapi/pkg/spec3/response.go index 95b388e6c..73e241fdc 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/response.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/response.go @@ -37,6 +37,9 @@ type Responses struct { // MarshalJSON is a custom marshal function that knows how to encode Responses as JSON func (r *Responses) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.ResponsesProps) if err != nil { return nil, err @@ -48,6 +51,25 @@ func (r *Responses) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (r Responses) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + type ArbitraryKeys map[string]interface{} + var x struct { + ArbitraryKeys + Default *Response `json:"default,omitzero"` + } + x.ArbitraryKeys = make(map[string]any, len(r.Extensions)+len(r.StatusCodeResponses)) + for k, v := range r.Extensions { + if internal.IsExtensionKey(k) { + x.ArbitraryKeys[k] = v + } + } + for k, v := range r.StatusCodeResponses { + x.ArbitraryKeys[strconv.Itoa(k)] = v + } + x.Default = r.Default + return opts.MarshalNext(enc, x) +} + func (r *Responses) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, r) @@ -179,6 +201,9 @@ type Response struct { // MarshalJSON is a custom marshal function that knows how to encode Response as JSON func (r *Response) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.Refable) if err != nil { return nil, err @@ -194,6 +219,18 @@ func (r *Response) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (r Response) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x 
struct { + Ref string `json:"$ref,omitempty"` + spec.Extensions + ResponseProps `json:",inline"` + } + x.Ref = r.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(r.Extensions) + x.ResponseProps = r.ResponseProps + return opts.MarshalNext(enc, x) +} + func (r *Response) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, r) @@ -247,6 +284,9 @@ type Link struct { // MarshalJSON is a custom marshal function that knows how to encode Link as JSON func (r *Link) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.Refable) if err != nil { return nil, err @@ -262,6 +302,18 @@ func (r *Link) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (r *Link) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + spec.Extensions + LinkProps `json:",inline"` + } + x.Ref = r.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(r.Extensions) + x.LinkProps = r.LinkProps + return opts.MarshalNext(enc, x) +} + func (r *Link) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, r) diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go b/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go index edf7e6de3..dd1e98ed8 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go @@ -20,6 +20,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -32,6 +34,9 @@ type SecurityScheme struct { // MarshalJSON is a custom marshal function that knows how to encode SecurityScheme as JSON func (s *SecurityScheme) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.SecuritySchemeProps) if err != nil { return nil, err @@ -47,6 +52,18 @@ func (s *SecurityScheme) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (s *SecurityScheme) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + SecuritySchemeProps `json:",inline"` + spec.Extensions + } + x.Ref = s.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.SecuritySchemeProps = s.SecuritySchemeProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON hydrates this items instance with the data from JSON func (s *SecurityScheme) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/server.go b/vendor/k8s.io/kube-openapi/pkg/spec3/server.go index d5df0a781..654a42c06 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/server.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/server.go @@ -41,6 +41,9 @@ type ServerProps struct { // MarshalJSON is a custom marshal function that knows how to encode Responses as JSON func (s *Server) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.ServerProps) if err != nil { return nil, err @@ -52,6 +55,16 @@ func (s *Server) MarshalJSON() 
([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (s *Server) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + ServerProps `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.ServerProps = s.ServerProps + return opts.MarshalNext(enc, x) +} + func (s *Server) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, s) @@ -96,6 +109,9 @@ type ServerVariableProps struct { // MarshalJSON is a custom marshal function that knows how to encode Responses as JSON func (s *ServerVariable) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.ServerVariableProps) if err != nil { return nil, err @@ -107,6 +123,16 @@ func (s *ServerVariable) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (s *ServerVariable) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + ServerVariableProps `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.ServerVariableProps = s.ServerVariableProps + return opts.MarshalNext(enc, x) +} + func (s *ServerVariable) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, s) diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go b/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go index bed096fb7..5db819c7f 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go @@ -36,6 +36,8 @@ type OpenAPI struct { Servers []*Server `json:"servers,omitempty"` // Components hold various schemas for the specification Components *Components `json:"components,omitempty"` + // SecurityRequirement holds a declaration of which security mechanisms can be used across the API + SecurityRequirement []map[string][]string `json:"security,omitempty"` // ExternalDocs holds additional external documentation ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` } @@ -48,3 +50,26 @@ func (o *OpenAPI) UnmarshalJSON(data []byte) error { } return json.Unmarshal(data, &p) } + +func (o *OpenAPI) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(o) + } + type OpenAPIWithNoFunctions OpenAPI + p := (*OpenAPIWithNoFunctions)(o) + return json.Marshal(&p) +} + +func (o *OpenAPI) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + type OpenAPIOmitZero struct { + Version string `json:"openapi"` + Info *spec.Info `json:"info"` + Paths *Paths `json:"paths,omitzero"` + Servers []*Server `json:"servers,omitempty"` + Components *Components `json:"components,omitzero"` + SecurityRequirement []map[string][]string `json:"security,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"` + } + x := (*OpenAPIOmitZero)(o) + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go deleted file mode 100644 index c66f998f5..000000000 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go +++ /dev/null @@ -1,502 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package spec - -import ( - "github.com/go-openapi/jsonreference" - "github.com/google/go-cmp/cmp" - fuzz "github.com/google/gofuzz" -) - -var SwaggerFuzzFuncs []interface{} = []interface{}{ - func(v *Responses, c fuzz.Continue) { - c.FuzzNoCustom(v) - if v.Default != nil { - // Check if we hit maxDepth and left an incomplete value - if v.Default.Description == "" { - v.Default = nil - v.StatusCodeResponses = nil - } - } - - // conversion has no way to discern empty statusCodeResponses from - // nil, since "default" is always included in the map. - // So avoid empty responses list - if len(v.StatusCodeResponses) == 0 { - v.StatusCodeResponses = nil - } - }, - func(v *Operation, c fuzz.Continue) { - c.FuzzNoCustom(v) - - if v != nil { - // force non-nil - v.Responses = &Responses{} - c.Fuzz(v.Responses) - - v.Schemes = nil - if c.RandBool() { - v.Schemes = append(v.Schemes, "http") - } - - if c.RandBool() { - v.Schemes = append(v.Schemes, "https") - } - - if c.RandBool() { - v.Schemes = append(v.Schemes, "ws") - } - - if c.RandBool() { - v.Schemes = append(v.Schemes, "wss") - } - - // Gnostic unconditionally makes security values non-null - // So do not fuzz null values into the array. - for i, val := range v.Security { - if val == nil { - v.Security[i] = make(map[string][]string) - } - - for k, v := range val { - if v == nil { - val[k] = make([]string, 0) - } - } - } - } - }, - func(v map[int]Response, c fuzz.Continue) { - n := 0 - c.Fuzz(&n) - if n == 0 { - // Test that fuzzer is not at maxDepth so we do not - // end up with empty elements - return - } - - // Prevent negative numbers - num := c.Intn(4) - for i := 0; i < num+2; i++ { - val := Response{} - c.Fuzz(&val) - - val.Description = c.RandString() + "x" - v[100*(i+1)+c.Intn(100)] = val - } - }, - func(v map[string]PathItem, c fuzz.Continue) { - n := 0 - c.Fuzz(&n) - if n == 0 { - // Test that fuzzer is not at maxDepth so we do not - // end up with empty elements - return - } - - num := c.Intn(5) - for i := 0; i < num+2; i++ { - val := PathItem{} - c.Fuzz(&val) - - // Ref params are only allowed in certain locations, so - // possibly add a few to PathItems - numRefsToAdd := c.Intn(5) - for i := 0; i < numRefsToAdd; i++ { - theRef := Parameter{} - c.Fuzz(&theRef.Refable) - - val.Parameters = append(val.Parameters, theRef) - } - - v["/"+c.RandString()] = val - } - }, - func(v *SchemaOrArray, c fuzz.Continue) { - *v = SchemaOrArray{} - // gnostic parser just doesn't support more - // than one Schema here - v.Schema = &Schema{} - c.Fuzz(&v.Schema) - - }, - func(v *SchemaOrBool, c fuzz.Continue) { - *v = SchemaOrBool{} - - if c.RandBool() { - v.Allows = c.RandBool() - } else { - v.Schema = &Schema{} - v.Allows = true - c.Fuzz(&v.Schema) - } - }, - func(v map[string]Response, c fuzz.Continue) { - n := 0 - c.Fuzz(&n) - if n == 0 { - // Test that fuzzer is not at maxDepth so we do not - // end up with empty elements - return - } - - // Response definitions are not allowed to - // be refs - for i := 0; i < c.Intn(5)+1; i++ { - resp := &Response{} - - c.Fuzz(resp) - resp.Ref = Ref{} - resp.Description = c.RandString() + "x" - - // Response refs 
are not vendor extensible by gnostic - resp.VendorExtensible.Extensions = nil - v[c.RandString()+"x"] = *resp - } - }, - func(v *Header, c fuzz.Continue) { - if v != nil { - c.FuzzNoCustom(v) - - // descendant Items of Header may not be refs - cur := v.Items - for cur != nil { - cur.Ref = Ref{} - cur = cur.Items - } - } - }, - func(v *Ref, c fuzz.Continue) { - *v = Ref{} - v.Ref, _ = jsonreference.New("http://asd.com/" + c.RandString()) - }, - func(v *Response, c fuzz.Continue) { - *v = Response{} - if c.RandBool() { - v.Ref = Ref{} - v.Ref.Ref, _ = jsonreference.New("http://asd.com/" + c.RandString()) - } else { - c.Fuzz(&v.VendorExtensible) - c.Fuzz(&v.Schema) - c.Fuzz(&v.ResponseProps) - - v.Headers = nil - v.Ref = Ref{} - - n := 0 - c.Fuzz(&n) - if n != 0 { - // Test that fuzzer is not at maxDepth so we do not - // end up with empty elements - num := c.Intn(4) - for i := 0; i < num; i++ { - if v.Headers == nil { - v.Headers = make(map[string]Header) - } - hdr := Header{} - c.Fuzz(&hdr) - if hdr.Type == "" { - // hit maxDepth, just abort trying to make haders - v.Headers = nil - break - } - v.Headers[c.RandString()+"x"] = hdr - } - } else { - v.Headers = nil - } - } - - v.Description = c.RandString() + "x" - - // Gnostic parses empty as nil, so to keep avoid putting empty - if len(v.Headers) == 0 { - v.Headers = nil - } - }, - func(v **Info, c fuzz.Continue) { - // Info is never nil - *v = &Info{} - c.FuzzNoCustom(*v) - - (*v).Title = c.RandString() + "x" - }, - func(v *Extensions, c fuzz.Continue) { - // gnostic parser only picks up x- vendor extensions - numChildren := c.Intn(5) - for i := 0; i < numChildren; i++ { - if *v == nil { - *v = Extensions{} - } - (*v)["x-"+c.RandString()] = c.RandString() - } - }, - func(v *Swagger, c fuzz.Continue) { - c.FuzzNoCustom(v) - - if v.Paths == nil { - // Force paths non-nil since it does not have omitempty in json tag. - // This means a perfect roundtrip (via json) is impossible, - // since we can't tell the difference between empty/unspecified paths - v.Paths = &Paths{} - c.Fuzz(v.Paths) - } - - v.Swagger = "2.0" - - // Gnostic support serializing ID at all - // unavoidable data loss - v.ID = "" - - v.Schemes = nil - if c.RandUint64()%2 == 1 { - v.Schemes = append(v.Schemes, "http") - } - - if c.RandUint64()%2 == 1 { - v.Schemes = append(v.Schemes, "https") - } - - if c.RandUint64()%2 == 1 { - v.Schemes = append(v.Schemes, "ws") - } - - if c.RandUint64()%2 == 1 { - v.Schemes = append(v.Schemes, "wss") - } - - // Gnostic unconditionally makes security values non-null - // So do not fuzz null values into the array. 
- for i, val := range v.Security { - if val == nil { - v.Security[i] = make(map[string][]string) - } - - for k, v := range val { - if v == nil { - val[k] = make([]string, 0) - } - } - } - }, - func(v *SecurityScheme, c fuzz.Continue) { - v.Description = c.RandString() + "x" - c.Fuzz(&v.VendorExtensible) - - switch c.Intn(3) { - case 0: - v.Type = "basic" - case 1: - v.Type = "apiKey" - switch c.Intn(2) { - case 0: - v.In = "header" - case 1: - v.In = "query" - default: - panic("unreachable") - } - v.Name = "x" + c.RandString() - case 2: - v.Type = "oauth2" - - switch c.Intn(4) { - case 0: - v.Flow = "accessCode" - v.TokenURL = "https://" + c.RandString() - v.AuthorizationURL = "https://" + c.RandString() - case 1: - v.Flow = "application" - v.TokenURL = "https://" + c.RandString() - case 2: - v.Flow = "implicit" - v.AuthorizationURL = "https://" + c.RandString() - case 3: - v.Flow = "password" - v.TokenURL = "https://" + c.RandString() - default: - panic("unreachable") - } - c.Fuzz(&v.Scopes) - default: - panic("unreachable") - } - }, - func(v *interface{}, c fuzz.Continue) { - *v = c.RandString() + "x" - }, - func(v *string, c fuzz.Continue) { - *v = c.RandString() + "x" - }, - func(v *ExternalDocumentation, c fuzz.Continue) { - v.Description = c.RandString() + "x" - v.URL = c.RandString() + "x" - }, - func(v *SimpleSchema, c fuzz.Continue) { - c.FuzzNoCustom(v) - - switch c.Intn(5) { - case 0: - v.Type = "string" - case 1: - v.Type = "number" - case 2: - v.Type = "boolean" - case 3: - v.Type = "integer" - case 4: - v.Type = "array" - default: - panic("unreachable") - } - - switch c.Intn(5) { - case 0: - v.CollectionFormat = "csv" - case 1: - v.CollectionFormat = "ssv" - case 2: - v.CollectionFormat = "tsv" - case 3: - v.CollectionFormat = "pipes" - case 4: - v.CollectionFormat = "" - default: - panic("unreachable") - } - - // None of the types which include SimpleSchema in our definitions - // actually support "example" in the official spec - v.Example = nil - - // unsupported by openapi - v.Nullable = false - }, - func(v *int64, c fuzz.Continue) { - c.Fuzz(v) - - // Gnostic does not differentiate between 0 and non-specified - // so avoid using 0 for fuzzer - if *v == 0 { - *v = 1 - } - }, - func(v *float64, c fuzz.Continue) { - c.Fuzz(v) - - // Gnostic does not differentiate between 0 and non-specified - // so avoid using 0 for fuzzer - if *v == 0.0 { - *v = 1.0 - } - }, - func(v *Parameter, c fuzz.Continue) { - if v == nil { - return - } - c.Fuzz(&v.VendorExtensible) - if c.RandBool() { - // body param - v.Description = c.RandString() + "x" - v.Name = c.RandString() + "x" - v.In = "body" - c.Fuzz(&v.Description) - c.Fuzz(&v.Required) - - v.Schema = &Schema{} - c.Fuzz(&v.Schema) - - } else { - c.Fuzz(&v.SimpleSchema) - c.Fuzz(&v.CommonValidations) - v.AllowEmptyValue = false - v.Description = c.RandString() + "x" - v.Name = c.RandString() + "x" - - switch c.Intn(4) { - case 0: - // Header param - v.In = "header" - case 1: - // Form data param - v.In = "formData" - v.AllowEmptyValue = c.RandBool() - case 2: - // Query param - v.In = "query" - v.AllowEmptyValue = c.RandBool() - case 3: - // Path param - v.In = "path" - v.Required = true - default: - panic("unreachable") - } - - // descendant Items of Parameter may not be refs - cur := v.Items - for cur != nil { - cur.Ref = Ref{} - cur = cur.Items - } - } - }, - func(v *Schema, c fuzz.Continue) { - if c.RandBool() { - // file schema - c.Fuzz(&v.Default) - c.Fuzz(&v.Description) - c.Fuzz(&v.Example) - c.Fuzz(&v.ExternalDocs) - - 
c.Fuzz(&v.Format) - c.Fuzz(&v.ReadOnly) - c.Fuzz(&v.Required) - c.Fuzz(&v.Title) - v.Type = StringOrArray{"file"} - - } else { - // normal schema - c.Fuzz(&v.SchemaProps) - c.Fuzz(&v.SwaggerSchemaProps) - c.Fuzz(&v.VendorExtensible) - // c.Fuzz(&v.ExtraProps) - // ExtraProps will not roundtrip - gnostic throws out - // unrecognized keys - } - - // Not supported by official openapi v2 spec - // and stripped by k8s apiserver - v.ID = "" - v.AnyOf = nil - v.OneOf = nil - v.Not = nil - v.Nullable = false - v.AdditionalItems = nil - v.Schema = "" - v.PatternProperties = nil - v.Definitions = nil - v.Dependencies = nil - }, -} - -var SwaggerDiffOptions = []cmp.Option{ - // cmp.Diff panics on Ref since jsonreference.Ref uses unexported fields - cmp.Comparer(func(a Ref, b Ref) bool { - return a.String() == b.String() - }), -} diff --git a/vendor/k8s.io/kubelet/pkg/apis/stats/v1alpha1/types.go b/vendor/k8s.io/kubelet/pkg/apis/stats/v1alpha1/types.go index f201ce361..54355503f 100644 --- a/vendor/k8s.io/kubelet/pkg/apis/stats/v1alpha1/types.go +++ b/vendor/k8s.io/kubelet/pkg/apis/stats/v1alpha1/types.go @@ -83,6 +83,11 @@ type RuntimeStats struct { // Usage here refers to the total number of bytes occupied by images on the filesystem. // +optional ImageFs *FsStats `json:"imageFs,omitempty"` + // Stats about the underlying filesystem where container's writeable layer is stored. + // This filesystem could be the same as the primary (root) filesystem or the ImageFS. + // Usage here refers to the total number of bytes occupied by the writeable layer on the filesystem. + // +optional + ContainerFs *FsStats `json:"containerFs,omitempty"` } const ( diff --git a/vendor/k8s.io/kubernetes/pkg/api/service/util.go b/vendor/k8s.io/kubernetes/pkg/api/service/util.go index c73d96a6c..fefb13b2b 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/service/util.go +++ b/vendor/k8s.io/kubernetes/pkg/api/service/util.go @@ -67,6 +67,13 @@ func GetLoadBalancerSourceRanges(service *api.Service) (utilnet.IPNetSet, error) return ipnets, nil } +// ExternallyAccessible checks if service is externally accessible. +func ExternallyAccessible(service *api.Service) bool { + return service.Spec.Type == api.ServiceTypeLoadBalancer || + service.Spec.Type == api.ServiceTypeNodePort || + (service.Spec.Type == api.ServiceTypeClusterIP && len(service.Spec.ExternalIPs) > 0) +} + // RequestsOnlyLocalTraffic checks if service requests OnlyLocal traffic. func RequestsOnlyLocalTraffic(service *api.Service) bool { if service.Spec.Type != api.ServiceTypeLoadBalancer && diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/service/util.go b/vendor/k8s.io/kubernetes/pkg/api/v1/service/util.go new file mode 100644 index 000000000..b051c4179 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/service/util.go @@ -0,0 +1,99 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package service
+
+import (
+	"fmt"
+	"strings"
+
+	v1 "k8s.io/api/core/v1"
+	utilnet "k8s.io/utils/net"
+)
+
+const (
+	defaultLoadBalancerSourceRanges = "0.0.0.0/0"
+)
+
+// IsAllowAll checks whether the utilnet.IPNet allows traffic from 0.0.0.0/0
+func IsAllowAll(ipnets utilnet.IPNetSet) bool {
+	for _, s := range ipnets.StringSlice() {
+		if s == "0.0.0.0/0" {
+			return true
+		}
+	}
+	return false
+}
+
+// GetLoadBalancerSourceRanges first tries to parse and verify the LoadBalancerSourceRanges field from a service.
+// If the field is not specified, it parses and verifies the AnnotationLoadBalancerSourceRangesKey annotation from the service,
+// extracting the source ranges to allow; if the annotation is not present, it returns a default (allow-all) value.
+func GetLoadBalancerSourceRanges(service *v1.Service) (utilnet.IPNetSet, error) {
+	var ipnets utilnet.IPNetSet
+	var err error
+	// if SourceRange field is specified, ignore sourceRange annotation
+	if len(service.Spec.LoadBalancerSourceRanges) > 0 {
+		specs := service.Spec.LoadBalancerSourceRanges
+		ipnets, err = utilnet.ParseIPNets(specs...)
+
+		if err != nil {
+			return nil, fmt.Errorf("service.Spec.LoadBalancerSourceRanges: %v is not valid. Expecting a list of IP ranges. For example, 10.0.0.0/24. Error msg: %v", specs, err)
+		}
+	} else {
+		val := service.Annotations[v1.AnnotationLoadBalancerSourceRangesKey]
+		val = strings.TrimSpace(val)
+		if val == "" {
+			val = defaultLoadBalancerSourceRanges
+		}
+		specs := strings.Split(val, ",")
+		ipnets, err = utilnet.ParseIPNets(specs...)
+		if err != nil {
+			return nil, fmt.Errorf("%s: %s is not valid. Expecting a comma-separated list of source IP ranges. For example, 10.0.0.0/24,192.168.2.0/24", v1.AnnotationLoadBalancerSourceRangesKey, val)
+		}
+	}
+	return ipnets, nil
+}
+
+// ExternallyAccessible checks if service is externally accessible.
+func ExternallyAccessible(service *v1.Service) bool {
+	return service.Spec.Type == v1.ServiceTypeLoadBalancer ||
+		service.Spec.Type == v1.ServiceTypeNodePort ||
+		(service.Spec.Type == v1.ServiceTypeClusterIP && len(service.Spec.ExternalIPs) > 0)
+}
+
+// ExternalPolicyLocal checks if service is externally accessible and has ETP = Local.
+func ExternalPolicyLocal(service *v1.Service) bool {
+	if !ExternallyAccessible(service) {
+		return false
+	}
+	return service.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyLocal
+}
+
+// InternalPolicyLocal checks if service has ITP = Local.
+func InternalPolicyLocal(service *v1.Service) bool {
+	if service.Spec.InternalTrafficPolicy == nil {
+		return false
+	}
+	return *service.Spec.InternalTrafficPolicy == v1.ServiceInternalTrafficPolicyLocal
+}
+
+// NeedsHealthCheck checks if service needs health check.
+func NeedsHealthCheck(service *v1.Service) bool {
+	if service.Spec.Type != v1.ServiceTypeLoadBalancer {
+		return false
+	}
+	return ExternalPolicyLocal(service)
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go
index cb5e6eb22..8b8cadaaf 100644
--- a/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go
+++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go
@@ -129,7 +129,7 @@ const (
 	// This is an action which might be taken on a pod failure - mark the
 	// Job's index as failed to avoid restarts within this index. This action
 	// can only be used when backoffLimitPerIndex is set.
-	// This value is alpha-level.
+	// This value is beta-level.
 	PodFailurePolicyActionFailIndex PodFailurePolicyAction = "FailIndex"
 
 	// This is an action which might be taken on a pod failure - the counter towards
@@ -306,8 +306,8 @@ type JobSpec struct {
 	// batch.kubernetes.io/job-index-failure-count annotation. It can only
 	// be set when Job's completionMode=Indexed, and the Pod's restart
 	// policy is Never. The field is immutable.
-	// This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex`
-	// feature gate is enabled (disabled by default).
+	// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
+	// feature gate is enabled (enabled by default).
 	// +optional
 	BackoffLimitPerIndex *int32
 
@@ -319,8 +319,8 @@ type JobSpec struct {
 	// It can only be specified when backoffLimitPerIndex is set.
 	// It can be null or up to completions. It is required and must be
 	// less than or equal to 10^4 when completions is greater than 10^5.
-	// This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex`
-	// feature gate is enabled (disabled by default).
+	// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
+	// feature gate is enabled (enabled by default).
 	// +optional
 	MaxFailedIndexes *int32
 
@@ -405,7 +405,8 @@ type JobSpec struct {
 	//
 	// When using podFailurePolicy, Failed is the only allowed value.
 	// TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.
-	// This is an alpha field. Enable JobPodReplacementPolicy to be able to use this field.
+	// This is a beta field. To use this, enable the JobPodReplacementPolicy feature toggle.
+	// This is on by default.
 	// +optional
 	PodReplacementPolicy *PodReplacementPolicy
 }
@@ -443,15 +444,12 @@ type JobStatus struct {
 	// The number of pods which are terminating (in phase Pending or Running
 	// and have a deletionTimestamp).
 	//
-	// This field is alpha-level. The job controller populates the field when
-	// the feature gate JobPodReplacementPolicy is enabled (disabled by default).
+	// This field is beta-level. The job controller populates the field when
+	// the feature gate JobPodReplacementPolicy is enabled (enabled by default).
 	// +optional
 	Terminating *int32
 
 	// The number of active pods which have a Ready condition.
-	//
-	// This field is beta-level. The job controller populates the field when
-	// the feature gate JobReadyPods is enabled (enabled by default).
 	// +optional
 	Ready *int32
 
@@ -481,8 +479,8 @@ type JobStatus struct {
 	// last element of the series, separated by a hyphen.
 	// For example, if the failed indexes are 1, 3, 4, 5 and 7, they are
 	// represented as "1,3-5,7".
-	// This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex`
-	// feature gate is enabled (disabled by default).
+	// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
+	// feature gate is enabled (enabled by default).
// +optional FailedIndexes *string diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go index a404263e7..0724e6378 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go @@ -113,34 +113,34 @@ var Semantic = conversion.EqualitiesOrDie( }, ) -var standardResourceQuotaScopes = sets.NewString( - string(core.ResourceQuotaScopeTerminating), - string(core.ResourceQuotaScopeNotTerminating), - string(core.ResourceQuotaScopeBestEffort), - string(core.ResourceQuotaScopeNotBestEffort), - string(core.ResourceQuotaScopePriorityClass), +var standardResourceQuotaScopes = sets.New( + core.ResourceQuotaScopeTerminating, + core.ResourceQuotaScopeNotTerminating, + core.ResourceQuotaScopeBestEffort, + core.ResourceQuotaScopeNotBestEffort, + core.ResourceQuotaScopePriorityClass, ) // IsStandardResourceQuotaScope returns true if the scope is a standard value -func IsStandardResourceQuotaScope(str string) bool { - return standardResourceQuotaScopes.Has(str) || str == string(core.ResourceQuotaScopeCrossNamespacePodAffinity) +func IsStandardResourceQuotaScope(scope core.ResourceQuotaScope) bool { + return standardResourceQuotaScopes.Has(scope) || scope == core.ResourceQuotaScopeCrossNamespacePodAffinity } -var podObjectCountQuotaResources = sets.NewString( - string(core.ResourcePods), +var podObjectCountQuotaResources = sets.New( + core.ResourcePods, ) -var podComputeQuotaResources = sets.NewString( - string(core.ResourceCPU), - string(core.ResourceMemory), - string(core.ResourceLimitsCPU), - string(core.ResourceLimitsMemory), - string(core.ResourceRequestsCPU), - string(core.ResourceRequestsMemory), +var podComputeQuotaResources = sets.New( + core.ResourceCPU, + core.ResourceMemory, + core.ResourceLimitsCPU, + core.ResourceLimitsMemory, + core.ResourceRequestsCPU, + core.ResourceRequestsMemory, ) // IsResourceQuotaScopeValidForResource returns true if the resource applies to the specified scope -func IsResourceQuotaScopeValidForResource(scope core.ResourceQuotaScope, resource string) bool { +func IsResourceQuotaScopeValidForResource(scope core.ResourceQuotaScope, resource core.ResourceName) bool { switch scope { case core.ResourceQuotaScopeTerminating, core.ResourceQuotaScopeNotTerminating, core.ResourceQuotaScopeNotBestEffort, core.ResourceQuotaScopePriorityClass, core.ResourceQuotaScopeCrossNamespacePodAffinity: @@ -152,16 +152,16 @@ func IsResourceQuotaScopeValidForResource(scope core.ResourceQuotaScope, resourc } } -var standardContainerResources = sets.NewString( - string(core.ResourceCPU), - string(core.ResourceMemory), - string(core.ResourceEphemeralStorage), +var standardContainerResources = sets.New( + core.ResourceCPU, + core.ResourceMemory, + core.ResourceEphemeralStorage, ) // IsStandardContainerResourceName returns true if the container can make a resource request // for the specified resource -func IsStandardContainerResourceName(str string) bool { - return standardContainerResources.Has(str) || IsHugePageResourceName(core.ResourceName(str)) +func IsStandardContainerResourceName(name core.ResourceName) bool { + return standardContainerResources.Has(name) || IsHugePageResourceName(name) } // IsExtendedResourceName returns true if: @@ -196,88 +196,88 @@ func IsOvercommitAllowed(name core.ResourceName) bool { !IsHugePageResourceName(name) } -var standardLimitRangeTypes = sets.NewString( - string(core.LimitTypePod), - string(core.LimitTypeContainer), - 
string(core.LimitTypePersistentVolumeClaim), +var standardLimitRangeTypes = sets.New( + core.LimitTypePod, + core.LimitTypeContainer, + core.LimitTypePersistentVolumeClaim, ) // IsStandardLimitRangeType returns true if the type is Pod or Container -func IsStandardLimitRangeType(str string) bool { - return standardLimitRangeTypes.Has(str) -} - -var standardQuotaResources = sets.NewString( - string(core.ResourceCPU), - string(core.ResourceMemory), - string(core.ResourceEphemeralStorage), - string(core.ResourceRequestsCPU), - string(core.ResourceRequestsMemory), - string(core.ResourceRequestsStorage), - string(core.ResourceRequestsEphemeralStorage), - string(core.ResourceLimitsCPU), - string(core.ResourceLimitsMemory), - string(core.ResourceLimitsEphemeralStorage), - string(core.ResourcePods), - string(core.ResourceQuotas), - string(core.ResourceServices), - string(core.ResourceReplicationControllers), - string(core.ResourceSecrets), - string(core.ResourcePersistentVolumeClaims), - string(core.ResourceConfigMaps), - string(core.ResourceServicesNodePorts), - string(core.ResourceServicesLoadBalancers), +func IsStandardLimitRangeType(value core.LimitType) bool { + return standardLimitRangeTypes.Has(value) +} + +var standardQuotaResources = sets.New( + core.ResourceCPU, + core.ResourceMemory, + core.ResourceEphemeralStorage, + core.ResourceRequestsCPU, + core.ResourceRequestsMemory, + core.ResourceRequestsStorage, + core.ResourceRequestsEphemeralStorage, + core.ResourceLimitsCPU, + core.ResourceLimitsMemory, + core.ResourceLimitsEphemeralStorage, + core.ResourcePods, + core.ResourceQuotas, + core.ResourceServices, + core.ResourceReplicationControllers, + core.ResourceSecrets, + core.ResourcePersistentVolumeClaims, + core.ResourceConfigMaps, + core.ResourceServicesNodePorts, + core.ResourceServicesLoadBalancers, ) // IsStandardQuotaResourceName returns true if the resource is known to // the quota tracking system -func IsStandardQuotaResourceName(str string) bool { - return standardQuotaResources.Has(str) || IsQuotaHugePageResourceName(core.ResourceName(str)) -} - -var standardResources = sets.NewString( - string(core.ResourceCPU), - string(core.ResourceMemory), - string(core.ResourceEphemeralStorage), - string(core.ResourceRequestsCPU), - string(core.ResourceRequestsMemory), - string(core.ResourceRequestsEphemeralStorage), - string(core.ResourceLimitsCPU), - string(core.ResourceLimitsMemory), - string(core.ResourceLimitsEphemeralStorage), - string(core.ResourcePods), - string(core.ResourceQuotas), - string(core.ResourceServices), - string(core.ResourceReplicationControllers), - string(core.ResourceSecrets), - string(core.ResourceConfigMaps), - string(core.ResourcePersistentVolumeClaims), - string(core.ResourceStorage), - string(core.ResourceRequestsStorage), - string(core.ResourceServicesNodePorts), - string(core.ResourceServicesLoadBalancers), +func IsStandardQuotaResourceName(name core.ResourceName) bool { + return standardQuotaResources.Has(name) || IsQuotaHugePageResourceName(name) +} + +var standardResources = sets.New( + core.ResourceCPU, + core.ResourceMemory, + core.ResourceEphemeralStorage, + core.ResourceRequestsCPU, + core.ResourceRequestsMemory, + core.ResourceRequestsEphemeralStorage, + core.ResourceLimitsCPU, + core.ResourceLimitsMemory, + core.ResourceLimitsEphemeralStorage, + core.ResourcePods, + core.ResourceQuotas, + core.ResourceServices, + core.ResourceReplicationControllers, + core.ResourceSecrets, + core.ResourceConfigMaps, + core.ResourcePersistentVolumeClaims, + 
core.ResourceStorage, + core.ResourceRequestsStorage, + core.ResourceServicesNodePorts, + core.ResourceServicesLoadBalancers, ) // IsStandardResourceName returns true if the resource is known to the system -func IsStandardResourceName(str string) bool { - return standardResources.Has(str) || IsQuotaHugePageResourceName(core.ResourceName(str)) -} - -var integerResources = sets.NewString( - string(core.ResourcePods), - string(core.ResourceQuotas), - string(core.ResourceServices), - string(core.ResourceReplicationControllers), - string(core.ResourceSecrets), - string(core.ResourceConfigMaps), - string(core.ResourcePersistentVolumeClaims), - string(core.ResourceServicesNodePorts), - string(core.ResourceServicesLoadBalancers), +func IsStandardResourceName(name core.ResourceName) bool { + return standardResources.Has(name) || IsQuotaHugePageResourceName(name) +} + +var integerResources = sets.New( + core.ResourcePods, + core.ResourceQuotas, + core.ResourceServices, + core.ResourceReplicationControllers, + core.ResourceSecrets, + core.ResourceConfigMaps, + core.ResourcePersistentVolumeClaims, + core.ResourceServicesNodePorts, + core.ResourceServicesLoadBalancers, ) // IsIntegerResourceName returns true if the resource is measured in integer values -func IsIntegerResourceName(str string) bool { - return integerResources.Has(str) || IsExtendedResourceName(core.ResourceName(str)) +func IsIntegerResourceName(name core.ResourceName) bool { + return integerResources.Has(name) || IsExtendedResourceName(name) } // IsServiceIPSet aims to check if the service's ClusterIP is set or not @@ -289,7 +289,7 @@ func IsServiceIPSet(service *core.Service) bool { service.Spec.ClusterIP != core.ClusterIPNone } -var standardFinalizers = sets.NewString( +var standardFinalizers = sets.New( string(core.FinalizerKubernetes), metav1.FinalizerOrphanDependents, metav1.FinalizerDeleteDependents, diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/qos.go b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/qos.go index 8401cb6c3..b32fffa0e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/qos.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/qos.go @@ -30,12 +30,22 @@ func isSupportedQoSComputeResource(name core.ResourceName) bool { return supportedQoSComputeResources.Has(string(name)) } -// GetPodQOS returns the QoS class of a pod. +// GetPodQOS returns the QoS class of a pod persisted in the PodStatus.QOSClass field. +// If PodStatus.QOSClass is empty, it returns value of ComputePodQOS() which evaluates pod's QoS class. +func GetPodQOS(pod *core.Pod) core.PodQOSClass { + if pod.Status.QOSClass != "" { + return pod.Status.QOSClass + } + return ComputePodQOS(pod) +} + +// ComputePodQOS evaluates the list of containers to determine a pod's QoS class. This function is more +// expensive than GetPodQOS which should be used for pods having a non-empty .Status.QOSClass. // A pod is besteffort if none of its containers have specified any requests or limits. // A pod is guaranteed only when requests and limits are specified for all the containers and they are equal. // A pod is burstable if limits and requests do not match across all containers. 
// When this function is updated please also update staging/src/k8s.io/kubectl/pkg/util/qos/qos.go -func GetPodQOS(pod *core.Pod) core.PodQOSClass { +func ComputePodQOS(pod *core.Pod) core.PodQOSClass { requests := core.ResourceList{} limits := core.ResourceList{} zeroQuantity := resource.MustParse("0") diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/types.go b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go index 75c68af62..fa1242b8a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go @@ -335,6 +335,16 @@ type PersistentVolumeSpec struct { // This field influences the scheduling of pods that use this volume. // +optional NodeAffinity *VolumeNodeAffinity + // Name of VolumeAttributesClass to which this persistent volume belongs. Empty value + // is not allowed. When this field is not set, it indicates that this volume does not belong to any + // VolumeAttributesClass. This field is mutable and can be changed by the CSI driver + // after a volume has been updated successfully to a new class. + // For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound + // PersistentVolumeClaims during the binding process. + // This is an alpha field and requires enabling VolumeAttributesClass feature. + // +featureGate=VolumeAttributesClass + // +optional + VolumeAttributesClassName *string } // VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from. @@ -440,7 +450,7 @@ type PersistentVolumeClaimSpec struct { // that are lower than previous value but must still be higher than capacity recorded in the // status field of the claim. // +optional - Resources ResourceRequirements + Resources VolumeResourceRequirements // VolumeName is the binding reference to the PersistentVolume backing this // claim. When set to non-empty value Selector is not evaluated // +optional @@ -488,6 +498,21 @@ type PersistentVolumeClaimSpec struct { // (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. // +optional DataSourceRef *TypedObjectReference + // volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + // If specified, the CSI driver will create or update the volume with the attributes defined + // in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + // it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + // will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + // If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + // will be set by the persistentvolume controller if it exists. + // If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + // set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + // exists. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + // (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. 
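The qos.go hunk above splits the old helper in two: GetPodQOS now trusts the QoS class already persisted in pod status and only falls back to the more expensive ComputePodQOS when that field is empty. Below is a minimal sketch of the same call pattern written against the external v1 types (the vendored helper itself works on the internal core package); it assumes the module pulls in k8s.io/api at v0.29 or later and is illustrative only, not part of the vendored patch.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// podQOS mirrors the new GetPodQOS contract: return the QoS class persisted in
// status when present, otherwise fall back to recomputing it.
func podQOS(pod *v1.Pod, compute func(*v1.Pod) v1.PodQOSClass) v1.PodQOSClass {
	if pod.Status.QOSClass != "" {
		return pod.Status.QOSClass
	}
	return compute(pod)
}

func main() {
	pod := &v1.Pod{Status: v1.PodStatus{QOSClass: v1.PodQOSBurstable}}
	// The fallback is not invoked because status already carries the class.
	fmt.Println(podQOS(pod, func(*v1.Pod) v1.PodQOSClass { return v1.PodQOSBestEffort }))
}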
+ // +featureGate=VolumeAttributesClass + // +optional + VolumeAttributesClassName *string } type TypedObjectReference struct { @@ -518,6 +543,11 @@ const ( PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing" // PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending" + + // Applying the target VolumeAttributesClass encountered an error + PersistentVolumeClaimVolumeModifyVolumeError PersistentVolumeClaimConditionType = "ModifyVolumeError" + // Volume is being modified + PersistentVolumeClaimVolumeModifyingVolume PersistentVolumeClaimConditionType = "ModifyingVolume" ) // +enum @@ -544,6 +574,38 @@ const ( PersistentVolumeClaimNodeResizeFailed ClaimResourceStatus = "NodeResizeFailed" ) +// +enum +// New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately +type PersistentVolumeClaimModifyVolumeStatus string + +const ( + // Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as + // the specified VolumeAttributesClass not existing + PersistentVolumeClaimModifyVolumePending PersistentVolumeClaimModifyVolumeStatus = "Pending" + // InProgress indicates that the volume is being modified + PersistentVolumeClaimModifyVolumeInProgress PersistentVolumeClaimModifyVolumeStatus = "InProgress" + // Infeasible indicates that the request has been rejected as invalid by the CSI driver. To + // resolve the error, a valid VolumeAttributesClass needs to be specified + PersistentVolumeClaimModifyVolumeInfeasible PersistentVolumeClaimModifyVolumeStatus = "Infeasible" +) + +// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation +type ModifyVolumeStatus struct { + // targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled + TargetVolumeAttributesClassName string + // status is the status of the ControllerModifyVolume operation. It can be in any of following states: + // - Pending + // Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as + // the specified VolumeAttributesClass not existing. + // - InProgress + // InProgress indicates that the volume is being modified. + // - Infeasible + // Infeasible indicates that the request has been rejected as invalid by the CSI driver. To + // resolve the error, a valid VolumeAttributesClass needs to be specified. + // Note: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately. + Status PersistentVolumeClaimModifyVolumeStatus +} + // PersistentVolumeClaimCondition represents the current condition of PV claim type PersistentVolumeClaimCondition struct { Type PersistentVolumeClaimConditionType @@ -635,6 +697,18 @@ type PersistentVolumeClaimStatus struct { // +mapType=granular // +optional AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus + // currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. + // When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim + // This is an alpha field and requires enabling VolumeAttributesClass feature. 
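The PersistentVolumeClaimSpec changes above swap Resources to the new VolumeResourceRequirements type and add the alpha volumeAttributesClassName selector. The following is an illustrative claim built against the external v1 mirror of these fields; the class name is a hypothetical example, and the snippet assumes k8s.io/api and k8s.io/apimachinery at v0.29 or later with the VolumeAttributesClass gate enabled on the cluster.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	className := "fast-iops" // hypothetical VolumeAttributesClass name
	pvc := v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "data"},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			// Resources is now a VolumeResourceRequirements (limits/requests only).
			Resources: v1.VolumeResourceRequirements{
				Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("10Gi")},
			},
			// Alpha: selects a VolumeAttributesClass; mutable after creation.
			VolumeAttributesClassName: &className,
		},
	}
	fmt.Println(pvc.Name, *pvc.Spec.VolumeAttributesClassName)
}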
+ // +featureGate=VolumeAttributesClass + // +optional + CurrentVolumeAttributesClassName *string + // ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. + // When this is unset, there is no ModifyVolume operation being attempted. + // This is an alpha field and requires enabling VolumeAttributesClass feature. + // +featureGate=VolumeAttributesClass + // +optional + ModifyVolumeStatus *ModifyVolumeStatus } // PersistentVolumeAccessMode defines various access modes for PV. @@ -1685,6 +1759,29 @@ type ServiceAccountTokenProjection struct { Path string } +// ClusterTrustBundleProjection allows a pod to access the +// `.spec.trustBundle` field of a ClusterTrustBundle object in an auto-updating +// file. +type ClusterTrustBundleProjection struct { + // Select a single ClusterTrustBundle by object name. Mutually-exclusive + // with SignerName and LabelSelector. + Name *string + + // Select all ClusterTrustBundles for this signer that match LabelSelector. + // Mutually-exclusive with Name. + SignerName *string + + // Select all ClusterTrustBundles that match this LabelSelecotr. + // Mutually-exclusive with Name. + LabelSelector *metav1.LabelSelector + + // Block pod startup if the selected ClusterTrustBundle(s) aren't available? + Optional *bool + + // Relative path from the volume root to write the bundle. + Path string +} + // ProjectedVolumeSource represents a projected volume source type ProjectedVolumeSource struct { // list of volume projections @@ -1710,6 +1807,8 @@ type VolumeProjection struct { ConfigMap *ConfigMapProjection // information about the serviceAccountToken data to project ServiceAccountToken *ServiceAccountTokenProjection + // information about the ClusterTrustBundle data to project + ClusterTrustBundle *ClusterTrustBundleProjection } // KeyToPath maps a string key to a path within a volume. @@ -1805,10 +1904,8 @@ type CSIPersistentVolumeSource struct { // NodeExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodeExpandVolume call. - // This is a beta field which is enabled default by CSINodeExpandSecret feature gate. // This field is optional, may be omitted if no secret is required. If the // secret object contains more than one secret, all secrets are passed. - // +featureGate=CSINodeExpandSecret // +optional NodeExpandSecretRef *SecretReference } @@ -2150,6 +2247,12 @@ type ExecAction struct { Command []string } +// SleepAction describes a "sleep" action. +type SleepAction struct { + // Seconds is the number of seconds to sleep. + Seconds int64 +} + // Probe describes a health check to be performed against a container to determine whether it is // alive or ready to receive traffic. type Probe struct { @@ -2282,6 +2385,18 @@ type ResourceRequirements struct { Claims []ResourceClaim } +// VolumeResourceRequirements describes the storage resource requirements for a volume. +type VolumeResourceRequirements struct { + // Limits describes the maximum amount of compute resources allowed. + // +optional + Limits ResourceList + // Requests describes the minimum amount of compute resources required. + // If Request is omitted for a container, it defaults to Limits if that is explicitly specified, + // otherwise to an implementation-defined value + // +optional + Requests ResourceList +} + // ResourceClaim references one entry in PodSpec.ResourceClaims. 
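Among the additions above is SleepAction, a lifecycle handler that simply sleeps for the given number of seconds (gated by PodLifecycleSleepAction). Here is a small sketch wiring it into a container's preStop hook via the external v1 types; the image name and duration are placeholders, and k8s.io/api v0.29+ is assumed.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	c := v1.Container{
		Name:  "web",
		Image: "example.com/web:latest", // placeholder image
		Lifecycle: &v1.Lifecycle{
			// Sleep before termination so the endpoint can be drained.
			PreStop: &v1.LifecycleHandler{
				Sleep: &v1.SleepAction{Seconds: 5},
			},
		},
	}
	fmt.Println(c.Name, c.Lifecycle.PreStop.Sleep.Seconds)
}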
type ResourceClaim struct { // Name must match the name of one entry in pod.spec.resourceClaims of @@ -2420,6 +2535,10 @@ type LifecycleHandler struct { // lifecycle hooks will fail in runtime when tcp handler is specified. // +optional TCPSocket *TCPSocketAction + // Sleep represents the duration that the container should sleep before being terminated. + // +featureGate=PodLifecycleSleepAction + // +optional + Sleep *SleepAction } type GRPCAction struct { @@ -2617,12 +2736,6 @@ const ( PodReady PodConditionType = "Ready" // PodInitialized means that all init containers in the pod have started successfully. PodInitialized PodConditionType = "Initialized" - // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler - // can't schedule the pod right now, for example due to insufficient resources in the cluster. - PodReasonUnschedulable = "Unschedulable" - // PodReasonSchedulingGated reason in PodScheduled PodCondition means that the scheduler - // skips scheduling the pod because one or more scheduling gates are still present. - PodReasonSchedulingGated = "SchedulingGated" // ContainersReady indicates whether all containers in the pod are ready. ContainersReady PodConditionType = "ContainersReady" // DisruptionTarget indicates the pod is about to be terminated due to a @@ -2886,6 +2999,7 @@ type WeightedPodAffinityTerm struct { // a pod of the set of pods is running. type PodAffinityTerm struct { // A label query over a set of resources, in this case pods. + // If it's null, this PodAffinityTerm matches with no Pods. // +optional LabelSelector *metav1.LabelSelector // namespaces specifies a static list of namespace names that the term applies to. @@ -2907,6 +3021,24 @@ type PodAffinityTerm struct { // An empty selector ({}) matches all namespaces. // +optional NamespaceSelector *metav1.LabelSelector + // MatchLabelKeys is a set of pod label keys to select which pods will + // be taken into consideration. The keys are used to lookup values from the + // incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + // to select the group of existing pods which pods will be taken into consideration + // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + // pod labels will be ignored. The default value is empty. + // +listType=atomic + // +optional + MatchLabelKeys []string + // MismatchLabelKeys is a set of pod label keys to select which pods will + // be taken into consideration. The keys are used to lookup values from the + // incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + // to select the group of existing pods which pods will be taken into consideration + // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + // pod labels will be ignored. The default value is empty. + // +listType=atomic + // +optional + MismatchLabelKeys []string } // NodeAffinity is a group of node affinity scheduling rules. @@ -4074,6 +4206,15 @@ type LoadBalancerIngress struct { // +optional Hostname string + // IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. + // Setting this to "VIP" indicates that traffic is delivered to the node with + // the destination set to the load-balancer's IP and port. + // Setting this to "Proxy" indicates that traffic is delivered to the node or pod with + // the destination set to the node's IP and node port or the pod's IP and port. 
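The PodAffinityTerm hunk above introduces the alpha matchLabelKeys and mismatchLabelKeys fields, which merge values taken from the incoming pod's labels into the term's label selector. Below is a sketch of a term using matchLabelKeys on the external v1 type; the pod-template-hash key is just an illustrative choice, and k8s.io/api plus k8s.io/apimachinery at v0.29+ are assumed.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	term := v1.PodAffinityTerm{
		LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
		TopologyKey:   "kubernetes.io/hostname",
		// The incoming pod's value for this key is merged into the selector as
		// `key in (value)`, so only pods from the same rollout are considered.
		MatchLabelKeys: []string{"pod-template-hash"},
	}
	fmt.Println(term.TopologyKey, term.MatchLabelKeys)
}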
+ // Service implementations may use this information to adjust traffic routing. + // +optional + IPMode *LoadBalancerIPMode + // Ports is a list of records of service ports // If used, every port defined in the service should have an entry in it // +optional @@ -4310,7 +4451,7 @@ type ServicePort struct { // RFC-6335 and https://www.iana.org/assignments/service-names). // // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // @@ -4475,7 +4616,7 @@ type EndpointPort struct { // RFC-6335 and https://www.iana.org/assignments/service-names). // // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // @@ -4596,7 +4737,7 @@ type NodeSystemInfo struct { ContainerRuntimeVersion string // Kubelet Version reported by the node. KubeletVersion string - // KubeProxy Version reported by the node. + // Deprecated: KubeProxy Version reported by the node. KubeProxyVersion string // The Operating System reported by the node OperatingSystem string @@ -6146,3 +6287,15 @@ type PortStatus struct { // +kubebuilder:validation:MaxLength=316 Error *string } + +// LoadBalancerIPMode represents the mode of the LoadBalancer ingress IP +type LoadBalancerIPMode string + +const ( + // LoadBalancerIPModeVIP indicates that traffic is delivered to the node with + // the destination set to the load-balancer's IP and port. + LoadBalancerIPModeVIP LoadBalancerIPMode = "VIP" + // LoadBalancerIPModeProxy indicates that traffic is delivered to the node or pod with + // the destination set to the node's IP and port or the pod's IP and port. + LoadBalancerIPModeProxy LoadBalancerIPMode = "Proxy" +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go index 51337fe16..8b645ebf4 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/api/v1/service" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/parsers" "k8s.io/utils/pointer" @@ -122,11 +123,9 @@ func SetDefaults_Service(obj *v1.Service) { sp.TargetPort = intstr.FromInt32(sp.Port) } } - // Defaults ExternalTrafficPolicy field for NodePort / LoadBalancer service + // Defaults ExternalTrafficPolicy field for externally-accessible service // to Global for consistency. 
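The LoadBalancerIngress.IPMode field and the VIP/Proxy constants defined above tell clients how traffic addressed to a load-balancer IP is delivered. A hedged sketch of reading the field from Service status via the external v1 types follows; the address is a documentation IP, and the snippet assumes k8s.io/api v0.29+ with the LoadBalancerIPMode gate populating the field.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	mode := v1.LoadBalancerIPModeProxy
	svc := v1.Service{
		Status: v1.ServiceStatus{
			LoadBalancer: v1.LoadBalancerStatus{
				Ingress: []v1.LoadBalancerIngress{{IP: "203.0.113.10", IPMode: &mode}},
			},
		},
	}
	for _, ing := range svc.Status.LoadBalancer.Ingress {
		// In Proxy mode the destination is rewritten to the node or pod, so the
		// LB VIP will not appear as the packet destination on the node.
		if ing.IPMode != nil && *ing.IPMode == v1.LoadBalancerIPModeProxy {
			fmt.Println("proxy-mode ingress:", ing.IP)
		}
	}
}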
- if (obj.Spec.Type == v1.ServiceTypeNodePort || - obj.Spec.Type == v1.ServiceTypeLoadBalancer) && - obj.Spec.ExternalTrafficPolicy == "" { + if service.ExternallyAccessible(obj) && obj.Spec.ExternalTrafficPolicy == "" { obj.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyCluster } @@ -142,6 +141,19 @@ func SetDefaults_Service(obj *v1.Service) { obj.Spec.AllocateLoadBalancerNodePorts = pointer.Bool(true) } } + + if obj.Spec.Type == v1.ServiceTypeLoadBalancer { + if utilfeature.DefaultFeatureGate.Enabled(features.LoadBalancerIPMode) { + ipMode := v1.LoadBalancerIPModeVIP + + for i, ing := range obj.Status.LoadBalancer.Ingress { + if ing.IP != "" && ing.IPMode == nil { + obj.Status.LoadBalancer.Ingress[i].IPMode = &ipMode + } + } + } + } + } func SetDefaults_Pod(obj *v1.Pod) { // If limits are specified, but requests are not, default requests to limits diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go index 8a432e8d7..41db42217 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go @@ -212,6 +212,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*v1.ClusterTrustBundleProjection)(nil), (*core.ClusterTrustBundleProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ClusterTrustBundleProjection_To_core_ClusterTrustBundleProjection(a.(*v1.ClusterTrustBundleProjection), b.(*core.ClusterTrustBundleProjection), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ClusterTrustBundleProjection)(nil), (*v1.ClusterTrustBundleProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ClusterTrustBundleProjection_To_v1_ClusterTrustBundleProjection(a.(*core.ClusterTrustBundleProjection), b.(*v1.ClusterTrustBundleProjection), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*v1.ComponentCondition)(nil), (*core.ComponentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_ComponentCondition_To_core_ComponentCondition(a.(*v1.ComponentCondition), b.(*core.ComponentCondition), scope) }); err != nil { @@ -882,6 +892,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*v1.ModifyVolumeStatus)(nil), (*core.ModifyVolumeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ModifyVolumeStatus_To_core_ModifyVolumeStatus(a.(*v1.ModifyVolumeStatus), b.(*core.ModifyVolumeStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ModifyVolumeStatus)(nil), (*v1.ModifyVolumeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ModifyVolumeStatus_To_v1_ModifyVolumeStatus(a.(*core.ModifyVolumeStatus), b.(*v1.ModifyVolumeStatus), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*v1.NFSVolumeSource)(nil), (*core.NFSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_NFSVolumeSource_To_core_NFSVolumeSource(a.(*v1.NFSVolumeSource), b.(*core.NFSVolumeSource), scope) }); err != nil { @@ -1937,6 +1957,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := 
s.AddGeneratedConversionFunc((*v1.SleepAction)(nil), (*core.SleepAction)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SleepAction_To_core_SleepAction(a.(*v1.SleepAction), b.(*core.SleepAction), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SleepAction)(nil), (*v1.SleepAction)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SleepAction_To_v1_SleepAction(a.(*core.SleepAction), b.(*v1.SleepAction), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*v1.StorageOSPersistentVolumeSource)(nil), (*core.StorageOSPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(a.(*v1.StorageOSPersistentVolumeSource), b.(*core.StorageOSPersistentVolumeSource), scope) }); err != nil { @@ -2087,6 +2117,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*v1.VolumeResourceRequirements)(nil), (*core.VolumeResourceRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements(a.(*v1.VolumeResourceRequirements), b.(*core.VolumeResourceRequirements), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.VolumeResourceRequirements)(nil), (*v1.VolumeResourceRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements(a.(*core.VolumeResourceRequirements), b.(*v1.VolumeResourceRequirements), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*v1.VolumeSource)(nil), (*core.VolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_VolumeSource_To_core_VolumeSource(a.(*v1.VolumeSource), b.(*core.VolumeSource), scope) }); err != nil { @@ -2735,6 +2775,34 @@ func Convert_core_ClientIPConfig_To_v1_ClientIPConfig(in *core.ClientIPConfig, o return autoConvert_core_ClientIPConfig_To_v1_ClientIPConfig(in, out, s) } +func autoConvert_v1_ClusterTrustBundleProjection_To_core_ClusterTrustBundleProjection(in *v1.ClusterTrustBundleProjection, out *core.ClusterTrustBundleProjection, s conversion.Scope) error { + out.Name = (*string)(unsafe.Pointer(in.Name)) + out.SignerName = (*string)(unsafe.Pointer(in.SignerName)) + out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + out.Path = in.Path + return nil +} + +// Convert_v1_ClusterTrustBundleProjection_To_core_ClusterTrustBundleProjection is an autogenerated conversion function. 
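The conversions registered above cover, among others, the new ClusterTrustBundleProjection volume projection. Below is an illustrative projected-volume source selecting bundles by signer name, written against the external v1 type; the signer name and file path are hypothetical, and k8s.io/api v0.29+ with the ClusterTrustBundle alpha gate is assumed.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	signer := "example.com/my-signer" // hypothetical signer name
	vol := v1.Volume{
		Name: "trust-anchors",
		VolumeSource: v1.VolumeSource{
			Projected: &v1.ProjectedVolumeSource{
				Sources: []v1.VolumeProjection{{
					// Project every ClusterTrustBundle issued for this signer
					// into a single PEM file inside the volume.
					ClusterTrustBundle: &v1.ClusterTrustBundleProjection{
						SignerName: &signer,
						Path:       "ca-bundle.pem",
					},
				}},
			},
		},
	}
	fmt.Println(vol.Name, vol.VolumeSource.Projected.Sources[0].ClusterTrustBundle.Path)
}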
+func Convert_v1_ClusterTrustBundleProjection_To_core_ClusterTrustBundleProjection(in *v1.ClusterTrustBundleProjection, out *core.ClusterTrustBundleProjection, s conversion.Scope) error { + return autoConvert_v1_ClusterTrustBundleProjection_To_core_ClusterTrustBundleProjection(in, out, s) +} + +func autoConvert_core_ClusterTrustBundleProjection_To_v1_ClusterTrustBundleProjection(in *core.ClusterTrustBundleProjection, out *v1.ClusterTrustBundleProjection, s conversion.Scope) error { + out.Name = (*string)(unsafe.Pointer(in.Name)) + out.SignerName = (*string)(unsafe.Pointer(in.SignerName)) + out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + out.Path = in.Path + return nil +} + +// Convert_core_ClusterTrustBundleProjection_To_v1_ClusterTrustBundleProjection is an autogenerated conversion function. +func Convert_core_ClusterTrustBundleProjection_To_v1_ClusterTrustBundleProjection(in *core.ClusterTrustBundleProjection, out *v1.ClusterTrustBundleProjection, s conversion.Scope) error { + return autoConvert_core_ClusterTrustBundleProjection_To_v1_ClusterTrustBundleProjection(in, out, s) +} + func autoConvert_v1_ComponentCondition_To_core_ComponentCondition(in *v1.ComponentCondition, out *core.ComponentCondition, s conversion.Scope) error { out.Type = core.ComponentConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) @@ -4315,6 +4383,7 @@ func autoConvert_v1_LifecycleHandler_To_core_LifecycleHandler(in *v1.LifecycleHa out.Exec = (*core.ExecAction)(unsafe.Pointer(in.Exec)) out.HTTPGet = (*core.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) out.TCPSocket = (*core.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) + out.Sleep = (*core.SleepAction)(unsafe.Pointer(in.Sleep)) return nil } @@ -4327,6 +4396,7 @@ func autoConvert_core_LifecycleHandler_To_v1_LifecycleHandler(in *core.Lifecycle out.Exec = (*v1.ExecAction)(unsafe.Pointer(in.Exec)) out.HTTPGet = (*v1.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) out.TCPSocket = (*v1.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) + out.Sleep = (*v1.SleepAction)(unsafe.Pointer(in.Sleep)) return nil } @@ -4478,6 +4548,7 @@ func Convert_core_List_To_v1_List(in *core.List, out *v1.List, s conversion.Scop func autoConvert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in *v1.LoadBalancerIngress, out *core.LoadBalancerIngress, s conversion.Scope) error { out.IP = in.IP out.Hostname = in.Hostname + out.IPMode = (*core.LoadBalancerIPMode)(unsafe.Pointer(in.IPMode)) out.Ports = *(*[]core.PortStatus)(unsafe.Pointer(&in.Ports)) return nil } @@ -4490,6 +4561,7 @@ func Convert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in *v1.LoadBalan func autoConvert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *core.LoadBalancerIngress, out *v1.LoadBalancerIngress, s conversion.Scope) error { out.IP = in.IP out.Hostname = in.Hostname + out.IPMode = (*v1.LoadBalancerIPMode)(unsafe.Pointer(in.IPMode)) out.Ports = *(*[]v1.PortStatus)(unsafe.Pointer(&in.Ports)) return nil } @@ -4551,6 +4623,28 @@ func Convert_core_LocalVolumeSource_To_v1_LocalVolumeSource(in *core.LocalVolume return autoConvert_core_LocalVolumeSource_To_v1_LocalVolumeSource(in, out, s) } +func autoConvert_v1_ModifyVolumeStatus_To_core_ModifyVolumeStatus(in *v1.ModifyVolumeStatus, out *core.ModifyVolumeStatus, s conversion.Scope) error { + out.TargetVolumeAttributesClassName = in.TargetVolumeAttributesClassName + out.Status = core.PersistentVolumeClaimModifyVolumeStatus(in.Status) + return nil +} + +// 
Convert_v1_ModifyVolumeStatus_To_core_ModifyVolumeStatus is an autogenerated conversion function. +func Convert_v1_ModifyVolumeStatus_To_core_ModifyVolumeStatus(in *v1.ModifyVolumeStatus, out *core.ModifyVolumeStatus, s conversion.Scope) error { + return autoConvert_v1_ModifyVolumeStatus_To_core_ModifyVolumeStatus(in, out, s) +} + +func autoConvert_core_ModifyVolumeStatus_To_v1_ModifyVolumeStatus(in *core.ModifyVolumeStatus, out *v1.ModifyVolumeStatus, s conversion.Scope) error { + out.TargetVolumeAttributesClassName = in.TargetVolumeAttributesClassName + out.Status = v1.PersistentVolumeClaimModifyVolumeStatus(in.Status) + return nil +} + +// Convert_core_ModifyVolumeStatus_To_v1_ModifyVolumeStatus is an autogenerated conversion function. +func Convert_core_ModifyVolumeStatus_To_v1_ModifyVolumeStatus(in *core.ModifyVolumeStatus, out *v1.ModifyVolumeStatus, s conversion.Scope) error { + return autoConvert_core_ModifyVolumeStatus_To_v1_ModifyVolumeStatus(in, out, s) +} + func autoConvert_v1_NFSVolumeSource_To_core_NFSVolumeSource(in *v1.NFSVolumeSource, out *core.NFSVolumeSource, s conversion.Scope) error { out.Server = in.Server out.Path = in.Path @@ -5321,7 +5415,7 @@ func Convert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in * func autoConvert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(in *v1.PersistentVolumeClaimSpec, out *core.PersistentVolumeClaimSpec, s conversion.Scope) error { out.AccessModes = *(*[]core.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := Convert_v1_ResourceRequirements_To_core_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + if err := Convert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements(&in.Resources, &out.Resources, s); err != nil { return err } out.VolumeName = in.VolumeName @@ -5329,6 +5423,7 @@ func autoConvert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec( out.VolumeMode = (*core.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode)) out.DataSource = (*core.TypedLocalObjectReference)(unsafe.Pointer(in.DataSource)) out.DataSourceRef = (*core.TypedObjectReference)(unsafe.Pointer(in.DataSourceRef)) + out.VolumeAttributesClassName = (*string)(unsafe.Pointer(in.VolumeAttributesClassName)) return nil } @@ -5340,7 +5435,7 @@ func Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(in * func autoConvert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *core.PersistentVolumeClaimSpec, out *v1.PersistentVolumeClaimSpec, s conversion.Scope) error { out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := Convert_core_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + if err := Convert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements(&in.Resources, &out.Resources, s); err != nil { return err } out.VolumeName = in.VolumeName @@ -5348,6 +5443,7 @@ func autoConvert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec( out.VolumeMode = (*v1.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode)) out.DataSource = (*v1.TypedLocalObjectReference)(unsafe.Pointer(in.DataSource)) out.DataSourceRef = (*v1.TypedObjectReference)(unsafe.Pointer(in.DataSourceRef)) + out.VolumeAttributesClassName = (*string)(unsafe.Pointer(in.VolumeAttributesClassName)) return nil } @@ -5363,6 +5459,8 @@ func 
autoConvert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimSta out.Conditions = *(*[]core.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions)) out.AllocatedResources = *(*core.ResourceList)(unsafe.Pointer(&in.AllocatedResources)) out.AllocatedResourceStatuses = *(*map[core.ResourceName]core.ClaimResourceStatus)(unsafe.Pointer(&in.AllocatedResourceStatuses)) + out.CurrentVolumeAttributesClassName = (*string)(unsafe.Pointer(in.CurrentVolumeAttributesClassName)) + out.ModifyVolumeStatus = (*core.ModifyVolumeStatus)(unsafe.Pointer(in.ModifyVolumeStatus)) return nil } @@ -5378,6 +5476,8 @@ func autoConvert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimSta out.Conditions = *(*[]v1.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions)) out.AllocatedResources = *(*v1.ResourceList)(unsafe.Pointer(&in.AllocatedResources)) out.AllocatedResourceStatuses = *(*map[v1.ResourceName]v1.ClaimResourceStatus)(unsafe.Pointer(&in.AllocatedResourceStatuses)) + out.CurrentVolumeAttributesClassName = (*string)(unsafe.Pointer(in.CurrentVolumeAttributesClassName)) + out.ModifyVolumeStatus = (*v1.ModifyVolumeStatus)(unsafe.Pointer(in.ModifyVolumeStatus)) return nil } @@ -5550,6 +5650,7 @@ func autoConvert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in *v1.Per out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions)) out.VolumeMode = (*core.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode)) out.NodeAffinity = (*core.VolumeNodeAffinity)(unsafe.Pointer(in.NodeAffinity)) + out.VolumeAttributesClassName = (*string)(unsafe.Pointer(in.VolumeAttributesClassName)) return nil } @@ -5565,6 +5666,7 @@ func autoConvert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *core.P out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions)) out.VolumeMode = (*v1.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode)) out.NodeAffinity = (*v1.VolumeNodeAffinity)(unsafe.Pointer(in.NodeAffinity)) + out.VolumeAttributesClassName = (*string)(unsafe.Pointer(in.VolumeAttributesClassName)) return nil } @@ -5665,6 +5767,8 @@ func autoConvert_v1_PodAffinityTerm_To_core_PodAffinityTerm(in *v1.PodAffinityTe out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) out.TopologyKey = in.TopologyKey out.NamespaceSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector)) + out.MatchLabelKeys = *(*[]string)(unsafe.Pointer(&in.MatchLabelKeys)) + out.MismatchLabelKeys = *(*[]string)(unsafe.Pointer(&in.MismatchLabelKeys)) return nil } @@ -5678,6 +5782,8 @@ func autoConvert_core_PodAffinityTerm_To_v1_PodAffinityTerm(in *core.PodAffinity out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) out.TopologyKey = in.TopologyKey out.NamespaceSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector)) + out.MatchLabelKeys = *(*[]string)(unsafe.Pointer(&in.MatchLabelKeys)) + out.MismatchLabelKeys = *(*[]string)(unsafe.Pointer(&in.MismatchLabelKeys)) return nil } @@ -8055,6 +8161,26 @@ func Convert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in *core.Ses return autoConvert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in, out, s) } +func autoConvert_v1_SleepAction_To_core_SleepAction(in *v1.SleepAction, out *core.SleepAction, s conversion.Scope) error { + out.Seconds = in.Seconds + return nil +} + +// Convert_v1_SleepAction_To_core_SleepAction is an autogenerated conversion function. 
+func Convert_v1_SleepAction_To_core_SleepAction(in *v1.SleepAction, out *core.SleepAction, s conversion.Scope) error { + return autoConvert_v1_SleepAction_To_core_SleepAction(in, out, s) +} + +func autoConvert_core_SleepAction_To_v1_SleepAction(in *core.SleepAction, out *v1.SleepAction, s conversion.Scope) error { + out.Seconds = in.Seconds + return nil +} + +// Convert_core_SleepAction_To_v1_SleepAction is an autogenerated conversion function. +func Convert_core_SleepAction_To_v1_SleepAction(in *core.SleepAction, out *v1.SleepAction, s conversion.Scope) error { + return autoConvert_core_SleepAction_To_v1_SleepAction(in, out, s) +} + func autoConvert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(in *v1.StorageOSPersistentVolumeSource, out *core.StorageOSPersistentVolumeSource, s conversion.Scope) error { out.VolumeName = in.VolumeName out.VolumeNamespace = in.VolumeNamespace @@ -8436,6 +8562,7 @@ func autoConvert_v1_VolumeProjection_To_core_VolumeProjection(in *v1.VolumeProje } else { out.ServiceAccountToken = nil } + out.ClusterTrustBundle = (*core.ClusterTrustBundleProjection)(unsafe.Pointer(in.ClusterTrustBundle)) return nil } @@ -8457,6 +8584,7 @@ func autoConvert_core_VolumeProjection_To_v1_VolumeProjection(in *core.VolumePro } else { out.ServiceAccountToken = nil } + out.ClusterTrustBundle = (*v1.ClusterTrustBundleProjection)(unsafe.Pointer(in.ClusterTrustBundle)) return nil } @@ -8465,6 +8593,28 @@ func Convert_core_VolumeProjection_To_v1_VolumeProjection(in *core.VolumeProject return autoConvert_core_VolumeProjection_To_v1_VolumeProjection(in, out, s) } +func autoConvert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements(in *v1.VolumeResourceRequirements, out *core.VolumeResourceRequirements, s conversion.Scope) error { + out.Limits = *(*core.ResourceList)(unsafe.Pointer(&in.Limits)) + out.Requests = *(*core.ResourceList)(unsafe.Pointer(&in.Requests)) + return nil +} + +// Convert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements is an autogenerated conversion function. +func Convert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements(in *v1.VolumeResourceRequirements, out *core.VolumeResourceRequirements, s conversion.Scope) error { + return autoConvert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements(in, out, s) +} + +func autoConvert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements(in *core.VolumeResourceRequirements, out *v1.VolumeResourceRequirements, s conversion.Scope) error { + out.Limits = *(*v1.ResourceList)(unsafe.Pointer(&in.Limits)) + out.Requests = *(*v1.ResourceList)(unsafe.Pointer(&in.Requests)) + return nil +} + +// Convert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements is an autogenerated conversion function. 
+func Convert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements(in *core.VolumeResourceRequirements, out *v1.VolumeResourceRequirements, s conversion.Scope) error { + return autoConvert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements(in, out, s) +} + func autoConvert_v1_VolumeSource_To_core_VolumeSource(in *v1.VolumeSource, out *core.VolumeSource, s conversion.Scope) error { out.HostPath = (*core.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) out.EmptyDir = (*core.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/names.go b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/names.go new file mode 100644 index 000000000..398a1cb3a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/names.go @@ -0,0 +1,132 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + "strings" + + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// ValidateSignerName checks that signerName is syntactically valid. +// +// ensure signerName is of the form domain.com/something and up to 571 characters. +// This length and format is specified to accommodate signerNames like: +// /.. +// The max length of a FQDN is 253 characters (DNS1123Subdomain max length) +// The max length of a namespace name is 63 characters (DNS1123Label max length) +// The max length of a resource name is 253 characters (DNS1123Subdomain max length) +// We then add an additional 2 characters to account for the one '.' and one '/'. +func ValidateSignerName(fldPath *field.Path, signerName string) field.ErrorList { + var el field.ErrorList + if len(signerName) == 0 { + el = append(el, field.Required(fldPath, "")) + return el + } + + segments := strings.Split(signerName, "/") + // validate that there is one '/' in the signerName. + // we do this after validating the domain segment to provide more info to the user. + if len(segments) != 2 { + el = append(el, field.Invalid(fldPath, signerName, "must be a fully qualified domain and path of the form 'example.com/signer-name'")) + // return early here as we should not continue attempting to validate a missing or malformed path segment + // (i.e. one containing multiple or zero `/`) + return el + } + + // validate that segments[0] is less than 253 characters altogether + maxDomainSegmentLength := validation.DNS1123SubdomainMaxLength + if len(segments[0]) > maxDomainSegmentLength { + el = append(el, field.TooLong(fldPath, segments[0], maxDomainSegmentLength)) + } + // validate that segments[0] consists of valid DNS1123 labels separated by '.' 
+ domainLabels := strings.Split(segments[0], ".") + for _, lbl := range domainLabels { + // use IsDNS1123Label as we want to ensure the max length of any single label in the domain + // is 63 characters + if errs := validation.IsDNS1123Label(lbl); len(errs) > 0 { + for _, err := range errs { + el = append(el, field.Invalid(fldPath, segments[0], fmt.Sprintf("validating label %q: %s", lbl, err))) + } + // if we encounter any errors whilst parsing the domain segment, break from + // validation as any further error messages will be duplicates, and non-distinguishable + // from each other, confusing users. + break + } + } + + // validate that there is at least one '.' in segments[0] + if len(domainLabels) < 2 { + el = append(el, field.Invalid(fldPath, segments[0], "should be a domain with at least two segments separated by dots")) + } + + // validate that segments[1] consists of valid DNS1123 subdomains separated by '.'. + pathLabels := strings.Split(segments[1], ".") + for _, lbl := range pathLabels { + // use IsDNS1123Subdomain because it enforces a length restriction of 253 characters + // which is required in order to fit a full resource name into a single 'label' + if errs := validation.IsDNS1123Subdomain(lbl); len(errs) > 0 { + for _, err := range errs { + el = append(el, field.Invalid(fldPath, segments[1], fmt.Sprintf("validating label %q: %s", lbl, err))) + } + // if we encounter any errors whilst parsing the path segment, break from + // validation as any further error messages will be duplicates, and non-distinguishable + // from each other, confusing users. + break + } + } + + // ensure that segments[1] can accommodate a dns label + dns subdomain + '.' + maxPathSegmentLength := validation.DNS1123SubdomainMaxLength + validation.DNS1123LabelMaxLength + 1 + maxSignerNameLength := maxDomainSegmentLength + maxPathSegmentLength + 1 + if len(signerName) > maxSignerNameLength { + el = append(el, field.TooLong(fldPath, signerName, maxSignerNameLength)) + } + + return el +} + +// ValidateClusterTrustBundleName checks that a ClusterTrustBundle name conforms +// to the rules documented on the type. +func ValidateClusterTrustBundleName(signerName string) func(name string, prefix bool) []string { + return func(name string, isPrefix bool) []string { + if signerName == "" { + if strings.Contains(name, ":") { + return []string{"ClusterTrustBundle without signer name must not have \":\" in its name"} + } + return apimachineryvalidation.NameIsDNSSubdomain(name, isPrefix) + } + + requiredPrefix := strings.ReplaceAll(signerName, "/", ":") + ":" + if !strings.HasPrefix(name, requiredPrefix) { + return []string{fmt.Sprintf("ClusterTrustBundle for signerName %s must be named with prefix %s", signerName, requiredPrefix)} + } + return apimachineryvalidation.NameIsDNSSubdomain(strings.TrimPrefix(name, requiredPrefix), isPrefix) + } +} + +func extractSignerNameFromClusterTrustBundleName(name string) (string, bool) { + if splitPoint := strings.LastIndex(name, ":"); splitPoint != -1 { + // This looks like it refers to a signerName trustbundle. 
+ return strings.ReplaceAll(name[:splitPoint], ":", "/"), true + } else { + return "", false + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go index cd9cbbb8b..a6f7fef30 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go @@ -44,6 +44,7 @@ import ( "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" utilfeature "k8s.io/apiserver/pkg/util/feature" + utilsysctl "k8s.io/component-helpers/node/util/sysctl" schedulinghelper "k8s.io/component-helpers/scheduling/corev1" kubeletapis "k8s.io/kubelet/pkg/apis" apiservice "k8s.io/kubernetes/pkg/api/service" @@ -104,7 +105,7 @@ var allowedEphemeralContainerFields = map[string]bool{ // The valid values currently are linux, windows. // In future, they can be expanded to values from // https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration -var validOS = sets.NewString(string(core.Linux), string(core.Windows)) +var validOS = sets.New(core.Linux, core.Windows) // ValidateHasLabel requires that metav1.ObjectMeta has a Label with key and expectedValue func ValidateHasLabel(meta metav1.ObjectMeta, fldPath *field.Path, key, expectedValue string) field.ErrorList { @@ -387,8 +388,8 @@ func ValidateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *fiel func ValidateVolumes(volumes []core.Volume, podMeta *metav1.ObjectMeta, fldPath *field.Path, opts PodValidationOptions) (map[string]core.VolumeSource, field.ErrorList) { allErrs := field.ErrorList{} - allNames := sets.String{} - allCreatedPVCs := sets.String{} + allNames := sets.Set[string]{} + allCreatedPVCs := sets.Set[string]{} // Determine which PVCs will be created for this pod. We need // the exact name of the pod for this. Without it, this sanity // check has to be skipped. 
@@ -1037,7 +1038,7 @@ func validateFlockerVolumeSource(flocker *core.FlockerVolumeSource, fldPath *fie return allErrs } -var validVolumeDownwardAPIFieldPathExpressions = sets.NewString( +var validVolumeDownwardAPIFieldPathExpressions = sets.New( "metadata.name", "metadata.namespace", "metadata.labels", @@ -1085,7 +1086,7 @@ func validateDownwardAPIVolumeSource(downwardAPIVolume *core.DownwardAPIVolumeSo func validateProjectionSources(projection *core.ProjectedVolumeSource, projectionMode *int32, fldPath *field.Path, opts PodValidationOptions) field.ErrorList { allErrs := field.ErrorList{} - allPaths := sets.String{} + allPaths := sets.Set[string]{} for i, source := range projection.Sources { numSources := 0 @@ -1154,6 +1155,69 @@ func validateProjectionSources(projection *core.ProjectedVolumeSource, projectio allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) } } + if projPath := srcPath.Child("clusterTrustBundlePEM"); source.ClusterTrustBundle != nil { + numSources++ + + usingName := source.ClusterTrustBundle.Name != nil + usingSignerName := source.ClusterTrustBundle.SignerName != nil + + switch { + case usingName && usingSignerName: + allErrs = append(allErrs, field.Invalid(projPath, source.ClusterTrustBundle, "only one of name and signerName may be used")) + case usingName: + if *source.ClusterTrustBundle.Name == "" { + allErrs = append(allErrs, field.Required(projPath.Child("name"), "must be a valid object name")) + } + + name := *source.ClusterTrustBundle.Name + if signerName, ok := extractSignerNameFromClusterTrustBundleName(name); ok { + validationFunc := ValidateClusterTrustBundleName(signerName) + errMsgs := validationFunc(name, false) + for _, msg := range errMsgs { + allErrs = append(allErrs, field.Invalid(projPath.Child("name"), name, fmt.Sprintf("not a valid clustertrustbundlename: %v", msg))) + } + } else { + validationFunc := ValidateClusterTrustBundleName("") + errMsgs := validationFunc(name, false) + for _, msg := range errMsgs { + allErrs = append(allErrs, field.Invalid(projPath.Child("name"), name, fmt.Sprintf("not a valid clustertrustbundlename: %v", msg))) + } + } + + if source.ClusterTrustBundle.LabelSelector != nil { + allErrs = append(allErrs, field.Invalid(projPath.Child("labelSelector"), source.ClusterTrustBundle.LabelSelector, "labelSelector must be unset if name is specified")) + } + case usingSignerName: + if *source.ClusterTrustBundle.SignerName == "" { + allErrs = append(allErrs, field.Required(projPath.Child("signerName"), "must be a valid signer name")) + } + + allErrs = append(allErrs, ValidateSignerName(projPath.Child("signerName"), *source.ClusterTrustBundle.SignerName)...) + + labelSelectorErrs := unversionedvalidation.ValidateLabelSelector( + source.ClusterTrustBundle.LabelSelector, + unversionedvalidation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: false}, + projPath.Child("labelSelector"), + ) + allErrs = append(allErrs, labelSelectorErrs...) + + default: + allErrs = append(allErrs, field.Required(projPath, "either name or signerName must be specified")) + } + + if source.ClusterTrustBundle.Path == "" { + allErrs = append(allErrs, field.Required(projPath.Child("path"), "")) + } + + allErrs = append(allErrs, validateLocalNonReservedPath(source.ClusterTrustBundle.Path, projPath.Child("path"))...) 
+ + curPath := source.ClusterTrustBundle.Path + if !allPaths.Has(curPath) { + allPaths.Insert(curPath) + } else { + allErrs = append(allErrs, field.Invalid(fldPath, curPath, "conflicting duplicate paths")) + } + } if numSources > 1 { allErrs = append(allErrs, field.Forbidden(srcPath, "may not specify more than 1 volume type")) } @@ -1173,21 +1237,21 @@ func validateProjectedVolumeSource(projection *core.ProjectedVolumeSource, fldPa return allErrs } -var supportedHostPathTypes = sets.NewString( - string(core.HostPathUnset), - string(core.HostPathDirectoryOrCreate), - string(core.HostPathDirectory), - string(core.HostPathFileOrCreate), - string(core.HostPathFile), - string(core.HostPathSocket), - string(core.HostPathCharDev), - string(core.HostPathBlockDev)) +var supportedHostPathTypes = sets.New( + core.HostPathUnset, + core.HostPathDirectoryOrCreate, + core.HostPathDirectory, + core.HostPathFileOrCreate, + core.HostPathFile, + core.HostPathSocket, + core.HostPathCharDev, + core.HostPathBlockDev) func validateHostPathType(hostPathType *core.HostPathType, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - if hostPathType != nil && !supportedHostPathTypes.Has(string(*hostPathType)) { - allErrs = append(allErrs, field.NotSupported(fldPath, hostPathType, supportedHostPathTypes.List())) + if hostPathType != nil && !supportedHostPathTypes.Has(*hostPathType) { + allErrs = append(allErrs, field.NotSupported(fldPath, hostPathType, sets.List(supportedHostPathTypes))) } return allErrs @@ -1232,9 +1296,13 @@ func validateMountPropagation(mountPropagation *core.MountPropagationMode, conta return allErrs } - supportedMountPropagations := sets.NewString(string(core.MountPropagationBidirectional), string(core.MountPropagationHostToContainer), string(core.MountPropagationNone)) - if !supportedMountPropagations.Has(string(*mountPropagation)) { - allErrs = append(allErrs, field.NotSupported(fldPath, *mountPropagation, supportedMountPropagations.List())) + supportedMountPropagations := sets.New( + core.MountPropagationBidirectional, + core.MountPropagationHostToContainer, + core.MountPropagationNone) + + if !supportedMountPropagations.Has(*mountPropagation) { + allErrs = append(allErrs, field.NotSupported(fldPath, *mountPropagation, sets.List(supportedMountPropagations))) } if container == nil { @@ -1402,8 +1470,15 @@ func validateAzureFilePV(azure *core.AzureFilePersistentVolumeSource, fldPath *f } func validateAzureDisk(azure *core.AzureDiskVolumeSource, fldPath *field.Path) field.ErrorList { - var supportedCachingModes = sets.NewString(string(core.AzureDataDiskCachingNone), string(core.AzureDataDiskCachingReadOnly), string(core.AzureDataDiskCachingReadWrite)) - var supportedDiskKinds = sets.NewString(string(core.AzureSharedBlobDisk), string(core.AzureDedicatedBlobDisk), string(core.AzureManagedDisk)) + var supportedCachingModes = sets.New( + core.AzureDataDiskCachingNone, + core.AzureDataDiskCachingReadOnly, + core.AzureDataDiskCachingReadWrite) + + var supportedDiskKinds = sets.New( + core.AzureSharedBlobDisk, + core.AzureDedicatedBlobDisk, + core.AzureManagedDisk) diskURISupportedManaged := []string{"/subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id}"} diskURISupportedblob := []string{"https://{account-name}.blob.core.windows.net/{container-name}/{disk-name}.vhd"} @@ -1417,12 +1492,12 @@ func validateAzureDisk(azure *core.AzureDiskVolumeSource, fldPath *field.Path) f allErrs = append(allErrs, field.Required(fldPath.Child("diskURI"), "")) } - 
if azure.CachingMode != nil && !supportedCachingModes.Has(string(*azure.CachingMode)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("cachingMode"), *azure.CachingMode, supportedCachingModes.List())) + if azure.CachingMode != nil && !supportedCachingModes.Has(*azure.CachingMode) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("cachingMode"), *azure.CachingMode, sets.List(supportedCachingModes))) } - if azure.Kind != nil && !supportedDiskKinds.Has(string(*azure.Kind)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("kind"), *azure.Kind, supportedDiskKinds.List())) + if azure.Kind != nil && !supportedDiskKinds.Has(*azure.Kind) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("kind"), *azure.Kind, sets.List(supportedDiskKinds))) } // validate that DiskUri is the correct format @@ -1653,31 +1728,33 @@ var allowedTemplateObjectMetaFields = map[string]bool{ // PersistentVolumeSpecValidationOptions contains the different settings for PeristentVolume validation type PersistentVolumeSpecValidationOptions struct { - // Allow spec to contain the "ReadWiteOncePod" access mode - AllowReadWriteOncePod bool + // Allow users to modify the class of volume attributes + EnableVolumeAttributesClass bool } // ValidatePersistentVolumeName checks that a name is appropriate for a // PersistentVolumeName object. var ValidatePersistentVolumeName = apimachineryvalidation.NameIsDNSSubdomain -var supportedAccessModes = sets.NewString(string(core.ReadWriteOnce), string(core.ReadOnlyMany), string(core.ReadWriteMany)) +var supportedAccessModes = sets.New( + core.ReadWriteOnce, + core.ReadOnlyMany, + core.ReadWriteMany, + core.ReadWriteOncePod) -var supportedReclaimPolicy = sets.NewString(string(core.PersistentVolumeReclaimDelete), string(core.PersistentVolumeReclaimRecycle), string(core.PersistentVolumeReclaimRetain)) +var supportedReclaimPolicy = sets.New( + core.PersistentVolumeReclaimDelete, + core.PersistentVolumeReclaimRecycle, + core.PersistentVolumeReclaimRetain) -var supportedVolumeModes = sets.NewString(string(core.PersistentVolumeBlock), string(core.PersistentVolumeFilesystem)) +var supportedVolumeModes = sets.New(core.PersistentVolumeBlock, core.PersistentVolumeFilesystem) func ValidationOptionsForPersistentVolume(pv, oldPv *core.PersistentVolume) PersistentVolumeSpecValidationOptions { opts := PersistentVolumeSpecValidationOptions{ - AllowReadWriteOncePod: utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod), + EnableVolumeAttributesClass: utilfeature.DefaultMutableFeatureGate.Enabled(features.VolumeAttributesClass), } - if oldPv == nil { - // If there's no old PV, use the options based solely on feature enablement - return opts - } - if helper.ContainsAccessMode(oldPv.Spec.AccessModes, core.ReadWriteOncePod) { - // If the old object allowed "ReadWriteOncePod", continue to allow it in the new object - opts.AllowReadWriteOncePod = true + if oldPv != nil && oldPv.Spec.VolumeAttributesClassName != nil { + opts.EnableVolumeAttributesClass = true } return opts } @@ -1701,20 +1778,15 @@ func ValidatePersistentVolumeSpec(pvSpec *core.PersistentVolumeSpec, pvName stri allErrs = append(allErrs, field.Required(fldPath.Child("accessModes"), "")) } - expandedSupportedAccessModes := sets.StringKeySet(supportedAccessModes) - if opts.AllowReadWriteOncePod { - expandedSupportedAccessModes.Insert(string(core.ReadWriteOncePod)) - } - foundReadWriteOncePod, foundNonReadWriteOncePod := false, false for _, mode := range pvSpec.AccessModes { - if 
!expandedSupportedAccessModes.Has(string(mode)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("accessModes"), mode, expandedSupportedAccessModes.List())) + if !supportedAccessModes.Has(mode) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("accessModes"), mode, sets.List(supportedAccessModes))) } if mode == core.ReadWriteOncePod { foundReadWriteOncePod = true - } else if supportedAccessModes.Has(string(mode)) { + } else if supportedAccessModes.Has(mode) { foundNonReadWriteOncePod = true } } @@ -1728,7 +1800,7 @@ func ValidatePersistentVolumeSpec(pvSpec *core.PersistentVolumeSpec, pvName stri } if _, ok := pvSpec.Capacity[core.ResourceStorage]; !ok || len(pvSpec.Capacity) > 1 { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("capacity"), pvSpec.Capacity, []string{string(core.ResourceStorage)})) + allErrs = append(allErrs, field.NotSupported(fldPath.Child("capacity"), pvSpec.Capacity, []core.ResourceName{core.ResourceStorage})) } capPath := fldPath.Child("capacity") for r, qty := range pvSpec.Capacity { @@ -1737,14 +1809,14 @@ func ValidatePersistentVolumeSpec(pvSpec *core.PersistentVolumeSpec, pvName stri } } - if len(string(pvSpec.PersistentVolumeReclaimPolicy)) > 0 { + if len(pvSpec.PersistentVolumeReclaimPolicy) > 0 { if validateInlinePersistentVolumeSpec { if pvSpec.PersistentVolumeReclaimPolicy != core.PersistentVolumeReclaimRetain { allErrs = append(allErrs, field.Forbidden(fldPath.Child("persistentVolumeReclaimPolicy"), "may only be "+string(core.PersistentVolumeReclaimRetain)+" in the context of inline volumes")) } } else { - if !supportedReclaimPolicy.Has(string(pvSpec.PersistentVolumeReclaimPolicy)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("persistentVolumeReclaimPolicy"), pvSpec.PersistentVolumeReclaimPolicy, supportedReclaimPolicy.List())) + if !supportedReclaimPolicy.Has(pvSpec.PersistentVolumeReclaimPolicy) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("persistentVolumeReclaimPolicy"), pvSpec.PersistentVolumeReclaimPolicy, sets.List(supportedReclaimPolicy))) } } } @@ -1964,11 +2036,23 @@ func ValidatePersistentVolumeSpec(pvSpec *core.PersistentVolumeSpec, pvName stri allErrs = append(allErrs, field.Forbidden(fldPath.Child("volumeMode"), "may not specify volumeMode other than "+string(core.PersistentVolumeFilesystem)+" in the context of inline volumes")) } } else { - if !supportedVolumeModes.Has(string(*pvSpec.VolumeMode)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("volumeMode"), *pvSpec.VolumeMode, supportedVolumeModes.List())) + if !supportedVolumeModes.Has(*pvSpec.VolumeMode) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("volumeMode"), *pvSpec.VolumeMode, sets.List(supportedVolumeModes))) } } } + if pvSpec.VolumeAttributesClassName != nil && opts.EnableVolumeAttributesClass { + if len(*pvSpec.VolumeAttributesClassName) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("volumeAttributesClassName"), "an empty string is disallowed")) + } else { + for _, msg := range ValidateClassName(*pvSpec.VolumeAttributesClassName, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("volumeAttributesClassName"), *pvSpec.VolumeAttributesClassName, msg)) + } + } + if pvSpec.CSI == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("csi"), "has to be specified when using volumeAttributesClassName")) + } + } return allErrs } @@ -2003,6 +2087,17 @@ func ValidatePersistentVolumeUpdate(newPv, oldPv *core.PersistentVolume, opts Pe allErrs = 
append(allErrs, validatePvNodeAffinity(newPv.Spec.NodeAffinity, oldPv.Spec.NodeAffinity, field.NewPath("nodeAffinity"))...) } + if !apiequality.Semantic.DeepEqual(oldPv.Spec.VolumeAttributesClassName, newPv.Spec.VolumeAttributesClassName) { + if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeAttributesClass) { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "volumeAttributesClassName"), "update is forbidden when the VolumeAttributesClass feature gate is disabled")) + } + if opts.EnableVolumeAttributesClass { + if oldPv.Spec.VolumeAttributesClassName != nil && newPv.Spec.VolumeAttributesClassName == nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "volumeAttributesClassName"), "update from non-nil value to nil is forbidden")) + } + } + } + return allErrs } @@ -2016,24 +2111,35 @@ func ValidatePersistentVolumeStatusUpdate(newPv, oldPv *core.PersistentVolume) f } type PersistentVolumeClaimSpecValidationOptions struct { - // Allow spec to contain the "ReadWiteOncePod" access mode - AllowReadWriteOncePod bool // Allow users to recover from previously failing expansion operation EnableRecoverFromExpansionFailure bool // Allow to validate the label value of the label selector AllowInvalidLabelValueInSelector bool + // Allow to validate the API group of the data source and data source reference + AllowInvalidAPIGroupInDataSourceOrRef bool + // Allow users to modify the class of volume attributes + EnableVolumeAttributesClass bool } func ValidationOptionsForPersistentVolumeClaim(pvc, oldPvc *core.PersistentVolumeClaim) PersistentVolumeClaimSpecValidationOptions { opts := PersistentVolumeClaimSpecValidationOptions{ - AllowReadWriteOncePod: utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod), EnableRecoverFromExpansionFailure: utilfeature.DefaultFeatureGate.Enabled(features.RecoverVolumeExpansionFailure), AllowInvalidLabelValueInSelector: false, + EnableVolumeAttributesClass: utilfeature.DefaultFeatureGate.Enabled(features.VolumeAttributesClass), } if oldPvc == nil { // If there's no old PVC, use the options based solely on feature enablement return opts } + + // If the old object had an invalid API group in the data source or data source reference, continue to allow it in the new object + opts.AllowInvalidAPIGroupInDataSourceOrRef = allowInvalidAPIGroupInDataSourceOrRef(&oldPvc.Spec) + + if oldPvc.Spec.VolumeAttributesClassName != nil { + // If the old object had a volume attributes class, continue to validate it in the new object. 
+ opts.EnableVolumeAttributesClass = true + } + labelSelectorValidationOpts := unversionedvalidation.LabelSelectorValidationOptions{ AllowInvalidLabelValueInSelector: opts.AllowInvalidLabelValueInSelector, } @@ -2042,11 +2148,6 @@ func ValidationOptionsForPersistentVolumeClaim(pvc, oldPvc *core.PersistentVolum opts.AllowInvalidLabelValueInSelector = true } - if helper.ContainsAccessMode(oldPvc.Spec.AccessModes, core.ReadWriteOncePod) { - // If the old object allowed "ReadWriteOncePod", continue to allow it in the new object - opts.AllowReadWriteOncePod = true - } - if helper.ClaimContainsAllocatedResources(oldPvc) || helper.ClaimContainsAllocatedResourceStatus(oldPvc) { opts.EnableRecoverFromExpansionFailure = true @@ -2056,8 +2157,8 @@ func ValidationOptionsForPersistentVolumeClaim(pvc, oldPvc *core.PersistentVolum func ValidationOptionsForPersistentVolumeClaimTemplate(claimTemplate, oldClaimTemplate *core.PersistentVolumeClaimTemplate) PersistentVolumeClaimSpecValidationOptions { opts := PersistentVolumeClaimSpecValidationOptions{ - AllowReadWriteOncePod: utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod), AllowInvalidLabelValueInSelector: false, + EnableVolumeAttributesClass: utilfeature.DefaultFeatureGate.Enabled(features.VolumeAttributesClass), } if oldClaimTemplate == nil { // If there's no old PVC template, use the options based solely on feature enablement @@ -2070,13 +2171,20 @@ func ValidationOptionsForPersistentVolumeClaimTemplate(claimTemplate, oldClaimTe // If the old object had an invalid label selector, continue to allow it in the new object opts.AllowInvalidLabelValueInSelector = true } - if helper.ContainsAccessMode(oldClaimTemplate.Spec.AccessModes, core.ReadWriteOncePod) { - // If the old object allowed "ReadWriteOncePod", continue to allow it in the new object - opts.AllowReadWriteOncePod = true - } return opts } +// allowInvalidAPIGroupInDataSourceOrRef returns true if the spec contains a data source or data source reference with an API group +func allowInvalidAPIGroupInDataSourceOrRef(spec *core.PersistentVolumeClaimSpec) bool { + if spec.DataSource != nil && spec.DataSource.APIGroup != nil { + return true + } + if spec.DataSourceRef != nil && spec.DataSourceRef.APIGroup != nil { + return true + } + return false +} + // ValidatePersistentVolumeClaim validates a PersistentVolumeClaim func ValidatePersistentVolumeClaim(pvc *core.PersistentVolumeClaim, opts PersistentVolumeClaimSpecValidationOptions) field.ErrorList { allErrs := ValidateObjectMeta(&pvc.ObjectMeta, true, ValidatePersistentVolumeName, field.NewPath("metadata")) @@ -2085,7 +2193,7 @@ func ValidatePersistentVolumeClaim(pvc *core.PersistentVolumeClaim, opts Persist } // validateDataSource validates a DataSource/DataSourceRef in a PersistentVolumeClaimSpec -func validateDataSource(dataSource *core.TypedLocalObjectReference, fldPath *field.Path) field.ErrorList { +func validateDataSource(dataSource *core.TypedLocalObjectReference, fldPath *field.Path, allowInvalidAPIGroupInDataSourceOrRef bool) field.ErrorList { allErrs := field.ErrorList{} if len(dataSource.Name) == 0 { @@ -2101,12 +2209,17 @@ func validateDataSource(dataSource *core.TypedLocalObjectReference, fldPath *fie if len(apiGroup) == 0 && dataSource.Kind != "PersistentVolumeClaim" { allErrs = append(allErrs, field.Invalid(fldPath, dataSource.Kind, "must be 'PersistentVolumeClaim' when referencing the default apiGroup")) } + if len(apiGroup) > 0 && !allowInvalidAPIGroupInDataSourceOrRef { + for _, errString := range 
validation.IsDNS1123Subdomain(apiGroup) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("apiGroup"), apiGroup, errString)) + } + } return allErrs } // validateDataSourceRef validates a DataSourceRef in a PersistentVolumeClaimSpec -func validateDataSourceRef(dataSourceRef *core.TypedObjectReference, fldPath *field.Path) field.ErrorList { +func validateDataSourceRef(dataSourceRef *core.TypedObjectReference, fldPath *field.Path, allowInvalidAPIGroupInDataSourceOrRef bool) field.ErrorList { allErrs := field.ErrorList{} if len(dataSourceRef.Name) == 0 { @@ -2122,6 +2235,11 @@ func validateDataSourceRef(dataSourceRef *core.TypedObjectReference, fldPath *fi if len(apiGroup) == 0 && dataSourceRef.Kind != "PersistentVolumeClaim" { allErrs = append(allErrs, field.Invalid(fldPath, dataSourceRef.Kind, "must be 'PersistentVolumeClaim' when referencing the default apiGroup")) } + if len(apiGroup) > 0 && !allowInvalidAPIGroupInDataSourceOrRef { + for _, errString := range validation.IsDNS1123Subdomain(apiGroup) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("apiGroup"), apiGroup, errString)) + } + } if dataSourceRef.Namespace != nil && len(*dataSourceRef.Namespace) > 0 { for _, msg := range ValidateNameFunc(ValidateNamespaceName)(*dataSourceRef.Namespace, false) { @@ -2145,20 +2263,15 @@ func ValidatePersistentVolumeClaimSpec(spec *core.PersistentVolumeClaimSpec, fld allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, labelSelectorValidationOpts, fldPath.Child("selector"))...) } - expandedSupportedAccessModes := sets.StringKeySet(supportedAccessModes) - if opts.AllowReadWriteOncePod { - expandedSupportedAccessModes.Insert(string(core.ReadWriteOncePod)) - } - foundReadWriteOncePod, foundNonReadWriteOncePod := false, false for _, mode := range spec.AccessModes { - if !expandedSupportedAccessModes.Has(string(mode)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("accessModes"), mode, expandedSupportedAccessModes.List())) + if !supportedAccessModes.Has(mode) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("accessModes"), mode, sets.List(supportedAccessModes))) } if mode == core.ReadWriteOncePod { foundReadWriteOncePod = true - } else if supportedAccessModes.Has(string(mode)) { + } else if supportedAccessModes.Has(mode) { foundNonReadWriteOncePod = true } } @@ -2172,7 +2285,7 @@ func ValidatePersistentVolumeClaimSpec(spec *core.PersistentVolumeClaimSpec, fld } else if errs := ValidatePositiveQuantityValue(storageValue, fldPath.Child("resources").Key(string(core.ResourceStorage))); len(errs) > 0 { allErrs = append(allErrs, errs...) } else { - allErrs = append(allErrs, ValidateResourceQuantityValue(string(core.ResourceStorage), storageValue, fldPath.Child("resources").Key(string(core.ResourceStorage)))...) + allErrs = append(allErrs, ValidateResourceQuantityValue(core.ResourceStorage, storageValue, fldPath.Child("resources").Key(string(core.ResourceStorage)))...) 
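// Illustrative sketch, not from the vendored sources: how the new apiGroup check
// added to validateDataSource / validateDataSourceRef behaves. It assumes the
// vendored k8s.io/apimachinery/pkg/util/validation package; the sample group
// names below are hypothetical demo data.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	for _, apiGroup := range []string{"snapshot.storage.k8s.io", "Not_A_Subdomain!"} {
		// IsDNS1123Subdomain returns no messages for a valid value and one
		// message per violation otherwise; the validator wraps each message
		// in a field.Invalid error on the apiGroup child path.
		errs := validation.IsDNS1123Subdomain(apiGroup)
		fmt.Printf("%q -> %d violation(s): %v\n", apiGroup, len(errs), errs)
	}
}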
} if spec.StorageClassName != nil && len(*spec.StorageClassName) > 0 { @@ -2180,15 +2293,15 @@ func ValidatePersistentVolumeClaimSpec(spec *core.PersistentVolumeClaimSpec, fld allErrs = append(allErrs, field.Invalid(fldPath.Child("storageClassName"), *spec.StorageClassName, msg)) } } - if spec.VolumeMode != nil && !supportedVolumeModes.Has(string(*spec.VolumeMode)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("volumeMode"), *spec.VolumeMode, supportedVolumeModes.List())) + if spec.VolumeMode != nil && !supportedVolumeModes.Has(*spec.VolumeMode) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("volumeMode"), *spec.VolumeMode, sets.List(supportedVolumeModes))) } if spec.DataSource != nil { - allErrs = append(allErrs, validateDataSource(spec.DataSource, fldPath.Child("dataSource"))...) + allErrs = append(allErrs, validateDataSource(spec.DataSource, fldPath.Child("dataSource"), opts.AllowInvalidAPIGroupInDataSourceOrRef)...) } if spec.DataSourceRef != nil { - allErrs = append(allErrs, validateDataSourceRef(spec.DataSourceRef, fldPath.Child("dataSourceRef"))...) + allErrs = append(allErrs, validateDataSourceRef(spec.DataSourceRef, fldPath.Child("dataSourceRef"), opts.AllowInvalidAPIGroupInDataSourceOrRef)...) } if spec.DataSourceRef != nil && spec.DataSourceRef.Namespace != nil && len(*spec.DataSourceRef.Namespace) > 0 { if spec.DataSource != nil { @@ -2201,6 +2314,11 @@ func ValidatePersistentVolumeClaimSpec(spec *core.PersistentVolumeClaimSpec, fld "must match dataSourceRef")) } } + if spec.VolumeAttributesClassName != nil && len(*spec.VolumeAttributesClassName) > 0 && opts.EnableVolumeAttributesClass { + for _, msg := range ValidateClassName(*spec.VolumeAttributesClassName, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("volumeAttributesClassName"), *spec.VolumeAttributesClassName, msg)) + } + } return allErrs } @@ -2244,6 +2362,8 @@ func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *core.PersistentVolumeCl if newPvc.Status.Phase == core.ClaimBound && newPvcClone.Spec.Resources.Requests != nil { newPvcClone.Spec.Resources.Requests["storage"] = oldPvc.Spec.Resources.Requests["storage"] // +k8s:verify-mutation:reason=clone } + // lets make sure volume attributes class name is same. + newPvcClone.Spec.VolumeAttributesClassName = oldPvcClone.Spec.VolumeAttributesClassName // +k8s:verify-mutation:reason=clone oldSize := oldPvc.Spec.Resources.Requests["storage"] newSize := newPvc.Spec.Resources.Requests["storage"] @@ -2251,7 +2371,7 @@ func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *core.PersistentVolumeCl if !apiequality.Semantic.DeepEqual(newPvcClone.Spec, oldPvcClone.Spec) { specDiff := cmp.Diff(oldPvcClone.Spec, newPvcClone.Spec) - allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), fmt.Sprintf("spec is immutable after creation except resources.requests for bound claims\n%v", specDiff))) + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), fmt.Sprintf("spec is immutable after creation except resources.requests and volumeAttributesClassName for bound claims\n%v", specDiff))) } if newSize.Cmp(oldSize) < 0 { if !opts.EnableRecoverFromExpansionFailure { @@ -2268,6 +2388,21 @@ func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *core.PersistentVolumeCl allErrs = append(allErrs, ValidateImmutableField(newPvc.Spec.VolumeMode, oldPvc.Spec.VolumeMode, field.NewPath("volumeMode"))...) 
+ if !apiequality.Semantic.DeepEqual(oldPvc.Spec.VolumeAttributesClassName, newPvc.Spec.VolumeAttributesClassName) { + if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeAttributesClass) { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "volumeAttributesClassName"), "update is forbidden when the VolumeAttributesClass feature gate is disabled")) + } + if opts.EnableVolumeAttributesClass { + if oldPvc.Spec.VolumeAttributesClassName != nil { + if newPvc.Spec.VolumeAttributesClassName == nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "volumeAttributesClassName"), "update from non-nil value to nil is forbidden")) + } else if len(*newPvc.Spec.VolumeAttributesClassName) == 0 { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "volumeAttributesClassName"), "update from non-nil value to an empty string is forbidden")) + } + } + } + } + return allErrs } @@ -2311,17 +2446,17 @@ func validatePersistentVolumeClaimResourceKey(value string, fldPath *field.Path) // only allowed value is storage if helper.IsNativeResource(core.ResourceName(value)) { if core.ResourceName(value) != core.ResourceStorage { - return append(allErrs, field.NotSupported(fldPath, value, []string{string(core.ResourceStorage)})) + return append(allErrs, field.NotSupported(fldPath, value, []core.ResourceName{core.ResourceStorage})) } } return allErrs } -var resizeStatusSet = sets.NewString(string(core.PersistentVolumeClaimControllerResizeInProgress), - string(core.PersistentVolumeClaimControllerResizeFailed), - string(core.PersistentVolumeClaimNodeResizePending), - string(core.PersistentVolumeClaimNodeResizeInProgress), - string(core.PersistentVolumeClaimNodeResizeFailed)) +var resizeStatusSet = sets.New(core.PersistentVolumeClaimControllerResizeInProgress, + core.PersistentVolumeClaimControllerResizeFailed, + core.PersistentVolumeClaimNodeResizePending, + core.PersistentVolumeClaimNodeResizeInProgress, + core.PersistentVolumeClaimNodeResizeFailed) // ValidatePersistentVolumeClaimStatusUpdate validates an update to status of a PersistentVolumeClaim func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *core.PersistentVolumeClaim, validationOpts PersistentVolumeClaimSpecValidationOptions) field.ErrorList { @@ -2345,8 +2480,8 @@ func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *core.PersistentVo if errs := validatePersistentVolumeClaimResourceKey(k.String(), resizeStatusPath); len(errs) > 0 { allErrs = append(allErrs, errs...) } - if !resizeStatusSet.Has(string(v)) { - allErrs = append(allErrs, field.NotSupported(resizeStatusPath, k, resizeStatusSet.List())) + if !resizeStatusSet.Has(v) { + allErrs = append(allErrs, field.NotSupported(resizeStatusPath, k, sets.List(resizeStatusSet))) continue } } @@ -2361,19 +2496,22 @@ func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *core.PersistentVo if errs := validateBasicResource(qty, allocPath.Key(string(r))); len(errs) > 0 { allErrs = append(allErrs, errs...) } else { - allErrs = append(allErrs, ValidateResourceQuantityValue(string(core.ResourceStorage), qty, allocPath.Key(string(r)))...) + allErrs = append(allErrs, ValidateResourceQuantityValue(core.ResourceStorage, qty, allocPath.Key(string(r)))...) 
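// Illustrative sketch, not from the vendored sources: the typed-set pattern these
// hunks migrate to, replacing sets.NewString / StringSet.List with the generic
// sets.New / sets.List from k8s.io/apimachinery/pkg/util/sets. The accessMode
// type below is a hypothetical stand-in for the core API string types.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

type accessMode string

const (
	readWriteOnce accessMode = "ReadWriteOnce"
	readOnlyMany  accessMode = "ReadOnlyMany"
)

func main() {
	// sets.New infers Set[accessMode], so Has no longer needs string() conversions.
	supported := sets.New(readWriteOnce, readOnlyMany)
	fmt.Println(supported.Has(readWriteOnce)) // true

	// sets.List returns a sorted []accessMode, the typed replacement for .List().
	fmt.Println(sets.List(supported))
}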
} } } return allErrs } -var supportedPortProtocols = sets.NewString(string(core.ProtocolTCP), string(core.ProtocolUDP), string(core.ProtocolSCTP)) +var supportedPortProtocols = sets.New( + core.ProtocolTCP, + core.ProtocolUDP, + core.ProtocolSCTP) func validateContainerPorts(ports []core.ContainerPort, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - allNames := sets.String{} + allNames := sets.Set[string]{} for i, port := range ports { idxPath := fldPath.Index(i) if len(port.Name) > 0 { @@ -2401,8 +2539,8 @@ func validateContainerPorts(ports []core.ContainerPort, fldPath *field.Path) fie } if len(port.Protocol) == 0 { allErrs = append(allErrs, field.Required(idxPath.Child("protocol"), "")) - } else if !supportedPortProtocols.Has(string(port.Protocol)) { - allErrs = append(allErrs, field.NotSupported(idxPath.Child("protocol"), port.Protocol, supportedPortProtocols.List())) + } else if !supportedPortProtocols.Has(port.Protocol) { + allErrs = append(allErrs, field.NotSupported(idxPath.Child("protocol"), port.Protocol, sets.List(supportedPortProtocols))) } } return allErrs @@ -2426,7 +2564,7 @@ func ValidateEnv(vars []core.EnvVar, fldPath *field.Path, opts PodValidationOpti return allErrs } -var validEnvDownwardAPIFieldPathExpressions = sets.NewString( +var validEnvDownwardAPIFieldPathExpressions = sets.New( "metadata.name", "metadata.namespace", "metadata.uid", @@ -2438,9 +2576,16 @@ var validEnvDownwardAPIFieldPathExpressions = sets.NewString( "status.podIPs", ) -var validContainerResourceFieldPathExpressions = sets.NewString("limits.cpu", "limits.memory", "limits.ephemeral-storage", "requests.cpu", "requests.memory", "requests.ephemeral-storage") +var validContainerResourceFieldPathExpressions = sets.New( + "limits.cpu", + "limits.memory", + "limits.ephemeral-storage", + "requests.cpu", + "requests.memory", + "requests.ephemeral-storage", +) -var validContainerResourceFieldPathPrefixesWithDownwardAPIHugePages = sets.NewString(hugepagesRequestsPrefixDownwardAPI, hugepagesLimitsPrefixDownwardAPI) +var validContainerResourceFieldPathPrefixesWithDownwardAPIHugePages = sets.New(hugepagesRequestsPrefixDownwardAPI, hugepagesLimitsPrefixDownwardAPI) const hugepagesRequestsPrefixDownwardAPI string = `requests.hugepages-` const hugepagesLimitsPrefixDownwardAPI string = `limits.hugepages-` @@ -2486,7 +2631,7 @@ func validateEnvVarValueFrom(ev core.EnvVar, fldPath *field.Path, opts PodValida return allErrs } -func validateObjectFieldSelector(fs *core.ObjectFieldSelector, expressions *sets.String, fldPath *field.Path) field.ErrorList { +func validateObjectFieldSelector(fs *core.ObjectFieldSelector, expressions *sets.Set[string], fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(fs.APIVersion) == 0 { @@ -2514,7 +2659,7 @@ func validateObjectFieldSelector(fs *core.ObjectFieldSelector, expressions *sets allErrs = append(allErrs, field.Invalid(fldPath, path, "does not support subscript")) } } else if !expressions.Has(path) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("fieldPath"), path, expressions.List())) + allErrs = append(allErrs, field.NotSupported(fldPath.Child("fieldPath"), path, sets.List(*expressions))) return allErrs } @@ -2531,7 +2676,7 @@ func validateDownwardAPIHostIPs(fieldSel *core.ObjectFieldSelector, fldPath *fie return allErrs } -func validateContainerResourceFieldSelector(fs *core.ResourceFieldSelector, expressions *sets.String, prefixes *sets.String, fldPath *field.Path, volume bool) field.ErrorList { +func 
validateContainerResourceFieldSelector(fs *core.ResourceFieldSelector, expressions *sets.Set[string], prefixes *sets.Set[string], fldPath *field.Path, volume bool) field.ErrorList { allErrs := field.ErrorList{} if volume && len(fs.ContainerName) == 0 { @@ -2542,14 +2687,14 @@ func validateContainerResourceFieldSelector(fs *core.ResourceFieldSelector, expr // check if the prefix is present foundPrefix := false if prefixes != nil { - for _, prefix := range prefixes.List() { + for _, prefix := range sets.List(*prefixes) { if strings.HasPrefix(fs.Resource, prefix) { foundPrefix = true } } } if !foundPrefix { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("resource"), fs.Resource, expressions.List())) + allErrs = append(allErrs, field.NotSupported(fldPath.Child("resource"), fs.Resource, sets.List(*expressions))) } } allErrs = append(allErrs, validateContainerResourceDivisor(fs.Resource, fs.Divisor, fldPath)...) @@ -2609,10 +2754,19 @@ func validateSecretEnvSource(secretSource *core.SecretEnvSource, fldPath *field. return allErrs } -var validContainerResourceDivisorForCPU = sets.NewString("1m", "1") -var validContainerResourceDivisorForMemory = sets.NewString("1", "1k", "1M", "1G", "1T", "1P", "1E", "1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei") -var validContainerResourceDivisorForHugePages = sets.NewString("1", "1k", "1M", "1G", "1T", "1P", "1E", "1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei") -var validContainerResourceDivisorForEphemeralStorage = sets.NewString("1", "1k", "1M", "1G", "1T", "1P", "1E", "1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei") +var validContainerResourceDivisorForCPU = sets.New("1m", "1") +var validContainerResourceDivisorForMemory = sets.New( + "1", + "1k", "1M", "1G", "1T", "1P", "1E", + "1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei") +var validContainerResourceDivisorForHugePages = sets.New( + "1", + "1k", "1M", "1G", "1T", "1P", "1E", + "1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei") +var validContainerResourceDivisorForEphemeralStorage = sets.New( + "1", + "1k", "1M", "1G", "1T", "1P", "1E", + "1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei") func validateContainerResourceDivisor(rName string, divisor resource.Quantity, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} @@ -2700,7 +2854,7 @@ func GetVolumeDeviceMap(devices []core.VolumeDevice) map[string]string { func ValidateVolumeMounts(mounts []core.VolumeMount, voldevices map[string]string, volumes map[string]core.VolumeSource, container *core.Container, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - mountpoints := sets.NewString() + mountpoints := sets.New[string]() for i, mnt := range mounts { idxPath := fldPath.Index(i) @@ -2747,8 +2901,8 @@ func ValidateVolumeMounts(mounts []core.VolumeMount, voldevices map[string]strin func ValidateVolumeDevices(devices []core.VolumeDevice, volmounts map[string]string, volumes map[string]core.VolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - devicepath := sets.NewString() - devicename := sets.NewString() + devicepath := sets.New[string]() + devicename := sets.New[string]() for i, dev := range devices { idxPath := fldPath.Index(i) @@ -2795,7 +2949,7 @@ func ValidateVolumeDevices(devices []core.VolumeDevice, volmounts map[string]str func validatePodResourceClaims(podMeta *metav1.ObjectMeta, claims []core.PodResourceClaim, fldPath *field.Path) field.ErrorList { var allErrs field.ErrorList - podClaimNames := sets.NewString() + podClaimNames := sets.New[string]() for i, claim := range claims { allErrs = append(allErrs, 
validatePodResourceClaim(podMeta, claim, &podClaimNames, fldPath.Index(i))...) } @@ -2805,8 +2959,8 @@ func validatePodResourceClaims(podMeta *metav1.ObjectMeta, claims []core.PodReso // gatherPodResourceClaimNames returns a set of all non-empty // PodResourceClaim.Name values. Validation that those names are valid is // handled by validatePodResourceClaims. -func gatherPodResourceClaimNames(claims []core.PodResourceClaim) sets.String { - podClaimNames := sets.String{} +func gatherPodResourceClaimNames(claims []core.PodResourceClaim) sets.Set[string] { + podClaimNames := sets.Set[string]{} for _, claim := range claims { if claim.Name != "" { podClaimNames.Insert(claim.Name) @@ -2815,7 +2969,7 @@ func gatherPodResourceClaimNames(claims []core.PodResourceClaim) sets.String { return podClaimNames } -func validatePodResourceClaim(podMeta *metav1.ObjectMeta, claim core.PodResourceClaim, podClaimNames *sets.String, fldPath *field.Path) field.ErrorList { +func validatePodResourceClaim(podMeta *metav1.ObjectMeta, claim core.PodResourceClaim, podClaimNames *sets.Set[string], fldPath *field.Path) field.ErrorList { var allErrs field.ErrorList if claim.Name == "" { allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) @@ -2859,52 +3013,52 @@ func validatePodResourceClaimSource(claimSource core.ClaimSource, fldPath *field return allErrs } -func validateLivenessProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList { +func validateLivenessProbe(probe *core.Probe, gracePeriod int64, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if probe == nil { return allErrs } - allErrs = append(allErrs, validateProbe(probe, fldPath)...) + allErrs = append(allErrs, validateProbe(probe, gracePeriod, fldPath)...) if probe.SuccessThreshold != 1 { allErrs = append(allErrs, field.Invalid(fldPath.Child("successThreshold"), probe.SuccessThreshold, "must be 1")) } return allErrs } -func validateReadinessProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList { +func validateReadinessProbe(probe *core.Probe, gracePeriod int64, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if probe == nil { return allErrs } - allErrs = append(allErrs, validateProbe(probe, fldPath)...) + allErrs = append(allErrs, validateProbe(probe, gracePeriod, fldPath)...) if probe.TerminationGracePeriodSeconds != nil { allErrs = append(allErrs, field.Invalid(fldPath.Child("terminationGracePeriodSeconds"), probe.TerminationGracePeriodSeconds, "must not be set for readinessProbes")) } return allErrs } -func validateStartupProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList { +func validateStartupProbe(probe *core.Probe, gracePeriod int64, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if probe == nil { return allErrs } - allErrs = append(allErrs, validateProbe(probe, fldPath)...) + allErrs = append(allErrs, validateProbe(probe, gracePeriod, fldPath)...) if probe.SuccessThreshold != 1 { allErrs = append(allErrs, field.Invalid(fldPath.Child("successThreshold"), probe.SuccessThreshold, "must be 1")) } return allErrs } -func validateProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList { +func validateProbe(probe *core.Probe, gracePeriod int64, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if probe == nil { return allErrs } - allErrs = append(allErrs, validateHandler(handlerFromProbe(&probe.ProbeHandler), fldPath)...) + allErrs = append(allErrs, validateHandler(handlerFromProbe(&probe.ProbeHandler), gracePeriod, fldPath)...) 
allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.InitialDelaySeconds), fldPath.Child("initialDelaySeconds"))...) allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.TimeoutSeconds), fldPath.Child("timeoutSeconds"))...) @@ -2927,7 +3081,7 @@ func validateInitContainerRestartPolicy(restartPolicy *core.ContainerRestartPoli case core.ContainerRestartPolicyAlways: break default: - validValues := []string{string(core.ContainerRestartPolicyAlways)} + validValues := []core.ContainerRestartPolicy{core.ContainerRestartPolicyAlways} allErrors = append(allErrors, field.NotSupported(fldPath, *restartPolicy, validValues)) } @@ -2939,6 +3093,7 @@ type commonHandler struct { HTTPGet *core.HTTPGetAction TCPSocket *core.TCPSocketAction GRPC *core.GRPCAction + Sleep *core.SleepAction } func handlerFromProbe(ph *core.ProbeHandler) commonHandler { @@ -2955,9 +3110,19 @@ func handlerFromLifecycle(lh *core.LifecycleHandler) commonHandler { Exec: lh.Exec, HTTPGet: lh.HTTPGet, TCPSocket: lh.TCPSocket, + Sleep: lh.Sleep, } } +func validateSleepAction(sleep *core.SleepAction, gracePeriod int64, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + if sleep.Seconds <= 0 || sleep.Seconds > gracePeriod { + invalidStr := fmt.Sprintf("must be greater than 0 and less than terminationGracePeriodSeconds (%d)", gracePeriod) + allErrors = append(allErrors, field.Invalid(fldPath, sleep.Seconds, invalidStr)) + } + return allErrors +} + func validateClientIPAffinityConfig(config *core.SessionAffinityConfig, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if config == nil { @@ -2987,7 +3152,7 @@ func validateAffinityTimeout(timeout *int32, fldPath *field.Path) field.ErrorLis // AccumulateUniqueHostPorts extracts each HostPort of each Container, // accumulating the results and returning an error if any ports conflict. -func AccumulateUniqueHostPorts(containers []core.Container, accumulator *sets.String, fldPath *field.Path) field.ErrorList { +func AccumulateUniqueHostPorts(containers []core.Container, accumulator *sets.Set[string], fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for ci, ctr := range containers { @@ -3013,7 +3178,7 @@ func AccumulateUniqueHostPorts(containers []core.Container, accumulator *sets.St // checkHostPortConflicts checks for colliding Port.HostPort values across // a slice of containers. func checkHostPortConflicts(containers []core.Container, fldPath *field.Path) field.ErrorList { - allPorts := sets.String{} + allPorts := sets.Set[string]{} return AccumulateUniqueHostPorts(containers, &allPorts, fldPath) } @@ -3025,7 +3190,7 @@ func validateExecAction(exec *core.ExecAction, fldPath *field.Path) field.ErrorL return allErrors } -var supportedHTTPSchemes = sets.NewString(string(core.URISchemeHTTP), string(core.URISchemeHTTPS)) +var supportedHTTPSchemes = sets.New(core.URISchemeHTTP, core.URISchemeHTTPS) func validateHTTPGetAction(http *core.HTTPGetAction, fldPath *field.Path) field.ErrorList { allErrors := field.ErrorList{} @@ -3033,8 +3198,8 @@ func validateHTTPGetAction(http *core.HTTPGetAction, fldPath *field.Path) field. allErrors = append(allErrors, field.Required(fldPath.Child("path"), "")) } allErrors = append(allErrors, ValidatePortNumOrName(http.Port, fldPath.Child("port"))...) 
- if !supportedHTTPSchemes.Has(string(http.Scheme)) { - allErrors = append(allErrors, field.NotSupported(fldPath.Child("scheme"), http.Scheme, supportedHTTPSchemes.List())) + if !supportedHTTPSchemes.Has(http.Scheme) { + allErrors = append(allErrors, field.NotSupported(fldPath.Child("scheme"), http.Scheme, sets.List(supportedHTTPSchemes))) } for _, header := range http.HTTPHeaders { for _, msg := range validation.IsHTTPHeaderName(header.Name) { @@ -3066,7 +3231,7 @@ func validateTCPSocketAction(tcp *core.TCPSocketAction, fldPath *field.Path) fie func validateGRPCAction(grpc *core.GRPCAction, fldPath *field.Path) field.ErrorList { return ValidatePortNumOrName(intstr.FromInt32(grpc.Port), fldPath.Child("port")) } -func validateHandler(handler commonHandler, fldPath *field.Path) field.ErrorList { +func validateHandler(handler commonHandler, gracePeriod int64, fldPath *field.Path) field.ErrorList { numHandlers := 0 allErrors := field.ErrorList{} if handler.Exec != nil { @@ -3101,24 +3266,35 @@ func validateHandler(handler commonHandler, fldPath *field.Path) field.ErrorList allErrors = append(allErrors, validateGRPCAction(handler.GRPC, fldPath.Child("grpc"))...) } } + if handler.Sleep != nil { + if numHandlers > 0 { + allErrors = append(allErrors, field.Forbidden(fldPath.Child("sleep"), "may not specify more than 1 handler type")) + } else { + numHandlers++ + allErrors = append(allErrors, validateSleepAction(handler.Sleep, gracePeriod, fldPath.Child("sleep"))...) + } + } if numHandlers == 0 { allErrors = append(allErrors, field.Required(fldPath, "must specify a handler type")) } return allErrors } -func validateLifecycle(lifecycle *core.Lifecycle, fldPath *field.Path) field.ErrorList { +func validateLifecycle(lifecycle *core.Lifecycle, gracePeriod int64, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if lifecycle.PostStart != nil { - allErrs = append(allErrs, validateHandler(handlerFromLifecycle(lifecycle.PostStart), fldPath.Child("postStart"))...) + allErrs = append(allErrs, validateHandler(handlerFromLifecycle(lifecycle.PostStart), gracePeriod, fldPath.Child("postStart"))...) } if lifecycle.PreStop != nil { - allErrs = append(allErrs, validateHandler(handlerFromLifecycle(lifecycle.PreStop), fldPath.Child("preStop"))...) + allErrs = append(allErrs, validateHandler(handlerFromLifecycle(lifecycle.PreStop), gracePeriod, fldPath.Child("preStop"))...) 
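// Illustrative sketch, not from the vendored sources: the range rule enforced by
// the validateSleepAction hunk above. A sleep lifecycle handler is accepted only
// when 0 < seconds <= the pod's terminationGracePeriodSeconds. The helper name
// and values below are hypothetical, for demonstration only.
package main

import "fmt"

func sleepSecondsValid(seconds, gracePeriod int64) bool {
	return seconds > 0 && seconds <= gracePeriod
}

func main() {
	const gracePeriod int64 = 30 // pod-level terminationGracePeriodSeconds
	for _, s := range []int64{0, 5, 30, 31} {
		fmt.Printf("sleep.seconds=%d valid=%v\n", s, sleepSecondsValid(s, gracePeriod))
	}
}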
} return allErrs } -var supportedPullPolicies = sets.NewString(string(core.PullAlways), string(core.PullIfNotPresent), string(core.PullNever)) +var supportedPullPolicies = sets.New( + core.PullAlways, + core.PullIfNotPresent, + core.PullNever) func validatePullPolicy(policy core.PullPolicy, fldPath *field.Path) field.ErrorList { allErrors := field.ErrorList{} @@ -3129,16 +3305,16 @@ func validatePullPolicy(policy core.PullPolicy, fldPath *field.Path) field.Error case "": allErrors = append(allErrors, field.Required(fldPath, "")) default: - allErrors = append(allErrors, field.NotSupported(fldPath, policy, supportedPullPolicies.List())) + allErrors = append(allErrors, field.NotSupported(fldPath, policy, sets.List(supportedPullPolicies))) } return allErrors } -var supportedResizeResources = sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory)) -var supportedResizePolicies = sets.NewString(string(core.NotRequired), string(core.RestartContainer)) +var supportedResizeResources = sets.New(core.ResourceCPU, core.ResourceMemory) +var supportedResizePolicies = sets.New(core.NotRequired, core.RestartContainer) -func validateResizePolicy(policyList []core.ContainerResizePolicy, fldPath *field.Path) field.ErrorList { +func validateResizePolicy(policyList []core.ContainerResizePolicy, fldPath *field.Path, podRestartPolicy *core.RestartPolicy) field.ErrorList { allErrors := field.ErrorList{} // validate that resource name is not repeated, supported resource names and policy values are specified @@ -3153,14 +3329,18 @@ func validateResizePolicy(policyList []core.ContainerResizePolicy, fldPath *fiel case "": allErrors = append(allErrors, field.Required(fldPath, "")) default: - allErrors = append(allErrors, field.NotSupported(fldPath, p.ResourceName, supportedResizeResources.List())) + allErrors = append(allErrors, field.NotSupported(fldPath, p.ResourceName, sets.List(supportedResizeResources))) } switch p.RestartPolicy { case core.NotRequired, core.RestartContainer: case "": allErrors = append(allErrors, field.Required(fldPath, "")) default: - allErrors = append(allErrors, field.NotSupported(fldPath, p.RestartPolicy, supportedResizePolicies.List())) + allErrors = append(allErrors, field.NotSupported(fldPath, p.RestartPolicy, sets.List(supportedResizePolicies))) + } + + if *podRestartPolicy == core.RestartPolicyNever && p.RestartPolicy != core.NotRequired { + allErrors = append(allErrors, field.Invalid(fldPath, p.RestartPolicy, "must be 'NotRequired' when `restartPolicy` is 'Never'")) } } return allErrors @@ -3168,14 +3348,14 @@ func validateResizePolicy(policyList []core.ContainerResizePolicy, fldPath *fiel // validateEphemeralContainers is called by pod spec and template validation to validate the list of ephemeral containers. // Note that this is called for pod template even though ephemeral containers aren't allowed in pod templates. 
-func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer, containers, initContainers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, fldPath *field.Path, opts PodValidationOptions) field.ErrorList { +func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer, containers, initContainers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.Set[string], fldPath *field.Path, opts PodValidationOptions, podRestartPolicy *core.RestartPolicy) field.ErrorList { var allErrs field.ErrorList if len(ephemeralContainers) == 0 { return allErrs } - otherNames, allNames := sets.String{}, sets.String{} + otherNames, allNames := sets.Set[string]{}, sets.Set[string]{} for _, c := range containers { otherNames.Insert(c.Name) allNames.Insert(c.Name) @@ -3189,7 +3369,7 @@ func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer, idxPath := fldPath.Index(i) c := (*core.Container)(&ec.EphemeralContainerCommon) - allErrs = append(allErrs, validateContainerCommon(c, volumes, podClaimNames, idxPath, opts)...) + allErrs = append(allErrs, validateContainerCommon(c, volumes, podClaimNames, idxPath, opts, podRestartPolicy)...) // Ephemeral containers don't need looser constraints for pod templates, so it's convenient to apply both validations // here where we've already converted EphemeralContainerCommon to Container. allErrs = append(allErrs, validateContainerOnlyForPod(c, idxPath)...) @@ -3251,10 +3431,10 @@ func validateFieldAllowList(value interface{}, allowedFields map[string]bool, er } // validateInitContainers is called by pod spec and template validation to validate the list of init containers -func validateInitContainers(containers []core.Container, regularContainers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, fldPath *field.Path, opts PodValidationOptions) field.ErrorList { +func validateInitContainers(containers []core.Container, regularContainers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.Set[string], gracePeriod int64, fldPath *field.Path, opts PodValidationOptions, podRestartPolicy *core.RestartPolicy) field.ErrorList { var allErrs field.ErrorList - allNames := sets.String{} + allNames := sets.Set[string]{} for _, ctr := range regularContainers { allNames.Insert(ctr.Name) } @@ -3262,7 +3442,7 @@ func validateInitContainers(containers []core.Container, regularContainers []cor idxPath := fldPath.Index(i) // Apply the validation common to all container types - allErrs = append(allErrs, validateContainerCommon(&ctr, volumes, podClaimNames, idxPath, opts)...) + allErrs = append(allErrs, validateContainerCommon(&ctr, volumes, podClaimNames, idxPath, opts, podRestartPolicy)...) restartAlways := false // Apply the validation specific to init containers @@ -3285,11 +3465,11 @@ func validateInitContainers(containers []core.Container, regularContainers []cor switch { case restartAlways: if ctr.Lifecycle != nil { - allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, idxPath.Child("lifecycle"))...) + allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, gracePeriod, idxPath.Child("lifecycle"))...) } - allErrs = append(allErrs, validateLivenessProbe(ctr.LivenessProbe, idxPath.Child("livenessProbe"))...) - allErrs = append(allErrs, validateReadinessProbe(ctr.ReadinessProbe, idxPath.Child("readinessProbe"))...) - allErrs = append(allErrs, validateStartupProbe(ctr.StartupProbe, idxPath.Child("startupProbe"))...) 
+ allErrs = append(allErrs, validateLivenessProbe(ctr.LivenessProbe, gracePeriod, idxPath.Child("livenessProbe"))...) + allErrs = append(allErrs, validateReadinessProbe(ctr.ReadinessProbe, gracePeriod, idxPath.Child("readinessProbe"))...) + allErrs = append(allErrs, validateStartupProbe(ctr.StartupProbe, gracePeriod, idxPath.Child("startupProbe"))...) default: // These fields are disallowed for init containers. @@ -3317,7 +3497,7 @@ func validateInitContainers(containers []core.Container, regularContainers []cor // validateContainerCommon applies validation common to all container types. It's called by regular, init, and ephemeral // container list validation to require a properly formatted name, image, etc. -func validateContainerCommon(ctr *core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, path *field.Path, opts PodValidationOptions) field.ErrorList { +func validateContainerCommon(ctr *core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.Set[string], path *field.Path, opts PodValidationOptions, podRestartPolicy *core.RestartPolicy) field.ErrorList { var allErrs field.ErrorList namePath := path.Child("name") @@ -3339,9 +3519,9 @@ func validateContainerCommon(ctr *core.Container, volumes map[string]core.Volume case "": allErrs = append(allErrs, field.Required(path.Child("terminationMessagePolicy"), "")) default: - supported := []string{ - string(core.TerminationMessageReadFile), - string(core.TerminationMessageFallbackToLogsOnError), + supported := []core.TerminationMessagePolicy{ + core.TerminationMessageReadFile, + core.TerminationMessageFallbackToLogsOnError, } allErrs = append(allErrs, field.NotSupported(path.Child("terminationMessagePolicy"), ctr.TerminationMessagePolicy, supported)) } @@ -3355,7 +3535,7 @@ func validateContainerCommon(ctr *core.Container, volumes map[string]core.Volume allErrs = append(allErrs, ValidateVolumeDevices(ctr.VolumeDevices, volMounts, volumes, path.Child("volumeDevices"))...) allErrs = append(allErrs, validatePullPolicy(ctr.ImagePullPolicy, path.Child("imagePullPolicy"))...) allErrs = append(allErrs, ValidateResourceRequirements(&ctr.Resources, podClaimNames, path.Child("resources"), opts)...) - allErrs = append(allErrs, validateResizePolicy(ctr.ResizePolicy, path.Child("resizePolicy"))...) + allErrs = append(allErrs, validateResizePolicy(ctr.ResizePolicy, path.Child("resizePolicy"), podRestartPolicy)...) allErrs = append(allErrs, ValidateSecurityContext(ctr.SecurityContext, path.Child("securityContext"))...) return allErrs } @@ -3365,7 +3545,7 @@ func validateHostUsers(spec *core.PodSpec, fldPath *field.Path) field.ErrorList // Only make the following checks if hostUsers is false (otherwise, the container uses the // same userns as the host, and so there isn't anything to check). - if spec.SecurityContext == nil || spec.SecurityContext.HostUsers == nil || *spec.SecurityContext.HostUsers == true { + if spec.SecurityContext == nil || spec.SecurityContext.HostUsers == nil || *spec.SecurityContext.HostUsers { return allErrs } @@ -3389,19 +3569,19 @@ func validateHostUsers(spec *core.PodSpec, fldPath *field.Path) field.ErrorList } // validateContainers is called by pod spec and template validation to validate the list of regular containers. 
-func validateContainers(containers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, fldPath *field.Path, opts PodValidationOptions) field.ErrorList { +func validateContainers(containers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.Set[string], gracePeriod int64, fldPath *field.Path, opts PodValidationOptions, podRestartPolicy *core.RestartPolicy) field.ErrorList { allErrs := field.ErrorList{} if len(containers) == 0 { return append(allErrs, field.Required(fldPath, "")) } - allNames := sets.String{} + allNames := sets.Set[string]{} for i, ctr := range containers { path := fldPath.Index(i) // Apply validation common to all containers - allErrs = append(allErrs, validateContainerCommon(&ctr, volumes, podClaimNames, path, opts)...) + allErrs = append(allErrs, validateContainerCommon(&ctr, volumes, podClaimNames, path, opts, podRestartPolicy)...) // Container names must be unique within the list of regular containers. // Collisions with init or ephemeral container names will be detected by the init or ephemeral @@ -3417,11 +3597,11 @@ func validateContainers(containers []core.Container, volumes map[string]core.Vol // Regular init container and ephemeral container validation will return // field.Forbidden() for these paths. if ctr.Lifecycle != nil { - allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, path.Child("lifecycle"))...) + allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, gracePeriod, path.Child("lifecycle"))...) } - allErrs = append(allErrs, validateLivenessProbe(ctr.LivenessProbe, path.Child("livenessProbe"))...) - allErrs = append(allErrs, validateReadinessProbe(ctr.ReadinessProbe, path.Child("readinessProbe"))...) - allErrs = append(allErrs, validateStartupProbe(ctr.StartupProbe, path.Child("startupProbe"))...) + allErrs = append(allErrs, validateLivenessProbe(ctr.LivenessProbe, gracePeriod, path.Child("livenessProbe"))...) + allErrs = append(allErrs, validateReadinessProbe(ctr.ReadinessProbe, gracePeriod, path.Child("readinessProbe"))...) + allErrs = append(allErrs, validateStartupProbe(ctr.StartupProbe, gracePeriod, path.Child("startupProbe"))...) 
// These fields are disallowed for regular containers if ctr.RestartPolicy != nil { @@ -3443,7 +3623,7 @@ func validateRestartPolicy(restartPolicy *core.RestartPolicy, fldPath *field.Pat case "": allErrors = append(allErrors, field.Required(fldPath, "")) default: - validValues := []string{string(core.RestartPolicyAlways), string(core.RestartPolicyOnFailure), string(core.RestartPolicyNever)} + validValues := []core.RestartPolicy{core.RestartPolicyAlways, core.RestartPolicyOnFailure, core.RestartPolicyNever} allErrors = append(allErrors, field.NotSupported(fldPath, *restartPolicy, validValues)) } @@ -3457,7 +3637,7 @@ func ValidatePreemptionPolicy(preemptionPolicy *core.PreemptionPolicy, fldPath * case "": allErrors = append(allErrors, field.Required(fldPath, "")) default: - validValues := []string{string(core.PreemptLowerPriority), string(core.PreemptNever)} + validValues := []core.PreemptionPolicy{core.PreemptLowerPriority, core.PreemptNever} allErrors = append(allErrors, field.NotSupported(fldPath, preemptionPolicy, validValues)) } return allErrors @@ -3470,18 +3650,18 @@ func validateDNSPolicy(dnsPolicy *core.DNSPolicy, fldPath *field.Path) field.Err case "": allErrors = append(allErrors, field.Required(fldPath, "")) default: - validValues := []string{string(core.DNSClusterFirstWithHostNet), string(core.DNSClusterFirst), string(core.DNSDefault), string(core.DNSNone)} + validValues := []core.DNSPolicy{core.DNSClusterFirstWithHostNet, core.DNSClusterFirst, core.DNSDefault, core.DNSNone} allErrors = append(allErrors, field.NotSupported(fldPath, dnsPolicy, validValues)) } return allErrors } -var validFSGroupChangePolicies = sets.NewString(string(core.FSGroupChangeOnRootMismatch), string(core.FSGroupChangeAlways)) +var validFSGroupChangePolicies = sets.New(core.FSGroupChangeOnRootMismatch, core.FSGroupChangeAlways) func validateFSGroupChangePolicy(fsGroupPolicy *core.PodFSGroupChangePolicy, fldPath *field.Path) field.ErrorList { allErrors := field.ErrorList{} - if !validFSGroupChangePolicies.Has(string(*fsGroupPolicy)) { - allErrors = append(allErrors, field.NotSupported(fldPath, fsGroupPolicy, validFSGroupChangePolicies.List())) + if !validFSGroupChangePolicies.Has(*fsGroupPolicy) { + allErrors = append(allErrors, field.NotSupported(fldPath, fsGroupPolicy, sets.List(validFSGroupChangePolicies))) } return allErrors } @@ -3508,7 +3688,7 @@ func validateReadinessGates(readinessGates []core.PodReadinessGate, fldPath *fie func validateSchedulingGates(schedulingGates []core.PodSchedulingGate, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} // There should be no duplicates in the list of scheduling gates. - seen := sets.String{} + seen := sets.Set[string]{} for i, schedulingGate := range schedulingGates { allErrs = append(allErrs, ValidateQualifiedName(schedulingGate.Name, fldPath.Index(i))...) 
if seen.Has(schedulingGate.Name) { @@ -3647,12 +3827,12 @@ func validateTaintEffect(effect *core.TaintEffect, allowEmpty bool, fldPath *fie case core.TaintEffectNoSchedule, core.TaintEffectPreferNoSchedule, core.TaintEffectNoExecute: // case core.TaintEffectNoSchedule, core.TaintEffectPreferNoSchedule, core.TaintEffectNoScheduleNoAdmit, core.TaintEffectNoExecute: default: - validValues := []string{ - string(core.TaintEffectNoSchedule), - string(core.TaintEffectPreferNoSchedule), - string(core.TaintEffectNoExecute), + validValues := []core.TaintEffect{ + core.TaintEffectNoSchedule, + core.TaintEffectPreferNoSchedule, + core.TaintEffectNoExecute, // TODO: Uncomment this block when implement TaintEffectNoScheduleNoAdmit. - // string(core.TaintEffectNoScheduleNoAdmit), + // core.TaintEffectNoScheduleNoAdmit, } allErrors = append(allErrors, field.NotSupported(fldPath, *effect, validValues)) } @@ -3751,7 +3931,7 @@ func ValidateTolerations(tolerations []core.Toleration, fldPath *field.Path) fie allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration, "value must be empty when `operator` is 'Exists'")) } default: - validValues := []string{string(core.TolerationOpEqual), string(core.TolerationOpExists)} + validValues := []core.TolerationOperator{core.TolerationOpEqual, core.TolerationOpExists} allErrors = append(allErrors, field.NotSupported(idxPath.Child("operator"), toleration.Operator, validValues)) } @@ -3798,6 +3978,8 @@ type PodValidationOptions struct { AllowInvalidTopologySpreadConstraintLabelSelector bool // Allow node selector additions for gated pods. AllowMutableNodeSelectorAndNodeAffinity bool + // Allow namespaced sysctls in hostNet and hostIPC pods + AllowNamespacedSysctlsForHostNetAndHostIPC bool // The top-level resource being validated is a Pod, not just a PodSpec // embedded in some other resource. ResourceIsPod bool @@ -3870,7 +4052,7 @@ func validatePodIPs(pod *core.Pod) field.ErrorList { } // There should be no duplicates in list of Pod.PodIPs - seen := sets.String{} // := make(map[string]int) + seen := sets.Set[string]{} // := make(map[string]int) for i, podIP := range pod.Status.PodIPs { if seen.Has(podIP.IP) { allErrs = append(allErrs, field.Duplicate(podIPsField.Index(i), podIP)) @@ -3908,7 +4090,7 @@ func validateHostIPs(pod *core.Pod) field.ErrorList { // - validate for dual stack // - validate for duplication if len(pod.Status.HostIPs) > 1 { - seen := sets.String{} + seen := sets.Set[string]{} hostIPs := make([]string, 0, len(pod.Status.HostIPs)) // There should be no duplicates in list of Pod.HostIPs @@ -3943,13 +4125,19 @@ func validateHostIPs(pod *core.Pod) field.ErrorList { func ValidatePodSpec(spec *core.PodSpec, podMeta *metav1.ObjectMeta, fldPath *field.Path, opts PodValidationOptions) field.ErrorList { allErrs := field.ErrorList{} + var gracePeriod int64 + if spec.TerminationGracePeriodSeconds != nil { + // this could happen in tests + gracePeriod = *spec.TerminationGracePeriodSeconds + } + vols, vErrs := ValidateVolumes(spec.Volumes, podMeta, fldPath.Child("volumes"), opts) allErrs = append(allErrs, vErrs...) podClaimNames := gatherPodResourceClaimNames(spec.ResourceClaims) allErrs = append(allErrs, validatePodResourceClaims(podMeta, spec.ResourceClaims, fldPath.Child("resourceClaims"))...) - allErrs = append(allErrs, validateContainers(spec.Containers, vols, podClaimNames, fldPath.Child("containers"), opts)...) 
- allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, vols, podClaimNames, fldPath.Child("initContainers"), opts)...) - allErrs = append(allErrs, validateEphemeralContainers(spec.EphemeralContainers, spec.Containers, spec.InitContainers, vols, podClaimNames, fldPath.Child("ephemeralContainers"), opts)...) + allErrs = append(allErrs, validateContainers(spec.Containers, vols, podClaimNames, gracePeriod, fldPath.Child("containers"), opts, &spec.RestartPolicy)...) + allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, vols, podClaimNames, gracePeriod, fldPath.Child("initContainers"), opts, &spec.RestartPolicy)...) + allErrs = append(allErrs, validateEphemeralContainers(spec.EphemeralContainers, spec.Containers, spec.InitContainers, vols, podClaimNames, fldPath.Child("ephemeralContainers"), opts, &spec.RestartPolicy)...) allErrs = append(allErrs, validatePodHostNetworkDeps(spec, fldPath, opts)...) allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...) allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...) @@ -4221,9 +4409,9 @@ func ValidateNodeSelector(nodeSelector *core.NodeSelector, fldPath *field.Path) // validateTopologySelectorLabelRequirement tests that the specified TopologySelectorLabelRequirement fields has valid data, // and constructs a set containing all of its Values. -func validateTopologySelectorLabelRequirement(rq core.TopologySelectorLabelRequirement, fldPath *field.Path) (sets.String, field.ErrorList) { +func validateTopologySelectorLabelRequirement(rq core.TopologySelectorLabelRequirement, fldPath *field.Path) (sets.Set[string], field.ErrorList) { allErrs := field.ErrorList{} - valueSet := make(sets.String) + valueSet := make(sets.Set[string]) valuesPath := fldPath.Child("values") if len(rq.Values) == 0 { allErrs = append(allErrs, field.Required(valuesPath, "")) @@ -4244,9 +4432,9 @@ func validateTopologySelectorLabelRequirement(rq core.TopologySelectorLabelRequi // ValidateTopologySelectorTerm tests that the specified topology selector term has valid data, // and constructs a map representing the term in raw form. -func ValidateTopologySelectorTerm(term core.TopologySelectorTerm, fldPath *field.Path) (map[string]sets.String, field.ErrorList) { +func ValidateTopologySelectorTerm(term core.TopologySelectorTerm, fldPath *field.Path) (map[string]sets.Set[string], field.ErrorList) { allErrs := field.ErrorList{} - exprMap := make(map[string]sets.String) + exprMap := make(map[string]sets.Set[string]) exprPath := fldPath.Child("matchLabelExpressions") // Allow empty MatchLabelExpressions, in case this field becomes optional in the future. @@ -4329,6 +4517,7 @@ func validatePodAffinityTerm(podAffinityTerm core.PodAffinityTerm, allowInvalidL allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), name, msg)) } } + allErrs = append(allErrs, validateMatchLabelKeysAndMismatchLabelKeys(fldPath, podAffinityTerm.MatchLabelKeys, podAffinityTerm.MismatchLabelKeys, podAffinityTerm.LabelSelector)...) 
if len(podAffinityTerm.TopologyKey) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("topologyKey"), "can not be empty")) } @@ -4470,7 +4659,7 @@ func validateSeccompProfileType(fldPath *field.Path, seccompProfileType core.Sec case "": return field.Required(fldPath, "type is required when seccompProfile is set") default: - return field.NotSupported(fldPath, seccompProfileType, []string{string(core.SeccompProfileTypeLocalhost), string(core.SeccompProfileTypeRuntimeDefault), string(core.SeccompProfileTypeUnconfined)}) + return field.NotSupported(fldPath, seccompProfileType, []core.SeccompProfileType{core.SeccompProfileTypeLocalhost, core.SeccompProfileTypeRuntimeDefault, core.SeccompProfileTypeUnconfined}) } } @@ -4541,10 +4730,10 @@ func IsValidSysctlName(name string) bool { return sysctlContainSlashRegexp.MatchString(name) } -func validateSysctls(sysctls []core.Sysctl, fldPath *field.Path) field.ErrorList { +func validateSysctls(securityContext *core.PodSecurityContext, fldPath *field.Path, opts PodValidationOptions) field.ErrorList { allErrs := field.ErrorList{} names := make(map[string]struct{}) - for i, s := range sysctls { + for i, s := range securityContext.Sysctls { if len(s.Name) == 0 { allErrs = append(allErrs, field.Required(fldPath.Index(i).Child("name"), "")) } else if !IsValidSysctlName(s.Name) { @@ -4552,11 +4741,29 @@ func validateSysctls(sysctls []core.Sysctl, fldPath *field.Path) field.ErrorList } else if _, ok := names[s.Name]; ok { allErrs = append(allErrs, field.Duplicate(fldPath.Index(i).Child("name"), s.Name)) } + if !opts.AllowNamespacedSysctlsForHostNetAndHostIPC { + err := ValidateHostSysctl(s.Name, securityContext, fldPath.Index(i).Child("name")) + if err != nil { + allErrs = append(allErrs, err) + } + } names[s.Name] = struct{}{} } return allErrs } +// ValidateHostSysctl will return error if namespaced sysctls is applied to pod sharing the respective namespaces with the host. +func ValidateHostSysctl(sysctl string, securityContext *core.PodSecurityContext, fldPath *field.Path) *field.Error { + ns, _, _ := utilsysctl.GetNamespace(sysctl) + switch { + case securityContext.HostNetwork && ns == utilsysctl.NetNamespace: + return field.Invalid(fldPath, sysctl, "may not be specified when 'hostNetwork' is true") + case securityContext.HostIPC && ns == utilsysctl.IPCNamespace: + return field.Invalid(fldPath, sysctl, "may not be specified when 'hostIPC' is true") + } + return nil +} + // validatePodSpecSecurityContext verifies the SecurityContext of a PodSpec, // whether that is defined in a Pod or in an embedded PodSpec (e.g. a // Deployment's pod template). @@ -4589,7 +4796,7 @@ func validatePodSpecSecurityContext(securityContext *core.PodSecurityContext, sp } if len(securityContext.Sysctls) != 0 { - allErrs = append(allErrs, validateSysctls(securityContext.Sysctls, fldPath.Child("sysctls"))...) + allErrs = append(allErrs, validateSysctls(securityContext, fldPath.Child("sysctls"), opts)...) } if securityContext.FSGroupChangePolicy != nil { @@ -4785,19 +4992,8 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel return allErrs } - //TODO(vinaykul,InPlacePodVerticalScaling): With KEP 2527, we can rely on persistence of PodStatus.QOSClass - // We can use PodStatus.QOSClass instead of GetPodQOS here, in kubelet, and elsewhere, as PodStatus.QOSClass - // does not change once it is bootstrapped in podCreate. This needs to be addressed before beta as a - // separate PR covering all uses of GetPodQOS. 
With that change, we can drop the below block. - // Ref: https://github.com/kubernetes/kubernetes/pull/102884#discussion_r1093790446 - // Ref: https://github.com/kubernetes/kubernetes/pull/102884/#discussion_r663280487 - if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) { - // reject attempts to change pod qos - oldQoS := qos.GetPodQOS(oldPod) - newQoS := qos.GetPodQOS(newPod) - if newQoS != oldQoS { - allErrs = append(allErrs, field.Invalid(fldPath, newQoS, "Pod QoS is immutable")) - } + if qos.GetPodQOS(oldPod) != qos.ComputePodQOS(newPod) { + allErrs = append(allErrs, field.Invalid(fldPath, newPod.Status.QOSClass, "Pod QoS is immutable")) } // handle updateable fields by munging those fields prior to deep equal comparison. @@ -4900,6 +5096,11 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel mungedPodSpec.Affinity.NodeAffinity = oldNodeAffinity // +k8s:verify-mutation:reason=clone } } + + // Note: Unlike NodeAffinity and NodeSelector, we cannot make PodAffinity/PodAntiAffinity mutable due to the presence of the matchLabelKeys/mismatchLabelKeys feature. + // Those features automatically generate the matchExpressions in labelSelector for PodAffinity/PodAntiAffinity when the Pod is created. + // When we make them mutable, we need to make sure things like how to handle/validate matchLabelKeys, + // and what if the fieldManager/A sets matchexpressions and fieldManager/B sets matchLabelKeys later. (could it lead the understandable conflict, etc) } if !apiequality.Semantic.DeepEqual(mungedPodSpec, oldPod.Spec) { @@ -4980,9 +5181,12 @@ func ValidatePodStatusUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions // validatePodConditions tests if the custom pod conditions are valid. func validatePodConditions(conditions []core.PodCondition, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - systemConditions := sets.NewString(string(core.PodScheduled), string(core.PodReady), string(core.PodInitialized)) + systemConditions := sets.New( + core.PodScheduled, + core.PodReady, + core.PodInitialized) for i, condition := range conditions { - if systemConditions.Has(string(condition.Type)) { + if systemConditions.Has(condition.Type) { continue } allErrs = append(allErrs, ValidateQualifiedName(string(condition.Type), fldPath.Index(i).Child("Type"))...) 
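Aside on the recurring pattern in these hunks: the string-specialized sets.String/sets.NewString helpers are being replaced by the generic sets.Set[T]/sets.New API from k8s.io/apimachinery/pkg/util/sets, which keeps typed values such as core.PodConditionType without round-tripping through string(...). Below is a minimal standalone sketch of that pattern; it is not part of the vendored diff, and it uses a local stand-in type rather than the internal core package.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// PodConditionType is a local stand-in for core.PodConditionType.
type PodConditionType string

const (
	PodScheduled   PodConditionType = "PodScheduled"
	PodReady       PodConditionType = "Ready"
	PodInitialized PodConditionType = "Initialized"
)

func main() {
	// Before: sets.NewString(string(PodScheduled), ...) forced string conversions.
	// After: the generic constructor infers Set[PodConditionType] directly.
	system := sets.New(PodScheduled, PodReady, PodInitialized)

	fmt.Println(system.Has(PodReady)) // true, no string(...) conversion needed
	fmt.Println(sets.List(system))    // sorted slice of PodConditionType values
}

sets.List returns a sorted slice, which is why the NotSupported error paths in these hunks switch from supportedX.List() to sets.List(supportedX) while keeping deterministic error messages.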
@@ -5092,14 +5296,17 @@ func ValidatePodTemplateUpdate(newPod, oldPod *core.PodTemplate, opts PodValidat return allErrs } -var supportedSessionAffinityType = sets.NewString(string(core.ServiceAffinityClientIP), string(core.ServiceAffinityNone)) -var supportedServiceType = sets.NewString(string(core.ServiceTypeClusterIP), string(core.ServiceTypeNodePort), - string(core.ServiceTypeLoadBalancer), string(core.ServiceTypeExternalName)) +var supportedSessionAffinityType = sets.New(core.ServiceAffinityClientIP, core.ServiceAffinityNone) +var supportedServiceType = sets.New(core.ServiceTypeClusterIP, core.ServiceTypeNodePort, + core.ServiceTypeLoadBalancer, core.ServiceTypeExternalName) -var supportedServiceInternalTrafficPolicy = sets.NewString(string(core.ServiceInternalTrafficPolicyCluster), string(core.ServiceExternalTrafficPolicyLocal)) +var supportedServiceInternalTrafficPolicy = sets.New(core.ServiceInternalTrafficPolicyCluster, core.ServiceInternalTrafficPolicyLocal) -var supportedServiceIPFamily = sets.NewString(string(core.IPv4Protocol), string(core.IPv6Protocol)) -var supportedServiceIPFamilyPolicy = sets.NewString(string(core.IPFamilyPolicySingleStack), string(core.IPFamilyPolicyPreferDualStack), string(core.IPFamilyPolicyRequireDualStack)) +var supportedServiceIPFamily = sets.New(core.IPv4Protocol, core.IPv6Protocol) +var supportedServiceIPFamilyPolicy = sets.New( + core.IPFamilyPolicySingleStack, + core.IPFamilyPolicyPreferDualStack, + core.IPFamilyPolicyRequireDualStack) // ValidateService tests if required fields/annotations of a Service are valid. func ValidateService(service *core.Service) field.ErrorList { @@ -5161,7 +5368,7 @@ func ValidateService(service *core.Service) field.ErrorList { } } - allPortNames := sets.String{} + allPortNames := sets.Set[string]{} portsPath := specPath.Child("ports") for i := range service.Spec.Ports { portPath := portsPath.Index(i) @@ -5174,15 +5381,15 @@ func ValidateService(service *core.Service) field.ErrorList { if len(service.Spec.SessionAffinity) == 0 { allErrs = append(allErrs, field.Required(specPath.Child("sessionAffinity"), "")) - } else if !supportedSessionAffinityType.Has(string(service.Spec.SessionAffinity)) { - allErrs = append(allErrs, field.NotSupported(specPath.Child("sessionAffinity"), service.Spec.SessionAffinity, supportedSessionAffinityType.List())) + } else if !supportedSessionAffinityType.Has(service.Spec.SessionAffinity) { + allErrs = append(allErrs, field.NotSupported(specPath.Child("sessionAffinity"), service.Spec.SessionAffinity, sets.List(supportedSessionAffinityType))) } if service.Spec.SessionAffinity == core.ServiceAffinityClientIP { allErrs = append(allErrs, validateClientIPAffinityConfig(service.Spec.SessionAffinityConfig, specPath.Child("sessionAffinityConfig"))...) 
} else if service.Spec.SessionAffinity == core.ServiceAffinityNone { if service.Spec.SessionAffinityConfig != nil { - allErrs = append(allErrs, field.Forbidden(specPath.Child("sessionAffinityConfig"), fmt.Sprintf("must not be set when session affinity is %s", string(core.ServiceAffinityNone)))) + allErrs = append(allErrs, field.Forbidden(specPath.Child("sessionAffinityConfig"), fmt.Sprintf("must not be set when session affinity is %s", core.ServiceAffinityNone))) } } @@ -5203,8 +5410,8 @@ func ValidateService(service *core.Service) field.ErrorList { if len(service.Spec.Type) == 0 { allErrs = append(allErrs, field.Required(specPath.Child("type"), "")) - } else if !supportedServiceType.Has(string(service.Spec.Type)) { - allErrs = append(allErrs, field.NotSupported(specPath.Child("type"), service.Spec.Type, supportedServiceType.List())) + } else if !supportedServiceType.Has(service.Spec.Type) { + allErrs = append(allErrs, field.NotSupported(specPath.Child("type"), service.Spec.Type, sets.List(supportedServiceType))) } if service.Spec.Type == core.ServiceTypeClusterIP { @@ -5290,7 +5497,7 @@ func ValidateService(service *core.Service) field.ErrorList { return allErrs } -func validateServicePort(sp *core.ServicePort, requireName, isHeadlessService bool, allNames *sets.String, fldPath *field.Path) field.ErrorList { +func validateServicePort(sp *core.ServicePort, requireName, isHeadlessService bool, allNames *sets.Set[string], fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if requireName && len(sp.Name) == 0 { @@ -5310,8 +5517,8 @@ func validateServicePort(sp *core.ServicePort, requireName, isHeadlessService bo if len(sp.Protocol) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("protocol"), "")) - } else if !supportedPortProtocols.Has(string(sp.Protocol)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), sp.Protocol, supportedPortProtocols.List())) + } else if !supportedPortProtocols.Has(sp.Protocol) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), sp.Protocol, sets.List(supportedPortProtocols))) } allErrs = append(allErrs, ValidatePortNumOrName(sp.TargetPort, fldPath.Child("targetPort"))...) 
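The ValidateService hunk ending here also carries a behavioral rule that is easy to restate in isolation: sessionAffinity must be one of the supported values, and sessionAffinityConfig is forbidden when the affinity is None (when it is ClientIP, the real validator additionally checks the config contents, which this sketch omits). The sketch below mirrors that rule with local stand-in types; the real constants and field.* error helpers live in the internal Kubernetes packages and are not reproduced here.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// Local stand-ins for core.ServiceAffinity and core.SessionAffinityConfig.
type ServiceAffinity string

const (
	AffinityClientIP ServiceAffinity = "ClientIP"
	AffinityNone     ServiceAffinity = "None"
)

type SessionAffinityConfig struct{ TimeoutSeconds *int32 }

// checkSessionAffinity mirrors the rule enforced above: the value must be a
// supported affinity, and the config block must not be set when affinity is None.
func checkSessionAffinity(affinity ServiceAffinity, cfg *SessionAffinityConfig) []string {
	var errs []string
	supported := sets.New(AffinityClientIP, AffinityNone)
	switch {
	case affinity == "":
		errs = append(errs, "spec.sessionAffinity: Required value")
	case !supported.Has(affinity):
		errs = append(errs, fmt.Sprintf("spec.sessionAffinity: unsupported value %q, expected one of %v", affinity, sets.List(supported)))
	}
	if affinity == AffinityNone && cfg != nil {
		errs = append(errs, "spec.sessionAffinityConfig: must not be set when session affinity is None")
	}
	return errs
}

func main() {
	fmt.Println(checkSessionAffinity(AffinityNone, &SessionAffinityConfig{})) // forbidden: config set with affinity None
	fmt.Println(checkSessionAffinity("Foo", nil))                             // unsupported value
}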
@@ -5332,30 +5539,24 @@ func validateServicePort(sp *core.ServicePort, requireName, isHeadlessService bo return allErrs } -func needsExternalTrafficPolicy(svc *core.Service) bool { - return svc.Spec.Type == core.ServiceTypeLoadBalancer || svc.Spec.Type == core.ServiceTypeNodePort -} - -var validExternalTrafficPolicies = sets.NewString( - string(core.ServiceExternalTrafficPolicyCluster), - string(core.ServiceExternalTrafficPolicyLocal)) +var validExternalTrafficPolicies = sets.New(core.ServiceExternalTrafficPolicyCluster, core.ServiceExternalTrafficPolicyLocal) func validateServiceExternalTrafficPolicy(service *core.Service) field.ErrorList { allErrs := field.ErrorList{} fldPath := field.NewPath("spec") - if !needsExternalTrafficPolicy(service) { + if !apiservice.ExternallyAccessible(service) { if service.Spec.ExternalTrafficPolicy != "" { allErrs = append(allErrs, field.Invalid(fldPath.Child("externalTrafficPolicy"), service.Spec.ExternalTrafficPolicy, - "may only be set when `type` is 'NodePort' or 'LoadBalancer'")) + "may only be set for externally-accessible services")) } } else { if service.Spec.ExternalTrafficPolicy == "" { allErrs = append(allErrs, field.Required(fldPath.Child("externalTrafficPolicy"), "")) - } else if !validExternalTrafficPolicies.Has(string(service.Spec.ExternalTrafficPolicy)) { + } else if !validExternalTrafficPolicies.Has(service.Spec.ExternalTrafficPolicy) { allErrs = append(allErrs, field.NotSupported(fldPath.Child("externalTrafficPolicy"), - service.Spec.ExternalTrafficPolicy, validExternalTrafficPolicies.List())) + service.Spec.ExternalTrafficPolicy, sets.List(validExternalTrafficPolicies))) } } @@ -5403,8 +5604,8 @@ func validateServiceInternalTrafficFieldsValue(service *core.Service) field.Erro } } - if service.Spec.InternalTrafficPolicy != nil && !supportedServiceInternalTrafficPolicy.Has(string(*service.Spec.InternalTrafficPolicy)) { - allErrs = append(allErrs, field.NotSupported(field.NewPath("spec").Child("internalTrafficPolicy"), *service.Spec.InternalTrafficPolicy, supportedServiceInternalTrafficPolicy.List())) + if service.Spec.InternalTrafficPolicy != nil && !supportedServiceInternalTrafficPolicy.Has(*service.Spec.InternalTrafficPolicy) { + allErrs = append(allErrs, field.NotSupported(field.NewPath("spec").Child("internalTrafficPolicy"), *service.Spec.InternalTrafficPolicy, sets.List(supportedServiceInternalTrafficPolicy))) } return allErrs @@ -5441,7 +5642,7 @@ func ValidateServiceUpdate(service, oldService *core.Service) field.ErrorList { // ValidateServiceStatusUpdate tests if required fields in the Service are set when updating status. func ValidateServiceStatusUpdate(service, oldService *core.Service) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateLoadBalancerStatus(&service.Status.LoadBalancer, field.NewPath("status", "loadBalancer"))...) + allErrs = append(allErrs, ValidateLoadBalancerStatus(&service.Status.LoadBalancer, field.NewPath("status", "loadBalancer"), &service.Spec)...) return allErrs } @@ -5524,7 +5725,7 @@ func ValidatePodTemplateSpecForRC(template, oldTemplate *core.PodTemplateSpec, s } // RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec(). 
if template.Spec.RestartPolicy != core.RestartPolicyAlways { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(core.RestartPolicyAlways)})) + allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []core.RestartPolicy{core.RestartPolicyAlways})) } if template.Spec.ActiveDeadlineSeconds != nil { allErrs = append(allErrs, field.Forbidden(fldPath.Child("spec", "activeDeadlineSeconds"), "activeDeadlineSeconds in ReplicationController is not Supported")) @@ -5612,7 +5813,7 @@ func ValidateTaintsInNodeAnnotations(annotations map[string]string, fldPath *fie func validateNodeTaints(taints []core.Taint, fldPath *field.Path) field.ErrorList { allErrors := field.ErrorList{} - uniqueTaints := map[core.TaintEffect]sets.String{} + uniqueTaints := map[core.TaintEffect]sets.Set[string]{} for i, currTaint := range taints { idxPath := fldPath.Index(i) @@ -5635,7 +5836,7 @@ func validateNodeTaints(taints []core.Taint, fldPath *field.Path) field.ErrorLis // add taint to existingTaints for uniqueness check if len(uniqueTaints[currTaint.Effect]) == 0 { - uniqueTaints[currTaint.Effect] = sets.String{} + uniqueTaints[currTaint.Effect] = sets.Set[string]{} } uniqueTaints[currTaint.Effect].Insert(currTaint.Key) } @@ -5693,7 +5894,7 @@ func ValidateNode(node *core.Node) field.ErrorList { } // PodCIDRs must not contain duplicates - seen := sets.String{} + seen := sets.Set[string]{} for i, value := range node.Spec.PodCIDRs { if seen.Has(value) { allErrs = append(allErrs, field.Duplicate(podCIDRsField.Index(i), value)) @@ -5713,13 +5914,13 @@ func ValidateNodeResources(node *core.Node) field.ErrorList { // Validate resource quantities in capacity. for k, v := range node.Status.Capacity { resPath := field.NewPath("status", "capacity", string(k)) - allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + allErrs = append(allErrs, ValidateResourceQuantityValue(k, v, resPath)...) } // Validate resource quantities in allocatable. for k, v := range node.Status.Allocatable { resPath := field.NewPath("status", "allocatable", string(k)) - allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + allErrs = append(allErrs, ValidateResourceQuantityValue(k, v, resPath)...) } return allErrs } @@ -5905,16 +6106,16 @@ func validateConfigMapNodeConfigSource(source *core.ConfigMapNodeConfigSource, f // Validate compute resource typename. // Refer to docs/design/resources.md for more details. -func validateResourceName(value string, fldPath *field.Path) field.ErrorList { +func validateResourceName(value core.ResourceName, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - for _, msg := range validation.IsQualifiedName(value) { + for _, msg := range validation.IsQualifiedName(string(value)) { allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) } if len(allErrs) != 0 { return allErrs } - if len(strings.Split(value, "/")) == 1 { + if len(strings.Split(string(value), "/")) == 1 { if !helper.IsStandardResourceName(value) { return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource type or fully qualified")) } @@ -5925,15 +6126,15 @@ func validateResourceName(value string, fldPath *field.Path) field.ErrorList { // Validate container resource name // Refer to docs/design/resources.md for more details. 
-func validateContainerResourceName(value string, fldPath *field.Path) field.ErrorList { +func validateContainerResourceName(value core.ResourceName, fldPath *field.Path) field.ErrorList { allErrs := validateResourceName(value, fldPath) - if len(strings.Split(value, "/")) == 1 { + if len(strings.Split(string(value), "/")) == 1 { if !helper.IsStandardContainerResourceName(value) { return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource for containers")) } - } else if !helper.IsNativeResource(core.ResourceName(value)) { - if !helper.IsExtendedResourceName(core.ResourceName(value)) { + } else if !helper.IsNativeResource(value) { + if !helper.IsExtendedResourceName(value) { return append(allErrs, field.Invalid(fldPath, value, "doesn't follow extended resource name standard")) } } @@ -5942,10 +6143,10 @@ func validateContainerResourceName(value string, fldPath *field.Path) field.Erro // Validate resource names that can go in a resource quota // Refer to docs/design/resources.md for more details. -func ValidateResourceQuotaResourceName(value string, fldPath *field.Path) field.ErrorList { +func ValidateResourceQuotaResourceName(value core.ResourceName, fldPath *field.Path) field.ErrorList { allErrs := validateResourceName(value, fldPath) - if len(strings.Split(value, "/")) == 1 { + if len(strings.Split(string(value), "/")) == 1 { if !helper.IsStandardQuotaResourceName(value) { return append(allErrs, field.Invalid(fldPath, value, isInvalidQuotaResource)) } @@ -5954,16 +6155,16 @@ func ValidateResourceQuotaResourceName(value string, fldPath *field.Path) field. } // Validate limit range types -func validateLimitRangeTypeName(value string, fldPath *field.Path) field.ErrorList { +func validateLimitRangeTypeName(value core.LimitType, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - for _, msg := range validation.IsQualifiedName(value) { + for _, msg := range validation.IsQualifiedName(string(value)) { allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) } if len(allErrs) != 0 { return allErrs } - if len(strings.Split(value, "/")) == 1 { + if len(strings.Split(string(value), "/")) == 1 { if !helper.IsStandardLimitRangeType(value) { return append(allErrs, field.Invalid(fldPath, value, "must be a standard limit type or fully qualified")) } @@ -5974,7 +6175,7 @@ func validateLimitRangeTypeName(value string, fldPath *field.Path) field.ErrorLi // Validate limit range resource name // limit types (other than Pod/Container) could contain storage not just cpu or memory -func validateLimitRangeResourceName(limitType core.LimitType, value string, fldPath *field.Path) field.ErrorList { +func validateLimitRangeResourceName(limitType core.LimitType, value core.ResourceName, fldPath *field.Path) field.ErrorList { switch limitType { case core.LimitTypePod, core.LimitTypeContainer: return validateContainerResourceName(value, fldPath) @@ -5993,7 +6194,7 @@ func ValidateLimitRange(limitRange *core.LimitRange) field.ErrorList { for i := range limitRange.Spec.Limits { idxPath := fldPath.Index(i) limit := &limitRange.Spec.Limits[i] - allErrs = append(allErrs, validateLimitRangeTypeName(string(limit.Type), idxPath.Child("type"))...) + allErrs = append(allErrs, validateLimitRangeTypeName(limit.Type, idxPath.Child("type"))...) 
_, found := limitTypeSet[limit.Type] if found { @@ -6001,7 +6202,7 @@ func ValidateLimitRange(limitRange *core.LimitRange) field.ErrorList { } limitTypeSet[limit.Type] = true - keys := sets.String{} + keys := sets.Set[string]{} min := map[string]resource.Quantity{} max := map[string]resource.Quantity{} defaults := map[string]resource.Quantity{} @@ -6009,12 +6210,12 @@ func ValidateLimitRange(limitRange *core.LimitRange) field.ErrorList { maxLimitRequestRatios := map[string]resource.Quantity{} for k, q := range limit.Max { - allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("max").Key(string(k)))...) + allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, k, idxPath.Child("max").Key(string(k)))...) keys.Insert(string(k)) max[string(k)] = q } for k, q := range limit.Min { - allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("min").Key(string(k)))...) + allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, k, idxPath.Child("min").Key(string(k)))...) keys.Insert(string(k)) min[string(k)] = q } @@ -6028,12 +6229,12 @@ func ValidateLimitRange(limitRange *core.LimitRange) field.ErrorList { } } else { for k, q := range limit.Default { - allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("default").Key(string(k)))...) + allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, k, idxPath.Child("default").Key(string(k)))...) keys.Insert(string(k)) defaults[string(k)] = q } for k, q := range limit.DefaultRequest { - allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("defaultRequest").Key(string(k)))...) + allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, k, idxPath.Child("defaultRequest").Key(string(k)))...) keys.Insert(string(k)) defaultRequests[string(k)] = q } @@ -6048,7 +6249,7 @@ func ValidateLimitRange(limitRange *core.LimitRange) field.ErrorList { } for k, q := range limit.MaxLimitRequestRatio { - allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("maxLimitRequestRatio").Key(string(k)))...) + allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, k, idxPath.Child("maxLimitRequestRatio").Key(string(k)))...) keys.Insert(string(k)) maxLimitRequestRatios[string(k)] = q } @@ -6288,7 +6489,7 @@ func validateBasicResource(quantity resource.Quantity, fldPath *field.Path) fiel } // Validates resource requirement spec. -func ValidateResourceRequirements(requirements *core.ResourceRequirements, podClaimNames sets.String, fldPath *field.Path, opts PodValidationOptions) field.ErrorList { +func ValidateResourceRequirements(requirements *core.ResourceRequirements, podClaimNames sets.Set[string], fldPath *field.Path, opts PodValidationOptions) field.ErrorList { allErrs := field.ErrorList{} limPath := fldPath.Child("limits") reqPath := fldPath.Child("requests") @@ -6296,15 +6497,15 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, podCl reqContainsCPUOrMemory := false limContainsHugePages := false reqContainsHugePages := false - supportedQoSComputeResources := sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory)) + supportedQoSComputeResources := sets.New(core.ResourceCPU, core.ResourceMemory) for resourceName, quantity := range requirements.Limits { fldPath := limPath.Key(string(resourceName)) // Validate resource name. 
- allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...) + allErrs = append(allErrs, validateContainerResourceName(resourceName, fldPath)...) // Validate resource quantity. - allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...) + allErrs = append(allErrs, ValidateResourceQuantityValue(resourceName, quantity, fldPath)...) if helper.IsHugePageResourceName(resourceName) { limContainsHugePages = true @@ -6313,16 +6514,16 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, podCl } } - if supportedQoSComputeResources.Has(string(resourceName)) { + if supportedQoSComputeResources.Has(resourceName) { limContainsCPUOrMemory = true } } for resourceName, quantity := range requirements.Requests { fldPath := reqPath.Key(string(resourceName)) // Validate resource name. - allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...) + allErrs = append(allErrs, validateContainerResourceName(resourceName, fldPath)...) // Validate resource quantity. - allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...) + allErrs = append(allErrs, ValidateResourceQuantityValue(resourceName, quantity, fldPath)...) // Check that request <= limit. limitQuantity, exists := requirements.Limits[resourceName] @@ -6342,7 +6543,7 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, podCl allErrs = append(allErrs, field.Invalid(fldPath, quantity.String(), err.Error())) } } - if supportedQoSComputeResources.Has(string(resourceName)) { + if supportedQoSComputeResources.Has(resourceName) { reqContainsCPUOrMemory = true } @@ -6359,9 +6560,9 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, podCl // validateResourceClaimNames checks that the names in // ResourceRequirements.Claims have a corresponding entry in // PodSpec.ResourceClaims. -func validateResourceClaimNames(claims []core.ResourceClaim, podClaimNames sets.String, fldPath *field.Path) field.ErrorList { +func validateResourceClaimNames(claims []core.ResourceClaim, podClaimNames sets.Set[string], fldPath *field.Path) field.ErrorList { var allErrs field.ErrorList - names := sets.String{} + names := sets.Set[string]{} for i, claim := range claims { name := claim.Name if name == "" { @@ -6381,7 +6582,7 @@ func validateResourceClaimNames(claims []core.ResourceClaim, podClaimNames sets. 
if len(podClaimNames) == 0 { error.Detail += " which is empty" } else { - error.Detail += ": " + strings.Join(podClaimNames.List(), ", ") + error.Detail += ": " + strings.Join(sets.List(podClaimNames), ", ") } allErrs = append(allErrs, error) } @@ -6408,29 +6609,29 @@ func validateResourceQuotaScopes(resourceQuotaSpec *core.ResourceQuotaSpec, fld if len(resourceQuotaSpec.Scopes) == 0 { return allErrs } - hardLimits := sets.NewString() + hardLimits := sets.New[core.ResourceName]() for k := range resourceQuotaSpec.Hard { - hardLimits.Insert(string(k)) + hardLimits.Insert(k) } fldPath := fld.Child("scopes") - scopeSet := sets.NewString() + scopeSet := sets.New[core.ResourceQuotaScope]() for _, scope := range resourceQuotaSpec.Scopes { - if !helper.IsStandardResourceQuotaScope(string(scope)) { + if !helper.IsStandardResourceQuotaScope(scope) { allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "unsupported scope")) } - for _, k := range hardLimits.List() { + for _, k := range sets.List(hardLimits) { if helper.IsStandardQuotaResourceName(k) && !helper.IsResourceQuotaScopeValidForResource(scope, k) { allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "unsupported scope applied to resource")) } } - scopeSet.Insert(string(scope)) + scopeSet.Insert(scope) } - invalidScopePairs := []sets.String{ - sets.NewString(string(core.ResourceQuotaScopeBestEffort), string(core.ResourceQuotaScopeNotBestEffort)), - sets.NewString(string(core.ResourceQuotaScopeTerminating), string(core.ResourceQuotaScopeNotTerminating)), + invalidScopePairs := []sets.Set[core.ResourceQuotaScope]{ + sets.New(core.ResourceQuotaScopeBestEffort, core.ResourceQuotaScopeNotBestEffort), + sets.New(core.ResourceQuotaScopeTerminating, core.ResourceQuotaScopeNotTerminating), } for _, invalidScopePair := range invalidScopePairs { - if scopeSet.HasAll(invalidScopePair.List()...) { + if scopeSet.HasAll(sets.List(invalidScopePair)...) 
{ allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "conflicting scopes")) } } @@ -6440,17 +6641,17 @@ func validateResourceQuotaScopes(resourceQuotaSpec *core.ResourceQuotaSpec, fld // validateScopedResourceSelectorRequirement tests that the match expressions has valid data func validateScopedResourceSelectorRequirement(resourceQuotaSpec *core.ResourceQuotaSpec, fld *field.Path) field.ErrorList { allErrs := field.ErrorList{} - hardLimits := sets.NewString() + hardLimits := sets.New[core.ResourceName]() for k := range resourceQuotaSpec.Hard { - hardLimits.Insert(string(k)) + hardLimits.Insert(k) } fldPath := fld.Child("matchExpressions") - scopeSet := sets.NewString() + scopeSet := sets.New[core.ResourceQuotaScope]() for _, req := range resourceQuotaSpec.ScopeSelector.MatchExpressions { - if !helper.IsStandardResourceQuotaScope(string(req.ScopeName)) { + if !helper.IsStandardResourceQuotaScope(req.ScopeName) { allErrs = append(allErrs, field.Invalid(fldPath.Child("scopeName"), req.ScopeName, "unsupported scope")) } - for _, k := range hardLimits.List() { + for _, k := range sets.List(hardLimits) { if helper.IsStandardQuotaResourceName(k) && !helper.IsResourceQuotaScopeValidForResource(req.ScopeName, k) { allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.ScopeSelector, "unsupported scope applied to resource")) } @@ -6477,14 +6678,14 @@ func validateScopedResourceSelectorRequirement(resourceQuotaSpec *core.ResourceQ default: allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), req.Operator, "not a valid selector operator")) } - scopeSet.Insert(string(req.ScopeName)) + scopeSet.Insert(req.ScopeName) } - invalidScopePairs := []sets.String{ - sets.NewString(string(core.ResourceQuotaScopeBestEffort), string(core.ResourceQuotaScopeNotBestEffort)), - sets.NewString(string(core.ResourceQuotaScopeTerminating), string(core.ResourceQuotaScopeNotTerminating)), + invalidScopePairs := []sets.Set[core.ResourceQuotaScope]{ + sets.New(core.ResourceQuotaScopeBestEffort, core.ResourceQuotaScopeNotBestEffort), + sets.New(core.ResourceQuotaScopeTerminating, core.ResourceQuotaScopeNotTerminating), } for _, invalidScopePair := range invalidScopePairs { - if scopeSet.HasAll(invalidScopePair.List()...) { + if scopeSet.HasAll(sets.List(invalidScopePair)...) { allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "conflicting scopes")) } } @@ -6518,14 +6719,14 @@ func ValidateResourceQuotaStatus(status *core.ResourceQuotaStatus, fld *field.Pa fldPath := fld.Child("hard") for k, v := range status.Hard { resPath := fldPath.Key(string(k)) - allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) - allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + allErrs = append(allErrs, ValidateResourceQuotaResourceName(k, resPath)...) + allErrs = append(allErrs, ValidateResourceQuantityValue(k, v, resPath)...) } fldPath = fld.Child("used") for k, v := range status.Used { resPath := fldPath.Key(string(k)) - allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) - allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + allErrs = append(allErrs, ValidateResourceQuotaResourceName(k, resPath)...) + allErrs = append(allErrs, ValidateResourceQuantityValue(k, v, resPath)...) 
} return allErrs @@ -6537,8 +6738,8 @@ func ValidateResourceQuotaSpec(resourceQuotaSpec *core.ResourceQuotaSpec, fld *f fldPath := fld.Child("hard") for k, v := range resourceQuotaSpec.Hard { resPath := fldPath.Key(string(k)) - allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) - allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + allErrs = append(allErrs, ValidateResourceQuotaResourceName(k, resPath)...) + allErrs = append(allErrs, ValidateResourceQuantityValue(k, v, resPath)...) } allErrs = append(allErrs, validateResourceQuotaScopes(resourceQuotaSpec, fld)...) @@ -6548,7 +6749,7 @@ func ValidateResourceQuotaSpec(resourceQuotaSpec *core.ResourceQuotaSpec, fld *f } // ValidateResourceQuantityValue enforces that specified quantity is valid for specified resource -func ValidateResourceQuantityValue(resource string, value resource.Quantity, fldPath *field.Path) field.ErrorList { +func ValidateResourceQuantityValue(resource core.ResourceName, value resource.Quantity, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, ValidateNonnegativeQuantity(value, fldPath)...) if helper.IsIntegerResourceName(resource) { @@ -6566,8 +6767,8 @@ func ValidateResourceQuotaUpdate(newResourceQuota, oldResourceQuota *core.Resour // ensure scopes cannot change, and that resources are still valid for scope fldPath := field.NewPath("spec", "scopes") - oldScopes := sets.NewString() - newScopes := sets.NewString() + oldScopes := sets.New[string]() + newScopes := sets.New[string]() for _, scope := range newResourceQuota.Spec.Scopes { newScopes.Insert(string(scope)) } @@ -6590,14 +6791,14 @@ func ValidateResourceQuotaStatusUpdate(newResourceQuota, oldResourceQuota *core. fldPath := field.NewPath("status", "hard") for k, v := range newResourceQuota.Status.Hard { resPath := fldPath.Key(string(k)) - allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) - allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + allErrs = append(allErrs, ValidateResourceQuotaResourceName(k, resPath)...) + allErrs = append(allErrs, ValidateResourceQuantityValue(k, v, resPath)...) } fldPath = field.NewPath("status", "used") for k, v := range newResourceQuota.Status.Used { resPath := fldPath.Key(string(k)) - allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) - allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + allErrs = append(allErrs, ValidateResourceQuotaResourceName(k, resPath)...) + allErrs = append(allErrs, ValidateResourceQuantityValue(k, v, resPath)...) } return allErrs } @@ -6771,8 +6972,8 @@ func validateEndpointPort(port *core.EndpointPort, requireName bool, fldPath *fi } if len(port.Protocol) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("protocol"), "")) - } else if !supportedPortProtocols.Has(string(port.Protocol)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), port.Protocol, supportedPortProtocols.List())) + } else if !supportedPortProtocols.Has(port.Protocol) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), port.Protocol, sets.List(supportedPortProtocols))) } if port.AppProtocol != nil { allErrs = append(allErrs, ValidateQualifiedName(*port.AppProtocol, fldPath.Child("appProtocol"))...) 
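The resource-quota hunks above keep the existing conflicting-scope rule, now expressed with typed sets: a quota may not request a scope together with its negation (BestEffort with NotBestEffort, Terminating with NotTerminating). A standalone sketch of that check, not part of the vendored diff, using a local stand-in for core.ResourceQuotaScope:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// ResourceQuotaScope is a stand-in; the values mirror the upstream constants.
type ResourceQuotaScope string

const (
	ScopeBestEffort     ResourceQuotaScope = "BestEffort"
	ScopeNotBestEffort  ResourceQuotaScope = "NotBestEffort"
	ScopeTerminating    ResourceQuotaScope = "Terminating"
	ScopeNotTerminating ResourceQuotaScope = "NotTerminating"
)

// conflictingScopes reports whether the requested scopes contain a scope
// together with its negation, the condition the validation above rejects.
func conflictingScopes(requested []ResourceQuotaScope) bool {
	scopeSet := sets.New(requested...)
	invalidPairs := []sets.Set[ResourceQuotaScope]{
		sets.New(ScopeBestEffort, ScopeNotBestEffort),
		sets.New(ScopeTerminating, ScopeNotTerminating),
	}
	for _, pair := range invalidPairs {
		if scopeSet.HasAll(sets.List(pair)...) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(conflictingScopes([]ResourceQuotaScope{ScopeBestEffort, ScopeTerminating}))   // false
	fmt.Println(conflictingScopes([]ResourceQuotaScope{ScopeBestEffort, ScopeNotBestEffort})) // true
}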
@@ -7022,9 +7223,8 @@ func validateOS(podSpec *core.PodSpec, fldPath *field.Path, opts PodValidationOp if len(os.Name) == 0 { return append(allErrs, field.Required(fldPath.Child("name"), "cannot be empty")) } - osName := string(os.Name) - if !validOS.Has(osName) { - allErrs = append(allErrs, field.NotSupported(fldPath, osName, validOS.List())) + if !validOS.Has(os.Name) { + allErrs = append(allErrs, field.NotSupported(fldPath, os.Name, sets.List(validOS))) } return allErrs } @@ -7048,22 +7248,42 @@ func ValidatePodLogOptions(opts *core.PodLogOptions) field.ErrorList { return allErrs } +var ( + supportedLoadBalancerIPMode = sets.New(core.LoadBalancerIPModeVIP, core.LoadBalancerIPModeProxy) +) + // ValidateLoadBalancerStatus validates required fields on a LoadBalancerStatus -func ValidateLoadBalancerStatus(status *core.LoadBalancerStatus, fldPath *field.Path) field.ErrorList { +func ValidateLoadBalancerStatus(status *core.LoadBalancerStatus, fldPath *field.Path, spec *core.ServiceSpec) field.ErrorList { allErrs := field.ErrorList{} - for i, ingress := range status.Ingress { - idxPath := fldPath.Child("ingress").Index(i) - if len(ingress.IP) > 0 { - if isIP := (netutils.ParseIPSloppy(ingress.IP) != nil); !isIP { - allErrs = append(allErrs, field.Invalid(idxPath.Child("ip"), ingress.IP, "must be a valid IP address")) + ingrPath := fldPath.Child("ingress") + if !utilfeature.DefaultFeatureGate.Enabled(features.AllowServiceLBStatusOnNonLB) && spec.Type != core.ServiceTypeLoadBalancer && len(status.Ingress) != 0 { + allErrs = append(allErrs, field.Forbidden(ingrPath, "may only be used when `spec.type` is 'LoadBalancer'")) + } else { + for i, ingress := range status.Ingress { + idxPath := ingrPath.Index(i) + if len(ingress.IP) > 0 { + if isIP := (netutils.ParseIPSloppy(ingress.IP) != nil); !isIP { + allErrs = append(allErrs, field.Invalid(idxPath.Child("ip"), ingress.IP, "must be a valid IP address")) + } } - } - if len(ingress.Hostname) > 0 { - for _, msg := range validation.IsDNS1123Subdomain(ingress.Hostname) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, msg)) + + if utilfeature.DefaultFeatureGate.Enabled(features.LoadBalancerIPMode) && ingress.IPMode == nil { + if len(ingress.IP) > 0 { + allErrs = append(allErrs, field.Required(idxPath.Child("ipMode"), "must be specified when `ip` is set")) + } + } else if ingress.IPMode != nil && len(ingress.IP) == 0 { + allErrs = append(allErrs, field.Forbidden(idxPath.Child("ipMode"), "may not be specified when `ip` is not set")) + } else if ingress.IPMode != nil && !supportedLoadBalancerIPMode.Has(*ingress.IPMode) { + allErrs = append(allErrs, field.NotSupported(idxPath.Child("ipMode"), ingress.IPMode, sets.List(supportedLoadBalancerIPMode))) } - if isIP := (netutils.ParseIPSloppy(ingress.Hostname) != nil); isIP { - allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, "must be a DNS name, not an IP address")) + + if len(ingress.Hostname) > 0 { + for _, msg := range validation.IsDNS1123Subdomain(ingress.Hostname) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, msg)) + } + if isIP := (netutils.ParseIPSloppy(ingress.Hostname) != nil); isIP { + allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, "must be a DNS name, not an IP address")) + } } } } @@ -7115,12 +7335,12 @@ func ValidateProcMountType(fldPath *field.Path, procMountType core.ProcMountType case core.DefaultProcMount, core.UnmaskedProcMount: return nil 
default: - return field.NotSupported(fldPath, procMountType, []string{string(core.DefaultProcMount), string(core.UnmaskedProcMount)}) + return field.NotSupported(fldPath, procMountType, []core.ProcMountType{core.DefaultProcMount, core.UnmaskedProcMount}) } } var ( - supportedScheduleActions = sets.NewString(string(core.DoNotSchedule), string(core.ScheduleAnyway)) + supportedScheduleActions = sets.New(core.DoNotSchedule, core.ScheduleAnyway) ) // validateTopologySpreadConstraints validates given TopologySpreadConstraints. @@ -7149,7 +7369,7 @@ func validateTopologySpreadConstraints(constraints []core.TopologySpreadConstrai if err := validateNodeInclusionPolicy(subFldPath.Child("nodeTaintsPolicy"), constraint.NodeTaintsPolicy); err != nil { allErrs = append(allErrs, err) } - allErrs = append(allErrs, validateMatchLabelKeys(subFldPath.Child("matchLabelKeys"), constraint.MatchLabelKeys, constraint.LabelSelector)...) + allErrs = append(allErrs, validateMatchLabelKeysInTopologySpread(subFldPath.Child("matchLabelKeys"), constraint.MatchLabelKeys, constraint.LabelSelector)...) if !opts.AllowInvalidTopologySpreadConstraintLabelSelector { allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(constraint.LabelSelector, unversionedvalidation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: false}, subFldPath.Child("labelSelector"))...) } @@ -7177,7 +7397,7 @@ func validateMinDomains(fldPath *field.Path, minDomains *int32, action core.Unsa } // When MinDomains is non-nil, whenUnsatisfiable must be DoNotSchedule. if action != core.DoNotSchedule { - allErrs = append(allErrs, field.Invalid(fldPath, minDomains, fmt.Sprintf("can only use minDomains if whenUnsatisfiable=%s, not %s", string(core.DoNotSchedule), string(action)))) + allErrs = append(allErrs, field.Invalid(fldPath, minDomains, fmt.Sprintf("can only use minDomains if whenUnsatisfiable=%s, not %s", core.DoNotSchedule, action))) } return allErrs } @@ -7192,8 +7412,8 @@ func ValidateTopologyKey(fldPath *field.Path, topologyKey string) *field.Error { // ValidateWhenUnsatisfiable tests that the argument is a valid UnsatisfiableConstraintAction. func ValidateWhenUnsatisfiable(fldPath *field.Path, action core.UnsatisfiableConstraintAction) *field.Error { - if !supportedScheduleActions.Has(string(action)) { - return field.NotSupported(fldPath, action, supportedScheduleActions.List()) + if !supportedScheduleActions.Has(action) { + return field.NotSupported(fldPath, action, sets.List(supportedScheduleActions)) } return nil } @@ -7211,7 +7431,7 @@ func ValidateSpreadConstraintNotRepeat(fldPath *field.Path, constraint core.Topo } var ( - supportedPodTopologySpreadNodePolicies = sets.NewString(string(core.NodeInclusionPolicyIgnore), string(core.NodeInclusionPolicyHonor)) + supportedPodTopologySpreadNodePolicies = sets.New(core.NodeInclusionPolicyIgnore, core.NodeInclusionPolicyHonor) ) // validateNodeAffinityPolicy tests that the argument is a valid NodeInclusionPolicy. @@ -7220,20 +7440,72 @@ func validateNodeInclusionPolicy(fldPath *field.Path, policy *core.NodeInclusion return nil } - if !supportedPodTopologySpreadNodePolicies.Has(string(*policy)) { - return field.NotSupported(fldPath, policy, supportedPodTopologySpreadNodePolicies.List()) + if !supportedPodTopologySpreadNodePolicies.Has(*policy) { + return field.NotSupported(fldPath, policy, sets.List(supportedPodTopologySpreadNodePolicies)) } return nil } -// validateMatchLabelKeys tests that the elements are a valid label name and are not already included in labelSelector. 
-func validateMatchLabelKeys(fldPath *field.Path, matchLabelKeys []string, labelSelector *metav1.LabelSelector) field.ErrorList { +// validateMatchLabelKeysAndMismatchLabelKeys checks if both matchLabelKeys and mismatchLabelKeys are valid. +// - validate that all matchLabelKeys and mismatchLabelKeys are valid label names. +// - validate that the user doens't specify the same key in both matchLabelKeys and labelSelector. +// - validate that any matchLabelKeys are not duplicated with mismatchLabelKeys. +func validateMatchLabelKeysAndMismatchLabelKeys(fldPath *field.Path, matchLabelKeys, mismatchLabelKeys []string, labelSelector *metav1.LabelSelector) field.ErrorList { + var allErrs field.ErrorList + // 1. validate that all matchLabelKeys and mismatchLabelKeys are valid label names. + allErrs = append(allErrs, validateLabelKeys(fldPath.Child("matchLabelKeys"), matchLabelKeys, labelSelector)...) + allErrs = append(allErrs, validateLabelKeys(fldPath.Child("mismatchLabelKeys"), mismatchLabelKeys, labelSelector)...) + + // 2. validate that the user doens't specify the same key in both matchLabelKeys and labelSelector. + // It doesn't make sense to have the labelselector with the key specified in matchLabelKeys + // because the matchLabelKeys will be `In` labelSelector which matches with only one value in the key + // and we cannot make any further filtering with that key. + // On the other hand, we may want to have labelSelector with the key specified in mismatchLabelKeys. + // because the mismatchLabelKeys will be `NotIn` labelSelector + // and we may want to filter Pods further with other labelSelector with that key. + + // labelKeysMap is keyed by label key and valued by the index of label key in labelKeys. + if labelSelector != nil { + labelKeysMap := map[string]int{} + for i, key := range matchLabelKeys { + labelKeysMap[key] = i + } + labelSelectorKeys := sets.New[string]() + for key := range labelSelector.MatchLabels { + labelSelectorKeys.Insert(key) + } + for _, matchExpression := range labelSelector.MatchExpressions { + key := matchExpression.Key + if i, ok := labelKeysMap[key]; ok && labelSelectorKeys.Has(key) { + // Before validateLabelKeysWithSelector is called, the labelSelector has already got the selector created from matchLabelKeys. + // Here, we found the duplicate key in labelSelector and the key is specified in labelKeys. + // Meaning that the same key is specified in both labelSelector and matchLabelKeys/mismatchLabelKeys. + allErrs = append(allErrs, field.Invalid(fldPath.Index(i), key, "exists in both matchLabelKeys and labelSelector")) + } + + labelSelectorKeys.Insert(key) + } + } + + // 3. validate that any matchLabelKeys are not duplicated with mismatchLabelKeys. + mismatchLabelKeysSet := sets.New(mismatchLabelKeys...) + for i, k := range matchLabelKeys { + if mismatchLabelKeysSet.Has(k) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("matchLabelKeys").Index(i), k, "exists in both matchLabelKeys and mismatchLabelKeys")) + } + } + + return allErrs +} + +// validateMatchLabelKeysInTopologySpread tests that the elements are a valid label name and are not already included in labelSelector. 
+func validateMatchLabelKeysInTopologySpread(fldPath *field.Path, matchLabelKeys []string, labelSelector *metav1.LabelSelector) field.ErrorList { if len(matchLabelKeys) == 0 { return nil } var allErrs field.ErrorList - labelSelectorKeys := sets.String{} + labelSelectorKeys := sets.Set[string]{} if labelSelector != nil { for key := range labelSelector.MatchLabels { @@ -7256,6 +7528,25 @@ func validateMatchLabelKeys(fldPath *field.Path, matchLabelKeys []string, labelS return allErrs } +// validateLabelKeys tests that the label keys are a valid label name. +// It's intended to be used for matchLabelKeys or mismatchLabelKeys. +func validateLabelKeys(fldPath *field.Path, labelKeys []string, labelSelector *metav1.LabelSelector) field.ErrorList { + if len(labelKeys) == 0 { + return nil + } + + if labelSelector == nil { + return field.ErrorList{field.Forbidden(fldPath, "must not be specified when labelSelector is not set")} + } + + var allErrs field.ErrorList + for i, key := range labelKeys { + allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(key, fldPath.Index(i))...) + } + + return allErrs +} + // ValidateServiceClusterIPsRelatedFields validates .spec.ClusterIPs,, // .spec.IPFamilies, .spec.ipFamilyPolicy. This is exported because it is used // during IP init and allocation. @@ -7292,24 +7583,24 @@ func ValidateServiceClusterIPsRelatedFields(service *core.Service) field.ErrorLi // ipfamilies stand alone validation // must be either IPv4 or IPv6 - seen := sets.String{} + seen := sets.Set[core.IPFamily]{} for i, ipFamily := range service.Spec.IPFamilies { - if !supportedServiceIPFamily.Has(string(ipFamily)) { - allErrs = append(allErrs, field.NotSupported(ipFamiliesField.Index(i), ipFamily, supportedServiceIPFamily.List())) + if !supportedServiceIPFamily.Has(ipFamily) { + allErrs = append(allErrs, field.NotSupported(ipFamiliesField.Index(i), ipFamily, sets.List(supportedServiceIPFamily))) } // no duplicate check also ensures that ipfamilies is dualstacked, in any order - if seen.Has(string(ipFamily)) { + if seen.Has(ipFamily) { allErrs = append(allErrs, field.Duplicate(ipFamiliesField.Index(i), ipFamily)) } - seen.Insert(string(ipFamily)) + seen.Insert(ipFamily) } // IPFamilyPolicy stand alone validation // note: nil is ok, defaulted in alloc check registry/core/service/* if service.Spec.IPFamilyPolicy != nil { // must have a supported value - if !supportedServiceIPFamilyPolicy.Has(string(*(service.Spec.IPFamilyPolicy))) { - allErrs = append(allErrs, field.NotSupported(ipFamilyPolicyField, service.Spec.IPFamilyPolicy, supportedServiceIPFamilyPolicy.List())) + if !supportedServiceIPFamilyPolicy.Has(*(service.Spec.IPFamilyPolicy)) { + allErrs = append(allErrs, field.NotSupported(ipFamilyPolicyField, service.Spec.IPFamilyPolicy, sets.List(supportedServiceIPFamilyPolicy))) } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go index 471fdbd6f..d92025f00 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go @@ -466,6 +466,42 @@ func (in *ClientIPConfig) DeepCopy() *ClientIPConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterTrustBundleProjection) DeepCopyInto(out *ClusterTrustBundleProjection) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SignerName != nil { + in, out := &in.SignerName, &out.SignerName + *out = new(string) + **out = **in + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleProjection. +func (in *ClusterTrustBundleProjection) DeepCopy() *ClusterTrustBundleProjection { + if in == nil { + return nil + } + out := new(ClusterTrustBundleProjection) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ComponentCondition) DeepCopyInto(out *ComponentCondition) { *out = *in @@ -2045,6 +2081,11 @@ func (in *LifecycleHandler) DeepCopyInto(out *LifecycleHandler) { *out = new(TCPSocketAction) **out = **in } + if in.Sleep != nil { + in, out := &in.Sleep, &out.Sleep + *out = new(SleepAction) + **out = **in + } return } @@ -2230,6 +2271,11 @@ func (in *List) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) { *out = *in + if in.IPMode != nil { + in, out := &in.IPMode, &out.IPMode + *out = new(LoadBalancerIPMode) + **out = **in + } if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]PortStatus, len(*in)) @@ -2310,6 +2356,22 @@ func (in *LocalVolumeSource) DeepCopy() *LocalVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModifyVolumeStatus) DeepCopyInto(out *ModifyVolumeStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModifyVolumeStatus. +func (in *ModifyVolumeStatus) DeepCopy() *ModifyVolumeStatus { + if in == nil { + return nil + } + out := new(ModifyVolumeStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NFSVolumeSource) DeepCopyInto(out *NFSVolumeSource) { *out = *in @@ -3058,6 +3120,11 @@ func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec *out = new(TypedObjectReference) (*in).DeepCopyInto(*out) } + if in.VolumeAttributesClassName != nil { + in, out := &in.VolumeAttributesClassName, &out.VolumeAttributesClassName + *out = new(string) + **out = **in + } return } @@ -3107,6 +3174,16 @@ func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimSt (*out)[key] = val } } + if in.CurrentVolumeAttributesClassName != nil { + in, out := &in.CurrentVolumeAttributesClassName, &out.CurrentVolumeAttributesClassName + *out = new(string) + **out = **in + } + if in.ModifyVolumeStatus != nil { + in, out := &in.ModifyVolumeStatus, &out.ModifyVolumeStatus + *out = new(ModifyVolumeStatus) + **out = **in + } return } @@ -3349,6 +3426,11 @@ func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) { *out = new(VolumeNodeAffinity) (*in).DeepCopyInto(*out) } + if in.VolumeAttributesClassName != nil { + in, out := &in.VolumeAttributesClassName, &out.VolumeAttributesClassName + *out = new(string) + **out = **in + } return } @@ -3474,6 +3556,16 @@ func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) { *out = new(v1.LabelSelector) (*in).DeepCopyInto(*out) } + if in.MatchLabelKeys != nil { + in, out := &in.MatchLabelKeys, &out.MatchLabelKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.MismatchLabelKeys != nil { + in, out := &in.MismatchLabelKeys, &out.MismatchLabelKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -5666,6 +5758,22 @@ func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SleepAction) DeepCopyInto(out *SleepAction) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SleepAction. +func (in *SleepAction) DeepCopy() *SleepAction { + if in == nil { + return nil + } + out := new(SleepAction) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) { *out = *in @@ -6012,6 +6120,11 @@ func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) { *out = new(ServiceAccountTokenProjection) **out = **in } + if in.ClusterTrustBundle != nil { + in, out := &in.ClusterTrustBundle, &out.ClusterTrustBundle + *out = new(ClusterTrustBundleProjection) + (*in).DeepCopyInto(*out) + } return } @@ -6025,6 +6138,36 @@ func (in *VolumeProjection) DeepCopy() *VolumeProjection { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeResourceRequirements) DeepCopyInto(out *VolumeResourceRequirements) { + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeResourceRequirements. 
+func (in *VolumeResourceRequirements) DeepCopy() *VolumeResourceRequirements { + if in == nil { + return nil + } + out := new(VolumeResourceRequirements) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { *out = *in diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/register.go b/vendor/k8s.io/kubernetes/pkg/apis/networking/register.go index 570a6a4db..bc2c7b689 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/networking/register.go @@ -52,10 +52,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { &IngressList{}, &IngressClass{}, &IngressClassList{}, - &ClusterCIDR{}, - &ClusterCIDRList{}, &IPAddress{}, &IPAddressList{}, + &ServiceCIDR{}, + &ServiceCIDRList{}, ) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/types.go b/vendor/k8s.io/kubernetes/pkg/apis/networking/types.go index 9ec17540b..61747e9b0 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/networking/types.go @@ -18,7 +18,6 @@ package networking import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" api "k8s.io/kubernetes/pkg/apis/core" ) @@ -599,71 +598,6 @@ type ServiceBackendPort struct { // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ClusterCIDR represents a single configuration for per-Node Pod CIDR -// allocations when the MultiCIDRRangeAllocator is enabled (see the config for -// kube-controller-manager). A cluster may have any number of ClusterCIDR -// resources, all of which will be considered when allocating a CIDR for a -// Node. A ClusterCIDR is eligible to be used for a given Node when the node -// selector matches the node in question and has free CIDRs to allocate. In -// case of multiple matching ClusterCIDR resources, the allocator will attempt -// to break ties using internal heuristics, but any ClusterCIDR whose node -// selector matches the Node may be used. -type ClusterCIDR struct { - metav1.TypeMeta - - metav1.ObjectMeta - - Spec ClusterCIDRSpec -} - -// ClusterCIDRSpec defines the desired state of ClusterCIDR. -type ClusterCIDRSpec struct { - // nodeSelector defines which nodes the config is applicable to. - // An empty or nil nodeSelector selects all nodes. - // This field is immutable. - // +optional - NodeSelector *api.NodeSelector - - // perNodeHostBits defines the number of host bits to be configured per node. - // A subnet mask determines how much of the address is used for network bits - // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the - // address into 24 bits for the network portion and 8 bits for the host portion. - // To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). - // Minimum value is 4 (16 IPs). - // This field is immutable. - // +required - PerNodeHostBits int32 - - // ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). - // At least one of ipv4 and ipv6 must be specified. - // This field is immutable. - // +optional - IPv4 string - - // ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). - // At least one of ipv4 and ipv6 must be specified. - // This field is immutable. 
- // +optional - IPv6 string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterCIDRList contains a list of ClusterCIDRs. -type ClusterCIDRList struct { - metav1.TypeMeta - - // +optional - metav1.ListMeta - - // items is the list of ClusterCIDRs. - Items []ClusterCIDR -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - // IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs // that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. // An IP address can be represented in different formats, to guarantee the uniqueness of the IP, @@ -695,9 +629,6 @@ type ParentReference struct { Namespace string // Name is the name of the object being referenced. Name string - // UID is the uid of the object being referenced. - // +optional - UID types.UID } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -711,3 +642,53 @@ type IPAddressList struct { // Items is the list of IPAddress Items []IPAddress } + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). +// This range is used to allocate ClusterIPs to Service objects. +type ServiceCIDR struct { + metav1.TypeMeta + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + // spec is the desired state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec ServiceCIDRSpec + // status represents the current state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ServiceCIDRStatus +} + +type ServiceCIDRSpec struct { + // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") + // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. + // This field is immutable. + // +optional + CIDRs []string +} + +// ServiceCIDRStatus describes the current state of the ServiceCIDR. +type ServiceCIDRStatus struct { + // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. + Conditions []metav1.Condition +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.27 + +// ServiceCIDRList contains a list of ServiceCIDR objects. +type ServiceCIDRList struct { + metav1.TypeMeta + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta + // items is the list of ServiceCIDRs. 
+ Items []ServiceCIDR +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go index 3a39c6cac..3f5eeceef 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go @@ -28,87 +28,6 @@ import ( core "k8s.io/kubernetes/pkg/apis/core" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterCIDR) DeepCopyInto(out *ClusterCIDR) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDR. -func (in *ClusterCIDR) DeepCopy() *ClusterCIDR { - if in == nil { - return nil - } - out := new(ClusterCIDR) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterCIDR) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterCIDRList) DeepCopyInto(out *ClusterCIDRList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterCIDR, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRList. -func (in *ClusterCIDRList) DeepCopy() *ClusterCIDRList { - if in == nil { - return nil - } - out := new(ClusterCIDRList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterCIDRList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterCIDRSpec) DeepCopyInto(out *ClusterCIDRSpec) { - *out = *in - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = new(core.NodeSelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRSpec. -func (in *ClusterCIDRSpec) DeepCopy() *ClusterCIDRSpec { - if in == nil { - return nil - } - out := new(ClusterCIDRSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) { *out = *in @@ -904,3 +823,108 @@ func (in *ServiceBackendPort) DeepCopy() *ServiceBackendPort { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceCIDR) DeepCopyInto(out *ServiceCIDR) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDR. +func (in *ServiceCIDR) DeepCopy() *ServiceCIDR { + if in == nil { + return nil + } + out := new(ServiceCIDR) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCIDR) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceCIDR, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRList. +func (in *ServiceCIDRList) DeepCopy() *ServiceCIDRList { + if in == nil { + return nil + } + out := new(ServiceCIDRList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCIDRList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDRSpec) DeepCopyInto(out *ServiceCIDRSpec) { + *out = *in + if in.CIDRs != nil { + in, out := &in.CIDRs, &out.CIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRSpec. +func (in *ServiceCIDRSpec) DeepCopy() *ServiceCIDRSpec { + if in == nil { + return nil + } + out := new(ServiceCIDRSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDRStatus) DeepCopyInto(out *ServiceCIDRStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRStatus. 
+func (in *ServiceCIDRStatus) DeepCopy() *ServiceCIDRStatus { + if in == nil { + return nil + } + out := new(ServiceCIDRStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go index 1173090b1..edd33feb7 100644 --- a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go +++ b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go @@ -44,6 +44,13 @@ const ( // Enable usage of Provision of PVCs from snapshots in other namespaces CrossNamespaceVolumeDataSource featuregate.Feature = "CrossNamespaceVolumeDataSource" + // owner: @thockin + // deprecated: v1.29 + // + // Enables Service.status.ingress.loadBanace to be set on + // services of types other than LoadBalancer. + AllowServiceLBStatusOnNonLB featuregate.Feature = "AllowServiceLBStatusOnNonLB" + // owner: @bswartz // alpha: v1.18 // beta: v1.24 @@ -65,6 +72,7 @@ const ( // owner: @danwinship // alpha: v1.27 + // beta: v1.29 // // Enables dual-stack --node-ip in kubelet with external cloud providers CloudDualStackNodeIPs featuregate.Feature = "CloudDualStackNodeIPs" @@ -75,6 +83,12 @@ const ( // Enable ClusterTrustBundle objects and Kubelet integration. ClusterTrustBundle featuregate.Feature = "ClusterTrustBundle" + // owner: @ahmedtd + // alpha: v1.28 + // + // Enable ClusterTrustBundle Kubelet projected volumes. Depends on ClusterTrustBundle. + ClusterTrustBundleProjection featuregate.Feature = "ClusterTrustBundleProjection" + // owner: @szuecs // alpha: v1.12 // @@ -134,7 +148,8 @@ const ( // owner: @mfordjody // alpha: v1.26 // - // Skip validation Enable in next version + // Bypasses obsolete validation that GCP volumes are read-only when used in + // Deployments. SkipReadOnlyValidationGCE featuregate.Feature = "SkipReadOnlyValidationGCE" // owner: @trierra @@ -150,17 +165,11 @@ const ( // Enables the RBD in-tree driver to RBD CSI Driver migration feature. CSIMigrationRBD featuregate.Feature = "CSIMigrationRBD" - // owner: @divyenpatel - // beta: v1.19 (requires: vSphere vCenter/ESXi Version: 7.0u2, HW Version: VM version 15) - // GA: 1.26 - // Enables the vSphere in-tree driver to vSphere CSI Driver migration feature. - CSIMigrationvSphere featuregate.Feature = "CSIMigrationvSphere" - // owner: @humblec, @zhucan // kep: https://kep.k8s.io/3171 // alpha: v1.25 // beta: v1.27 - // + // GA: v1.29 // Enables SecretRef field in CSI NodeExpandVolume request. CSINodeExpandSecret featuregate.Feature = "CSINodeExpandSecret" @@ -170,6 +179,14 @@ const ( // Enables kubelet to detect CSI volume condition and send the event of the abnormal volume to the corresponding pod that is using it. CSIVolumeHealth featuregate.Feature = "CSIVolumeHealth" + // owner: @seans3 + // kep: http://kep.k8s.io/4006 + // alpha: v1.29 + // + // Enables StreamTranslator proxy to handle WebSockets upgrade requests for the + // version of the RemoteCommand subprotocol that supports the "close" signal. + TranslateStreamCloseWebsocketRequests featuregate.Feature = "TranslateStreamCloseWebsocketRequests" + // owner: @nckturner // kep: http://kep.k8s.io/2699 // alpha: v1.27 @@ -194,15 +211,6 @@ const ( // Set the scheduled time as an annotation in the job. CronJobsScheduledAnnotation featuregate.Feature = "CronJobsScheduledAnnotation" - // owner: @deejross, @soltysh - // kep: https://kep.k8s.io/3140 - // alpha: v1.24 - // beta: v1.25 - // GA: 1.27 - // - // Enables support for time zones in CronJobs. 
- CronJobTimeZone featuregate.Feature = "CronJobTimeZone" - // owner: @thockin // deprecated: v1.28 // @@ -215,29 +223,30 @@ const ( // owner: @elezar // kep: http://kep.k8s.io/4009 // alpha: v1.28 + // beta: v1.29 // // Add support for CDI Device IDs in the Device Plugin API. DevicePluginCDIDevices featuregate.Feature = "DevicePluginCDIDevices" // owner: @andrewsykim // alpha: v1.22 + // beta: v1.29 // // Disable any functionality in kube-apiserver, kube-controller-manager and kubelet related to the `--cloud-provider` component flag. DisableCloudProviders featuregate.Feature = "DisableCloudProviders" // owner: @andrewsykim // alpha: v1.23 + // beta: v1.29 // // Disable in-tree functionality in kubelet to authenticate to cloud provider container registries for image pull credentials. DisableKubeletCloudCredentialProviders featuregate.Feature = "DisableKubeletCloudCredentialProviders" - // owner: @derekwaynecarr - // alpha: v1.20 - // beta: v1.21 (off by default until 1.22) - // ga: v1.27 - // - // Enables usage of hugepages- in downward API. - DownwardAPIHugePages featuregate.Feature = "DownwardAPIHugePages" + // owner: @HirazawaUi + // kep: http://kep.k8s.io/4004 + // alpha: v1.29 + // DisableNodeKubeProxyVersion disable the status.nodeInfo.kubeProxyVersion field of v1.Node + DisableNodeKubeProxyVersion featuregate.Feature = "DisableNodeKubeProxyVersion" // owner: @pohly // kep: http://kep.k8s.io/3063 @@ -280,15 +289,6 @@ const ( // This flag used to be needed for dockershim CRI and currently does nothing. ExperimentalHostUserNamespaceDefaultingGate featuregate.Feature = "ExperimentalHostUserNamespaceDefaulting" - // owner: @yuzhiquan, @bowei, @PxyUp, @SergeyKanzhelev - // kep: https://kep.k8s.io/2727 - // alpha: v1.23 - // beta: v1.24 - // stable: v1.27 - // - // Enables GRPC probe method for {Liveness,Readiness,Startup}Probe. - GRPCContainerProbe featuregate.Feature = "GRPCContainerProbe" - // owner: @bobbypage // alpha: v1.20 // beta: v1.21 @@ -385,19 +385,11 @@ const ( // owner: @mimowo // kep: https://kep.k8s.io/3850 // alpha: v1.28 + // beta: v1.29 // // Allows users to specify counting of failed pods per index. JobBackoffLimitPerIndex featuregate.Feature = "JobBackoffLimitPerIndex" - // owner: @ahg - // beta: v1.23 - // stable: v1.27 - // - // Allow updating node scheduling directives in the pod template of jobs. Specifically, - // node affinity, selector and tolerations. This is allowed only for suspended jobs - // that have never been unsuspended before. - JobMutableNodeSchedulingDirectives featuregate.Feature = "JobMutableNodeSchedulingDirectives" - // owner: @mimowo // kep: https://kep.k8s.io/3329 // alpha: v1.25 @@ -410,6 +402,7 @@ const ( // owner: @kannon92 // kep : https://kep.k8s.io/3939 // alpha: v1.28 + // beta: v1.29 // // Allow users to specify recreating pods of a job only when // pods have fully terminated. @@ -421,17 +414,6 @@ const ( // Track the number of pods with Ready condition in the Job status. JobReadyPods featuregate.Feature = "JobReadyPods" - // owner: @alculquicondor - // alpha: v1.22 - // beta: v1.23 - // stable: v1.26 - // - // Track Job completion without relying on Pod remaining in the cluster - // indefinitely. Pod finalizers, in addition to a field in the Job status - // allow the Job controller to keep track of Pods that it didn't account for - // yet. 
- JobTrackingWithFinalizers featuregate.Feature = "JobTrackingWithFinalizers" - // owner: @marquiz // kep: http://kep.k8s.io/4033 // alpha: v1.28 @@ -478,6 +460,12 @@ const ( // Enable POD resources API to return allocatable resources KubeletPodResourcesGetAllocatable featuregate.Feature = "KubeletPodResourcesGetAllocatable" + // KubeletSeparateDiskGC enables Kubelet to garbage collection images/containers on different filesystems + // owner: @kannon92 + // kep: https://kep.k8s.io/4191 + // alpha: v1.29 + KubeletSeparateDiskGC featuregate.Feature = "KubeletSeparateDiskGC" + // owner: @sallyom // kep: https://kep.k8s.io/2832 // alpha: v1.25 @@ -494,14 +482,6 @@ const ( // `externalTrafficPolicy: Cluster` services. KubeProxyDrainingTerminatingNodes featuregate.Feature = "KubeProxyDrainingTerminatingNodes" - // owner: @zshihang - // kep: https://kep.k8s.io/2800 - // beta: v1.24 - // ga: v1.26 - // - // Stop auto-generation of secret-based service account tokens. - LegacyServiceAccountTokenNoAutoGeneration featuregate.Feature = "LegacyServiceAccountTokenNoAutoGeneration" - // owner: @zshihang // kep: http://kep.k8s.io/2800 // alpha: v1.26 @@ -513,6 +493,7 @@ const ( // owner: @yt2985 // kep: http://kep.k8s.io/2800 // alpha: v1.28 + // beta: v1.29 // // Enables cleaning up of secret-based service account tokens. LegacyServiceAccountTokenCleanUp featuregate.Feature = "LegacyServiceAccountTokenCleanUp" @@ -531,6 +512,13 @@ const ( // Enables scaling down replicas via logarithmic comparison of creation/ready timestamps LogarithmicScaleDown featuregate.Feature = "LogarithmicScaleDown" + // owner: @sanposhiho + // kep: https://kep.k8s.io/3633 + // alpha: v1.29 + // + // Enables the MatchLabelKeys and MismatchLabelKeys in PodAffinity and PodAntiAffinity. + MatchLabelKeysInPodAffinity featuregate.Feature = "MatchLabelKeysInPodAffinity" + // owner: @denkensk // kep: https://kep.k8s.io/3243 // alpha: v1.25 @@ -574,13 +562,6 @@ const ( // Enables new performance-improving code in kube-proxy iptables mode MinimizeIPTablesRestore featuregate.Feature = "MinimizeIPTablesRestore" - // owner: @sarveshr7 - // kep: https://kep.k8s.io/2593 - // alpha: v1.25 - // - // Enables the MultiCIDR Range allocator. - MultiCIDRRangeAllocator featuregate.Feature = "MultiCIDRRangeAllocator" - // owner: @aojea // kep: https://kep.k8s.io/1880 // alpha: v1.27 @@ -595,6 +576,13 @@ const ( // Robust VolumeManager reconstruction after kubelet restart. NewVolumeManagerReconstruction featuregate.Feature = "NewVolumeManagerReconstruction" + // owner: @danwinship + // kep: https://kep.k8s.io/3866 + // alpha: v1.29 + // + // Allows running kube-proxy with `--mode nftables`. + NFTablesProxyMode featuregate.Feature = "NFTablesProxyMode" + // owner: @aravindhp @LorbusChris // kep: http://kep.k8s.io/2271 // alpha: v1.27 @@ -664,8 +652,9 @@ const ( // Set pod completion index as a pod label for Indexed Jobs. 
PodIndexLabel featuregate.Feature = "PodIndexLabel" - // owner: @ddebroy + // owner: @ddebroy, @kannon92 // alpha: v1.25 + // beta: v1.29 // // Enables reporting of PodReadyToStartContainersCondition condition in pod status after pod // sandbox creation and network configuration completes successfully @@ -674,10 +663,18 @@ const ( // owner: @wzshiming // kep: http://kep.k8s.io/2681 // alpha: v1.28 + // beta: v1.29 // // Adds pod.status.hostIPs and downward API PodHostIPs featuregate.Feature = "PodHostIPs" + // owner: @AxeZhan + // kep: http://kep.k8s.io/3960 + // alpha: v1.29 + // + // Enables SleepAction in container lifecycle hooks + PodLifecycleSleepAction featuregate.Feature = "PodLifecycleSleepAction" + // owner: @Huang-Wei // kep: https://kep.k8s.io/3521 // alpha: v1.26 @@ -686,14 +683,6 @@ const ( // Enable users to specify when a Pod is ready for scheduling. PodSchedulingReadiness featuregate.Feature = "PodSchedulingReadiness" - // owner: @rphillips - // alpha: v1.21 - // beta: v1.22 - // ga: v1.28 - // - // Allows user to override pod-level terminationGracePeriod for probes - ProbeTerminationGracePeriod featuregate.Feature = "ProbeTerminationGracePeriod" - // owner: @jessfraz // alpha: v1.12 // @@ -720,6 +709,7 @@ const ( // kep: https://kep.k8s.io/2485 // alpha: v1.22 // beta: v1.27 + // GA: v1.29 // // Enables usage of the ReadWriteOncePod PersistentVolume access mode. ReadWriteOncePod featuregate.Feature = "ReadWriteOncePod" @@ -731,15 +721,6 @@ const ( // Allow users to recover from volume expansion failure RecoverVolumeExpansionFailure featuregate.Feature = "RecoverVolumeExpansionFailure" - // owner: @RomanBednar - // kep: https://kep.k8s.io/3333 - // alpha: v1.25 - // beta: 1.26 - // stable: v1.28 - // - // Allow assigning StorageClass to unbound PVCs retroactively - RetroactiveDefaultStorageClass featuregate.Feature = "RetroactiveDefaultStorageClass" - // owner: @mikedanese // alpha: v1.7 // beta: v1.12 @@ -749,6 +730,13 @@ const ( // certificate as expiration approaches. RotateKubeletServerCertificate featuregate.Feature = "RotateKubeletServerCertificate" + // owner: @kiashok + // kep: https://kep.k8s.io/4216 + // alpha: v1.29 + // + // Adds support to pull images based on the runtime class specified. + RuntimeClassInImageCriAPI featuregate.Feature = "RuntimeClassInImageCriApi" + // owner: @danielvegamyhre // kep: https://kep.k8s.io/2413 // beta: v1.27 @@ -759,22 +747,13 @@ const ( ElasticIndexedJob featuregate.Feature = "ElasticIndexedJob" // owner: @sanposhiho - // kep: http://kep.k8s.io/3063 + // kep: http://kep.k8s.io/4247 // beta: v1.28 // // Enables the scheduler's enhancement called QueueingHints, // which benefits to reduce the useless requeueing. SchedulerQueueingHints featuregate.Feature = "SchedulerQueueingHints" - // owner: @saschagrunert - // kep: https://kep.k8s.io/2413 - // alpha: v1.22 - // beta: v1.25 - // ga: v1.27 - // - // Enables the use of `RuntimeDefault` as the default seccomp profile for all workloads. - SeccompDefault featuregate.Feature = "SeccompDefault" - // owner: @mtardy // alpha: v1.0 // @@ -783,10 +762,48 @@ const ( // https://github.com/kubernetes/kubernetes/issues/111516 SecurityContextDeny featuregate.Feature = "SecurityContextDeny" + // owner: @atosatto @yuanchen8911 + // kep: http://kep.k8s.io/3902 + // beta: v1.29 + // + // Decouples Taint Eviction Controller, performing taint-based Pod eviction, from Node Lifecycle Controller. 
+ SeparateTaintEvictionController featuregate.Feature = "SeparateTaintEvictionController" + + // owner: @munnerz + // kep: http://kep.k8s.io/4193 + // alpha: v1.29 + // + // Controls whether JTIs (UUIDs) are embedded into generated service account tokens, and whether these JTIs are + // recorded into the audit log for future requests made by these tokens. + ServiceAccountTokenJTI featuregate.Feature = "ServiceAccountTokenJTI" + + // owner: @munnerz + // kep: http://kep.k8s.io/4193 + // alpha: v1.29 + // + // Controls whether the apiserver supports binding service account tokens to Node objects. + ServiceAccountTokenNodeBinding featuregate.Feature = "ServiceAccountTokenNodeBinding" + + // owner: @munnerz + // kep: http://kep.k8s.io/4193 + // alpha: v1.29 + // + // Controls whether the apiserver will validate Node claims in service account tokens. + ServiceAccountTokenNodeBindingValidation featuregate.Feature = "ServiceAccountTokenNodeBindingValidation" + + // owner: @munnerz + // kep: http://kep.k8s.io/4193 + // alpha: v1.29 + // + // Controls whether the apiserver embeds the node name and uid for the associated node when issuing + // service account tokens bound to Pod objects. + ServiceAccountTokenPodNodeInfo featuregate.Feature = "ServiceAccountTokenPodNodeInfo" + // owner: @xuzhenglun // kep: http://kep.k8s.io/3682 // alpha: v1.27 // beta: v1.28 + // stable: v1.29 // // Subdivide the NodePort range for dynamic and static port allocation. ServiceNodePortStaticSubrange featuregate.Feature = "ServiceNodePortStaticSubrange" @@ -837,14 +854,6 @@ const ( // Enables topology aware hints for EndpointSlices TopologyAwareHints featuregate.Feature = "TopologyAwareHints" - // owner: @lmdaly, @swatisehgal (for GA graduation) - // alpha: v1.16 - // beta: v1.18 - // GA: v1.27 - // - // Enable resource managers to make NUMA aligned decisions - TopologyManager featuregate.Feature = "TopologyManager" - // owner: @PiotrProkop // kep: https://kep.k8s.io/3545 // alpha: v1.26 @@ -886,6 +895,13 @@ const ( // Enables user namespace support for stateless pods. UserNamespacesSupport featuregate.Feature = "UserNamespacesSupport" + // owner: @mattcarry, @sunnylovestiramisu + // kep: https://kep.k8s.io/3751 + // alpha: v1.29 + // + // Enables user specified volume attributes for persistent volumes, like iops and throughput. + VolumeAttributesClass featuregate.Feature = "VolumeAttributesClass" + // owner: @cofyc // alpha: v1.21 VolumeCapacityPriority featuregate.Feature = "VolumeCapacityPriority" @@ -934,6 +950,31 @@ const ( // // Enables In-Place Pod Vertical Scaling InPlacePodVerticalScaling featuregate.Feature = "InPlacePodVerticalScaling" + + // owner: @Sh4d1,@RyanAoh + // kep: http://kep.k8s.io/1860 + // alpha: v1.29 + // LoadBalancerIPMode enables the IPMode field in the LoadBalancerIngress status of a Service + LoadBalancerIPMode featuregate.Feature = "LoadBalancerIPMode" + + // owner: @haircommander + // kep: http://kep.k8s.io/4210 + // alpha: v1.29 + // ImageMaximumGCAge enables the Kubelet configuration field of the same name, allowing an admin + // to specify the age after which an image will be garbage collected. + ImageMaximumGCAge featuregate.Feature = "ImageMaximumGCAge" + + // owner: @saschagrunert + // alpha: v1.28 + // + // Enables user namespace support for Pod Security Standards. 
Enabling this + // feature will modify all Pod Security Standard rules to allow setting: + // spec[.*].securityContext.[runAsNonRoot,runAsUser] + // This feature gate should only be enabled if all nodes in the cluster + // support the user namespace feature and have it enabled. The feature gate + // will not graduate or be enabled by default in future Kubernetes + // releases. + UserNamespacesPodSecurityStandards featuregate.Feature = "UserNamespacesPodSecurityStandards" ) func init() { @@ -949,16 +990,20 @@ func init() { var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ CrossNamespaceVolumeDataSource: {Default: false, PreRelease: featuregate.Alpha}, + AllowServiceLBStatusOnNonLB: {Default: false, PreRelease: featuregate.Deprecated}, // remove after 1.29 + AnyVolumeDataSource: {Default: true, PreRelease: featuregate.Beta}, // on by default in 1.24 APISelfSubjectReview: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.28; remove in 1.30 AppArmor: {Default: true, PreRelease: featuregate.Beta}, - CloudDualStackNodeIPs: {Default: false, PreRelease: featuregate.Alpha}, + CloudDualStackNodeIPs: {Default: true, PreRelease: featuregate.Beta}, ClusterTrustBundle: {Default: false, PreRelease: featuregate.Alpha}, + ClusterTrustBundleProjection: {Default: false, PreRelease: featuregate.Alpha}, + CPUCFSQuotaPeriod: {Default: false, PreRelease: featuregate.Alpha}, CPUManager: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.26 @@ -975,13 +1020,13 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS CSIMigrationRBD: {Default: false, PreRelease: featuregate.Deprecated}, // deprecated in 1.28, remove in 1.31 - CSIMigrationvSphere: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 - - CSINodeExpandSecret: {Default: true, PreRelease: featuregate.Beta}, + CSINodeExpandSecret: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 CSIVolumeHealth: {Default: false, PreRelease: featuregate.Alpha}, - SkipReadOnlyValidationGCE: {Default: false, PreRelease: featuregate.Alpha}, + SkipReadOnlyValidationGCE: {Default: true, PreRelease: featuregate.Deprecated}, // remove in 1.31 + + TranslateStreamCloseWebsocketRequests: {Default: false, PreRelease: featuregate.Alpha}, CloudControllerManagerWebhook: {Default: false, PreRelease: featuregate.Alpha}, @@ -991,17 +1036,15 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS CronJobsScheduledAnnotation: {Default: true, PreRelease: featuregate.Beta}, - CronJobTimeZone: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 - DefaultHostNetworkHostPortsInPodTemplates: {Default: false, PreRelease: featuregate.Deprecated}, - DisableCloudProviders: {Default: false, PreRelease: featuregate.Alpha}, + DisableCloudProviders: {Default: true, PreRelease: featuregate.Beta}, - DisableKubeletCloudCredentialProviders: {Default: false, PreRelease: featuregate.Alpha}, + DisableKubeletCloudCredentialProviders: {Default: true, PreRelease: featuregate.Beta}, - DevicePluginCDIDevices: {Default: false, PreRelease: featuregate.Alpha}, + DisableNodeKubeProxyVersion: {Default: false, PreRelease: featuregate.Alpha}, - DownwardAPIHugePages: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in v1.29 + DevicePluginCDIDevices: {Default: true, PreRelease: featuregate.Beta}, DynamicResourceAllocation: {Default: false, PreRelease: 
featuregate.Alpha}, @@ -1013,8 +1056,6 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: featuregate.Deprecated, LockToDefault: true}, // remove in 1.30 - GRPCContainerProbe: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, //remove in 1.29 - GracefulNodeShutdown: {Default: true, PreRelease: featuregate.Beta}, GracefulNodeShutdownBasedOnPodPriority: {Default: true, PreRelease: featuregate.Beta}, @@ -1041,17 +1082,13 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS IPTablesOwnershipCleanup: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.30 - JobBackoffLimitPerIndex: {Default: false, PreRelease: featuregate.Alpha}, - - JobMutableNodeSchedulingDirectives: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 + JobBackoffLimitPerIndex: {Default: true, PreRelease: featuregate.Beta}, JobPodFailurePolicy: {Default: true, PreRelease: featuregate.Beta}, - JobPodReplacementPolicy: {Default: false, PreRelease: featuregate.Alpha}, - - JobReadyPods: {Default: true, PreRelease: featuregate.Beta}, + JobPodReplacementPolicy: {Default: true, PreRelease: featuregate.Beta}, - JobTrackingWithFinalizers: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28 + JobReadyPods: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 KubeletCgroupDriverFromCRI: {Default: false, PreRelease: featuregate.Alpha}, @@ -1065,20 +1102,22 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS KubeletPodResourcesGetAllocatable: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.28, remove in 1.30 + KubeletSeparateDiskGC: {Default: false, PreRelease: featuregate.Alpha}, + KubeletTracing: {Default: true, PreRelease: featuregate.Beta}, KubeProxyDrainingTerminatingNodes: {Default: false, PreRelease: featuregate.Alpha}, - LegacyServiceAccountTokenNoAutoGeneration: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 - LegacyServiceAccountTokenTracking: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.30 - LegacyServiceAccountTokenCleanUp: {Default: false, PreRelease: featuregate.Alpha}, + LegacyServiceAccountTokenCleanUp: {Default: true, PreRelease: featuregate.Beta}, LocalStorageCapacityIsolationFSQuotaMonitoring: {Default: false, PreRelease: featuregate.Alpha}, LogarithmicScaleDown: {Default: true, PreRelease: featuregate.Beta}, + MatchLabelKeysInPodAffinity: {Default: false, PreRelease: featuregate.Alpha}, + MatchLabelKeysInPodTopologySpread: {Default: true, PreRelease: featuregate.Beta}, MaxUnavailableStatefulSet: {Default: false, PreRelease: featuregate.Alpha}, @@ -1091,12 +1130,12 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS MinimizeIPTablesRestore: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.30 - MultiCIDRRangeAllocator: {Default: false, PreRelease: featuregate.Alpha}, - MultiCIDRServiceAllocator: {Default: false, PreRelease: featuregate.Alpha}, NewVolumeManagerReconstruction: {Default: true, PreRelease: featuregate.Beta}, + NFTablesProxyMode: {Default: false, PreRelease: featuregate.Alpha}, + NodeLogQuery: {Default: false, PreRelease: featuregate.Alpha}, NodeOutOfServiceVolumeDetach: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // 
remove in 1.31 @@ -1105,7 +1144,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS PDBUnhealthyPodEvictionPolicy: {Default: true, PreRelease: featuregate.Beta}, - PersistentVolumeLastPhaseTransitionTime: {Default: false, PreRelease: featuregate.Alpha}, + PersistentVolumeLastPhaseTransitionTime: {Default: true, PreRelease: featuregate.Beta}, PodAndContainerStatsFromCRI: {Default: false, PreRelease: featuregate.Alpha}, @@ -1113,13 +1152,13 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS PodDisruptionConditions: {Default: true, PreRelease: featuregate.Beta}, - PodReadyToStartContainersCondition: {Default: false, PreRelease: featuregate.Alpha}, + PodReadyToStartContainersCondition: {Default: true, PreRelease: featuregate.Beta}, - PodHostIPs: {Default: false, PreRelease: featuregate.Alpha}, + PodHostIPs: {Default: true, PreRelease: featuregate.Beta}, - PodSchedulingReadiness: {Default: true, PreRelease: featuregate.Beta}, + PodLifecycleSleepAction: {Default: false, PreRelease: featuregate.Alpha}, - ProbeTerminationGracePeriod: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 + PodSchedulingReadiness: {Default: true, PreRelease: featuregate.Beta}, ProcMountType: {Default: false, PreRelease: featuregate.Alpha}, @@ -1127,25 +1166,33 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS QOSReserved: {Default: false, PreRelease: featuregate.Alpha}, - ReadWriteOncePod: {Default: true, PreRelease: featuregate.Beta}, + ReadWriteOncePod: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 RecoverVolumeExpansionFailure: {Default: false, PreRelease: featuregate.Alpha}, - RetroactiveDefaultStorageClass: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 - RotateKubeletServerCertificate: {Default: true, PreRelease: featuregate.Beta}, + RuntimeClassInImageCriAPI: {Default: false, PreRelease: featuregate.Alpha}, + ElasticIndexedJob: {Default: true, PreRelease: featuregate.Beta}, SchedulerQueueingHints: {Default: false, PreRelease: featuregate.Beta}, - SeccompDefault: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 - SecurityContextDeny: {Default: false, PreRelease: featuregate.Alpha}, - ServiceNodePortStaticSubrange: {Default: true, PreRelease: featuregate.Beta}, + SeparateTaintEvictionController: {Default: true, PreRelease: featuregate.Beta}, + + ServiceAccountTokenJTI: {Default: false, PreRelease: featuregate.Alpha}, - SidecarContainers: {Default: false, PreRelease: featuregate.Alpha}, + ServiceAccountTokenPodNodeInfo: {Default: false, PreRelease: featuregate.Alpha}, + + ServiceAccountTokenNodeBinding: {Default: false, PreRelease: featuregate.Alpha}, + + ServiceAccountTokenNodeBindingValidation: {Default: false, PreRelease: featuregate.Alpha}, + + ServiceNodePortStaticSubrange: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.29; remove in 1.31 + + SidecarContainers: {Default: true, PreRelease: featuregate.Beta}, SizeMemoryBackedVolumes: {Default: true, PreRelease: featuregate.Beta}, @@ -1157,8 +1204,6 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS TopologyAwareHints: {Default: true, PreRelease: featuregate.Beta}, - TopologyManager: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.27; remove in 1.29 - TopologyManagerPolicyAlphaOptions: {Default: false, PreRelease: featuregate.Alpha}, 
TopologyManagerPolicyBetaOptions: {Default: true, PreRelease: featuregate.Beta}, @@ -1167,6 +1212,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS UnknownVersionInteroperabilityProxy: {Default: false, PreRelease: featuregate.Alpha}, + VolumeAttributesClass: {Default: false, PreRelease: featuregate.Alpha}, + VolumeCapacityPriority: {Default: false, PreRelease: featuregate.Alpha}, UserNamespacesSupport: {Default: false, PreRelease: featuregate.Alpha}, @@ -1185,6 +1232,12 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS PodIndexLabel: {Default: true, PreRelease: featuregate.Beta}, + LoadBalancerIPMode: {Default: false, PreRelease: featuregate.Alpha}, + + ImageMaximumGCAge: {Default: false, PreRelease: featuregate.Alpha}, + + UserNamespacesPodSecurityStandards: {Default: false, PreRelease: featuregate.Alpha}, + // inherited features from generic apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: @@ -1192,25 +1245,33 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS genericfeatures.AggregatedDiscoveryEndpoint: {Default: true, PreRelease: featuregate.Beta}, - genericfeatures.APIListChunking: {Default: true, PreRelease: featuregate.Beta}, + genericfeatures.APIListChunking: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 - genericfeatures.APIPriorityAndFairness: {Default: true, PreRelease: featuregate.Beta}, + genericfeatures.APIPriorityAndFairness: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 genericfeatures.APIResponseCompression: {Default: true, PreRelease: featuregate.Beta}, + genericfeatures.KMSv1: {Default: false, PreRelease: featuregate.Deprecated}, + + genericfeatures.KMSv2: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 + + genericfeatures.KMSv2KDF: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 + genericfeatures.ValidatingAdmissionPolicy: {Default: false, PreRelease: featuregate.Beta}, - genericfeatures.CustomResourceValidationExpressions: {Default: true, PreRelease: featuregate.Beta}, + genericfeatures.CustomResourceValidationExpressions: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 genericfeatures.OpenAPIEnums: {Default: true, PreRelease: featuregate.Beta}, - genericfeatures.OpenAPIV3: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 - genericfeatures.ServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 genericfeatures.ServerSideFieldValidation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 - genericfeatures.UnauthenticatedHTTP2DOSMitigation: {Default: false, PreRelease: featuregate.Beta}, + genericfeatures.StructuredAuthorizationConfiguration: {Default: false, PreRelease: featuregate.Alpha}, + + genericfeatures.UnauthenticatedHTTP2DOSMitigation: {Default: true, PreRelease: featuregate.Beta}, + + genericfeatures.ZeroLimitedNominalConcurrencyShares: {Default: false, PreRelease: featuregate.Beta}, // inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: diff --git a/vendor/k8s.io/kubernetes/pkg/util/filesystem/defaultfs.go b/vendor/k8s.io/kubernetes/pkg/util/filesystem/defaultfs.go new file mode 100644 index 000000000..39673a958 --- /dev/null +++ 
b/vendor/k8s.io/kubernetes/pkg/util/filesystem/defaultfs.go @@ -0,0 +1,173 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filesystem + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + "time" +) + +// DefaultFs implements Filesystem using same-named functions from "os" and "io" +type DefaultFs struct { + root string +} + +var _ Filesystem = &DefaultFs{} + +// NewTempFs returns a fake Filesystem in temporary directory, useful for unit tests +func NewTempFs() Filesystem { + path, _ := os.MkdirTemp("", "tmpfs") + return &DefaultFs{ + root: path, + } +} + +func (fs *DefaultFs) prefix(path string) string { + if len(fs.root) == 0 { + return path + } + return filepath.Join(fs.root, path) +} + +// Stat via os.Stat +func (fs *DefaultFs) Stat(name string) (os.FileInfo, error) { + return os.Stat(fs.prefix(name)) +} + +// Create via os.Create +func (fs *DefaultFs) Create(name string) (File, error) { + file, err := os.Create(fs.prefix(name)) + if err != nil { + return nil, err + } + return &defaultFile{file}, nil +} + +// Rename via os.Rename +func (fs *DefaultFs) Rename(oldpath, newpath string) error { + if !strings.HasPrefix(oldpath, fs.root) { + oldpath = fs.prefix(oldpath) + } + if !strings.HasPrefix(newpath, fs.root) { + newpath = fs.prefix(newpath) + } + return os.Rename(oldpath, newpath) +} + +// MkdirAll via os.MkdirAll +func (fs *DefaultFs) MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(fs.prefix(path), perm) +} + +// MkdirAllWithPathCheck checks if path exists already. If not, it creates a directory +// named path, along with any necessary parents, and returns nil, or else returns an error. +// Permission bits perm (before umask) are used for all directories that +// MkdirAllWithPathCheck creates. +// If path is already a directory, MkdirAllWithPathCheck does nothing and returns nil. +// NOTE: In case of Windows NTFS, mount points are implemented as reparse-point +// (similar to symlink) and do not represent actual directory. Hence Directory existence +// check for windows NTFS will NOT check for dir, but for symlink presence. +func MkdirAllWithPathCheck(path string, perm os.FileMode) error { + if dir, err := os.Lstat(path); err == nil { + // If the path exists already, + // 1. for Unix/Linux OS, check if the path is directory. + // 2. for windows NTFS, check if the path is symlink instead of directory. + if dir.IsDir() || + (runtime.GOOS == "windows" && (dir.Mode()&os.ModeSymlink != 0)) { + return nil + } + return fmt.Errorf("path %v exists but is not a directory", path) + } + // If existence of path not known, attempt to create it. 
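+	// Note: os.MkdirAll creates any missing parent directories and returns nil
+	// if the directory already exists, so reaching this point twice is harmless.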
+ if err := os.MkdirAll(path, perm); err != nil { + return err + } + return nil +} + +// Chtimes via os.Chtimes +func (fs *DefaultFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return os.Chtimes(fs.prefix(name), atime, mtime) +} + +// RemoveAll via os.RemoveAll +func (fs *DefaultFs) RemoveAll(path string) error { + return os.RemoveAll(fs.prefix(path)) +} + +// Remove via os.Remove +func (fs *DefaultFs) Remove(name string) error { + return os.Remove(fs.prefix(name)) +} + +// ReadFile via os.ReadFile +func (fs *DefaultFs) ReadFile(filename string) ([]byte, error) { + return os.ReadFile(fs.prefix(filename)) +} + +// TempDir via os.MkdirTemp +func (fs *DefaultFs) TempDir(dir, prefix string) (string, error) { + return os.MkdirTemp(fs.prefix(dir), prefix) +} + +// TempFile via os.CreateTemp +func (fs *DefaultFs) TempFile(dir, prefix string) (File, error) { + file, err := os.CreateTemp(fs.prefix(dir), prefix) + if err != nil { + return nil, err + } + return &defaultFile{file}, nil +} + +// ReadDir via os.ReadDir +func (fs *DefaultFs) ReadDir(dirname string) ([]os.DirEntry, error) { + return os.ReadDir(fs.prefix(dirname)) +} + +// Walk via filepath.Walk +func (fs *DefaultFs) Walk(root string, walkFn filepath.WalkFunc) error { + return filepath.Walk(fs.prefix(root), walkFn) +} + +// defaultFile implements File using same-named functions from "os" +type defaultFile struct { + file *os.File +} + +// Name via os.File.Name +func (file *defaultFile) Name() string { + return file.file.Name() +} + +// Write via os.File.Write +func (file *defaultFile) Write(b []byte) (n int, err error) { + return file.file.Write(b) +} + +// Sync via os.File.Sync +func (file *defaultFile) Sync() error { + return file.file.Sync() +} + +// Close via os.File.Close +func (file *defaultFile) Close() error { + return file.file.Close() +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/filesystem/filesystem.go b/vendor/k8s.io/kubernetes/pkg/util/filesystem/filesystem.go new file mode 100644 index 000000000..6408e0fa8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/filesystem/filesystem.go @@ -0,0 +1,52 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package filesystem + +import ( + "os" + "path/filepath" + "time" +) + +// Filesystem is an interface that we can use to mock various filesystem operations +type Filesystem interface { + // from "os" + Stat(name string) (os.FileInfo, error) + Create(name string) (File, error) + Rename(oldpath, newpath string) error + MkdirAll(path string, perm os.FileMode) error + Chtimes(name string, atime time.Time, mtime time.Time) error + RemoveAll(path string) error + Remove(name string) error + + // from "os" + ReadFile(filename string) ([]byte, error) + TempDir(dir, prefix string) (string, error) + TempFile(dir, prefix string) (File, error) + ReadDir(dirname string) ([]os.DirEntry, error) + Walk(root string, walkFn filepath.WalkFunc) error +} + +// File is an interface that we can use to mock various filesystem operations typically +// accessed through the File object from the "os" package +type File interface { + // for now, the only os.File methods used are those below, add more as necessary + Name() string + Write(b []byte) (n int, err error) + Sync() error + Close() error +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/filesystem/util_unix.go b/vendor/k8s.io/kubernetes/pkg/util/filesystem/util_unix.go new file mode 100644 index 000000000..df887f945 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/filesystem/util_unix.go @@ -0,0 +1,37 @@ +//go:build freebsd || linux || darwin +// +build freebsd linux darwin + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filesystem + +import ( + "fmt" + "os" +) + +// IsUnixDomainSocket returns whether a given file is a AF_UNIX socket file +func IsUnixDomainSocket(filePath string) (bool, error) { + fi, err := os.Stat(filePath) + if err != nil { + return false, fmt.Errorf("stat file %s failed: %v", filePath, err) + } + if fi.Mode()&os.ModeSocket == 0 { + return false, nil + } + return true, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/filesystem/util_windows.go b/vendor/k8s.io/kubernetes/pkg/util/filesystem/util_windows.go new file mode 100644 index 000000000..cd6a11ed3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/filesystem/util_windows.go @@ -0,0 +1,87 @@ +//go:build windows +// +build windows + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filesystem + +import ( + "fmt" + "net" + "os" + "time" + + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" +) + +const ( + // Amount of time to wait between attempting to use a Unix domain socket. 
+ // As detailed in https://github.com/kubernetes/kubernetes/issues/104584 + // the first attempt will most likely fail, hence the need to retry + socketDialRetryPeriod = 1 * time.Second + // Overall timeout value to dial a Unix domain socket, including retries + socketDialTimeout = 4 * time.Second +) + +// IsUnixDomainSocket returns whether a given file is a AF_UNIX socket file +// Note that due to the retry logic inside, it could take up to 4 seconds +// to determine whether or not the file path supplied is a Unix domain socket +func IsUnixDomainSocket(filePath string) (bool, error) { + // Due to the absence of golang support for os.ModeSocket in Windows (https://github.com/golang/go/issues/33357) + // we need to dial the file and check if we receive an error to determine if a file is Unix Domain Socket file. + + // Note that querrying for the Reparse Points (https://docs.microsoft.com/en-us/windows/win32/fileio/reparse-points) + // for the file (using FSCTL_GET_REPARSE_POINT) and checking for reparse tag: reparseTagSocket + // does NOT work in 1809 if the socket file is created within a bind mounted directory by a container + // and the FSCTL is issued in the host by the kubelet. + + // If the file does not exist, it cannot be a Unix domain socket. + if _, err := os.Stat(filePath); os.IsNotExist(err) { + return false, fmt.Errorf("File %s not found. Err: %v", filePath, err) + } + + klog.V(6).InfoS("Function IsUnixDomainSocket starts", "filePath", filePath) + // As detailed in https://github.com/kubernetes/kubernetes/issues/104584 we cannot rely + // on the Unix Domain socket working on the very first try, hence the potential need to + // dial multiple times + var lastSocketErr error + err := wait.PollImmediate(socketDialRetryPeriod, socketDialTimeout, + func() (bool, error) { + klog.V(6).InfoS("Dialing the socket", "filePath", filePath) + var c net.Conn + c, lastSocketErr = net.Dial("unix", filePath) + if lastSocketErr == nil { + c.Close() + klog.V(6).InfoS("Socket dialed successfully", "filePath", filePath) + return true, nil + } + klog.V(6).InfoS("Failed the current attempt to dial the socket, so pausing before retry", + "filePath", filePath, "err", lastSocketErr, "socketDialRetryPeriod", + socketDialRetryPeriod) + return false, nil + }) + + // PollImmediate will return "timed out waiting for the condition" if the function it + // invokes never returns true + if err != nil { + klog.V(2).InfoS("Failed all attempts to dial the socket so marking it as a non-Unix Domain socket. Last socket error along with the error from PollImmediate follow", + "filePath", filePath, "lastSocketErr", lastSocketErr, "err", err) + return false, nil + } + return true, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/filesystem/watcher.go b/vendor/k8s.io/kubernetes/pkg/util/filesystem/watcher.go new file mode 100644 index 000000000..5141d97b1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/filesystem/watcher.go @@ -0,0 +1,89 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package filesystem + +import ( + "github.com/fsnotify/fsnotify" +) + +// FSWatcher is a callback-based filesystem watcher abstraction for fsnotify. +type FSWatcher interface { + // Initializes the watcher with the given watch handlers. + // Called before all other methods. + Init(FSEventHandler, FSErrorHandler) error + + // Starts listening for events and errors. + // When an event or error occurs, the corresponding handler is called. + Run() + + // Add a filesystem path to watch + AddWatch(path string) error +} + +// FSEventHandler is called when a fsnotify event occurs. +type FSEventHandler func(event fsnotify.Event) + +// FSErrorHandler is called when a fsnotify error occurs. +type FSErrorHandler func(err error) + +type fsnotifyWatcher struct { + watcher *fsnotify.Watcher + eventHandler FSEventHandler + errorHandler FSErrorHandler +} + +var _ FSWatcher = &fsnotifyWatcher{} + +// NewFsnotifyWatcher returns an implementation of FSWatcher that continuously listens for +// fsnotify events and calls the event handler as soon as an event is received. +func NewFsnotifyWatcher() FSWatcher { + return &fsnotifyWatcher{} +} + +func (w *fsnotifyWatcher) AddWatch(path string) error { + return w.watcher.Add(path) +} + +func (w *fsnotifyWatcher) Init(eventHandler FSEventHandler, errorHandler FSErrorHandler) error { + var err error + w.watcher, err = fsnotify.NewWatcher() + if err != nil { + return err + } + + w.eventHandler = eventHandler + w.errorHandler = errorHandler + return nil +} + +func (w *fsnotifyWatcher) Run() { + go func() { + defer w.watcher.Close() + for { + select { + case event := <-w.watcher.Events: + if w.eventHandler != nil { + w.eventHandler(event) + } + case err := <-w.watcher.Errors: + if w.errorHandler != nil { + w.errorHandler(err) + } + } + } + }() +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/parsers/parsers.go b/vendor/k8s.io/kubernetes/pkg/util/parsers/parsers.go index 75130a862..80a5f32a8 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/parsers/parsers.go +++ b/vendor/k8s.io/kubernetes/pkg/util/parsers/parsers.go @@ -23,7 +23,7 @@ import ( // Import the crypto/sha512 algorithm for the docker image parser to work with 384 and 512 sha hashes _ "crypto/sha512" - dockerref "github.com/docker/distribution/reference" + dockerref "github.com/distribution/reference" ) // ParseImageName parses a docker image string into three parts: repo, tag and digest. diff --git a/vendor/k8s.io/kubernetes/pkg/volume/plugins.go b/vendor/k8s.io/kubernetes/pkg/volume/plugins.go index 0b7b4e87e..94c2330af 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/plugins.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/plugins.go @@ -333,6 +333,13 @@ type KubeletVolumeHost interface { WaitForCacheSync() error // Returns hostutil.HostUtils GetHostUtil() hostutil.HostUtils + + // Returns trust anchors from the named ClusterTrustBundle. + GetTrustAnchorsByName(name string, allowMissing bool) ([]byte, error) + + // Returns trust anchors from the ClusterTrustBundles selected by signer + // name and label selector. 
+ GetTrustAnchorsBySigner(signerName string, labelSelector *metav1.LabelSelector, allowMissing bool) ([]byte, error) } // AttachDetachVolumeHost is a AttachDetach Controller specific interface that plugins can use @@ -1057,7 +1064,7 @@ func NewPersistentVolumeRecyclerPodTemplate() *v1.Pod { Containers: []v1.Container{ { Name: "pv-recycler", - Image: "registry.k8s.io/debian-base:v2.0.0", + Image: "registry.k8s.io/build-image/debian-base:bookworm-v1.0.0", Command: []string{"/bin/sh"}, Args: []string{"-c", "test -e /scrub && find /scrub -mindepth 1 -delete && test -z \"$(ls -A /scrub)\" || exit 1"}, VolumeMounts: []v1.VolumeMount{ diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_windows.go b/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_windows.go index c039ada40..51ad0344a 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_windows.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_windows.go @@ -21,12 +21,16 @@ package hostutil import ( "fmt" + "io/fs" "os" "path" "path/filepath" "strings" + "syscall" + "golang.org/x/sys/windows" "k8s.io/klog/v2" + "k8s.io/kubernetes/pkg/util/filesystem" "k8s.io/mount-utils" utilpath "k8s.io/utils/path" ) @@ -87,9 +91,28 @@ func (hu *HostUtil) MakeRShared(path string) error { return nil } +func isSystemCannotAccessErr(err error) bool { + if fserr, ok := err.(*fs.PathError); ok { + errno, ok := fserr.Err.(syscall.Errno) + return ok && errno == windows.ERROR_CANT_ACCESS_FILE + } + + return false +} + // GetFileType checks for sockets/block/character devices func (hu *(HostUtil)) GetFileType(pathname string) (FileType, error) { - return getFileType(pathname) + filetype, err := getFileType(pathname) + + // os.Stat will return a 1920 error (windows.ERROR_CANT_ACCESS_FILE) if we use it on a Unix Socket + // on Windows. In this case, we need to use a different method to check if it's a Unix Socket. + if isSystemCannotAccessErr(err) { + if isSocket, errSocket := filesystem.IsUnixDomainSocket(pathname); errSocket == nil && isSocket { + return FileTypeSocket, nil + } + } + + return filetype, err } // PathExists checks whether the path exists diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/selinux.go b/vendor/k8s.io/kubernetes/pkg/volume/util/selinux.go index 22854734f..6150ab8db 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/selinux.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/selinux.go @@ -168,10 +168,6 @@ func SupportsSELinuxContextMount(volumeSpec *volume.Spec, volumePluginMgr *volum // VolumeSupportsSELinuxMount returns true if given volume access mode can support mount with SELinux mount options. func VolumeSupportsSELinuxMount(volumeSpec *volume.Spec) bool { - // Right now, SELinux mount is supported only for ReadWriteOncePod volumes. - if !utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod) { - return false - } if !utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) { return false } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/volumeattributesclass.go b/vendor/k8s.io/kubernetes/pkg/volume/util/volumeattributesclass.go new file mode 100644 index 000000000..06d551691 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/volumeattributesclass.go @@ -0,0 +1,72 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "sort" + + storagev1alpha1 "k8s.io/api/storage/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + storagev1alpha1listers "k8s.io/client-go/listers/storage/v1alpha1" + "k8s.io/klog/v2" +) + +const ( + // AlphaIsDefaultVolumeAttributesClassAnnotation is the alpha version of IsDefaultVolumeAttributesClassAnnotation. + AlphaIsDefaultVolumeAttributesClassAnnotation = "volumeattributesclass.alpha.kubernetes.io/is-default-class" +) + +// GetDefaultVolumeAttributesClass returns the default VolumeAttributesClass from the store, or nil. +func GetDefaultVolumeAttributesClass(lister storagev1alpha1listers.VolumeAttributesClassLister, driverName string) (*storagev1alpha1.VolumeAttributesClass, error) { + list, err := lister.List(labels.Everything()) + if err != nil { + return nil, err + } + + defaultClasses := []*storagev1alpha1.VolumeAttributesClass{} + for _, class := range list { + if IsDefaultVolumeAttributesClassAnnotation(class.ObjectMeta) && class.DriverName == driverName { + defaultClasses = append(defaultClasses, class) + klog.V(4).Infof("GetDefaultVolumeAttributesClass added: %s", class.Name) + } + } + + if len(defaultClasses) == 0 { + return nil, nil + } + + // Primary sort by creation timestamp, newest first + // Secondary sort by class name, ascending order + sort.Slice(defaultClasses, func(i, j int) bool { + if defaultClasses[i].CreationTimestamp.UnixNano() == defaultClasses[j].CreationTimestamp.UnixNano() { + return defaultClasses[i].Name < defaultClasses[j].Name + } + return defaultClasses[i].CreationTimestamp.UnixNano() > defaultClasses[j].CreationTimestamp.UnixNano() + }) + if len(defaultClasses) > 1 { + klog.V(4).Infof("%d default VolumeAttributesClass were found, choosing: %s", len(defaultClasses), defaultClasses[0].Name) + } + + return defaultClasses[0], nil +} + +// IsDefaultVolumeAttributesClassAnnotation returns a boolean if the default +// volume attributes class annotation is set +func IsDefaultVolumeAttributesClassAnnotation(obj metav1.ObjectMeta) bool { + return obj.Annotations[AlphaIsDefaultVolumeAttributesClassAnnotation] == "true" +} diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/.import-restrictions b/vendor/k8s.io/kubernetes/test/e2e/framework/.import-restrictions index f3070eeed..660e7453f 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/.import-restrictions +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/.import-restrictions @@ -4,21 +4,62 @@ rules: # The following packages are okay to use: # # public API - - selectorRegexp: ^k8s[.]io/(api|apimachinery|client-go|component-base|klog|pod-security-admission|utils)/|^[a-z]+(/|$)|github.com/onsi/(ginkgo|gomega)|^k8s[.]io/kubernetes/test/(e2e/framework/internal/|utils) + - selectorRegexp: ^k8s[.]io/(api|apimachinery|client-go|component-base|klog|pod-security-admission|utils) allowedPrefixes: [ "" ] # stdlib - selectorRegexp: ^[a-z]+(/|$) allowedPrefixes: [ "" ] - # Ginkgo + Gomega. 
- - selectorRegexp: github.com/onsi/(ginkgo|gomega)|^k8s[.]io/kubernetes/test/(e2e/framework/internal/|utils) + # stdlib x and proto + - selectorRegexp: ^golang.org/x|^google.golang.org/protobuf + allowedPrefixes: [ "" ] + + # Ginkgo + Gomega + - selectorRegexp: ^github.com/onsi/(ginkgo|gomega) + allowedPrefixes: [ "" ] + + # kube-openapi + - selectorRegexp: ^k8s.io/kube-openapi + allowedPrefixes: [ "" ] + + # Public SIG Repos + - selectorRegexp: ^sigs.k8s.io/(json|yaml|structured-merge-diff) allowedPrefixes: [ "" ] # some of the shared test helpers (but not E2E sub-packages!) - selectorRegexp: ^k8s[.]io/kubernetes/test/(e2e/framework/internal/|utils) allowedPrefixes: [ "" ] + # Third party deps + - selectorRegexp: ^github.com/|^gopkg.in + allowedPrefixes: [ + "gopkg.in/inf.v0", + "gopkg.in/yaml.v2", + "github.com/blang/semver/", + "github.com/davecgh/go-spew/spew", + "github.com/evanphx/json-patch", + "github.com/go-logr/logr", + "github.com/gogo/protobuf/proto", + "github.com/gogo/protobuf/sortkeys", + "github.com/golang/protobuf/proto", + "github.com/google/gnostic-models/openapiv2", + "github.com/google/gnostic-models/openapiv3", + "github.com/google/go-cmp/cmp", + "github.com/google/go-cmp/cmp/cmpopts", + "github.com/google/gofuzz", + "github.com/google/uuid", + "github.com/imdario/mergo", + "github.com/prometheus/client_golang/", + "github.com/prometheus/client_model/", + "github.com/prometheus/common/", + "github.com/prometheus/procfs", + "github.com/spf13/cobra", + "github.com/spf13/pflag", + "github.com/stretchr/testify/assert", + "github.com/stretchr/testify/require" + ] + # Everything else isn't. # # In particular importing any test/e2e/framework/* package would be a diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/OWNERS b/vendor/k8s.io/kubernetes/test/e2e/framework/OWNERS index f12bdef92..75130916b 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/OWNERS +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/OWNERS @@ -2,7 +2,6 @@ approvers: - andrewsykim - - fabriziopandini - pohly - oomichi - neolit123 @@ -10,7 +9,6 @@ approvers: reviewers: - sig-testing-reviewers - andrewsykim - - fabriziopandini - pohly - oomichi - neolit123 @@ -18,4 +16,5 @@ reviewers: labels: - area/e2e-test-framework emeritus_approvers: + - fabriziopandini - timothysc diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/README.md b/vendor/k8s.io/kubernetes/test/e2e/framework/README.md index f8ed1eff2..2f5e79677 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/README.md +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/README.md @@ -4,7 +4,7 @@ The Kubernetes E2E framework simplifies writing Ginkgo tests suites. It's main usage is for these tests suites in the Kubernetes repository itself: - test/e2e: runs as client for a Kubernetes cluster. The e2e.test binary is used for conformance testing. -- test/e2e_node: runs on the same node as a kublet instance. Used for testing +- test/e2e_node: runs on the same node as a kubelet instance. Used for testing kubelet. - test/e2e_kubeadm: test suite for kubeadm. diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/bugs.go b/vendor/k8s.io/kubernetes/test/e2e/framework/bugs.go new file mode 100644 index 000000000..a82023533 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/bugs.go @@ -0,0 +1,108 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + "sync" + + "github.com/onsi/ginkgo/v2/types" +) + +var ( + bugs []Bug + bugMutex sync.Mutex +) + +// RecordBug stores information about a bug in the E2E suite source code that +// cannot be reported through ginkgo.Fail because it was found outside of some +// test, for example during test registration. +// +// This can be used instead of raising a panic. Then all bugs can be reported +// together instead of failing after the first one. +func RecordBug(bug Bug) { + bugMutex.Lock() + defer bugMutex.Unlock() + + bugs = append(bugs, bug) +} + +type Bug struct { + FileName string + LineNumber int + Message string +} + +// NewBug creates a new bug with a location that is obtained by skipping a certain number +// of stack frames. Passing zero will record the source code location of the direct caller +// of NewBug. +func NewBug(message string, skip int) Bug { + location := types.NewCodeLocation(skip + 1) + return Bug{FileName: location.FileName, LineNumber: location.LineNumber, Message: message} +} + +// FormatBugs produces a report that includes all bugs recorded earlier via +// RecordBug. An error is returned with the report if there have been bugs. +func FormatBugs() error { + bugMutex.Lock() + defer bugMutex.Unlock() + + if len(bugs) == 0 { + return nil + } + + lines := make([]string, 0, len(bugs)) + wd, err := os.Getwd() + if err != nil { + return fmt.Errorf("get current directory: %v", err) + } + // Sort by file name, line number, message. For the sake of simplicity + // this uses the full file name even though the output the may use a + // relative path. Usually the result should be the same because full + // paths will all have the same prefix. + sort.Slice(bugs, func(i, j int) bool { + switch strings.Compare(bugs[i].FileName, bugs[j].FileName) { + case -1: + return true + case 1: + return false + } + if bugs[i].LineNumber < bugs[j].LineNumber { + return true + } + if bugs[i].LineNumber > bugs[j].LineNumber { + return false + } + return bugs[i].Message < bugs[j].Message + }) + for _, bug := range bugs { + // Use relative paths, if possible. + path := bug.FileName + if wd != "" { + if relpath, err := filepath.Rel(wd, bug.FileName); err == nil { + path = relpath + } + } + lines = append(lines, fmt.Sprintf("ERROR: %s:%d: %s\n", path, bug.LineNumber, strings.TrimSpace(bug.Message))) + } + return errors.New(strings.Join(lines, "")) +} diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/events/events.go b/vendor/k8s.io/kubernetes/test/e2e/framework/events/events.go index b38c4a1ee..1ad19b455 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/events/events.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/events/events.go @@ -34,7 +34,7 @@ type Action func() error // Please note delivery of events is not guaranteed. Asserting on events can lead to flaky tests. 
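
For illustration only (the pod name, reason and timeout below are made up, and the sketch assumes that eventSelector is a field selector over the namespace's events and that msg is matched against event messages), the helper below is typically called like this:

	eventSelector := fields.Set{
		"involvedObject.kind":      "Pod",
		"involvedObject.name":      podName,
		"involvedObject.namespace": ns,
		"reason":                   "FailedScheduling",
	}.AsSelector().String()
	// Wait up to two minutes for a matching event whose message mentions the shortage.
	err := e2eevents.WaitTimeoutForEvent(ctx, f.ClientSet, ns, eventSelector, "Insufficient cpu", 2*time.Minute)
	framework.ExpectNoError(err)
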
func WaitTimeoutForEvent(ctx context.Context, c clientset.Interface, namespace, eventSelector, msg string, timeout time.Duration) error { interval := 2 * time.Second - return wait.PollImmediateWithContext(ctx, interval, timeout, eventOccurred(c, namespace, eventSelector, msg)) + return wait.PollUntilContextTimeout(ctx, interval, timeout, true, eventOccurred(c, namespace, eventSelector, msg)) } func eventOccurred(c clientset.Interface, namespace, eventSelector, msg string) wait.ConditionWithContextFunc { diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/expect.go b/vendor/k8s.io/kubernetes/test/e2e/framework/expect.go index 5ad38f864..726e754b3 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/expect.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/expect.go @@ -212,8 +212,9 @@ func newAsyncAssertion(ctx context.Context, args []interface{}, consistently boo args: args, // PodStart is used as default because waiting for a pod is the // most common operation. - timeout: TestContext.timeouts.PodStart, - interval: TestContext.timeouts.Poll, + timeout: TestContext.timeouts.PodStart, + interval: TestContext.timeouts.Poll, + consistently: consistently, } } @@ -292,13 +293,6 @@ func (f *FailureError) backtrace() { // } var ErrFailure error = FailureError{} -// ExpectEqual expects the specified two are the same, otherwise an exception raises -// -// Deprecated: use gomega.Expect().To(gomega.Equal()) -func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...) -} - // ExpectNotEqual expects the specified two are not the same, otherwise an exception raises // // Deprecated: use gomega.Expect().ToNot(gomega.Equal()) @@ -362,24 +356,3 @@ func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) { } Fail(prefix+err.Error(), 1+offset) } - -// ExpectConsistOf expects actual contains precisely the extra elements. The ordering of the elements does not matter. -// -// Deprecated: use gomega.Expect().To(gomega.ConsistOf()) instead -func ExpectConsistOf(actual interface{}, extra interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).To(gomega.ConsistOf(extra), explain...) -} - -// ExpectHaveKey expects the actual map has the key in the keyset -// -// Deprecated: use gomega.Expect().To(gomega.HaveKey()) instead -func ExpectHaveKey(actual interface{}, key interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).To(gomega.HaveKey(key), explain...) -} - -// ExpectEmpty expects actual is empty -// -// Deprecated: use gomega.Expect().To(gomega.BeEmpty()) instead -func ExpectEmpty(actual interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).To(gomega.BeEmpty(), explain...) -} diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper.go b/vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper.go index e35fc4ae9..e04eeff74 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper.go @@ -17,13 +17,73 @@ limitations under the License. package framework import ( + "fmt" "path" "reflect" + "regexp" + "slices" + "strings" "github.com/onsi/ginkgo/v2" "github.com/onsi/ginkgo/v2/types" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/sets" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/component-base/featuregate" +) + +// Feature is the name of a certain feature that the cluster under test must have. 
+// Such features are different from feature gates. +type Feature string + +// Environment is the name for the environment in which a test can run, like +// "Linux" or "Windows". +type Environment string + +// NodeFeature is the name of a feature that a node must support. To be +// removed, see +// https://github.com/kubernetes/enhancements/tree/master/keps/sig-testing/3041-node-conformance-and-features#nodefeature. +type NodeFeature string + +type Valid[T comparable] struct { + items sets.Set[T] + frozen bool +} + +// Add registers a new valid item name. The expected usage is +// +// var SomeFeature = framework.ValidFeatures.Add("Some") +// +// during the init phase of an E2E suite. Individual tests should not register +// their own, to avoid uncontrolled proliferation of new items. E2E suites can, +// but don't have to, enforce that by freezing the set of valid names. +func (v *Valid[T]) Add(item T) T { + if v.frozen { + RecordBug(NewBug(fmt.Sprintf(`registry %T is already frozen, "%v" must not be added anymore`, *v, item), 1)) + } + if v.items == nil { + v.items = sets.New[T]() + } + if v.items.Has(item) { + RecordBug(NewBug(fmt.Sprintf(`registry %T already contains "%v", it must not be added again`, *v, item), 1)) + } + v.items.Insert(item) + return item +} + +func (v *Valid[T]) Freeze() { + v.frozen = true +} + +// These variables contain the parameters that [WithFeature], [WithEnvironment] +// and [WithNodeFeatures] accept. The framework itself has no pre-defined +// constants. Test suites and tests may define their own and then add them here +// before calling these With functions. +var ( + ValidFeatures Valid[Feature] + ValidEnvironments Valid[Environment] + ValidNodeFeatures Valid[NodeFeature] ) var errInterface = reflect.TypeOf((*error)(nil)).Elem() @@ -65,8 +125,433 @@ func AnnotatedLocationWithOffset(annotation string, offset int) types.CodeLocati return codeLocation } +// SIGDescribe returns a wrapper function for ginkgo.Describe which injects +// the SIG name as annotation. The parameter should be lowercase with +// no spaces and no sig- or SIG- prefix. +func SIGDescribe(sig string) func(...interface{}) bool { + if !sigRE.MatchString(sig) || strings.HasPrefix(sig, "sig-") { + RecordBug(NewBug(fmt.Sprintf("SIG label must be lowercase, no spaces and no sig- prefix, got instead: %q", sig), 1)) + } + return func(args ...interface{}) bool { + args = append([]interface{}{WithLabel("sig-" + sig)}, args...) + return registerInSuite(ginkgo.Describe, args) + } +} + +var sigRE = regexp.MustCompile(`^[a-z]+(-[a-z]+)*$`) + // ConformanceIt is wrapper function for ginkgo It. Adds "[Conformance]" tag and makes static analysis easier. -func ConformanceIt(text string, args ...interface{}) bool { - args = append(args, ginkgo.Offset(1)) - return ginkgo.It(text+" [Conformance]", args...) +func ConformanceIt(args ...interface{}) bool { + args = append(args, ginkgo.Offset(1), WithConformance()) + return It(args...) +} + +// It is a wrapper around [ginkgo.It] which supports framework With* labels as +// optional arguments in addition to those already supported by ginkgo itself, +// like [ginkgo.Label] and [gingko.Offset]. +// +// Text and arguments may be mixed. The final text is a concatenation +// of the text arguments and special tags from the With functions. +func It(args ...interface{}) bool { + return registerInSuite(ginkgo.It, args) +} + +// It is a shorthand for the corresponding package function. 
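
As a rough sketch of how the wrappers above fit together (the suite and spec names here are invented, not taken from the patch): SIGDescribe injects the sig label, and the With* arguments passed to It/Describe/Context become both Ginkgo labels and inline [tags] in the spec text.

	var sigDescribe = framework.SIGDescribe("storage") // adds the "sig-storage" label and [sig-storage] tag

	var _ = sigDescribe("Mounted volumes", func() {
		f := framework.NewDefaultFramework("mounted-volumes")

		// Registers "... should survive kubelet restart [Disruptive]" with a
		// Disruptive label attached.
		f.It("should survive kubelet restart", f.WithDisruptive(), func(ctx context.Context) {
			// test body
		})
	})
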
+func (f *Framework) It(args ...interface{}) bool { + return registerInSuite(ginkgo.It, args) +} + +// Describe is a wrapper around [ginkgo.Describe] which supports framework +// With* labels as optional arguments in addition to those already supported by +// ginkgo itself, like [ginkgo.Label] and [gingko.Offset]. +// +// Text and arguments may be mixed. The final text is a concatenation +// of the text arguments and special tags from the With functions. +func Describe(args ...interface{}) bool { + return registerInSuite(ginkgo.Describe, args) +} + +// Describe is a shorthand for the corresponding package function. +func (f *Framework) Describe(args ...interface{}) bool { + return registerInSuite(ginkgo.Describe, args) +} + +// Context is a wrapper around [ginkgo.Context] which supports framework With* +// labels as optional arguments in addition to those already supported by +// ginkgo itself, like [ginkgo.Label] and [gingko.Offset]. +// +// Text and arguments may be mixed. The final text is a concatenation +// of the text arguments and special tags from the With functions. +func Context(args ...interface{}) bool { + return registerInSuite(ginkgo.Context, args) +} + +// Context is a shorthand for the corresponding package function. +func (f *Framework) Context(args ...interface{}) bool { + return registerInSuite(ginkgo.Context, args) +} + +// registerInSuite is the common implementation of all wrapper functions. It +// expects to be called through one intermediate wrapper. +func registerInSuite(ginkgoCall func(string, ...interface{}) bool, args []interface{}) bool { + var ginkgoArgs []interface{} + var offset ginkgo.Offset + var texts []string + + addLabel := func(label string) { + texts = append(texts, fmt.Sprintf("[%s]", label)) + ginkgoArgs = append(ginkgoArgs, ginkgo.Label(label)) + } + + haveEmptyStrings := false + for _, arg := range args { + switch arg := arg.(type) { + case label: + fullLabel := strings.Join(arg.parts, ":") + addLabel(fullLabel) + if arg.extra != "" { + addLabel(arg.extra) + } + if fullLabel == "Serial" { + ginkgoArgs = append(ginkgoArgs, ginkgo.Serial) + } + case ginkgo.Offset: + offset = arg + case string: + if arg == "" { + haveEmptyStrings = true + } + texts = append(texts, arg) + default: + ginkgoArgs = append(ginkgoArgs, arg) + } + } + offset += 2 // This function and its direct caller. + + // Now that we have the final offset, we can record bugs. + if haveEmptyStrings { + RecordBug(NewBug("empty strings as separators are unnecessary and need to be removed", int(offset))) + } + + // Enforce that text snippets to not start or end with spaces because + // those lead to double spaces when concatenating below. + for _, text := range texts { + if strings.HasPrefix(text, " ") || strings.HasSuffix(text, " ") { + RecordBug(NewBug(fmt.Sprintf("trailing or leading spaces are unnecessary and need to be removed: %q", text), int(offset))) + } + } + + ginkgoArgs = append(ginkgoArgs, offset) + text := strings.Join(texts, " ") + return ginkgoCall(text, ginkgoArgs...) +} + +var ( + tagRe = regexp.MustCompile(`\[.*?\]`) + deprecatedTags = sets.New("Conformance", "NodeConformance", "Disruptive", "Serial", "Slow") + deprecatedTagPrefixes = sets.New("Environment", "Feature", "NodeFeature", "FeatureGate") + deprecatedStability = sets.New("Alpha", "Beta") +) + +// validateSpecs checks that the test specs were registered as intended. 
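
A concrete (hypothetical) example of what this validation catches: a deprecated marker spelled out in the spec text instead of being added through the new helpers.

	// Flagged by validateText below as "[Slow] in plain text is deprecated and
	// must be added through WithSlow instead".
	ginkgo.It("should copy a large volume [Slow]", func() { /* ... */ })

	// Accepted: the wrapper adds both the Slow label and the [Slow] tag.
	framework.It("should copy a large volume", framework.WithSlow(), func() { /* ... */ })
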
+func validateSpecs(specs types.SpecReports) { + checked := sets.New[call]() + + for _, spec := range specs { + for i, text := range spec.ContainerHierarchyTexts { + c := call{ + text: text, + location: spec.ContainerHierarchyLocations[i], + } + if checked.Has(c) { + // No need to check the same container more than once. + continue + } + checked.Insert(c) + validateText(c.location, text, spec.ContainerHierarchyLabels[i]) + } + c := call{ + text: spec.LeafNodeText, + location: spec.LeafNodeLocation, + } + if !checked.Has(c) { + validateText(spec.LeafNodeLocation, spec.LeafNodeText, spec.LeafNodeLabels) + checked.Insert(c) + } + } +} + +// call acts as (mostly) unique identifier for a container node call like +// Describe or Context. It's not perfect because theoretically a line might +// have multiple calls with the same text, but that isn't a problem in +// practice. +type call struct { + text string + location types.CodeLocation +} + +// validateText checks for some known tags that should not be added through the +// plain text strings anymore. Eventually, all such tags should get replaced +// with the new APIs. +func validateText(location types.CodeLocation, text string, labels []string) { + for _, tag := range tagRe.FindAllString(text, -1) { + if tag == "[]" { + recordTextBug(location, "[] in plain text is invalid") + continue + } + // Strip square brackets. + tag = tag[1 : len(tag)-1] + if slices.Contains(labels, tag) { + // Okay, was also set as label. + continue + } + if deprecatedTags.Has(tag) { + recordTextBug(location, fmt.Sprintf("[%s] in plain text is deprecated and must be added through With%s instead", tag, tag)) + } + if deprecatedStability.Has(tag) { + recordTextBug(location, fmt.Sprintf("[%s] in plain text is deprecated and must be added by defining the feature gate through WithFeatureGate instead", tag)) + } + if index := strings.Index(tag, ":"); index > 0 { + prefix := tag[:index] + if deprecatedTagPrefixes.Has(prefix) { + recordTextBug(location, fmt.Sprintf("[%s] in plain text is deprecated and must be added through With%s(%s) instead", tag, prefix, tag[index+1:])) + } + } + } +} + +func recordTextBug(location types.CodeLocation, message string) { + RecordBug(Bug{FileName: location.FileName, LineNumber: location.LineNumber, Message: message}) +} + +// WithEnvironment specifies that a certain test or group of tests only works +// with a feature available. The return value must be passed as additional +// argument to [framework.It], [framework.Describe], [framework.Context]. +// +// The feature must be listed in ValidFeatures. +func WithFeature(name Feature) interface{} { + return withFeature(name) +} + +// WithFeature is a shorthand for the corresponding package function. +func (f *Framework) WithFeature(name Feature) interface{} { + return withFeature(name) +} + +func withFeature(name Feature) interface{} { + if !ValidFeatures.items.Has(name) { + RecordBug(NewBug(fmt.Sprintf("WithFeature: unknown feature %q", name), 2)) + } + return newLabel("Feature", string(name)) +} + +// WithFeatureGate specifies that a certain test or group of tests depends on a +// feature gate being enabled. The return value must be passed as additional +// argument to [framework.It], [framework.Describe], [framework.Context]. +// +// The feature gate must be listed in +// [k8s.io/apiserver/pkg/util/feature.DefaultMutableFeatureGate]. Once a +// feature gate gets removed from there, the WithFeatureGate calls using it +// also need to be removed. 
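
A sketch of a call site (the spec name is invented; the gate is the one already referenced by the SELinux changes earlier in this patch):

	// Adds a FeatureGate:SELinuxMountReadWriteOncePod label plus an Alpha/Beta
	// label derived from the gate's prerelease level, and the matching [tags]
	// in the spec text. Unknown gates are reported through RecordBug.
	framework.It("applies the SELinux mount context",
		framework.WithFeatureGate(features.SELinuxMountReadWriteOncePod),
		func(ctx context.Context) {
			// ...
		})
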
+func WithFeatureGate(featureGate featuregate.Feature) interface{} { + return withFeatureGate(featureGate) +} + +// WithFeatureGate is a shorthand for the corresponding package function. +func (f *Framework) WithFeatureGate(featureGate featuregate.Feature) interface{} { + return withFeatureGate(featureGate) +} + +func withFeatureGate(featureGate featuregate.Feature) interface{} { + spec, ok := utilfeature.DefaultMutableFeatureGate.GetAll()[featureGate] + if !ok { + RecordBug(NewBug(fmt.Sprintf("WithFeatureGate: the feature gate %q is unknown", featureGate), 2)) + } + + // We use mixed case (i.e. Beta instead of BETA). GA feature gates have no level string. + var level string + if spec.PreRelease != "" { + level = string(spec.PreRelease) + level = strings.ToUpper(level[0:1]) + strings.ToLower(level[1:]) + } + + l := newLabel("FeatureGate", string(featureGate)) + l.extra = level + return l +} + +// WithEnvironment specifies that a certain test or group of tests only works +// in a certain environment. The return value must be passed as additional +// argument to [framework.It], [framework.Describe], [framework.Context]. +// +// The environment must be listed in ValidEnvironments. +func WithEnvironment(name Environment) interface{} { + return withEnvironment(name) +} + +// WithEnvironment is a shorthand for the corresponding package function. +func (f *Framework) WithEnvironment(name Environment) interface{} { + return withEnvironment(name) +} + +func withEnvironment(name Environment) interface{} { + if !ValidEnvironments.items.Has(name) { + RecordBug(NewBug(fmt.Sprintf("WithEnvironment: unknown environment %q", name), 2)) + } + return newLabel("Environment", string(name)) +} + +// WithNodeFeature specifies that a certain test or group of tests only works +// if the node supports a certain feature. The return value must be passed as +// additional argument to [framework.It], [framework.Describe], +// [framework.Context]. +// +// The environment must be listed in ValidNodeFeatures. +func WithNodeFeature(name NodeFeature) interface{} { + return withNodeFeature(name) +} + +// WithNodeFeature is a shorthand for the corresponding package function. +func (f *Framework) WithNodeFeature(name NodeFeature) interface{} { + return withNodeFeature(name) +} + +func withNodeFeature(name NodeFeature) interface{} { + if !ValidNodeFeatures.items.Has(name) { + RecordBug(NewBug(fmt.Sprintf("WithNodeFeature: unknown environment %q", name), 2)) + } + return newLabel("NodeFeature", string(name)) +} + +// WithConformace specifies that a certain test or group of tests must pass in +// all conformant Kubernetes clusters. The return value must be passed as +// additional argument to [framework.It], [framework.Describe], +// [framework.Context]. +func WithConformance() interface{} { + return withConformance() +} + +// WithConformance is a shorthand for the corresponding package function. +func (f *Framework) WithConformance() interface{} { + return withConformance() +} + +func withConformance() interface{} { + return newLabel("Conformance") +} + +// WithNodeConformance specifies that a certain test or group of tests for node +// functionality that does not depend on runtime or Kubernetes distro specific +// behavior. The return value must be passed as additional argument to +// [framework.It], [framework.Describe], [framework.Context]. +func WithNodeConformance() interface{} { + return withNodeConformance() +} + +// WithNodeConformance is a shorthand for the corresponding package function. 
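
These helpers replace the old plain-text markers; for example (spec names invented):

	// Previously written as ginkgo.It("should list pods [NodeConformance]", ...).
	framework.It("should list pods", framework.WithNodeConformance(), func(ctx context.Context) { /* ... */ })

	// ConformanceIt now takes the same argument list and appends WithConformance()
	// itself, so "[Conformance]" still ends up in the spec text.
	framework.ConformanceIt("should list pods", func(ctx context.Context) { /* ... */ })
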
+func (f *Framework) WithNodeConformance() interface{} { + return withNodeConformance() +} + +func withNodeConformance() interface{} { + return newLabel("NodeConformance") +} + +// WithDisruptive specifies that a certain test or group of tests temporarily +// affects the functionality of the Kubernetes cluster. The return value must +// be passed as additional argument to [framework.It], [framework.Describe], +// [framework.Context]. +func WithDisruptive() interface{} { + return withDisruptive() +} + +// WithDisruptive is a shorthand for the corresponding package function. +func (f *Framework) WithDisruptive() interface{} { + return withDisruptive() +} + +func withDisruptive() interface{} { + return newLabel("Disruptive") +} + +// WithSerial specifies that a certain test or group of tests must not run in +// parallel with other tests. The return value must be passed as additional +// argument to [framework.It], [framework.Describe], [framework.Context]. +// +// Starting with ginkgo v2, serial and parallel tests can be executed in the +// same invocation. Ginkgo itself will ensure that the serial tests run +// sequentially. +func WithSerial() interface{} { + return withSerial() +} + +// WithSerial is a shorthand for the corresponding package function. +func (f *Framework) WithSerial() interface{} { + return withSerial() +} + +func withSerial() interface{} { + return newLabel("Serial") +} + +// WithSlow specifies that a certain test or group of tests must not run in +// parallel with other tests. The return value must be passed as additional +// argument to [framework.It], [framework.Describe], [framework.Context]. +func WithSlow() interface{} { + return withSlow() +} + +// WithSlow is a shorthand for the corresponding package function. +func (f *Framework) WithSlow() interface{} { + return WithSlow() +} + +func withSlow() interface{} { + return newLabel("Slow") +} + +// WithLabel is a wrapper around [ginkgo.Label]. Besides adding an arbitrary +// label to a test, it also injects the label in square brackets into the test +// name. +func WithLabel(label string) interface{} { + return withLabel(label) +} + +// WithLabel is a shorthand for the corresponding package function. +func (f *Framework) WithLabel(label string) interface{} { + return withLabel(label) +} + +func withLabel(label string) interface{} { + return newLabel(label) +} + +type label struct { + // parts get concatenated with ":" to build the full label. + parts []string + // extra is an optional fully-formed extra label. + extra string +} + +func newLabel(parts ...string) label { + return label{parts: parts} +} + +// TagsEqual can be used to check whether two tags are the same. +// It's safe to compare e.g. the result of WithSlow() against the result +// of WithSerial(), the result will be false. False is also returned +// when a parameter is some completely different value. 
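
A minimal sketch of the comparison helper defined below:

	_ = framework.TagsEqual(framework.WithSlow(), framework.WithSlow())   // true
	_ = framework.TagsEqual(framework.WithSlow(), framework.WithSerial()) // false: different label
	_ = framework.TagsEqual(framework.WithSlow(), "Slow")                 // false: plain strings are not tags
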
+func TagsEqual(a, b interface{}) bool { + al, ok := a.(label) + if !ok { + return false + } + bl, ok := b.(label) + if !ok { + return false + } + if al.extra != bl.extra { + return false + } + return slices.Equal(al.parts, bl.parts) } diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/internal/junit/junit.go b/vendor/k8s.io/kubernetes/test/e2e/framework/internal/junit/junit.go index 2f61c869c..a33c780d5 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/internal/junit/junit.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/internal/junit/junit.go @@ -36,6 +36,10 @@ func WriteJUnitReport(report ginkgo.Report, filename string) error { // both, then tools like kettle and spyglass would concatenate // the two strings and thus show duplicated information. OmitFailureMessageAttr: true, + + // All labels are also part of the spec texts in inline [] tags, + // so we don't need to write them separately. + OmitSpecLabels: true, } return reporters.GenerateJUnitReportWithConfig(report, filename, config) diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/metrics/metrics_grabber.go b/vendor/k8s.io/kubernetes/test/e2e/framework/metrics/metrics_grabber.go index 2fdcd842d..768645268 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/metrics/metrics_grabber.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/metrics/metrics_grabber.go @@ -181,21 +181,34 @@ func (g *Grabber) GrabFromKubelet(ctx context.Context, nodeName string) (Kubelet return KubeletMetrics{}, fmt.Errorf("Error listing nodes with name %v, got %v", nodeName, nodes.Items) } kubeletPort := nodes.Items[0].Status.DaemonEndpoints.KubeletEndpoint.Port - return g.grabFromKubeletInternal(ctx, nodeName, int(kubeletPort)) + return g.grabFromKubeletInternal(ctx, nodeName, int(kubeletPort), "metrics") } -func (g *Grabber) grabFromKubeletInternal(ctx context.Context, nodeName string, kubeletPort int) (KubeletMetrics, error) { +// GrabresourceMetricsFromKubelet returns resource metrics from kubelet +func (g *Grabber) GrabResourceMetricsFromKubelet(ctx context.Context, nodeName string) (KubeletMetrics, error) { + nodes, err := g.client.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{"metadata.name": nodeName}.AsSelector().String()}) + if err != nil { + return KubeletMetrics{}, err + } + if len(nodes.Items) != 1 { + return KubeletMetrics{}, fmt.Errorf("Error listing nodes with name %v, got %v", nodeName, nodes.Items) + } + kubeletPort := nodes.Items[0].Status.DaemonEndpoints.KubeletEndpoint.Port + return g.grabFromKubeletInternal(ctx, nodeName, int(kubeletPort), "metrics/resource") +} + +func (g *Grabber) grabFromKubeletInternal(ctx context.Context, nodeName string, kubeletPort int, pathSuffix string) (KubeletMetrics, error) { if kubeletPort <= 0 || kubeletPort > 65535 { return KubeletMetrics{}, fmt.Errorf("Invalid Kubelet port %v. Skipping Kubelet's metrics gathering", kubeletPort) } - output, err := g.getMetricsFromNode(ctx, nodeName, int(kubeletPort)) + output, err := g.getMetricsFromNode(ctx, nodeName, int(kubeletPort), pathSuffix) if err != nil { return KubeletMetrics{}, err } return parseKubeletMetrics(output) } -func (g *Grabber) getMetricsFromNode(ctx context.Context, nodeName string, kubeletPort int) (string, error) { +func (g *Grabber) getMetricsFromNode(ctx context.Context, nodeName string, kubeletPort int, pathSuffix string) (string, error) { // There's a problem with timing out during proxy. Wrapping this in a goroutine to prevent deadlock. 
finished := make(chan struct{}, 1) var err error @@ -205,7 +218,7 @@ func (g *Grabber) getMetricsFromNode(ctx context.Context, nodeName string, kubel Resource("nodes"). SubResource("proxy"). Name(fmt.Sprintf("%v:%v", nodeName, kubeletPort)). - Suffix("metrics"). + Suffix(pathSuffix). Do(ctx).Raw() finished <- struct{}{} }() @@ -239,7 +252,7 @@ func (g *Grabber) GrabFromScheduler(ctx context.Context) (SchedulerMetrics, erro var lastMetricsFetchErr error var output string - if metricsWaitErr := wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) { + if metricsWaitErr := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, func(ctx context.Context) (bool, error) { output, lastMetricsFetchErr = g.getSecureMetricsFromPod(ctx, g.kubeScheduler, metav1.NamespaceSystem, kubeSchedulerPort) return lastMetricsFetchErr == nil, nil }); metricsWaitErr != nil { @@ -290,7 +303,7 @@ func (g *Grabber) GrabFromControllerManager(ctx context.Context) (ControllerMana var output string var lastMetricsFetchErr error - if metricsWaitErr := wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) { + if metricsWaitErr := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, func(ctx context.Context) (bool, error) { output, lastMetricsFetchErr = g.getSecureMetricsFromPod(ctx, g.kubeControllerManager, metav1.NamespaceSystem, kubeControllerManagerPort) return lastMetricsFetchErr == nil, nil }); metricsWaitErr != nil { @@ -329,7 +342,7 @@ func (g *Grabber) GrabFromSnapshotController(ctx context.Context, podName string var output string var lastMetricsFetchErr error - if metricsWaitErr := wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) { + if metricsWaitErr := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, func(ctx context.Context) (bool, error) { output, lastMetricsFetchErr = g.getMetricsFromPod(ctx, g.client, podName, metav1.NamespaceSystem, port) return lastMetricsFetchErr == nil, nil }); metricsWaitErr != nil { @@ -432,7 +445,7 @@ func (g *Grabber) Grab(ctx context.Context) (Collection, error) { } else { for _, node := range nodes.Items { kubeletPort := node.Status.DaemonEndpoints.KubeletEndpoint.Port - metrics, err := g.grabFromKubeletInternal(ctx, node.Name, int(kubeletPort)) + metrics, err := g.grabFromKubeletInternal(ctx, node.Name, int(kubeletPort), "metrics") if err != nil { errs = append(errs, err) } diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/node/helper.go b/vendor/k8s.io/kubernetes/test/e2e/framework/node/helper.go index 90a9dbb59..f473d4ffa 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/node/helper.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/node/helper.go @@ -22,6 +22,7 @@ import ( "time" "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -63,7 +64,7 @@ func ExpectNodeHasLabel(ctx context.Context, c clientset.Interface, nodeName str ginkgo.By("verifying the node has the label " + labelKey + " " + labelValue) node, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) framework.ExpectNoError(err) - framework.ExpectEqual(node.Labels[labelKey], labelValue) + gomega.Expect(node.Labels).To(gomega.HaveKeyWithValue(labelKey, labelValue)) } // RemoveLabelOffNode is for cleaning up labels temporarily added to node, @@ -120,7 +121,7 @@ func allNodesReady(ctx context.Context, c clientset.Interface, timeout 
time.Dura framework.Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, framework.TestContext.AllowedNotReadyNodes) var notReady []*v1.Node - err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, true, func(ctx context.Context) (bool, error) { notReady = nil // It should be OK to list unschedulable Nodes here. nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) @@ -141,7 +142,7 @@ func allNodesReady(ctx context.Context, c clientset.Interface, timeout time.Dura return len(notReady) <= framework.TestContext.AllowedNotReadyNodes, nil }) - if err != nil && err != wait.ErrWaitTimeout { + if err != nil && !wait.Interrupted(err) { return err } diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/node/ssh.go b/vendor/k8s.io/kubernetes/test/e2e/framework/node/ssh.go index 2e0b02c0f..cb7dc573d 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/node/ssh.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/node/ssh.go @@ -36,7 +36,7 @@ func WaitForSSHTunnels(ctx context.Context, namespace string) { defer e2ekubectl.RunKubectl(namespace, "delete", "pod", "ssh-tunnel-test") // allow up to a minute for new ssh tunnels to establish - wait.PollImmediateWithContext(ctx, 5*time.Second, time.Minute, func(ctx context.Context) (bool, error) { + wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, func(ctx context.Context) (bool, error) { _, err := e2ekubectl.RunKubectl(namespace, "logs", "ssh-tunnel-test") return err == nil, nil }) diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/node/wait.go b/vendor/k8s.io/kubernetes/test/e2e/framework/node/wait.go index cc0294230..d87e59b3d 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/node/wait.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/node/wait.go @@ -51,7 +51,7 @@ func WaitForTotalHealthy(ctx context.Context, c clientset.Interface, timeout tim var notReady []v1.Node var missingPodsPerNode map[string][]string - err := wait.PollImmediateWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextTimeout(ctx, poll, timeout, true, func(ctx context.Context) (bool, error) { notReady = nil // It should be OK to list unschedulable Nodes here. 
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{ResourceVersion: "0"}) @@ -96,7 +96,7 @@ func WaitForTotalHealthy(ctx context.Context, c clientset.Interface, timeout tim return len(notReady) == 0 && len(missingPodsPerNode) == 0, nil }) - if err != nil && err != wait.ErrWaitTimeout { + if err != nil && !wait.Interrupted(err) { return err } @@ -192,7 +192,7 @@ func CheckReady(ctx context.Context, c clientset.Interface, size int, timeout ti func waitListSchedulableNodes(ctx context.Context, c clientset.Interface) (*v1.NodeList, error) { var nodes *v1.NodeList var err error - if wait.PollImmediateWithContext(ctx, poll, singleCallTimeout, func(ctx context.Context) (bool, error) { + if wait.PollUntilContextTimeout(ctx, poll, singleCallTimeout, true, func(ctx context.Context) (bool, error) { nodes, err = c.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/pod/get.go b/vendor/k8s.io/kubernetes/test/e2e/framework/pod/get.go index d83314d19..4a90e5634 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/pod/get.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/pod/get.go @@ -24,8 +24,8 @@ import ( ) // Get creates a function which retrieves the pod anew each time the function -// is called. Fatal errors are detected by framework.HandleRetry and cause +// is called. Fatal errors are detected by framework.GetObject and cause // polling to stop. func Get(c clientset.Interface, pod framework.NamedObject) framework.GetFunc[*v1.Pod] { - return framework.HandleRetry(framework.GetObject(c.CoreV1().Pods(pod.GetNamespace()).Get, pod.GetName(), metav1.GetOptions{})) + return framework.GetObject(c.CoreV1().Pods(pod.GetNamespace()).Get, pod.GetName(), metav1.GetOptions{}) } diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/pv/pv.go b/vendor/k8s.io/kubernetes/test/e2e/framework/pv/pv.go index 006091870..3c43c20e0 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/pv/pv.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/pv/pv.go @@ -297,7 +297,7 @@ func DeletePVCandValidatePVGroup(ctx context.Context, c clientset.Interface, tim func createPV(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) { var resultPV *v1.PersistentVolume var lastCreateErr error - err := wait.PollImmediateWithContext(ctx, 29*time.Second, timeouts.PVCreate, func(ctx context.Context) (done bool, err error) { + err := wait.PollUntilContextTimeout(ctx, 29*time.Second, timeouts.PVCreate, true, func(ctx context.Context) (done bool, err error) { resultPV, lastCreateErr = c.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{}) if lastCreateErr != nil { // If we hit a quota problem, we are not done and should retry again. This happens to be the quota failure string for GCP. 
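
Many hunks in this patch are the same mechanical migration off the deprecated polling helpers. The shape of the change, as a sketch (interval, timeout and the condition func cond are placeholders):

	// Before (deprecated): always polled immediately and signalled timeouts
	// with wait.ErrWaitTimeout.
	err := wait.PollImmediateWithContext(ctx, interval, timeout, cond)

	// After: the new fourth argument ("immediate") replaces the PollImmediate
	// variant, and timeouts are detected with wait.Interrupted(err) rather than
	// comparing against wait.ErrWaitTimeout.
	err = wait.PollUntilContextTimeout(ctx, interval, timeout, true, cond)
	if err != nil && !wait.Interrupted(err) {
		return err
	}
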
@@ -648,7 +648,7 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.P Spec: v1.PersistentVolumeClaimSpec{ Selector: cfg.Selector, AccessModes: cfg.AccessModes, - Resources: v1.ResourceRequirements{ + Resources: v1.VolumeResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceStorage: resource.MustParse(cfg.ClaimSize), }, diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/ssh/ssh.go b/vendor/k8s.io/kubernetes/test/e2e/framework/ssh/ssh.go index 5624b3e4a..774ca1b35 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/ssh/ssh.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/ssh/ssh.go @@ -86,6 +86,11 @@ func GetSigner(provider string) (ssh.Signer, error) { if keyfile == "" { keyfile = "id_rsa" } + case "azure": + keyfile = os.Getenv("AZURE_SSH_KEY") + if keyfile == "" { + keyfile = "id_rsa" + } default: return nil, fmt.Errorf("GetSigner(...) not implemented for %s", provider) } @@ -422,7 +427,7 @@ func nodeAddresses(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string func waitListSchedulableNodes(ctx context.Context, c clientset.Interface) (*v1.NodeList, error) { var nodes *v1.NodeList var err error - if wait.PollImmediateWithContext(ctx, pollNodeInterval, singleCallTimeout, func(ctx context.Context) (bool, error) { + if wait.PollUntilContextTimeout(ctx, pollNodeInterval, singleCallTimeout, true, func(ctx context.Context) (bool, error) { nodes, err = c.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go b/vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go index 096411cfd..8a0af062d 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go @@ -23,9 +23,11 @@ import ( "errors" "flag" "fmt" + "io" "math" "os" "path" + "path/filepath" "sort" "strings" "time" @@ -36,6 +38,7 @@ import ( "github.com/onsi/gomega" gomegaformat "github.com/onsi/gomega/format" + "k8s.io/apimachinery/pkg/util/sets" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" cliflag "k8s.io/component-base/cli/flag" @@ -53,6 +56,20 @@ const ( DefaultNumNodes = -1 ) +var ( + // Output is used for output when not running tests, for example in -list-tests. + // Test output should go to ginkgo.GinkgoWriter. + Output io.Writer = os.Stdout + + // Exit is called when the framework detects fatal errors or when + // it is done with the execution of e.g. -list-tests. + Exit = os.Exit + + // CheckForBugs determines whether the framework bails out when + // test initialization found any bugs. + CheckForBugs = true +) + // TestContextType contains test settings and global state. Due to // historic reasons, it is a mixture of items managed by the test // framework itself, cloud providers and individual tests. @@ -82,18 +99,21 @@ const ( // Test suite authors can use framework/viper to make all command line // parameters also configurable via a configuration file. type TestContextType struct { - KubeConfig string - KubeContext string - KubeAPIContentType string - KubeletRootDir string - CertDir string - Host string - BearerToken string `datapolicy:"token"` + KubeConfig string + KubeContext string + KubeAPIContentType string + KubeletRootDir string + KubeletConfigDropinDir string + CertDir string + Host string + BearerToken string `datapolicy:"token"` // TODO: Deprecating this over time... 
instead just use gobindata_util.go , see #23987. RepoRoot string // ListImages will list off all images that are used then quit ListImages bool + listTests, listLabels bool + // ListConformanceTests will list off all conformance tests that are available then quit ListConformanceTests bool @@ -356,6 +376,8 @@ func RegisterCommonFlags(flags *flag.FlagSet) { flags.StringVar(&TestContext.NonblockingTaints, "non-blocking-taints", `node-role.kubernetes.io/control-plane`, "Nodes with taints in this comma-delimited list will not block the test framework from starting tests.") flags.BoolVar(&TestContext.ListImages, "list-images", false, "If true, will show list of images used for running tests.") + flags.BoolVar(&TestContext.listLabels, "list-labels", false, "If true, will show the list of labels that can be used to select tests via -ginkgo.label-filter.") + flags.BoolVar(&TestContext.listTests, "list-tests", false, "If true, will show the full names of all tests (aka specs) that can be used to select test via -ginkgo.focus/skip.") flags.StringVar(&TestContext.KubectlPath, "kubectl-path", "kubectl", "The kubectl binary to use. For development, you might use 'cluster/kubectl.sh' here.") flags.StringVar(&TestContext.ProgressReportURL, "progress-report-url", "", "The URL to POST progress updates to as the suite runs to assist in aiding integrations. If empty, no messages sent.") @@ -482,7 +504,7 @@ func AfterReadingAllFlags(t *TestContextType) { for _, v := range image.GetImageConfigs() { fmt.Println(v.GetE2EImage()) } - os.Exit(0) + Exit(0) } // Reconfigure gomega defaults. The poll interval should be suitable @@ -494,6 +516,20 @@ func AfterReadingAllFlags(t *TestContextType) { gomega.SetDefaultEventuallyTimeout(t.timeouts.PodStart) gomega.SetDefaultConsistentlyDuration(t.timeouts.PodStartShort) + // ginkgo.PreviewSpecs will expand all nodes and thus may find new bugs. + report := ginkgo.PreviewSpecs("Kubernetes e2e test statistics") + validateSpecs(report.SpecReports) + if err := FormatBugs(); CheckForBugs && err != nil { + // Refuse to do anything if the E2E suite is buggy. + fmt.Fprint(Output, "ERROR: E2E suite initialization was faulty, these errors must be fixed:") + fmt.Fprint(Output, "\n"+err.Error()) + Exit(1) + } + if t.listLabels || t.listTests { + listTestInformation(report) + Exit(0) + } + // Only set a default host if one won't be supplied via kubeconfig if len(t.Host) == 0 && len(t.KubeConfig) == 0 { // Check if we can use the in-cluster config @@ -553,7 +589,7 @@ func AfterReadingAllFlags(t *TestContextType) { } else { klog.Errorf("Failed to setup provider config for %q: %v", TestContext.Provider, err) } - os.Exit(1) + Exit(1) } if TestContext.ReportDir != "" { @@ -563,13 +599,13 @@ func AfterReadingAllFlags(t *TestContextType) { // in parallel, so we will get "exists" error in most of them. 
if err := os.MkdirAll(TestContext.ReportDir, 0777); err != nil && !os.IsExist(err) { klog.Errorf("Create report dir: %v", err) - os.Exit(1) + Exit(1) } ginkgoDir := path.Join(TestContext.ReportDir, "ginkgo") if TestContext.ReportCompleteGinkgo || TestContext.ReportCompleteJUnit { if err := os.MkdirAll(ginkgoDir, 0777); err != nil && !os.IsExist(err) { klog.Errorf("Create /ginkgo: %v", err) - os.Exit(1) + Exit(1) } } @@ -600,3 +636,47 @@ func AfterReadingAllFlags(t *TestContextType) { }) } } + +func listTestInformation(report ginkgo.Report) { + indent := strings.Repeat(" ", 4) + + if TestContext.listLabels { + labels := sets.New[string]() + for _, spec := range report.SpecReports { + if spec.LeafNodeType == types.NodeTypeIt { + labels.Insert(spec.Labels()...) + } + } + fmt.Fprintf(Output, "The following labels can be used with 'gingko run --label-filter':\n%s%s\n\n", indent, strings.Join(sets.List(labels), "\n"+indent)) + } + if TestContext.listTests { + leafs := make([][]string, 0, len(report.SpecReports)) + wd, _ := os.Getwd() + for _, spec := range report.SpecReports { + if spec.LeafNodeType == types.NodeTypeIt { + leafs = append(leafs, []string{fmt.Sprintf("%s:%d: ", relativePath(wd, spec.LeafNodeLocation.FileName), spec.LeafNodeLocation.LineNumber), spec.FullText()}) + } + } + // Sort by test name, not the source code location, because the test + // name is more stable across code refactoring. + sort.Slice(leafs, func(i, j int) bool { + return leafs[i][1] < leafs[j][1] + }) + fmt.Fprint(Output, "The following spec names can be used with 'ginkgo run --focus/skip':\n") + for _, leaf := range leafs { + fmt.Fprintf(Output, "%s%s%s\n", indent, leaf[0], leaf[1]) + } + fmt.Fprint(Output, "\n") + } +} + +func relativePath(wd, path string) string { + if wd == "" { + return path + } + relpath, err := filepath.Rel(wd, path) + if err != nil { + return path + } + return relpath +} diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/timeouts.go b/vendor/k8s.io/kubernetes/test/e2e/framework/timeouts.go index 5cfc29edb..6bb89f674 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/timeouts.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/timeouts.go @@ -39,6 +39,7 @@ var defaultTimeouts = TimeoutContext{ SystemPodsStartup: 10 * time.Minute, NodeSchedulable: 30 * time.Minute, SystemDaemonsetStartup: 5 * time.Minute, + NodeNotReady: 3 * time.Minute, } // TimeoutContext contains timeout settings for several actions. @@ -106,6 +107,9 @@ type TimeoutContext struct { // SystemDaemonsetStartup is how long to wait for all system daemonsets to be ready. SystemDaemonsetStartup time.Duration + + // NodeNotReady is how long to wait for a node to be not ready. + NodeNotReady time.Duration } // NewTimeoutContext returns a TimeoutContext with all values set either to diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/util.go b/vendor/k8s.io/kubernetes/test/e2e/framework/util.go index f10e3254c..16b7be34a 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/util.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/util.go @@ -136,7 +136,7 @@ var ( BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox) // ProvidersWithSSH are those providers where each node is accessible with SSH - ProvidersWithSSH = []string{"gce", "gke", "aws", "local"} + ProvidersWithSSH = []string{"gce", "gke", "aws", "local", "azure"} // ServeHostnameImage is a serve hostname image name. 
ServeHostnameImage = imageutils.GetE2EImage(imageutils.Agnhost) @@ -352,7 +352,7 @@ func CreateTestingNS(ctx context.Context, baseName string, c clientset.Interface } // Be robust about making the namespace creation call. var got *v1.Namespace - if err := wait.PollImmediateWithContext(ctx, Poll, 30*time.Second, func(ctx context.Context) (bool, error) { + if err := wait.PollUntilContextTimeout(ctx, Poll, 30*time.Second, true, func(ctx context.Context) (bool, error) { var err error got, err = c.CoreV1().Namespaces().Create(ctx, namespaceObj, metav1.CreateOptions{}) if err != nil { @@ -808,7 +808,7 @@ retriesLoop: if errs.Len() > 0 { Failf("Unexpected error(s): %v", strings.Join(errs.List(), "\n - ")) } - ExpectEqual(totalValidWatchEvents, len(expectedWatchEvents), "Error: there must be an equal amount of total valid watch events (%d) and expected watch events (%d)", totalValidWatchEvents, len(expectedWatchEvents)) + gomega.Expect(expectedWatchEvents).To(gomega.HaveLen(totalValidWatchEvents), "Error: there must be an equal amount of total valid watch events (%d) and expected watch events (%d)", totalValidWatchEvents, len(expectedWatchEvents)) break retriesLoop } } diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/volume/fixtures.go b/vendor/k8s.io/kubernetes/test/e2e/framework/volume/fixtures.go index c84d9222d..e64d7ad7f 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/volume/fixtures.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/volume/fixtures.go @@ -238,7 +238,7 @@ func getVolumeHandle(ctx context.Context, cs clientset.Interface, claimName stri // WaitForVolumeAttachmentTerminated waits for the VolumeAttachment with the passed in attachmentName to be terminated. func WaitForVolumeAttachmentTerminated(ctx context.Context, attachmentName string, cs clientset.Interface, timeout time.Duration) error { - waitErr := wait.PollImmediateWithContext(ctx, 10*time.Second, timeout, func(ctx context.Context) (bool, error) { + waitErr := wait.PollUntilContextTimeout(ctx, 10*time.Second, timeout, true, func(ctx context.Context) (bool, error) { _, err := cs.StorageV1().VolumeAttachments().Get(ctx, attachmentName, metav1.GetOptions{}) if err != nil { // if the volumeattachment object is not found, it means it has been terminated. @@ -697,7 +697,7 @@ func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exi if err != nil { if exiterr, ok := err.(clientexec.ExitError); ok { actualExitCode := exiterr.ExitStatus() - framework.ExpectEqual(actualExitCode, exitCode, + gomega.Expect(actualExitCode).To(gomega.Equal(exitCode), "%q should fail with exit code %d, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s", shExec, exitCode, actualExitCode, exiterr, stdout, stderr) } else { @@ -706,5 +706,5 @@ func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exi shExec, exitCode, err, stdout, stderr) } } - framework.ExpectError(err, "%q should fail with exit code %d, but exit without error", shExec, exitCode) + gomega.Expect(err).To(gomega.HaveOccurred(), "%q should fail with exit code %d, but exit without error", shExec, exitCode) } diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/utils/framework.go b/vendor/k8s.io/kubernetes/test/e2e/storage/utils/framework.go index 7bd007044..2257e0328 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/utils/framework.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/utils/framework.go @@ -16,9 +16,7 @@ limitations under the License. 
package utils -import "github.com/onsi/ginkgo/v2" +import "k8s.io/kubernetes/test/e2e/framework" // SIGDescribe annotates the test with the SIG label. -func SIGDescribe(text string, body func()) bool { - return ginkgo.Describe("[sig-storage] "+text, body) -} +var SIGDescribe = framework.SIGDescribe("storage") diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/utils/utils.go b/vendor/k8s.io/kubernetes/test/e2e/storage/utils/utils.go index 2c4fc261f..b6f18a118 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/utils/utils.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/utils/utils.go @@ -70,8 +70,7 @@ func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string framework.ExpectNoError(err) framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr) fsGroupResult := strings.Fields(stdout)[3] - framework.ExpectEqual(expectedFSGroup, fsGroupResult, - "Expected fsGroup of %s, got %s", expectedFSGroup, fsGroupResult) + gomega.Expect(expectedFSGroup).To(gomega.Equal(fsGroupResult), "Expected fsGroup of %s, got %s", expectedFSGroup, fsGroupResult) } // getKubeletMainPid return the Main PID of the Kubelet Process @@ -141,14 +140,14 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(ctx context.Context, c clie result, err := e2essh.SSH(ctx, fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider) e2essh.LogResult(result) framework.ExpectNoError(err, "Encountered SSH error.") - framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code)) + gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code)) if checkSubpath { ginkgo.By("Expecting the volume subpath mount to be found.") result, err := e2essh.SSH(ctx, fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider) e2essh.LogResult(result) framework.ExpectNoError(err, "Encountered SSH error.") - framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code)) + gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code)) } ginkgo.By("Writing to the volume.") @@ -201,7 +200,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(ctx context.Context, c clie result, err := e2essh.SSH(ctx, fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", secondPod.UID), nodeIP, framework.TestContext.Provider) e2essh.LogResult(result) framework.ExpectNoError(err, "Encountered SSH error when checking the second pod.") - framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code)) + gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code)) ginkgo.By("Testing that written file is accessible in the second pod.") CheckReadFromPath(f, secondPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed) @@ -262,13 +261,13 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(ctx context.Context, c client result, err := e2essh.SSH(ctx, podDirectoryCmd, nodeIP, framework.TestContext.Provider) e2essh.LogResult(result) framework.ExpectNoError(err, "Encountered SSH error.") - framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code)) + gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected grep exit code of 0, got %d", 
result.Code)) ginkgo.By("Expecting the symlinks from global map path to be found.") result, err = e2essh.SSH(ctx, globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider) e2essh.LogResult(result) framework.ExpectNoError(err, "Encountered SSH error.") - framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected find exit code of 0, got %d", result.Code)) + gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected find exit code of 0, got %d", result.Code)) // This command is to make sure kubelet is started after test finishes no matter it fails or not. ginkgo.DeferCleanup(KubeletCommand, KStart, c, clientPod) @@ -699,7 +698,7 @@ func VerifyFilePathGidInPod(f *framework.Framework, filePath, expectedGid string framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr) ll := strings.Fields(stdout) framework.Logf("stdout split: %v, expected gid: %v", ll, expectedGid) - framework.ExpectEqual(ll[3], expectedGid) + gomega.Expect(ll[3]).To(gomega.Equal(expectedGid)) } // ChangeFilePathGidInPod changes the GID of the target filepath. diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/kubectl/httpd-deployment1.yaml.in b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/kubectl/httpd-deployment1.yaml.in index 3b82a2fd2..72164958b 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/kubectl/httpd-deployment1.yaml.in +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/kubectl/httpd-deployment1.yaml.in @@ -11,6 +11,8 @@ spec: metadata: labels: app: httpd + annotations: + annotations_app: annotations_httpd spec: containers: - name: httpd diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/sample-device-plugin/sample-device-plugin.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/sample-device-plugin/sample-device-plugin.yaml index 8960f3b00..0877b16e9 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/sample-device-plugin/sample-device-plugin.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/sample-device-plugin/sample-device-plugin.yaml @@ -31,6 +31,9 @@ spec: - name: dev hostPath: path: /dev + - name: cdi-dir + hostPath: + path: /var/run/cdi containers: - image: registry.k8s.io/e2e-test-images/sample-device-plugin:1.3 name: sample-device-plugin @@ -46,5 +49,7 @@ spec: mountPath: /var/lib/kubelet/plugins_registry - name: dev mountPath: /dev + - name: cdi-dir + mountPath: /var/run/cdi updateStrategy: type: RollingUpdate diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/scheduling/nvidia-driver-installer.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/scheduling/nvidia-driver-installer.yaml index f69371b93..ab558f914 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/scheduling/nvidia-driver-installer.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/scheduling/nvidia-driver-installer.yaml @@ -52,8 +52,8 @@ spec: # Refer to details about the installer in https://cos.googlesource.com/cos/tools/+/refs/heads/master/src/cmd/cos_gpu_installer/ # and the COS release notes (https://cloud.google.com/container-optimized-os/docs/release-notes) to determine version COS GPU installer for a given version of COS. 
- # Maps to gcr.io/cos-cloud/cos-gpu-installer:v2.0.27 - suitable for COS M97 as per https://cloud.google.com/container-optimized-os/docs/release-notes - - image: gcr.io/cos-cloud/cos-gpu-installer:v2.0.27 + # Maps to gcr.io/cos-cloud/cos-gpu-installer:v2.1.9 - suitable for COS M109 as per https://cloud.google.com/container-optimized-os/docs/release-notes + - image: gcr.io/cos-cloud/cos-gpu-installer:v2.1.9 name: nvidia-driver-installer resources: requests: diff --git a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go index 8b3384bb5..72f2394e9 100644 --- a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go +++ b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go @@ -238,11 +238,11 @@ func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config configs[AuthenticatedWindowsNanoServer] = Config{list.GcAuthenticatedRegistry, "windows-nanoserver", "v1"} configs[APIServer] = Config{list.PromoterE2eRegistry, "sample-apiserver", "1.17.7"} configs[AppArmorLoader] = Config{list.PromoterE2eRegistry, "apparmor-loader", "1.4"} - configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.29-4"} + configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.36.1-1"} configs[CudaVectorAdd] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "1.0"} configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.3"} - configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.2.8"} - configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.9-0"} + configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.4.3"} + configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.10-0"} configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-4"} configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-4"} configs[InvalidRegistryImage] = Config{list.InvalidRegistry, "alpine", "3.1"} diff --git a/vendor/k8s.io/kubernetes/test/utils/runners.go b/vendor/k8s.io/kubernetes/test/utils/runners.go index 3cbc8a7bb..cfc2bc116 100644 --- a/vendor/k8s.io/kubernetes/test/utils/runners.go +++ b/vendor/k8s.io/kubernetes/test/utils/runners.go @@ -1502,7 +1502,7 @@ func makeUnboundPersistentVolumeClaim(storageClass string) *v1.PersistentVolumeC Spec: v1.PersistentVolumeClaimSpec{ AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}, StorageClassName: &storageClass, - Resources: v1.ResourceRequirements{ + Resources: v1.VolumeResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"), }, diff --git a/vendor/k8s.io/mount-utils/mount_helper_common.go b/vendor/k8s.io/mount-utils/mount_helper_common.go index 440cf64dd..fc7bbc143 100644 --- a/vendor/k8s.io/mount-utils/mount_helper_common.go +++ b/vendor/k8s.io/mount-utils/mount_helper_common.go @@ -139,7 +139,7 @@ func removePathIfNotMountPoint(mountPath string, mounter Interface, extensiveMou } if notMnt { - klog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath) + klog.V(4).Infof("%q is not a mountpoint, deleting", mountPath) return notMnt, os.Remove(mountPath) } return notMnt, nil @@ -147,7 +147,7 @@ func removePathIfNotMountPoint(mountPath string, mounter Interface, extensiveMou // removePath attempts to remove the directory. Returns nil if the directory was removed or does not exist. 
func removePath(mountPath string) error { - klog.V(4).Infof("Warning: deleting path %q", mountPath) + klog.V(4).Infof("Deleting path %q", mountPath) err := os.Remove(mountPath) if os.IsNotExist(err) { klog.V(4).Infof("%q does not exist", mountPath) diff --git a/vendor/k8s.io/mount-utils/mount_helper_unix.go b/vendor/k8s.io/mount-utils/mount_helper_unix.go index 82210aaa2..9193e7c8d 100644 --- a/vendor/k8s.io/mount-utils/mount_helper_unix.go +++ b/vendor/k8s.io/mount-utils/mount_helper_unix.go @@ -61,7 +61,7 @@ func IsCorruptedMnt(err error) bool { underlyingError = err } - return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE || underlyingError == syscall.EIO || underlyingError == syscall.EACCES || underlyingError == syscall.EHOSTDOWN + return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE || underlyingError == syscall.EIO || underlyingError == syscall.EACCES || underlyingError == syscall.EHOSTDOWN || underlyingError == syscall.EWOULDBLOCK } // MountInfo represents a single line in /proc//mountinfo. diff --git a/vendor/k8s.io/pod-security-admission/policy/check_runAsNonRoot.go b/vendor/k8s.io/pod-security-admission/policy/check_runAsNonRoot.go index c8dfcfdde..87b83727b 100644 --- a/vendor/k8s.io/pod-security-admission/policy/check_runAsNonRoot.go +++ b/vendor/k8s.io/pod-security-admission/policy/check_runAsNonRoot.go @@ -59,6 +59,11 @@ func CheckRunAsNonRoot() Check { } func runAsNonRoot_1_0(podMetadata *metav1.ObjectMeta, podSpec *corev1.PodSpec) CheckResult { + // See KEP-127: https://github.com/kubernetes/enhancements/blob/308ba8d/keps/sig-node/127-user-namespaces/README.md?plain=1#L411-L447 + if relaxPolicyForUserNamespacePod(podSpec) { + return CheckResult{Allowed: true} + } + // things that explicitly set runAsNonRoot=false var badSetters []string diff --git a/vendor/k8s.io/pod-security-admission/policy/check_runAsUser.go b/vendor/k8s.io/pod-security-admission/policy/check_runAsUser.go index de20f4d0a..eb9553ee0 100644 --- a/vendor/k8s.io/pod-security-admission/policy/check_runAsUser.go +++ b/vendor/k8s.io/pod-security-admission/policy/check_runAsUser.go @@ -60,6 +60,11 @@ func CheckRunAsUser() Check { } func runAsUser_1_23(podMetadata *metav1.ObjectMeta, podSpec *corev1.PodSpec) CheckResult { + // See KEP-127: https://github.com/kubernetes/enhancements/blob/308ba8d/keps/sig-node/127-user-namespaces/README.md?plain=1#L411-L447 + if relaxPolicyForUserNamespacePod(podSpec) { + return CheckResult{Allowed: true} + } + // things that explicitly set runAsUser=0 var badSetters []string diff --git a/vendor/k8s.io/pod-security-admission/policy/check_sysctls.go b/vendor/k8s.io/pod-security-admission/policy/check_sysctls.go index 95c5eff28..78950c3b7 100644 --- a/vendor/k8s.io/pod-security-admission/policy/check_sysctls.go +++ b/vendor/k8s.io/pod-security-admission/policy/check_sysctls.go @@ -43,6 +43,10 @@ spec.securityContext.sysctls[*].name 'net.ipv4.ping_group_range' 'net.ipv4.ip_unprivileged_port_start' 'net.ipv4.ip_local_reserved_ports' +'net.ipv4.tcp_keepalive_time' +'net.ipv4.tcp_fin_timeout' +'net.ipv4.tcp_keepalive_intvl' +'net.ipv4.tcp_keepalive_probes' */ @@ -59,25 +63,28 @@ func CheckSysctls() Check { Versions: []VersionedCheck{ { MinimumVersion: api.MajorMinorVersion(1, 0), - CheckPod: sysctls_1_0, + CheckPod: sysctlsV1Dot0, }, { MinimumVersion: api.MajorMinorVersion(1, 27), - CheckPod: sysctls_1_27, + CheckPod: sysctlsV1Dot27, + }, { + MinimumVersion: api.MajorMinorVersion(1, 29), + CheckPod: sysctlsV1Dot29, }, }, } } var ( - 
sysctls_allowed_1_0 = sets.NewString( + sysctlsAllowedV1Dot0 = sets.NewString( "kernel.shm_rmid_forced", "net.ipv4.ip_local_port_range", "net.ipv4.tcp_syncookies", "net.ipv4.ping_group_range", "net.ipv4.ip_unprivileged_port_start", ) - sysctls_allowed_1_27 = sets.NewString( + sysctlsAllowedV1Dot27 = sets.NewString( "kernel.shm_rmid_forced", "net.ipv4.ip_local_port_range", "net.ipv4.tcp_syncookies", @@ -85,14 +92,30 @@ var ( "net.ipv4.ip_unprivileged_port_start", "net.ipv4.ip_local_reserved_ports", ) + sysctlsAllowedV1Dot29 = sets.NewString( + "kernel.shm_rmid_forced", + "net.ipv4.ip_local_port_range", + "net.ipv4.tcp_syncookies", + "net.ipv4.ping_group_range", + "net.ipv4.ip_unprivileged_port_start", + "net.ipv4.ip_local_reserved_ports", + "net.ipv4.tcp_keepalive_time", + "net.ipv4.tcp_fin_timeout", + "net.ipv4.tcp_keepalive_intvl", + "net.ipv4.tcp_keepalive_probes", + ) ) -func sysctls_1_0(podMetadata *metav1.ObjectMeta, podSpec *corev1.PodSpec) CheckResult { - return sysctls(podMetadata, podSpec, sysctls_allowed_1_0) +func sysctlsV1Dot0(podMetadata *metav1.ObjectMeta, podSpec *corev1.PodSpec) CheckResult { + return sysctls(podMetadata, podSpec, sysctlsAllowedV1Dot0) +} + +func sysctlsV1Dot27(podMetadata *metav1.ObjectMeta, podSpec *corev1.PodSpec) CheckResult { + return sysctls(podMetadata, podSpec, sysctlsAllowedV1Dot27) } -func sysctls_1_27(podMetadata *metav1.ObjectMeta, podSpec *corev1.PodSpec) CheckResult { - return sysctls(podMetadata, podSpec, sysctls_allowed_1_27) +func sysctlsV1Dot29(podMetadata *metav1.ObjectMeta, podSpec *corev1.PodSpec) CheckResult { + return sysctls(podMetadata, podSpec, sysctlsAllowedV1Dot29) } func sysctls(podMetadata *metav1.ObjectMeta, podSpec *corev1.PodSpec, sysctls_allowed_set sets.String) CheckResult { diff --git a/vendor/k8s.io/pod-security-admission/policy/helpers.go b/vendor/k8s.io/pod-security-admission/policy/helpers.go index eb74f44e4..e35348929 100644 --- a/vendor/k8s.io/pod-security-admission/policy/helpers.go +++ b/vendor/k8s.io/pod-security-admission/policy/helpers.go @@ -16,7 +16,12 @@ limitations under the License. package policy -import "strings" +import ( + "strings" + "sync/atomic" + + corev1 "k8s.io/api/core/v1" +) func joinQuote(items []string) string { if len(items) == 0 { @@ -31,3 +36,21 @@ func pluralize(singular, plural string, count int) string { } return plural } + +var relaxPolicyForUserNamespacePods = &atomic.Bool{} + +// RelaxPolicyForUserNamespacePods allows opting into relaxing runAsUser / +// runAsNonRoot restricted policies for user namespace pods, before the +// usernamespace feature has reached GA and propagated to the oldest supported +// nodes. +// This should only be opted into in clusters where the administrator ensures +// all nodes in the cluster enable the user namespace feature. +func RelaxPolicyForUserNamespacePods(relax bool) { + relaxPolicyForUserNamespacePods.Store(relax) +} + +// relaxPolicyForUserNamespacePod returns true if a policy should be relaxed +// because of enabled user namespaces in the provided pod spec. 
+func relaxPolicyForUserNamespacePod(podSpec *corev1.PodSpec) bool { + return relaxPolicyForUserNamespacePods.Load() && podSpec != nil && podSpec.HostUsers != nil && !*podSpec.HostUsers +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 67c66faaf..b8833d26e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -7,12 +7,11 @@ github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2020-04-01-preview/authorization -github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage github.com/Azure/azure-sdk-for-go/storage github.com/Azure/azure-sdk-for-go/version -# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azcore github.com/Azure/azure-sdk-for-go/sdk/azcore/arm @@ -38,7 +37,7 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing # github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azidentity -# github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 +# github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/internal/diag github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo @@ -47,7 +46,7 @@ github.com/Azure/azure-sdk-for-go/sdk/internal/log github.com/Azure/azure-sdk-for-go/sdk/internal/poller github.com/Azure/azure-sdk-for-go/sdk/internal/temporal github.com/Azure/azure-sdk-for-go/sdk/internal/uuid -# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.3.0 +# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.4.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 # github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0 @@ -171,11 +170,10 @@ github.com/coreos/go-systemd/v22/journal # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/docker/distribution v2.8.2+incompatible -## explicit -github.com/docker/distribution/digestset -github.com/docker/distribution/reference -# github.com/emicklei/go-restful/v3 v3.10.2 +# github.com/distribution/reference v0.5.0 +## explicit; go 1.20 +github.com/distribution/reference +# github.com/emicklei/go-restful/v3 v3.11.0 ## explicit; go 1.13 github.com/emicklei/go-restful/v3 github.com/emicklei/go-restful/v3/log @@ -191,7 +189,7 @@ github.com/fsnotify/fsnotify # github.com/go-ini/ini v1.67.0 ## explicit github.com/go-ini/ini -# github.com/go-logr/logr v1.3.0 +# github.com/go-logr/logr v1.4.1 ## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/funcr @@ -230,9 +228,6 @@ github.com/golang-jwt/jwt/v5 # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da ## explicit github.com/golang/groupcache/lru -# github.com/golang/mock v1.6.0 -## explicit; go 1.11 -github.com/golang/mock/gomock # github.com/golang/protobuf v1.5.3 ## explicit; go 1.9 github.com/golang/protobuf/descriptor @@ -244,17 +239,21 @@ github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration 
github.com/golang/protobuf/ptypes/timestamp github.com/golang/protobuf/ptypes/wrappers -# github.com/google/cel-go v0.16.1 +# github.com/google/cel-go v0.17.7 ## explicit; go 1.18 github.com/google/cel-go/cel github.com/google/cel-go/checker github.com/google/cel-go/checker/decls github.com/google/cel-go/common +github.com/google/cel-go/common/ast github.com/google/cel-go/common/containers github.com/google/cel-go/common/debug +github.com/google/cel-go/common/decls +github.com/google/cel-go/common/functions github.com/google/cel-go/common/operators github.com/google/cel-go/common/overloads github.com/google/cel-go/common/runes +github.com/google/cel-go/common/stdlib github.com/google/cel-go/common/types github.com/google/cel-go/common/types/pb github.com/google/cel-go/common/types/ref @@ -285,9 +284,12 @@ github.com/google/gofuzz/bytesource # github.com/google/pprof v0.0.0-20230602010524-ada837c32108 ## explicit; go 1.19 github.com/google/pprof/profile -# github.com/google/uuid v1.4.0 +# github.com/google/uuid v1.5.0 ## explicit github.com/google/uuid +# github.com/gorilla/websocket v1.5.0 +## explicit; go 1.12 +github.com/gorilla/websocket # github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 ## explicit github.com/grpc-ecosystem/go-grpc-prometheus @@ -339,6 +341,9 @@ github.com/modern-go/reflect2 # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 ## explicit github.com/munnerz/goautoneg +# github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f +## explicit +github.com/mxk/go-flowrate/flowrate # github.com/onsi/ginkgo/v2 v2.13.2 ## explicit; go 1.18 github.com/onsi/ginkgo/v2 @@ -377,11 +382,10 @@ github.com/onsi/gomega/types # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest -# github.com/opencontainers/selinux v1.10.0 -## explicit; go 1.13 +# github.com/opencontainers/selinux v1.11.0 +## explicit; go 1.19 github.com/opencontainers/selinux/go-selinux github.com/opencontainers/selinux/go-selinux/label -github.com/opencontainers/selinux/pkg/pwalk github.com/opencontainers/selinux/pkg/pwalkdir # github.com/pborman/uuid v1.2.1 ## explicit @@ -435,24 +439,24 @@ github.com/stoewer/go-strcase ## explicit; go 1.20 github.com/stretchr/testify/assert github.com/stretchr/testify/require -# go.etcd.io/etcd/api/v3 v3.5.9 -## explicit; go 1.19 +# go.etcd.io/etcd/api/v3 v3.5.10 +## explicit; go 1.20 go.etcd.io/etcd/api/v3/authpb go.etcd.io/etcd/api/v3/etcdserverpb go.etcd.io/etcd/api/v3/membershippb go.etcd.io/etcd/api/v3/mvccpb go.etcd.io/etcd/api/v3/v3rpc/rpctypes go.etcd.io/etcd/api/v3/version -# go.etcd.io/etcd/client/pkg/v3 v3.5.9 -## explicit; go 1.19 +# go.etcd.io/etcd/client/pkg/v3 v3.5.10 +## explicit; go 1.20 go.etcd.io/etcd/client/pkg/v3/fileutil go.etcd.io/etcd/client/pkg/v3/logutil go.etcd.io/etcd/client/pkg/v3/systemd go.etcd.io/etcd/client/pkg/v3/tlsutil go.etcd.io/etcd/client/pkg/v3/transport go.etcd.io/etcd/client/pkg/v3/types -# go.etcd.io/etcd/client/v3 v3.5.9 -## explicit; go 1.19 +# go.etcd.io/etcd/client/v3 v3.5.10 +## explicit; go 1.20 go.etcd.io/etcd/client/v3 go.etcd.io/etcd/client/v3/credentials go.etcd.io/etcd/client/v3/internal/endpoint @@ -517,6 +521,9 @@ go.opentelemetry.io/proto/otlp/trace/v1 # go.uber.org/atomic v1.10.0 ## explicit; go 1.18 go.uber.org/atomic +# go.uber.org/mock v0.4.0 +## explicit; go 1.20 +go.uber.org/mock/gomock # go.uber.org/multierr v1.11.0 ## explicit; go 1.19 go.uber.org/multierr @@ -569,7 +576,7 @@ golang.org/x/net/websocket ## explicit; go 1.18 golang.org/x/oauth2 
golang.org/x/oauth2/internal -# golang.org/x/sync v0.5.0 +# golang.org/x/sync v0.6.0 ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/singleflight @@ -748,8 +755,8 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.28.4 => k8s.io/api v0.28.4 -## explicit; go 1.20 +# k8s.io/api v0.29.0 => k8s.io/api v0.29.0 +## explicit; go 1.21 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 k8s.io/api/admissionregistration/v1 @@ -782,7 +789,7 @@ k8s.io/api/discovery/v1beta1 k8s.io/api/events/v1 k8s.io/api/events/v1beta1 k8s.io/api/extensions/v1beta1 -k8s.io/api/flowcontrol/v1alpha1 +k8s.io/api/flowcontrol/v1 k8s.io/api/flowcontrol/v1beta1 k8s.io/api/flowcontrol/v1beta2 k8s.io/api/flowcontrol/v1beta3 @@ -804,13 +811,13 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.0.0 => k8s.io/apiextensions-apiserver v0.28.4 -## explicit; go 1.20 +# k8s.io/apiextensions-apiserver v0.0.0 => k8s.io/apiextensions-apiserver v0.29.0 +## explicit; go 1.21 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/features -# k8s.io/apimachinery v0.28.4 => k8s.io/apimachinery v0.28.4 -## explicit; go 1.20 +# k8s.io/apimachinery v0.29.0 => k8s.io/apimachinery v0.29.0 +## explicit; go 1.21 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta @@ -854,6 +861,7 @@ k8s.io/apimachinery/pkg/util/managedfields/internal k8s.io/apimachinery/pkg/util/mergepatch k8s.io/apimachinery/pkg/util/naming k8s.io/apimachinery/pkg/util/net +k8s.io/apimachinery/pkg/util/proxy k8s.io/apimachinery/pkg/util/rand k8s.io/apimachinery/pkg/util/remotecommand k8s.io/apimachinery/pkg/util/runtime @@ -871,8 +879,8 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.28.4 => k8s.io/apiserver v0.28.4 -## explicit; go 1.20 +# k8s.io/apiserver v0.29.0 => k8s.io/apiserver v0.29.0 +## explicit; go 1.21 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/cel k8s.io/apiserver/pkg/admission/configuration @@ -902,6 +910,7 @@ k8s.io/apiserver/pkg/apis/apiserver/install k8s.io/apiserver/pkg/apis/apiserver/v1 k8s.io/apiserver/pkg/apis/apiserver/v1alpha1 k8s.io/apiserver/pkg/apis/apiserver/v1beta1 +k8s.io/apiserver/pkg/apis/apiserver/validation k8s.io/apiserver/pkg/apis/audit k8s.io/apiserver/pkg/apis/audit/install k8s.io/apiserver/pkg/apis/audit/v1 @@ -915,6 +924,7 @@ k8s.io/apiserver/pkg/audit k8s.io/apiserver/pkg/audit/policy k8s.io/apiserver/pkg/authentication/authenticator k8s.io/apiserver/pkg/authentication/authenticatorfactory +k8s.io/apiserver/pkg/authentication/cel k8s.io/apiserver/pkg/authentication/group k8s.io/apiserver/pkg/authentication/request/anonymous k8s.io/apiserver/pkg/authentication/request/bearertoken @@ -928,6 +938,7 @@ k8s.io/apiserver/pkg/authentication/token/tokenfile k8s.io/apiserver/pkg/authentication/user k8s.io/apiserver/pkg/authorization/authorizer k8s.io/apiserver/pkg/authorization/authorizerfactory +k8s.io/apiserver/pkg/authorization/cel k8s.io/apiserver/pkg/authorization/path k8s.io/apiserver/pkg/authorization/union k8s.io/apiserver/pkg/cel @@ -1016,8 +1027,8 @@ k8s.io/apiserver/plugin/pkg/audit/truncate k8s.io/apiserver/plugin/pkg/audit/webhook 
k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/client-go v0.28.4 => k8s.io/client-go v0.28.4 -## explicit; go 1.20 +# k8s.io/client-go v0.29.0 => k8s.io/client-go v0.29.0 +## explicit; go 1.21 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 @@ -1042,7 +1053,7 @@ k8s.io/client-go/applyconfigurations/discovery/v1beta1 k8s.io/client-go/applyconfigurations/events/v1 k8s.io/client-go/applyconfigurations/events/v1beta1 k8s.io/client-go/applyconfigurations/extensions/v1beta1 -k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1 +k8s.io/client-go/applyconfigurations/flowcontrol/v1 k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1 k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2 k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3 @@ -1109,7 +1120,7 @@ k8s.io/client-go/informers/events/v1beta1 k8s.io/client-go/informers/extensions k8s.io/client-go/informers/extensions/v1beta1 k8s.io/client-go/informers/flowcontrol -k8s.io/client-go/informers/flowcontrol/v1alpha1 +k8s.io/client-go/informers/flowcontrol/v1 k8s.io/client-go/informers/flowcontrol/v1beta1 k8s.io/client-go/informers/flowcontrol/v1beta2 k8s.io/client-go/informers/flowcontrol/v1beta3 @@ -1200,8 +1211,8 @@ k8s.io/client-go/kubernetes/typed/events/v1beta1 k8s.io/client-go/kubernetes/typed/events/v1beta1/fake k8s.io/client-go/kubernetes/typed/extensions/v1beta1 k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake -k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1 -k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake +k8s.io/client-go/kubernetes/typed/flowcontrol/v1 +k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1 k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2 @@ -1268,7 +1279,7 @@ k8s.io/client-go/listers/discovery/v1beta1 k8s.io/client-go/listers/events/v1 k8s.io/client-go/listers/events/v1beta1 k8s.io/client-go/listers/extensions/v1beta1 -k8s.io/client-go/listers/flowcontrol/v1alpha1 +k8s.io/client-go/listers/flowcontrol/v1 k8s.io/client-go/listers/flowcontrol/v1beta1 k8s.io/client-go/listers/flowcontrol/v1beta2 k8s.io/client-go/listers/flowcontrol/v1beta3 @@ -1319,6 +1330,7 @@ k8s.io/client-go/tools/clientcmd/api k8s.io/client-go/tools/clientcmd/api/latest k8s.io/client-go/tools/clientcmd/api/v1 k8s.io/client-go/tools/events +k8s.io/client-go/tools/internal/events k8s.io/client-go/tools/metrics k8s.io/client-go/tools/pager k8s.io/client-go/tools/portforward @@ -1329,6 +1341,7 @@ k8s.io/client-go/tools/remotecommand k8s.io/client-go/tools/watch k8s.io/client-go/transport k8s.io/client-go/transport/spdy +k8s.io/client-go/transport/websocket k8s.io/client-go/util/cert k8s.io/client-go/util/connrotation k8s.io/client-go/util/exec @@ -1337,8 +1350,8 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.28.4 => k8s.io/cloud-provider v0.28.4 -## explicit; go 1.20 +# k8s.io/cloud-provider v0.29.0 => k8s.io/cloud-provider v0.29.0 +## explicit; go 1.21 k8s.io/cloud-provider k8s.io/cloud-provider/api k8s.io/cloud-provider/app/config @@ -1356,8 +1369,8 @@ k8s.io/cloud-provider/service/helpers k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/errors 
k8s.io/cloud-provider/volume/helpers -# k8s.io/component-base v0.28.4 => k8s.io/component-base v0.28.4 -## explicit; go 1.20 +# k8s.io/component-base v0.29.0 => k8s.io/component-base v0.29.0 +## explicit; go 1.21 k8s.io/component-base/cli/flag k8s.io/component-base/config k8s.io/component-base/config/options @@ -1378,14 +1391,15 @@ k8s.io/component-base/metrics/testutil k8s.io/component-base/tracing k8s.io/component-base/tracing/api/v1 k8s.io/component-base/version -# k8s.io/component-helpers v0.28.4 => k8s.io/component-helpers v0.28.4 -## explicit; go 1.20 +# k8s.io/component-helpers v0.29.0 => k8s.io/component-helpers v0.29.0 +## explicit; go 1.21 k8s.io/component-helpers/node/util +k8s.io/component-helpers/node/util/sysctl k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity k8s.io/component-helpers/storage/volume -# k8s.io/controller-manager v0.28.4 => k8s.io/controller-manager v0.28.4 -## explicit; go 1.20 +# k8s.io/controller-manager v0.29.0 => k8s.io/controller-manager v0.29.0 +## explicit; go 1.21 k8s.io/controller-manager/config k8s.io/controller-manager/config/v1 k8s.io/controller-manager/config/v1alpha1 @@ -1405,13 +1419,13 @@ k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity k8s.io/klog/v2/internal/sloghandler -# k8s.io/kms v0.28.4 -## explicit; go 1.20 +# k8s.io/kms v0.29.0 +## explicit; go 1.21 k8s.io/kms/apis/v1beta1 k8s.io/kms/apis/v2 k8s.io/kms/pkg/service k8s.io/kms/pkg/util -# k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 +# k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 ## explicit; go 1.19 k8s.io/kube-openapi/pkg/builder k8s.io/kube-openapi/pkg/builder3 @@ -1423,7 +1437,6 @@ k8s.io/kube-openapi/pkg/handler k8s.io/kube-openapi/pkg/handler3 k8s.io/kube-openapi/pkg/internal k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json -k8s.io/kube-openapi/pkg/openapiconv k8s.io/kube-openapi/pkg/schemaconv k8s.io/kube-openapi/pkg/schemamutation k8s.io/kube-openapi/pkg/spec3 @@ -1433,19 +1446,20 @@ k8s.io/kube-openapi/pkg/validation/errors k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson -# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.28.4 -## explicit; go 1.20 +# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.29.0 +## explicit; go 1.21 k8s.io/kubectl/pkg/scale k8s.io/kubectl/pkg/util/podutils -# k8s.io/kubelet v0.28.4 => k8s.io/kubelet v0.28.4 -## explicit; go 1.20 +# k8s.io/kubelet v0.29.0 => k8s.io/kubelet v0.29.0 +## explicit; go 1.21 k8s.io/kubelet/pkg/apis k8s.io/kubelet/pkg/apis/stats/v1alpha1 -# k8s.io/kubernetes v1.28.5 -## explicit; go 1.20 +# k8s.io/kubernetes v1.29.0 +## explicit; go 1.21 k8s.io/kubernetes/pkg/api/legacyscheme k8s.io/kubernetes/pkg/api/service k8s.io/kubernetes/pkg/api/v1/pod +k8s.io/kubernetes/pkg/api/v1/service k8s.io/kubernetes/pkg/apis/apps k8s.io/kubernetes/pkg/apis/autoscaling k8s.io/kubernetes/pkg/apis/batch @@ -1468,6 +1482,7 @@ k8s.io/kubernetes/pkg/fieldpath k8s.io/kubernetes/pkg/kubelet/events k8s.io/kubernetes/pkg/kubelet/server/metrics k8s.io/kubernetes/pkg/securitycontext +k8s.io/kubernetes/pkg/util/filesystem k8s.io/kubernetes/pkg/util/hash k8s.io/kubernetes/pkg/util/labels k8s.io/kubernetes/pkg/util/parsers @@ -1507,11 +1522,11 @@ k8s.io/kubernetes/test/utils k8s.io/kubernetes/test/utils/format k8s.io/kubernetes/test/utils/image k8s.io/kubernetes/test/utils/kubeconfig -# k8s.io/mount-utils v0.28.4 => k8s.io/mount-utils v0.28.4 -## explicit; go 
1.20 +# k8s.io/mount-utils v0.29.0 => k8s.io/mount-utils v0.29.0 +## explicit; go 1.21 k8s.io/mount-utils -# k8s.io/pod-security-admission v0.28.4 => k8s.io/pod-security-admission v0.28.4 -## explicit; go 1.20 +# k8s.io/pod-security-admission v0.29.0 => k8s.io/pod-security-admission v0.29.0 +## explicit; go 1.21 k8s.io/pod-security-admission/api k8s.io/pod-security-admission/policy # k8s.io/utils v0.0.0-20231127182322-b307cd553661 @@ -1535,13 +1550,13 @@ k8s.io/utils/ptr k8s.io/utils/strings k8s.io/utils/strings/slices k8s.io/utils/trace -# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 -## explicit; go 1.17 +# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 +## explicit; go 1.20 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client -# sigs.k8s.io/cloud-provider-azure v1.27.1-0.20231213062409-f1ce7de3fdcb +# sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240108181414-3a9c6084f8a5 ## explicit; go 1.21 sigs.k8s.io/cloud-provider-azure/pkg/azureclients sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient @@ -1555,7 +1570,6 @@ sigs.k8s.io/cloud-provider-azure/pkg/azureclients/interfaceclient sigs.k8s.io/cloud-provider-azure/pkg/azureclients/interfaceclient/mockinterfaceclient sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/mockloadbalancerclient -sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednsclient sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednszonegroupclient sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privateendpointclient sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatelinkserviceclient @@ -1574,7 +1588,6 @@ sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/mockstorageaccountclient sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/mocksubnetclient -sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmasclient sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient @@ -1590,12 +1603,15 @@ sigs.k8s.io/cloud-provider-azure/pkg/metrics sigs.k8s.io/cloud-provider-azure/pkg/provider sigs.k8s.io/cloud-provider-azure/pkg/provider/config sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer +sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/fnutil +sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil +sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/securitygroup sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine sigs.k8s.io/cloud-provider-azure/pkg/retry sigs.k8s.io/cloud-provider-azure/pkg/util/deepcopy sigs.k8s.io/cloud-provider-azure/pkg/util/taints sigs.k8s.io/cloud-provider-azure/pkg/version -# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20231205023417-1ba5a224ab0e +# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240105075710-c4d4895a970b ## explicit; go 1.20 sigs.k8s.io/cloud-provider-azure/pkg/azclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient @@ -1609,6 +1625,7 @@ 
sigs.k8s.io/cloud-provider-azure/pkg/azclient/interfaceclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/ipgroupclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/loadbalancerclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/managedclusterclient +sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/ratelimit sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/ratelimit/flowcontrol sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/retryrepectthrottled @@ -1633,6 +1650,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient +sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient # sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20231208022044-b9ede3fc98e9 ## explicit; go 1.20 sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader @@ -1640,7 +1658,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader ## explicit; go 1.18 sigs.k8s.io/json sigs.k8s.io/json/internal/golang/encoding/json -# sigs.k8s.io/structured-merge-diff/v4 v4.2.3 +# sigs.k8s.io/structured-merge-diff/v4 v4.4.1 ## explicit; go 1.13 sigs.k8s.io/structured-merge-diff/v4/fieldpath sigs.k8s.io/structured-merge-diff/v4/merge @@ -1651,32 +1669,32 @@ sigs.k8s.io/structured-merge-diff/v4/value ## explicit; go 1.12 sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 -# k8s.io/api => k8s.io/api v0.28.4 -# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.4 -# k8s.io/apimachinery => k8s.io/apimachinery v0.28.4 -# k8s.io/apiserver => k8s.io/apiserver v0.28.4 -# k8s.io/cli-runtime => k8s.io/cli-runtime v0.28.4 -# k8s.io/client-go => k8s.io/client-go v0.28.4 -# k8s.io/cloud-provider => k8s.io/cloud-provider v0.28.4 -# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.28.4 -# k8s.io/code-generator => k8s.io/code-generator v0.28.4 -# k8s.io/component-base => k8s.io/component-base v0.28.4 -# k8s.io/component-helpers => k8s.io/component-helpers v0.28.4 -# k8s.io/controller-manager => k8s.io/controller-manager v0.28.4 -# k8s.io/cri-api => k8s.io/cri-api v0.28.4 -# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.28.4 -# k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.28.4 -# k8s.io/endpointslice => k8s.io/endpointslice v0.28.4 -# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.28.4 -# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.28.4 -# k8s.io/kube-proxy => k8s.io/kube-proxy v0.28.4 -# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.28.4 -# k8s.io/kubectl => k8s.io/kubectl v0.28.4 -# k8s.io/kubelet => k8s.io/kubelet v0.28.4 -# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.28.4 -# k8s.io/metrics => k8s.io/metrics v0.28.4 -# k8s.io/mount-utils => k8s.io/mount-utils v0.28.4 -# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.28.4 -# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.28.4 -# k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.28.4 -# k8s.io/sample-controller => k8s.io/sample-controller v0.28.4 +# k8s.io/api => k8s.io/api v0.29.0 +# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.29.0 +# k8s.io/apimachinery => k8s.io/apimachinery v0.29.0 +# k8s.io/apiserver => k8s.io/apiserver v0.29.0 +# k8s.io/cli-runtime => k8s.io/cli-runtime v0.29.0 +# 
k8s.io/client-go => k8s.io/client-go v0.29.0 +# k8s.io/cloud-provider => k8s.io/cloud-provider v0.29.0 +# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.29.0 +# k8s.io/code-generator => k8s.io/code-generator v0.29.0 +# k8s.io/component-base => k8s.io/component-base v0.29.0 +# k8s.io/component-helpers => k8s.io/component-helpers v0.29.0 +# k8s.io/controller-manager => k8s.io/controller-manager v0.29.0 +# k8s.io/cri-api => k8s.io/cri-api v0.29.0 +# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.29.0 +# k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.29.0 +# k8s.io/endpointslice => k8s.io/endpointslice v0.29.0 +# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.29.0 +# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.29.0 +# k8s.io/kube-proxy => k8s.io/kube-proxy v0.29.0 +# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.29.0 +# k8s.io/kubectl => k8s.io/kubectl v0.29.0 +# k8s.io/kubelet => k8s.io/kubelet v0.29.0 +# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.29.0 +# k8s.io/metrics => k8s.io/metrics v0.29.0 +# k8s.io/mount-utils => k8s.io/mount-utils v0.29.0 +# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.29.0 +# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.29.0 +# k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.29.0 +# k8s.io/sample-controller => k8s.io/sample-controller v0.29.0 diff --git a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go index 541e6a5aa..845a38c11 100644 --- a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go +++ b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go @@ -120,7 +120,7 @@ type grpcTunnel struct { stream client.ProxyService_ProxyClient sendLock sync.Mutex recvLock sync.Mutex - clientConn clientConn + grpcConn clientConn pendingDial pendingDialManager conns connectionManager @@ -197,7 +197,7 @@ func CreateSingleUseGrpcTunnelWithContext(createCtx, tunnelCtx context.Context, func newUnstartedTunnel(stream client.ProxyService_ProxyClient, c clientConn) *grpcTunnel { t := grpcTunnel{ stream: stream, - clientConn: c, + grpcConn: c, pendingDial: pendingDialManager{pendingDials: make(map[int64]pendingDial)}, conns: connectionManager{conns: make(map[int64]*conn)}, readTimeoutSeconds: 10, @@ -238,7 +238,7 @@ func (t *grpcTunnel) closeMetric() { func (t *grpcTunnel) serve(tunnelCtx context.Context) { defer func() { - t.clientConn.Close() + t.grpcConn.Close() // A connection in t.conns after serve() returns means // we never received a CLOSE_RSP for it, so we need to @@ -278,7 +278,7 @@ func (t *grpcTunnel) serve(tunnelCtx context.Context) { // 2. grpcTunnel.DialContext() returned early due to a dial timeout or the client canceling the context // // In either scenario, we should return here and close the tunnel as it is no longer needed. 
- kvs := []interface{}{"dialID", resp.Random, "connectID", resp.ConnectID} + kvs := []interface{}{"dialID", resp.Random, "connectionID", resp.ConnectID} if resp.Error != "" { kvs = append(kvs, "error", resp.Error) } @@ -349,14 +349,7 @@ func (t *grpcTunnel) serve(tunnelCtx context.Context) { if !ok { klog.ErrorS(nil, "Connection not recognized", "connectionID", resp.ConnectID, "packetType", "DATA") - t.Send(&client.Packet{ - Type: client.PacketType_CLOSE_REQ, - Payload: &client.Packet_CloseRequest{ - CloseRequest: &client.CloseRequest{ - ConnectID: resp.ConnectID, - }, - }, - }) + t.sendCloseRequest(resp.ConnectID) continue } timer := time.NewTimer((time.Duration)(t.readTimeoutSeconds) * time.Second) @@ -448,9 +441,8 @@ func (t *grpcTunnel) dialContext(requestCtx context.Context, protocol, address s klog.V(5).Infoln("DIAL_REQ sent to proxy server") c := &conn{ - tunnel: t, - random: random, - closeTunnel: t.closeTunnel, + tunnel: t, + random: random, } select { @@ -464,11 +456,17 @@ func (t *grpcTunnel) dialContext(requestCtx context.Context, protocol, address s t.conns.add(res.connid, c) case <-time.After(30 * time.Second): klog.V(5).InfoS("Timed out waiting for DialResp", "dialID", random) - go t.closeDial(random) + go func() { + defer t.closeTunnel() + t.sendDialClose(random) + }() return nil, &dialFailure{"dial timeout, backstop", metrics.DialFailureTimeout} case <-requestCtx.Done(): klog.V(5).InfoS("Context canceled waiting for DialResp", "ctxErr", requestCtx.Err(), "dialID", random) - go t.closeDial(random) + go func() { + defer t.closeTunnel() + t.sendDialClose(random) + }() return nil, &dialFailure{"dial timeout, context", metrics.DialFailureContext} case <-t.done: klog.V(5).InfoS("Tunnel closed while waiting for DialResp", "dialID", random) @@ -483,7 +481,21 @@ func (t *grpcTunnel) Done() <-chan struct{} { } // Send a best-effort DIAL_CLS request for the given dial ID. -func (t *grpcTunnel) closeDial(dialID int64) { + +func (t *grpcTunnel) sendCloseRequest(connID int64) error { + req := &client.Packet{ + Type: client.PacketType_CLOSE_REQ, + Payload: &client.Packet_CloseRequest{ + CloseRequest: &client.CloseRequest{ + ConnectID: connID, + }, + }, + } + klog.V(5).InfoS("[tracing] send req", "type", req.Type) + return t.Send(req) +} + +func (t *grpcTunnel) sendDialClose(dialID int64) error { req := &client.Packet{ Type: client.PacketType_DIAL_CLS, Payload: &client.Packet_CloseDial{ @@ -492,15 +504,13 @@ func (t *grpcTunnel) closeDial(dialID int64) { }, }, } - if err := t.Send(req); err != nil { - klog.V(5).InfoS("Failed to send DIAL_CLS", "err", err, "dialID", dialID) - } - t.closeTunnel() + klog.V(5).InfoS("[tracing] send req", "type", req.Type) + return t.Send(req) } func (t *grpcTunnel) closeTunnel() { atomic.StoreUint32(&t.closing, 1) - t.clientConn.Close() + t.grpcConn.Close() } func (t *grpcTunnel) isClosing() bool { diff --git a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/conn.go b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/conn.go index f4d3f7886..013515617 100644 --- a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/conn.go +++ b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/conn.go @@ -20,6 +20,7 @@ import ( "errors" "io" "net" + "sync/atomic" "time" "k8s.io/klog/v2" @@ -31,25 +32,31 @@ import ( // successful delivery of CLOSE_REQ. 
const CloseTimeout = 10 * time.Second +var errConnTunnelClosed = errors.New("tunnel closed") var errConnCloseTimeout = errors.New("close timeout") // conn is an implementation of net.Conn, where the data is transported // over an established tunnel defined by a gRPC service ProxyService. type conn struct { - tunnel *grpcTunnel - connID int64 - random int64 - readCh chan []byte + tunnel *grpcTunnel + // connID is set when a successful DIAL_RSP is received + connID int64 + // random (dialID) is always initialized + random int64 + readCh chan []byte + // On receiving CLOSE_RSP, closeCh will be sent any error message and closed. closeCh chan string rdata []byte - // closeTunnel is an optional callback to close the underlying grpc connection. - closeTunnel func() + // closing is an atomic bool represented as a 0 or 1, and set to true when the connection is being closed. + // closing should only be accessed through atomic methods. + // TODO: switch this to an atomic.Bool once the client is exclusively buit with go1.19+ + closing uint32 } var _ net.Conn = &conn{} -// Write sends the data thru the connection over proxy service +// Write sends the data through the connection over proxy service func (c *conn) Write(data []byte) (n int, err error) { req := &client.Packet{ Type: client.PacketType_DATA, @@ -116,40 +123,23 @@ func (c *conn) SetWriteDeadline(t time.Time) error { return errors.New("not implemented") } -// Close closes the connection. It also sends CLOSE_REQ packet over -// proxy service to notify remote to drop the connection. +// Close closes the connection, sends best-effort close signal to proxy +// service, and frees resources. func (c *conn) Close() error { - klog.V(4).Infoln("closing connection") - if c.closeTunnel != nil { - defer c.closeTunnel() + old := atomic.SwapUint32(&c.closing, 1) + if old != 0 { + // prevent duplicate messages + return nil } + klog.V(4).Infoln("closing connection", "dialID", c.random, "connectionID", c.connID) + + defer c.tunnel.closeTunnel() - var req *client.Packet if c.connID != 0 { - req = &client.Packet{ - Type: client.PacketType_CLOSE_REQ, - Payload: &client.Packet_CloseRequest{ - CloseRequest: &client.CloseRequest{ - ConnectID: c.connID, - }, - }, - } + c.tunnel.sendCloseRequest(c.connID) } else { // Never received a DIAL response so no connection ID. - req = &client.Packet{ - Type: client.PacketType_DIAL_CLS, - Payload: &client.Packet_CloseDial{ - CloseDial: &client.CloseDial{ - Random: c.random, - }, - }, - } - } - - klog.V(5).InfoS("[tracing] send req", "type", req.Type) - - if err := c.tunnel.Send(req); err != nil { - return err + c.tunnel.sendDialClose(c.random) } select { @@ -158,6 +148,8 @@ func (c *conn) Close() error { return errors.New(errMsg) } return nil + case <-c.tunnel.Done(): + return errConnTunnelClosed case <-time.After(CloseTimeout): } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/Makefile b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/Makefile index 44f00b3be..e0efaecec 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/Makefile +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/Makefile @@ -63,12 +63,12 @@ vet: golangci-lint ## Run go vet against code. .PHONY: build TYPESCAFFOLD = $(LOCALBIN)/typescaffold CLIENTGEN = $(LOCALBIN)/client-gen -build: fmt vet ## Build manager binary. +build: ## Build manager binary. 
pushd client-gen; CGO_ENABLED=0 go build -o ../bin/client-gen ./cmd/client-gen/ ;popd pushd client-gen; CGO_ENABLED=0 go build -o ../bin/typescaffold ./cmd/typescaffold/;popd .PHONY: generate -generate: install-dependencies build generatecode generateimpl vet-all +generate: install-dependencies build generatecode generateimpl fmt vet-all .PHONY: generatecode generatecode: build ## Generate client @@ -96,6 +96,7 @@ generatecode: build ## Generate client $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 --package-alias armnetwork --resource IPGroup --client-name IPGroupsClient --verbs get,createorupdate,delete,listbyrg --expand --ratelimitkey ipGroupRateLimit $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage --package-alias armstorage --resource Account --client-name AccountsClient --verbs listbyrg --expand $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns --package-alias armprivatedns --resource PrivateZone --client-name PrivateZonesClient --verbs get,createorupdate --ratelimitkey privateDNSRateLimit + $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns --package-alias armprivatedns --resource PrivateZone --subresource VirtualNetworkLink --client-name VirtualNetworkLinksClient --verbs get,createorupdate --ratelimitkey virtualNetworkRateLimit $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage --package-alias armstorage --resource Account --subresource FileShare --client-name FileSharesClient --verbs get,createorupdate,delete,list $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage --package-alias armstorage --resource Account --subresource BlobContainer --client-name BlobContainersClient --verbs get,list $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage --package-alias armstorage --resource BlobServiceProperties --client-name BlobServicesClient @@ -106,7 +107,7 @@ generatecode: build ## Generate client .PHONY: generateimpl generateimpl: build ## Generate client - $(CLIENTGEN) clientgen:headerFile=../../hack/boilerplate/boilerplate.gomock.txt paths=./... + PATH=$(LOCALBIN):$$PATH $(CLIENTGEN) clientgen:headerFile=../../hack/boilerplate/boilerplate.gomock.txt paths=./... .PHONY: vet-all vet-all: golangci-lint ## Run go vet against code. @@ -137,7 +138,7 @@ MOCKGEN ?= $(LOCALBIN)/mockgen .PHONY: mockgen mockgen: $(MOCKGEN) ## Download mockgen locally if necessary. 
$(MOCKGEN): $(LOCALBIN) - test -s $(LOCALBIN)/mockgen || GOBIN=$(LOCALBIN) go install github.com/golang/mock/mockgen@v1.6.0 + test -s $(LOCALBIN)/mockgen || GOBIN=$(LOCALBIN) go install go.uber.org/mock/mockgen@latest GINKGO ?= $(LOCALBIN)/ginkgo .PHONY: ginkgo diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient/interface.go index 9cc97b01a..0a8ff91de 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient/interface.go @@ -25,7 +25,7 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" ) -// +azure:client:verbs=listbyrg,resource=Account,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage,packageAlias=armstorage,clientName=AccountsClient,expand=true +// +azure:client:verbs=listbyrg,resource=Account,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage,packageAlias=armstorage,clientName=AccountsClient,expand=true,crossSubFactory=true type Interface interface { utils.ListFunc[armstorage.Account] Create(ctx context.Context, resourceGroupName string, accountName string, resource *armstorage.AccountCreateParameters) (*armstorage.Account, error) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient/zz_generated_client.go index 119d785f3..33565d250 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armstorage "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,26 +32,36 @@ import ( type Client struct { *armstorage.AccountsClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armstorage.NewAccountsClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + AccountsClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const ListOperationName = "AccountsClient.List" + // List gets a list of Account in the resource group. 
func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armstorage.Account, rerr error) { ctx = utils.ContextWithClientName(ctx, "AccountsClient") ctx = utils.ContextWithRequestMethod(ctx, "List") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, ListOperationName, client.tracer, nil) + defer endSpan(rerr) pager := client.AccountsClient.NewListByResourceGroupPager(resourceGroupName, nil) for pager.More() { nextResult, err := pager.NextPage(ctx) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/arm_conf.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/arm_conf.go index 4fd05e8a9..e533cf669 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/arm_conf.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/arm_conf.go @@ -21,6 +21,7 @@ import ( "strings" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" ) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth.go index 1bc5c5d3c..5c519c141 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth.go @@ -17,8 +17,6 @@ limitations under the License. package azclient import ( - "crypto/rsa" - "crypto/x509" "fmt" "os" "strings" @@ -26,12 +24,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "golang.org/x/crypto/pkcs12" -) - -var ( - // ErrorNoAuth indicates that no credentials are provided. 
- ErrorNoAuth = fmt.Errorf("no credentials provided for Azure cloud provider") ) type AuthProvider struct { @@ -119,7 +111,7 @@ func NewAuthProvider(armConfig *ARMClientConfig, config *AzureAuthConfig, client // ClientCertificateCredential is used for client certificate var clientCertificateCredential azcore.TokenCredential - if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 { + if len(config.AADClientCertPath) > 0 { credOptions := &azidentity.ClientCertificateCredentialOptions{ ClientOptions: *clientOption, SendCertificateChain: true, @@ -128,11 +120,11 @@ func NewAuthProvider(armConfig *ARMClientConfig, config *AzureAuthConfig, client if err != nil { return nil, fmt.Errorf("reading the client certificate from file %s: %w", config.AADClientCertPath, err) } - certificate, privateKey, err := decodePkcs12(certData, config.AADClientCertPassword) + certificate, privateKey, err := azidentity.ParseCertificates(certData, []byte(config.AADClientCertPassword)) if err != nil { return nil, fmt.Errorf("decoding the client certificate: %w", err) } - clientCertificateCredential, err = azidentity.NewClientCertificateCredential(armConfig.GetTenantID(), config.GetAADClientID(), []*x509.Certificate{certificate}, privateKey, credOptions) + clientCertificateCredential, err = azidentity.NewClientCertificateCredential(armConfig.GetTenantID(), config.GetAADClientID(), certificate, privateKey, credOptions) if err != nil { return nil, err } @@ -148,48 +140,33 @@ func NewAuthProvider(armConfig *ARMClientConfig, config *AzureAuthConfig, client }, nil } -func (factory *AuthProvider) GetAzIdentity() (azcore.TokenCredential, error) { +func (factory *AuthProvider) GetAzIdentity() azcore.TokenCredential { switch true { case factory.FederatedIdentityCredential != nil: - return factory.FederatedIdentityCredential, nil + return factory.FederatedIdentityCredential case factory.ManagedIdentityCredential != nil: - return factory.ManagedIdentityCredential, nil + return factory.ManagedIdentityCredential case factory.ClientSecretCredential != nil: - return factory.ClientSecretCredential, nil + return factory.ClientSecretCredential case factory.ClientCertificateCredential != nil: - return factory.ClientCertificateCredential, nil + return factory.ClientCertificateCredential default: - return nil, ErrorNoAuth - } -} - -// decodePkcs12 decodes a PKCS#12 client certificate by extracting the public certificate and -// the private RSA key -func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { - privateKey, certificate, err := pkcs12.Decode(pkcs, password) - if err != nil { - return nil, nil, fmt.Errorf("decoding the PKCS#12 client certificate: %w", err) + return nil } - rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey) - if !isRsaKey { - return nil, nil, fmt.Errorf("PKCS#12 certificate must contain a RSA private key") - } - - return certificate, rsaPrivateKey, nil } -func (factory *AuthProvider) GetNetworkAzIdentity() (azcore.TokenCredential, error) { +func (factory *AuthProvider) GetNetworkAzIdentity() azcore.TokenCredential { if factory.NetworkClientSecretCredential != nil { - return factory.NetworkClientSecretCredential, nil + return factory.NetworkClientSecretCredential } - return nil, ErrorNoAuth + return nil } -func (factory *AuthProvider) GetMultiTenantIdentity() (azcore.TokenCredential, error) { +func (factory *AuthProvider) GetMultiTenantIdentity() azcore.TokenCredential { if factory.MultiTenantCredential != nil { - return factory.MultiTenantCredential, 
nil + return factory.MultiTenantCredential } - return nil, ErrorNoAuth + return nil } func (factory *AuthProvider) IsMultiTenantModeEnabled() bool { diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/availabilitysetclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/availabilitysetclient/zz_generated_client.go index 23c34b5e7..da4a9eda4 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/availabilitysetclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/availabilitysetclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,20 +32,28 @@ import ( type Client struct { *armcompute.AvailabilitySetsClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armcompute.NewAvailabilitySetsClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + AvailabilitySetsClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const GetOperationName = "AvailabilitySetsClient.Get" + // Get gets the AvailabilitySet func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string) (result *armcompute.AvailabilitySet, rerr error) { @@ -51,6 +61,8 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc ctx = utils.ContextWithRequestMethod(ctx, "Get") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, GetOperationName, client.tracer, nil) + defer endSpan(rerr) resp, err := client.AvailabilitySetsClient.Get(ctx, resourceGroupName, resourceName, nil) if err != nil { return nil, err @@ -59,12 +71,16 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc return &resp.AvailabilitySet, nil } +const ListOperationName = "AvailabilitySetsClient.List" + // List gets a list of AvailabilitySet in the resource group. 
func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armcompute.AvailabilitySet, rerr error) { ctx = utils.ContextWithClientName(ctx, "AvailabilitySetsClient") ctx = utils.ContextWithRequestMethod(ctx, "List") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, ListOperationName, client.tracer, nil) + defer endSpan(rerr) pager := client.AvailabilitySetsClient.NewListPager(resourceGroupName, nil) for pager.More() { nextResult, err := pager.NextPage(ctx) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobcontainerclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobcontainerclient/interface.go index b8fc8ae9d..39576c3f1 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobcontainerclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobcontainerclient/interface.go @@ -25,7 +25,7 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" ) -// +azure:client:verbs=get,resource=Account,subResource=BlobContainer,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage,packageAlias=armstorage,clientName=BlobContainersClient,expand=false +// +azure:client:verbs=get,resource=Account,subResource=BlobContainer,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage,packageAlias=armstorage,clientName=BlobContainersClient,expand=false,crossSubFactory=true type Interface interface { utils.SubResourceGetFunc[armstorage.BlobContainer] CreateContainer(ctx context.Context, resourceGroupName, accountName, containerName string, parameters armstorage.BlobContainer) (*armstorage.BlobContainer, error) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobcontainerclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobcontainerclient/zz_generated_client.go index 031ea2ea6..be49be1a0 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobcontainerclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobcontainerclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armstorage "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,20 +32,28 @@ import ( type Client struct { *armstorage.BlobContainersClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armstorage.NewBlobContainersClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + BlobContainersClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const GetOperationName = "BlobContainersClient.Get" + // Get gets the BlobContainer func (client *Client) Get(ctx context.Context, resourceGroupName string, parentResourceName string, resourceName string) (result *armstorage.BlobContainer, rerr error) { @@ 
-51,6 +61,8 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, parentR ctx = utils.ContextWithRequestMethod(ctx, "Get") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, GetOperationName, client.tracer, nil) + defer endSpan(rerr) resp, err := client.BlobContainersClient.Get(ctx, resourceGroupName, parentResourceName, resourceName, nil) if err != nil { return nil, err diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobservicepropertiesclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobservicepropertiesclient/zz_generated_client.go index 0f67ac815..8e21f1db0 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobservicepropertiesclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobservicepropertiesclient/zz_generated_client.go @@ -20,6 +20,7 @@ package blobservicepropertiesclient import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armstorage "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -28,16 +29,22 @@ import ( type Client struct { *armstorage.BlobServicesClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armstorage.NewBlobServicesClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + BlobServicesClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/deploymentclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/deploymentclient/zz_generated_client.go index 7a98a8c49..76c907519 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/deploymentclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/deploymentclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" resources "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,26 +32,36 @@ import ( type Client struct { *resources.DeploymentsClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := resources.NewDeploymentsClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + DeploymentsClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const DeleteOperationName = "DeploymentsClient.Delete" + // Delete deletes a 
Deployment by name. -func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error { +func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) (err error) { ctx = utils.ContextWithClientName(ctx, "DeploymentsClient") ctx = utils.ContextWithRequestMethod(ctx, "Delete") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) - _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) + ctx, endSpan := runtime.StartSpan(ctx, DeleteOperationName, client.tracer, nil) + defer endSpan(err) + _, err = utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) return err } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/interface.go index 41d215c8f..af3d0a903 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/interface.go @@ -23,7 +23,7 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" ) -// +azure:client:verbs=get;createorupdate;delete;listbyrg,resource=Disk,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5,packageAlias=armcompute,clientName=DisksClient,expand=false,rateLimitKey=diskRateLimit +// +azure:client:verbs=get;createorupdate;delete;listbyrg,resource=Disk,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5,packageAlias=armcompute,clientName=DisksClient,expand=false,rateLimitKey=diskRateLimit,crossSubFactory=true type Interface interface { utils.GetFunc[armcompute.Disk] utils.CreateOrUpdateFunc[armcompute.Disk] diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/zz_generated_client.go index a676d5129..4fbe10e6e 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,20 +32,28 @@ import ( type Client struct { *armcompute.DisksClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armcompute.NewDisksClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + DisksClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const GetOperationName = "DisksClient.Get" + // Get gets the Disk func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string) (result *armcompute.Disk, rerr error) { @@ -51,6 +61,8 @@ 
func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc ctx = utils.ContextWithRequestMethod(ctx, "Get") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, GetOperationName, client.tracer, nil) + defer endSpan(rerr) resp, err := client.DisksClient.Get(ctx, resourceGroupName, resourceName, nil) if err != nil { return nil, err @@ -59,12 +71,16 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc return &resp.Disk, nil } +const CreateOrUpdateOperationName = "DisksClient.Create" + // CreateOrUpdate creates or updates a Disk. -func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armcompute.Disk) (*armcompute.Disk, error) { +func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armcompute.Disk) (result *armcompute.Disk, err error) { ctx = utils.ContextWithClientName(ctx, "DisksClient") ctx = utils.ContextWithRequestMethod(ctx, "CreateOrUpdate") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, CreateOrUpdateOperationName, client.tracer, nil) + defer endSpan(err) resp, err := utils.NewPollerWrapper(client.DisksClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx) if err != nil { return nil, err @@ -75,22 +91,30 @@ func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName stri return nil, nil } +const DeleteOperationName = "DisksClient.Delete" + // Delete deletes a Disk by name. -func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error { +func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) (err error) { ctx = utils.ContextWithClientName(ctx, "DisksClient") ctx = utils.ContextWithRequestMethod(ctx, "Delete") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) - _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) + ctx, endSpan := runtime.StartSpan(ctx, DeleteOperationName, client.tracer, nil) + defer endSpan(err) + _, err = utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) return err } +const ListOperationName = "DisksClient.List" + // List gets a list of Disk in the resource group. 
 func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armcompute.Disk, rerr error) {
 	ctx = utils.ContextWithClientName(ctx, "DisksClient")
 	ctx = utils.ContextWithRequestMethod(ctx, "List")
 	ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName)
 	ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID)
+	ctx, endSpan := runtime.StartSpan(ctx, ListOperationName, client.tracer, nil)
+	defer endSpan(rerr)
 	pager := client.DisksClient.NewListByResourceGroupPager(resourceGroupName, nil)
 	for pager.More() {
 		nextResult, err := pager.NextPage(ctx)
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory.go
index 2e7e735be..d2b159e57 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory.go
@@ -48,16 +48,21 @@ import (
 	"sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient"
 	"sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient"
 	"sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient"
 )
 
 type ClientFactory interface {
 	GetAccountClient() accountclient.Interface
+	GetAccountClientForSub(subscriptionID string) (accountclient.Interface, error)
 	GetAvailabilitySetClient() availabilitysetclient.Interface
 	GetBlobContainerClient() blobcontainerclient.Interface
+	GetBlobContainerClientForSub(subscriptionID string) (blobcontainerclient.Interface, error)
 	GetBlobServicePropertiesClient() blobservicepropertiesclient.Interface
 	GetDeploymentClient() deploymentclient.Interface
 	GetDiskClient() diskclient.Interface
+	GetDiskClientForSub(subscriptionID string) (diskclient.Interface, error)
 	GetFileShareClient() fileshareclient.Interface
+	GetFileShareClientForSub(subscriptionID string) (fileshareclient.Interface, error)
 	GetInterfaceClient() interfaceclient.Interface
 	GetIPGroupClient() ipgroupclient.Interface
 	GetLoadBalancerClient() loadbalancerclient.Interface
@@ -81,4 +86,5 @@ type ClientFactory interface {
 	GetVirtualMachineScaleSetClient() virtualmachinescalesetclient.Interface
 	GetVirtualMachineScaleSetVMClient() virtualmachinescalesetvmclient.Interface
 	GetVirtualNetworkClient() virtualnetworkclient.Interface
+	GetVirtualNetworkLinkClient() virtualnetworklinkclient.Interface
 }
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_gen.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_gen.go
index 767b16283..c1aed81d6 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_gen.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_gen.go
@@ -18,9 +18,11 @@ package azclient
 
 import (
+	"strings"
+	"sync"
+
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
-	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
 	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
 
 	"sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient"
@@ -54,18 +56,21 @@ import (
 	"sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient"
 	"sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient"
 	"sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient"
 )
 
 type ClientFactoryImpl struct {
-	*ClientFactoryConfig
+	armConfig     *ARMClientConfig
+	facotryConfig *ClientFactoryConfig
cred azcore.TokenCredential - accountclientInterface accountclient.Interface + clientOptionsMutFn []func(option *arm.ClientOptions) + accountclientInterface sync.Map availabilitysetclientInterface availabilitysetclient.Interface - blobcontainerclientInterface blobcontainerclient.Interface + blobcontainerclientInterface sync.Map blobservicepropertiesclientInterface blobservicepropertiesclient.Interface deploymentclientInterface deploymentclient.Interface - diskclientInterface diskclient.Interface - fileshareclientInterface fileshareclient.Interface + diskclientInterface sync.Map + fileshareclientInterface sync.Map interfaceclientInterface interfaceclient.Interface ipgroupclientInterface ipgroupclient.Interface loadbalancerclientInterface loadbalancerclient.Interface @@ -89,6 +94,7 @@ type ClientFactoryImpl struct { virtualmachinescalesetclientInterface virtualmachinescalesetclient.Interface virtualmachinescalesetvmclientInterface virtualmachinescalesetvmclient.Interface virtualnetworkclientInterface virtualnetworkclient.Interface + virtualnetworklinkclientInterface virtualnetworklinkclient.Interface } func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, cred azcore.TokenCredential, clientOptionsMutFn ...func(option *arm.ClientOptions)) (ClientFactory, error) { @@ -99,715 +105,964 @@ func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c cred = &azidentity.DefaultAzureCredential{} } - var options *arm.ClientOptions var err error - //initialize {accountclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient Account Interface } - options, err = GetDefaultResourceClientOption(armConfig, config) + factory := &ClientFactoryImpl{ + armConfig: armConfig, + facotryConfig: config, + cred: cred, + clientOptionsMutFn: clientOptionsMutFn, + } + + //initialize accountclient + _, err = factory.GetAccountClientForSub(config.SubscriptionID) if err != nil { return nil, err } - var ratelimitOption *ratelimit.Config - var rateLimitPolicy policy.Policy - for _, optionMutFn := range clientOptionsMutFn { - if optionMutFn != nil { - optionMutFn(options) - } - } - accountclientInterface, err := accountclient.New(config.SubscriptionID, cred, options) + //initialize availabilitysetclient + factory.availabilitysetclientInterface, err = factory.createAvailabilitySetClient(config.SubscriptionID) if err != nil { return nil, err } - //initialize {availabilitysetclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/availabilitysetclient AvailabilitySet Interface availabilitySetRateLimit} - options, err = GetDefaultResourceClientOption(armConfig, config) + //initialize blobcontainerclient + _, err = factory.GetBlobContainerClientForSub(config.SubscriptionID) if err != nil { return nil, err } - //add ratelimit policy - ratelimitOption = config.GetRateLimitConfig("availabilitySetRateLimit") - rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption) - if rateLimitPolicy != nil { - options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) - } - for _, optionMutFn := range clientOptionsMutFn { - if optionMutFn != nil { - optionMutFn(options) - } + + //initialize blobservicepropertiesclient + factory.blobservicepropertiesclientInterface, err = factory.createBlobServicePropertiesClient(config.SubscriptionID) + if err != nil { + return nil, err } - availabilitysetclientInterface, err := availabilitysetclient.New(config.SubscriptionID, cred, options) + + //initialize deploymentclient + factory.deploymentclientInterface, 
err = factory.createDeploymentClient(config.SubscriptionID) if err != nil { return nil, err } - //initialize {blobcontainerclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobcontainerclient Account BlobContainer Interface } - options, err = GetDefaultResourceClientOption(armConfig, config) + //initialize diskclient + _, err = factory.GetDiskClientForSub(config.SubscriptionID) if err != nil { return nil, err } - for _, optionMutFn := range clientOptionsMutFn { - if optionMutFn != nil { - optionMutFn(options) - } + + //initialize fileshareclient + _, err = factory.GetFileShareClientForSub(config.SubscriptionID) + if err != nil { + return nil, err } - blobcontainerclientInterface, err := blobcontainerclient.New(config.SubscriptionID, cred, options) + + //initialize interfaceclient + factory.interfaceclientInterface, err = factory.createInterfaceClient(config.SubscriptionID) if err != nil { return nil, err } - //initialize {blobservicepropertiesclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobservicepropertiesclient BlobServiceProperties Interface } - options, err = GetDefaultResourceClientOption(armConfig, config) + //initialize ipgroupclient + factory.ipgroupclientInterface, err = factory.createIPGroupClient(config.SubscriptionID) if err != nil { return nil, err } - for _, optionMutFn := range clientOptionsMutFn { - if optionMutFn != nil { - optionMutFn(options) - } + + //initialize loadbalancerclient + factory.loadbalancerclientInterface, err = factory.createLoadBalancerClient(config.SubscriptionID) + if err != nil { + return nil, err } - blobservicepropertiesclientInterface, err := blobservicepropertiesclient.New(config.SubscriptionID, cred, options) + + //initialize managedclusterclient + factory.managedclusterclientInterface, err = factory.createManagedClusterClient(config.SubscriptionID) if err != nil { return nil, err } - //initialize {deploymentclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/deploymentclient Deployment Interface deploymentRateLimit} - options, err = GetDefaultResourceClientOption(armConfig, config) + //initialize privateendpointclient + factory.privateendpointclientInterface, err = factory.createPrivateEndpointClient(config.SubscriptionID) if err != nil { return nil, err } - //add ratelimit policy - ratelimitOption = config.GetRateLimitConfig("deploymentRateLimit") - rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption) - if rateLimitPolicy != nil { - options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) + + //initialize privatelinkserviceclient + factory.privatelinkserviceclientInterface, err = factory.createPrivateLinkServiceClient(config.SubscriptionID) + if err != nil { + return nil, err } - for _, optionMutFn := range clientOptionsMutFn { - if optionMutFn != nil { - optionMutFn(options) - } + + //initialize privatezoneclient + factory.privatezoneclientInterface, err = factory.createPrivateZoneClient(config.SubscriptionID) + if err != nil { + return nil, err } - deploymentclientInterface, err := deploymentclient.New(config.SubscriptionID, cred, options) + + //initialize providerclient + factory.providerclientInterface, err = factory.createProviderClient(config.SubscriptionID) if err != nil { return nil, err } - //initialize {diskclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient Disk Interface diskRateLimit} - options, err = GetDefaultResourceClientOption(armConfig, config) + //initialize publicipaddressclient + factory.publicipaddressclientInterface, err = 
factory.createPublicIPAddressClient(config.SubscriptionID) if err != nil { return nil, err } - //add ratelimit policy - ratelimitOption = config.GetRateLimitConfig("diskRateLimit") - rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption) - if rateLimitPolicy != nil { - options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) + + //initialize publicipprefixclient + factory.publicipprefixclientInterface, err = factory.createPublicIPPrefixClient(config.SubscriptionID) + if err != nil { + return nil, err } - for _, optionMutFn := range clientOptionsMutFn { - if optionMutFn != nil { - optionMutFn(options) - } + + //initialize registryclient + factory.registryclientInterface, err = factory.createRegistryClient(config.SubscriptionID) + if err != nil { + return nil, err } - diskclientInterface, err := diskclient.New(config.SubscriptionID, cred, options) + + //initialize resourcegroupclient + factory.resourcegroupclientInterface, err = factory.createResourceGroupClient(config.SubscriptionID) if err != nil { return nil, err } - //initialize {fileshareclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/fileshareclient Account FileShare Interface } - options, err = GetDefaultResourceClientOption(armConfig, config) + //initialize routetableclient + factory.routetableclientInterface, err = factory.createRouteTableClient(config.SubscriptionID) if err != nil { return nil, err } - for _, optionMutFn := range clientOptionsMutFn { - if optionMutFn != nil { - optionMutFn(options) - } + + //initialize secretclient + factory.secretclientInterface, err = factory.createSecretClient(config.SubscriptionID) + if err != nil { + return nil, err } - fileshareclientInterface, err := fileshareclient.New(config.SubscriptionID, cred, options) + + //initialize securitygroupclient + factory.securitygroupclientInterface, err = factory.createSecurityGroupClient(config.SubscriptionID) if err != nil { return nil, err } - //initialize {interfaceclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/interfaceclient Interface Interface interfaceRateLimit} - options, err = GetDefaultResourceClientOption(armConfig, config) + //initialize snapshotclient + factory.snapshotclientInterface, err = factory.createSnapshotClient(config.SubscriptionID) if err != nil { return nil, err } - //add ratelimit policy - ratelimitOption = config.GetRateLimitConfig("interfaceRateLimit") - rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption) - if rateLimitPolicy != nil { - options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) + + //initialize sshpublickeyresourceclient + factory.sshpublickeyresourceclientInterface, err = factory.createSSHPublicKeyResourceClient(config.SubscriptionID) + if err != nil { + return nil, err } - for _, optionMutFn := range clientOptionsMutFn { - if optionMutFn != nil { - optionMutFn(options) - } + + //initialize subnetclient + factory.subnetclientInterface, err = factory.createSubnetClient(config.SubscriptionID) + if err != nil { + return nil, err } - interfaceclientInterface, err := interfaceclient.New(config.SubscriptionID, cred, options) + + //initialize vaultclient + factory.vaultclientInterface, err = factory.createVaultClient(config.SubscriptionID) if err != nil { return nil, err } - //initialize {ipgroupclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/ipgroupclient IPGroup Interface ipGroupRateLimit} - options, err = GetDefaultResourceClientOption(armConfig, config) + //initialize virtualmachineclient + 
factory.virtualmachineclientInterface, err = factory.createVirtualMachineClient(config.SubscriptionID) if err != nil { return nil, err } - //add ratelimit policy - ratelimitOption = config.GetRateLimitConfig("ipGroupRateLimit") - rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption) - if rateLimitPolicy != nil { - options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) + + //initialize virtualmachinescalesetclient + factory.virtualmachinescalesetclientInterface, err = factory.createVirtualMachineScaleSetClient(config.SubscriptionID) + if err != nil { + return nil, err } - for _, optionMutFn := range clientOptionsMutFn { - if optionMutFn != nil { - optionMutFn(options) - } + + //initialize virtualmachinescalesetvmclient + factory.virtualmachinescalesetvmclientInterface, err = factory.createVirtualMachineScaleSetVMClient(config.SubscriptionID) + if err != nil { + return nil, err } - ipgroupclientInterface, err := ipgroupclient.New(config.SubscriptionID, cred, options) + + //initialize virtualnetworkclient + factory.virtualnetworkclientInterface, err = factory.createVirtualNetworkClient(config.SubscriptionID) if err != nil { return nil, err } - //initialize {loadbalancerclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/loadbalancerclient LoadBalancer Interface loadBalancerRateLimit} - options, err = GetDefaultResourceClientOption(armConfig, config) + //initialize virtualnetworklinkclient + factory.virtualnetworklinkclientInterface, err = factory.createVirtualNetworkLinkClient(config.SubscriptionID) if err != nil { return nil, err } - //add ratelimit policy - ratelimitOption = config.GetRateLimitConfig("loadBalancerRateLimit") - rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption) - if rateLimitPolicy != nil { - options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) + return factory, nil +} + +func (factory *ClientFactoryImpl) createAccountClient(subscription string) (accountclient.Interface, error) { + //initialize accountclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) + if err != nil { + return nil, err } - for _, optionMutFn := range clientOptionsMutFn { + + for _, optionMutFn := range factory.clientOptionsMutFn { if optionMutFn != nil { optionMutFn(options) } } - loadbalancerclientInterface, err := loadbalancerclient.New(config.SubscriptionID, cred, options) + return accountclient.New(subscription, factory.cred, options) +} + +func (factory *ClientFactoryImpl) GetAccountClient() accountclient.Interface { + clientImp, _ := factory.accountclientInterface.Load(strings.ToLower(factory.facotryConfig.SubscriptionID)) + return clientImp.(accountclient.Interface) +} +func (factory *ClientFactoryImpl) GetAccountClientForSub(subscriptionID string) (accountclient.Interface, error) { + if subscriptionID == "" { + subscriptionID = factory.facotryConfig.SubscriptionID + } + clientImp, loaded := factory.accountclientInterface.Load(strings.ToLower(subscriptionID)) + if loaded { + return clientImp.(accountclient.Interface), nil + } + //It's not thread safe, but it's ok for now. because it will be called once. 
+ clientImp, err := factory.createAccountClient(subscriptionID) if err != nil { return nil, err } + factory.accountclientInterface.Store(strings.ToLower(subscriptionID), clientImp) + return clientImp.(accountclient.Interface), nil +} - //initialize {managedclusterclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/managedclusterclient ManagedCluster Interface containerServiceRateLimit} - options, err = GetDefaultResourceClientOption(armConfig, config) +func (factory *ClientFactoryImpl) createAvailabilitySetClient(subscription string) (availabilitysetclient.Interface, error) { + //initialize availabilitysetclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) if err != nil { return nil, err } + //add ratelimit policy - ratelimitOption = config.GetRateLimitConfig("containerServiceRateLimit") - rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption) + ratelimitOption := factory.facotryConfig.GetRateLimitConfig("availabilitySetRateLimit") + rateLimitPolicy := ratelimit.NewRateLimitPolicy(ratelimitOption) if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } - for _, optionMutFn := range clientOptionsMutFn { + for _, optionMutFn := range factory.clientOptionsMutFn { if optionMutFn != nil { optionMutFn(options) } } - managedclusterclientInterface, err := managedclusterclient.New(config.SubscriptionID, cred, options) + return availabilitysetclient.New(subscription, factory.cred, options) +} + +func (factory *ClientFactoryImpl) GetAvailabilitySetClient() availabilitysetclient.Interface { + return factory.availabilitysetclientInterface +} + +func (factory *ClientFactoryImpl) createBlobContainerClient(subscription string) (blobcontainerclient.Interface, error) { + //initialize blobcontainerclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) if err != nil { return nil, err } - //initialize {privateendpointclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/privateendpointclient PrivateEndpoint Interface privateEndpointRateLimit} - options, err = GetDefaultResourceClientOption(armConfig, config) + for _, optionMutFn := range factory.clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } + return blobcontainerclient.New(subscription, factory.cred, options) +} + +func (factory *ClientFactoryImpl) GetBlobContainerClient() blobcontainerclient.Interface { + clientImp, _ := factory.blobcontainerclientInterface.Load(strings.ToLower(factory.facotryConfig.SubscriptionID)) + return clientImp.(blobcontainerclient.Interface) +} +func (factory *ClientFactoryImpl) GetBlobContainerClientForSub(subscriptionID string) (blobcontainerclient.Interface, error) { + if subscriptionID == "" { + subscriptionID = factory.facotryConfig.SubscriptionID + } + clientImp, loaded := factory.blobcontainerclientInterface.Load(strings.ToLower(subscriptionID)) + if loaded { + return clientImp.(blobcontainerclient.Interface), nil + } + //It's not thread safe, but it's ok for now. because it will be called once. 
+ clientImp, err := factory.createBlobContainerClient(subscriptionID) if err != nil { return nil, err } - //add ratelimit policy - ratelimitOption = config.GetRateLimitConfig("privateEndpointRateLimit") - rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption) - if rateLimitPolicy != nil { - options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) + factory.blobcontainerclientInterface.Store(strings.ToLower(subscriptionID), clientImp) + return clientImp.(blobcontainerclient.Interface), nil +} + +func (factory *ClientFactoryImpl) createBlobServicePropertiesClient(subscription string) (blobservicepropertiesclient.Interface, error) { + //initialize blobservicepropertiesclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) + if err != nil { + return nil, err } - for _, optionMutFn := range clientOptionsMutFn { + + for _, optionMutFn := range factory.clientOptionsMutFn { if optionMutFn != nil { optionMutFn(options) } } - privateendpointclientInterface, err := privateendpointclient.New(config.SubscriptionID, cred, options) - if err != nil { - return nil, err - } + return blobservicepropertiesclient.New(subscription, factory.cred, options) +} + +func (factory *ClientFactoryImpl) GetBlobServicePropertiesClient() blobservicepropertiesclient.Interface { + return factory.blobservicepropertiesclientInterface +} - //initialize {privatelinkserviceclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatelinkserviceclient PrivateLinkService Interface privateLinkServiceRateLimit} - options, err = GetDefaultResourceClientOption(armConfig, config) +func (factory *ClientFactoryImpl) createDeploymentClient(subscription string) (deploymentclient.Interface, error) { + //initialize deploymentclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) if err != nil { return nil, err } + //add ratelimit policy - ratelimitOption = config.GetRateLimitConfig("privateLinkServiceRateLimit") - rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption) + ratelimitOption := factory.facotryConfig.GetRateLimitConfig("deploymentRateLimit") + rateLimitPolicy := ratelimit.NewRateLimitPolicy(ratelimitOption) if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } - for _, optionMutFn := range clientOptionsMutFn { + for _, optionMutFn := range factory.clientOptionsMutFn { if optionMutFn != nil { optionMutFn(options) } } - privatelinkserviceclientInterface, err := privatelinkserviceclient.New(config.SubscriptionID, cred, options) - if err != nil { - return nil, err - } + return deploymentclient.New(subscription, factory.cred, options) +} - //initialize {privatezoneclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatezoneclient PrivateZone Interface privateDNSRateLimit} - options, err = GetDefaultResourceClientOption(armConfig, config) +func (factory *ClientFactoryImpl) GetDeploymentClient() deploymentclient.Interface { + return factory.deploymentclientInterface +} + +func (factory *ClientFactoryImpl) createDiskClient(subscription string) (diskclient.Interface, error) { + //initialize diskclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) if err != nil { return nil, err } + //add ratelimit policy - ratelimitOption = config.GetRateLimitConfig("privateDNSRateLimit") - rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption) + ratelimitOption := 
factory.facotryConfig.GetRateLimitConfig("diskRateLimit") + rateLimitPolicy := ratelimit.NewRateLimitPolicy(ratelimitOption) if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } - for _, optionMutFn := range clientOptionsMutFn { + for _, optionMutFn := range factory.clientOptionsMutFn { if optionMutFn != nil { optionMutFn(options) } } - privatezoneclientInterface, err := privatezoneclient.New(config.SubscriptionID, cred, options) + return diskclient.New(subscription, factory.cred, options) +} + +func (factory *ClientFactoryImpl) GetDiskClient() diskclient.Interface { + clientImp, _ := factory.diskclientInterface.Load(strings.ToLower(factory.facotryConfig.SubscriptionID)) + return clientImp.(diskclient.Interface) +} +func (factory *ClientFactoryImpl) GetDiskClientForSub(subscriptionID string) (diskclient.Interface, error) { + if subscriptionID == "" { + subscriptionID = factory.facotryConfig.SubscriptionID + } + clientImp, loaded := factory.diskclientInterface.Load(strings.ToLower(subscriptionID)) + if loaded { + return clientImp.(diskclient.Interface), nil + } + //It's not thread safe, but it's ok for now. because it will be called once. + clientImp, err := factory.createDiskClient(subscriptionID) if err != nil { return nil, err } + factory.diskclientInterface.Store(strings.ToLower(subscriptionID), clientImp) + return clientImp.(diskclient.Interface), nil +} - //initialize {providerclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/providerclient Provider Interface } - options, err = GetDefaultResourceClientOption(armConfig, config) +func (factory *ClientFactoryImpl) createFileShareClient(subscription string) (fileshareclient.Interface, error) { + //initialize fileshareclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) if err != nil { return nil, err } - for _, optionMutFn := range clientOptionsMutFn { + + for _, optionMutFn := range factory.clientOptionsMutFn { if optionMutFn != nil { optionMutFn(options) } } - providerclientInterface, err := providerclient.New(config.SubscriptionID, cred, options) + return fileshareclient.New(subscription, factory.cred, options) +} + +func (factory *ClientFactoryImpl) GetFileShareClient() fileshareclient.Interface { + clientImp, _ := factory.fileshareclientInterface.Load(strings.ToLower(factory.facotryConfig.SubscriptionID)) + return clientImp.(fileshareclient.Interface) +} +func (factory *ClientFactoryImpl) GetFileShareClientForSub(subscriptionID string) (fileshareclient.Interface, error) { + if subscriptionID == "" { + subscriptionID = factory.facotryConfig.SubscriptionID + } + clientImp, loaded := factory.fileshareclientInterface.Load(strings.ToLower(subscriptionID)) + if loaded { + return clientImp.(fileshareclient.Interface), nil + } + //It's not thread safe, but it's ok for now. because it will be called once. 
+ clientImp, err := factory.createFileShareClient(subscriptionID) if err != nil { return nil, err } + factory.fileshareclientInterface.Store(strings.ToLower(subscriptionID), clientImp) + return clientImp.(fileshareclient.Interface), nil +} - //initialize {publicipaddressclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipaddressclient PublicIPAddress Interface publicIPAddressRateLimit} - options, err = GetDefaultResourceClientOption(armConfig, config) +func (factory *ClientFactoryImpl) createInterfaceClient(subscription string) (interfaceclient.Interface, error) { + //initialize interfaceclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) if err != nil { return nil, err } + //add ratelimit policy - ratelimitOption = config.GetRateLimitConfig("publicIPAddressRateLimit") - rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption) + ratelimitOption := factory.facotryConfig.GetRateLimitConfig("interfaceRateLimit") + rateLimitPolicy := ratelimit.NewRateLimitPolicy(ratelimitOption) if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } - for _, optionMutFn := range clientOptionsMutFn { + for _, optionMutFn := range factory.clientOptionsMutFn { if optionMutFn != nil { optionMutFn(options) } } - publicipaddressclientInterface, err := publicipaddressclient.New(config.SubscriptionID, cred, options) + return interfaceclient.New(subscription, factory.cred, options) +} + +func (factory *ClientFactoryImpl) GetInterfaceClient() interfaceclient.Interface { + return factory.interfaceclientInterface +} + +func (factory *ClientFactoryImpl) createIPGroupClient(subscription string) (ipgroupclient.Interface, error) { + //initialize ipgroupclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) if err != nil { return nil, err } - //initialize {publicipprefixclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipprefixclient PublicIPPrefix Interface } - options, err = GetDefaultResourceClientOption(armConfig, config) - if err != nil { - return nil, err + //add ratelimit policy + ratelimitOption := factory.facotryConfig.GetRateLimitConfig("ipGroupRateLimit") + rateLimitPolicy := ratelimit.NewRateLimitPolicy(ratelimitOption) + if rateLimitPolicy != nil { + options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } - for _, optionMutFn := range clientOptionsMutFn { + for _, optionMutFn := range factory.clientOptionsMutFn { if optionMutFn != nil { optionMutFn(options) } } - publicipprefixclientInterface, err := publicipprefixclient.New(config.SubscriptionID, cred, options) + return ipgroupclient.New(subscription, factory.cred, options) +} + +func (factory *ClientFactoryImpl) GetIPGroupClient() ipgroupclient.Interface { + return factory.ipgroupclientInterface +} + +func (factory *ClientFactoryImpl) createLoadBalancerClient(subscription string) (loadbalancerclient.Interface, error) { + //initialize loadbalancerclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) if err != nil { return nil, err } - //initialize {registryclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/registryclient Registry Interface } - options, err = GetDefaultResourceClientOption(armConfig, config) - if err != nil { - return nil, err + //add ratelimit policy + ratelimitOption := factory.facotryConfig.GetRateLimitConfig("loadBalancerRateLimit") + rateLimitPolicy := 
ratelimit.NewRateLimitPolicy(ratelimitOption) + if rateLimitPolicy != nil { + options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } - for _, optionMutFn := range clientOptionsMutFn { + for _, optionMutFn := range factory.clientOptionsMutFn { if optionMutFn != nil { optionMutFn(options) } } - registryclientInterface, err := registryclient.New(config.SubscriptionID, cred, options) + return loadbalancerclient.New(subscription, factory.cred, options) +} + +func (factory *ClientFactoryImpl) GetLoadBalancerClient() loadbalancerclient.Interface { + return factory.loadbalancerclientInterface +} + +func (factory *ClientFactoryImpl) createManagedClusterClient(subscription string) (managedclusterclient.Interface, error) { + //initialize managedclusterclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) if err != nil { return nil, err } - //initialize {resourcegroupclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/resourcegroupclient ResourceGroup Interface } - options, err = GetDefaultResourceClientOption(armConfig, config) - if err != nil { - return nil, err + //add ratelimit policy + ratelimitOption := factory.facotryConfig.GetRateLimitConfig("containerServiceRateLimit") + rateLimitPolicy := ratelimit.NewRateLimitPolicy(ratelimitOption) + if rateLimitPolicy != nil { + options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } - for _, optionMutFn := range clientOptionsMutFn { + for _, optionMutFn := range factory.clientOptionsMutFn { if optionMutFn != nil { optionMutFn(options) } } - resourcegroupclientInterface, err := resourcegroupclient.New(config.SubscriptionID, cred, options) - if err != nil { - return nil, err - } + return managedclusterclient.New(subscription, factory.cred, options) +} - //initialize {routetableclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/routetableclient RouteTable Interface routeTableRateLimit} - options, err = GetDefaultResourceClientOption(armConfig, config) +func (factory *ClientFactoryImpl) GetManagedClusterClient() managedclusterclient.Interface { + return factory.managedclusterclientInterface +} + +func (factory *ClientFactoryImpl) createPrivateEndpointClient(subscription string) (privateendpointclient.Interface, error) { + //initialize privateendpointclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) if err != nil { return nil, err } + //add ratelimit policy - ratelimitOption = config.GetRateLimitConfig("routeTableRateLimit") - rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption) + ratelimitOption := factory.facotryConfig.GetRateLimitConfig("privateEndpointRateLimit") + rateLimitPolicy := ratelimit.NewRateLimitPolicy(ratelimitOption) if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } - for _, optionMutFn := range clientOptionsMutFn { + for _, optionMutFn := range factory.clientOptionsMutFn { if optionMutFn != nil { optionMutFn(options) } } - routetableclientInterface, err := routetableclient.New(config.SubscriptionID, cred, options) + return privateendpointclient.New(subscription, factory.cred, options) +} + +func (factory *ClientFactoryImpl) GetPrivateEndpointClient() privateendpointclient.Interface { + return factory.privateendpointclientInterface +} + +func (factory *ClientFactoryImpl) createPrivateLinkServiceClient(subscription string) (privatelinkserviceclient.Interface, 
error) { + //initialize privatelinkserviceclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) if err != nil { return nil, err } - //initialize {secretclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/secretclient Vault Secret Interface } - options, err = GetDefaultResourceClientOption(armConfig, config) - if err != nil { - return nil, err + //add ratelimit policy + ratelimitOption := factory.facotryConfig.GetRateLimitConfig("privateLinkServiceRateLimit") + rateLimitPolicy := ratelimit.NewRateLimitPolicy(ratelimitOption) + if rateLimitPolicy != nil { + options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } - for _, optionMutFn := range clientOptionsMutFn { + for _, optionMutFn := range factory.clientOptionsMutFn { if optionMutFn != nil { optionMutFn(options) } } - secretclientInterface, err := secretclient.New(config.SubscriptionID, cred, options) - if err != nil { - return nil, err - } + return privatelinkserviceclient.New(subscription, factory.cred, options) +} - //initialize {securitygroupclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/securitygroupclient SecurityGroup Interface securityGroupRateLimit} - options, err = GetDefaultResourceClientOption(armConfig, config) +func (factory *ClientFactoryImpl) GetPrivateLinkServiceClient() privatelinkserviceclient.Interface { + return factory.privatelinkserviceclientInterface +} + +func (factory *ClientFactoryImpl) createPrivateZoneClient(subscription string) (privatezoneclient.Interface, error) { + //initialize privatezoneclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) if err != nil { return nil, err } + //add ratelimit policy - ratelimitOption = config.GetRateLimitConfig("securityGroupRateLimit") - rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption) + ratelimitOption := factory.facotryConfig.GetRateLimitConfig("privateDNSRateLimit") + rateLimitPolicy := ratelimit.NewRateLimitPolicy(ratelimitOption) if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } - for _, optionMutFn := range clientOptionsMutFn { + for _, optionMutFn := range factory.clientOptionsMutFn { if optionMutFn != nil { optionMutFn(options) } } - securitygroupclientInterface, err := securitygroupclient.New(config.SubscriptionID, cred, options) + return privatezoneclient.New(subscription, factory.cred, options) +} + +func (factory *ClientFactoryImpl) GetPrivateZoneClient() privatezoneclient.Interface { + return factory.privatezoneclientInterface +} + +func (factory *ClientFactoryImpl) createProviderClient(subscription string) (providerclient.Interface, error) { + //initialize providerclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) if err != nil { return nil, err } - //initialize {snapshotclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient Snapshot Interface snapshotRateLimit} - options, err = GetDefaultResourceClientOption(armConfig, config) + for _, optionMutFn := range factory.clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } + return providerclient.New(subscription, factory.cred, options) +} + +func (factory *ClientFactoryImpl) GetProviderClient() providerclient.Interface { + return factory.providerclientInterface +} + +func (factory *ClientFactoryImpl) createPublicIPAddressClient(subscription string) (publicipaddressclient.Interface, error) { + 
//initialize publicipaddressclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) if err != nil { return nil, err } + //add ratelimit policy - ratelimitOption = config.GetRateLimitConfig("snapshotRateLimit") - rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption) + ratelimitOption := factory.facotryConfig.GetRateLimitConfig("publicIPAddressRateLimit") + rateLimitPolicy := ratelimit.NewRateLimitPolicy(ratelimitOption) if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } - for _, optionMutFn := range clientOptionsMutFn { + for _, optionMutFn := range factory.clientOptionsMutFn { if optionMutFn != nil { optionMutFn(options) } } - snapshotclientInterface, err := snapshotclient.New(config.SubscriptionID, cred, options) + return publicipaddressclient.New(subscription, factory.cred, options) +} + +func (factory *ClientFactoryImpl) GetPublicIPAddressClient() publicipaddressclient.Interface { + return factory.publicipaddressclientInterface +} + +func (factory *ClientFactoryImpl) createPublicIPPrefixClient(subscription string) (publicipprefixclient.Interface, error) { + //initialize publicipprefixclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) if err != nil { return nil, err } - //initialize {sshpublickeyresourceclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/sshpublickeyresourceclient SSHPublicKeyResource Interface } - options, err = GetDefaultResourceClientOption(armConfig, config) + for _, optionMutFn := range factory.clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } + return publicipprefixclient.New(subscription, factory.cred, options) +} + +func (factory *ClientFactoryImpl) GetPublicIPPrefixClient() publicipprefixclient.Interface { + return factory.publicipprefixclientInterface +} + +func (factory *ClientFactoryImpl) createRegistryClient(subscription string) (registryclient.Interface, error) { + //initialize registryclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) if err != nil { return nil, err } - for _, optionMutFn := range clientOptionsMutFn { + + for _, optionMutFn := range factory.clientOptionsMutFn { if optionMutFn != nil { optionMutFn(options) } } - sshpublickeyresourceclientInterface, err := sshpublickeyresourceclient.New(config.SubscriptionID, cred, options) + return registryclient.New(subscription, factory.cred, options) +} + +func (factory *ClientFactoryImpl) GetRegistryClient() registryclient.Interface { + return factory.registryclientInterface +} + +func (factory *ClientFactoryImpl) createResourceGroupClient(subscription string) (resourcegroupclient.Interface, error) { + //initialize resourcegroupclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) if err != nil { return nil, err } - //initialize {subnetclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/subnetclient VirtualNetwork Subnet Interface subnetsRateLimit} - options, err = GetDefaultResourceClientOption(armConfig, config) + for _, optionMutFn := range factory.clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } + return resourcegroupclient.New(subscription, factory.cred, options) +} + +func (factory *ClientFactoryImpl) GetResourceGroupClient() resourcegroupclient.Interface { + return factory.resourcegroupclientInterface +} + +func (factory *ClientFactoryImpl) 
createRouteTableClient(subscription string) (routetableclient.Interface, error) { + //initialize routetableclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) if err != nil { return nil, err } + //add ratelimit policy - ratelimitOption = config.GetRateLimitConfig("subnetsRateLimit") - rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption) + ratelimitOption := factory.facotryConfig.GetRateLimitConfig("routeTableRateLimit") + rateLimitPolicy := ratelimit.NewRateLimitPolicy(ratelimitOption) if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } - for _, optionMutFn := range clientOptionsMutFn { + for _, optionMutFn := range factory.clientOptionsMutFn { if optionMutFn != nil { optionMutFn(options) } } - subnetclientInterface, err := subnetclient.New(config.SubscriptionID, cred, options) - if err != nil { - return nil, err - } + return routetableclient.New(subscription, factory.cred, options) +} + +func (factory *ClientFactoryImpl) GetRouteTableClient() routetableclient.Interface { + return factory.routetableclientInterface +} - //initialize {vaultclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/vaultclient Vault Interface } - options, err = GetDefaultResourceClientOption(armConfig, config) +func (factory *ClientFactoryImpl) createSecretClient(subscription string) (secretclient.Interface, error) { + //initialize secretclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) if err != nil { return nil, err } - for _, optionMutFn := range clientOptionsMutFn { + + for _, optionMutFn := range factory.clientOptionsMutFn { if optionMutFn != nil { optionMutFn(options) } } - vaultclientInterface, err := vaultclient.New(config.SubscriptionID, cred, options) - if err != nil { - return nil, err - } + return secretclient.New(subscription, factory.cred, options) +} - //initialize {virtualmachineclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient VirtualMachine Interface virtualMachineRateLimit} - options, err = GetDefaultResourceClientOption(armConfig, config) +func (factory *ClientFactoryImpl) GetSecretClient() secretclient.Interface { + return factory.secretclientInterface +} + +func (factory *ClientFactoryImpl) createSecurityGroupClient(subscription string) (securitygroupclient.Interface, error) { + //initialize securitygroupclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) if err != nil { return nil, err } + //add ratelimit policy - ratelimitOption = config.GetRateLimitConfig("virtualMachineRateLimit") - rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption) + ratelimitOption := factory.facotryConfig.GetRateLimitConfig("securityGroupRateLimit") + rateLimitPolicy := ratelimit.NewRateLimitPolicy(ratelimitOption) if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } - for _, optionMutFn := range clientOptionsMutFn { + for _, optionMutFn := range factory.clientOptionsMutFn { if optionMutFn != nil { optionMutFn(options) } } - virtualmachineclientInterface, err := virtualmachineclient.New(config.SubscriptionID, cred, options) - if err != nil { - return nil, err - } + return securitygroupclient.New(subscription, factory.cred, options) +} + +func (factory *ClientFactoryImpl) GetSecurityGroupClient() securitygroupclient.Interface { + return factory.securitygroupclientInterface +} - 
//initialize {virtualmachinescalesetclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient VirtualMachineScaleSet Interface virtualMachineSizesRateLimit} - options, err = GetDefaultResourceClientOption(armConfig, config) +func (factory *ClientFactoryImpl) createSnapshotClient(subscription string) (snapshotclient.Interface, error) { + //initialize snapshotclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) if err != nil { return nil, err } + //add ratelimit policy - ratelimitOption = config.GetRateLimitConfig("virtualMachineSizesRateLimit") - rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption) + ratelimitOption := factory.facotryConfig.GetRateLimitConfig("snapshotRateLimit") + rateLimitPolicy := ratelimit.NewRateLimitPolicy(ratelimitOption) if rateLimitPolicy != nil { options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } - for _, optionMutFn := range clientOptionsMutFn { + for _, optionMutFn := range factory.clientOptionsMutFn { if optionMutFn != nil { optionMutFn(options) } } - virtualmachinescalesetclientInterface, err := virtualmachinescalesetclient.New(config.SubscriptionID, cred, options) - if err != nil { - return nil, err - } + return snapshotclient.New(subscription, factory.cred, options) +} + +func (factory *ClientFactoryImpl) GetSnapshotClient() snapshotclient.Interface { + return factory.snapshotclientInterface +} - //initialize {virtualmachinescalesetvmclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient VirtualMachineScaleSet VirtualMachineScaleSetVM Interface } - options, err = GetDefaultResourceClientOption(armConfig, config) +func (factory *ClientFactoryImpl) createSSHPublicKeyResourceClient(subscription string) (sshpublickeyresourceclient.Interface, error) { + //initialize sshpublickeyresourceclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) if err != nil { return nil, err } - for _, optionMutFn := range clientOptionsMutFn { + + for _, optionMutFn := range factory.clientOptionsMutFn { if optionMutFn != nil { optionMutFn(options) } } - virtualmachinescalesetvmclientInterface, err := virtualmachinescalesetvmclient.New(config.SubscriptionID, cred, options) + return sshpublickeyresourceclient.New(subscription, factory.cred, options) +} + +func (factory *ClientFactoryImpl) GetSSHPublicKeyResourceClient() sshpublickeyresourceclient.Interface { + return factory.sshpublickeyresourceclientInterface +} + +func (factory *ClientFactoryImpl) createSubnetClient(subscription string) (subnetclient.Interface, error) { + //initialize subnetclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) if err != nil { return nil, err } - //initialize {virtualnetworkclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient VirtualNetwork Interface } - options, err = GetDefaultResourceClientOption(armConfig, config) - if err != nil { - return nil, err + //add ratelimit policy + ratelimitOption := factory.facotryConfig.GetRateLimitConfig("subnetsRateLimit") + rateLimitPolicy := ratelimit.NewRateLimitPolicy(ratelimitOption) + if rateLimitPolicy != nil { + options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) } - for _, optionMutFn := range clientOptionsMutFn { + for _, optionMutFn := range factory.clientOptionsMutFn { if optionMutFn != nil { optionMutFn(options) } } - 
virtualnetworkclientInterface, err := virtualnetworkclient.New(config.SubscriptionID, cred, options) - if err != nil { - return nil, err - } - - return &ClientFactoryImpl{ - ClientFactoryConfig: config, - cred: cred, accountclientInterface: accountclientInterface, - availabilitysetclientInterface: availabilitysetclientInterface, - blobcontainerclientInterface: blobcontainerclientInterface, - blobservicepropertiesclientInterface: blobservicepropertiesclientInterface, - deploymentclientInterface: deploymentclientInterface, - diskclientInterface: diskclientInterface, - fileshareclientInterface: fileshareclientInterface, - interfaceclientInterface: interfaceclientInterface, - ipgroupclientInterface: ipgroupclientInterface, - loadbalancerclientInterface: loadbalancerclientInterface, - managedclusterclientInterface: managedclusterclientInterface, - privateendpointclientInterface: privateendpointclientInterface, - privatelinkserviceclientInterface: privatelinkserviceclientInterface, - privatezoneclientInterface: privatezoneclientInterface, - providerclientInterface: providerclientInterface, - publicipaddressclientInterface: publicipaddressclientInterface, - publicipprefixclientInterface: publicipprefixclientInterface, - registryclientInterface: registryclientInterface, - resourcegroupclientInterface: resourcegroupclientInterface, - routetableclientInterface: routetableclientInterface, - secretclientInterface: secretclientInterface, - securitygroupclientInterface: securitygroupclientInterface, - snapshotclientInterface: snapshotclientInterface, - sshpublickeyresourceclientInterface: sshpublickeyresourceclientInterface, - subnetclientInterface: subnetclientInterface, - vaultclientInterface: vaultclientInterface, - virtualmachineclientInterface: virtualmachineclientInterface, - virtualmachinescalesetclientInterface: virtualmachinescalesetclientInterface, - virtualmachinescalesetvmclientInterface: virtualmachinescalesetvmclientInterface, - virtualnetworkclientInterface: virtualnetworkclientInterface, - }, nil -} - -func (factory *ClientFactoryImpl) GetAccountClient() accountclient.Interface { - return factory.accountclientInterface -} - -func (factory *ClientFactoryImpl) GetAvailabilitySetClient() availabilitysetclient.Interface { - return factory.availabilitysetclientInterface -} - -func (factory *ClientFactoryImpl) GetBlobContainerClient() blobcontainerclient.Interface { - return factory.blobcontainerclientInterface -} - -func (factory *ClientFactoryImpl) GetBlobServicePropertiesClient() blobservicepropertiesclient.Interface { - return factory.blobservicepropertiesclientInterface -} - -func (factory *ClientFactoryImpl) GetDeploymentClient() deploymentclient.Interface { - return factory.deploymentclientInterface -} - -func (factory *ClientFactoryImpl) GetDiskClient() diskclient.Interface { - return factory.diskclientInterface -} - -func (factory *ClientFactoryImpl) GetFileShareClient() fileshareclient.Interface { - return factory.fileshareclientInterface -} - -func (factory *ClientFactoryImpl) GetInterfaceClient() interfaceclient.Interface { - return factory.interfaceclientInterface -} - -func (factory *ClientFactoryImpl) GetIPGroupClient() ipgroupclient.Interface { - return factory.ipgroupclientInterface + return subnetclient.New(subscription, factory.cred, options) } -func (factory *ClientFactoryImpl) GetLoadBalancerClient() loadbalancerclient.Interface { - return factory.loadbalancerclientInterface -} - -func (factory *ClientFactoryImpl) GetManagedClusterClient() managedclusterclient.Interface { 
- return factory.managedclusterclientInterface -} - -func (factory *ClientFactoryImpl) GetPrivateEndpointClient() privateendpointclient.Interface { - return factory.privateendpointclientInterface +func (factory *ClientFactoryImpl) GetSubnetClient() subnetclient.Interface { + return factory.subnetclientInterface } -func (factory *ClientFactoryImpl) GetPrivateLinkServiceClient() privatelinkserviceclient.Interface { - return factory.privatelinkserviceclientInterface -} +func (factory *ClientFactoryImpl) createVaultClient(subscription string) (vaultclient.Interface, error) { + //initialize vaultclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) + if err != nil { + return nil, err + } -func (factory *ClientFactoryImpl) GetPrivateZoneClient() privatezoneclient.Interface { - return factory.privatezoneclientInterface + for _, optionMutFn := range factory.clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } + return vaultclient.New(subscription, factory.cred, options) } -func (factory *ClientFactoryImpl) GetProviderClient() providerclient.Interface { - return factory.providerclientInterface +func (factory *ClientFactoryImpl) GetVaultClient() vaultclient.Interface { + return factory.vaultclientInterface } -func (factory *ClientFactoryImpl) GetPublicIPAddressClient() publicipaddressclient.Interface { - return factory.publicipaddressclientInterface -} +func (factory *ClientFactoryImpl) createVirtualMachineClient(subscription string) (virtualmachineclient.Interface, error) { + //initialize virtualmachineclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) + if err != nil { + return nil, err + } -func (factory *ClientFactoryImpl) GetPublicIPPrefixClient() publicipprefixclient.Interface { - return factory.publicipprefixclientInterface + //add ratelimit policy + ratelimitOption := factory.facotryConfig.GetRateLimitConfig("virtualMachineRateLimit") + rateLimitPolicy := ratelimit.NewRateLimitPolicy(ratelimitOption) + if rateLimitPolicy != nil { + options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) + } + for _, optionMutFn := range factory.clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } + return virtualmachineclient.New(subscription, factory.cred, options) } -func (factory *ClientFactoryImpl) GetRegistryClient() registryclient.Interface { - return factory.registryclientInterface +func (factory *ClientFactoryImpl) GetVirtualMachineClient() virtualmachineclient.Interface { + return factory.virtualmachineclientInterface } -func (factory *ClientFactoryImpl) GetResourceGroupClient() resourcegroupclient.Interface { - return factory.resourcegroupclientInterface -} +func (factory *ClientFactoryImpl) createVirtualMachineScaleSetClient(subscription string) (virtualmachinescalesetclient.Interface, error) { + //initialize virtualmachinescalesetclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) + if err != nil { + return nil, err + } -func (factory *ClientFactoryImpl) GetRouteTableClient() routetableclient.Interface { - return factory.routetableclientInterface + //add ratelimit policy + ratelimitOption := factory.facotryConfig.GetRateLimitConfig("virtualMachineSizesRateLimit") + rateLimitPolicy := ratelimit.NewRateLimitPolicy(ratelimitOption) + if rateLimitPolicy != nil { + options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) + } + 
for _, optionMutFn := range factory.clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } + return virtualmachinescalesetclient.New(subscription, factory.cred, options) } -func (factory *ClientFactoryImpl) GetSecretClient() secretclient.Interface { - return factory.secretclientInterface +func (factory *ClientFactoryImpl) GetVirtualMachineScaleSetClient() virtualmachinescalesetclient.Interface { + return factory.virtualmachinescalesetclientInterface } -func (factory *ClientFactoryImpl) GetSecurityGroupClient() securitygroupclient.Interface { - return factory.securitygroupclientInterface -} +func (factory *ClientFactoryImpl) createVirtualMachineScaleSetVMClient(subscription string) (virtualmachinescalesetvmclient.Interface, error) { + //initialize virtualmachinescalesetvmclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) + if err != nil { + return nil, err + } -func (factory *ClientFactoryImpl) GetSnapshotClient() snapshotclient.Interface { - return factory.snapshotclientInterface + for _, optionMutFn := range factory.clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } + return virtualmachinescalesetvmclient.New(subscription, factory.cred, options) } -func (factory *ClientFactoryImpl) GetSSHPublicKeyResourceClient() sshpublickeyresourceclient.Interface { - return factory.sshpublickeyresourceclientInterface +func (factory *ClientFactoryImpl) GetVirtualMachineScaleSetVMClient() virtualmachinescalesetvmclient.Interface { + return factory.virtualmachinescalesetvmclientInterface } -func (factory *ClientFactoryImpl) GetSubnetClient() subnetclient.Interface { - return factory.subnetclientInterface -} +func (factory *ClientFactoryImpl) createVirtualNetworkClient(subscription string) (virtualnetworkclient.Interface, error) { + //initialize virtualnetworkclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) + if err != nil { + return nil, err + } -func (factory *ClientFactoryImpl) GetVaultClient() vaultclient.Interface { - return factory.vaultclientInterface + for _, optionMutFn := range factory.clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } + return virtualnetworkclient.New(subscription, factory.cred, options) } -func (factory *ClientFactoryImpl) GetVirtualMachineClient() virtualmachineclient.Interface { - return factory.virtualmachineclientInterface +func (factory *ClientFactoryImpl) GetVirtualNetworkClient() virtualnetworkclient.Interface { + return factory.virtualnetworkclientInterface } -func (factory *ClientFactoryImpl) GetVirtualMachineScaleSetClient() virtualmachinescalesetclient.Interface { - return factory.virtualmachinescalesetclientInterface -} +func (factory *ClientFactoryImpl) createVirtualNetworkLinkClient(subscription string) (virtualnetworklinkclient.Interface, error) { + //initialize virtualnetworklinkclient + options, err := GetDefaultResourceClientOption(factory.armConfig, factory.facotryConfig) + if err != nil { + return nil, err + } -func (factory *ClientFactoryImpl) GetVirtualMachineScaleSetVMClient() virtualmachinescalesetvmclient.Interface { - return factory.virtualmachinescalesetvmclientInterface + //add ratelimit policy + ratelimitOption := factory.facotryConfig.GetRateLimitConfig("virtualNetworkRateLimit") + rateLimitPolicy := ratelimit.NewRateLimitPolicy(ratelimitOption) + if rateLimitPolicy != nil { + options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy) 
+ } + for _, optionMutFn := range factory.clientOptionsMutFn { + if optionMutFn != nil { + optionMutFn(options) + } + } + return virtualnetworklinkclient.New(subscription, factory.cred, options) } -func (factory *ClientFactoryImpl) GetVirtualNetworkClient() virtualnetworkclient.Interface { - return factory.virtualnetworkclientInterface +func (factory *ClientFactoryImpl) GetVirtualNetworkLinkClient() virtualnetworklinkclient.Interface { + return factory.virtualnetworklinkclientInterface } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/fileshareclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/fileshareclient/interface.go index e040d42ce..74a5b43c8 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/fileshareclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/fileshareclient/interface.go @@ -25,7 +25,7 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" ) -// +azure:client:verbs=get,resource=Account,subResource=FileShare,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage,packageAlias=armstorage,clientName=FileSharesClient,expand=false +// +azure:client:verbs=get,resource=Account,subResource=FileShare,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage,packageAlias=armstorage,clientName=FileSharesClient,expand=false,crossSubFactory=true type Interface interface { utils.SubResourceGetFunc[armstorage.FileShare] Create(ctx context.Context, resourceGroupName string, resourceName string, parentResourceName string, resource armstorage.FileShare) (*armstorage.FileShare, error) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/fileshareclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/fileshareclient/zz_generated_client.go index 3b1c40a54..548f2ab67 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/fileshareclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/fileshareclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armstorage "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,20 +32,28 @@ import ( type Client struct { *armstorage.FileSharesClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armstorage.NewFileSharesClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + FileSharesClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const GetOperationName = "FileSharesClient.Get" + // Get gets the FileShare func (client *Client) Get(ctx context.Context, resourceGroupName string, parentResourceName string, resourceName string) (result *armstorage.FileShare, rerr error) { @@ -51,6 +61,8 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, parentR ctx = utils.ContextWithRequestMethod(ctx, "Get") ctx = utils.ContextWithResourceGroupName(ctx, 
resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, GetOperationName, client.tracer, nil) + defer endSpan(rerr) resp, err := client.FileSharesClient.Get(ctx, resourceGroupName, parentResourceName, resourceName, nil) if err != nil { return nil, err diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/interfaceclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/interfaceclient/zz_generated_client.go index b653ace2e..1828a0f67 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/interfaceclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/interfaceclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,20 +32,28 @@ import ( type Client struct { *armnetwork.InterfacesClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armnetwork.NewInterfacesClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + InterfacesClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const GetOperationName = "InterfacesClient.Get" + // Get gets the Interface func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string, expand *string) (result *armnetwork.Interface, rerr error) { var ops *armnetwork.InterfacesClientGetOptions @@ -54,6 +64,8 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc ctx = utils.ContextWithRequestMethod(ctx, "Get") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, GetOperationName, client.tracer, nil) + defer endSpan(rerr) resp, err := client.InterfacesClient.Get(ctx, resourceGroupName, resourceName, ops) if err != nil { return nil, err @@ -62,12 +74,16 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc return &resp.Interface, nil } +const CreateOrUpdateOperationName = "InterfacesClient.Create" + // CreateOrUpdate creates or updates a Interface. 
-func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.Interface) (*armnetwork.Interface, error) { +func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.Interface) (result *armnetwork.Interface, err error) { ctx = utils.ContextWithClientName(ctx, "InterfacesClient") ctx = utils.ContextWithRequestMethod(ctx, "CreateOrUpdate") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, CreateOrUpdateOperationName, client.tracer, nil) + defer endSpan(err) resp, err := utils.NewPollerWrapper(client.InterfacesClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx) if err != nil { return nil, err @@ -78,22 +94,30 @@ func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName stri return nil, nil } +const DeleteOperationName = "InterfacesClient.Delete" + // Delete deletes a Interface by name. -func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error { +func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) (err error) { ctx = utils.ContextWithClientName(ctx, "InterfacesClient") ctx = utils.ContextWithRequestMethod(ctx, "Delete") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) - _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) + ctx, endSpan := runtime.StartSpan(ctx, DeleteOperationName, client.tracer, nil) + defer endSpan(err) + _, err = utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) return err } +const ListOperationName = "InterfacesClient.List" + // List gets a list of Interface in the resource group. 
func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armnetwork.Interface, rerr error) { ctx = utils.ContextWithClientName(ctx, "InterfacesClient") ctx = utils.ContextWithRequestMethod(ctx, "List") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, ListOperationName, client.tracer, nil) + defer endSpan(rerr) pager := client.InterfacesClient.NewListPager(resourceGroupName, nil) for pager.More() { nextResult, err := pager.NextPage(ctx) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/ipgroupclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/ipgroupclient/zz_generated_client.go index 2dd6ec6a5..4f971d59c 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/ipgroupclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/ipgroupclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,20 +32,28 @@ import ( type Client struct { *armnetwork.IPGroupsClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armnetwork.NewIPGroupsClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + IPGroupsClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const GetOperationName = "IPGroupsClient.Get" + // Get gets the IPGroup func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string, expand *string) (result *armnetwork.IPGroup, rerr error) { var ops *armnetwork.IPGroupsClientGetOptions @@ -54,6 +64,8 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc ctx = utils.ContextWithRequestMethod(ctx, "Get") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, GetOperationName, client.tracer, nil) + defer endSpan(rerr) resp, err := client.IPGroupsClient.Get(ctx, resourceGroupName, resourceName, ops) if err != nil { return nil, err @@ -62,12 +74,16 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc return &resp.IPGroup, nil } +const CreateOrUpdateOperationName = "IPGroupsClient.Create" + // CreateOrUpdate creates or updates a IPGroup. 
-func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.IPGroup) (*armnetwork.IPGroup, error) { +func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.IPGroup) (result *armnetwork.IPGroup, err error) { ctx = utils.ContextWithClientName(ctx, "IPGroupsClient") ctx = utils.ContextWithRequestMethod(ctx, "CreateOrUpdate") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, CreateOrUpdateOperationName, client.tracer, nil) + defer endSpan(err) resp, err := utils.NewPollerWrapper(client.IPGroupsClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx) if err != nil { return nil, err @@ -78,22 +94,30 @@ func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName stri return nil, nil } +const DeleteOperationName = "IPGroupsClient.Delete" + // Delete deletes a IPGroup by name. -func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error { +func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) (err error) { ctx = utils.ContextWithClientName(ctx, "IPGroupsClient") ctx = utils.ContextWithRequestMethod(ctx, "Delete") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) - _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) + ctx, endSpan := runtime.StartSpan(ctx, DeleteOperationName, client.tracer, nil) + defer endSpan(err) + _, err = utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) return err } +const ListOperationName = "IPGroupsClient.List" + // List gets a list of IPGroup in the resource group. 
func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armnetwork.IPGroup, rerr error) { ctx = utils.ContextWithClientName(ctx, "IPGroupsClient") ctx = utils.ContextWithRequestMethod(ctx, "List") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, ListOperationName, client.tracer, nil) + defer endSpan(rerr) pager := client.IPGroupsClient.NewListByResourceGroupPager(resourceGroupName, nil) for pager.More() { nextResult, err := pager.NextPage(ctx) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/loadbalancerclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/loadbalancerclient/zz_generated_client.go index 12b23c4b6..f11f919a4 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/loadbalancerclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/loadbalancerclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,20 +32,28 @@ import ( type Client struct { *armnetwork.LoadBalancersClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armnetwork.NewLoadBalancersClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + LoadBalancersClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const GetOperationName = "LoadBalancersClient.Get" + // Get gets the LoadBalancer func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string, expand *string) (result *armnetwork.LoadBalancer, rerr error) { var ops *armnetwork.LoadBalancersClientGetOptions @@ -54,6 +64,8 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc ctx = utils.ContextWithRequestMethod(ctx, "Get") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, GetOperationName, client.tracer, nil) + defer endSpan(rerr) resp, err := client.LoadBalancersClient.Get(ctx, resourceGroupName, resourceName, ops) if err != nil { return nil, err @@ -62,12 +74,16 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc return &resp.LoadBalancer, nil } +const CreateOrUpdateOperationName = "LoadBalancersClient.Create" + // CreateOrUpdate creates or updates a LoadBalancer. 
-func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.LoadBalancer) (*armnetwork.LoadBalancer, error) { +func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.LoadBalancer) (result *armnetwork.LoadBalancer, err error) { ctx = utils.ContextWithClientName(ctx, "LoadBalancersClient") ctx = utils.ContextWithRequestMethod(ctx, "CreateOrUpdate") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, CreateOrUpdateOperationName, client.tracer, nil) + defer endSpan(err) resp, err := utils.NewPollerWrapper(client.LoadBalancersClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx) if err != nil { return nil, err @@ -78,22 +94,30 @@ func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName stri return nil, nil } +const DeleteOperationName = "LoadBalancersClient.Delete" + // Delete deletes a LoadBalancer by name. -func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error { +func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) (err error) { ctx = utils.ContextWithClientName(ctx, "LoadBalancersClient") ctx = utils.ContextWithRequestMethod(ctx, "Delete") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) - _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) + ctx, endSpan := runtime.StartSpan(ctx, DeleteOperationName, client.tracer, nil) + defer endSpan(err) + _, err = utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) return err } +const ListOperationName = "LoadBalancersClient.List" + // List gets a list of LoadBalancer in the resource group. 
func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armnetwork.LoadBalancer, rerr error) { ctx = utils.ContextWithClientName(ctx, "LoadBalancersClient") ctx = utils.ContextWithRequestMethod(ctx, "List") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, ListOperationName, client.tracer, nil) + defer endSpan(rerr) pager := client.LoadBalancersClient.NewListPager(resourceGroupName, nil) for pager.More() { nextResult, err := pager.NextPage(ctx) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/managedclusterclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/managedclusterclient/zz_generated_client.go index c211727a3..e599146da 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/managedclusterclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/managedclusterclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armcontainerservice "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,20 +32,28 @@ import ( type Client struct { *armcontainerservice.ManagedClustersClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armcontainerservice.NewManagedClustersClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + ManagedClustersClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const GetOperationName = "ManagedClustersClient.Get" + // Get gets the ManagedCluster func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string) (result *armcontainerservice.ManagedCluster, rerr error) { @@ -51,6 +61,8 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc ctx = utils.ContextWithRequestMethod(ctx, "Get") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, GetOperationName, client.tracer, nil) + defer endSpan(rerr) resp, err := client.ManagedClustersClient.Get(ctx, resourceGroupName, resourceName, nil) if err != nil { return nil, err @@ -59,12 +71,16 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc return &resp.ManagedCluster, nil } +const CreateOrUpdateOperationName = "ManagedClustersClient.Create" + // CreateOrUpdate creates or updates a ManagedCluster. 
-func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armcontainerservice.ManagedCluster) (*armcontainerservice.ManagedCluster, error) { +func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armcontainerservice.ManagedCluster) (result *armcontainerservice.ManagedCluster, err error) { ctx = utils.ContextWithClientName(ctx, "ManagedClustersClient") ctx = utils.ContextWithRequestMethod(ctx, "CreateOrUpdate") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, CreateOrUpdateOperationName, client.tracer, nil) + defer endSpan(err) resp, err := utils.NewPollerWrapper(client.ManagedClustersClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx) if err != nil { return nil, err @@ -75,22 +91,30 @@ func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName stri return nil, nil } +const DeleteOperationName = "ManagedClustersClient.Delete" + // Delete deletes a ManagedCluster by name. -func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error { +func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) (err error) { ctx = utils.ContextWithClientName(ctx, "ManagedClustersClient") ctx = utils.ContextWithRequestMethod(ctx, "Delete") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) - _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) + ctx, endSpan := runtime.StartSpan(ctx, DeleteOperationName, client.tracer, nil) + defer endSpan(err) + _, err = utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) return err } +const ListOperationName = "ManagedClustersClient.List" + // List gets a list of ManagedCluster in the resource group. func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armcontainerservice.ManagedCluster, rerr error) { ctx = utils.ContextWithClientName(ctx, "ManagedClustersClient") ctx = utils.ContextWithRequestMethod(ctx, "List") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, ListOperationName, client.tracer, nil) + defer endSpan(rerr) pager := client.ManagedClustersClient.NewListByResourceGroupPager(resourceGroupName, nil) for pager.More() { nextResult, err := pager.NextPage(ctx) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient/interface.go new file mode 100644 index 000000000..763b5b338 --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient/interface.go @@ -0,0 +1,581 @@ +// /* +// Copyright The Kubernetes Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// */ + +// Code generated by MockGen. DO NOT EDIT. +// Source: sigs.k8s.io/cloud-provider-azure/pkg/azclient (interfaces: ClientFactory) +// +// Generated by this command: +// +// mockgen -package mock_azclient sigs.k8s.io/cloud-provider-azure/pkg/azclient ClientFactory +// + +// Package mock_azclient is a generated GoMock package. +package mock_azclient + +import ( + reflect "reflect" + + gomock "go.uber.org/mock/gomock" + + accountclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient" + availabilitysetclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/availabilitysetclient" + blobcontainerclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobcontainerclient" + blobservicepropertiesclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobservicepropertiesclient" + deploymentclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/deploymentclient" + diskclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient" + fileshareclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/fileshareclient" + interfaceclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/interfaceclient" + ipgroupclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/ipgroupclient" + loadbalancerclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/loadbalancerclient" + managedclusterclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/managedclusterclient" + privateendpointclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/privateendpointclient" + privatelinkserviceclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatelinkserviceclient" + privatezoneclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatezoneclient" + providerclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/providerclient" + publicipaddressclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipaddressclient" + publicipprefixclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipprefixclient" + registryclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/registryclient" + resourcegroupclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/resourcegroupclient" + routetableclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/routetableclient" + secretclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/secretclient" + securitygroupclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/securitygroupclient" + snapshotclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient" + sshpublickeyresourceclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/sshpublickeyresourceclient" + subnetclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/subnetclient" + vaultclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/vaultclient" + virtualmachineclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient" + virtualmachinescalesetclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient" + virtualmachinescalesetvmclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient" + virtualnetworkclient "sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient" + virtualnetworklinkclient 
"sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient" +) + +// MockClientFactory is a mock of ClientFactory interface. +type MockClientFactory struct { + ctrl *gomock.Controller + recorder *MockClientFactoryMockRecorder +} + +// MockClientFactoryMockRecorder is the mock recorder for MockClientFactory. +type MockClientFactoryMockRecorder struct { + mock *MockClientFactory +} + +// NewMockClientFactory creates a new mock instance. +func NewMockClientFactory(ctrl *gomock.Controller) *MockClientFactory { + mock := &MockClientFactory{ctrl: ctrl} + mock.recorder = &MockClientFactoryMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockClientFactory) EXPECT() *MockClientFactoryMockRecorder { + return m.recorder +} + +// GetAccountClient mocks base method. +func (m *MockClientFactory) GetAccountClient() accountclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountClient") + ret0, _ := ret[0].(accountclient.Interface) + return ret0 +} + +// GetAccountClient indicates an expected call of GetAccountClient. +func (mr *MockClientFactoryMockRecorder) GetAccountClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountClient", reflect.TypeOf((*MockClientFactory)(nil).GetAccountClient)) +} + +// GetAccountClientForSub mocks base method. +func (m *MockClientFactory) GetAccountClientForSub(arg0 string) (accountclient.Interface, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountClientForSub", arg0) + ret0, _ := ret[0].(accountclient.Interface) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountClientForSub indicates an expected call of GetAccountClientForSub. +func (mr *MockClientFactoryMockRecorder) GetAccountClientForSub(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountClientForSub", reflect.TypeOf((*MockClientFactory)(nil).GetAccountClientForSub), arg0) +} + +// GetAvailabilitySetClient mocks base method. +func (m *MockClientFactory) GetAvailabilitySetClient() availabilitysetclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAvailabilitySetClient") + ret0, _ := ret[0].(availabilitysetclient.Interface) + return ret0 +} + +// GetAvailabilitySetClient indicates an expected call of GetAvailabilitySetClient. +func (mr *MockClientFactoryMockRecorder) GetAvailabilitySetClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAvailabilitySetClient", reflect.TypeOf((*MockClientFactory)(nil).GetAvailabilitySetClient)) +} + +// GetBlobContainerClient mocks base method. +func (m *MockClientFactory) GetBlobContainerClient() blobcontainerclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlobContainerClient") + ret0, _ := ret[0].(blobcontainerclient.Interface) + return ret0 +} + +// GetBlobContainerClient indicates an expected call of GetBlobContainerClient. +func (mr *MockClientFactoryMockRecorder) GetBlobContainerClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlobContainerClient", reflect.TypeOf((*MockClientFactory)(nil).GetBlobContainerClient)) +} + +// GetBlobContainerClientForSub mocks base method. 
+func (m *MockClientFactory) GetBlobContainerClientForSub(arg0 string) (blobcontainerclient.Interface, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlobContainerClientForSub", arg0) + ret0, _ := ret[0].(blobcontainerclient.Interface) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlobContainerClientForSub indicates an expected call of GetBlobContainerClientForSub. +func (mr *MockClientFactoryMockRecorder) GetBlobContainerClientForSub(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlobContainerClientForSub", reflect.TypeOf((*MockClientFactory)(nil).GetBlobContainerClientForSub), arg0) +} + +// GetBlobServicePropertiesClient mocks base method. +func (m *MockClientFactory) GetBlobServicePropertiesClient() blobservicepropertiesclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlobServicePropertiesClient") + ret0, _ := ret[0].(blobservicepropertiesclient.Interface) + return ret0 +} + +// GetBlobServicePropertiesClient indicates an expected call of GetBlobServicePropertiesClient. +func (mr *MockClientFactoryMockRecorder) GetBlobServicePropertiesClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlobServicePropertiesClient", reflect.TypeOf((*MockClientFactory)(nil).GetBlobServicePropertiesClient)) +} + +// GetDeploymentClient mocks base method. +func (m *MockClientFactory) GetDeploymentClient() deploymentclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDeploymentClient") + ret0, _ := ret[0].(deploymentclient.Interface) + return ret0 +} + +// GetDeploymentClient indicates an expected call of GetDeploymentClient. +func (mr *MockClientFactoryMockRecorder) GetDeploymentClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeploymentClient", reflect.TypeOf((*MockClientFactory)(nil).GetDeploymentClient)) +} + +// GetDiskClient mocks base method. +func (m *MockClientFactory) GetDiskClient() diskclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDiskClient") + ret0, _ := ret[0].(diskclient.Interface) + return ret0 +} + +// GetDiskClient indicates an expected call of GetDiskClient. +func (mr *MockClientFactoryMockRecorder) GetDiskClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDiskClient", reflect.TypeOf((*MockClientFactory)(nil).GetDiskClient)) +} + +// GetDiskClientForSub mocks base method. +func (m *MockClientFactory) GetDiskClientForSub(arg0 string) (diskclient.Interface, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDiskClientForSub", arg0) + ret0, _ := ret[0].(diskclient.Interface) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDiskClientForSub indicates an expected call of GetDiskClientForSub. +func (mr *MockClientFactoryMockRecorder) GetDiskClientForSub(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDiskClientForSub", reflect.TypeOf((*MockClientFactory)(nil).GetDiskClientForSub), arg0) +} + +// GetFileShareClient mocks base method. +func (m *MockClientFactory) GetFileShareClient() fileshareclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFileShareClient") + ret0, _ := ret[0].(fileshareclient.Interface) + return ret0 +} + +// GetFileShareClient indicates an expected call of GetFileShareClient. 
+func (mr *MockClientFactoryMockRecorder) GetFileShareClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFileShareClient", reflect.TypeOf((*MockClientFactory)(nil).GetFileShareClient)) +} + +// GetFileShareClientForSub mocks base method. +func (m *MockClientFactory) GetFileShareClientForSub(arg0 string) (fileshareclient.Interface, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFileShareClientForSub", arg0) + ret0, _ := ret[0].(fileshareclient.Interface) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetFileShareClientForSub indicates an expected call of GetFileShareClientForSub. +func (mr *MockClientFactoryMockRecorder) GetFileShareClientForSub(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFileShareClientForSub", reflect.TypeOf((*MockClientFactory)(nil).GetFileShareClientForSub), arg0) +} + +// GetIPGroupClient mocks base method. +func (m *MockClientFactory) GetIPGroupClient() ipgroupclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetIPGroupClient") + ret0, _ := ret[0].(ipgroupclient.Interface) + return ret0 +} + +// GetIPGroupClient indicates an expected call of GetIPGroupClient. +func (mr *MockClientFactoryMockRecorder) GetIPGroupClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIPGroupClient", reflect.TypeOf((*MockClientFactory)(nil).GetIPGroupClient)) +} + +// GetInterfaceClient mocks base method. +func (m *MockClientFactory) GetInterfaceClient() interfaceclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetInterfaceClient") + ret0, _ := ret[0].(interfaceclient.Interface) + return ret0 +} + +// GetInterfaceClient indicates an expected call of GetInterfaceClient. +func (mr *MockClientFactoryMockRecorder) GetInterfaceClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInterfaceClient", reflect.TypeOf((*MockClientFactory)(nil).GetInterfaceClient)) +} + +// GetLoadBalancerClient mocks base method. +func (m *MockClientFactory) GetLoadBalancerClient() loadbalancerclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLoadBalancerClient") + ret0, _ := ret[0].(loadbalancerclient.Interface) + return ret0 +} + +// GetLoadBalancerClient indicates an expected call of GetLoadBalancerClient. +func (mr *MockClientFactoryMockRecorder) GetLoadBalancerClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLoadBalancerClient", reflect.TypeOf((*MockClientFactory)(nil).GetLoadBalancerClient)) +} + +// GetManagedClusterClient mocks base method. +func (m *MockClientFactory) GetManagedClusterClient() managedclusterclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetManagedClusterClient") + ret0, _ := ret[0].(managedclusterclient.Interface) + return ret0 +} + +// GetManagedClusterClient indicates an expected call of GetManagedClusterClient. +func (mr *MockClientFactoryMockRecorder) GetManagedClusterClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetManagedClusterClient", reflect.TypeOf((*MockClientFactory)(nil).GetManagedClusterClient)) +} + +// GetPrivateEndpointClient mocks base method. 
+func (m *MockClientFactory) GetPrivateEndpointClient() privateendpointclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPrivateEndpointClient") + ret0, _ := ret[0].(privateendpointclient.Interface) + return ret0 +} + +// GetPrivateEndpointClient indicates an expected call of GetPrivateEndpointClient. +func (mr *MockClientFactoryMockRecorder) GetPrivateEndpointClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPrivateEndpointClient", reflect.TypeOf((*MockClientFactory)(nil).GetPrivateEndpointClient)) +} + +// GetPrivateLinkServiceClient mocks base method. +func (m *MockClientFactory) GetPrivateLinkServiceClient() privatelinkserviceclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPrivateLinkServiceClient") + ret0, _ := ret[0].(privatelinkserviceclient.Interface) + return ret0 +} + +// GetPrivateLinkServiceClient indicates an expected call of GetPrivateLinkServiceClient. +func (mr *MockClientFactoryMockRecorder) GetPrivateLinkServiceClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPrivateLinkServiceClient", reflect.TypeOf((*MockClientFactory)(nil).GetPrivateLinkServiceClient)) +} + +// GetPrivateZoneClient mocks base method. +func (m *MockClientFactory) GetPrivateZoneClient() privatezoneclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPrivateZoneClient") + ret0, _ := ret[0].(privatezoneclient.Interface) + return ret0 +} + +// GetPrivateZoneClient indicates an expected call of GetPrivateZoneClient. +func (mr *MockClientFactoryMockRecorder) GetPrivateZoneClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPrivateZoneClient", reflect.TypeOf((*MockClientFactory)(nil).GetPrivateZoneClient)) +} + +// GetProviderClient mocks base method. +func (m *MockClientFactory) GetProviderClient() providerclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProviderClient") + ret0, _ := ret[0].(providerclient.Interface) + return ret0 +} + +// GetProviderClient indicates an expected call of GetProviderClient. +func (mr *MockClientFactoryMockRecorder) GetProviderClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProviderClient", reflect.TypeOf((*MockClientFactory)(nil).GetProviderClient)) +} + +// GetPublicIPAddressClient mocks base method. +func (m *MockClientFactory) GetPublicIPAddressClient() publicipaddressclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPublicIPAddressClient") + ret0, _ := ret[0].(publicipaddressclient.Interface) + return ret0 +} + +// GetPublicIPAddressClient indicates an expected call of GetPublicIPAddressClient. +func (mr *MockClientFactoryMockRecorder) GetPublicIPAddressClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPublicIPAddressClient", reflect.TypeOf((*MockClientFactory)(nil).GetPublicIPAddressClient)) +} + +// GetPublicIPPrefixClient mocks base method. +func (m *MockClientFactory) GetPublicIPPrefixClient() publicipprefixclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPublicIPPrefixClient") + ret0, _ := ret[0].(publicipprefixclient.Interface) + return ret0 +} + +// GetPublicIPPrefixClient indicates an expected call of GetPublicIPPrefixClient. 
+func (mr *MockClientFactoryMockRecorder) GetPublicIPPrefixClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPublicIPPrefixClient", reflect.TypeOf((*MockClientFactory)(nil).GetPublicIPPrefixClient)) +} + +// GetRegistryClient mocks base method. +func (m *MockClientFactory) GetRegistryClient() registryclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRegistryClient") + ret0, _ := ret[0].(registryclient.Interface) + return ret0 +} + +// GetRegistryClient indicates an expected call of GetRegistryClient. +func (mr *MockClientFactoryMockRecorder) GetRegistryClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRegistryClient", reflect.TypeOf((*MockClientFactory)(nil).GetRegistryClient)) +} + +// GetResourceGroupClient mocks base method. +func (m *MockClientFactory) GetResourceGroupClient() resourcegroupclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetResourceGroupClient") + ret0, _ := ret[0].(resourcegroupclient.Interface) + return ret0 +} + +// GetResourceGroupClient indicates an expected call of GetResourceGroupClient. +func (mr *MockClientFactoryMockRecorder) GetResourceGroupClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetResourceGroupClient", reflect.TypeOf((*MockClientFactory)(nil).GetResourceGroupClient)) +} + +// GetRouteTableClient mocks base method. +func (m *MockClientFactory) GetRouteTableClient() routetableclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRouteTableClient") + ret0, _ := ret[0].(routetableclient.Interface) + return ret0 +} + +// GetRouteTableClient indicates an expected call of GetRouteTableClient. +func (mr *MockClientFactoryMockRecorder) GetRouteTableClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRouteTableClient", reflect.TypeOf((*MockClientFactory)(nil).GetRouteTableClient)) +} + +// GetSSHPublicKeyResourceClient mocks base method. +func (m *MockClientFactory) GetSSHPublicKeyResourceClient() sshpublickeyresourceclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSSHPublicKeyResourceClient") + ret0, _ := ret[0].(sshpublickeyresourceclient.Interface) + return ret0 +} + +// GetSSHPublicKeyResourceClient indicates an expected call of GetSSHPublicKeyResourceClient. +func (mr *MockClientFactoryMockRecorder) GetSSHPublicKeyResourceClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSSHPublicKeyResourceClient", reflect.TypeOf((*MockClientFactory)(nil).GetSSHPublicKeyResourceClient)) +} + +// GetSecretClient mocks base method. +func (m *MockClientFactory) GetSecretClient() secretclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSecretClient") + ret0, _ := ret[0].(secretclient.Interface) + return ret0 +} + +// GetSecretClient indicates an expected call of GetSecretClient. +func (mr *MockClientFactoryMockRecorder) GetSecretClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSecretClient", reflect.TypeOf((*MockClientFactory)(nil).GetSecretClient)) +} + +// GetSecurityGroupClient mocks base method. 
+func (m *MockClientFactory) GetSecurityGroupClient() securitygroupclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSecurityGroupClient") + ret0, _ := ret[0].(securitygroupclient.Interface) + return ret0 +} + +// GetSecurityGroupClient indicates an expected call of GetSecurityGroupClient. +func (mr *MockClientFactoryMockRecorder) GetSecurityGroupClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSecurityGroupClient", reflect.TypeOf((*MockClientFactory)(nil).GetSecurityGroupClient)) +} + +// GetSnapshotClient mocks base method. +func (m *MockClientFactory) GetSnapshotClient() snapshotclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSnapshotClient") + ret0, _ := ret[0].(snapshotclient.Interface) + return ret0 +} + +// GetSnapshotClient indicates an expected call of GetSnapshotClient. +func (mr *MockClientFactoryMockRecorder) GetSnapshotClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSnapshotClient", reflect.TypeOf((*MockClientFactory)(nil).GetSnapshotClient)) +} + +// GetSubnetClient mocks base method. +func (m *MockClientFactory) GetSubnetClient() subnetclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSubnetClient") + ret0, _ := ret[0].(subnetclient.Interface) + return ret0 +} + +// GetSubnetClient indicates an expected call of GetSubnetClient. +func (mr *MockClientFactoryMockRecorder) GetSubnetClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetClient", reflect.TypeOf((*MockClientFactory)(nil).GetSubnetClient)) +} + +// GetVaultClient mocks base method. +func (m *MockClientFactory) GetVaultClient() vaultclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVaultClient") + ret0, _ := ret[0].(vaultclient.Interface) + return ret0 +} + +// GetVaultClient indicates an expected call of GetVaultClient. +func (mr *MockClientFactoryMockRecorder) GetVaultClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVaultClient", reflect.TypeOf((*MockClientFactory)(nil).GetVaultClient)) +} + +// GetVirtualMachineClient mocks base method. +func (m *MockClientFactory) GetVirtualMachineClient() virtualmachineclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVirtualMachineClient") + ret0, _ := ret[0].(virtualmachineclient.Interface) + return ret0 +} + +// GetVirtualMachineClient indicates an expected call of GetVirtualMachineClient. +func (mr *MockClientFactoryMockRecorder) GetVirtualMachineClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVirtualMachineClient", reflect.TypeOf((*MockClientFactory)(nil).GetVirtualMachineClient)) +} + +// GetVirtualMachineScaleSetClient mocks base method. +func (m *MockClientFactory) GetVirtualMachineScaleSetClient() virtualmachinescalesetclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVirtualMachineScaleSetClient") + ret0, _ := ret[0].(virtualmachinescalesetclient.Interface) + return ret0 +} + +// GetVirtualMachineScaleSetClient indicates an expected call of GetVirtualMachineScaleSetClient. 
+func (mr *MockClientFactoryMockRecorder) GetVirtualMachineScaleSetClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVirtualMachineScaleSetClient", reflect.TypeOf((*MockClientFactory)(nil).GetVirtualMachineScaleSetClient)) +} + +// GetVirtualMachineScaleSetVMClient mocks base method. +func (m *MockClientFactory) GetVirtualMachineScaleSetVMClient() virtualmachinescalesetvmclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVirtualMachineScaleSetVMClient") + ret0, _ := ret[0].(virtualmachinescalesetvmclient.Interface) + return ret0 +} + +// GetVirtualMachineScaleSetVMClient indicates an expected call of GetVirtualMachineScaleSetVMClient. +func (mr *MockClientFactoryMockRecorder) GetVirtualMachineScaleSetVMClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVirtualMachineScaleSetVMClient", reflect.TypeOf((*MockClientFactory)(nil).GetVirtualMachineScaleSetVMClient)) +} + +// GetVirtualNetworkClient mocks base method. +func (m *MockClientFactory) GetVirtualNetworkClient() virtualnetworkclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVirtualNetworkClient") + ret0, _ := ret[0].(virtualnetworkclient.Interface) + return ret0 +} + +// GetVirtualNetworkClient indicates an expected call of GetVirtualNetworkClient. +func (mr *MockClientFactoryMockRecorder) GetVirtualNetworkClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVirtualNetworkClient", reflect.TypeOf((*MockClientFactory)(nil).GetVirtualNetworkClient)) +} + +// GetVirtualNetworkLinkClient mocks base method. +func (m *MockClientFactory) GetVirtualNetworkLinkClient() virtualnetworklinkclient.Interface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVirtualNetworkLinkClient") + ret0, _ := ret[0].(virtualnetworklinkclient.Interface) + return ret0 +} + +// GetVirtualNetworkLinkClient indicates an expected call of GetVirtualNetworkLinkClient. 
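The MockClientFactory getters above (and the one that follows) are standard gomock-generated pairs: the mock method records the call on the controller, and the recorder method lets a test register expectations against it. A minimal sketch of how a test might stub one of them, assuming the test sits in the same package as the generated mock and that the module vendors the go.uber.org/mock fork (both assumptions, not shown in this patch):

```go
package azclient // illustrative: placed alongside the generated mock so NewMockClientFactory is in scope

import (
	"testing"

	"go.uber.org/mock/gomock" // assumption: go.uber.org/mock fork; adjust if the module uses github.com/golang/mock
)

func TestClientFactoryMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	factory := NewMockClientFactory(ctrl) // generated constructor, conventionally NewMock<Interface>

	// Register an expectation on the recorder, then call through the mock;
	// returning nil is enough to exercise the call recording shown above.
	factory.EXPECT().GetVirtualNetworkClient().Return(nil).Times(1)

	if c := factory.GetVirtualNetworkClient(); c != nil {
		t.Fatalf("expected the stubbed nil client, got %v", c)
	}
}
```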
+func (mr *MockClientFactoryMockRecorder) GetVirtualNetworkLinkClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVirtualNetworkLinkClient", reflect.TypeOf((*MockClientFactory)(nil).GetVirtualNetworkLinkClient)) +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privateendpointclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privateendpointclient/zz_generated_client.go index dc3e8d8cd..097f19780 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privateendpointclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privateendpointclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,20 +32,28 @@ import ( type Client struct { *armnetwork.PrivateEndpointsClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armnetwork.NewPrivateEndpointsClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + PrivateEndpointsClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const GetOperationName = "PrivateEndpointsClient.Get" + // Get gets the PrivateEndpoint func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string, expand *string) (result *armnetwork.PrivateEndpoint, rerr error) { var ops *armnetwork.PrivateEndpointsClientGetOptions @@ -54,6 +64,8 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc ctx = utils.ContextWithRequestMethod(ctx, "Get") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, GetOperationName, client.tracer, nil) + defer endSpan(rerr) resp, err := client.PrivateEndpointsClient.Get(ctx, resourceGroupName, resourceName, ops) if err != nil { return nil, err @@ -62,12 +74,16 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc return &resp.PrivateEndpoint, nil } +const CreateOrUpdateOperationName = "PrivateEndpointsClient.Create" + // CreateOrUpdate creates or updates a PrivateEndpoint. 
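The recurring change across the client hunks, here and below, is one pattern applied per file: New derives a tracing.Tracer from options.TracingProvider, each operation gets a *OperationName constant, and the method body is bracketed by runtime.StartSpan plus a deferred endSpan carrying the operation's error (hence the switch to named return values). A standalone sketch of that pattern with a hypothetical operation and a no-op tracer, the same default a client gets when ClientOptions.TracingProvider is left unset; the module and operation names below are illustrative:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing"
)

const getOperationName = "ExampleClient.Get" // mirrors the *OperationName constants added in the patch

// get brackets an operation with a span. The named error return lets the
// deferred closure hand the final error to endSpan when the function returns.
func get(ctx context.Context, tracer tracing.Tracer, fail bool) (result string, err error) {
	ctx, endSpan := runtime.StartSpan(ctx, getOperationName, tracer, nil)
	defer func() { endSpan(err) }() // the closure defers reading err until the function actually returns

	_ = ctx // the derived context would be passed to the underlying SDK call
	if fail {
		return "", errors.New("simulated failure recorded on the span")
	}
	return "ok", nil
}

func main() {
	// A zero-value Provider yields a no-op Tracer ("examplemodule"/"v0.1.0" are placeholder identifiers).
	tracer := tracing.Provider{}.NewTracer("examplemodule", "v0.1.0")

	fmt.Println(get(context.Background(), tracer, false))
	fmt.Println(get(context.Background(), tracer, true))
}
```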
-func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.PrivateEndpoint) (*armnetwork.PrivateEndpoint, error) { +func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.PrivateEndpoint) (result *armnetwork.PrivateEndpoint, err error) { ctx = utils.ContextWithClientName(ctx, "PrivateEndpointsClient") ctx = utils.ContextWithRequestMethod(ctx, "CreateOrUpdate") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, CreateOrUpdateOperationName, client.tracer, nil) + defer endSpan(err) resp, err := utils.NewPollerWrapper(client.PrivateEndpointsClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx) if err != nil { return nil, err diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatelinkserviceclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatelinkserviceclient/zz_generated_client.go index 90abb4a9f..760541d40 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatelinkserviceclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatelinkserviceclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,20 +32,28 @@ import ( type Client struct { *armnetwork.PrivateLinkServicesClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armnetwork.NewPrivateLinkServicesClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + PrivateLinkServicesClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const GetOperationName = "PrivateLinkServicesClient.Get" + // Get gets the PrivateLinkService func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string, expand *string) (result *armnetwork.PrivateLinkService, rerr error) { var ops *armnetwork.PrivateLinkServicesClientGetOptions @@ -54,6 +64,8 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc ctx = utils.ContextWithRequestMethod(ctx, "Get") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, GetOperationName, client.tracer, nil) + defer endSpan(rerr) resp, err := client.PrivateLinkServicesClient.Get(ctx, resourceGroupName, resourceName, ops) if err != nil { return nil, err @@ -62,12 +74,16 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc return &resp.PrivateLinkService, nil } +const CreateOrUpdateOperationName = "PrivateLinkServicesClient.Create" + // CreateOrUpdate creates or updates a PrivateLinkService. 
-func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.PrivateLinkService) (*armnetwork.PrivateLinkService, error) { +func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.PrivateLinkService) (result *armnetwork.PrivateLinkService, err error) { ctx = utils.ContextWithClientName(ctx, "PrivateLinkServicesClient") ctx = utils.ContextWithRequestMethod(ctx, "CreateOrUpdate") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, CreateOrUpdateOperationName, client.tracer, nil) + defer endSpan(err) resp, err := utils.NewPollerWrapper(client.PrivateLinkServicesClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx) if err != nil { return nil, err @@ -78,22 +94,30 @@ func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName stri return nil, nil } +const DeleteOperationName = "PrivateLinkServicesClient.Delete" + // Delete deletes a PrivateLinkService by name. -func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error { +func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) (err error) { ctx = utils.ContextWithClientName(ctx, "PrivateLinkServicesClient") ctx = utils.ContextWithRequestMethod(ctx, "Delete") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) - _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) + ctx, endSpan := runtime.StartSpan(ctx, DeleteOperationName, client.tracer, nil) + defer endSpan(err) + _, err = utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) return err } +const ListOperationName = "PrivateLinkServicesClient.List" + // List gets a list of PrivateLinkService in the resource group. 
func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armnetwork.PrivateLinkService, rerr error) { ctx = utils.ContextWithClientName(ctx, "PrivateLinkServicesClient") ctx = utils.ContextWithRequestMethod(ctx, "List") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, ListOperationName, client.tracer, nil) + defer endSpan(rerr) pager := client.PrivateLinkServicesClient.NewListPager(resourceGroupName, nil) for pager.More() { nextResult, err := pager.NextPage(ctx) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatezoneclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatezoneclient/zz_generated_client.go index c317ca2c4..f28d4b6d7 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatezoneclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatezoneclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armprivatedns "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,20 +32,28 @@ import ( type Client struct { *armprivatedns.PrivateZonesClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armprivatedns.NewPrivateZonesClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + PrivateZonesClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const GetOperationName = "PrivateZonesClient.Get" + // Get gets the PrivateZone func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string) (result *armprivatedns.PrivateZone, rerr error) { @@ -51,6 +61,8 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc ctx = utils.ContextWithRequestMethod(ctx, "Get") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, GetOperationName, client.tracer, nil) + defer endSpan(rerr) resp, err := client.PrivateZonesClient.Get(ctx, resourceGroupName, resourceName, nil) if err != nil { return nil, err @@ -59,12 +71,16 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc return &resp.PrivateZone, nil } +const CreateOrUpdateOperationName = "PrivateZonesClient.Create" + // CreateOrUpdate creates or updates a PrivateZone. 
-func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armprivatedns.PrivateZone) (*armprivatedns.PrivateZone, error) { +func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armprivatedns.PrivateZone) (result *armprivatedns.PrivateZone, err error) { ctx = utils.ContextWithClientName(ctx, "PrivateZonesClient") ctx = utils.ContextWithRequestMethod(ctx, "CreateOrUpdate") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, CreateOrUpdateOperationName, client.tracer, nil) + defer endSpan(err) resp, err := utils.NewPollerWrapper(client.PrivateZonesClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx) if err != nil { return nil, err diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/providerclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/providerclient/zz_generated_client.go index a175b4e01..f942f94e2 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/providerclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/providerclient/zz_generated_client.go @@ -20,6 +20,7 @@ package providerclient import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armresources "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -28,16 +29,22 @@ import ( type Client struct { *armresources.ProvidersClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armresources.NewProvidersClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + ProvidersClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipaddressclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipaddressclient/zz_generated_client.go index 449e418fa..943843e87 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipaddressclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipaddressclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,20 +32,28 @@ import ( type Client struct { *armnetwork.PublicIPAddressesClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, 
utils.ModuleVersion) client, err := armnetwork.NewPublicIPAddressesClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + PublicIPAddressesClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const GetOperationName = "PublicIPAddressesClient.Get" + // Get gets the PublicIPAddress func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string, expand *string) (result *armnetwork.PublicIPAddress, rerr error) { var ops *armnetwork.PublicIPAddressesClientGetOptions @@ -54,6 +64,8 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc ctx = utils.ContextWithRequestMethod(ctx, "Get") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, GetOperationName, client.tracer, nil) + defer endSpan(rerr) resp, err := client.PublicIPAddressesClient.Get(ctx, resourceGroupName, resourceName, ops) if err != nil { return nil, err @@ -62,12 +74,16 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc return &resp.PublicIPAddress, nil } +const CreateOrUpdateOperationName = "PublicIPAddressesClient.Create" + // CreateOrUpdate creates or updates a PublicIPAddress. -func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.PublicIPAddress) (*armnetwork.PublicIPAddress, error) { +func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.PublicIPAddress) (result *armnetwork.PublicIPAddress, err error) { ctx = utils.ContextWithClientName(ctx, "PublicIPAddressesClient") ctx = utils.ContextWithRequestMethod(ctx, "CreateOrUpdate") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, CreateOrUpdateOperationName, client.tracer, nil) + defer endSpan(err) resp, err := utils.NewPollerWrapper(client.PublicIPAddressesClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx) if err != nil { return nil, err @@ -78,22 +94,30 @@ func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName stri return nil, nil } +const DeleteOperationName = "PublicIPAddressesClient.Delete" + // Delete deletes a PublicIPAddress by name. -func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error { +func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) (err error) { ctx = utils.ContextWithClientName(ctx, "PublicIPAddressesClient") ctx = utils.ContextWithRequestMethod(ctx, "Delete") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) - _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) + ctx, endSpan := runtime.StartSpan(ctx, DeleteOperationName, client.tracer, nil) + defer endSpan(err) + _, err = utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) return err } +const ListOperationName = "PublicIPAddressesClient.List" + // List gets a list of PublicIPAddress in the resource group. 
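The Delete hunks keep the same long-running-operation flow and only change the signature to a named error so the deferred endSpan observes it: BeginDelete starts the operation and the module's NewPollerWrapper/WaitforPollerResp helper drives the poller to completion. A rough plain-SDK equivalent of that flow, sketched with an azidentity credential and placeholder subscription/resource names (none of which appear in this patch):

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
)

// deletePublicIP sketches the long-running-operation flow the generated Delete
// methods delegate to: start the operation, then block until the service
// reports a terminal state.
func deletePublicIP(ctx context.Context, client *armnetwork.PublicIPAddressesClient, resourceGroup, name string) error {
	poller, err := client.BeginDelete(ctx, resourceGroup, name, nil)
	if err != nil {
		return err
	}
	_, err = poller.PollUntilDone(ctx, nil) // polls until the delete completes or fails
	return err
}

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Subscription, resource group, and resource names below are placeholders.
	client, err := armnetwork.NewPublicIPAddressesClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	if err := deletePublicIP(context.Background(), client, "example-rg", "example-pip"); err != nil {
		log.Fatal(err)
	}
}
```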
func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armnetwork.PublicIPAddress, rerr error) { ctx = utils.ContextWithClientName(ctx, "PublicIPAddressesClient") ctx = utils.ContextWithRequestMethod(ctx, "List") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, ListOperationName, client.tracer, nil) + defer endSpan(rerr) pager := client.PublicIPAddressesClient.NewListPager(resourceGroupName, nil) for pager.More() { nextResult, err := pager.NextPage(ctx) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipprefixclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipprefixclient/zz_generated_client.go index 41a507308..ca7b23979 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipprefixclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipprefixclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,20 +32,28 @@ import ( type Client struct { *armnetwork.PublicIPPrefixesClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armnetwork.NewPublicIPPrefixesClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + PublicIPPrefixesClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const GetOperationName = "PublicIPPrefixesClient.Get" + // Get gets the PublicIPPrefix func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string, expand *string) (result *armnetwork.PublicIPPrefix, rerr error) { var ops *armnetwork.PublicIPPrefixesClientGetOptions @@ -54,6 +64,8 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc ctx = utils.ContextWithRequestMethod(ctx, "Get") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, GetOperationName, client.tracer, nil) + defer endSpan(rerr) resp, err := client.PublicIPPrefixesClient.Get(ctx, resourceGroupName, resourceName, ops) if err != nil { return nil, err @@ -62,12 +74,16 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc return &resp.PublicIPPrefix, nil } +const CreateOrUpdateOperationName = "PublicIPPrefixesClient.Create" + // CreateOrUpdate creates or updates a PublicIPPrefix. 
-func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.PublicIPPrefix) (*armnetwork.PublicIPPrefix, error) { +func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.PublicIPPrefix) (result *armnetwork.PublicIPPrefix, err error) { ctx = utils.ContextWithClientName(ctx, "PublicIPPrefixesClient") ctx = utils.ContextWithRequestMethod(ctx, "CreateOrUpdate") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, CreateOrUpdateOperationName, client.tracer, nil) + defer endSpan(err) resp, err := utils.NewPollerWrapper(client.PublicIPPrefixesClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx) if err != nil { return nil, err @@ -78,22 +94,30 @@ func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName stri return nil, nil } +const DeleteOperationName = "PublicIPPrefixesClient.Delete" + // Delete deletes a PublicIPPrefix by name. -func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error { +func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) (err error) { ctx = utils.ContextWithClientName(ctx, "PublicIPPrefixesClient") ctx = utils.ContextWithRequestMethod(ctx, "Delete") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) - _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) + ctx, endSpan := runtime.StartSpan(ctx, DeleteOperationName, client.tracer, nil) + defer endSpan(err) + _, err = utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) return err } +const ListOperationName = "PublicIPPrefixesClient.List" + // List gets a list of PublicIPPrefix in the resource group. 
func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armnetwork.PublicIPPrefix, rerr error) { ctx = utils.ContextWithClientName(ctx, "PublicIPPrefixesClient") ctx = utils.ContextWithRequestMethod(ctx, "List") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, ListOperationName, client.tracer, nil) + defer endSpan(rerr) pager := client.PublicIPPrefixesClient.NewListPager(resourceGroupName, nil) for pager.More() { nextResult, err := pager.NextPage(ctx) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/registryclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/registryclient/zz_generated_client.go index 693b7ea79..d43e1db29 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/registryclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/registryclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armcontainerregistry "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,20 +32,28 @@ import ( type Client struct { *armcontainerregistry.RegistriesClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armcontainerregistry.NewRegistriesClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + RegistriesClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const GetOperationName = "RegistriesClient.Get" + // Get gets the Registry func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string) (result *armcontainerregistry.Registry, rerr error) { @@ -51,6 +61,8 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc ctx = utils.ContextWithRequestMethod(ctx, "Get") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, GetOperationName, client.tracer, nil) + defer endSpan(rerr) resp, err := client.RegistriesClient.Get(ctx, resourceGroupName, resourceName, nil) if err != nil { return nil, err @@ -59,22 +71,30 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc return &resp.Registry, nil } +const DeleteOperationName = "RegistriesClient.Delete" + // Delete deletes a Registry by name. 
-func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error { +func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) (err error) { ctx = utils.ContextWithClientName(ctx, "RegistriesClient") ctx = utils.ContextWithRequestMethod(ctx, "Delete") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) - _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) + ctx, endSpan := runtime.StartSpan(ctx, DeleteOperationName, client.tracer, nil) + defer endSpan(err) + _, err = utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) return err } +const ListOperationName = "RegistriesClient.List" + // List gets a list of Registry in the resource group. func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armcontainerregistry.Registry, rerr error) { ctx = utils.ContextWithClientName(ctx, "RegistriesClient") ctx = utils.ContextWithRequestMethod(ctx, "List") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, ListOperationName, client.tracer, nil) + defer endSpan(rerr) pager := client.RegistriesClient.NewListByResourceGroupPager(resourceGroupName, nil) for pager.More() { nextResult, err := pager.NextPage(ctx) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/resourcegroupclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/resourcegroupclient/zz_generated_client.go index c847e3114..25bd43d25 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/resourcegroupclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/resourcegroupclient/zz_generated_client.go @@ -20,6 +20,7 @@ package resourcegroupclient import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" resources "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -28,16 +29,22 @@ import ( type Client struct { *resources.ResourceGroupsClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := resources.NewResourceGroupsClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + ResourceGroupsClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/routetableclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/routetableclient/zz_generated_client.go index 23aa9b8f4..5eb0d5f30 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/routetableclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/routetableclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" 
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,20 +32,28 @@ import ( type Client struct { *armnetwork.RouteTablesClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armnetwork.NewRouteTablesClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + RouteTablesClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const GetOperationName = "RouteTablesClient.Get" + // Get gets the RouteTable func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string) (result *armnetwork.RouteTable, rerr error) { @@ -51,6 +61,8 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc ctx = utils.ContextWithRequestMethod(ctx, "Get") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, GetOperationName, client.tracer, nil) + defer endSpan(rerr) resp, err := client.RouteTablesClient.Get(ctx, resourceGroupName, resourceName, nil) if err != nil { return nil, err @@ -59,12 +71,16 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc return &resp.RouteTable, nil } +const CreateOrUpdateOperationName = "RouteTablesClient.Create" + // CreateOrUpdate creates or updates a RouteTable. -func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.RouteTable) (*armnetwork.RouteTable, error) { +func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.RouteTable) (result *armnetwork.RouteTable, err error) { ctx = utils.ContextWithClientName(ctx, "RouteTablesClient") ctx = utils.ContextWithRequestMethod(ctx, "CreateOrUpdate") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, CreateOrUpdateOperationName, client.tracer, nil) + defer endSpan(err) resp, err := utils.NewPollerWrapper(client.RouteTablesClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx) if err != nil { return nil, err @@ -75,22 +91,30 @@ func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName stri return nil, nil } +const DeleteOperationName = "RouteTablesClient.Delete" + // Delete deletes a RouteTable by name. 
-func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error { +func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) (err error) { ctx = utils.ContextWithClientName(ctx, "RouteTablesClient") ctx = utils.ContextWithRequestMethod(ctx, "Delete") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) - _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) + ctx, endSpan := runtime.StartSpan(ctx, DeleteOperationName, client.tracer, nil) + defer endSpan(err) + _, err = utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) return err } +const ListOperationName = "RouteTablesClient.List" + // List gets a list of RouteTable in the resource group. func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armnetwork.RouteTable, rerr error) { ctx = utils.ContextWithClientName(ctx, "RouteTablesClient") ctx = utils.ContextWithRequestMethod(ctx, "List") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, ListOperationName, client.tracer, nil) + defer endSpan(rerr) pager := client.RouteTablesClient.NewListPager(resourceGroupName, nil) for pager.More() { nextResult, err := pager.NextPage(ctx) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/secretclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/secretclient/zz_generated_client.go index ec78083cf..7bbdfd06d 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/secretclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/secretclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armkeyvault "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,20 +32,28 @@ import ( type Client struct { *armkeyvault.SecretsClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armkeyvault.NewSecretsClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + SecretsClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const GetOperationName = "SecretsClient.Get" + // Get gets the Secret func (client *Client) Get(ctx context.Context, resourceGroupName string, parentResourceName string, resourceName string) (result *armkeyvault.Secret, rerr error) { @@ -51,6 +61,8 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, parentR ctx = utils.ContextWithRequestMethod(ctx, "Get") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, GetOperationName, client.tracer, 
nil) + defer endSpan(rerr) resp, err := client.SecretsClient.Get(ctx, resourceGroupName, parentResourceName, resourceName, nil) if err != nil { return nil, err @@ -59,12 +71,16 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, parentR return &resp.Secret, nil } +const ListOperationName = "SecretsClient.List" + // List gets a list of Secret in the resource group. func (client *Client) List(ctx context.Context, resourceGroupName string, parentResourceName string) (result []*armkeyvault.Secret, rerr error) { ctx = utils.ContextWithClientName(ctx, "SecretsClient") ctx = utils.ContextWithRequestMethod(ctx, "List") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, ListOperationName, client.tracer, nil) + defer endSpan(rerr) pager := client.SecretsClient.NewListPager(resourceGroupName, parentResourceName, nil) for pager.More() { nextResult, err := pager.NextPage(ctx) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/securitygroupclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/securitygroupclient/zz_generated_client.go index cd1255a3f..df1c46780 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/securitygroupclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/securitygroupclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,20 +32,28 @@ import ( type Client struct { *armnetwork.SecurityGroupsClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armnetwork.NewSecurityGroupsClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + SecurityGroupsClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const GetOperationName = "SecurityGroupsClient.Get" + // Get gets the SecurityGroup func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string) (result *armnetwork.SecurityGroup, rerr error) { @@ -51,6 +61,8 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc ctx = utils.ContextWithRequestMethod(ctx, "Get") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, GetOperationName, client.tracer, nil) + defer endSpan(rerr) resp, err := client.SecurityGroupsClient.Get(ctx, resourceGroupName, resourceName, nil) if err != nil { return nil, err @@ -59,12 +71,16 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc return &resp.SecurityGroup, nil } +const CreateOrUpdateOperationName = "SecurityGroupsClient.Create" + // CreateOrUpdate creates or updates a SecurityGroup. 
-func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.SecurityGroup) (*armnetwork.SecurityGroup, error) { +func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.SecurityGroup) (result *armnetwork.SecurityGroup, err error) { ctx = utils.ContextWithClientName(ctx, "SecurityGroupsClient") ctx = utils.ContextWithRequestMethod(ctx, "CreateOrUpdate") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, CreateOrUpdateOperationName, client.tracer, nil) + defer endSpan(err) resp, err := utils.NewPollerWrapper(client.SecurityGroupsClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx) if err != nil { return nil, err @@ -75,22 +91,30 @@ func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName stri return nil, nil } +const DeleteOperationName = "SecurityGroupsClient.Delete" + // Delete deletes a SecurityGroup by name. -func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error { +func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) (err error) { ctx = utils.ContextWithClientName(ctx, "SecurityGroupsClient") ctx = utils.ContextWithRequestMethod(ctx, "Delete") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) - _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) + ctx, endSpan := runtime.StartSpan(ctx, DeleteOperationName, client.tracer, nil) + defer endSpan(err) + _, err = utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) return err } +const ListOperationName = "SecurityGroupsClient.List" + // List gets a list of SecurityGroup in the resource group. 
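Every method also stamps the client name, request method, resource group, and subscription into the context via utils.ContextWith* before the span starts, so downstream policies and metrics can read that metadata back off the request context. Those helpers are defined outside this diff; a hypothetical sketch of the usual shape of such helpers, with key types and function names invented here purely for illustration:

```go
package main

import (
	"context"
	"fmt"
)

// Private key types keep these context values from colliding with other packages.
type clientNameKey struct{}
type requestMethodKey struct{}

// contextWithClientName and contextWithRequestMethod are hypothetical
// stand-ins for the utils.ContextWith* helpers called by the generated clients.
func contextWithClientName(ctx context.Context, name string) context.Context {
	return context.WithValue(ctx, clientNameKey{}, name)
}

func contextWithRequestMethod(ctx context.Context, method string) context.Context {
	return context.WithValue(ctx, requestMethodKey{}, method)
}

// clientNameFromContext shows how a logging or metrics layer downstream could
// read the metadata back out of the request context.
func clientNameFromContext(ctx context.Context) (string, bool) {
	name, ok := ctx.Value(clientNameKey{}).(string)
	return name, ok
}

func main() {
	ctx := contextWithClientName(context.Background(), "SecurityGroupsClient")
	ctx = contextWithRequestMethod(ctx, "List")
	if name, ok := clientNameFromContext(ctx); ok {
		fmt.Println("client:", name)
	}
}
```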
func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armnetwork.SecurityGroup, rerr error) { ctx = utils.ContextWithClientName(ctx, "SecurityGroupsClient") ctx = utils.ContextWithRequestMethod(ctx, "List") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, ListOperationName, client.tracer, nil) + defer endSpan(rerr) pager := client.SecurityGroupsClient.NewListPager(resourceGroupName, nil) for pager.More() { nextResult, err := pager.NextPage(ctx) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/zz_generated_client.go index e88c39127..47afc1b92 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,20 +32,28 @@ import ( type Client struct { *armcompute.SnapshotsClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armcompute.NewSnapshotsClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + SnapshotsClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const GetOperationName = "SnapshotsClient.Get" + // Get gets the Snapshot func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string) (result *armcompute.Snapshot, rerr error) { @@ -51,6 +61,8 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc ctx = utils.ContextWithRequestMethod(ctx, "Get") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, GetOperationName, client.tracer, nil) + defer endSpan(rerr) resp, err := client.SnapshotsClient.Get(ctx, resourceGroupName, resourceName, nil) if err != nil { return nil, err @@ -59,12 +71,16 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc return &resp.Snapshot, nil } +const CreateOrUpdateOperationName = "SnapshotsClient.Create" + // CreateOrUpdate creates or updates a Snapshot. 
-func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armcompute.Snapshot) (*armcompute.Snapshot, error) { +func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armcompute.Snapshot) (result *armcompute.Snapshot, err error) { ctx = utils.ContextWithClientName(ctx, "SnapshotsClient") ctx = utils.ContextWithRequestMethod(ctx, "CreateOrUpdate") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, CreateOrUpdateOperationName, client.tracer, nil) + defer endSpan(err) resp, err := utils.NewPollerWrapper(client.SnapshotsClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx) if err != nil { return nil, err @@ -75,12 +91,16 @@ func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName stri return nil, nil } +const DeleteOperationName = "SnapshotsClient.Delete" + // Delete deletes a Snapshot by name. -func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error { +func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) (err error) { ctx = utils.ContextWithClientName(ctx, "SnapshotsClient") ctx = utils.ContextWithRequestMethod(ctx, "Delete") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) - _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) + ctx, endSpan := runtime.StartSpan(ctx, DeleteOperationName, client.tracer, nil) + defer endSpan(err) + _, err = utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) return err } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/sshpublickeyresourceclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/sshpublickeyresourceclient/zz_generated_client.go index 1013b1664..4dedaf3ed 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/sshpublickeyresourceclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/sshpublickeyresourceclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,20 +32,28 @@ import ( type Client struct { *armcompute.SSHPublicKeysClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armcompute.NewSSHPublicKeysClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + SSHPublicKeysClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const GetOperationName = "SSHPublicKeysClient.Get" + // Get gets the SSHPublicKeyResource func (client *Client) 
Get(ctx context.Context, resourceGroupName string, resourceName string) (result *armcompute.SSHPublicKeyResource, rerr error) { @@ -51,6 +61,8 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc ctx = utils.ContextWithRequestMethod(ctx, "Get") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, GetOperationName, client.tracer, nil) + defer endSpan(rerr) resp, err := client.SSHPublicKeysClient.Get(ctx, resourceGroupName, resourceName, nil) if err != nil { return nil, err @@ -59,12 +71,16 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc return &resp.SSHPublicKeyResource, nil } +const ListOperationName = "SSHPublicKeysClient.List" + // List gets a list of SSHPublicKeyResource in the resource group. func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armcompute.SSHPublicKeyResource, rerr error) { ctx = utils.ContextWithClientName(ctx, "SSHPublicKeysClient") ctx = utils.ContextWithRequestMethod(ctx, "List") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, ListOperationName, client.tracer, nil) + defer endSpan(rerr) pager := client.SSHPublicKeysClient.NewListByResourceGroupPager(resourceGroupName, nil) for pager.More() { nextResult, err := pager.NextPage(ctx) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/subnetclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/subnetclient/zz_generated_client.go index 6b4d863ec..3e0fdfa94 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/subnetclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/subnetclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,20 +32,28 @@ import ( type Client struct { *armnetwork.SubnetsClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armnetwork.NewSubnetsClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + SubnetsClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const GetOperationName = "SubnetsClient.Get" + // Get gets the Subnet func (client *Client) Get(ctx context.Context, resourceGroupName string, parentResourceName string, resourceName string, expand *string) (result *armnetwork.Subnet, rerr error) { var ops *armnetwork.SubnetsClientGetOptions @@ -54,6 +64,8 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, parentR ctx = utils.ContextWithRequestMethod(ctx, "Get") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := 
runtime.StartSpan(ctx, GetOperationName, client.tracer, nil) + defer endSpan(rerr) resp, err := client.SubnetsClient.Get(ctx, resourceGroupName, parentResourceName, resourceName, ops) if err != nil { return nil, err @@ -62,12 +74,16 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, parentR return &resp.Subnet, nil } +const CreateOrUpdateOperationName = "SubnetsClient.Create" + // CreateOrUpdate creates or updates a Subnet. -func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, parentResourceName string, resource armnetwork.Subnet) (*armnetwork.Subnet, error) { +func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, parentResourceName string, resource armnetwork.Subnet) (result *armnetwork.Subnet, err error) { ctx = utils.ContextWithClientName(ctx, "SubnetsClient") ctx = utils.ContextWithRequestMethod(ctx, "CreateOrUpdate") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, CreateOrUpdateOperationName, client.tracer, nil) + defer endSpan(err) resp, err := utils.NewPollerWrapper(client.SubnetsClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, parentResourceName, resource, nil)).WaitforPollerResp(ctx) if err != nil { return nil, err @@ -78,22 +94,30 @@ func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName stri return nil, nil } +const DeleteOperationName = "SubnetsClient.Delete" + // Delete deletes a Subnet by name. -func (client *Client) Delete(ctx context.Context, resourceGroupName string, parentResourceName string, resourceName string) error { +func (client *Client) Delete(ctx context.Context, resourceGroupName string, parentResourceName string, resourceName string) (err error) { ctx = utils.ContextWithClientName(ctx, "SubnetsClient") ctx = utils.ContextWithRequestMethod(ctx, "Delete") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) - _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, parentResourceName, resourceName, nil)).WaitforPollerResp(ctx) + ctx, endSpan := runtime.StartSpan(ctx, DeleteOperationName, client.tracer, nil) + defer endSpan(err) + _, err = utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, parentResourceName, resourceName, nil)).WaitforPollerResp(ctx) return err } +const ListOperationName = "SubnetsClient.List" + // List gets a list of Subnet in the resource group. 
func (client *Client) List(ctx context.Context, resourceGroupName string, parentResourceName string) (result []*armnetwork.Subnet, rerr error) { ctx = utils.ContextWithClientName(ctx, "SubnetsClient") ctx = utils.ContextWithRequestMethod(ctx, "List") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, ListOperationName, client.tracer, nil) + defer endSpan(rerr) pager := client.SubnetsClient.NewListPager(resourceGroupName, parentResourceName, nil) for pager.More() { nextResult, err := pager.NextPage(ctx) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/const.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/const.go index f21e34e7a..53506ba2d 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/const.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/const.go @@ -18,7 +18,7 @@ package utils const ( AzureClientID = "AZURE_CLIENT_ID" - AzureClientSecret = "AZURE_CLIENT_SECRET" + AzureClientSecret = "AZURE_CLIENT_SECRET" //nolint:gosec AzureFederatedTokenFile = "AZURE_FEDERATED_TOKEN_FILE" AzureTenantID = "AZURE_TENANT_ID" ) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/consts.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/consts.go new file mode 100644 index 000000000..b23a61f36 --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/consts.go @@ -0,0 +1,22 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package utils + +const ( + ModuleName = "sigs.k8s.io/cloud-provider-azure/pkg/azclient" + ModuleVersion = "v0.0.1" +) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/vaultclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/vaultclient/zz_generated_client.go index ccc5b0eeb..c0cad2d01 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/vaultclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/vaultclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armkeyvault "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,20 +32,28 @@ import ( type Client struct { *armkeyvault.VaultsClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armkeyvault.NewVaultsClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + VaultsClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const GetOperationName = "VaultsClient.Get" + // Get gets the Vault func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string) (result *armkeyvault.Vault, rerr error) { @@ -51,6 +61,8 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc ctx = utils.ContextWithRequestMethod(ctx, "Get") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, GetOperationName, client.tracer, nil) + defer endSpan(rerr) resp, err := client.VaultsClient.Get(ctx, resourceGroupName, resourceName, nil) if err != nil { return nil, err @@ -59,12 +71,16 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc return &resp.Vault, nil } +const ListOperationName = "VaultsClient.List" + // List gets a list of Vault in the resource group. 
func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armkeyvault.Vault, rerr error) { ctx = utils.ContextWithClientName(ctx, "VaultsClient") ctx = utils.ContextWithRequestMethod(ctx, "List") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, ListOperationName, client.tracer, nil) + defer endSpan(rerr) pager := client.VaultsClient.NewListByResourceGroupPager(resourceGroupName, nil) for pager.More() { nextResult, err := pager.NextPage(ctx) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/custom.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/custom.go index b3db14286..9fb0fc5c1 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/custom.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/custom.go @@ -19,6 +19,7 @@ package virtualmachineclient import ( "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" ) @@ -37,3 +38,23 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc //handle statuscode return &resp.VirtualMachine, nil } + +func (client *Client) InstanceView(ctx context.Context, resourceGroupName string, vmName string) (*armcompute.VirtualMachineInstanceView, error) { + resp, err := client.VirtualMachinesClient.InstanceView(ctx, resourceGroupName, vmName, nil) + if err != nil { + return nil, err + } + return &resp.VirtualMachineInstanceView, nil +} + +func (client *Client) ListVMInstanceView(ctx context.Context, resourceGroupName string) (result []*armcompute.VirtualMachine, rerr error) { + pager := client.VirtualMachinesClient.NewListPager(resourceGroupName, &armcompute.VirtualMachinesClientListOptions{Expand: to.Ptr(armcompute.ExpandTypeForListVMsInstanceView)}) + for pager.More() { + nextResult, err := pager.NextPage(ctx) + if err != nil { + return nil, err + } + result = append(result, nextResult.Value...) + } + return result, nil +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/interface.go index ef228f640..611a5ef5b 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/interface.go @@ -18,6 +18,9 @@ limitations under the License. 
package virtualmachineclient import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -29,4 +32,7 @@ type Interface interface { utils.CreateOrUpdateFunc[armcompute.VirtualMachine] utils.DeleteFunc[armcompute.VirtualMachine] utils.ListFunc[armcompute.VirtualMachine] + InstanceView(ctx context.Context, resourceGroupName string, vmName string) (*armcompute.VirtualMachineInstanceView, error) + ListVMInstanceView(ctx context.Context, resourceGroupName string) (result []*armcompute.VirtualMachine, rerr error) + BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, vmName string, parameters armcompute.VirtualMachine, options *armcompute.VirtualMachinesClientBeginCreateOrUpdateOptions) (*runtime.Poller[armcompute.VirtualMachinesClientCreateOrUpdateResponse], error) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/zz_generated_client.go index 8014a347b..db67082d0 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,26 +32,36 @@ import ( type Client struct { *armcompute.VirtualMachinesClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armcompute.NewVirtualMachinesClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + VirtualMachinesClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const CreateOrUpdateOperationName = "VirtualMachinesClient.Create" + // CreateOrUpdate creates or updates a VirtualMachine. 
-func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armcompute.VirtualMachine) (*armcompute.VirtualMachine, error) { +func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armcompute.VirtualMachine) (result *armcompute.VirtualMachine, err error) { ctx = utils.ContextWithClientName(ctx, "VirtualMachinesClient") ctx = utils.ContextWithRequestMethod(ctx, "CreateOrUpdate") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, CreateOrUpdateOperationName, client.tracer, nil) + defer endSpan(err) resp, err := utils.NewPollerWrapper(client.VirtualMachinesClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx) if err != nil { return nil, err @@ -60,22 +72,30 @@ func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName stri return nil, nil } +const DeleteOperationName = "VirtualMachinesClient.Delete" + // Delete deletes a VirtualMachine by name. -func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error { +func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) (err error) { ctx = utils.ContextWithClientName(ctx, "VirtualMachinesClient") ctx = utils.ContextWithRequestMethod(ctx, "Delete") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) - _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) + ctx, endSpan := runtime.StartSpan(ctx, DeleteOperationName, client.tracer, nil) + defer endSpan(err) + _, err = utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) return err } +const ListOperationName = "VirtualMachinesClient.List" + // List gets a list of VirtualMachine in the resource group. 
func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armcompute.VirtualMachine, rerr error) { ctx = utils.ContextWithClientName(ctx, "VirtualMachinesClient") ctx = utils.ContextWithRequestMethod(ctx, "List") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, ListOperationName, client.tracer, nil) + defer endSpan(rerr) pager := client.VirtualMachinesClient.NewListPager(resourceGroupName, nil) for pager.More() { nextResult, err := pager.NextPage(ctx) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient/zz_generated_client.go index 5034b62ba..fd2af62b3 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,26 +32,36 @@ import ( type Client struct { *armcompute.VirtualMachineScaleSetsClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armcompute.NewVirtualMachineScaleSetsClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + VirtualMachineScaleSetsClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const CreateOrUpdateOperationName = "VirtualMachineScaleSetsClient.Create" + // CreateOrUpdate creates or updates a VirtualMachineScaleSet. -func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armcompute.VirtualMachineScaleSet) (*armcompute.VirtualMachineScaleSet, error) { +func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armcompute.VirtualMachineScaleSet) (result *armcompute.VirtualMachineScaleSet, err error) { ctx = utils.ContextWithClientName(ctx, "VirtualMachineScaleSetsClient") ctx = utils.ContextWithRequestMethod(ctx, "CreateOrUpdate") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, CreateOrUpdateOperationName, client.tracer, nil) + defer endSpan(err) resp, err := utils.NewPollerWrapper(client.VirtualMachineScaleSetsClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx) if err != nil { return nil, err @@ -60,22 +72,30 @@ func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName stri return nil, nil } +const DeleteOperationName = "VirtualMachineScaleSetsClient.Delete" + // Delete deletes a VirtualMachineScaleSet by name. 
-func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error { +func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) (err error) { ctx = utils.ContextWithClientName(ctx, "VirtualMachineScaleSetsClient") ctx = utils.ContextWithRequestMethod(ctx, "Delete") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) - _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) + ctx, endSpan := runtime.StartSpan(ctx, DeleteOperationName, client.tracer, nil) + defer endSpan(err) + _, err = utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) return err } +const ListOperationName = "VirtualMachineScaleSetsClient.List" + // List gets a list of VirtualMachineScaleSet in the resource group. func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armcompute.VirtualMachineScaleSet, rerr error) { ctx = utils.ContextWithClientName(ctx, "VirtualMachineScaleSetsClient") ctx = utils.ContextWithRequestMethod(ctx, "List") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, ListOperationName, client.tracer, nil) + defer endSpan(rerr) pager := client.VirtualMachineScaleSetsClient.NewListPager(resourceGroupName, nil) for pager.More() { nextResult, err := pager.NextPage(ctx) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient/custom.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient/custom.go index 72747bec1..3fda4fa98 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient/custom.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient/custom.go @@ -21,6 +21,7 @@ import ( "errors" "sync" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -28,7 +29,7 @@ import ( // Update updates a VirtualMachine. 
func (client *Client) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters armcompute.VirtualMachineScaleSetVM) (*armcompute.VirtualMachineScaleSetVM, error) { - resp, err := client.UpdateAsync(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters).WaitforPollerResp(ctx) + resp, err := utils.NewPollerWrapper(client.VirtualMachineScaleSetVMsClient.BeginUpdate(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, nil)).WaitforPollerResp(ctx) if err != nil { return nil, err } @@ -38,10 +39,6 @@ func (client *Client) Update(ctx context.Context, resourceGroupName string, VMSc return nil, nil } -func (client *Client) UpdateAsync(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters armcompute.VirtualMachineScaleSetVM) *utils.PollerWrapper[armcompute.VirtualMachineScaleSetVMsClientUpdateResponse] { - return utils.NewPollerWrapper(client.VirtualMachineScaleSetVMsClient.BeginUpdate(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, nil)) -} - func UpdateVMsInBatch(ctx context.Context, client *Client, resourceGroupName string, VMScaleSetName string, instances map[string]armcompute.VirtualMachineScaleSetVM, batchSize int) error { if batchSize <= 0 { return errors.New("batchSize should be greater than 0") @@ -97,3 +94,27 @@ func (client *Client) List(ctx context.Context, resourceGroupName string, parent } return result, nil } + +// GetInstanceView gets the instance view of the VirtualMachineScaleSetVM. +func (client *Client) GetInstanceView(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string) (*armcompute.VirtualMachineScaleSetVMInstanceView, error) { + resp, err := client.VirtualMachineScaleSetVMsClient.GetInstanceView(ctx, resourceGroupName, vmScaleSetName, instanceID, nil) + if err != nil { + return nil, err + } + return &resp.VirtualMachineScaleSetVMInstanceView, nil +} + +// List gets a list of VirtualMachineScaleSetVM in the resource group. +func (client *Client) ListVMInstanceView(ctx context.Context, resourceGroupName string, parentResourceName string) (result []*armcompute.VirtualMachineScaleSetVM, rerr error) { + pager := client.VirtualMachineScaleSetVMsClient.NewListPager(resourceGroupName, parentResourceName, &armcompute.VirtualMachineScaleSetVMsClientListOptions{ + Expand: to.Ptr(string(armcompute.InstanceViewTypesInstanceView)), + }) + for pager.More() { + nextResult, err := pager.NextPage(ctx) + if err != nil { + return nil, err + } + result = append(result, nextResult.Value...) 
+ } + return result, nil +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient/interface.go index bae4fdff6..7f90ae648 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient/interface.go @@ -20,6 +20,7 @@ package virtualmachinescalesetvmclient import ( "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,6 +31,10 @@ type Interface interface { utils.SubResourceGetFunc[armcompute.VirtualMachineScaleSetVM] utils.SubResourceDeleteFunc[armcompute.VirtualMachineScaleSetVM] utils.SubResourceListFunc[armcompute.VirtualMachineScaleSetVM] + ListVMInstanceView(ctx context.Context, resourceGroupName string, parentResourceName string) (result []*armcompute.VirtualMachineScaleSetVM, rerr error) + // Update updates a VirtualMachineScaleSetVM. Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters armcompute.VirtualMachineScaleSetVM) (*armcompute.VirtualMachineScaleSetVM, error) + GetInstanceView(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string) (*armcompute.VirtualMachineScaleSetVMInstanceView, error) + BeginUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, parameters armcompute.VirtualMachineScaleSetVM, options *armcompute.VirtualMachineScaleSetVMsClientBeginUpdateOptions) (*runtime.Poller[armcompute.VirtualMachineScaleSetVMsClientUpdateResponse], error) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient/zz_generated_client.go index 1ff038c98..53f2653c0 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,20 +32,28 @@ import ( type Client struct { *armcompute.VirtualMachineScaleSetVMsClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armcompute.NewVirtualMachineScaleSetVMsClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + VirtualMachineScaleSetVMsClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const GetOperationName = "VirtualMachineScaleSetVMsClient.Get" + // Get gets the VirtualMachineScaleSetVM func (client *Client) 
Get(ctx context.Context, resourceGroupName string, parentResourceName string, resourceName string) (result *armcompute.VirtualMachineScaleSetVM, rerr error) { @@ -51,6 +61,8 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, parentR ctx = utils.ContextWithRequestMethod(ctx, "Get") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, GetOperationName, client.tracer, nil) + defer endSpan(rerr) resp, err := client.VirtualMachineScaleSetVMsClient.Get(ctx, resourceGroupName, parentResourceName, resourceName, nil) if err != nil { return nil, err @@ -59,12 +71,16 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, parentR return &resp.VirtualMachineScaleSetVM, nil } +const DeleteOperationName = "VirtualMachineScaleSetVMsClient.Delete" + // Delete deletes a VirtualMachineScaleSetVM by name. -func (client *Client) Delete(ctx context.Context, resourceGroupName string, parentResourceName string, resourceName string) error { +func (client *Client) Delete(ctx context.Context, resourceGroupName string, parentResourceName string, resourceName string) (err error) { ctx = utils.ContextWithClientName(ctx, "VirtualMachineScaleSetVMsClient") ctx = utils.ContextWithRequestMethod(ctx, "Delete") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) - _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, parentResourceName, resourceName, nil)).WaitforPollerResp(ctx) + ctx, endSpan := runtime.StartSpan(ctx, DeleteOperationName, client.tracer, nil) + defer endSpan(err) + _, err = utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, parentResourceName, resourceName, nil)).WaitforPollerResp(ctx) return err } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient/zz_generated_client.go index 7dced8853..d192ceb6d 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient/zz_generated_client.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient/zz_generated_client.go @@ -22,6 +22,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -30,20 +32,28 @@ import ( type Client struct { *armnetwork.VirtualNetworksClient subscriptionID string + tracer tracing.Tracer } func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { if options == nil { options = utils.GetDefaultOption() } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) client, err := armnetwork.NewVirtualNetworksClient(subscriptionID, credential, options) if err != nil { return nil, err } - return &Client{client, subscriptionID}, nil + return &Client{ + VirtualNetworksClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil } +const GetOperationName = "VirtualNetworksClient.Get" + // Get gets the VirtualNetwork func (client *Client) Get(ctx context.Context, resourceGroupName string, 
resourceName string, expand *string) (result *armnetwork.VirtualNetwork, rerr error) { var ops *armnetwork.VirtualNetworksClientGetOptions @@ -54,6 +64,8 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc ctx = utils.ContextWithRequestMethod(ctx, "Get") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, GetOperationName, client.tracer, nil) + defer endSpan(rerr) resp, err := client.VirtualNetworksClient.Get(ctx, resourceGroupName, resourceName, ops) if err != nil { return nil, err @@ -62,12 +74,16 @@ func (client *Client) Get(ctx context.Context, resourceGroupName string, resourc return &resp.VirtualNetwork, nil } +const CreateOrUpdateOperationName = "VirtualNetworksClient.Create" + // CreateOrUpdate creates or updates a VirtualNetwork. -func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.VirtualNetwork) (*armnetwork.VirtualNetwork, error) { +func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.VirtualNetwork) (result *armnetwork.VirtualNetwork, err error) { ctx = utils.ContextWithClientName(ctx, "VirtualNetworksClient") ctx = utils.ContextWithRequestMethod(ctx, "CreateOrUpdate") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, CreateOrUpdateOperationName, client.tracer, nil) + defer endSpan(err) resp, err := utils.NewPollerWrapper(client.VirtualNetworksClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx) if err != nil { return nil, err @@ -78,22 +94,30 @@ func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName stri return nil, nil } +const DeleteOperationName = "VirtualNetworksClient.Delete" + // Delete deletes a VirtualNetwork by name. -func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error { +func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) (err error) { ctx = utils.ContextWithClientName(ctx, "VirtualNetworksClient") ctx = utils.ContextWithRequestMethod(ctx, "Delete") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) - _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) + ctx, endSpan := runtime.StartSpan(ctx, DeleteOperationName, client.tracer, nil) + defer endSpan(err) + _, err = utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx) return err } +const ListOperationName = "VirtualNetworksClient.List" + // List gets a list of VirtualNetwork in the resource group. 
func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armnetwork.VirtualNetwork, rerr error) { ctx = utils.ContextWithClientName(ctx, "VirtualNetworksClient") ctx = utils.ContextWithRequestMethod(ctx, "List") ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, ListOperationName, client.tracer, nil) + defer endSpan(rerr) pager := client.VirtualNetworksClient.NewListPager(resourceGroupName, nil) for pager.More() { nextResult, err := pager.NextPage(ctx) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient/interface.go new file mode 100644 index 000000000..3f4af5908 --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient/interface.go @@ -0,0 +1,30 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +azure:enableclientgen:=true +package virtualnetworklinkclient + +import ( + armprivatedns "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns" + + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" +) + +// +azure:client:verbs=get;createorupdate,resource=PrivateZone,subResource=VirtualNetworkLink,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns,packageAlias=armprivatedns,clientName=VirtualNetworkLinksClient,expand=false,rateLimitKey=virtualNetworkRateLimit +type Interface interface { + utils.SubResourceGetFunc[armprivatedns.VirtualNetworkLink] + utils.SubResourceCreateOrUpdateFunc[armprivatedns.VirtualNetworkLink] +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient/zz_generated_client.go new file mode 100644 index 000000000..d443f977f --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient/zz_generated_client.go @@ -0,0 +1,92 @@ +// /* +// Copyright The Kubernetes Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// */ + +// Code generated by client-gen. DO NOT EDIT. 
+package virtualnetworklinkclient + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" + armprivatedns "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns" + + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" +) + +type Client struct { + *armprivatedns.VirtualNetworkLinksClient + subscriptionID string + tracer tracing.Tracer +} + +func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) { + if options == nil { + options = utils.GetDefaultOption() + } + tr := options.TracingProvider.NewTracer(utils.ModuleName, utils.ModuleVersion) + + client, err := armprivatedns.NewVirtualNetworkLinksClient(subscriptionID, credential, options) + if err != nil { + return nil, err + } + return &Client{ + VirtualNetworkLinksClient: client, + subscriptionID: subscriptionID, + tracer: tr, + }, nil +} + +const GetOperationName = "VirtualNetworkLinksClient.Get" + +// Get gets the VirtualNetworkLink +func (client *Client) Get(ctx context.Context, resourceGroupName string, parentResourceName string, resourceName string) (result *armprivatedns.VirtualNetworkLink, rerr error) { + + ctx = utils.ContextWithClientName(ctx, "VirtualNetworkLinksClient") + ctx = utils.ContextWithRequestMethod(ctx, "Get") + ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) + ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, GetOperationName, client.tracer, nil) + defer endSpan(rerr) + resp, err := client.VirtualNetworkLinksClient.Get(ctx, resourceGroupName, parentResourceName, resourceName, nil) + if err != nil { + return nil, err + } + //handle statuscode + return &resp.VirtualNetworkLink, nil +} + +const CreateOrUpdateOperationName = "VirtualNetworkLinksClient.Create" + +// CreateOrUpdate creates or updates a VirtualNetworkLink. +func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, parentResourceName string, resource armprivatedns.VirtualNetworkLink) (result *armprivatedns.VirtualNetworkLink, err error) { + ctx = utils.ContextWithClientName(ctx, "VirtualNetworkLinksClient") + ctx = utils.ContextWithRequestMethod(ctx, "CreateOrUpdate") + ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) + ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, CreateOrUpdateOperationName, client.tracer, nil) + defer endSpan(err) + resp, err := utils.NewPollerWrapper(client.VirtualNetworkLinksClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, parentResourceName, resource, nil)).WaitforPollerResp(ctx) + if err != nil { + return nil, err + } + if resp != nil { + return &resp.VirtualNetworkLink, nil + } + return nil, nil +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient/interface.go index 405f1efc0..b7108d37c 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient/interface.go @@ -17,6 +17,11 @@ // Code generated by MockGen. DO NOT EDIT. 
// Source: pkg/azureclients/diskclient/interface.go +// +// Generated by this command: +// +// mockgen -copyright_file=/home/runner/work/cloud-provider-azure/cloud-provider-azure/hack/boilerplate/boilerplate.generatego.txt -source=pkg/azureclients/diskclient/interface.go -package=mockdiskclient Interface +// // Package mockdiskclient is a generated GoMock package. package mockdiskclient @@ -26,7 +31,7 @@ import ( reflect "reflect" compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" retry "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) @@ -62,7 +67,7 @@ func (m *MockInterface) CreateOrUpdate(ctx context.Context, subsID, resourceGrou } // CreateOrUpdate indicates an expected call of CreateOrUpdate. -func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, subsID, resourceGroupName, diskName, diskParameter interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, subsID, resourceGroupName, diskName, diskParameter any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, subsID, resourceGroupName, diskName, diskParameter) } @@ -76,7 +81,7 @@ func (m *MockInterface) Delete(ctx context.Context, subsID, resourceGroupName, d } // Delete indicates an expected call of Delete. -func (mr *MockInterfaceMockRecorder) Delete(ctx, subsID, resourceGroupName, diskName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Delete(ctx, subsID, resourceGroupName, diskName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, subsID, resourceGroupName, diskName) } @@ -91,7 +96,7 @@ func (m *MockInterface) Get(ctx context.Context, subsID, resourceGroupName, disk } // Get indicates an expected call of Get. -func (mr *MockInterfaceMockRecorder) Get(ctx, subsID, resourceGroupName, diskName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Get(ctx, subsID, resourceGroupName, diskName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, subsID, resourceGroupName, diskName) } @@ -106,7 +111,7 @@ func (m *MockInterface) ListByResourceGroup(ctx context.Context, subsID, resourc } // ListByResourceGroup indicates an expected call of ListByResourceGroup. -func (mr *MockInterfaceMockRecorder) ListByResourceGroup(ctx, subsID, resourceGroupName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) ListByResourceGroup(ctx, subsID, resourceGroupName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByResourceGroup", reflect.TypeOf((*MockInterface)(nil).ListByResourceGroup), ctx, subsID, resourceGroupName) } @@ -120,7 +125,7 @@ func (m *MockInterface) Update(ctx context.Context, subsID, resourceGroupName, d } // Update indicates an expected call of Update. 
-func (mr *MockInterfaceMockRecorder) Update(ctx, subsID, resourceGroupName, diskName, diskParameter interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Update(ctx, subsID, resourceGroupName, diskName, diskParameter any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockInterface)(nil).Update), ctx, subsID, resourceGroupName, diskName, diskParameter) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/interfaceclient/mockinterfaceclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/interfaceclient/mockinterfaceclient/interface.go index 53ba1bf5e..c19b89204 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/interfaceclient/mockinterfaceclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/interfaceclient/mockinterfaceclient/interface.go @@ -17,6 +17,11 @@ // Code generated by MockGen. DO NOT EDIT. // Source: pkg/azureclients/interfaceclient/interface.go +// +// Generated by this command: +// +// mockgen -copyright_file=/home/runner/work/cloud-provider-azure/cloud-provider-azure/hack/boilerplate/boilerplate.generatego.txt -source=pkg/azureclients/interfaceclient/interface.go -package=mockinterfaceclient Interface +// // Package mockinterfaceclient is a generated GoMock package. package mockinterfaceclient @@ -26,7 +31,7 @@ import ( reflect "reflect" network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" retry "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) @@ -62,7 +67,7 @@ func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, n } // CreateOrUpdate indicates an expected call of CreateOrUpdate. -func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, networkInterfaceName, parameters interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, networkInterfaceName, parameters any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, networkInterfaceName, parameters) } @@ -76,7 +81,7 @@ func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, networkIn } // Delete indicates an expected call of Delete. -func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, networkInterfaceName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, networkInterfaceName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, networkInterfaceName) } @@ -91,7 +96,7 @@ func (m *MockInterface) Get(ctx context.Context, resourceGroupName, networkInter } // Get indicates an expected call of Get. 
-func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, networkInterfaceName, expand interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, networkInterfaceName, expand any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, networkInterfaceName, expand) } @@ -106,7 +111,7 @@ func (m *MockInterface) GetVirtualMachineScaleSetNetworkInterface(ctx context.Co } // GetVirtualMachineScaleSetNetworkInterface indicates an expected call of GetVirtualMachineScaleSetNetworkInterface. -func (mr *MockInterfaceMockRecorder) GetVirtualMachineScaleSetNetworkInterface(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, expand interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) GetVirtualMachineScaleSetNetworkInterface(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, expand any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVirtualMachineScaleSetNetworkInterface", reflect.TypeOf((*MockInterface)(nil).GetVirtualMachineScaleSetNetworkInterface), ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, expand) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/mockloadbalancerclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/mockloadbalancerclient/interface.go index c32c8a230..e7b095994 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/mockloadbalancerclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/mockloadbalancerclient/interface.go @@ -17,6 +17,11 @@ // Code generated by MockGen. DO NOT EDIT. // Source: pkg/azureclients/loadbalancerclient/interface.go +// +// Generated by this command: +// +// mockgen -copyright_file=/home/runner/work/cloud-provider-azure/cloud-provider-azure/hack/boilerplate/boilerplate.generatego.txt -source=pkg/azureclients/loadbalancerclient/interface.go -package=mockloadbalancerclient Interface +// // Package mockloadbalancerclient is a generated GoMock package. package mockloadbalancerclient @@ -26,7 +31,7 @@ import ( reflect "reflect" network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" retry "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) @@ -62,7 +67,7 @@ func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, l } // CreateOrUpdate indicates an expected call of CreateOrUpdate. -func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, loadBalancerName, parameters, etag interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, loadBalancerName, parameters, etag any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, loadBalancerName, parameters, etag) } @@ -76,7 +81,7 @@ func (m *MockInterface) CreateOrUpdateBackendPools(ctx context.Context, resource } // CreateOrUpdateBackendPools indicates an expected call of CreateOrUpdateBackendPools. 
-func (mr *MockInterfaceMockRecorder) CreateOrUpdateBackendPools(ctx, resourceGroupName, loadBalancerName, backendPoolName, parameters, etag interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) CreateOrUpdateBackendPools(ctx, resourceGroupName, loadBalancerName, backendPoolName, parameters, etag any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdateBackendPools", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdateBackendPools), ctx, resourceGroupName, loadBalancerName, backendPoolName, parameters, etag) } @@ -90,7 +95,7 @@ func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, loadBalan } // Delete indicates an expected call of Delete. -func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, loadBalancerName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, loadBalancerName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, loadBalancerName) } @@ -104,7 +109,7 @@ func (m *MockInterface) DeleteLBBackendPool(ctx context.Context, resourceGroupNa } // DeleteLBBackendPool indicates an expected call of DeleteLBBackendPool. -func (mr *MockInterfaceMockRecorder) DeleteLBBackendPool(ctx, resourceGroupName, loadBalancerName, backendPoolName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) DeleteLBBackendPool(ctx, resourceGroupName, loadBalancerName, backendPoolName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteLBBackendPool", reflect.TypeOf((*MockInterface)(nil).DeleteLBBackendPool), ctx, resourceGroupName, loadBalancerName, backendPoolName) } @@ -119,7 +124,7 @@ func (m *MockInterface) Get(ctx context.Context, resourceGroupName, loadBalancer } // Get indicates an expected call of Get. -func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, loadBalancerName, expand interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, loadBalancerName, expand any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, loadBalancerName, expand) } @@ -134,7 +139,7 @@ func (m *MockInterface) GetLBBackendPool(ctx context.Context, resourceGroupName, } // GetLBBackendPool indicates an expected call of GetLBBackendPool. -func (mr *MockInterfaceMockRecorder) GetLBBackendPool(ctx, resourceGroupName, loadBalancerName, backendPoolName, expand interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) GetLBBackendPool(ctx, resourceGroupName, loadBalancerName, backendPoolName, expand any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLBBackendPool", reflect.TypeOf((*MockInterface)(nil).GetLBBackendPool), ctx, resourceGroupName, loadBalancerName, backendPoolName, expand) } @@ -149,7 +154,7 @@ func (m *MockInterface) List(ctx context.Context, resourceGroupName string) ([]n } // List indicates an expected call of List. 
-func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName) } @@ -163,7 +168,7 @@ func (m *MockInterface) MigrateToIPBasedBackendPool(ctx context.Context, resourc } // MigrateToIPBasedBackendPool indicates an expected call of MigrateToIPBasedBackendPool. -func (mr *MockInterfaceMockRecorder) MigrateToIPBasedBackendPool(ctx, resourceGroupName, loadBalancerName, backendPoolNames interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) MigrateToIPBasedBackendPool(ctx, resourceGroupName, loadBalancerName, backendPoolNames any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MigrateToIPBasedBackendPool", reflect.TypeOf((*MockInterface)(nil).MigrateToIPBasedBackendPool), ctx, resourceGroupName, loadBalancerName, backendPoolNames) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednsclient/azure_privatednsclient.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednsclient/azure_privatednsclient.go deleted file mode 100644 index b3fcab3b7..000000000 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednsclient/azure_privatednsclient.go +++ /dev/null @@ -1,230 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package privatednsclient - -import ( - "context" - "net/http" - "strings" - "time" - - "github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - - "k8s.io/client-go/util/flowcontrol" - "k8s.io/klog/v2" - - azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient" - "sigs.k8s.io/cloud-provider-azure/pkg/metrics" - "sigs.k8s.io/cloud-provider-azure/pkg/retry" -) - -var _ Interface = &Client{} - -const ( - privateDNSZoneResourceType = "Microsoft.Network/privateDnsZones" -) - -// Client implements privatednsclient Interface. -type Client struct { - armClient armclient.Interface - subscriptionID string - cloudName string - - // Rate limiting configures. - rateLimiterReader flowcontrol.RateLimiter - rateLimiterWriter flowcontrol.RateLimiter - - // ARM throttling configures. - RetryAfterReader time.Time - RetryAfterWriter time.Time -} - -// New creates a new private dns client with ratelimiting. 
-func New(config *azclients.ClientConfig) *Client { - baseURI := config.ResourceManagerEndpoint - authorizer := config.Authorizer - apiVersion := APIVersion - if strings.EqualFold(config.CloudName, AzureStackCloudName) && !config.DisableAzureStackCloud { - klog.Warningf("Azure Stack is not supported for Private DNS Zone API") - } - armClient := armclient.New(authorizer, *config, baseURI, apiVersion) - rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(config.RateLimitConfig) - - if azclients.RateLimitEnabled(config.RateLimitConfig) { - klog.V(2).Infof("Azure PrivateDNSZoneClient (read ops) using rate limit config: QPS=%g, bucket=%d", - config.RateLimitConfig.CloudProviderRateLimitQPS, - config.RateLimitConfig.CloudProviderRateLimitBucket) - klog.V(2).Infof("Azure PrivateDNSZoneClient (write ops) using rate limit config: QPS=%g, bucket=%d", - config.RateLimitConfig.CloudProviderRateLimitQPSWrite, - config.RateLimitConfig.CloudProviderRateLimitBucketWrite) - } - - client := &Client{ - armClient: armClient, - rateLimiterReader: rateLimiterReader, - rateLimiterWriter: rateLimiterWriter, - subscriptionID: config.SubscriptionID, - cloudName: config.CloudName, - } - - return client -} - -// CreateOrUpdate creates or updates a private DNS zone. -func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName, privateZoneName string, parameters privatedns.PrivateZone, etag string, waitForCompletion bool) *retry.Error { - mc := metrics.NewMetricContext("private_dns_zone", "create_or_update", resourceGroupName, c.subscriptionID, "") - - // Report errors if the client is rate limited. - if !c.rateLimiterWriter.TryAccept() { - mc.RateLimitedCount() - return retry.GetRateLimitError(true, "PrivateDNSZoneCreateOrUpdate") - } - - // Report errors if the client is throttled. - if c.RetryAfterWriter.After(time.Now()) { - mc.ThrottledCount() - rerr := retry.GetThrottlingError("PrivateDNSZoneCreateOrUpdate", "client throttled", c.RetryAfterWriter) - return rerr - } - - rerr := c.createOrUpdatePrivateDNSZone(ctx, resourceGroupName, privateZoneName, parameters, etag, waitForCompletion) - mc.Observe(rerr) - if rerr != nil { - if rerr.IsThrottled() { - // Update RetryAfterReader so that no more requests would be sent until RetryAfter expires. - c.RetryAfterWriter = rerr.RetryAfter - } - - return rerr - } - - return nil -} - -// createOrUpdatePrivateDNSZone creates or updates a private DNS zone. -func (c *Client) createOrUpdatePrivateDNSZone(ctx context.Context, resourceGroupName, privateDNSZoneName string, parameters privatedns.PrivateZone, etag string, waitForCompletion bool) *retry.Error { - resourceID := armclient.GetResourceID( - c.subscriptionID, - resourceGroupName, - privateDNSZoneResourceType, - privateDNSZoneName, - ) - decorators := []autorest.PrepareDecorator{} - if etag != "" { - decorators = append(decorators, autorest.WithHeader("If-Match", autorest.String(etag))) - } - - var response *http.Response - var rerr *retry.Error - if waitForCompletion { - response, rerr = c.armClient.PutResource(ctx, resourceID, parameters, decorators...) - defer c.armClient.CloseResponse(ctx, response) - } else { - _, rerr = c.armClient.PutResourceAsync(ctx, resourceID, parameters, decorators...) 
- } - if rerr != nil { - klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "privatednszone.put.request", resourceID, rerr.Error()) - return rerr - } - if !waitForCompletion { - return nil - } - - if response != nil && response.StatusCode != http.StatusNoContent { - _, rerr = c.createOrUpdateResponder(response) - if rerr != nil { - klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "privatednszone.put.respond", resourceID, rerr.Error()) - return rerr - } - } - - return nil -} - -func (c *Client) createOrUpdateResponder(resp *http.Response) (*privatedns.PrivateZone, *retry.Error) { - result := &privatedns.PrivateZone{} - err := autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result)) - result.Response = autorest.Response{Response: resp} - return result, retry.GetError(resp, err) -} - -// Get gets a private dns zone. -func (c *Client) Get(ctx context.Context, resourceGroupName, privateZoneName string) (privatedns.PrivateZone, *retry.Error) { - mc := metrics.NewMetricContext("private_dns_zones", "get", resourceGroupName, c.subscriptionID, "") - - // Report errors if the client is rate limited. - if !c.rateLimiterReader.TryAccept() { - mc.RateLimitedCount() - return privatedns.PrivateZone{}, retry.GetRateLimitError(false, "PrivateDNSZoneGet") - } - - // Report errors if the client is throttled. - if c.RetryAfterReader.After(time.Now()) { - mc.ThrottledCount() - rerr := retry.GetThrottlingError("PrivateDNSZoneGet", "client throttled", c.RetryAfterReader) - return privatedns.PrivateZone{}, rerr - } - - result, rerr := c.getPrivateDNSZone(ctx, resourceGroupName, privateZoneName) - mc.Observe(rerr) - if rerr != nil { - if rerr.IsThrottled() { - // Update RetryAfterReader so that no more requests would be sent until RetryAfter expires. - c.RetryAfterReader = rerr.RetryAfter - } - - return result, rerr - } - - return result, nil -} - -// getPrivateDNSZone gets a private DNS zone. -func (c *Client) getPrivateDNSZone(ctx context.Context, resourceGroupName, privateDNSZoneName string) (privatedns.PrivateZone, *retry.Error) { - resourceID := armclient.GetResourceID( - c.subscriptionID, - resourceGroupName, - privateDNSZoneResourceType, - privateDNSZoneName, - ) - result := privatedns.PrivateZone{} - - response, rerr := c.armClient.GetResource(ctx, resourceID) - defer c.armClient.CloseResponse(ctx, response) - if rerr != nil { - klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "privatednszone.get.request", resourceID, rerr.Error()) - return result, rerr - } - - err := autorest.Respond( - response, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result)) - if err != nil { - klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "privatednszone.get.respond", resourceID, err) - return result, retry.GetError(response, err) - } - - result.Response = autorest.Response{Response: response} - return result, nil -} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednsclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednsclient/interface.go deleted file mode 100644 index 63ae62a0a..000000000 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednsclient/interface.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package privatednsclient - -import ( - "context" - - "github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns" - - "sigs.k8s.io/cloud-provider-azure/pkg/retry" -) - -const ( - // APIVersion is the API version. - APIVersion = "2018-09-01" - // AzureStackCloudName is the cloud name of Azure Stack - AzureStackCloudName = "AZURESTACKCLOUD" -) - -// Interface is the client interface for Private DNS Zones -// Don't forget to run "hack/update-mock-clients.sh" command to generate the mock client. -type Interface interface { - // Get gets a private DNS zone - Get(ctx context.Context, resourceGroupName, privateZoneName string) (privatedns.PrivateZone, *retry.Error) - - // CreateOrUpdate creates or updates a private DNS zone. - CreateOrUpdate(ctx context.Context, resourceGroupName, privateZoneName string, parameters privatedns.PrivateZone, etag string, waitForCompletion bool) *retry.Error -} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatelinkserviceclient/mockprivatelinkserviceclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatelinkserviceclient/mockprivatelinkserviceclient/interface.go index 4225a8808..8b3e62cb0 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatelinkserviceclient/mockprivatelinkserviceclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatelinkserviceclient/mockprivatelinkserviceclient/interface.go @@ -17,6 +17,11 @@ // Code generated by MockGen. DO NOT EDIT. // Source: pkg/azureclients/privatelinkserviceclient/interface.go +// +// Generated by this command: +// +// mockgen -copyright_file=/home/runner/work/cloud-provider-azure/cloud-provider-azure/hack/boilerplate/boilerplate.generatego.txt -source=pkg/azureclients/privatelinkserviceclient/interface.go -package=mockprivatelinkserviceclient Interface +// // Package mockprivatelinkserviceclient is a generated GoMock package. package mockprivatelinkserviceclient @@ -26,7 +31,7 @@ import ( reflect "reflect" network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" retry "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) @@ -62,7 +67,7 @@ func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, p } // CreateOrUpdate indicates an expected call of CreateOrUpdate. 
-func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, privateLinkServiceName, privateLinkService, etag interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, privateLinkServiceName, privateLinkService, etag any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, privateLinkServiceName, privateLinkService, etag) } @@ -76,7 +81,7 @@ func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, privateLi } // Delete indicates an expected call of Delete. -func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, privateLinkServiceName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, privateLinkServiceName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, privateLinkServiceName) } @@ -90,7 +95,7 @@ func (m *MockInterface) DeletePEConnection(ctx context.Context, resourceGroupNam } // DeletePEConnection indicates an expected call of DeletePEConnection. -func (mr *MockInterfaceMockRecorder) DeletePEConnection(ctx, resourceGroupName, privateLinkServiceName, privateEndpointConnectionName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) DeletePEConnection(ctx, resourceGroupName, privateLinkServiceName, privateEndpointConnectionName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePEConnection", reflect.TypeOf((*MockInterface)(nil).DeletePEConnection), ctx, resourceGroupName, privateLinkServiceName, privateEndpointConnectionName) } @@ -105,7 +110,7 @@ func (m *MockInterface) Get(ctx context.Context, resourceGroupName, privateLinkS } // Get indicates an expected call of Get. -func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, privateLinkServiceName, expand interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, privateLinkServiceName, expand any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, privateLinkServiceName, expand) } @@ -120,7 +125,7 @@ func (m *MockInterface) List(ctx context.Context, resourceGroupName string) ([]n } // List indicates an expected call of List. -func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/mockpublicipclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/mockpublicipclient/interface.go index a0ad22b48..e2ecaf457 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/mockpublicipclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/mockpublicipclient/interface.go @@ -17,6 +17,11 @@ // Code generated by MockGen. DO NOT EDIT. 
// Source: pkg/azureclients/publicipclient/interface.go +// +// Generated by this command: +// +// mockgen -copyright_file=/home/runner/work/cloud-provider-azure/cloud-provider-azure/hack/boilerplate/boilerplate.generatego.txt -source=pkg/azureclients/publicipclient/interface.go -package=mockpublicipclient Interface +// // Package mockpublicipclient is a generated GoMock package. package mockpublicipclient @@ -26,7 +31,7 @@ import ( reflect "reflect" network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" retry "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) @@ -62,7 +67,7 @@ func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, p } // CreateOrUpdate indicates an expected call of CreateOrUpdate. -func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, publicIPAddressName, parameters interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, publicIPAddressName, parameters any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, publicIPAddressName, parameters) } @@ -76,7 +81,7 @@ func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, publicIPA } // Delete indicates an expected call of Delete. -func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, publicIPAddressName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, publicIPAddressName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, publicIPAddressName) } @@ -91,7 +96,7 @@ func (m *MockInterface) Get(ctx context.Context, resourceGroupName, publicIPAddr } // Get indicates an expected call of Get. -func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, publicIPAddressName, expand interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, publicIPAddressName, expand any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, publicIPAddressName, expand) } @@ -106,7 +111,7 @@ func (m *MockInterface) GetVirtualMachineScaleSetPublicIPAddress(ctx context.Con } // GetVirtualMachineScaleSetPublicIPAddress indicates an expected call of GetVirtualMachineScaleSetPublicIPAddress. 
-func (mr *MockInterfaceMockRecorder) GetVirtualMachineScaleSetPublicIPAddress(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, IPConfigurationName, publicIPAddressName, expand interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) GetVirtualMachineScaleSetPublicIPAddress(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, IPConfigurationName, publicIPAddressName, expand any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVirtualMachineScaleSetPublicIPAddress", reflect.TypeOf((*MockInterface)(nil).GetVirtualMachineScaleSetPublicIPAddress), ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, IPConfigurationName, publicIPAddressName, expand) } @@ -121,7 +126,7 @@ func (m *MockInterface) List(ctx context.Context, resourceGroupName string) ([]n } // List indicates an expected call of List. -func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName) } @@ -136,7 +141,7 @@ func (m *MockInterface) ListAll(ctx context.Context) ([]network.PublicIPAddress, } // ListAll indicates an expected call of ListAll. -func (mr *MockInterfaceMockRecorder) ListAll(ctx interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) ListAll(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAll", reflect.TypeOf((*MockInterface)(nil).ListAll), ctx) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/mockrouteclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/mockrouteclient/interface.go index 12285f340..df9ef1351 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/mockrouteclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/mockrouteclient/interface.go @@ -17,6 +17,11 @@ // Code generated by MockGen. DO NOT EDIT. // Source: pkg/azureclients/routeclient/interface.go +// +// Generated by this command: +// +// mockgen -copyright_file=/home/runner/work/cloud-provider-azure/cloud-provider-azure/hack/boilerplate/boilerplate.generatego.txt -source=pkg/azureclients/routeclient/interface.go -package=mockrouteclient Interface +// // Package mockrouteclient is a generated GoMock package. package mockrouteclient @@ -26,7 +31,7 @@ import ( reflect "reflect" network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" retry "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) @@ -62,7 +67,7 @@ func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, r } // CreateOrUpdate indicates an expected call of CreateOrUpdate. 
-func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, routeTableName, routeName, routeParameters, etag interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, routeTableName, routeName, routeParameters, etag any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, routeTableName, routeName, routeParameters, etag) } @@ -76,7 +81,7 @@ func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, routeTabl } // Delete indicates an expected call of Delete. -func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, routeTableName, routeName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, routeTableName, routeName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, routeTableName, routeName) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/mockroutetableclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/mockroutetableclient/interface.go index 605683bcf..200ff014c 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/mockroutetableclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/mockroutetableclient/interface.go @@ -17,6 +17,11 @@ // Code generated by MockGen. DO NOT EDIT. // Source: pkg/azureclients/routetableclient/interface.go +// +// Generated by this command: +// +// mockgen -copyright_file=/home/runner/work/cloud-provider-azure/cloud-provider-azure/hack/boilerplate/boilerplate.generatego.txt -source=pkg/azureclients/routetableclient/interface.go -package=mockroutetableclient Interface +// // Package mockroutetableclient is a generated GoMock package. package mockroutetableclient @@ -26,7 +31,7 @@ import ( reflect "reflect" network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" retry "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) @@ -62,7 +67,7 @@ func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, r } // CreateOrUpdate indicates an expected call of CreateOrUpdate. -func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, routeTableName, parameters, etag interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, routeTableName, parameters, etag any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, routeTableName, parameters, etag) } @@ -77,7 +82,7 @@ func (m *MockInterface) Get(ctx context.Context, resourceGroupName, routeTableNa } // Get indicates an expected call of Get. 
-func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, routeTableName, expand interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, routeTableName, expand any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, routeTableName, expand) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/mocksecuritygroupclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/mocksecuritygroupclient/interface.go index dafbfbce4..e93756b92 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/mocksecuritygroupclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/mocksecuritygroupclient/interface.go @@ -17,6 +17,11 @@ // Code generated by MockGen. DO NOT EDIT. // Source: pkg/azureclients/securitygroupclient/interface.go +// +// Generated by this command: +// +// mockgen -copyright_file=/home/runner/work/cloud-provider-azure/cloud-provider-azure/hack/boilerplate/boilerplate.generatego.txt -source=pkg/azureclients/securitygroupclient/interface.go -package=mocksecuritygroupclient Interface +// // Package mocksecuritygroupclient is a generated GoMock package. package mocksecuritygroupclient @@ -26,7 +31,7 @@ import ( reflect "reflect" network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" retry "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) @@ -62,7 +67,7 @@ func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, n } // CreateOrUpdate indicates an expected call of CreateOrUpdate. -func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, networkSecurityGroupName, parameters, etag interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, networkSecurityGroupName, parameters, etag any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, networkSecurityGroupName, parameters, etag) } @@ -76,7 +81,7 @@ func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, networkSe } // Delete indicates an expected call of Delete. -func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, networkSecurityGroupName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, networkSecurityGroupName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, networkSecurityGroupName) } @@ -91,7 +96,7 @@ func (m *MockInterface) Get(ctx context.Context, resourceGroupName, networkSecur } // Get indicates an expected call of Get. 
-func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, networkSecurityGroupName, expand interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, networkSecurityGroupName, expand any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, networkSecurityGroupName, expand) } @@ -106,7 +111,7 @@ func (m *MockInterface) List(ctx context.Context, resourceGroupName string) ([]n } // List indicates an expected call of List. -func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient/interface.go index b206f22e1..d6d5831ed 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient/interface.go @@ -17,6 +17,11 @@ // Code generated by MockGen. DO NOT EDIT. // Source: pkg/azureclients/snapshotclient/interface.go +// +// Generated by this command: +// +// mockgen -copyright_file=/home/runner/work/cloud-provider-azure/cloud-provider-azure/hack/boilerplate/boilerplate.generatego.txt -source=pkg/azureclients/snapshotclient/interface.go -package=mocksnapshotclient Interface +// // Package mocksnapshotclient is a generated GoMock package. package mocksnapshotclient @@ -26,7 +31,7 @@ import ( reflect "reflect" compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" retry "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) @@ -62,7 +67,7 @@ func (m *MockInterface) CreateOrUpdate(ctx context.Context, subsID, resourceGrou } // CreateOrUpdate indicates an expected call of CreateOrUpdate. -func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, subsID, resourceGroupName, snapshotName, snapshot interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, subsID, resourceGroupName, snapshotName, snapshot any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, subsID, resourceGroupName, snapshotName, snapshot) } @@ -76,7 +81,7 @@ func (m *MockInterface) Delete(ctx context.Context, subsID, resourceGroupName, s } // Delete indicates an expected call of Delete. -func (mr *MockInterfaceMockRecorder) Delete(ctx, subsID, resourceGroupName, snapshotName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Delete(ctx, subsID, resourceGroupName, snapshotName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, subsID, resourceGroupName, snapshotName) } @@ -91,7 +96,7 @@ func (m *MockInterface) Get(ctx context.Context, subsID, resourceGroupName, snap } // Get indicates an expected call of Get. 
-func (mr *MockInterfaceMockRecorder) Get(ctx, subsID, resourceGroupName, snapshotName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Get(ctx, subsID, resourceGroupName, snapshotName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, subsID, resourceGroupName, snapshotName) } @@ -106,7 +111,7 @@ func (m *MockInterface) ListByResourceGroup(ctx context.Context, subsID, resourc } // ListByResourceGroup indicates an expected call of ListByResourceGroup. -func (mr *MockInterfaceMockRecorder) ListByResourceGroup(ctx, subsID, resourceGroupName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) ListByResourceGroup(ctx, subsID, resourceGroupName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByResourceGroup", reflect.TypeOf((*MockInterface)(nil).ListByResourceGroup), ctx, subsID, resourceGroupName) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/mockstorageaccountclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/mockstorageaccountclient/interface.go index 13c099056..04ceab7bf 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/mockstorageaccountclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/mockstorageaccountclient/interface.go @@ -17,6 +17,11 @@ // Code generated by MockGen. DO NOT EDIT. // Source: pkg/azureclients/storageaccountclient/interface.go +// +// Generated by this command: +// +// mockgen -copyright_file=/home/runner/work/cloud-provider-azure/cloud-provider-azure/hack/boilerplate/boilerplate.generatego.txt -source=pkg/azureclients/storageaccountclient/interface.go -package=mockstorageaccountclient Interface +// // Package mockstorageaccountclient is a generated GoMock package. package mockstorageaccountclient @@ -26,7 +31,7 @@ import ( reflect "reflect" storage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" retry "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) @@ -62,7 +67,7 @@ func (m *MockInterface) Create(ctx context.Context, subsID, resourceGroupName, a } // Create indicates an expected call of Create. -func (mr *MockInterfaceMockRecorder) Create(ctx, subsID, resourceGroupName, accountName, parameters interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Create(ctx, subsID, resourceGroupName, accountName, parameters any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockInterface)(nil).Create), ctx, subsID, resourceGroupName, accountName, parameters) } @@ -76,7 +81,7 @@ func (m *MockInterface) Delete(ctx context.Context, subsID, resourceGroupName, a } // Delete indicates an expected call of Delete. 
-func (mr *MockInterfaceMockRecorder) Delete(ctx, subsID, resourceGroupName, accountName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Delete(ctx, subsID, resourceGroupName, accountName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, subsID, resourceGroupName, accountName) } @@ -91,7 +96,7 @@ func (m *MockInterface) GetProperties(ctx context.Context, subsID, resourceGroup } // GetProperties indicates an expected call of GetProperties. -func (mr *MockInterfaceMockRecorder) GetProperties(ctx, subsID, resourceGroupName, accountName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) GetProperties(ctx, subsID, resourceGroupName, accountName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProperties", reflect.TypeOf((*MockInterface)(nil).GetProperties), ctx, subsID, resourceGroupName, accountName) } @@ -106,7 +111,7 @@ func (m *MockInterface) ListByResourceGroup(ctx context.Context, subsID, resourc } // ListByResourceGroup indicates an expected call of ListByResourceGroup. -func (mr *MockInterfaceMockRecorder) ListByResourceGroup(ctx, subsID, resourceGroupName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) ListByResourceGroup(ctx, subsID, resourceGroupName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByResourceGroup", reflect.TypeOf((*MockInterface)(nil).ListByResourceGroup), ctx, subsID, resourceGroupName) } @@ -121,7 +126,7 @@ func (m *MockInterface) ListKeys(ctx context.Context, subsID, resourceGroupName, } // ListKeys indicates an expected call of ListKeys. -func (mr *MockInterfaceMockRecorder) ListKeys(ctx, subsID, resourceGroupName, accountName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) ListKeys(ctx, subsID, resourceGroupName, accountName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListKeys", reflect.TypeOf((*MockInterface)(nil).ListKeys), ctx, subsID, resourceGroupName, accountName) } @@ -135,7 +140,7 @@ func (m *MockInterface) Update(ctx context.Context, subsID, resourceGroupName, a } // Update indicates an expected call of Update. -func (mr *MockInterfaceMockRecorder) Update(ctx, subsID, resourceGroupName, accountName, parameters interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Update(ctx, subsID, resourceGroupName, accountName, parameters any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockInterface)(nil).Update), ctx, subsID, resourceGroupName, accountName, parameters) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/mocksubnetclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/mocksubnetclient/interface.go index 30f162e11..22880f915 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/mocksubnetclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/mocksubnetclient/interface.go @@ -17,6 +17,11 @@ // Code generated by MockGen. DO NOT EDIT. 
// Source: pkg/azureclients/subnetclient/interface.go +// +// Generated by this command: +// +// mockgen -copyright_file=/home/runner/work/cloud-provider-azure/cloud-provider-azure/hack/boilerplate/boilerplate.generatego.txt -source=pkg/azureclients/subnetclient/interface.go -package=mocksubnetclient Interface +// // Package mocksubnetclient is a generated GoMock package. package mocksubnetclient @@ -26,7 +31,7 @@ import ( reflect "reflect" network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" retry "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) @@ -62,7 +67,7 @@ func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, v } // CreateOrUpdate indicates an expected call of CreateOrUpdate. -func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, virtualNetworkName, subnetName, subnetParameters interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, virtualNetworkName, subnetName, subnetParameters any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, virtualNetworkName, subnetName, subnetParameters) } @@ -76,7 +81,7 @@ func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, virtualNe } // Delete indicates an expected call of Delete. -func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, virtualNetworkName, subnetName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, virtualNetworkName, subnetName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, virtualNetworkName, subnetName) } @@ -91,7 +96,7 @@ func (m *MockInterface) Get(ctx context.Context, resourceGroupName, virtualNetwo } // Get indicates an expected call of Get. -func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, virtualNetworkName, subnetName, expand interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, virtualNetworkName, subnetName, expand any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, virtualNetworkName, subnetName, expand) } @@ -106,7 +111,7 @@ func (m *MockInterface) List(ctx context.Context, resourceGroupName, virtualNetw } // List indicates an expected call of List. 
-func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName, virtualNetworkName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName, virtualNetworkName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName, virtualNetworkName) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/azure_virtualnetworklinksclient.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/azure_virtualnetworklinksclient.go deleted file mode 100644 index 8d2ab07de..000000000 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/azure_virtualnetworklinksclient.go +++ /dev/null @@ -1,227 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package virtualnetworklinksclient - -import ( - "context" - "net/http" - "strings" - "time" - - "github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "k8s.io/client-go/util/flowcontrol" - "k8s.io/klog/v2" - - azclients "sigs.k8s.io/cloud-provider-azure/pkg/azureclients" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient" - "sigs.k8s.io/cloud-provider-azure/pkg/metrics" - "sigs.k8s.io/cloud-provider-azure/pkg/retry" -) - -var _ Interface = &Client{} - -const ( - privateDNSZoneResourceType = "Microsoft.Network/privateDnsZones" - virtualNetworkLinkResourceType = "virtualNetworkLinks" -) - -// Client implements virtualnetworklinksclient Interface. -type Client struct { - armClient armclient.Interface - cloudName string - subscriptionID string - - // Rate limiting configures. - rateLimiterReader flowcontrol.RateLimiter - rateLimiterWriter flowcontrol.RateLimiter - - // ARM throttling configures. - RetryAfterReader time.Time - RetryAfterWriter time.Time -} - -// New creates a new virtualnetworklinks client. 
-func New(config *azclients.ClientConfig) *Client { - apiVersion := APIVersion - if strings.EqualFold(config.CloudName, AzureStackCloudName) && !config.DisableAzureStackCloud { - klog.Warningf("Azure Stack is not supported for Virtual Network Link API") - } - armClient := armclient.New(config.Authorizer, *config, config.ResourceManagerEndpoint, apiVersion) - - rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(config.RateLimitConfig) - if azclients.RateLimitEnabled(config.RateLimitConfig) { - klog.V(2).Infof("Azure VirtualNetworkLinksClient (read ops) using rate limit config: QPS=%g, bucket=%d", - config.RateLimitConfig.CloudProviderRateLimitQPS, - config.RateLimitConfig.CloudProviderRateLimitBucket) - klog.V(2).Infof("Azure VirtualNetworkLinksClient (write ops) using rate limit config: QPS=%g, bucket=%d", - config.RateLimitConfig.CloudProviderRateLimitQPSWrite, - config.RateLimitConfig.CloudProviderRateLimitBucketWrite) - } - - client := &Client{ - armClient: armClient, - rateLimiterReader: rateLimiterReader, - rateLimiterWriter: rateLimiterWriter, - subscriptionID: config.SubscriptionID, - cloudName: config.CloudName, - } - return client -} - -// CreateOrUpdate creates or updates a virtual network link -func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string, parameters privatedns.VirtualNetworkLink, etag string, waitForCompletion bool) *retry.Error { - mc := metrics.NewMetricContext("virtual_network_links", "create_or_update", resourceGroupName, c.subscriptionID, "") - - // Report errors if the client is rate limited. - if !c.rateLimiterWriter.TryAccept() { - mc.RateLimitedCount() - return retry.GetRateLimitError(true, "VirtualNetworkLinkCreateOrUpdate") - } - - // Report errors if the client is throttled. - if c.RetryAfterWriter.After(time.Now()) { - mc.ThrottledCount() - rerr := retry.GetThrottlingError("VirtualNetworkLinkCreateOrUpdate", "client throttled", c.RetryAfterWriter) - return rerr - } - - rerr := c.createOrUpdateVirtualNetworkLink(ctx, resourceGroupName, privateZoneName, virtualNetworkLinkName, parameters, etag, waitForCompletion) - mc.Observe(rerr) - if rerr != nil { - if rerr.IsThrottled() { - // Update RetryAfterReader so that no more requests would be sent until RetryAfter expires. - c.RetryAfterWriter = rerr.RetryAfter - } - - return rerr - } - return nil -} - -// createOrUpdateVirtualNetworkLink creates or updates a virtual network link. -func (c *Client) createOrUpdateVirtualNetworkLink(ctx context.Context, resourceGroupName, privateZoneName, virtualNetworkLinkName string, parameters privatedns.VirtualNetworkLink, etag string, waitForCompletion bool) *retry.Error { - resourceID := armclient.GetChildResourceID( - c.subscriptionID, - resourceGroupName, - privateDNSZoneResourceType, - privateZoneName, - virtualNetworkLinkResourceType, - virtualNetworkLinkName, - ) - decorators := []autorest.PrepareDecorator{} - if etag != "" { - decorators = append(decorators, autorest.WithHeader("If-Match", autorest.String(etag))) - } - - var response *http.Response - var rerr *retry.Error - if waitForCompletion { - response, rerr = c.armClient.PutResource(ctx, resourceID, parameters, decorators...) - } else { - _, rerr = c.armClient.PutResourceAsync(ctx, resourceID, parameters, decorators...) 
- } - defer c.armClient.CloseResponse(ctx, response) - if rerr != nil { - klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "virtualnetworklink.put.request", resourceID, rerr.Error()) - return rerr - } - - if !waitForCompletion { - return nil - } - - if response != nil && response.StatusCode != http.StatusNoContent { - _, rerr = c.createOrUpdateResponder(response) - if rerr != nil { - klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "virtualnetworklink.put.request", resourceID, rerr.Error()) - return rerr - } - } - return nil -} - -func (c *Client) createOrUpdateResponder(resp *http.Response) (*privatedns.VirtualNetworkLink, *retry.Error) { - result := &privatedns.VirtualNetworkLink{} - err := autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result)) - result.Response = autorest.Response{Response: resp} - return result, retry.GetError(resp, err) -} - -// Get gets a virtual network link -func (c *Client) Get(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string) (result privatedns.VirtualNetworkLink, err *retry.Error) { - mc := metrics.NewMetricContext("virtual_network_links", "get", resourceGroupName, c.subscriptionID, "") - - // Report errors if the client is rate limited. - if !c.rateLimiterReader.TryAccept() { - mc.RateLimitedCount() - return privatedns.VirtualNetworkLink{}, retry.GetRateLimitError(false, "VirtualNetworkLinkGet") - } - - // Report errors if the client is throttled. - if c.RetryAfterReader.After(time.Now()) { - mc.ThrottledCount() - rerr := retry.GetThrottlingError("VirtualNetworkLinkGet", "client throttled", c.RetryAfterReader) - return privatedns.VirtualNetworkLink{}, rerr - } - result, rerr := c.getVirtualNetworkLink(ctx, resourceGroupName, privateZoneName, virtualNetworkLinkName) - - mc.Observe(rerr) - if rerr != nil { - if rerr.IsThrottled() { - // Update RetryAfterReader so that no more requests would be sent until RetryAfter expires. - c.RetryAfterReader = rerr.RetryAfter - } - return result, rerr - } - return result, nil -} - -// getVirtualNetworkLink gets a virtual network link. 
-func (c *Client) getVirtualNetworkLink(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string) (privatedns.VirtualNetworkLink, *retry.Error) { - resourceID := armclient.GetChildResourceID( - c.subscriptionID, - resourceGroupName, - privateDNSZoneResourceType, - privateZoneName, - virtualNetworkLinkResourceType, - virtualNetworkLinkName, - ) - result := privatedns.VirtualNetworkLink{} - response, rerr := c.armClient.GetResourceWithExpandQuery(ctx, resourceID, "") - defer c.armClient.CloseResponse(ctx, response) - if rerr != nil { - klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "virtualnetworklink.get.request", resourceID, rerr.Error()) - return result, rerr - } - - err := autorest.Respond( - response, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result)) - if err != nil { - klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "virtualnetworklink.get.respond", resourceID, err) - return result, retry.GetError(response, err) - } - - result.Response = autorest.Response{Response: response} - return result, nil -} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/interface.go deleted file mode 100644 index 5087bf601..000000000 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient/interface.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package virtualnetworklinksclient - -import ( - "context" - - "github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns" - - "sigs.k8s.io/cloud-provider-azure/pkg/retry" -) - -const ( - // APIVersion is the API version. - // 2020-06-01 is the latest supported version - APIVersion = "2020-06-01" - // AzureStackCloudName is the cloud name of Azure Stack - AzureStackCloudName = "AZURESTACKCLOUD" -) - -// Interface is the client interface for Virtual Network Link. -// Don't forget to run "hack/update-mock-clients.sh" command to generate the mock client. -type Interface interface { - // Get gets a virtual network link - Get(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string) (privatedns.VirtualNetworkLink, *retry.Error) - - // CreateOrUpdate creates or updates a virtual network link. 
- CreateOrUpdate(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string, parameters privatedns.VirtualNetworkLink, etag string, waitForCompletion bool) *retry.Error -} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient/interface.go index 3384ce808..6a08ab029 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient/interface.go @@ -17,6 +17,11 @@ // Code generated by MockGen. DO NOT EDIT. // Source: pkg/azureclients/vmclient/interface.go +// +// Generated by this command: +// +// mockgen -copyright_file=/home/runner/work/cloud-provider-azure/cloud-provider-azure/hack/boilerplate/boilerplate.generatego.txt -source=pkg/azureclients/vmclient/interface.go -package=mockvmclient Interface +// // Package mockvmclient is a generated GoMock package. package mockvmclient @@ -27,7 +32,7 @@ import ( compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" azure "github.com/Azure/go-autorest/autorest/azure" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" retry "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) @@ -63,7 +68,7 @@ func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, V } // CreateOrUpdate indicates an expected call of CreateOrUpdate. -func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, VMName, parameters, source interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, VMName, parameters, source any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, VMName, parameters, source) } @@ -77,7 +82,7 @@ func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, VMName st } // Delete indicates an expected call of Delete. -func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, VMName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, VMName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, VMName) } @@ -92,7 +97,7 @@ func (m *MockInterface) Get(ctx context.Context, resourceGroupName, VMName strin } // Get indicates an expected call of Get. -func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, VMName, expand interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, VMName, expand any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, VMName, expand) } @@ -107,7 +112,7 @@ func (m *MockInterface) List(ctx context.Context, resourceGroupName string) ([]c } // List indicates an expected call of List. 
-func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName) } @@ -122,7 +127,7 @@ func (m *MockInterface) ListVmssFlexVMsWithOnlyInstanceView(ctx context.Context, } // ListVmssFlexVMsWithOnlyInstanceView indicates an expected call of ListVmssFlexVMsWithOnlyInstanceView. -func (mr *MockInterfaceMockRecorder) ListVmssFlexVMsWithOnlyInstanceView(ctx, vmssFlexID interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) ListVmssFlexVMsWithOnlyInstanceView(ctx, vmssFlexID any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListVmssFlexVMsWithOnlyInstanceView", reflect.TypeOf((*MockInterface)(nil).ListVmssFlexVMsWithOnlyInstanceView), ctx, vmssFlexID) } @@ -137,7 +142,7 @@ func (m *MockInterface) ListVmssFlexVMsWithoutInstanceView(ctx context.Context, } // ListVmssFlexVMsWithoutInstanceView indicates an expected call of ListVmssFlexVMsWithoutInstanceView. -func (mr *MockInterfaceMockRecorder) ListVmssFlexVMsWithoutInstanceView(ctx, vmssFlexID interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) ListVmssFlexVMsWithoutInstanceView(ctx, vmssFlexID any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListVmssFlexVMsWithoutInstanceView", reflect.TypeOf((*MockInterface)(nil).ListVmssFlexVMsWithoutInstanceView), ctx, vmssFlexID) } @@ -152,7 +157,7 @@ func (m *MockInterface) Update(ctx context.Context, resourceGroupName, VMName st } // Update indicates an expected call of Update. -func (mr *MockInterfaceMockRecorder) Update(ctx, resourceGroupName, VMName, parameters, source interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Update(ctx, resourceGroupName, VMName, parameters, source any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockInterface)(nil).Update), ctx, resourceGroupName, VMName, parameters, source) } @@ -167,7 +172,7 @@ func (m *MockInterface) UpdateAsync(ctx context.Context, resourceGroupName, VMNa } // UpdateAsync indicates an expected call of UpdateAsync. -func (mr *MockInterfaceMockRecorder) UpdateAsync(ctx, resourceGroupName, VMName, parameters, source interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) UpdateAsync(ctx, resourceGroupName, VMName, parameters, source any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAsync", reflect.TypeOf((*MockInterface)(nil).UpdateAsync), ctx, resourceGroupName, VMName, parameters, source) } @@ -182,7 +187,7 @@ func (m *MockInterface) WaitForUpdateResult(ctx context.Context, future *azure.F } // WaitForUpdateResult indicates an expected call of WaitForUpdateResult. 
-func (mr *MockInterfaceMockRecorder) WaitForUpdateResult(ctx, future, resourceGroupName, source interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) WaitForUpdateResult(ctx, future, resourceGroupName, source any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForUpdateResult", reflect.TypeOf((*MockInterface)(nil).WaitForUpdateResult), ctx, future, resourceGroupName, source) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient/interface.go index 9cddb2d29..f3d166012 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient/interface.go @@ -17,6 +17,11 @@ // Code generated by MockGen. DO NOT EDIT. // Source: pkg/azureclients/vmssclient/interface.go +// +// Generated by this command: +// +// mockgen -copyright_file=/home/runner/work/cloud-provider-azure/cloud-provider-azure/hack/boilerplate/boilerplate.generatego.txt -source=pkg/azureclients/vmssclient/interface.go -package=mockvmssclient Interface +// // Package mockvmssclient is a generated GoMock package. package mockvmssclient @@ -28,7 +33,7 @@ import ( compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" azure "github.com/Azure/go-autorest/autorest/azure" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" retry "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) @@ -64,7 +69,7 @@ func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, V } // CreateOrUpdate indicates an expected call of CreateOrUpdate. -func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, VMScaleSetName, parameters interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, VMScaleSetName, parameters any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, VMScaleSetName, parameters) } @@ -79,7 +84,7 @@ func (m *MockInterface) CreateOrUpdateAsync(ctx context.Context, resourceGroupNa } // CreateOrUpdateAsync indicates an expected call of CreateOrUpdateAsync. -func (mr *MockInterfaceMockRecorder) CreateOrUpdateAsync(ctx, resourceGroupName, VMScaleSetName, parameters interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) CreateOrUpdateAsync(ctx, resourceGroupName, VMScaleSetName, parameters any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdateAsync", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdateAsync), ctx, resourceGroupName, VMScaleSetName, parameters) } @@ -94,7 +99,7 @@ func (m *MockInterface) DeallocateInstancesAsync(ctx context.Context, resourceGr } // DeallocateInstancesAsync indicates an expected call of DeallocateInstancesAsync. 
-func (mr *MockInterfaceMockRecorder) DeallocateInstancesAsync(ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) DeallocateInstancesAsync(ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeallocateInstancesAsync", reflect.TypeOf((*MockInterface)(nil).DeallocateInstancesAsync), ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs) } @@ -108,7 +113,7 @@ func (m *MockInterface) DeleteInstances(ctx context.Context, resourceGroupName, } // DeleteInstances indicates an expected call of DeleteInstances. -func (mr *MockInterfaceMockRecorder) DeleteInstances(ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) DeleteInstances(ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteInstances", reflect.TypeOf((*MockInterface)(nil).DeleteInstances), ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs) } @@ -123,7 +128,7 @@ func (m *MockInterface) DeleteInstancesAsync(ctx context.Context, resourceGroupN } // DeleteInstancesAsync indicates an expected call of DeleteInstancesAsync. -func (mr *MockInterfaceMockRecorder) DeleteInstancesAsync(ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs, forceDelete interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) DeleteInstancesAsync(ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs, forceDelete any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteInstancesAsync", reflect.TypeOf((*MockInterface)(nil).DeleteInstancesAsync), ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs, forceDelete) } @@ -138,7 +143,7 @@ func (m *MockInterface) Get(ctx context.Context, resourceGroupName, VMScaleSetNa } // Get indicates an expected call of Get. -func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, VMScaleSetName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, VMScaleSetName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, VMScaleSetName) } @@ -153,7 +158,7 @@ func (m *MockInterface) List(ctx context.Context, resourceGroupName string) ([]c } // List indicates an expected call of List. -func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName) } @@ -168,7 +173,7 @@ func (m *MockInterface) StartInstancesAsync(ctx context.Context, resourceGroupNa } // StartInstancesAsync indicates an expected call of StartInstancesAsync. 
-func (mr *MockInterfaceMockRecorder) StartInstancesAsync(ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) StartInstancesAsync(ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartInstancesAsync", reflect.TypeOf((*MockInterface)(nil).StartInstancesAsync), ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs) } @@ -183,7 +188,7 @@ func (m *MockInterface) WaitForAsyncOperationResult(ctx context.Context, future } // WaitForAsyncOperationResult indicates an expected call of WaitForAsyncOperationResult. -func (mr *MockInterfaceMockRecorder) WaitForAsyncOperationResult(ctx, future, resourceGroupName, request, asyncOpName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) WaitForAsyncOperationResult(ctx, future, resourceGroupName, request, asyncOpName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForAsyncOperationResult", reflect.TypeOf((*MockInterface)(nil).WaitForAsyncOperationResult), ctx, future, resourceGroupName, request, asyncOpName) } @@ -198,7 +203,7 @@ func (m *MockInterface) WaitForCreateOrUpdateResult(ctx context.Context, future } // WaitForCreateOrUpdateResult indicates an expected call of WaitForCreateOrUpdateResult. -func (mr *MockInterfaceMockRecorder) WaitForCreateOrUpdateResult(ctx, future, resourceGroupName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) WaitForCreateOrUpdateResult(ctx, future, resourceGroupName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForCreateOrUpdateResult", reflect.TypeOf((*MockInterface)(nil).WaitForCreateOrUpdateResult), ctx, future, resourceGroupName) } @@ -213,7 +218,7 @@ func (m *MockInterface) WaitForDeallocateInstancesResult(ctx context.Context, fu } // WaitForDeallocateInstancesResult indicates an expected call of WaitForDeallocateInstancesResult. -func (mr *MockInterfaceMockRecorder) WaitForDeallocateInstancesResult(ctx, future, resourceGroupName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) WaitForDeallocateInstancesResult(ctx, future, resourceGroupName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForDeallocateInstancesResult", reflect.TypeOf((*MockInterface)(nil).WaitForDeallocateInstancesResult), ctx, future, resourceGroupName) } @@ -228,7 +233,7 @@ func (m *MockInterface) WaitForDeleteInstancesResult(ctx context.Context, future } // WaitForDeleteInstancesResult indicates an expected call of WaitForDeleteInstancesResult. -func (mr *MockInterfaceMockRecorder) WaitForDeleteInstancesResult(ctx, future, resourceGroupName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) WaitForDeleteInstancesResult(ctx, future, resourceGroupName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForDeleteInstancesResult", reflect.TypeOf((*MockInterface)(nil).WaitForDeleteInstancesResult), ctx, future, resourceGroupName) } @@ -243,7 +248,7 @@ func (m *MockInterface) WaitForStartInstancesResult(ctx context.Context, future } // WaitForStartInstancesResult indicates an expected call of WaitForStartInstancesResult. 
-func (mr *MockInterfaceMockRecorder) WaitForStartInstancesResult(ctx, future, resourceGroupName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) WaitForStartInstancesResult(ctx, future, resourceGroupName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForStartInstancesResult", reflect.TypeOf((*MockInterface)(nil).WaitForStartInstancesResult), ctx, future, resourceGroupName) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient/interface.go index ae571b4c7..0bc207ca6 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient/interface.go @@ -17,6 +17,11 @@ // Code generated by MockGen. DO NOT EDIT. // Source: pkg/azureclients/vmssvmclient/interface.go +// +// Generated by this command: +// +// mockgen -copyright_file=/home/runner/work/cloud-provider-azure/cloud-provider-azure/hack/boilerplate/boilerplate.generatego.txt -source=pkg/azureclients/vmssvmclient/interface.go -package=mockvmssvmclient Interface +// // Package mockvmssvmclient is a generated GoMock package. package mockvmssvmclient @@ -27,7 +32,7 @@ import ( compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" azure "github.com/Azure/go-autorest/autorest/azure" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" retry "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) @@ -64,7 +69,7 @@ func (m *MockInterface) Get(ctx context.Context, resourceGroupName, VMScaleSetNa } // Get indicates an expected call of Get. -func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, VMScaleSetName, instanceID, expand interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, VMScaleSetName, instanceID, expand any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, VMScaleSetName, instanceID, expand) } @@ -79,7 +84,7 @@ func (m *MockInterface) List(ctx context.Context, resourceGroupName, virtualMach } // List indicates an expected call of List. -func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName, virtualMachineScaleSetName, expand interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName, virtualMachineScaleSetName, expand any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName, virtualMachineScaleSetName, expand) } @@ -94,7 +99,7 @@ func (m *MockInterface) Update(ctx context.Context, resourceGroupName, VMScaleSe } // Update indicates an expected call of Update. 
-func (mr *MockInterfaceMockRecorder) Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockInterface)(nil).Update), ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source) } @@ -109,7 +114,7 @@ func (m *MockInterface) UpdateAsync(ctx context.Context, resourceGroupName, VMSc } // UpdateAsync indicates an expected call of UpdateAsync. -func (mr *MockInterfaceMockRecorder) UpdateAsync(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) UpdateAsync(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAsync", reflect.TypeOf((*MockInterface)(nil).UpdateAsync), ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source) } @@ -123,7 +128,7 @@ func (m *MockInterface) UpdateVMs(ctx context.Context, resourceGroupName, VMScal } // UpdateVMs indicates an expected call of UpdateVMs. -func (mr *MockInterfaceMockRecorder) UpdateVMs(ctx, resourceGroupName, VMScaleSetName, instances, source, batchSize interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) UpdateVMs(ctx, resourceGroupName, VMScaleSetName, instances, source, batchSize any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateVMs", reflect.TypeOf((*MockInterface)(nil).UpdateVMs), ctx, resourceGroupName, VMScaleSetName, instances, source, batchSize) } @@ -138,7 +143,7 @@ func (m *MockInterface) WaitForUpdateResult(ctx context.Context, future *azure.F } // WaitForUpdateResult indicates an expected call of WaitForUpdateResult. 
-func (mr *MockInterfaceMockRecorder) WaitForUpdateResult(ctx, future, resourceGroupName, source interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) WaitForUpdateResult(ctx, future, resourceGroupName, source any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForUpdateResult", reflect.TypeOf((*MockInterface)(nil).WaitForUpdateResult), ctx, future, resourceGroupName, source) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go index 723a4f89c..3b2448658 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go @@ -377,6 +377,8 @@ const ( FrontendIPConfigNameMaxLength = 80 // LoadBalancerRuleNameMaxLength is the max length of the load balancing rule LoadBalancerRuleNameMaxLength = 80 + // PIPPrefixNameMaxLength is the max length of the PIP prefix name + PIPPrefixNameMaxLength = 80 // IPFamilySuffixLength is the length of suffix length of IP family ("-IPv4", "-IPv6") IPFamilySuffixLength = 5 diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go index 8009029eb..83e1b73fc 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go @@ -27,11 +27,13 @@ import ( "sync" "time" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/informers" @@ -58,7 +60,6 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/interfaceclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednsclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednszonegroupclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privateendpointclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatelinkserviceclient" @@ -69,7 +70,6 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/virtualnetworklinksclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmasclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmsizeclient" @@ -376,9 +376,7 @@ type Cloud struct { AvailabilitySetsClient vmasclient.Interface ZoneClient zoneclient.Interface privateendpointclient privateendpointclient.Interface - privatednsclient privatednsclient.Interface privatednszonegroupclient privatednszonegroupclient.Interface - virtualNetworkLinksClient virtualnetworklinksclient.Interface PrivateLinkServiceClient privatelinkserviceclient.Interface containerServiceClient containerserviceclient.Interface deploymentClient deploymentclient.Interface @@ -698,6 +696,33 @@ func (az *Cloud) InitializeCloudFromConfig(ctx context.Context, config *Config, } az.configAzureClients(servicePrincipalToken, 
multiTenantServicePrincipalToken, networkResourceServicePrincipalToken) + if az.ComputeClientFactory == nil { + authProvider, err := azclient.NewAuthProvider(&az.ARMClientConfig, &az.AzureAuthConfig.AzureAuthConfig) + if err != nil { + return err + } + var cred azcore.TokenCredential + if authProvider.IsMultiTenantModeEnabled() { + multiTenantCred := authProvider.GetMultiTenantIdentity() + networkTenantCred := authProvider.GetNetworkAzIdentity() + az.NetworkClientFactory, err = azclient.NewClientFactory(&azclient.ClientFactoryConfig{ + SubscriptionID: az.NetworkResourceSubscriptionID, + }, &az.ARMClientConfig, networkTenantCred) + if err != nil { + return err + } + cred = multiTenantCred + } else { + cred = authProvider.GetAzIdentity() + } + az.ComputeClientFactory, err = azclient.NewClientFactory(&azclient.ClientFactoryConfig{ + SubscriptionID: az.SubscriptionID, + }, &az.ARMClientConfig, cred) + if err != nil { + return err + } + } + err = az.initCaches() if err != nil { return err @@ -953,11 +978,9 @@ func (az *Cloud) configAzureClients( publicIPClientConfig := azClientConfig.WithRateLimiter(az.Config.PublicIPAddressRateLimit) containerServiceConfig := azClientConfig.WithRateLimiter(az.Config.ContainerServiceRateLimit) deploymentConfig := azClientConfig.WithRateLimiter(az.Config.DeploymentRateLimit) - privateDNSConfig := azClientConfig.WithRateLimiter(az.Config.PrivateDNSRateLimit) privateDNSZoenGroupConfig := azClientConfig.WithRateLimiter(az.Config.PrivateDNSZoneGroupRateLimit) privateEndpointConfig := azClientConfig.WithRateLimiter(az.Config.PrivateEndpointRateLimit) privateLinkServiceConfig := azClientConfig.WithRateLimiter(az.Config.PrivateLinkServiceRateLimit) - virtualNetworkConfig := azClientConfig.WithRateLimiter(az.Config.VirtualNetworkRateLimit) // TODO(ZeroMagic): add azurefileRateLimit fileClientConfig := azClientConfig.WithRateLimiter(nil) blobClientConfig := azClientConfig.WithRateLimiter(nil) @@ -1012,9 +1035,7 @@ func (az *Cloud) configAzureClients( az.BlobClient = blobclient.New(blobClientConfig) az.AvailabilitySetsClient = vmasclient.New(vmasClientConfig) az.privateendpointclient = privateendpointclient.New(privateEndpointConfig) - az.privatednsclient = privatednsclient.New(privateDNSConfig) az.privatednszonegroupclient = privatednszonegroupclient.New(privateDNSZoenGroupConfig) - az.virtualNetworkLinksClient = virtualnetworklinksclient.New(virtualNetworkConfig) az.PrivateLinkServiceClient = privatelinkserviceclient.New(privateLinkServiceConfig) az.containerServiceClient = containerserviceclient.New(containerServiceConfig) az.deploymentClient = deploymentclient.New(deploymentConfig) @@ -1503,3 +1524,40 @@ func isNodeReady(node *v1.Node) bool { } return false } + +// getNodeVMSet gets the VMSet interface based on config.VMType and the real virtual machine type. +func (az *Cloud) GetNodeVMSet(nodeName types.NodeName, crt azcache.AzureCacheReadType) (VMSet, error) { + // 1. vmType is standard or vmssflex, return cloud.VMSet directly. + // 1.1 all the nodes in the cluster are avset nodes. + // 1.2 all the nodes in the cluster are vmssflex nodes. + if az.VMType == consts.VMTypeStandard || az.VMType == consts.VMTypeVmssFlex { + return az.VMSet, nil + } + + // 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to ScaleSet. + // 2.1 all the nodes in the cluster are vmss uniform nodes. + // 2.2 mix node: the nodes in the cluster can be any of avset nodes, vmss uniform nodes and vmssflex nodes. 
+ ss, ok := az.VMSet.(*ScaleSet) + if !ok { + return nil, fmt.Errorf("error of converting vmSet (%q) to ScaleSet with vmType %q", az.VMSet, az.VMType) + } + + vmManagementType, err := ss.getVMManagementTypeByNodeName(string(nodeName), crt) + if err != nil { + return nil, fmt.Errorf("getNodeVMSet: failed to check the node %s management type: %w", string(nodeName), err) + } + // 3. If the node is managed by availability set, then return ss.availabilitySet. + if vmManagementType == ManagedByAvSet { + // vm is managed by availability set. + return ss.availabilitySet, nil + } + if vmManagementType == ManagedByVmssFlex { + // 4. If the node is managed by vmss flex, then return ss.flexScaleSet. + // vm is managed by vmss flex. + return ss.flexScaleSet, nil + } + + // 5. Node is managed by vmss + return ss, nil + +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go index 48c9491ba..e1ba039a8 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go @@ -111,11 +111,11 @@ type controllerCommon struct { // AttachDiskOptions attach disk options type AttachDiskOptions struct { - cachingMode compute.CachingTypes - diskName string - diskEncryptionSetID string - writeAcceleratorEnabled bool - lun int32 + CachingMode compute.CachingTypes + DiskName string + DiskEncryptionSetID string + WriteAcceleratorEnabled bool + Lun int32 } // ExtendedLocation contains additional info about the location of resources. @@ -126,43 +126,6 @@ type ExtendedLocation struct { Type string `json:"type,omitempty"` } -// getNodeVMSet gets the VMSet interface based on config.VMType and the real virtual machine type. -func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName, crt azcache.AzureCacheReadType) (VMSet, error) { - // 1. vmType is standard or vmssflex, return cloud.VMSet directly. - // 1.1 all the nodes in the cluster are avset nodes. - // 1.2 all the nodes in the cluster are vmssflex nodes. - if c.cloud.VMType == consts.VMTypeStandard || c.cloud.VMType == consts.VMTypeVmssFlex { - return c.cloud.VMSet, nil - } - - // 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to ScaleSet. - // 2.1 all the nodes in the cluster are vmss uniform nodes. - // 2.2 mix node: the nodes in the cluster can be any of avset nodes, vmss uniform nodes and vmssflex nodes. - ss, ok := c.cloud.VMSet.(*ScaleSet) - if !ok { - return nil, fmt.Errorf("error of converting vmSet (%q) to ScaleSet with vmType %q", c.cloud.VMSet, c.cloud.VMType) - } - - vmManagementType, err := ss.getVMManagementTypeByNodeName(mapNodeNameToVMName(nodeName), crt) - if err != nil { - return nil, fmt.Errorf("getNodeVMSet: failed to check the node %s management type: %w", mapNodeNameToVMName(nodeName), err) - } - // 3. If the node is managed by availability set, then return ss.availabilitySet. - if vmManagementType == ManagedByAvSet { - // vm is managed by availability set. - return ss.availabilitySet, nil - } - if vmManagementType == ManagedByVmssFlex { - // 4. If the node is managed by vmss flex, then return ss.flexScaleSet. - // vm is managed by vmss flex. - return ss.flexScaleSet, nil - } - - // 5. 
Node is managed by vmss - return ss, nil - -} - // AttachDisk attaches a disk to vm // occupiedLuns is used to avoid conflict with other disk attach in k8s VolumeAttachments // return (lun, error) @@ -175,7 +138,7 @@ func (c *controllerCommon) AttachDisk(ctx context.Context, diskName, diskURI str // don't check disk state when GetDisk is throttled if disk != nil { if disk.ManagedBy != nil && (disk.MaxShares == nil || *disk.MaxShares <= 1) { - vmset, err := c.getNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe) + vmset, err := c.cloud.GetNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe) if err != nil { return -1, err } @@ -227,11 +190,11 @@ func (c *controllerCommon) AttachDisk(ctx context.Context, diskName, diskURI str } options := AttachDiskOptions{ - lun: -1, - diskName: diskName, - cachingMode: cachingMode, - diskEncryptionSetID: diskEncryptionSetID, - writeAcceleratorEnabled: writeAcceleratorEnabled, + Lun: -1, + DiskName: diskName, + CachingMode: cachingMode, + DiskEncryptionSetID: diskEncryptionSetID, + WriteAcceleratorEnabled: writeAcceleratorEnabled, } node := strings.ToLower(string(nodeName)) diskuri := strings.ToLower(diskURI) @@ -276,7 +239,7 @@ func (c *controllerCommon) AttachDisk(ctx context.Context, diskName, diskURI str return lun, nil } - vmset, err := c.getNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe) + vmset, err := c.cloud.GetNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe) if err != nil { return -1, err } @@ -395,7 +358,7 @@ func (c *controllerCommon) DetachDisk(ctx context.Context, diskName, diskURI str return fmt.Errorf("failed to get azure instance id for node %q: %w", nodeName, err) } - vmset, err := c.getNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe) + vmset, err := c.cloud.GetNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe) if err != nil { return err } @@ -452,7 +415,7 @@ func (c *controllerCommon) DetachDisk(ctx context.Context, diskName, diskURI str // UpdateVM updates a vm func (c *controllerCommon) UpdateVM(ctx context.Context, nodeName types.NodeName) error { - vmset, err := c.getNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe) + vmset, err := c.cloud.GetNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe) if err != nil { return err } @@ -515,7 +478,7 @@ func (c *controllerCommon) cleanDetachDiskRequests(nodeName string) (map[string] // GetNodeDataDisks invokes vmSet interfaces to get data disks for the node. 
func (c *controllerCommon) GetNodeDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, *string, error) { - vmset, err := c.getNodeVMSet(nodeName, crt) + vmset, err := c.cloud.GetNodeVMSet(nodeName, crt) if err != nil { return nil, nil, err } @@ -611,7 +574,7 @@ func (c *controllerCommon) SetDiskLun(nodeName types.NodeName, diskURI string, d if strings.EqualFold(uri, diskURI) { lun = diskLuns[count] } - opt.lun = diskLuns[count] + opt.Lun = diskLuns[count] count++ } if lun < 0 { diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go index 0606ac4d4..229e061b5 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go @@ -56,39 +56,39 @@ func (as *availabilitySet) AttachDisk(ctx context.Context, nodeName types.NodeNa attached := false for _, disk := range *vm.StorageProfile.DataDisks { if disk.ManagedDisk != nil && strings.EqualFold(*disk.ManagedDisk.ID, diskURI) && disk.Lun != nil { - if *disk.Lun == opt.lun { + if *disk.Lun == opt.Lun { attached = true break } - return nil, fmt.Errorf("disk(%s) already attached to node(%s) on LUN(%d), but target LUN is %d", diskURI, nodeName, *disk.Lun, opt.lun) + return nil, fmt.Errorf("disk(%s) already attached to node(%s) on LUN(%d), but target LUN is %d", diskURI, nodeName, *disk.Lun, opt.Lun) } } if attached { - klog.V(2).Infof("azureDisk - disk(%s) already attached to node(%s) on LUN(%d)", diskURI, nodeName, opt.lun) + klog.V(2).Infof("azureDisk - disk(%s) already attached to node(%s) on LUN(%d)", diskURI, nodeName, opt.Lun) continue } managedDisk := &compute.ManagedDiskParameters{ID: &diskURI} - if opt.diskEncryptionSetID == "" { + if opt.DiskEncryptionSetID == "" { if vm.StorageProfile.OsDisk != nil && vm.StorageProfile.OsDisk.ManagedDisk != nil && vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet != nil && vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID != nil { // set diskEncryptionSet as value of os disk by default - opt.diskEncryptionSetID = *vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID + opt.DiskEncryptionSetID = *vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID } } - if opt.diskEncryptionSetID != "" { - managedDisk.DiskEncryptionSet = &compute.DiskEncryptionSetParameters{ID: &opt.diskEncryptionSetID} + if opt.DiskEncryptionSetID != "" { + managedDisk.DiskEncryptionSet = &compute.DiskEncryptionSetParameters{ID: &opt.DiskEncryptionSetID} } disks = append(disks, compute.DataDisk{ - Name: &opt.diskName, - Lun: &opt.lun, - Caching: opt.cachingMode, + Name: &opt.DiskName, + Lun: &opt.Lun, + Caching: opt.CachingMode, CreateOption: "attach", ManagedDisk: managedDisk, - WriteAcceleratorEnabled: pointer.Bool(opt.writeAcceleratorEnabled), + WriteAcceleratorEnabled: pointer.Bool(opt.WriteAcceleratorEnabled), }) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go index 6c666106b..650c13f62 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go @@ -62,40 +62,40 @@ func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, dis attached := false for _, disk := range 
*storageProfile.DataDisks { if disk.ManagedDisk != nil && strings.EqualFold(*disk.ManagedDisk.ID, diskURI) && disk.Lun != nil { - if *disk.Lun == opt.lun { + if *disk.Lun == opt.Lun { attached = true break } - return nil, fmt.Errorf("disk(%s) already attached to node(%s) on LUN(%d), but target LUN is %d", diskURI, nodeName, *disk.Lun, opt.lun) + return nil, fmt.Errorf("disk(%s) already attached to node(%s) on LUN(%d), but target LUN is %d", diskURI, nodeName, *disk.Lun, opt.Lun) } } if attached { - klog.V(2).Infof("azureDisk - disk(%s) already attached to node(%s) on LUN(%d)", diskURI, nodeName, opt.lun) + klog.V(2).Infof("azureDisk - disk(%s) already attached to node(%s) on LUN(%d)", diskURI, nodeName, opt.Lun) continue } managedDisk := &compute.ManagedDiskParameters{ID: &diskURI} - if opt.diskEncryptionSetID == "" { + if opt.DiskEncryptionSetID == "" { if storageProfile.OsDisk != nil && storageProfile.OsDisk.ManagedDisk != nil && storageProfile.OsDisk.ManagedDisk.DiskEncryptionSet != nil && storageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID != nil { // set diskEncryptionSet as value of os disk by default - opt.diskEncryptionSetID = *storageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID + opt.DiskEncryptionSetID = *storageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID } } - if opt.diskEncryptionSetID != "" { - managedDisk.DiskEncryptionSet = &compute.DiskEncryptionSetParameters{ID: &opt.diskEncryptionSetID} + if opt.DiskEncryptionSetID != "" { + managedDisk.DiskEncryptionSet = &compute.DiskEncryptionSetParameters{ID: &opt.DiskEncryptionSetID} } disks = append(disks, compute.DataDisk{ - Name: &opt.diskName, - Lun: &opt.lun, - Caching: opt.cachingMode, + Name: &opt.DiskName, + Lun: &opt.Lun, + Caching: opt.CachingMode, CreateOption: "attach", ManagedDisk: managedDisk, - WriteAcceleratorEnabled: pointer.Bool(opt.writeAcceleratorEnabled), + WriteAcceleratorEnabled: pointer.Bool(opt.WriteAcceleratorEnabled), }) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go index ce4e0c3a0..9fa834934 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go @@ -56,39 +56,39 @@ func (fs *FlexScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, attached := false for _, disk := range *vm.StorageProfile.DataDisks { if disk.ManagedDisk != nil && strings.EqualFold(*disk.ManagedDisk.ID, diskURI) && disk.Lun != nil { - if *disk.Lun == opt.lun { + if *disk.Lun == opt.Lun { attached = true break } - return nil, fmt.Errorf("disk(%s) already attached to node(%s) on LUN(%d), but target LUN is %d", diskURI, nodeName, *disk.Lun, opt.lun) + return nil, fmt.Errorf("disk(%s) already attached to node(%s) on LUN(%d), but target LUN is %d", diskURI, nodeName, *disk.Lun, opt.Lun) } } if attached { - klog.V(2).Infof("azureDisk - disk(%s) already attached to node(%s) on LUN(%d)", diskURI, nodeName, opt.lun) + klog.V(2).Infof("azureDisk - disk(%s) already attached to node(%s) on LUN(%d)", diskURI, nodeName, opt.Lun) continue } managedDisk := &compute.ManagedDiskParameters{ID: &diskURI} - if opt.diskEncryptionSetID == "" { + if opt.DiskEncryptionSetID == "" { if vm.StorageProfile.OsDisk != nil && vm.StorageProfile.OsDisk.ManagedDisk != nil && vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet != nil && 
vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID != nil { // set diskEncryptionSet as value of os disk by default - opt.diskEncryptionSetID = *vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID + opt.DiskEncryptionSetID = *vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID } } - if opt.diskEncryptionSetID != "" { - managedDisk.DiskEncryptionSet = &compute.DiskEncryptionSetParameters{ID: &opt.diskEncryptionSetID} + if opt.DiskEncryptionSetID != "" { + managedDisk.DiskEncryptionSet = &compute.DiskEncryptionSetParameters{ID: &opt.DiskEncryptionSetID} } disks = append(disks, compute.DataDisk{ - Name: &opt.diskName, - Lun: &opt.lun, - Caching: opt.cachingMode, + Name: &opt.DiskName, + Lun: &opt.Lun, + Caching: opt.CachingMode, CreateOption: "attach", ManagedDisk: managedDisk, - WriteAcceleratorEnabled: pointer.Bool(opt.writeAcceleratorEnabled), + WriteAcceleratorEnabled: pointer.Bool(opt.WriteAcceleratorEnabled), }) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go index 0110aa553..8ba4ac31a 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go @@ -20,9 +20,10 @@ import ( "context" "sigs.k8s.io/cloud-provider-azure/pkg/azclient" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient" "sigs.k8s.io/cloud-provider-azure/pkg/provider/config" - "github.com/golang/mock/gomock" + "go.uber.org/mock/gomock" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/record" @@ -117,6 +118,7 @@ func GetTestCloud(ctrl *gomock.Controller) (az *Cloud) { az.VirtualMachineScaleSetVMsClient = mockvmssvmclient.NewMockInterface(ctrl) az.VirtualMachinesClient = mockvmclient.NewMockInterface(ctrl) az.PrivateLinkServiceClient = mockprivatelinkserviceclient.NewMockInterface(ctrl) + az.ComputeClientFactory = mock_azclient.NewMockClientFactory(ctrl) az.VMSet, _ = newAvailabilitySet(az) az.vmCache, _ = az.newVMCache() az.lbCache, _ = az.newLBCache() diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go index f7817d418..168efc90c 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go @@ -26,7 +26,6 @@ import ( "net/netip" "reflect" "sort" - "strconv" "strings" "unicode" @@ -42,12 +41,14 @@ import ( servicehelpers "k8s.io/cloud-provider/service/helpers" "k8s.io/klog/v2" "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "k8s.io/utils/strings/slices" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" "sigs.k8s.io/cloud-provider-azure/pkg/metrics" "sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer" + "sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil" "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) @@ -142,7 +143,7 @@ func (az *Cloud) reconcileService(_ context.Context, clusterName string, service serviceIPs := lbIPsPrimaryPIPs klog.V(2).Infof("reconcileService: reconciling security group for service %q with IPs %q, wantLb = true", serviceName, serviceIPs) - if _, err := az.reconcileSecurityGroup(clusterName, service, &serviceIPs, lb.Name, true /* wantLb */); err != nil { + if _, err := az.reconcileSecurityGroup(clusterName, service, ptr.Deref(lb.Name, ""), serviceIPs, true /* wantLb */); err != nil { 
klog.Errorf("reconcileSecurityGroup(%s) failed: %#v", serviceName, err) return nil, err } @@ -315,13 +316,17 @@ func (az *Cloud) EnsureLoadBalancerDeleted(_ context.Context, clusterName string klog.V(5).InfoS("EnsureLoadBalancerDeleted Finish", "service", serviceName, "cluster", clusterName, "service_spec", service, "error", err) }() - _, _, _, lbIPsPrimaryPIPs, _, err := az.getServiceLoadBalancer(service, clusterName, nil, false, &[]network.LoadBalancer{}) + lb, _, _, lbIPsPrimaryPIPs, _, err := az.getServiceLoadBalancer(service, clusterName, nil, false, &[]network.LoadBalancer{}) if err != nil && !retry.HasStatusForbiddenOrIgnoredError(err) { return err } serviceIPsToCleanup := lbIPsPrimaryPIPs klog.V(2).Infof("EnsureLoadBalancerDeleted: reconciling security group for service %q with IPs %q, wantLb = false", serviceName, serviceIPsToCleanup) - _, err = az.reconcileSecurityGroup(clusterName, service, &serviceIPsToCleanup, nil, false /* wantLb */) + var lbName string + if lb != nil { + lbName = ptr.Deref(lb.Name, "") + } + _, err = az.reconcileSecurityGroup(clusterName, service, lbName, serviceIPsToCleanup, false /* wantLb */) if err != nil { return err } @@ -887,14 +892,14 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L } // set additional public IPs to LoadBalancerStatus, so that kube-proxy would create their iptables rules. - additionalIPs, err := getServiceAdditionalPublicIPs(service) + additionalIPs, err := loadbalancer.AdditionalPublicIPs(service) if err != nil { return &v1.LoadBalancerStatus{Ingress: lbIngresses}, lbIPsPrimaryPIPs, fipConfigs, err } if len(additionalIPs) > 0 { for _, pip := range additionalIPs { lbIngresses = append(lbIngresses, v1.LoadBalancerIngress{ - IP: pip, + IP: pip.String(), }) } } @@ -2809,391 +2814,141 @@ func (az *Cloud) getExpectedHAModeLoadBalancingRuleProperties( // This reconciles the Network Security Group similar to how the LB is reconciled. // This entails adding required, missing SecurityRules and removing stale rules. -func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, lbIPs *[]string, lbName *string, wantLb bool) (*network.SecurityGroup, error) { - serviceName := getServiceName(service) - klog.V(5).Infof("reconcileSecurityGroup(%s): START clusterName=%q", serviceName, clusterName) - - ports := service.Spec.Ports - if ports == nil { - if useSharedSecurityRule(service) { - klog.V(2).Infof("Attempting to reconcile security group for service %s, but service uses shared rule and we don't know which port it's for", service.Name) - return nil, fmt.Errorf("no port info for reconciling shared rule for service %s", service.Name) - } - ports = []v1.ServicePort{} - } - - sg, err := az.getSecurityGroup(azcache.CacheReadTypeDefault) - if err != nil { - return nil, err - } - - if wantLb && lbIPs == nil { +func (az *Cloud) reconcileSecurityGroup( + clusterName string, service *v1.Service, + lbName string, lbIPs []string, + wantLb bool, +) (*network.SecurityGroup, error) { + logger := klog.Background().WithName("reconcileSecurityGroup"). + WithValues("cluster", clusterName). + WithValues("service", getServiceName(service)). + WithValues("load-balancer", lbName). 
+ WithValues("delete-lb", !wantLb) + logger.V(2).Info("Starting") + + if wantLb && len(lbIPs) == 0 { return nil, fmt.Errorf("no load balancer IP for setting up security rules for service %s", service.Name) } - destinationIPAddresses := map[bool][]string{} - if lbIPs != nil { - for _, ip := range *lbIPs { - if net.ParseIP(ip).To4() != nil { - destinationIPAddresses[false] = append(destinationIPAddresses[false], ip) - } else { - destinationIPAddresses[true] = append(destinationIPAddresses[true], ip) - } - } - } - - if len(destinationIPAddresses[false]) == 0 { - destinationIPAddresses[false] = []string{"*"} - } - if len(destinationIPAddresses[true]) == 0 { - destinationIPAddresses[true] = []string{"*"} - } - - disableFloatingIP := false - if consts.IsK8sServiceDisableLoadBalancerFloatingIP(service) { - disableFloatingIP = true + additionalIPs, err := loadbalancer.AdditionalPublicIPs(service) + if wantLb && err != nil { + return nil, fmt.Errorf("unable to get additional public IPs: %w", err) } - backendIPAddresses := map[bool][]string{} - if wantLb && disableFloatingIP { - lb, exist, err := az.getAzureLoadBalancer(pointer.StringDeref(lbName, ""), azcache.CacheReadTypeDefault) + var accessControl *loadbalancer.AccessControl + { + sg, err := az.getSecurityGroup(azcache.CacheReadTypeDefault) if err != nil { return nil, err } - if !exist { - return nil, fmt.Errorf("unable to get lb %s", pointer.StringDeref(lbName, "")) - } - backendIPAddresses[false], backendIPAddresses[true] = az.LoadBalancerBackendPool.GetBackendPrivateIPs(clusterName, service, lb) - } - - additionalIPs, err := getServiceAdditionalPublicIPs(service) - if err != nil { - return nil, fmt.Errorf("unable to get additional public IPs, error=%w", err) - } - for _, ip := range additionalIPs { - isIPv6 := net.ParseIP(ip).To4() == nil - if len(destinationIPAddresses[isIPv6]) != 1 || destinationIPAddresses[isIPv6][0] != "*" { - destinationIPAddresses[isIPv6] = append(destinationIPAddresses[isIPv6], ip) - } - } - accessControl, err := loadbalancer.NewAccessControl(service) - if err != nil { - klog.ErrorS(err, "Failed to parse access control configuration for service", "service", service.Name) - return nil, err - } - - var ( - sourceRanges = accessControl.SourceRanges() - allowedServiceTags = accessControl.AllowedServiceTags() - allowedIPRanges = accessControl.AllowedIPRanges() - sourceAddressPrefixes = map[bool][]string{ - false: accessControl.IPV4Sources(), - true: accessControl.IPV6Sources(), + var opts []loadbalancer.AccessControlOption + if !wantLb { + // When deleting LB, we don't need to validate the annotation + opts = append(opts, loadbalancer.SkipAnnotationValidation()) } - ) - - if len(sourceRanges) != 0 && len(allowedIPRanges) != 0 { - // Block the service and return error if both of spec.loadBalancerSourceRanges and annotation are specified - klog.Errorf("Service %s is using both of spec.loadBalancerSourceRanges and annotation %s.", service.Name, consts.ServiceAnnotationAllowedIPRanges) - return nil, fmt.Errorf( - "both of spec.loadBalancerSourceRanges and annotation %s are specified for service %s, which is not allowed", - consts.ServiceAnnotationAllowedIPRanges, service.Name, - ) - } - if len(sourceRanges) != 0 && len(allowedServiceTags) != 0 { - // Suggesting to use aks custom annotation instead of spec.loadBalancerSourceRanges - klog.Warningf( - "Service %s is using both of spec.loadBalancerSourceRanges and annotation %s.", - service.Name, consts.ServiceAnnotationAllowedServiceTags, - ) - az.Event(service, 
v1.EventTypeWarning, "ConflictConfiguration", fmt.Sprintf( - "Please use annotation %s instead of spec.loadBalancerSourceRanges while using %s annotation at the same time.", - consts.ServiceAnnotationAllowedIPRanges, consts.ServiceAnnotationAllowedServiceTags, - )) - } - - var expectedSecurityRules []network.SecurityRule - handleSecurityRules := func(isIPv6 bool) error { - expectedSecurityRulesSingleStack, err := az.getExpectedSecurityRules( - wantLb, ports, - sourceAddressPrefixes[isIPv6], service, - destinationIPAddresses[isIPv6], sourceRanges, - backendIPAddresses[isIPv6], disableFloatingIP, isIPv6, - ) - expectedSecurityRules = append(expectedSecurityRules, expectedSecurityRulesSingleStack...) - return err - } - v4Enabled, v6Enabled := getIPFamiliesEnabled(service) - if v4Enabled { - if err := handleSecurityRules(false); err != nil { - return nil, err - } - } - if v6Enabled { - if err := handleSecurityRules(true); err != nil { - return nil, err - } - } - - // update security rules - dirtySg, updatedRules, err := az.reconcileSecurityRules(sg, service, serviceName, wantLb, expectedSecurityRules, ports, sourceAddressPrefixes, destinationIPAddresses) - if err != nil { - return nil, err - } - - changed := az.ensureSecurityGroupTagged(&sg) - if changed { - dirtySg = true - } - - if dirtySg { - sg.SecurityRules = &updatedRules - klog.V(2).Infof("reconcileSecurityGroup for service(%s): sg(%s) - updating", serviceName, *sg.Name) - klog.V(10).Infof("CreateOrUpdateSecurityGroup(%q): start", *sg.Name) - err := az.CreateOrUpdateSecurityGroup(sg) + accessControl, err = loadbalancer.NewAccessControl(service, &sg, opts...) if err != nil { - klog.V(2).Infof("ensure(%s) abort backoff: sg(%s) - updating", serviceName, *sg.Name) + logger.Error(err, "Failed to parse access control configuration for service") return nil, err } - klog.V(10).Infof("CreateOrUpdateSecurityGroup(%q): end", *sg.Name) - _ = az.nsgCache.Delete(pointer.StringDeref(sg.Name, "")) - } - return &sg, nil -} - -func (az *Cloud) reconcileSecurityRules(sg network.SecurityGroup, - service *v1.Service, - serviceName string, - wantLb bool, - expectedSecurityRules []network.SecurityRule, - ports []v1.ServicePort, - sourceAddressPrefixes, destinationIPAddresses map[bool][]string, -) (bool, []network.SecurityRule, error) { - dirtySg := false - var updatedRules []network.SecurityRule - if sg.SecurityGroupPropertiesFormat != nil && sg.SecurityGroupPropertiesFormat.SecurityRules != nil { - updatedRules = *sg.SecurityGroupPropertiesFormat.SecurityRules - } - - for _, r := range updatedRules { - klog.V(10).Infof("Existing security rule while processing %s: %s:%s -> %s:%s", service.Name, logSafe(r.SourceAddressPrefix), logSafe(r.SourcePortRange), logSafeCollection(r.DestinationAddressPrefix, r.DestinationAddressPrefixes), logSafe(r.DestinationPortRange)) - } - - // update security rules: remove unwanted rules that belong privately - // to this service - for i := len(updatedRules) - 1; i >= 0; i-- { - existingRule := updatedRules[i] - if az.serviceOwnsRule(service, *existingRule.Name) { - klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name) - keepRule := false - if findSecurityRule(expectedSecurityRules, existingRule) { - klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - keeping", serviceName, wantLb, *existingRule.Name) - keepRule = true - } - if !keepRule { - klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - dropping", serviceName, wantLb, *existingRule.Name) - updatedRules = 
append(updatedRules[:i], updatedRules[i+1:]...) - dirtySg = true - } + // - use both annotation `service.beta.kubernetes.io/azure-allowed-service-tags` and `spec.loadBalancerSourceRanges` + // WARNING: This issue has been around for a while, and we shouldn’t mess with the existing settings. + if len(accessControl.SourceRanges) > 0 && len(accessControl.AllowedServiceTags) > 0 { + // Suggesting to use aks custom annotation instead of spec.loadBalancerSourceRanges + logger.V(2).Info( + "Service is using both of spec.loadBalancerSourceRanges and annotation service.beta.kubernetes.io/azure-allowed-service-tags", + ) + az.Event(service, v1.EventTypeWarning, "ConflictConfiguration", fmt.Sprintf( + "Please use annotation %s instead of spec.loadBalancerSourceRanges while using %s annotation at the same time.", + consts.ServiceAnnotationAllowedIPRanges, consts.ServiceAnnotationAllowedServiceTags, + )) } } - // update security rules: if the service uses a shared rule and is being deleted, - // then remove it from the shared rule - handleRule := func(isIPv6 bool) { - if useSharedSecurityRule(service) && !wantLb { - for _, port := range ports { - for _, sourceAddressPrefix := range sourceAddressPrefixes[isIPv6] { - sharedRuleName := az.getSecurityRuleName(service, port, sourceAddressPrefix, isIPv6) - sharedIndex, sharedRule, sharedRuleFound := findSecurityRuleByName(updatedRules, sharedRuleName) - if !sharedRuleFound { - klog.V(4).Infof("Didn't find shared rule %s for service %s", sharedRuleName, service.Name) - continue - } - shouldDeleteNSGRule := false - if sharedRule.SecurityRulePropertiesFormat == nil || - sharedRule.SecurityRulePropertiesFormat.DestinationAddressPrefixes == nil || - len(*sharedRule.SecurityRulePropertiesFormat.DestinationAddressPrefixes) == 0 { - shouldDeleteNSGRule = true - } else { - existingPrefixes := *sharedRule.DestinationAddressPrefixes - for _, destinationIPAddress := range destinationIPAddresses[isIPv6] { - addressIndex, found := findIndex(existingPrefixes, destinationIPAddress) - if !found { - klog.Warningf("Didn't find destination address %v in shared rule %s for service %s", destinationIPAddress, sharedRuleName, service.Name) - continue - } - if len(existingPrefixes) == 1 { - shouldDeleteNSGRule = true - break //shared nsg rule has only one entry and entry owned by deleted svc has been found. skip the rest of the entries - } - newDestinations := append(existingPrefixes[:addressIndex], existingPrefixes[addressIndex+1:]...) - sharedRule.DestinationAddressPrefixes = &newDestinations - updatedRules[sharedIndex] = sharedRule - - dirtySg = true - } - } - - if shouldDeleteNSGRule { - klog.V(4).Infof("shared rule will be deleted because last service %s which refers this rule is deleted.", service.Name) - updatedRules = append(updatedRules[:sharedIndex], updatedRules[sharedIndex+1:]...) 
- dirtySg = true - continue - } - } + var ( + disableFloatingIP = consts.IsK8sServiceDisableLoadBalancerFloatingIP(service) + lbIPAddresses, _ = iputil.ParseAddresses(lbIPs) + lbIPv4Addresses, lbIPv6Addresses = iputil.GroupAddressesByFamily(lbIPAddresses) + additionalIPv4Addresses, additionalIPv6Addresses = iputil.GroupAddressesByFamily(additionalIPs) + backendIPv4Addresses, backendIPv6Addresses []netip.Addr + ) + { + // Get backend node IPs + lb, lbFound, err := az.getAzureLoadBalancer(lbName, azcache.CacheReadTypeDefault) + { + if err != nil { + return nil, err } - } - } - v4Enabled, v6Enabled := getIPFamiliesEnabled(service) - if v4Enabled { - handleRule(consts.IPVersionIPv4) - } - if v6Enabled { - handleRule(consts.IPVersionIPv6) - } - - // update security rules: prepare rules for consolidation - for index, rule := range updatedRules { - if allowsConsolidation(rule) { - updatedRules[index] = makeConsolidatable(rule) - } - } - for index, rule := range expectedSecurityRules { - if allowsConsolidation(rule) { - expectedSecurityRules[index] = makeConsolidatable(rule) - } - } - // update security rules: add needed - for _, expectedRule := range expectedSecurityRules { - foundRule := false - if findSecurityRule(updatedRules, expectedRule) { - klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name) - foundRule = true - } - if foundRule && allowsConsolidation(expectedRule) { - index, _ := findConsolidationCandidate(updatedRules, expectedRule) - if updatedRules[index].DestinationAddressPrefixes != nil { - updatedRules[index] = consolidate(updatedRules[index], expectedRule) - } else { - updatedRules = append(updatedRules[:index], updatedRules[index+1:]...) + if wantLb && !lbFound { + logger.Error(err, "Failed to get load balancer") + return nil, fmt.Errorf("unable to get lb %s", lbName) } - dirtySg = true } - if !foundRule && wantLb { - klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - adding", serviceName, wantLb, *expectedRule.Name) - - nextAvailablePriority, err := getNextAvailablePriority(updatedRules) - if err != nil { - return false, nil, err - } - - expectedRule.Priority = pointer.Int32(nextAvailablePriority) - updatedRules = append(updatedRules, expectedRule) - dirtySg = true + var backendIPv4List, backendIPv6List []string + if lbFound { + backendIPv4List, backendIPv6List = az.LoadBalancerBackendPool.GetBackendPrivateIPs(clusterName, service, lb) } + backendIPv4Addresses, _ = iputil.ParseAddresses(backendIPv4List) + backendIPv6Addresses, _ = iputil.ParseAddresses(backendIPv6List) } - updatedRules = removeDuplicatedSecurityRules(updatedRules) - - for _, r := range updatedRules { - klog.V(10).Infof("Updated security rule while processing %s: %s:%s -> %s:%s", service.Name, logSafe(r.SourceAddressPrefix), logSafe(r.SourcePortRange), logSafeCollection(r.DestinationAddressPrefix, r.DestinationAddressPrefixes), logSafe(r.DestinationPortRange)) - } + { + // Disassociate all IPs from the security group + dstIPv4Addresses := append(lbIPv4Addresses, backendIPv4Addresses...) + dstIPv4Addresses = append(dstIPv4Addresses, additionalIPv4Addresses...) - return dirtySg, updatedRules, nil -} + dstIPv6Addresses := append(lbIPv6Addresses, backendIPv6Addresses...) + dstIPv6Addresses = append(dstIPv6Addresses, additionalIPv6Addresses...) 
-func (az *Cloud) getExpectedSecurityRules( - wantLb bool, - ports []v1.ServicePort, - sourceAddressPrefixes []string, - service *v1.Service, - destinationIPAddresses []string, - sourceRanges []netip.Prefix, - backendIPAddresses []string, - disableFloatingIP, isIPv6 bool, -) ([]network.SecurityRule, error) { - expectedSecurityRules := []network.SecurityRule{} + accessControl.CleanSecurityGroup(dstIPv4Addresses, dstIPv6Addresses) + } if wantLb { - expectedSecurityRules = make([]network.SecurityRule, len(ports)*len(sourceAddressPrefixes)) - - for i, port := range ports { - _, securityProto, _, err := getProtocolsFromKubernetesProtocol(port.Protocol) - if err != nil { - return nil, err - } - dstPort := port.Port - if disableFloatingIP { - dstPort = port.NodePort - } - for j := range sourceAddressPrefixes { - ix := i*len(sourceAddressPrefixes) + j - securityRuleName := az.getSecurityRuleName(service, port, sourceAddressPrefixes[j], isIPv6) - nsgRule := network.SecurityRule{ - Name: pointer.String(securityRuleName), - SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{ - Protocol: *securityProto, - SourcePortRange: pointer.String("*"), - DestinationPortRange: pointer.String(strconv.Itoa(int(dstPort))), - SourceAddressPrefix: pointer.String(sourceAddressPrefixes[j]), - Access: network.SecurityRuleAccessAllow, - Direction: network.SecurityRuleDirectionInbound, - }, - } + var ( + dstIPv4Addresses = additionalIPv4Addresses + dstIPv6Addresses = additionalIPv6Addresses + ) - if len(destinationIPAddresses) == 1 && disableFloatingIP { - nsgRule.DestinationAddressPrefixes = &(backendIPAddresses) - } else if len(destinationIPAddresses) == 1 && !disableFloatingIP { - // continue to use DestinationAddressPrefix to avoid NSG updates for existing rules. - nsgRule.DestinationAddressPrefix = pointer.String(destinationIPAddresses[0]) - } else { - nsgRule.DestinationAddressPrefixes = &(destinationIPAddresses) - } - expectedSecurityRules[ix] = nsgRule - } + if disableFloatingIP { + // use the backend node IPs + dstIPv4Addresses = append(dstIPv4Addresses, backendIPv4Addresses...) + dstIPv6Addresses = append(dstIPv6Addresses, backendIPv6Addresses...) + } else { + // use the LoadBalancer IPs + dstIPv4Addresses = append(dstIPv4Addresses, lbIPv4Addresses...) + dstIPv6Addresses = append(dstIPv6Addresses, lbIPv6Addresses...) } - shouldAddDenyRule := false - if len(sourceRanges) > 0 && !loadbalancer.IsCIDRsAllowAll(sourceRanges) { - if v, ok := service.Annotations[consts.ServiceAnnotationDenyAllExceptLoadBalancerSourceRanges]; ok && strings.EqualFold(v, consts.TrueAnnotationValue) { - shouldAddDenyRule = true - } - } - if shouldAddDenyRule { - for _, port := range ports { - _, securityProto, _, err := getProtocolsFromKubernetesProtocol(port.Protocol) - if err != nil { - return nil, err - } - securityRuleName := az.getSecurityRuleName(service, port, "deny_all", isIPv6) - nsgRule := network.SecurityRule{ - Name: pointer.String(securityRuleName), - SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{ - Protocol: *securityProto, - SourcePortRange: pointer.String("*"), - DestinationPortRange: pointer.String(strconv.Itoa(int(port.Port))), - SourceAddressPrefix: pointer.String("*"), - Access: network.SecurityRuleAccessDeny, - Direction: network.SecurityRuleDirectionInbound, - }, - } - if len(destinationIPAddresses) == 1 { - // continue to use DestinationAddressPrefix to avoid NSG updates for existing rules. 
- nsgRule.DestinationAddressPrefix = pointer.String(destinationIPAddresses[0]) - } else { - nsgRule.DestinationAddressPrefixes = &(destinationIPAddresses) - } - expectedSecurityRules = append(expectedSecurityRules, nsgRule) - } + err := accessControl.PatchSecurityGroup(dstIPv4Addresses, dstIPv6Addresses) + if err != nil { + logger.Error(err, "Failed to patch security group") + return nil, err } } - for _, r := range expectedSecurityRules { - klog.V(10).Infof("Expecting security rule for %s: %s:%s -> %v %v :%s", service.Name, pointer.StringDeref(r.SourceAddressPrefix, ""), pointer.StringDeref(r.SourcePortRange, ""), pointer.StringDeref(r.DestinationAddressPrefix, ""), stringSlice(r.DestinationAddressPrefixes), pointer.StringDeref(r.DestinationPortRange, "")) + rv, updated, err := accessControl.SecurityGroup() + if err != nil { + err = fmt.Errorf("unable to apply access control configuration to security group: %w", err) + logger.Error(err, "Failed to get security group after patching") + return nil, err + } + if az.ensureSecurityGroupTagged(rv) { + updated = true + } + + if updated { + logger.V(2).Info("Preparing to update security group") + logger.V(10).Info("CreateOrUpdateSecurityGroup begin") + err := az.CreateOrUpdateSecurityGroup(*rv) + if err != nil { + logger.Error(err, "Failed to update security group") + return nil, err + } + logger.V(10).Info("CreateOrUpdateSecurityGroup end") + _ = az.nsgCache.Delete(pointer.StringDeref(rv.Name, "")) } - return expectedSecurityRules, nil + return rv, nil } func (az *Cloud) shouldUpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) (bool, error) { @@ -3206,142 +2961,10 @@ func (az *Cloud) shouldUpdateLoadBalancer(clusterName string, service *v1.Servic return existsLb && service.ObjectMeta.DeletionTimestamp == nil && service.Spec.Type == v1.ServiceTypeLoadBalancer, nil } -func logSafe(s *string) string { - if s == nil { - return "(nil)" - } - return *s -} - -func logSafeCollection(s *string, strs *[]string) string { - if s == nil { - if strs == nil { - return "(nil)" - } - return "[" + strings.Join(*strs, ",") + "]" - } - return *s -} - -func findSecurityRuleByName(rules []network.SecurityRule, ruleName string) (int, network.SecurityRule, bool) { - for index, rule := range rules { - if rule.Name != nil && strings.EqualFold(*rule.Name, ruleName) { - return index, rule, true - } - } - return 0, network.SecurityRule{}, false -} - -func findIndex(strs []string, s string) (int, bool) { - for index, str := range strs { - if strings.EqualFold(str, s) { - return index, true - } - } - return 0, false -} - func allowsConsolidation(rule network.SecurityRule) bool { return strings.HasPrefix(pointer.StringDeref(rule.Name, ""), "shared") } -func findConsolidationCandidate(rules []network.SecurityRule, rule network.SecurityRule) (int, bool) { - for index, r := range rules { - if allowsConsolidation(r) { - if strings.EqualFold(pointer.StringDeref(r.Name, ""), pointer.StringDeref(rule.Name, "")) { - return index, true - } - } - } - - return 0, false -} - -func makeConsolidatable(rule network.SecurityRule) network.SecurityRule { - return network.SecurityRule{ - Name: rule.Name, - SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{ - Priority: rule.Priority, - Protocol: rule.Protocol, - SourcePortRange: rule.SourcePortRange, - SourcePortRanges: rule.SourcePortRanges, - DestinationPortRange: rule.DestinationPortRange, - DestinationPortRanges: rule.DestinationPortRanges, - SourceAddressPrefix: rule.SourceAddressPrefix, - 
SourceAddressPrefixes: rule.SourceAddressPrefixes, - DestinationAddressPrefixes: collectionOrSingle(rule.DestinationAddressPrefixes, rule.DestinationAddressPrefix), - Access: rule.Access, - Direction: rule.Direction, - }, - } -} - -func consolidate(existingRule network.SecurityRule, newRule network.SecurityRule) network.SecurityRule { - destinations := appendElements(existingRule.SecurityRulePropertiesFormat.DestinationAddressPrefixes, newRule.DestinationAddressPrefix, newRule.DestinationAddressPrefixes) - destinations = deduplicate(destinations) // there are transient conditions during controller startup where it tries to add a service that is already added - - return network.SecurityRule{ - Name: existingRule.Name, - SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{ - Priority: existingRule.Priority, - Protocol: existingRule.Protocol, - SourcePortRange: existingRule.SourcePortRange, - SourcePortRanges: existingRule.SourcePortRanges, - DestinationPortRange: existingRule.DestinationPortRange, - DestinationPortRanges: existingRule.DestinationPortRanges, - SourceAddressPrefix: existingRule.SourceAddressPrefix, - SourceAddressPrefixes: existingRule.SourceAddressPrefixes, - DestinationAddressPrefixes: destinations, - Access: existingRule.Access, - Direction: existingRule.Direction, - }, - } -} - -func collectionOrSingle(collection *[]string, s *string) *[]string { - if collection != nil && len(*collection) > 0 { - return collection - } - if s == nil { - return &[]string{} - } - return &[]string{*s} -} - -func appendElements(collection *[]string, appendString *string, appendStrings *[]string) *[]string { - newCollection := []string{} - - if collection != nil { - newCollection = append(newCollection, *collection...) - } - if appendString != nil { - newCollection = append(newCollection, *appendString) - } - if appendStrings != nil { - newCollection = append(newCollection, *appendStrings...) 
- } - - return &newCollection -} - -func deduplicate(collection *[]string) *[]string { - if collection == nil { - return nil - } - - seen := map[string]bool{} - result := make([]string, 0, len(*collection)) - - for _, v := range *collection { - if !seen[v] { - seen[v] = true - result = append(result, v) - } - } - - return &result -} - // Determine if we should release existing owned public IPs func shouldReleaseExistingOwnedPublicIP(existingPip *network.PublicIPAddress, lbShouldExist, lbIsInternal, isUserAssignedPIP bool, desiredPipName string, ipTagRequest serviceIPTagRequest) bool { // skip deleting user created pip diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_loadbalancer_backendpool.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_loadbalancer_backendpool.go index 1c81fb761..5735ed7f1 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_loadbalancer_backendpool.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_loadbalancer_backendpool.go @@ -20,7 +20,7 @@ import ( reflect "reflect" network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" v1 "k8s.io/api/core/v1" ) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go index c4102e2e9..833fb38bc 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go @@ -23,7 +23,7 @@ import ( compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" azure "github.com/Azure/go-autorest/autorest/azure" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" v1 "k8s.io/api/core/v1" types "k8s.io/apimachinery/pkg/types" diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go index 8256dc219..ca2a1bdae 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go @@ -246,10 +246,6 @@ func getIPConfigByIPFamily(nic network.Interface, IPv6 bool) (*network.Interface return nil, fmt.Errorf("failed to determine the ipconfig(IPv6=%v). nicname=%q", IPv6, pointer.StringDeref(nic.Name, "")) } -func isInternalLoadBalancer(lb *network.LoadBalancer) bool { - return strings.HasSuffix(*lb.Name, consts.InternalLoadBalancerNameSuffix) -} - // getBackendPoolName the LB BackendPool name for a service. 
// to ensure backward and forward compat: // SingleStack -v4 (pre v1.16) => BackendPool name == clusterName @@ -342,7 +338,14 @@ func (az *Cloud) getPublicIPName(clusterName string, service *v1.Service, isIPv6 pipName = fmt.Sprintf("%s-%s", pipName, id) } } - return getResourceByIPFamily(pipName, isDualStack, isIPv6), nil + + pipNameSegment := pipName + maxLength := consts.PIPPrefixNameMaxLength - consts.IPFamilySuffixLength + if len(pipName) > maxLength { + pipNameSegment = pipNameSegment[:maxLength] + klog.V(6).Infof("original PIP name is lengthy %q, truncate it to %q", pipName, pipNameSegment) + } + return getResourceByIPFamily(pipNameSegment, isDualStack, isIPv6), nil } func publicIPOwnsFrontendIP(service *v1.Service, fip *network.FrontendIPConfiguration, pip *network.PublicIPAddress) bool { diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go index 52d296292..a8d683efc 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go @@ -26,13 +26,14 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + privatedns "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" - "github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns" "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage" "k8s.io/klog/v2" "k8s.io/utils/pointer" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient" "sigs.k8s.io/cloud-provider-azure/pkg/cache" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" @@ -344,7 +345,11 @@ func (az *Cloud) EnsureStorageAccount(ctx context.Context, accountOptions *Accou } if pointer.BoolDeref(accountOptions.CreatePrivateEndpoint, false) { - if _, err := az.privatednsclient.Get(ctx, vnetResourceGroup, privateDNSZoneName); err != nil { + clientFactory := az.NetworkClientFactory + if clientFactory == nil { + clientFactory = az.ComputeClientFactory + } + if _, err := clientFactory.GetPrivateZoneClient().Get(ctx, vnetResourceGroup, privateDNSZoneName); err != nil { klog.V(2).Infof("get private dns zone %s returned with %v", privateDNSZoneName, err.Error()) // Create DNS zone first, this could make sure driver has write permission on vnetResourceGroup if err := az.createPrivateDNSZone(ctx, vnetResourceGroup, privateDNSZoneName); err != nil { @@ -354,7 +359,7 @@ func (az *Cloud) EnsureStorageAccount(ctx context.Context, accountOptions *Accou // Create virtual link to the private DNS zone vNetLinkName := vnetName + "-vnetlink" - if _, err := az.virtualNetworkLinksClient.Get(ctx, vnetResourceGroup, privateDNSZoneName, vNetLinkName); err != nil { + if _, err := clientFactory.GetVirtualNetworkLinkClient().Get(ctx, vnetResourceGroup, privateDNSZoneName, vNetLinkName); err != nil { klog.V(2).Infof("get virtual link for vnet(%s) and DNS Zone(%s) returned with %v", vnetName, privateDNSZoneName, err.Error()) if err := az.createVNetLink(ctx, vNetLinkName, vnetResourceGroup, vnetName, privateDNSZoneName); err != nil { return "", "", fmt.Errorf("create virtual link for vnet(%s) and DNS Zone(%s) in resourceGroup(%s): %w", vnetName, privateDNSZoneName, vnetResourceGroup, err) @@ -609,27 +614,44 @@ func (az *Cloud) createPrivateDNSZone(ctx context.Context, 
vnetResourceGroup, pr klog.V(2).Infof("Creating private dns zone(%s) in resourceGroup (%s)", privateDNSZoneName, vnetResourceGroup) location := LocationGlobal privateDNSZone := privatedns.PrivateZone{Location: &location} - if err := az.privatednsclient.CreateOrUpdate(ctx, vnetResourceGroup, privateDNSZoneName, privateDNSZone, "", true); err != nil { - if strings.Contains(err.Error().Error(), "exists already") { + var clientFactory azclient.ClientFactory + if az.NetworkClientFactory != nil { + clientFactory = az.NetworkClientFactory + } else { + clientFactory = az.ComputeClientFactory + } + privatednsclient := clientFactory.GetPrivateZoneClient() + + if _, err := privatednsclient.CreateOrUpdate(ctx, vnetResourceGroup, privateDNSZoneName, privateDNSZone); err != nil { + if strings.Contains(err.Error(), "exists already") { klog.V(2).Infof("private dns zone(%s) in resourceGroup (%s) already exists", privateDNSZoneName, vnetResourceGroup) return nil } - return err.Error() + return err } return nil } func (az *Cloud) createVNetLink(ctx context.Context, vNetLinkName, vnetResourceGroup, vnetName, privateDNSZoneName string) error { klog.V(2).Infof("Creating virtual link for vnet(%s) and DNS Zone(%s) in resourceGroup(%s)", vNetLinkName, privateDNSZoneName, vnetResourceGroup) + var clientFactory azclient.ClientFactory + if az.cloud.NetworkClientFactory != nil { + clientFactory = az.cloud.NetworkClientFactory + } else { + clientFactory = az.cloud.ComputeClientFactory + } + vnetLinkClient := clientFactory.GetVirtualNetworkLinkClient() + location := LocationGlobal vnetID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s", az.SubscriptionID, vnetResourceGroup, vnetName) parameters := privatedns.VirtualNetworkLink{ Location: &location, - VirtualNetworkLinkProperties: &privatedns.VirtualNetworkLinkProperties{ + Properties: &privatedns.VirtualNetworkLinkProperties{ VirtualNetwork: &privatedns.SubResource{ID: &vnetID}, RegistrationEnabled: pointer.Bool(false)}, } - return az.virtualNetworkLinksClient.CreateOrUpdate(ctx, vnetResourceGroup, privateDNSZoneName, vNetLinkName, parameters, "", false).Error() + _, err := vnetLinkClient.CreateOrUpdate(ctx, vnetResourceGroup, privateDNSZoneName, vNetLinkName, parameters) + return err } func (az *Cloud) createPrivateDNSZoneGroup(ctx context.Context, dnsZoneGroupName, privateEndpointName, vnetResourceGroup, vnetName, privateDNSZoneName string) error { diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go index 3c069f0a2..70ae36338 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go @@ -196,31 +196,6 @@ func getExtendedLocationTypeFromString(extendedLocationType string) network.Exte return network.EdgeZone } -func getServiceAdditionalPublicIPs(service *v1.Service) ([]string, error) { - if service == nil { - return nil, nil - } - - result := []string{} - if val, ok := service.Annotations[consts.ServiceAnnotationAdditionalPublicIPs]; ok { - pips := strings.Split(strings.TrimSpace(val), ",") - for _, pip := range pips { - ip := strings.TrimSpace(pip) - if ip == "" { - continue // skip empty string - } - - if net.ParseIP(ip) == nil { - return nil, fmt.Errorf("%s is not a valid IP address", ip) - } - - result = append(result, ip) - } - } - - return result, nil -} - func getNodePrivateIPAddress(node *v1.Node, isIPv6 bool) string { 
for _, nodeAddress := range node.Status.Addresses { if strings.EqualFold(string(nodeAddress.Type), string(v1.NodeInternalIP)) && @@ -587,3 +562,7 @@ func getResourceGroupAndNameFromNICID(ipConfigurationID string) (string, string, } return nicResourceGroup, nicName, nil } + +func isInternalLoadBalancer(lb *network.LoadBalancer) bool { + return strings.HasSuffix(strings.ToLower(*lb.Name), consts.InternalLoadBalancerNameSuffix) +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/accesscontrol.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/accesscontrol.go index c71758469..05e393d6d 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/accesscontrol.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/accesscontrol.go @@ -21,182 +21,302 @@ import ( "net/netip" "strings" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" v1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" + "k8s.io/utils/ptr" "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/fnutil" + "sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil" + "sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/securitygroup" ) -// IsInternal returns true if the given service is internal load balancer. -func IsInternal(svc *v1.Service) bool { - value, found := svc.Annotations[consts.ServiceAnnotationLoadBalancerInternal] - return found && strings.ToLower(value) == "true" -} - -// IsExternal returns true if the given service is external load balancer. -func IsExternal(svc *v1.Service) bool { - return !IsInternal(svc) -} +var ( + ErrSetBothLoadBalancerSourceRangesAndAllowedIPRanges = fmt.Errorf( + "cannot set both spec.LoadBalancerSourceRanges and service annotation %s", consts.ServiceAnnotationAllowedIPRanges, + ) +) -// AllowedServiceTags returns the allowed service tags configured by user through AKS custom annotation. -func AllowedServiceTags(svc *v1.Service) ([]string, error) { - const Sep = "," +type AccessControl struct { + logger klog.Logger + svc *v1.Service + sgHelper *securitygroup.RuleHelper - value, found := svc.Annotations[consts.ServiceAnnotationAllowedServiceTags] - if !found { - return nil, nil - } + // immutable pre-compute states. + SourceRanges []netip.Prefix + AllowedIPRanges []netip.Prefix + AllowedServiceTags []string + securityRuleDestinationPortsByProtocol map[network.SecurityRuleProtocol][]int32 +} - return strings.Split(strings.TrimSpace(value), Sep), nil +type accessControlOptions struct { + SkipAnnotationValidation bool } -// AllowedIPRanges returns the allowed IP ranges configured by user through AKS custom annotation. 
-func AllowedIPRanges(svc *v1.Service) ([]netip.Prefix, error) { - const Sep = "," +var defaultAccessControlOptions = accessControlOptions{ + SkipAnnotationValidation: false, +} - value, found := svc.Annotations[consts.ServiceAnnotationAllowedIPRanges] - if !found { - return nil, nil - } +type AccessControlOption func(*accessControlOptions) - rv, err := ParseCIDRs(strings.Split(strings.TrimSpace(value), Sep)) - if err != nil { - return nil, fmt.Errorf("invalid service annotation %s:%s: %w", consts.ServiceAnnotationAllowedIPRanges, value, err) +func SkipAnnotationValidation() AccessControlOption { + return func(o *accessControlOptions) { + o.SkipAnnotationValidation = true } - - return rv, nil } -// SourceRanges returns the allowed IP ranges configured by user through `spec.LoadBalancerSourceRanges` and standard annotation. -// If `spec.LoadBalancerSourceRanges` is not set, it will try to parse the annotation. -func SourceRanges(svc *v1.Service) ([]netip.Prefix, error) { - if len(svc.Spec.LoadBalancerSourceRanges) > 0 { - rv, err := ParseCIDRs(svc.Spec.LoadBalancerSourceRanges) - if err != nil { - return nil, fmt.Errorf("invalid service.Spec.LoadBalancerSourceRanges [%v]: %w", svc.Spec.LoadBalancerSourceRanges, err) - } - return rv, nil - } +func NewAccessControl(svc *v1.Service, sg *network.SecurityGroup, opts ...AccessControlOption) (*AccessControl, error) { + logger := klog.Background(). + WithName("LoadBalancer.AccessControl"). + WithValues("service-name", svc.Name). + WithValues("security-group-name", ptr.To(sg.Name)) - const Sep = "," - value, found := svc.Annotations[v1.AnnotationLoadBalancerSourceRangesKey] - if !found { - return nil, nil + options := defaultAccessControlOptions + for _, opt := range opts { + opt(&options) } - rv, err := ParseCIDRs(strings.Split(strings.TrimSpace(value), Sep)) + + sgHelper, err := securitygroup.NewSecurityGroupHelper(sg) if err != nil { - return nil, fmt.Errorf("invalid service annotation %s:%s: %w", v1.AnnotationLoadBalancerSourceRangesKey, value, err) + logger.Error(err, "Failed to initialize RuleHelper") + return nil, err } - return rv, nil -} - -type AccessControl struct { - svc *v1.Service - - // immutable redundant states. 
- sourceRanges []netip.Prefix - allowedIPRanges []netip.Prefix - allowedServiceTags []string -} - -func NewAccessControl(svc *v1.Service) (*AccessControl, error) { sourceRanges, err := SourceRanges(svc) - if err != nil { + if err != nil && !options.SkipAnnotationValidation { + logger.Error(err, "Failed to parse SourceRange configuration") return nil, err } allowedIPRanges, err := AllowedIPRanges(svc) - if err != nil { + if err != nil && !options.SkipAnnotationValidation { + logger.Error(err, "Failed to parse AllowedIPRanges configuration") return nil, err } allowedServiceTags, err := AllowedServiceTags(svc) + if err != nil && !options.SkipAnnotationValidation { + logger.Error(err, "Failed to parse AllowedServiceTags configuration") + return nil, err + } + securityRuleDestinationPortsByProtocol, err := securityRuleDestinationPortsByProtocol(svc) if err != nil { + logger.Error(err, "Failed to parse service spec.Ports") return nil, err } + if len(sourceRanges) > 0 && len(allowedIPRanges) > 0 { + logger.Error(err, "Forbidden configuration") + return nil, ErrSetBothLoadBalancerSourceRangesAndAllowedIPRanges + } return &AccessControl{ - svc: svc, - sourceRanges: sourceRanges, - allowedIPRanges: allowedIPRanges, - allowedServiceTags: allowedServiceTags, + logger: logger, + svc: svc, + sgHelper: sgHelper, + SourceRanges: sourceRanges, + AllowedIPRanges: allowedIPRanges, + AllowedServiceTags: allowedServiceTags, + securityRuleDestinationPortsByProtocol: securityRuleDestinationPortsByProtocol, }, nil } -// SourceRanges returns the allowed IP ranges configured by user through `spec.LoadBalancerSourceRanges` and standard annotation. -func (ac *AccessControl) SourceRanges() []netip.Prefix { - return ac.sourceRanges -} - -// AllowedIPRanges returns the allowed IP ranges configured by user through AKS custom annotation. -func (ac *AccessControl) AllowedIPRanges() []netip.Prefix { - return ac.allowedIPRanges -} - -// AllowedServiceTags returns the allowed service tags configured by user through AKS custom annotation. -func (ac *AccessControl) AllowedServiceTags() []string { - return ac.allowedServiceTags -} - // IsAllowFromInternet returns true if the given service is allowed to be accessed from internet. // To be specific, // 1. For all types of LB, it returns false if the given service is specified with `service tags` or `not allowed all IP ranges`. // 2. For internal LB, it returns true iff the given service is explicitly specified with `allowed all IP ranges`. Refer: https://github.com/kubernetes-sigs/cloud-provider-azure/issues/698 func (ac *AccessControl) IsAllowFromInternet() bool { - if len(ac.allowedServiceTags) > 0 { + if len(ac.AllowedServiceTags) > 0 { return false } - if len(ac.sourceRanges) > 0 && !IsCIDRsAllowAll(ac.sourceRanges) { + if len(ac.SourceRanges) > 0 && !iputil.IsPrefixesAllowAll(ac.SourceRanges) { return false } - if len(ac.allowedIPRanges) > 0 && !IsCIDRsAllowAll(ac.allowedIPRanges) { + if len(ac.AllowedIPRanges) > 0 && !iputil.IsPrefixesAllowAll(ac.AllowedIPRanges) { return false } - if IsExternal(ac.svc) { + if !IsInternal(ac.svc) { return true } // Internal LB with explicit allowedAll IP ranges is allowed to be accessed from internet. - return len(ac.allowedIPRanges) > 0 || len(ac.sourceRanges) > 0 + return len(ac.AllowedIPRanges) > 0 || len(ac.SourceRanges) > 0 } -// IPV4Sources returns the allowed sources for IPv4. 
-func (ac *AccessControl) IPV4Sources() []string { - var rv []string +// DenyAllExceptSourceRanges returns true if it needs to block any VNet traffic not on the allow list. +// By default, NSG allow traffic from the VNet. +func (ac *AccessControl) DenyAllExceptSourceRanges() bool { + var ( + annotationEnabled = strings.EqualFold(ac.svc.Annotations[consts.ServiceAnnotationDenyAllExceptLoadBalancerSourceRanges], "true") + sourceRangeSpecified = len(ac.SourceRanges) > 0 || len(ac.AllowedIPRanges) > 0 + ) + return annotationEnabled && sourceRangeSpecified +} - if ac.IsAllowFromInternet() { - rv = append(rv, "Internet") - } - for _, cidr := range ac.sourceRanges { +// AllowedIPv4Ranges returns the IPv4 ranges that are allowed to access the LoadBalancer. +func (ac *AccessControl) AllowedIPv4Ranges() []netip.Prefix { + var rv []netip.Prefix + for _, cidr := range ac.SourceRanges { if cidr.Addr().Is4() { - rv = append(rv, cidr.String()) + rv = append(rv, cidr) } } - for _, cidr := range ac.allowedIPRanges { + for _, cidr := range ac.AllowedIPRanges { if cidr.Addr().Is4() { - rv = append(rv, cidr.String()) + rv = append(rv, cidr) } } - rv = append(rv, ac.allowedServiceTags...) + return rv +} +// AllowedIPv6Ranges returns the IPv6 ranges that are allowed to access the LoadBalancer. +func (ac *AccessControl) AllowedIPv6Ranges() []netip.Prefix { + var rv []netip.Prefix + for _, cidr := range ac.SourceRanges { + if cidr.Addr().Is6() { + rv = append(rv, cidr) + } + } + for _, cidr := range ac.AllowedIPRanges { + if cidr.Addr().Is6() { + rv = append(rv, cidr) + } + } return rv } -// IPV6Sources returns the allowed sources for IPv6. -func (ac *AccessControl) IPV6Sources() []string { +// PatchSecurityGroup checks and adds rules for the given destination IP addresses. 
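+//
+// A minimal usage sketch (hedged; svc, sg and frontendIPs are assumed inputs from the
+// caller, mirroring the provider code above, and are not defined in this file):
+//
+//	ac, err := NewAccessControl(svc, sg)
+//	if err != nil {
+//		return err
+//	}
+//	dstIPv4, dstIPv6 := iputil.GroupAddressesByFamily(frontendIPs)
+//	if err := ac.PatchSecurityGroup(dstIPv4, dstIPv6); err != nil {
+//		return err
+//	}
+//	patchedSG, updated, err := ac.SecurityGroup() // persist patchedSG only if updated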
+func (ac *AccessControl) PatchSecurityGroup(dstIPv4Addresses, dstIPv6Addresses []netip.Addr) error { + logger := ac.logger.WithName("PatchSecurityGroup") + var ( - rv []string + allowedIPv4Ranges = ac.AllowedIPv4Ranges() + allowedIPv6Ranges = ac.AllowedIPv6Ranges() + allowedServiceTags = ac.AllowedServiceTags ) if ac.IsAllowFromInternet() { - rv = append(rv, "Internet") + allowedServiceTags = append(allowedServiceTags, securitygroup.ServiceTagInternet) } - for _, cidr := range ac.sourceRanges { - if cidr.Addr().Is6() { - rv = append(rv, cidr.String()) + + logger.V(10).Info("Start patching", + "num-allowed-ipv4-ranges", len(allowedIPv4Ranges), + "num-allowed-ipv6-ranges", len(allowedIPv6Ranges), + "num-allowed-service-tags", len(allowedServiceTags), + ) + + protocols := []network.SecurityRuleProtocol{ + network.SecurityRuleProtocolTCP, + network.SecurityRuleProtocolUDP, + network.SecurityRuleProtocolAsterisk, + } + + for _, protocol := range protocols { + dstPorts, found := ac.securityRuleDestinationPortsByProtocol[protocol] + if !found { + continue + } + if len(dstIPv4Addresses) > 0 { + for _, tag := range allowedServiceTags { + err := ac.sgHelper.AddRuleForAllowedServiceTag(tag, protocol, dstIPv4Addresses, dstPorts) + if err != nil { + return fmt.Errorf("add rule for allowed service tag on IPv4: %w", err) + } + } + + if len(allowedIPv4Ranges) > 0 { + err := ac.sgHelper.AddRuleForAllowedIPRanges(allowedIPv4Ranges, protocol, dstIPv4Addresses, dstPorts) + if err != nil { + return fmt.Errorf("add rule for allowed IP ranges on IPv4: %w", err) + } + } + } + if len(dstIPv6Addresses) > 0 { + for _, tag := range allowedServiceTags { + err := ac.sgHelper.AddRuleForAllowedServiceTag(tag, protocol, dstIPv6Addresses, dstPorts) + if err != nil { + return fmt.Errorf("add rule for allowed service tag on IPv6: %w", err) + } + } + + if len(allowedIPv6Ranges) > 0 { + err := ac.sgHelper.AddRuleForAllowedIPRanges(allowedIPv6Ranges, protocol, dstIPv6Addresses, dstPorts) + if err != nil { + return fmt.Errorf("add rule for allowed IP ranges on IPv6: %w", err) + } + } } } - for _, cidr := range ac.allowedIPRanges { - if cidr.Addr().Is6() { - rv = append(rv, cidr.String()) + + if ac.DenyAllExceptSourceRanges() { + if len(dstIPv4Addresses) > 0 { + if err := ac.sgHelper.AddRuleForDenyAll(dstIPv4Addresses); err != nil { + return fmt.Errorf("add rule for deny all on IPv4: %w", err) + } + } + if len(dstIPv6Addresses) > 0 { + if err := ac.sgHelper.AddRuleForDenyAll(dstIPv6Addresses); err != nil { + return fmt.Errorf("add rule for deny all on IPv6: %w", err) + } } } - rv = append(rv, ac.allowedServiceTags...) - return rv + logger.V(10).Info("Completed patching") + + return nil +} + +// CleanSecurityGroup removes the given IP addresses from the SecurityGroup. +func (ac *AccessControl) CleanSecurityGroup(dstIPv4Addresses, dstIPv6Addresses []netip.Addr) { + logger := ac.logger.WithName("CleanSecurityGroup"). + WithValues("num-dst-ipv4-addresses", len(dstIPv4Addresses)). + WithValues("num-dst-ipv6-addresses", len(dstIPv6Addresses)) + logger.V(10).Info("Start cleaning") + + var ( + prefixes = fnutil.Map(func(addr netip.Addr) string { + return addr.String() + }, append(dstIPv4Addresses, dstIPv6Addresses...)) + ) + + ac.sgHelper.RemoveDestinationPrefixesFromRules(prefixes) + + logger.V(10).Info("Completed cleaning") +} + +// SecurityGroup returns the SecurityGroup object with patched rules and indicates if the rules had been changed. +// There are mainly two operations to alter the SecurityGroup: +// 1. 
`PatchSecurityGroup`: Add rules for the given destination IP addresses. +// 2. `CleanSecurityGroup`: Remove the given destination IP addresses from all rules. +// It would return unchanged SecurityGroup and `false` if the operations undo each other. +func (ac *AccessControl) SecurityGroup() (*network.SecurityGroup, bool, error) { + return ac.sgHelper.SecurityGroup() +} + +// securityRuleDestinationPortsByProtocol returns the service ports grouped by SecurityGroup protocol. +func securityRuleDestinationPortsByProtocol(svc *v1.Service) (map[network.SecurityRuleProtocol][]int32, error) { + convert := func(protocol v1.Protocol) (network.SecurityRuleProtocol, error) { + switch protocol { + case v1.ProtocolTCP: + return network.SecurityRuleProtocolTCP, nil + case v1.ProtocolUDP: + return network.SecurityRuleProtocolUDP, nil + case v1.ProtocolSCTP: + return network.SecurityRuleProtocolAsterisk, nil + } + return "", fmt.Errorf("unsupported protocol %s", protocol) + } + + rv := make(map[network.SecurityRuleProtocol][]int32) + for _, port := range svc.Spec.Ports { + protocol, err := convert(port.Protocol) + if err != nil { + return nil, err + } + + var p int32 + if consts.IsK8sServiceDisableLoadBalancerFloatingIP(svc) { + p = port.NodePort + } else { + p = port.Port + } + + rv[protocol] = append(rv[protocol], p) + } + return rv, nil } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/configuration.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/configuration.go new file mode 100644 index 000000000..436bca4b1 --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/configuration.go @@ -0,0 +1,134 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package loadbalancer + +import ( + "fmt" + "net/netip" + "strings" + + v1 "k8s.io/api/core/v1" + + "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil" +) + +// IsInternal returns true if the given service is internal load balancer. +func IsInternal(svc *v1.Service) bool { + value, found := svc.Annotations[consts.ServiceAnnotationLoadBalancerInternal] + return found && strings.ToLower(value) == "true" +} + +// AllowedServiceTags returns the allowed service tags configured by user through AKS custom annotation. +func AllowedServiceTags(svc *v1.Service) ([]string, error) { + const ( + Sep = "," + Key = consts.ServiceAnnotationAllowedServiceTags + ) + + value, found := svc.Annotations[Key] + if !found { + return nil, nil + } + + return strings.Split(strings.TrimSpace(value), Sep), nil +} + +// AllowedIPRanges returns the allowed IP ranges configured by user through AKS custom annotation. 
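+//
+// For example, an annotation value of "10.0.0.0/8,2001:db8::/32" (an illustrative
+// value, not taken from this patch) parses into two netip.Prefix entries; a malformed
+// entry makes the function return an *ErrAnnotationValue wrapping the parse error.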
+func AllowedIPRanges(svc *v1.Service) ([]netip.Prefix, error) { + const ( + Sep = "," + Key = consts.ServiceAnnotationAllowedIPRanges + ) + + value, found := svc.Annotations[Key] + if !found { + return nil, nil + } + + rv, err := iputil.ParsePrefixes(strings.Split(strings.TrimSpace(value), Sep)) + if err != nil { + return nil, NewErrAnnotationValue(Key, value, err) + } + + return rv, nil +} + +// SourceRanges returns the allowed IP ranges configured by user through `spec.LoadBalancerSourceRanges` and standard annotation. +// If `spec.LoadBalancerSourceRanges` is not set, it will try to parse the annotation. +func SourceRanges(svc *v1.Service) ([]netip.Prefix, error) { + // Read from spec + if len(svc.Spec.LoadBalancerSourceRanges) > 0 { + rv, err := iputil.ParsePrefixes(svc.Spec.LoadBalancerSourceRanges) + if err != nil { + return nil, fmt.Errorf("invalid service.Spec.LoadBalancerSourceRanges [%v]: %w", svc.Spec.LoadBalancerSourceRanges, err) + } + return rv, nil + } + + // Read from annotation + const ( + Sep = "," + Key = v1.AnnotationLoadBalancerSourceRangesKey + ) + value, found := svc.Annotations[Key] + if !found { + return nil, nil + } + rv, err := iputil.ParsePrefixes(strings.Split(strings.TrimSpace(value), Sep)) + if err != nil { + return nil, NewErrAnnotationValue(Key, value, err) + } + return rv, nil +} + +func AdditionalPublicIPs(svc *v1.Service) ([]netip.Addr, error) { + const ( + Sep = "," + Key = consts.ServiceAnnotationAdditionalPublicIPs + ) + + value, found := svc.Annotations[Key] + if !found { + return nil, nil + } + + rv, err := iputil.ParseAddresses(strings.Split(strings.TrimSpace(value), Sep)) + if err != nil { + return nil, NewErrAnnotationValue(Key, value, err) + } + + return rv, nil +} + +type ErrAnnotationValue struct { + AnnotationKey string + AnnotationValue string + Inner error +} + +func NewErrAnnotationValue(key, value string, inner error) *ErrAnnotationValue { + return &ErrAnnotationValue{ + AnnotationKey: key, + AnnotationValue: value, + Inner: inner, + } +} + +func (err *ErrAnnotationValue) Error() string { + return fmt.Sprintf("invalid service annotation %s:%s: %s", err.AnnotationKey, err.AnnotationValue, err.Inner) +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/fnutil/map.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/fnutil/map.go new file mode 100644 index 000000000..0d55042f5 --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/fnutil/map.go @@ -0,0 +1,25 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fnutil + +func Values[K comparable, V any](m map[K]V) []V { + rv := make([]V, 0, len(m)) + for _, v := range m { + rv = append(rv, v) + } + return rv +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/fnutil/slice.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/fnutil/slice.go new file mode 100644 index 000000000..210fe1d97 --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/fnutil/slice.go @@ -0,0 +1,44 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fnutil + +func Map[T any, R any](f func(T) R, xs []T) []R { + rv := make([]R, len(xs)) + for i, x := range xs { + rv[i] = f(x) + } + return rv +} + +func RemoveIf[T any](f func(T) bool, xs []T) []T { + var rv []T + for _, x := range xs { + if !f(x) { + rv = append(rv, x) + } + } + return rv +} + +func IsAll[T any](f func(T) bool, xs []T) bool { + for _, x := range xs { + if !f(x) { + return false + } + } + return true +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil/addr.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil/addr.go new file mode 100644 index 000000000..9e1a47ff1 --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil/addr.go @@ -0,0 +1,49 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package iputil + +import ( + "fmt" + "net/netip" +) + +func ParseAddresses(vs []string) ([]netip.Addr, error) { + var rv []netip.Addr + for _, v := range vs { + addr, err := netip.ParseAddr(v) + if err != nil { + return nil, fmt.Errorf("invalid CIDR `%s`: %w", v, err) + } + rv = append(rv, addr) + } + return rv, nil +} + +func GroupAddressesByFamily(vs []netip.Addr) ([]netip.Addr, []netip.Addr) { + var ( + v4 []netip.Addr + v6 []netip.Addr + ) + for _, v := range vs { + if v.Is4() { + v4 = append(v4, v) + } else { + v6 = append(v6, v) + } + } + return v4, v6 +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil/family.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil/family.go new file mode 100644 index 000000000..9ac42b92f --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil/family.go @@ -0,0 +1,55 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package iputil + +import ( + "net/netip" + + "sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/fnutil" +) + +type Family string + +const ( + IPv4 Family = "IPv4" + IPv6 Family = "IPv6" +) + +func FamilyOfAddr(addr netip.Addr) Family { + if addr.Is4() { + return IPv4 + } + return IPv6 +} + +func ArePrefixesFromSameFamily(prefixes []netip.Prefix) bool { + if len(prefixes) <= 1 { + return true + } + return fnutil.IsAll(func(p netip.Prefix) bool { + return p.Addr().Is4() == prefixes[0].Addr().Is4() + }, prefixes) +} + +func AreAddressesFromSameFamily(addresses []netip.Addr) bool { + if len(addresses) <= 1 { + return true + } + return fnutil.IsAll(func(p netip.Addr) bool { + return p.Is4() == addresses[0].Is4() + }, addresses) +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/netip.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil/prefix.go similarity index 56% rename from vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/netip.go rename to vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil/prefix.go index c7e4632b6..a5644bfbc 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/netip.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil/prefix.go @@ -14,35 +14,28 @@ See the License for the specific language governing permissions and limitations under the License. */ -package loadbalancer +package iputil import ( "fmt" "net/netip" ) -const ( - IPv4AllowedAll = "0.0.0.0/0" - IPv6AllowedAll = "::/0" -) - -// IsCIDRsAllowAll return true if the given IP Ranges covers all IPs. -// It returns false if the given IP Ranges is empty. -func IsCIDRsAllowAll(cidrs []netip.Prefix) bool { - for _, cidr := range cidrs { - if cidr.String() == IPv4AllowedAll || cidr.String() == IPv6AllowedAll { +func IsPrefixesAllowAll(prefixes []netip.Prefix) bool { + for _, p := range prefixes { + if p.Bits() == 0 { return true } } return false } -func ParseCIDRs(parts []string) ([]netip.Prefix, error) { +func ParsePrefixes(vs []string) ([]netip.Prefix, error) { var rv []netip.Prefix - for _, part := range parts { - prefix, err := netip.ParsePrefix(part) + for _, v := range vs { + prefix, err := netip.ParsePrefix(v) if err != nil { - return nil, fmt.Errorf("invalid IP range %s: %w", part, err) + return nil, fmt.Errorf("invalid CIDR `%s`: %w", v, err) } rv = append(rv, prefix) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/securitygroup/securitygroup.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/securitygroup/securitygroup.go new file mode 100644 index 000000000..82b617e25 --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/securitygroup/securitygroup.go @@ -0,0 +1,461 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package securitygroup + +import ( + "bytes" + "crypto/md5" //nolint:gosec + "encoding/json" + "fmt" + "net/netip" + "sort" + "strconv" + "strings" + + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" + "k8s.io/klog/v2" + "k8s.io/utils/ptr" + + "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/fnutil" + "sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil" +) + +const ( + SecurityRuleNamePrefix = "k8s-azure-lb" + SecurityRuleNameSep = "_" +) + +// Refer: https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/azure-subscription-service-limits?toc=%2Fazure%2Fvirtual-network%2Ftoc.json#azure-resource-manager-virtual-networking-limits +const ( + MaxSecurityRulesPerGroup = 1_000 + MaxSecurityRuleSourceIPsPerGroup = 4_000 + MaxSecurityRuleDestinationIPsPerGroup = 4_000 +) + +const ( + ServiceTagInternet = "Internet" +) + +var ( + ErrInvalidSecurityGroup = fmt.Errorf("invalid SecurityGroup object") + ErrSecurityRulePriorityExhausted = fmt.Errorf("security rule priority exhausted") + ErrSecurityRuleSourceAddressesNotFromSameIPFamily = fmt.Errorf("security rule source addresses must be from the same IP family") + ErrSecurityRuleDestinationAddressesNotFromSameIPFamily = fmt.Errorf("security rule destination addresses must be from the same IP family") + ErrSecurityRuleSourceAndDestinationNotFromSameIPFamily = fmt.Errorf("security rule source addresses and destination addresses must be from the same IP family") +) + +// RuleHelper manages security rules within a security group. +type RuleHelper struct { + logger klog.Logger + sg *network.SecurityGroup + snapshot []byte + + // helper map to store the security rules + // name -> security rule + rules map[string]*network.SecurityRule + priorities map[int32]string +} + +func NewSecurityGroupHelper(sg *network.SecurityGroup) (*RuleHelper, error) { + if sg == nil || + sg.Name == nil || + sg.SecurityGroupPropertiesFormat == nil || + sg.SecurityRules == nil { + // In the real world, this should never happen. + // Anyway, defensively check it. + return nil, ErrInvalidSecurityGroup + } + var ( + logger = klog.Background().WithName("RuleHelper").WithValues("security-group-name", ptr.To(sg.Name)) + rules = make(map[string]*network.SecurityRule, len(*sg.SecurityGroupPropertiesFormat.SecurityRules)) + priorities = make(map[int32]string, len(*sg.SecurityGroupPropertiesFormat.SecurityRules)) + ) + for i := range *sg.SecurityGroupPropertiesFormat.SecurityRules { + rule := (*sg.SecurityGroupPropertiesFormat.SecurityRules)[i] + rules[*rule.Name] = &rule + priorities[*rule.Priority] = *rule.Name + } + + snapshot := makeSecurityGroupSnapshot(sg) + + return &RuleHelper{ + logger: logger, + sg: sg, + + rules: rules, + priorities: priorities, + snapshot: snapshot, + }, nil +} + +type rulePriorityPrefer string + +const ( + rulePriorityPreferFromStart rulePriorityPrefer = "from_start" + rulePriorityPreferFromEnd rulePriorityPrefer = "from_end" +) + +// nextRulePriority returns the next available priority for a new rule. 
+// It takes a preference for whether to start from the beginning or end of the priority range. +func (helper *RuleHelper) nextRulePriority(prefer rulePriorityPrefer) (int32, error) { + var ( + init, end = consts.LoadBalancerMinimumPriority, consts.LoadBalancerMaximumPriority + delta = 1 + ) + if prefer == rulePriorityPreferFromEnd { + init, end, delta = end-1, init-1, -1 + } + + for init != end { + p := int32(init) + if _, found := helper.priorities[p]; found { + init += delta + continue + } + return p, nil + } + + return 0, ErrSecurityRulePriorityExhausted +} + +// getOrCreateRule returns an existing rule or create a new one if it doesn't exist. +func (helper *RuleHelper) getOrCreateRule(name string, priorityPrefer rulePriorityPrefer) (*network.SecurityRule, error) { + logger := helper.logger.WithName("getOrCreateRule").WithValues("rule-name", name) + + if rule, found := helper.rules[name]; found { + logger.V(4).Info("Found an existing rule", "priority", *rule.Priority) + return rule, nil + } + + priority, err := helper.nextRulePriority(priorityPrefer) + if err != nil { + // NOTE: right now it won't happen because the number of rules is limited. + // maxPriority[4096] - minPriority[500] > maxSecurityRulesPerGroup[1000] + helper.logger.Error(err, "Failed to get an available rule priority") + return nil, err + } + rule := &network.SecurityRule{ + Name: ptr.To(name), + SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{ + Priority: ptr.To(priority), + }, + } + + helper.rules[name] = rule + helper.priorities[priority] = name + + logger.V(4).Info("Adding a new rule", "rule-name", name, "priority", priority) + + return rule, nil +} + +// addAllowRule adds a rule that allows certain traffic. +func (helper *RuleHelper) addAllowRule( + protocol network.SecurityRuleProtocol, + ipFamily iputil.Family, + srcPrefixes []string, + dstPrefixes []string, + dstPorts []int32, +) error { + name := GenerateAllowSecurityRuleName(protocol, ipFamily, srcPrefixes, dstPorts) + rule, err := helper.getOrCreateRule(name, rulePriorityPreferFromStart) + if err != nil { + return err + } + dstPortRanges := fnutil.Map(func(p int32) string { return strconv.FormatInt(int64(p), 10) }, dstPorts) + sort.Strings(dstPortRanges) + + rule.Protocol = protocol + rule.Access = network.SecurityRuleAccessAllow + rule.Direction = network.SecurityRuleDirectionInbound + { + // Source + if len(srcPrefixes) == 1 { + rule.SourceAddressPrefix = ptr.To(srcPrefixes[0]) + } else { + rule.SourceAddressPrefixes = ptr.To(srcPrefixes) + } + rule.SourcePortRange = ptr.To("*") + } + { + // Destination + if rule.DestinationAddressPrefixes == nil { + rule.DestinationAddressPrefixes = &[]string{} + } + rule.DestinationAddressPrefixes = ptr.To( + NormalizeSecurityRuleAddressPrefixes(append(*rule.DestinationAddressPrefixes, dstPrefixes...)), + ) + rule.DestinationPortRanges = ptr.To(dstPortRanges) + } + + helper.logger.V(4).Info("Patched a rule for allow", "rule-name", name) + + return nil +} + +// AddRuleForAllowedServiceTag adds a rule for traffic from a certain service tag. 
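+//
+// A hedged sketch of a call site (dstAddrs and the ports are illustrative, not from this patch):
+//
+//	err := helper.AddRuleForAllowedServiceTag(ServiceTagInternet,
+//		network.SecurityRuleProtocolTCP, dstAddrs, []int32{80, 443})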
+func (helper *RuleHelper) AddRuleForAllowedServiceTag( + serviceTag string, + protocol network.SecurityRuleProtocol, + dstAddresses []netip.Addr, + dstPorts []int32, +) error { + if !iputil.AreAddressesFromSameFamily(dstAddresses) { + return ErrSecurityRuleDestinationAddressesNotFromSameIPFamily + } + + var ( + ipFamily = iputil.FamilyOfAddr(dstAddresses[0]) + srcPrefixes = []string{serviceTag} + dstPrefixes = fnutil.Map(func(ip netip.Addr) string { return ip.String() }, dstAddresses) + ) + + helper.logger.V(4).Info("Patching a rule for allowed service tag", "ip-family", ipFamily) + + return helper.addAllowRule(protocol, ipFamily, srcPrefixes, dstPrefixes, dstPorts) +} + +// AddRuleForAllowedIPRanges adds a rule for traffic from certain IP ranges. +func (helper *RuleHelper) AddRuleForAllowedIPRanges( + ipRanges []netip.Prefix, + protocol network.SecurityRuleProtocol, + dstAddresses []netip.Addr, + dstPorts []int32, +) error { + if !iputil.ArePrefixesFromSameFamily(ipRanges) { + return ErrSecurityRuleSourceAddressesNotFromSameIPFamily + } + if !iputil.AreAddressesFromSameFamily(dstAddresses) { + return ErrSecurityRuleDestinationAddressesNotFromSameIPFamily + } + if ipRanges[0].Addr().Is4() != dstAddresses[0].Is4() { + return ErrSecurityRuleSourceAndDestinationNotFromSameIPFamily + } + + var ( + ipFamily = iputil.FamilyOfAddr(ipRanges[0].Addr()) + srcPrefixes = fnutil.Map(func(ip netip.Prefix) string { return ip.String() }, ipRanges) + dstPrefixes = fnutil.Map(func(ip netip.Addr) string { return ip.String() }, dstAddresses) + ) + + helper.logger.V(4).Info("Patching a rule for allowed IP ranges", "ip-family", ipFamily) + + return helper.addAllowRule(protocol, ipFamily, srcPrefixes, dstPrefixes, dstPorts) +} + +// AddRuleForDenyAll adds a rule to deny all traffic from the given destination addresses. +// NOTE: +// This rule is to limit the traffic inside the VNet. +// The traffic out of the VNet is already limited by rule `DenyAllInBound`. +func (helper *RuleHelper) AddRuleForDenyAll(dstAddresses []netip.Addr) error { + if !iputil.AreAddressesFromSameFamily(dstAddresses) { + return ErrSecurityRuleDestinationAddressesNotFromSameIPFamily + } + + var ( + ipFamily = iputil.FamilyOfAddr(dstAddresses[0]) + ruleName = GenerateDenyAllSecurityRuleName(ipFamily) + ) + + helper.logger.V(4).Info("Patching a rule for deny all", "ip-family", ipFamily) + + rule, err := helper.getOrCreateRule(ruleName, rulePriorityPreferFromEnd) + if err != nil { + return err + } + rule.Protocol = network.SecurityRuleProtocolAsterisk + rule.Access = network.SecurityRuleAccessDeny + rule.Direction = network.SecurityRuleDirectionInbound + { + // Source + rule.SourceAddressPrefix = ptr.To("*") + rule.SourcePortRange = ptr.To("*") + } + { + // Destination + addresses := fnutil.Map(func(ip netip.Addr) string { return ip.String() }, dstAddresses) + if rule.DestinationAddressPrefixes == nil { + rule.DestinationAddressPrefixes = &[]string{} + } + rule.DestinationAddressPrefixes = ptr.To( + NormalizeSecurityRuleAddressPrefixes(append(*rule.DestinationAddressPrefixes, addresses...)), + ) + rule.DestinationPortRange = ptr.To("*") + } + + helper.logger.V(4).Info("Patched a rule for deny all", "rule-name", ptr.To(rule.Name)) + + return nil +} + +// RemoveDestinationPrefixesFromRules removes the given destination addresses from all rules. 
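+//
+// For example (illustrative addresses), removing "10.0.0.4" from a rule whose
+// destinations are ["10.0.0.4", "10.0.0.5"] leaves ["10.0.0.5"]; a rule whose only
+// destination was removed keeps no destinations and is later dropped by SecurityGroup().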
+func (helper *RuleHelper) RemoveDestinationPrefixesFromRules(prefixes []string) { + helper.logger.V(10).Info("Cleaning destination address prefixes from SecurityGroup", "num-dst-prefixes", len(prefixes)) + + index := make(map[string]bool, len(prefixes)) + for _, p := range prefixes { + index[p] = true + } + + for _, rule := range helper.rules { + if rule.DestinationAddressPrefixes == nil { + if rule.DestinationAddressPrefix != nil && index[*rule.DestinationAddressPrefix] { + rule.DestinationAddressPrefix = nil + } + continue + } + + dstPrefixes := fnutil.RemoveIf(func(dstAddress string) bool { + return index[dstAddress] + }, *rule.DestinationAddressPrefixes) + + switch len(dstPrefixes) { + case len(*rule.DestinationAddressPrefixes): + // No change. + continue + default: + // Update the prefixes. + rule.DestinationAddressPrefixes = ptr.To( + NormalizeSecurityRuleAddressPrefixes(dstPrefixes), + ) + } + } +} + +// SecurityGroup returns the underlying SecurityGroup object and a bool indicating whether any changes were made to the RuleHelper. +func (helper *RuleHelper) SecurityGroup() (*network.SecurityGroup, bool, error) { + var ( + rv = helper.sg + rules = make([]network.SecurityRule, 0, len(helper.rules)) + ) + for _, r := range helper.rules { + noDstPrefixes := ptr.Deref(r.DestinationAddressPrefix, "") == "" && + len(ptr.Deref(r.DestinationAddressPrefixes, []string{})) == 0 + if noDstPrefixes { + // Skip the rule without destination prefixes. + continue + } + rules = append(rules, *r) + } + + rv.SecurityRules = &rules + + var ( + snapshot = makeSecurityGroupSnapshot(rv) + updated = !bytes.Equal(helper.snapshot, snapshot) + nRules int + nSrcIPs int + nDstIPs int + ) + { + // Check whether the SecurityGroup exceeds the limit. + for _, rule := range *rv.SecurityRules { + nRules++ + if rule.SourceAddressPrefixes != nil { + nSrcIPs += len(*rule.SourceAddressPrefixes) + } + if rule.SourceAddressPrefix != nil { + nSrcIPs++ + } + if rule.DestinationAddressPrefixes != nil { + nDstIPs += len(*rule.DestinationAddressPrefixes) + } + if rule.DestinationAddressPrefix != nil { + nDstIPs++ + } + } + helper.logger.V(10).Info("Checking the number of rules and IP addresses", "num-rules", nRules, "num-src-ips", nSrcIPs, "num-dst-ips", nDstIPs) + if nRules > MaxSecurityRulesPerGroup { + return nil, false, fmt.Errorf("exceeds the maximum number of rules (%d > %d)", nRules, MaxSecurityRulesPerGroup) + } + if nSrcIPs > MaxSecurityRuleSourceIPsPerGroup { + return nil, false, fmt.Errorf("exceeds the maximum number of source IP addresses (%d > %d)", nSrcIPs, MaxSecurityRuleSourceIPsPerGroup) + } + if nDstIPs > MaxSecurityRuleDestinationIPsPerGroup { + return nil, false, fmt.Errorf("exceeds the maximum number of destination IP addresses (%d > %d)", nDstIPs, MaxSecurityRuleDestinationIPsPerGroup) + } + } + + return rv, updated, nil +} + +// NormalizeSecurityRuleAddressPrefixes normalizes the given rule address prefixes. +func NormalizeSecurityRuleAddressPrefixes(vs []string) []string { + // Remove redundant addresses. + indexes := make(map[string]bool, len(vs)) + for _, v := range vs { + indexes[v] = true + } + rv := make([]string, 0, len(indexes)) + for k := range indexes { + rv = append(rv, k) + } + sort.Strings(rv) + return rv +} + +// NormalizeDestinationPortRanges normalizes the given destination port ranges. 
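+//
+// For example, NormalizeDestinationPortRanges([]int32{80, 443}) returns ["443", "80"]:
+// ports are formatted as strings and sorted lexicographically, giving a deterministic
+// order for rule comparison.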
+func NormalizeDestinationPortRanges(dstPorts []int32) []string { + rv := fnutil.Map(func(p int32) string { return strconv.FormatInt(int64(p), 10) }, dstPorts) + sort.Strings(rv) + return rv +} + +// makeSecurityGroupSnapshot returns a byte array as the snapshot of the given SecurityGroup. +// It's used to check if the SecurityGroup had been changed. +func makeSecurityGroupSnapshot(sg *network.SecurityGroup) []byte { + sort.SliceStable(*sg.SecurityGroupPropertiesFormat.SecurityRules, func(i, j int) bool { + return *(*sg.SecurityGroupPropertiesFormat.SecurityRules)[i].Priority < *(*sg.SecurityGroupPropertiesFormat.SecurityRules)[j].Priority + }) + snapshot, _ := json.Marshal(sg) + return snapshot +} + +// GenerateAllowSecurityRuleName returns the AllowInbound rule name based on the given rule properties. +func GenerateAllowSecurityRuleName( + protocol network.SecurityRuleProtocol, + ipFamily iputil.Family, + srcPrefixes []string, + dstPorts []int32, +) string { + var ruleID string + { + dstPortRanges := fnutil.Map(func(p int32) string { return strconv.FormatInt(int64(p), 10) }, dstPorts) + // Generate rule ID from protocol, source prefixes and destination port ranges. + sort.Strings(srcPrefixes) + sort.Strings(dstPortRanges) + + v := strings.Join([]string{ + string(protocol), + strings.Join(srcPrefixes, ","), + strings.Join(dstPortRanges, ","), + }, "_") + + h := md5.New() //nolint:gosec + h.Write([]byte(v)) + + ruleID = fmt.Sprintf("%x", h.Sum(nil)) + } + + return strings.Join([]string{SecurityRuleNamePrefix, "allow", string(ipFamily), ruleID}, SecurityRuleNameSep) +} + +// GenerateDenyAllSecurityRuleName returns the DenyInbound rule name based on the given rule properties. +func GenerateDenyAllSecurityRuleName(ipFamily iputil.Family) string { + return strings.Join([]string{SecurityRuleNamePrefix, "deny-all", string(ipFamily)}, SecurityRuleNameSep) +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go index 9b14ca581..41fc2474a 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go @@ -28,20 +28,15 @@ import ( // for PathElementSet and SetNodeMap, so we could probably share the // code. type PathElementValueMap struct { - members sortedPathElementValues + valueMap PathElementMap } func MakePathElementValueMap(size int) PathElementValueMap { return PathElementValueMap{ - members: make(sortedPathElementValues, 0, size), + valueMap: MakePathElementMap(size), } } -type pathElementValue struct { - PathElement PathElement - Value value.Value -} - type sortedPathElementValues []pathElementValue // Implement the sort interface; this would permit bulk creation, which would @@ -53,7 +48,40 @@ func (spev sortedPathElementValues) Less(i, j int) bool { func (spev sortedPathElementValues) Swap(i, j int) { spev[i], spev[j] = spev[j], spev[i] } // Insert adds the pathelement and associated value in the map. +// If insert is called twice with the same PathElement, the value is replaced. func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) { + s.valueMap.Insert(pe, v) +} + +// Get retrieves the value associated with the given PathElement from the map. +// (nil, false) is returned if there is no such PathElement. 
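+//
+// A minimal sketch (pe and v are assumed to be an existing PathElement and value.Value):
+//
+//	m := MakePathElementValueMap(1)
+//	m.Insert(pe, v)
+//	got, ok := m.Get(pe) // got == v, ok == true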
+func (s *PathElementValueMap) Get(pe PathElement) (value.Value, bool) { + v, ok := s.valueMap.Get(pe) + if !ok { + return nil, false + } + return v.(value.Value), true +} + +// PathElementValueMap is a map from PathElement to interface{}. +type PathElementMap struct { + members sortedPathElementValues +} + +type pathElementValue struct { + PathElement PathElement + Value interface{} +} + +func MakePathElementMap(size int) PathElementMap { + return PathElementMap{ + members: make(sortedPathElementValues, 0, size), + } +} + +// Insert adds the pathelement and associated value in the map. +// If insert is called twice with the same PathElement, the value is replaced. +func (s *PathElementMap) Insert(pe PathElement, v interface{}) { loc := sort.Search(len(s.members), func(i int) bool { return !s.members[i].PathElement.Less(pe) }) @@ -62,6 +90,7 @@ func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) { return } if s.members[loc].PathElement.Equals(pe) { + s.members[loc].Value = v return } s.members = append(s.members, pathElementValue{}) @@ -71,7 +100,7 @@ func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) { // Get retrieves the value associated with the given PathElement from the map. // (nil, false) is returned if there is no such PathElement. -func (s *PathElementValueMap) Get(pe PathElement) (value.Value, bool) { +func (s *PathElementMap) Get(pe PathElement) (interface{}, bool) { loc := sort.Search(len(s.members), func(i int) bool { return !s.members[i].PathElement.Less(pe) }) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go index 75a492d8e..f1aa25860 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go @@ -112,7 +112,7 @@ func ConflictsFromManagers(sets fieldpath.ManagedFields) Conflicts { set.Set().Iterate(func(p fieldpath.Path) { conflicts = append(conflicts, Conflict{ Manager: manager, - Path: p, + Path: p.Copy(), }) }) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go index 1b23dcbd5..d5a977d60 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go @@ -18,6 +18,7 @@ import ( "sigs.k8s.io/structured-merge-diff/v4/fieldpath" "sigs.k8s.io/structured-merge-diff/v4/typed" + "sigs.k8s.io/structured-merge-diff/v4/value" ) // Converter is an interface to the conversion logic. The converter @@ -27,19 +28,39 @@ type Converter interface { IsMissingVersionError(error) bool } -// Updater is the object used to compute updated FieldSets and also -// merge the object on Apply. -type Updater struct { +// UpdateBuilder allows you to create a new Updater by exposing all of +// the options and setting them once. +type UpdaterBuilder struct { Converter Converter IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set - enableUnions bool + // Stop comparing the new object with old object after applying. + // This was initially used to avoid spurious etcd update, but + // since that's vastly inefficient, we've come-up with a better + // way of doing that. Create this flag to stop it. + // Comparing has become more expensive too now that we're not using + // `Compare` but `value.Equals` so this gives an option to avoid it. + ReturnInputOnNoop bool } -// EnableUnionFeature turns on union handling. 
It is disabled by default until the -// feature is complete. -func (s *Updater) EnableUnionFeature() { - s.enableUnions = true +func (u *UpdaterBuilder) BuildUpdater() *Updater { + return &Updater{ + Converter: u.Converter, + IgnoredFields: u.IgnoredFields, + returnInputOnNoop: u.ReturnInputOnNoop, + } +} + +// Updater is the object used to compute updated FieldSets and also +// merge the object on Apply. +type Updater struct { + // Deprecated: This will eventually become private. + Converter Converter + + // Deprecated: This will eventually become private. + IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set + + returnInputOnNoop bool } func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, workflow string, force bool) (fieldpath.ManagedFields, *typed.Comparison, error) { @@ -126,12 +147,6 @@ func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldp if err != nil { return nil, fieldpath.ManagedFields{}, err } - if s.enableUnions { - newObject, err = liveObject.NormalizeUnions(newObject) - if err != nil { - return nil, fieldpath.ManagedFields{}, err - } - } managers, compare, err := s.update(liveObject, newObject, version, managers, manager, true) if err != nil { return nil, fieldpath.ManagedFields{}, err @@ -145,7 +160,7 @@ func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldp ignored = fieldpath.NewSet() } managers[manager] = fieldpath.NewVersionedSet( - managers[manager].Set().Union(compare.Modified).Union(compare.Added).Difference(compare.Removed).RecursiveDifference(ignored), + managers[manager].Set().Difference(compare.Removed).Union(compare.Modified).Union(compare.Added).RecursiveDifference(ignored), version, false, ) @@ -157,30 +172,17 @@ func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldp // Apply should be called when Apply is run, given the current object as // well as the configuration that is applied. This will merge the object -// and return it. If the object hasn't changed, nil is returned (the -// managers can still have changed though). +// and return it. 
func (s *Updater) Apply(liveObject, configObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, manager string, force bool) (*typed.TypedValue, fieldpath.ManagedFields, error) { var err error managers, err = s.reconcileManagedFieldsWithSchemaChanges(liveObject, managers) if err != nil { return nil, fieldpath.ManagedFields{}, err } - if s.enableUnions { - configObject, err = configObject.NormalizeUnionsApply(configObject) - if err != nil { - return nil, fieldpath.ManagedFields{}, err - } - } newObject, err := liveObject.Merge(configObject) if err != nil { return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to merge config: %v", err) } - if s.enableUnions { - newObject, err = configObject.NormalizeUnionsApply(newObject) - if err != nil { - return nil, fieldpath.ManagedFields{}, err - } - } lastSet := managers[manager] set, err := configObject.ToFieldSet() if err != nil { @@ -200,11 +202,11 @@ func (s *Updater) Apply(liveObject, configObject *typed.TypedValue, version fiel if err != nil { return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to prune fields: %v", err) } - managers, compare, err := s.update(liveObject, newObject, version, managers, manager, force) + managers, _, err = s.update(liveObject, newObject, version, managers, manager, force) if err != nil { return nil, fieldpath.ManagedFields{}, err } - if compare.IsSame() { + if !s.returnInputOnNoop && value.EqualsUsing(value.NewFreelistAllocator(), liveObject.AsValue(), newObject.AsValue()) { newObject = nil } return newObject, managers, nil @@ -218,7 +220,8 @@ func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFiel if lastSet == nil || lastSet.Set().Empty() { return merged, nil } - convertedMerged, err := s.Converter.Convert(merged, lastSet.APIVersion()) + version := lastSet.APIVersion() + convertedMerged, err := s.Converter.Convert(merged, version) if err != nil { if s.Converter.IsMissingVersionError(err) { return merged, nil @@ -228,7 +231,7 @@ func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFiel sc, tr := convertedMerged.Schema(), convertedMerged.TypeRef() pruned := convertedMerged.RemoveItems(lastSet.Set().EnsureNamedFieldsAreMembers(sc, tr)) - pruned, err = s.addBackOwnedItems(convertedMerged, pruned, managers, applyingManager) + pruned, err = s.addBackOwnedItems(convertedMerged, pruned, version, managers, applyingManager) if err != nil { return nil, fmt.Errorf("failed add back owned items: %v", err) } @@ -241,7 +244,7 @@ func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFiel // addBackOwnedItems adds back any fields, list and map items that were removed by prune, // but other appliers or updaters (or the current applier's new config) claim to own. -func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, managedFields fieldpath.ManagedFields, applyingManager string) (*typed.TypedValue, error) { +func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, prunedVersion fieldpath.APIVersion, managedFields fieldpath.ManagedFields, applyingManager string) (*typed.TypedValue, error) { var err error managedAtVersion := map[fieldpath.APIVersion]*fieldpath.Set{} for _, managerSet := range managedFields { @@ -252,7 +255,6 @@ func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, managedFie } // Add back owned items at pruned version first to avoid conversion failure // caused by pruned fields which are required for conversion. 
- prunedVersion := fieldpath.APIVersion(*pruned.TypeRef().NamedType) if managed, ok := managedAtVersion[prunedVersion]; ok { merged, pruned, err = s.addBackOwnedItemsForVersion(merged, pruned, prunedVersion, managed) if err != nil { diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go index 7e5dc7582..5d3707a5b 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go @@ -73,7 +73,7 @@ type Atom struct { } // Scalar (AKA "primitive") represents a type which has a single value which is -// either numeric, string, or boolean. +// either numeric, string, or boolean, or untyped for any of them. // // TODO: split numeric into float/int? Something even more fine-grained? type Scalar string @@ -82,6 +82,7 @@ const ( Numeric = Scalar("numeric") String = Scalar("string") Boolean = Scalar("boolean") + Untyped = Scalar("untyped") ) // ElementRelationship is an enum of the different possible relationships diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go index 7d64d1308..6eb6c36df 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go @@ -110,7 +110,7 @@ var SchemaSchemaYAML = `types: scalar: string - name: deduceInvalidDiscriminator type: - scalar: bool + scalar: boolean - name: fields type: list: @@ -145,6 +145,7 @@ var SchemaSchemaYAML = `types: list: elementType: scalar: string + elementRelationship: atomic - name: untyped map: fields: diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go new file mode 100644 index 000000000..ed483cbbc --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go @@ -0,0 +1,460 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "fmt" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// Comparison is the return value of a TypedValue.Compare() operation. +// +// No field will appear in more than one of the three fieldsets. If all of the +// fieldsets are empty, then the objects must have been equal. +type Comparison struct { + // Removed contains any fields removed by rhs (the right-hand-side + // object in the comparison). + Removed *fieldpath.Set + // Modified contains fields present in both objects but different. + Modified *fieldpath.Set + // Added contains any fields added by rhs. + Added *fieldpath.Set +} + +// IsSame returns true if the comparison returned no changes (the two +// compared objects are similar). 
+func (c *Comparison) IsSame() bool { + return c.Removed.Empty() && c.Modified.Empty() && c.Added.Empty() +} + +// String returns a human readable version of the comparison. +func (c *Comparison) String() string { + bld := strings.Builder{} + if !c.Modified.Empty() { + bld.WriteString(fmt.Sprintf("- Modified Fields:\n%v\n", c.Modified)) + } + if !c.Added.Empty() { + bld.WriteString(fmt.Sprintf("- Added Fields:\n%v\n", c.Added)) + } + if !c.Removed.Empty() { + bld.WriteString(fmt.Sprintf("- Removed Fields:\n%v\n", c.Removed)) + } + return bld.String() +} + +// ExcludeFields fields from the compare recursively removes the fields +// from the entire comparison +func (c *Comparison) ExcludeFields(fields *fieldpath.Set) *Comparison { + if fields == nil || fields.Empty() { + return c + } + c.Removed = c.Removed.RecursiveDifference(fields) + c.Modified = c.Modified.RecursiveDifference(fields) + c.Added = c.Added.RecursiveDifference(fields) + return c +} + +type compareWalker struct { + lhs value.Value + rhs value.Value + schema *schema.Schema + typeRef schema.TypeRef + + // Current path that we are comparing + path fieldpath.Path + + // Resulting comparison. + comparison *Comparison + + // internal housekeeping--don't set when constructing. + inLeaf bool // Set to true if we're in a "big leaf"--atomic map/list + + // Allocate only as many walkers as needed for the depth by storing them here. + spareWalkers *[]*compareWalker + + allocator value.Allocator +} + +// compare compares stuff. +func (w *compareWalker) compare(prefixFn func() string) (errs ValidationErrors) { + if w.lhs == nil && w.rhs == nil { + // check this condidition here instead of everywhere below. + return errorf("at least one of lhs and rhs must be provided") + } + a, ok := w.schema.Resolve(w.typeRef) + if !ok { + return errorf("schema error: no type found matching: %v", *w.typeRef.NamedType) + } + + alhs := deduceAtom(a, w.lhs) + arhs := deduceAtom(a, w.rhs) + + // deduceAtom does not fix the type for nil values + // nil is a wildcard and will accept whatever form the other operand takes + if w.rhs == nil { + errs = append(errs, handleAtom(alhs, w.typeRef, w)...) + } else if w.lhs == nil || alhs.Equals(&arhs) { + errs = append(errs, handleAtom(arhs, w.typeRef, w)...) + } else { + w2 := *w + errs = append(errs, handleAtom(alhs, w.typeRef, &w2)...) + errs = append(errs, handleAtom(arhs, w.typeRef, w)...) + } + + if !w.inLeaf { + if w.lhs == nil { + w.comparison.Added.Insert(w.path) + } else if w.rhs == nil { + w.comparison.Removed.Insert(w.path) + } + } + return errs.WithLazyPrefix(prefixFn) +} + +// doLeaf should be called on leaves before descending into children, if there +// will be a descent. It modifies w.inLeaf. +func (w *compareWalker) doLeaf() { + if w.inLeaf { + // We're in a "big leaf", an atomic map or list. Ignore + // subsequent leaves. + return + } + w.inLeaf = true + + // We don't recurse into leaf fields for merging. + if w.lhs == nil { + w.comparison.Added.Insert(w.path) + } else if w.rhs == nil { + w.comparison.Removed.Insert(w.path) + } else if !value.EqualsUsing(w.allocator, w.rhs, w.lhs) { + // TODO: Equality is not sufficient for this. + // Need to implement equality check on the value type. + w.comparison.Modified.Insert(w.path) + } +} + +func (w *compareWalker) doScalar(t *schema.Scalar) ValidationErrors { + // Make sure at least one side is a valid scalar. 
+ lerrs := validateScalar(t, w.lhs, "lhs: ") + rerrs := validateScalar(t, w.rhs, "rhs: ") + if len(lerrs) > 0 && len(rerrs) > 0 { + return append(lerrs, rerrs...) + } + + // All scalars are leaf fields. + w.doLeaf() + + return nil +} + +func (w *compareWalker) prepareDescent(pe fieldpath.PathElement, tr schema.TypeRef, cmp *Comparison) *compareWalker { + if w.spareWalkers == nil { + // first descent. + w.spareWalkers = &[]*compareWalker{} + } + var w2 *compareWalker + if n := len(*w.spareWalkers); n > 0 { + w2, *w.spareWalkers = (*w.spareWalkers)[n-1], (*w.spareWalkers)[:n-1] + } else { + w2 = &compareWalker{} + } + *w2 = *w + w2.typeRef = tr + w2.path = append(w2.path, pe) + w2.lhs = nil + w2.rhs = nil + w2.comparison = cmp + return w2 +} + +func (w *compareWalker) finishDescent(w2 *compareWalker) { + // if the descent caused a realloc, ensure that we reuse the buffer + // for the next sibling. + w.path = w2.path[:len(w2.path)-1] + *w.spareWalkers = append(*w.spareWalkers, w2) +} + +func (w *compareWalker) derefMap(prefix string, v value.Value) (value.Map, ValidationErrors) { + if v == nil { + return nil, nil + } + m, err := mapValue(w.allocator, v) + if err != nil { + return nil, errorf("%v: %v", prefix, err) + } + return m, nil +} + +func (w *compareWalker) visitListItems(t *schema.List, lhs, rhs value.List) (errs ValidationErrors) { + rLen := 0 + if rhs != nil { + rLen = rhs.Length() + } + lLen := 0 + if lhs != nil { + lLen = lhs.Length() + } + + maxLength := rLen + if lLen > maxLength { + maxLength = lLen + } + // Contains all the unique PEs between lhs and rhs, exactly once. + // Order doesn't matter since we're just tracking ownership in a set. + allPEs := make([]fieldpath.PathElement, 0, maxLength) + + // Gather all the elements from lhs, indexed by PE, in a list for duplicates. + lValues := fieldpath.MakePathElementMap(lLen) + for i := 0; i < lLen; i++ { + child := lhs.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, child) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + + if v, found := lValues.Get(pe); found { + list := v.([]value.Value) + lValues.Insert(pe, append(list, child)) + } else { + lValues.Insert(pe, []value.Value{child}) + allPEs = append(allPEs, pe) + } + } + + // Gather all the elements from rhs, indexed by PE, in a list for duplicates. + rValues := fieldpath.MakePathElementMap(rLen) + for i := 0; i < rLen; i++ { + rValue := rhs.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, rValue) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + if v, found := rValues.Get(pe); found { + list := v.([]value.Value) + rValues.Insert(pe, append(list, rValue)) + } else { + rValues.Insert(pe, []value.Value{rValue}) + if _, found := lValues.Get(pe); !found { + allPEs = append(allPEs, pe) + } + } + } + + for _, pe := range allPEs { + lList := []value.Value(nil) + if l, ok := lValues.Get(pe); ok { + lList = l.([]value.Value) + } + rList := []value.Value(nil) + if l, ok := rValues.Get(pe); ok { + rList = l.([]value.Value) + } + + switch { + case len(lList) == 0 && len(rList) == 0: + // We shouldn't be here anyway. 
+ return + // Normal use-case: + // We have no duplicates for this PE, compare items one-to-one. + case len(lList) <= 1 && len(rList) <= 1: + lValue := value.Value(nil) + if len(lList) != 0 { + lValue = lList[0] + } + rValue := value.Value(nil) + if len(rList) != 0 { + rValue = rList[0] + } + errs = append(errs, w.compareListItem(t, pe, lValue, rValue)...) + // Duplicates before & after use-case: + // Compare the duplicates lists as if they were atomic, mark modified if they changed. + case len(lList) >= 2 && len(rList) >= 2: + listEqual := func(lList, rList []value.Value) bool { + if len(lList) != len(rList) { + return false + } + for i := range lList { + if !value.Equals(lList[i], rList[i]) { + return false + } + } + return true + } + if !listEqual(lList, rList) { + w.comparison.Modified.Insert(append(w.path, pe)) + } + // Duplicates before & not anymore use-case: + // Rcursively add new non-duplicate items, Remove duplicate marker, + case len(lList) >= 2: + if len(rList) != 0 { + errs = append(errs, w.compareListItem(t, pe, nil, rList[0])...) + } + w.comparison.Removed.Insert(append(w.path, pe)) + // New duplicates use-case: + // Recursively remove old non-duplicate items, add duplicate marker. + case len(rList) >= 2: + if len(lList) != 0 { + errs = append(errs, w.compareListItem(t, pe, lList[0], nil)...) + } + w.comparison.Added.Insert(append(w.path, pe)) + } + } + + return +} + +func (w *compareWalker) indexListPathElements(t *schema.List, list value.List) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) { + var errs ValidationErrors + length := 0 + if list != nil { + length = list.Length() + } + observed := fieldpath.MakePathElementValueMap(length) + pes := make([]fieldpath.PathElement, 0, length) + for i := 0; i < length; i++ { + child := list.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, child) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + // Ignore repeated occurences of `pe`. + if _, found := observed.Get(pe); found { + continue + } + observed.Insert(pe, child) + pes = append(pes, pe) + } + return pes, observed, errs +} + +func (w *compareWalker) compareListItem(t *schema.List, pe fieldpath.PathElement, lChild, rChild value.Value) ValidationErrors { + w2 := w.prepareDescent(pe, t.ElementType, w.comparison) + w2.lhs = lChild + w2.rhs = rChild + errs := w2.compare(pe.String) + w.finishDescent(w2) + return errs +} + +func (w *compareWalker) derefList(prefix string, v value.Value) (value.List, ValidationErrors) { + if v == nil { + return nil, nil + } + l, err := listValue(w.allocator, v) + if err != nil { + return nil, errorf("%v: %v", prefix, err) + } + return l, nil +} + +func (w *compareWalker) doList(t *schema.List) (errs ValidationErrors) { + lhs, _ := w.derefList("lhs: ", w.lhs) + if lhs != nil { + defer w.allocator.Free(lhs) + } + rhs, _ := w.derefList("rhs: ", w.rhs) + if rhs != nil { + defer w.allocator.Free(rhs) + } + + // If both lhs and rhs are empty/null, treat it as a + // leaf: this helps preserve the empty/null + // distinction. 
+ emptyPromoteToLeaf := (lhs == nil || lhs.Length() == 0) && (rhs == nil || rhs.Length() == 0) + + if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf { + w.doLeaf() + return nil + } + + if lhs == nil && rhs == nil { + return nil + } + + errs = w.visitListItems(t, lhs, rhs) + + return errs +} + +func (w *compareWalker) visitMapItem(t *schema.Map, out map[string]interface{}, key string, lhs, rhs value.Value) (errs ValidationErrors) { + fieldType := t.ElementType + if sf, ok := t.FindField(key); ok { + fieldType = sf.Type + } + pe := fieldpath.PathElement{FieldName: &key} + w2 := w.prepareDescent(pe, fieldType, w.comparison) + w2.lhs = lhs + w2.rhs = rhs + errs = append(errs, w2.compare(pe.String)...) + w.finishDescent(w2) + return errs +} + +func (w *compareWalker) visitMapItems(t *schema.Map, lhs, rhs value.Map) (errs ValidationErrors) { + out := map[string]interface{}{} + + value.MapZipUsing(w.allocator, lhs, rhs, value.Unordered, func(key string, lhsValue, rhsValue value.Value) bool { + errs = append(errs, w.visitMapItem(t, out, key, lhsValue, rhsValue)...) + return true + }) + + return errs +} + +func (w *compareWalker) doMap(t *schema.Map) (errs ValidationErrors) { + lhs, _ := w.derefMap("lhs: ", w.lhs) + if lhs != nil { + defer w.allocator.Free(lhs) + } + rhs, _ := w.derefMap("rhs: ", w.rhs) + if rhs != nil { + defer w.allocator.Free(rhs) + } + // If both lhs and rhs are empty/null, treat it as a + // leaf: this helps preserve the empty/null + // distinction. + emptyPromoteToLeaf := (lhs == nil || lhs.Empty()) && (rhs == nil || rhs.Empty()) + + if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf { + w.doLeaf() + return nil + } + + if lhs == nil && rhs == nil { + return nil + } + + errs = append(errs, w.visitMapItems(t, lhs, rhs)...) + + return errs +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go index 19c77334f..78fdb0e75 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go @@ -197,7 +197,7 @@ func getAssociativeKeyDefault(s *schema.Schema, list *schema.List, fieldName str return field.Default, nil } -func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { +func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, child value.Value) (fieldpath.PathElement, error) { pe := fieldpath.PathElement{} if child.IsNull() { // null entries are illegal. 
@@ -225,7 +225,7 @@ func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, return pe, nil } -func setItemToPathElement(list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { +func setItemToPathElement(child value.Value) (fieldpath.PathElement, error) { pe := fieldpath.PathElement{} switch { case child.IsMap(): @@ -245,16 +245,15 @@ func setItemToPathElement(list *schema.List, index int, child value.Value) (fiel } } -func listItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { - if list.ElementRelationship == schema.Associative { - if len(list.Keys) > 0 { - return keyedAssociativeListItemToPathElement(a, s, list, index, child) - } +func listItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, child value.Value) (fieldpath.PathElement, error) { + if list.ElementRelationship != schema.Associative { + return fieldpath.PathElement{}, errors.New("invalid indexing of non-associative list") + } - // If there's no keys, then we must be a set of primitives. - return setItemToPathElement(list, index, child) + if len(list.Keys) > 0 { + return keyedAssociativeListItemToPathElement(a, s, list, child) } - // Use the index as a key for atomic lists. - return fieldpath.PathElement{Index: &index}, nil + // If there's no keys, then we must be a set of primitives. + return setItemToPathElement(child) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go index 913644083..fa227ac40 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go @@ -113,11 +113,12 @@ func (w *mergingWalker) doLeaf() { w.rule(w) } -func (w *mergingWalker) doScalar(t *schema.Scalar) (errs ValidationErrors) { - errs = append(errs, validateScalar(t, w.lhs, "lhs: ")...) - errs = append(errs, validateScalar(t, w.rhs, "rhs: ")...) - if len(errs) > 0 { - return errs +func (w *mergingWalker) doScalar(t *schema.Scalar) ValidationErrors { + // Make sure at least one side is a valid scalar. + lerrs := validateScalar(t, w.lhs, "lhs: ") + rerrs := validateScalar(t, w.rhs, "rhs: ") + if len(lerrs) > 0 && len(rerrs) > 0 { + return append(lerrs, rerrs...) } // All scalars are leaf fields. @@ -179,14 +180,18 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err } out := make([]interface{}, 0, outLen) - rhsOrder, observedRHS, rhsErrs := w.indexListPathElements(t, rhs) + rhsPEs, observedRHS, rhsErrs := w.indexListPathElements(t, rhs, false) errs = append(errs, rhsErrs...) - lhsOrder, observedLHS, lhsErrs := w.indexListPathElements(t, lhs) + lhsPEs, observedLHS, lhsErrs := w.indexListPathElements(t, lhs, true) errs = append(errs, lhsErrs...) 
+ if len(errs) != 0 { + return errs + } + sharedOrder := make([]*fieldpath.PathElement, 0, rLen) - for i := range rhsOrder { - pe := &rhsOrder[i] + for i := range rhsPEs { + pe := &rhsPEs[i] if _, ok := observedLHS.Get(*pe); ok { sharedOrder = append(sharedOrder, pe) } @@ -198,13 +203,15 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err sharedOrder = sharedOrder[1:] } - lLen, rLen = len(lhsOrder), len(rhsOrder) + mergedRHS := fieldpath.MakePathElementMap(len(rhsPEs)) + lLen, rLen = len(lhsPEs), len(rhsPEs) for lI, rI := 0, 0; lI < lLen || rI < rLen; { if lI < lLen && rI < rLen { - pe := lhsOrder[lI] - if pe.Equals(rhsOrder[rI]) { + pe := lhsPEs[lI] + if pe.Equals(rhsPEs[rI]) { // merge LHS & RHS items - lChild, _ := observedLHS.Get(pe) + mergedRHS.Insert(pe, struct{}{}) + lChild, _ := observedLHS.Get(pe) // may be nil if the PE is duplicaated. rChild, _ := observedRHS.Get(pe) mergeOut, errs := w.mergeListItem(t, pe, lChild, rChild) errs = append(errs, errs...) @@ -221,17 +228,17 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err } continue } - if _, ok := observedRHS.Get(pe); ok && nextShared != nil && !nextShared.Equals(lhsOrder[lI]) { + if _, ok := observedRHS.Get(pe); ok && nextShared != nil && !nextShared.Equals(lhsPEs[lI]) { // shared item, but not the one we want in this round lI++ continue } } if lI < lLen { - pe := lhsOrder[lI] + pe := lhsPEs[lI] if _, ok := observedRHS.Get(pe); !ok { - // take LHS item - lChild, _ := observedLHS.Get(pe) + // take LHS item using At to make sure we get the right item (observed may not contain the right item). + lChild := lhs.AtUsing(w.allocator, lI) mergeOut, errs := w.mergeListItem(t, pe, lChild, nil) errs = append(errs, errs...) if mergeOut != nil { @@ -239,12 +246,16 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err } lI++ continue + } else if _, ok := mergedRHS.Get(pe); ok { + // we've already merged it with RHS, we don't want to duplicate it, skip it. + lI++ } } if rI < rLen { // Take the RHS item, merge with matching LHS item if possible - pe := rhsOrder[rI] - lChild, _ := observedLHS.Get(pe) // may be nil + pe := rhsPEs[rI] + mergedRHS.Insert(pe, struct{}{}) + lChild, _ := observedLHS.Get(pe) // may be nil if absent or duplicaated. rChild, _ := observedRHS.Get(pe) mergeOut, errs := w.mergeListItem(t, pe, lChild, rChild) errs = append(errs, errs...) @@ -271,7 +282,7 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err return errs } -func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) { +func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List, allowDuplicates bool) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) { var errs ValidationErrors length := 0 if list != nil { @@ -281,7 +292,7 @@ func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) ( pes := make([]fieldpath.PathElement, 0, length) for i := 0; i < length; i++ { child := list.At(i) - pe, err := listItemToPathElement(w.allocator, w.schema, t, i, child) + pe, err := listItemToPathElement(w.allocator, w.schema, t, child) if err != nil { errs = append(errs, errorf("element %v: %v", i, err.Error())...) // If we can't construct the path element, we can't @@ -289,11 +300,15 @@ func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) ( // this element. 
continue } - if _, found := observed.Get(pe); found { + if _, found := observed.Get(pe); found && !allowDuplicates { errs = append(errs, errorf("duplicate entries for key %v", pe.String())...) continue + } else if !found { + observed.Insert(pe, child) + } else { + // Duplicated items are not merged with the new value, make them nil. + observed.Insert(pe, value.NewValueInterface(nil)) } - observed.Insert(pe, child) pes = append(pes, pe) } return pes, observed, errs diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go index 3949a78fc..4258ee5ba 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go @@ -93,13 +93,13 @@ func (p ParseableType) IsValid() bool { // FromYAML parses a yaml string into an object with the current schema // and the type "typename" or an error if validation fails. -func (p ParseableType) FromYAML(object YAMLObject) (*TypedValue, error) { +func (p ParseableType) FromYAML(object YAMLObject, opts ...ValidationOptions) (*TypedValue, error) { var v interface{} err := yaml.Unmarshal([]byte(object), &v) if err != nil { return nil, err } - return AsTyped(value.NewValueInterface(v), p.Schema, p.TypeRef) + return AsTyped(value.NewValueInterface(v), p.Schema, p.TypeRef, opts...) } // FromUnstructured converts a go "interface{}" type, typically an @@ -108,8 +108,8 @@ func (p ParseableType) FromYAML(object YAMLObject) (*TypedValue, error) { // The provided interface{} must be one of: map[string]interface{}, // map[interface{}]interface{}, []interface{}, int types, float types, // string or boolean. Nested interface{} must also be one of these types. -func (p ParseableType) FromUnstructured(in interface{}) (*TypedValue, error) { - return AsTyped(value.NewValueInterface(in), p.Schema, p.TypeRef) +func (p ParseableType) FromUnstructured(in interface{}, opts ...ValidationOptions) (*TypedValue, error) { + return AsTyped(value.NewValueInterface(in), p.Schema, p.TypeRef, opts...) } // FromStructured converts a go "interface{}" type, typically an structured object in @@ -117,12 +117,12 @@ func (p ParseableType) FromUnstructured(in interface{}) (*TypedValue, error) { // schema validation. The provided "interface{}" value must be a pointer so that the // value can be modified via reflection. The provided "interface{}" may contain structs // and types that are converted to Values by the jsonMarshaler interface. -func (p ParseableType) FromStructured(in interface{}) (*TypedValue, error) { +func (p ParseableType) FromStructured(in interface{}, opts ...ValidationOptions) (*TypedValue, error) { v, err := value.NewValueReflect(in) if err != nil { return nil, fmt.Errorf("error creating struct value reflector: %v", err) } - return AsTyped(v, p.Schema, p.TypeRef) + return AsTyped(v, p.Schema, p.TypeRef, opts...) 
} // DeducedParseableType is a ParseableType that deduces the type from diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go index a338d761d..ad071ee8f 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go @@ -74,9 +74,9 @@ func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) { iter := l.RangeUsing(w.allocator) defer w.allocator.Free(iter) for iter.Next() { - i, item := iter.Item() + _, item := iter.Item() // Ignore error because we have already validated this list - pe, _ := listItemToPathElement(w.allocator, w.schema, t, i, item) + pe, _ := listItemToPathElement(w.allocator, w.schema, t, item) path, _ := fieldpath.MakePath(pe) // save items on the path when we shouldExtract // but ignore them when we are removing (i.e. !w.shouldExtract) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go index 047efff05..d563a87ee 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go @@ -94,9 +94,31 @@ func (v *toFieldSetWalker) doScalar(t *schema.Scalar) ValidationErrors { } func (v *toFieldSetWalker) visitListItems(t *schema.List, list value.List) (errs ValidationErrors) { + // Keeps track of the PEs we've seen + seen := fieldpath.MakePathElementSet(list.Length()) + // Keeps tracks of the PEs we've counted as duplicates + duplicates := fieldpath.MakePathElementSet(list.Length()) for i := 0; i < list.Length(); i++ { child := list.At(i) - pe, _ := listItemToPathElement(v.allocator, v.schema, t, i, child) + pe, _ := listItemToPathElement(v.allocator, v.schema, t, child) + if seen.Has(pe) { + if duplicates.Has(pe) { + // do nothing + } else { + v.set.Insert(append(v.path, pe)) + duplicates.Insert(pe) + } + } else { + seen.Insert(pe) + } + } + + for i := 0; i < list.Length(); i++ { + child := list.At(i) + pe, _ := listItemToPathElement(v.allocator, v.schema, t, child) + if duplicates.Has(pe) { + continue + } v2 := v.prepareDescent(pe, t.ElementType) v2.value = child errs = append(errs, v2.toFieldSet()...) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go index d63a97fe2..9be902828 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go @@ -17,8 +17,6 @@ limitations under the License. package typed import ( - "fmt" - "strings" "sync" "sigs.k8s.io/structured-merge-diff/v4/fieldpath" @@ -26,16 +24,24 @@ import ( "sigs.k8s.io/structured-merge-diff/v4/value" ) +// ValidationOptions is the list of all the options available when running the validation. +type ValidationOptions int + +const ( + // AllowDuplicates means that sets and associative lists can have duplicate similar items. + AllowDuplicates ValidationOptions = iota +) + // AsTyped accepts a value and a type and returns a TypedValue. 'v' must have // type 'typeName' in the schema. An error is returned if the v doesn't conform // to the schema. 
-func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef) (*TypedValue, error) { +func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef, opts ...ValidationOptions) (*TypedValue, error) { tv := &TypedValue{ value: v, typeRef: typeRef, schema: s, } - if err := tv.Validate(); err != nil { + if err := tv.Validate(opts...); err != nil { return nil, err } return tv, nil @@ -45,6 +51,10 @@ func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef) (*TypedVal // conforms to the schema, for cases where that has already been checked or // where you're going to call a method that validates as a side-effect (like // ToFieldSet). +// +// Deprecated: This function was initially created because validation +// was expensive. Now that this has been solved, objects should always +// be created as validated, using `AsTyped`. func AsTypedUnvalidated(v value.Value, s *schema.Schema, typeRef schema.TypeRef) *TypedValue { tv := &TypedValue{ value: v, @@ -77,8 +87,14 @@ func (tv TypedValue) Schema() *schema.Schema { } // Validate returns an error with a list of every spec violation. -func (tv TypedValue) Validate() error { +func (tv TypedValue) Validate(opts ...ValidationOptions) error { w := tv.walker() + for _, opt := range opts { + switch opt { + case AllowDuplicates: + w.allowDuplicates = true + } + } defer w.finished() if errs := w.validate(nil); len(errs) != 0 { return errs @@ -113,6 +129,10 @@ func (tv TypedValue) Merge(pso *TypedValue) (*TypedValue, error) { return merge(&tv, pso, ruleKeepRHS, nil) } +var cmpwPool = sync.Pool{ + New: func() interface{} { return &compareWalker{} }, +} + // Compare compares the two objects. See the comments on the `Comparison` // struct for details on the return value. // @@ -120,33 +140,44 @@ func (tv TypedValue) Merge(pso *TypedValue) (*TypedValue, error) { // match), or an error will be returned. Validation errors will be returned if // the objects don't conform to the schema. func (tv TypedValue) Compare(rhs *TypedValue) (c *Comparison, err error) { - c = &Comparison{ + lhs := tv + if lhs.schema != rhs.schema { + return nil, errorf("expected objects with types from the same schema") + } + if !lhs.typeRef.Equals(&rhs.typeRef) { + return nil, errorf("expected objects of the same type, but got %v and %v", lhs.typeRef, rhs.typeRef) + } + + cmpw := cmpwPool.Get().(*compareWalker) + defer func() { + cmpw.lhs = nil + cmpw.rhs = nil + cmpw.schema = nil + cmpw.typeRef = schema.TypeRef{} + cmpw.comparison = nil + cmpw.inLeaf = false + + cmpwPool.Put(cmpw) + }() + + cmpw.lhs = lhs.value + cmpw.rhs = rhs.value + cmpw.schema = lhs.schema + cmpw.typeRef = lhs.typeRef + cmpw.comparison = &Comparison{ Removed: fieldpath.NewSet(), Modified: fieldpath.NewSet(), Added: fieldpath.NewSet(), } - _, err = merge(&tv, rhs, func(w *mergingWalker) { - if w.lhs == nil { - c.Added.Insert(w.path) - } else if w.rhs == nil { - c.Removed.Insert(w.path) - } else if !value.Equals(w.rhs, w.lhs) { - // TODO: Equality is not sufficient for this. - // Need to implement equality check on the value type. - c.Modified.Insert(w.path) - } - }, func(w *mergingWalker) { - if w.lhs == nil { - c.Added.Insert(w.path) - } else if w.rhs == nil { - c.Removed.Insert(w.path) - } - }) - if err != nil { - return nil, err + if cmpw.allocator == nil { + cmpw.allocator = value.NewFreelistAllocator() } - return c, nil + errs := cmpw.compare(nil) + if len(errs) > 0 { + return nil, errs + } + return cmpw.comparison, nil } // RemoveItems removes each provided list or map item from the value. 
@@ -161,63 +192,6 @@ func (tv TypedValue) ExtractItems(items *fieldpath.Set) *TypedValue { return &tv } -// NormalizeUnions takes the new object and normalizes the union: -// - If discriminator changed to non-nil, and a new field has been added -// that doesn't match, an error is returned, -// - If discriminator hasn't changed and two fields or more are set, an -// error is returned, -// - If discriminator changed to non-nil, all other fields but the -// discriminated one will be cleared, -// - Otherwise, If only one field is left, update discriminator to that value. -// -// Please note: union behavior isn't finalized yet and this is still experimental. -func (tv TypedValue) NormalizeUnions(new *TypedValue) (*TypedValue, error) { - var errs ValidationErrors - var normalizeFn = func(w *mergingWalker) { - if w.rhs != nil { - v := w.rhs.Unstructured() - w.out = &v - } - if err := normalizeUnions(w); err != nil { - errs = append(errs, errorf(err.Error())...) - } - } - out, mergeErrs := merge(&tv, new, func(w *mergingWalker) {}, normalizeFn) - if mergeErrs != nil { - errs = append(errs, mergeErrs.(ValidationErrors)...) - } - if len(errs) > 0 { - return nil, errs - } - return out, nil -} - -// NormalizeUnionsApply specifically normalize unions on apply. It -// validates that the applied union is correct (there should be no -// ambiguity there), and clear the fields according to the sent intent. -// -// Please note: union behavior isn't finalized yet and this is still experimental. -func (tv TypedValue) NormalizeUnionsApply(new *TypedValue) (*TypedValue, error) { - var errs ValidationErrors - var normalizeFn = func(w *mergingWalker) { - if w.rhs != nil { - v := w.rhs.Unstructured() - w.out = &v - } - if err := normalizeUnionsApply(w); err != nil { - errs = append(errs, errorf(err.Error())...) - } - } - out, mergeErrs := merge(&tv, new, func(w *mergingWalker) {}, normalizeFn) - if mergeErrs != nil { - errs = append(errs, mergeErrs.(ValidationErrors)...) - } - if len(errs) > 0 { - return nil, errs - } - return out, nil -} - func (tv TypedValue) Empty() *TypedValue { tv.value = value.NewValueInterface(nil) return &tv @@ -273,50 +247,3 @@ func merge(lhs, rhs *TypedValue, rule, postRule mergeRule) (*TypedValue, error) } return out, nil } - -// Comparison is the return value of a TypedValue.Compare() operation. -// -// No field will appear in more than one of the three fieldsets. If all of the -// fieldsets are empty, then the objects must have been equal. -type Comparison struct { - // Removed contains any fields removed by rhs (the right-hand-side - // object in the comparison). - Removed *fieldpath.Set - // Modified contains fields present in both objects but different. - Modified *fieldpath.Set - // Added contains any fields added by rhs. - Added *fieldpath.Set -} - -// IsSame returns true if the comparison returned no changes (the two -// compared objects are similar). -func (c *Comparison) IsSame() bool { - return c.Removed.Empty() && c.Modified.Empty() && c.Added.Empty() -} - -// String returns a human readable version of the comparison. 
-func (c *Comparison) String() string { - bld := strings.Builder{} - if !c.Modified.Empty() { - bld.WriteString(fmt.Sprintf("- Modified Fields:\n%v\n", c.Modified)) - } - if !c.Added.Empty() { - bld.WriteString(fmt.Sprintf("- Added Fields:\n%v\n", c.Added)) - } - if !c.Removed.Empty() { - bld.WriteString(fmt.Sprintf("- Removed Fields:\n%v\n", c.Removed)) - } - return bld.String() -} - -// ExcludeFields fields from the compare recursively removes the fields -// from the entire comparison -func (c *Comparison) ExcludeFields(fields *fieldpath.Set) *Comparison { - if fields == nil || fields.Empty() { - return c - } - c.Removed = c.Removed.RecursiveDifference(fields) - c.Modified = c.Modified.RecursiveDifference(fields) - c.Added = c.Added.RecursiveDifference(fields) - return c -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go deleted file mode 100644 index 1fa5d88ae..000000000 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go +++ /dev/null @@ -1,276 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package typed - -import ( - "fmt" - "strings" - - "sigs.k8s.io/structured-merge-diff/v4/schema" - "sigs.k8s.io/structured-merge-diff/v4/value" -) - -func normalizeUnions(w *mergingWalker) error { - atom, found := w.schema.Resolve(w.typeRef) - if !found { - panic(fmt.Sprintf("Unable to resolve schema in normalize union: %v/%v", w.schema, w.typeRef)) - } - // Unions can only be in structures, and the struct must not have been removed - if atom.Map == nil || w.out == nil { - return nil - } - - var old value.Map - if w.lhs != nil && !w.lhs.IsNull() { - old = w.lhs.AsMap() - } - for _, union := range atom.Map.Unions { - if err := newUnion(&union).Normalize(old, w.rhs.AsMap(), value.NewValueInterface(*w.out).AsMap()); err != nil { - return err - } - } - return nil -} - -func normalizeUnionsApply(w *mergingWalker) error { - atom, found := w.schema.Resolve(w.typeRef) - if !found { - panic(fmt.Sprintf("Unable to resolve schema in normalize union: %v/%v", w.schema, w.typeRef)) - } - // Unions can only be in structures, and the struct must not have been removed - if atom.Map == nil || w.out == nil { - return nil - } - - var old value.Map - if w.lhs != nil && !w.lhs.IsNull() { - old = w.lhs.AsMap() - } - - for _, union := range atom.Map.Unions { - out := value.NewValueInterface(*w.out) - if err := newUnion(&union).NormalizeApply(old, w.rhs.AsMap(), out.AsMap()); err != nil { - return err - } - *w.out = out.Unstructured() - } - return nil -} - -type discriminated string -type field string - -type discriminatedNames struct { - f2d map[field]discriminated - d2f map[discriminated]field -} - -func newDiscriminatedName(f2d map[field]discriminated) discriminatedNames { - d2f := map[discriminated]field{} - for key, value := range f2d { - d2f[value] = key - } - return discriminatedNames{ - f2d: f2d, - d2f: d2f, - } -} - -func (dn discriminatedNames) toField(d discriminated) field { - if 
f, ok := dn.d2f[d]; ok { - return f - } - return field(d) -} - -func (dn discriminatedNames) toDiscriminated(f field) discriminated { - if d, ok := dn.f2d[f]; ok { - return d - } - return discriminated(f) -} - -type discriminator struct { - name string -} - -func (d *discriminator) Set(m value.Map, v discriminated) { - if d == nil { - return - } - m.Set(d.name, value.NewValueInterface(string(v))) -} - -func (d *discriminator) Get(m value.Map) discriminated { - if d == nil || m == nil { - return "" - } - val, ok := m.Get(d.name) - if !ok { - return "" - } - if !val.IsString() { - return "" - } - return discriminated(val.AsString()) -} - -type fieldsSet map[field]struct{} - -// newFieldsSet returns a map of the fields that are part of the union and are set -// in the given map. -func newFieldsSet(m value.Map, fields []field) fieldsSet { - if m == nil { - return nil - } - set := fieldsSet{} - for _, f := range fields { - if subField, ok := m.Get(string(f)); ok && !subField.IsNull() { - set.Add(f) - } - } - return set -} - -func (fs fieldsSet) Add(f field) { - if fs == nil { - fs = map[field]struct{}{} - } - fs[f] = struct{}{} -} - -func (fs fieldsSet) One() *field { - for f := range fs { - return &f - } - return nil -} - -func (fs fieldsSet) Has(f field) bool { - _, ok := fs[f] - return ok -} - -func (fs fieldsSet) List() []field { - fields := []field{} - for f := range fs { - fields = append(fields, f) - } - return fields -} - -func (fs fieldsSet) Difference(o fieldsSet) fieldsSet { - n := fieldsSet{} - for f := range fs { - if !o.Has(f) { - n.Add(f) - } - } - return n -} - -func (fs fieldsSet) String() string { - s := []string{} - for k := range fs { - s = append(s, string(k)) - } - return strings.Join(s, ", ") -} - -type union struct { - deduceInvalidDiscriminator bool - d *discriminator - dn discriminatedNames - f []field -} - -func newUnion(su *schema.Union) *union { - u := &union{} - if su.Discriminator != nil { - u.d = &discriminator{name: *su.Discriminator} - } - f2d := map[field]discriminated{} - for _, f := range su.Fields { - u.f = append(u.f, field(f.FieldName)) - f2d[field(f.FieldName)] = discriminated(f.DiscriminatorValue) - } - u.dn = newDiscriminatedName(f2d) - u.deduceInvalidDiscriminator = su.DeduceInvalidDiscriminator - return u -} - -// clear removes all the fields in map that are part of the union, but -// the one we decided to keep. -func (u *union) clear(m value.Map, f field) { - for _, fieldName := range u.f { - if field(fieldName) != f { - m.Delete(string(fieldName)) - } - } -} - -func (u *union) Normalize(old, new, out value.Map) error { - os := newFieldsSet(old, u.f) - ns := newFieldsSet(new, u.f) - diff := ns.Difference(os) - - if u.d.Get(old) != u.d.Get(new) && u.d.Get(new) != "" { - if len(diff) == 1 && u.d.Get(new) != u.dn.toDiscriminated(*diff.One()) { - return fmt.Errorf("discriminator (%v) and field changed (%v) don't match", u.d.Get(new), diff.One()) - } - if len(diff) > 1 { - return fmt.Errorf("multiple new fields added: %v", diff) - } - u.clear(out, u.dn.toField(u.d.Get(new))) - return nil - } - - if len(ns) > 1 { - return fmt.Errorf("multiple fields set without discriminator change: %v", ns) - } - - // Set discriminiator if it needs to be deduced. 
- if u.deduceInvalidDiscriminator && len(ns) == 1 { - u.d.Set(out, u.dn.toDiscriminated(*ns.One())) - } - - return nil -} - -func (u *union) NormalizeApply(applied, merged, out value.Map) error { - as := newFieldsSet(applied, u.f) - if len(as) > 1 { - return fmt.Errorf("more than one field of union applied: %v", as) - } - if len(as) == 0 { - // None is set, just leave. - return nil - } - // We have exactly one, discriminiator must match if set - if u.d.Get(applied) != "" && u.d.Get(applied) != u.dn.toDiscriminated(*as.One()) { - return fmt.Errorf("applied discriminator (%v) doesn't match applied field (%v)", u.d.Get(applied), *as.One()) - } - - // Update discriminiator if needed - if u.deduceInvalidDiscriminator { - u.d.Set(out, u.dn.toDiscriminated(*as.One())) - } - // Clear others fields. - u.clear(out, *as.One()) - - return nil -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go index 378d30219..652e24c81 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go @@ -33,6 +33,7 @@ func (tv TypedValue) walker() *validatingObjectWalker { v.value = tv.value v.schema = tv.schema v.typeRef = tv.typeRef + v.allowDuplicates = false if v.allocator == nil { v.allocator = value.NewFreelistAllocator() } @@ -49,6 +50,9 @@ type validatingObjectWalker struct { value value.Value schema *schema.Schema typeRef schema.TypeRef + // If set to true, duplicates will be allowed in + // associativeLists/sets. + allowDuplicates bool // Allocate only as many walkers as needed for the depth by storing them here. spareWalkers *[]*validatingObjectWalker @@ -102,6 +106,12 @@ func validateScalar(t *schema.Scalar, v value.Value, prefix string) (errs Valida if !v.IsBool() { return errorf("%vexpected boolean, got %v", prefix, v) } + case schema.Untyped: + if !v.IsFloat() && !v.IsInt() && !v.IsString() && !v.IsBool() { + return errorf("%vexpected any scalar, got %v", prefix, v) + } + default: + return errorf("%vunexpected scalar type in schema: %v", prefix, *t) } return nil } @@ -123,7 +133,7 @@ func (v *validatingObjectWalker) visitListItems(t *schema.List, list value.List) pe.Index = &i } else { var err error - pe, err = listItemToPathElement(v.allocator, v.schema, t, i, child) + pe, err = listItemToPathElement(v.allocator, v.schema, t, child) if err != nil { errs = append(errs, errorf("element %v: %v", i, err.Error())...) // If we can't construct the path element, we can't @@ -131,7 +141,7 @@ func (v *validatingObjectWalker) visitListItems(t *schema.List, list value.List) // this element. return } - if observedKeys.Has(pe) { + if observedKeys.Has(pe) && !v.allowDuplicates { errs = append(errs, errorf("duplicate entries for key %v", pe.String())...) 
} observedKeys.Insert(pe) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go index dc8b8c720..c38402b99 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go @@ -136,7 +136,7 @@ func (r mapReflect) EqualsUsing(a Allocator, m Map) bool { if !ok { return false } - return Equals(vr.mustReuse(lhsVal, entry, nil, nil), value) + return EqualsUsing(a, vr.mustReuse(lhsVal, entry, nil, nil), value) }) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go index d8e208628..c3ae00b18 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go @@ -88,12 +88,12 @@ func (m mapUnstructuredInterface) EqualsUsing(a Allocator, other Map) bool { } vv := a.allocValueUnstructured() defer a.Free(vv) - return other.Iterate(func(key string, value Value) bool { + return other.IterateUsing(a, func(key string, value Value) bool { lhsVal, ok := m[key] if !ok { return false } - return Equals(vv.reuse(lhsVal), value) + return EqualsUsing(a, vv.reuse(lhsVal), value) }) } @@ -168,12 +168,12 @@ func (m mapUnstructuredString) EqualsUsing(a Allocator, other Map) bool { } vv := a.allocValueUnstructured() defer a.Free(vv) - return other.Iterate(func(key string, value Value) bool { + return other.IterateUsing(a, func(key string, value Value) bool { lhsVal, ok := m[key] if !ok { return false } - return Equals(vv.reuse(lhsVal), value) + return EqualsUsing(a, vv.reuse(lhsVal), value) }) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go index a5a467c0f..f0d58d42c 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go @@ -154,7 +154,9 @@ func buildStructCacheEntry(t reflect.Type, infos map[string]*FieldCacheEntry, fi if field.Type.Kind() == reflect.Ptr { e = field.Type.Elem() } - buildStructCacheEntry(e, infos, append(fieldPath, field.Index)) + if e.Kind() == reflect.Struct { + buildStructCacheEntry(e, infos, append(fieldPath, field.Index)) + } continue } info := &FieldCacheEntry{JsonName: jsonName, isOmitEmpty: isOmitempty, fieldPath: append(fieldPath, field.Index), fieldType: field.Type} From ea94371897b708a84441f03f324a3e25d8282c7c Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 9 Jan 2024 20:55:20 +0800 Subject: [PATCH 045/157] Update README.md --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index c2c89e77a..9123aa2b8 100644 --- a/README.md +++ b/README.md @@ -58,8 +58,10 @@ This option does not depend on cloud provider config file, supports cross subscr - install managed CSI driver on following platforms: - [AKS](https://learn.microsoft.com/en-us/azure/aks/azure-blob-csi) -### Usage +### Examples - [Basic usage](./deploy/example/e2e_usage.md) + +### Usage - [NFSv3](./deploy/example/nfs) - [fsGroupPolicy](./deploy/example/fsgroup) - [Volume cloning](./deploy/example/cloning) From b3ce32fbf0e64785b9ccbbbbb42b17f1a24321df Mon Sep 17 00:00:00 2001 From: weizhichen Date: Tue, 9 Jan 2024 14:40:26 +0000 Subject: [PATCH 046/157] fix ut --- pkg/blob/azure_test.go | 65 
++++++++++++++++-------------------------- 1 file changed, 25 insertions(+), 40 deletions(-) diff --git a/pkg/blob/azure_test.go b/pkg/blob/azure_test.go index 4ba69d1a7..4e00a7f50 100644 --- a/pkg/blob/azure_test.go +++ b/pkg/blob/azure_test.go @@ -27,6 +27,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" "github.com/Azure/azure-sdk-for-go/storage" "github.com/golang/mock/gomock" + "k8s.io/client-go/kubernetes" "github.com/Azure/go-autorest/autorest/azure" "github.com/stretchr/testify/assert" @@ -61,12 +62,7 @@ kind: Config users: - name: foo-user user: - exec: - apiVersion: client.authentication.k8s.io/v1alpha1 - args: - - arg-1 - - arg-2 - command: foo-command + token: 2fef7f7c64127579b48d61434c44ad46d87793169ee6a4199af3ce16a3cf5be3371 ` err := createTestFile(emptyKubeConfig) @@ -87,55 +83,41 @@ users: nodeID string userAgent string allowEmptyCloudConfig bool - expectedErr error + expectedErrorMessage string + expectError bool }{ { - desc: "out of cluster, no kubeconfig, no credential file", - kubeconfig: "", + desc: "[success] out of cluster, no kubeconfig, no credential file", nodeID: "", allowEmptyCloudConfig: true, - expectedErr: nil, + expectError: false, }, { - desc: "[failure][disallowEmptyCloudConfig] out of cluster, no kubeconfig, no credential file", - kubeconfig: "", + desc: "[failure][disallowEmptyCloudConfig] out of cluster, no kubeconfig, no credential file", nodeID: "", allowEmptyCloudConfig: false, - expectedErr: nil, + expectError: true, + expectedErrorMessage: "no cloud config provided, error: open /etc/kubernetes/azure.json: no such file or directory", }, { - desc: "[failure] out of cluster & in cluster, specify a non-exist kubeconfig, no credential file", - kubeconfig: "/tmp/non-exist.json", - nodeID: "", - allowEmptyCloudConfig: true, - expectedErr: nil, - }, - { - desc: "[failure] out of cluster & in cluster, specify a empty kubeconfig, no credential file", - kubeconfig: emptyKubeConfig, - nodeID: "", - allowEmptyCloudConfig: true, - expectedErr: fmt.Errorf("invalid configuration: no configuration has been provided, try setting KUBERNETES_MASTER environment variable"), - }, - { - desc: "[failure] out of cluster & in cluster, specify a fake kubeconfig, no credential file", + desc: "[success] out of cluster & in cluster, specify a fake kubeconfig, no credential file", createFakeKubeConfig: true, kubeconfig: fakeKubeConfig, nodeID: "", allowEmptyCloudConfig: true, - expectedErr: nil, + expectError: false, }, { desc: "[success] out of cluster & in cluster, no kubeconfig, a fake credential file", createFakeCredFile: true, - kubeconfig: "", nodeID: "", userAgent: "useragent", allowEmptyCloudConfig: true, - expectedErr: nil, + expectError: false, }, } + var kubeClient kubernetes.Interface for _, test := range tests { if test.createFakeKubeConfig { if err := createTestFile(fakeKubeConfig); err != nil { @@ -150,6 +132,13 @@ users: if err := os.WriteFile(fakeKubeConfig, []byte(fakeContent), 0666); err != nil { t.Error(err) } + + kubeClient, err = util.GetKubeClient(test.kubeconfig, 25.0, 50, "") + if err != nil { + t.Error(err) + } + } else { + kubeClient = nil } if test.createFakeCredFile { if err := createTestFile(fakeCredFile); err != nil { @@ -169,17 +158,13 @@ users: } os.Setenv(DefaultAzureCredentialFileEnv, fakeCredFile) } - kubeClient, err := util.GetKubeClient(test.kubeconfig, 25.0, 50, "") - if err != nil { - if !reflect.DeepEqual(err, test.expectedErr) && test.expectedErr != nil && !strings.Contains(err.Error(), 
test.expectedErr.Error()) { - t.Errorf("desc: %s,\n input: %q, GetCloudProvider err: %v, expectedErr: %v", test.desc, test.kubeconfig, err, test.expectedErr) - } - continue - } + cloud, err := GetCloudProvider(context.Background(), kubeClient, test.nodeID, "", "", test.userAgent, test.allowEmptyCloudConfig) - if !reflect.DeepEqual(err, test.expectedErr) && test.expectedErr != nil && !strings.Contains(err.Error(), test.expectedErr.Error()) { - t.Errorf("desc: %s,\n input: %q, GetCloudProvider err: %v, expectedErr: %v", test.desc, test.kubeconfig, err, test.expectedErr) + assert.Equal(t, test.expectError, err != nil) + if test.expectError { + assert.Equal(t, test.expectedErrorMessage, err.Error()) } + if cloud == nil { t.Errorf("return value of getCloudProvider should not be nil even there is error") } else { From dbb0c96143f89b3822352ba3ef50b2f04e35099c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 9 Jan 2024 16:43:17 +0000 Subject: [PATCH 047/157] chore(deps): bump golang.org/x/net from 0.19.0 to 0.20.0 Bumps [golang.org/x/net](https://github.com/golang/net) from 0.19.0 to 0.20.0. - [Commits](https://github.com/golang/net/compare/v0.19.0...v0.20.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 8 +-- go.sum | 16 +++--- .../x/crypto/internal/poly1305/bits_compat.go | 39 -------------- .../x/crypto/internal/poly1305/bits_go1.13.go | 21 -------- .../x/crypto/internal/poly1305/sum_generic.go | 43 ++++++++------- vendor/golang.org/x/sys/unix/mkerrors.sh | 37 +++++++------ vendor/golang.org/x/sys/unix/zerrors_linux.go | 54 +++++++++++++++++++ .../x/sys/unix/zsyscall_openbsd_386.go | 2 - .../x/sys/unix/zsyscall_openbsd_amd64.go | 2 - .../x/sys/unix/zsyscall_openbsd_arm.go | 2 - .../x/sys/unix/zsyscall_openbsd_arm64.go | 2 - .../x/sys/unix/zsyscall_openbsd_mips64.go | 2 - .../x/sys/unix/zsyscall_openbsd_ppc64.go | 2 - .../x/sys/unix/zsyscall_openbsd_riscv64.go | 2 - .../x/sys/windows/syscall_windows.go | 1 + .../x/sys/windows/zsyscall_windows.go | 9 ++++ vendor/modules.txt | 8 +-- 17 files changed, 124 insertions(+), 126 deletions(-) delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go diff --git a/go.mod b/go.mod index 2386ee967..98376a8ce 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/pelletier/go-toml v1.9.5 github.com/stretchr/testify v1.8.4 go.uber.org/mock v0.4.0 - golang.org/x/net v0.19.0 + golang.org/x/net v0.20.0 google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.32.0 k8s.io/api v0.29.0 @@ -128,12 +128,12 @@ require ( go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.19.0 // indirect - golang.org/x/crypto v0.17.0 // indirect + golang.org/x/crypto v0.18.0 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/oauth2 v0.11.0 // indirect golang.org/x/sync v0.6.0 - golang.org/x/sys v0.15.0 // indirect - golang.org/x/term v0.15.0 // indirect + golang.org/x/sys v0.16.0 // indirect + golang.org/x/term v0.16.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.14.0 // indirect diff --git a/go.sum b/go.sum index 3eacd79f9..34b6696f9 100644 --- a/go.sum +++ b/go.sum @@ -326,8 +326,8 @@ golang.org/x/crypto 
v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -346,8 +346,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -367,13 +367,13 @@ golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= diff --git 
a/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go deleted file mode 100644 index d33c8890f..000000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.13 - -package poly1305 - -// Generic fallbacks for the math/bits intrinsics, copied from -// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sum64 had -// variable time fallbacks until Go 1.13. - -func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { - sum = x + y + carry - carryOut = ((x & y) | ((x | y) &^ sum)) >> 63 - return -} - -func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { - diff = x - y - borrow - borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63 - return -} - -func bitsMul64(x, y uint64) (hi, lo uint64) { - const mask32 = 1<<32 - 1 - x0 := x & mask32 - x1 := x >> 32 - y0 := y & mask32 - y1 := y >> 32 - w0 := x0 * y0 - t := x1*y0 + w0>>32 - w1 := t & mask32 - w2 := t >> 32 - w1 += x0 * y1 - hi = x1*y1 + w2 + w1>>32 - lo = x * y - return -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go deleted file mode 100644 index 495c1fa69..000000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.13 - -package poly1305 - -import "math/bits" - -func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { - return bits.Add64(x, y, carry) -} - -func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { - return bits.Sub64(x, y, borrow) -} - -func bitsMul64(x, y uint64) (hi, lo uint64) { - return bits.Mul64(x, y) -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go index e041da5ea..ec2202bd7 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go @@ -7,7 +7,10 @@ package poly1305 -import "encoding/binary" +import ( + "encoding/binary" + "math/bits" +) // Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag // for a 64 bytes message is approximately @@ -114,13 +117,13 @@ type uint128 struct { } func mul64(a, b uint64) uint128 { - hi, lo := bitsMul64(a, b) + hi, lo := bits.Mul64(a, b) return uint128{lo, hi} } func add128(a, b uint128) uint128 { - lo, c := bitsAdd64(a.lo, b.lo, 0) - hi, c := bitsAdd64(a.hi, b.hi, c) + lo, c := bits.Add64(a.lo, b.lo, 0) + hi, c := bits.Add64(a.hi, b.hi, c) if c != 0 { panic("poly1305: unexpected overflow") } @@ -155,8 +158,8 @@ func updateGeneric(state *macState, msg []byte) { // hide leading zeroes. For full chunks, that's 1 << 128, so we can just // add 1 to the most significant (2¹²â¸) limb, h2. 
if len(msg) >= TagSize { - h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0) - h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c) + h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0) + h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(msg[8:16]), c) h2 += c + 1 msg = msg[TagSize:] @@ -165,8 +168,8 @@ func updateGeneric(state *macState, msg []byte) { copy(buf[:], msg) buf[len(msg)] = 1 - h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0) - h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c) + h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0) + h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(buf[8:16]), c) h2 += c msg = nil @@ -219,9 +222,9 @@ func updateGeneric(state *macState, msg []byte) { m3 := h2r1 t0 := m0.lo - t1, c := bitsAdd64(m1.lo, m0.hi, 0) - t2, c := bitsAdd64(m2.lo, m1.hi, c) - t3, _ := bitsAdd64(m3.lo, m2.hi, c) + t1, c := bits.Add64(m1.lo, m0.hi, 0) + t2, c := bits.Add64(m2.lo, m1.hi, c) + t3, _ := bits.Add64(m3.lo, m2.hi, c) // Now we have the result as 4 64-bit limbs, and we need to reduce it // modulo 2¹³Ⱐ- 5. The special shape of this Crandall prime lets us do @@ -243,14 +246,14 @@ func updateGeneric(state *macState, msg []byte) { // To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c. - h0, c = bitsAdd64(h0, cc.lo, 0) - h1, c = bitsAdd64(h1, cc.hi, c) + h0, c = bits.Add64(h0, cc.lo, 0) + h1, c = bits.Add64(h1, cc.hi, c) h2 += c cc = shiftRightBy2(cc) - h0, c = bitsAdd64(h0, cc.lo, 0) - h1, c = bitsAdd64(h1, cc.hi, c) + h0, c = bits.Add64(h0, cc.lo, 0) + h1, c = bits.Add64(h1, cc.hi, c) h2 += c // h2 is at most 3 + 1 + 1 = 5, making the whole of h at most @@ -287,9 +290,9 @@ func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { // in constant time, we compute t = h - (2¹³Ⱐ- 5), and select h as the // result if the subtraction underflows, and t otherwise. - hMinusP0, b := bitsSub64(h0, p0, 0) - hMinusP1, b := bitsSub64(h1, p1, b) - _, b = bitsSub64(h2, p2, b) + hMinusP0, b := bits.Sub64(h0, p0, 0) + hMinusP1, b := bits.Sub64(h1, p1, b) + _, b = bits.Sub64(h2, p2, b) // h = h if h < p else h - p h0 = select64(b, h0, hMinusP0) @@ -301,8 +304,8 @@ func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { // // by just doing a wide addition with the 128 low bits of h and discarding // the overflow. 
- h0, c := bitsAdd64(h0, s[0], 0) - h1, _ = bitsAdd64(h1, s[1], c) + h0, c := bits.Add64(h0, s[0], 0) + h1, _ = bits.Add64(h1, s[1], c) binary.LittleEndian.PutUint64(out[0:8], h0) binary.LittleEndian.PutUint64(out[8:16], h1) diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 6202638ba..c6492020e 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -248,6 +248,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -283,10 +284,6 @@ struct ltchars { #include #endif -#ifndef MSG_FASTOPEN -#define MSG_FASTOPEN 0x20000000 -#endif - #ifndef PTRACE_GETREGS #define PTRACE_GETREGS 0xc #endif @@ -295,14 +292,6 @@ struct ltchars { #define PTRACE_SETREGS 0xd #endif -#ifndef SOL_NETLINK -#define SOL_NETLINK 270 -#endif - -#ifndef SOL_SMC -#define SOL_SMC 286 -#endif - #ifdef SOL_BLUETOOTH // SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h // but it is already in bluetooth_linux.go @@ -319,10 +308,23 @@ struct ltchars { #undef TIPC_WAIT_FOREVER #define TIPC_WAIT_FOREVER 0xffffffff -// Copied from linux/l2tp.h -// Including linux/l2tp.h here causes conflicts between linux/in.h -// and netinet/in.h included via net/route.h above. -#define IPPROTO_L2TP 115 +// Copied from linux/netfilter/nf_nat.h +// Including linux/netfilter/nf_nat.h here causes conflicts between linux/in.h +// and netinet/in.h. +#define NF_NAT_RANGE_MAP_IPS (1 << 0) +#define NF_NAT_RANGE_PROTO_SPECIFIED (1 << 1) +#define NF_NAT_RANGE_PROTO_RANDOM (1 << 2) +#define NF_NAT_RANGE_PERSISTENT (1 << 3) +#define NF_NAT_RANGE_PROTO_RANDOM_FULLY (1 << 4) +#define NF_NAT_RANGE_PROTO_OFFSET (1 << 5) +#define NF_NAT_RANGE_NETMAP (1 << 6) +#define NF_NAT_RANGE_PROTO_RANDOM_ALL \ + (NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY) +#define NF_NAT_RANGE_MASK \ + (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \ + NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \ + NF_NAT_RANGE_PROTO_RANDOM_FULLY | NF_NAT_RANGE_PROTO_OFFSET | \ + NF_NAT_RANGE_NETMAP) // Copied from linux/hid.h. // Keep in sync with the size of the referenced fields. 
@@ -603,6 +605,9 @@ ccflags="$@" $2 ~ /^FSOPT_/ || $2 ~ /^WDIO[CFS]_/ || $2 ~ /^NFN/ || + $2 !~ /^NFT_META_IIFTYPE/ && + $2 ~ /^NFT_/ || + $2 ~ /^NF_NAT_/ || $2 ~ /^XDP_/ || $2 ~ /^RWF_/ || $2 ~ /^(HDIO|WIN|SMART)_/ || diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index c73cfe2f1..a5d3ff8df 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -2127,6 +2127,60 @@ const ( NFNL_SUBSYS_QUEUE = 0x3 NFNL_SUBSYS_ULOG = 0x4 NFS_SUPER_MAGIC = 0x6969 + NFT_CHAIN_FLAGS = 0x7 + NFT_CHAIN_MAXNAMELEN = 0x100 + NFT_CT_MAX = 0x17 + NFT_DATA_RESERVED_MASK = 0xffffff00 + NFT_DATA_VALUE_MAXLEN = 0x40 + NFT_EXTHDR_OP_MAX = 0x4 + NFT_FIB_RESULT_MAX = 0x3 + NFT_INNER_MASK = 0xf + NFT_LOGLEVEL_MAX = 0x8 + NFT_NAME_MAXLEN = 0x100 + NFT_NG_MAX = 0x1 + NFT_OBJECT_CONNLIMIT = 0x5 + NFT_OBJECT_COUNTER = 0x1 + NFT_OBJECT_CT_EXPECT = 0x9 + NFT_OBJECT_CT_HELPER = 0x3 + NFT_OBJECT_CT_TIMEOUT = 0x7 + NFT_OBJECT_LIMIT = 0x4 + NFT_OBJECT_MAX = 0xa + NFT_OBJECT_QUOTA = 0x2 + NFT_OBJECT_SECMARK = 0x8 + NFT_OBJECT_SYNPROXY = 0xa + NFT_OBJECT_TUNNEL = 0x6 + NFT_OBJECT_UNSPEC = 0x0 + NFT_OBJ_MAXNAMELEN = 0x100 + NFT_OSF_MAXGENRELEN = 0x10 + NFT_QUEUE_FLAG_BYPASS = 0x1 + NFT_QUEUE_FLAG_CPU_FANOUT = 0x2 + NFT_QUEUE_FLAG_MASK = 0x3 + NFT_REG32_COUNT = 0x10 + NFT_REG32_SIZE = 0x4 + NFT_REG_MAX = 0x4 + NFT_REG_SIZE = 0x10 + NFT_REJECT_ICMPX_MAX = 0x3 + NFT_RT_MAX = 0x4 + NFT_SECMARK_CTX_MAXLEN = 0x100 + NFT_SET_MAXNAMELEN = 0x100 + NFT_SOCKET_MAX = 0x3 + NFT_TABLE_F_MASK = 0x3 + NFT_TABLE_MAXNAMELEN = 0x100 + NFT_TRACETYPE_MAX = 0x3 + NFT_TUNNEL_F_MASK = 0x7 + NFT_TUNNEL_MAX = 0x1 + NFT_TUNNEL_MODE_MAX = 0x2 + NFT_USERDATA_MAXLEN = 0x100 + NFT_XFRM_KEY_MAX = 0x6 + NF_NAT_RANGE_MAP_IPS = 0x1 + NF_NAT_RANGE_MASK = 0x7f + NF_NAT_RANGE_NETMAP = 0x40 + NF_NAT_RANGE_PERSISTENT = 0x8 + NF_NAT_RANGE_PROTO_OFFSET = 0x20 + NF_NAT_RANGE_PROTO_RANDOM = 0x4 + NF_NAT_RANGE_PROTO_RANDOM_ALL = 0x14 + NF_NAT_RANGE_PROTO_RANDOM_FULLY = 0x10 + NF_NAT_RANGE_PROTO_SPECIFIED = 0x2 NILFS_SUPER_MAGIC = 0x3434 NL0 = 0x0 NL1 = 0x100 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index a1d061597..9dc42410b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) { var libc_unveil_trampoline_addr uintptr //go:cgo_import_dynamic libc_unveil unveil "libc.so" - - diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 5b2a74097..0d3a0751c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) { var libc_unveil_trampoline_addr uintptr //go:cgo_import_dynamic libc_unveil unveil "libc.so" - - diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index f6eda1344..c39f7776d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) { var libc_unveil_trampoline_addr uintptr //go:cgo_import_dynamic libc_unveil unveil "libc.so" - - diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go 
b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 55df20ae9..57571d072 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) { var libc_unveil_trampoline_addr uintptr //go:cgo_import_dynamic libc_unveil unveil "libc.so" - - diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index 8c1155cbc..e62963e67 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) { var libc_unveil_trampoline_addr uintptr //go:cgo_import_dynamic libc_unveil unveil "libc.so" - - diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 7cc80c58d..00831354c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) { var libc_unveil_trampoline_addr uintptr //go:cgo_import_dynamic libc_unveil unveil "libc.so" - - diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 0688737f4..79029ed58 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) { var libc_unveil_trampoline_addr uintptr //go:cgo_import_dynamic libc_unveil unveil "libc.so" - - diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 47dc57967..ffb8708cc 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -194,6 +194,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW //sys GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW //sys SetEndOfFile(handle Handle) (err error) +//sys SetFileValidData(handle Handle, validDataLength int64) (err error) //sys GetSystemTimeAsFileTime(time *Filetime) //sys GetSystemTimePreciseAsFileTime(time *Filetime) //sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) [failretval==0xffffffff] diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 146a1f019..e8791c82c 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -342,6 +342,7 @@ var ( procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") + procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") procSetErrorMode = modkernel32.NewProc("SetErrorMode") procSetEvent = modkernel32.NewProc("SetEvent") @@ -2988,6 +2989,14 @@ func SetEndOfFile(handle Handle) (err error) { return } +func SetFileValidData(handle Handle, validDataLength int64) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), 
uintptr(validDataLength), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) if r1 == 0 { diff --git a/vendor/modules.txt b/vendor/modules.txt index b8833d26e..8c768bbc5 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -536,7 +536,7 @@ go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.17.0 +# golang.org/x/crypto v0.18.0 ## explicit; go 1.18 golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 @@ -557,7 +557,7 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/slices -# golang.org/x/net v0.19.0 +# golang.org/x/net v0.20.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html @@ -580,14 +580,14 @@ golang.org/x/oauth2/internal ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/singleflight -# golang.org/x/sys v0.15.0 +# golang.org/x/sys v0.16.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.15.0 +# golang.org/x/term v0.16.0 ## explicit; go 1.18 golang.org/x/term # golang.org/x/text v0.14.0 From 2d3abc1aee7d8653c9632ee528a7e9c35cc020a0 Mon Sep 17 00:00:00 2001 From: weizhichen Date: Wed, 10 Jan 2024 03:05:47 +0000 Subject: [PATCH 048/157] fix ut --- pkg/blob/azure_test.go | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/pkg/blob/azure_test.go b/pkg/blob/azure_test.go index 04d993b3d..7da2ca295 100644 --- a/pkg/blob/azure_test.go +++ b/pkg/blob/azure_test.go @@ -22,6 +22,7 @@ import ( "os" "reflect" "strings" + "syscall" "testing" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" @@ -82,21 +83,19 @@ users: nodeID string userAgent string allowEmptyCloudConfig bool - expectedErrorMessage string - expectError bool + expectedErr error }{ { desc: "[success] out of cluster, no kubeconfig, no credential file", nodeID: "", allowEmptyCloudConfig: true, - expectError: false, + expectedErr: nil, }, { desc: "[failure][disallowEmptyCloudConfig] out of cluster, no kubeconfig, no credential file", nodeID: "", allowEmptyCloudConfig: false, - expectError: true, - expectedErrorMessage: "no cloud config provided, error: open /etc/kubernetes/azure.json: no such file or directory", + expectedErr: syscall.ENOENT, }, { desc: "[success] out of cluster & in cluster, specify a fake kubeconfig, no credential file", @@ -104,7 +103,7 @@ users: kubeconfig: fakeKubeConfig, nodeID: "", allowEmptyCloudConfig: true, - expectError: false, + expectedErr: nil, }, { desc: "[success] out of cluster & in cluster, no kubeconfig, a fake credential file", @@ -112,7 +111,7 @@ users: nodeID: "", userAgent: "useragent", allowEmptyCloudConfig: true, - expectError: false, + expectedErr: nil, }, } @@ -159,10 +158,7 @@ users: } cloud, err := GetCloudProvider(context.Background(), kubeClient, test.nodeID, "", "", test.userAgent, test.allowEmptyCloudConfig) - assert.Equal(t, test.expectError, err != nil) - if test.expectError { - assert.Equal(t, test.expectedErrorMessage, err.Error()) - } + assert.ErrorIs(t, err, test.expectedErr) if cloud == nil { t.Errorf("return value of getCloudProvider should not be nil even there is error") From 
2f52ae21931c99a3ef61bb1f14a1bfd76793a23e Mon Sep 17 00:00:00 2001 From: Weizhi Chen Date: Thu, 11 Jan 2024 00:33:40 +0800 Subject: [PATCH 049/157] fix ut windows --- pkg/blob/azure_test.go | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/pkg/blob/azure_test.go b/pkg/blob/azure_test.go index 7da2ca295..8aab9dc51 100644 --- a/pkg/blob/azure_test.go +++ b/pkg/blob/azure_test.go @@ -21,6 +21,7 @@ import ( "fmt" "os" "reflect" + "runtime" "strings" "syscall" "testing" @@ -92,11 +93,17 @@ users: expectedErr: nil, }, { - desc: "[failure][disallowEmptyCloudConfig] out of cluster, no kubeconfig, no credential file", + desc: "[linux][failure][disallowEmptyCloudConfig] out of cluster, no kubeconfig, no credential file", nodeID: "", allowEmptyCloudConfig: false, expectedErr: syscall.ENOENT, }, + { + desc: "[windows][failure][disallowEmptyCloudConfig] out of cluster, no kubeconfig, no credential file", + nodeID: "", + allowEmptyCloudConfig: false, + expectedErr: syscall.ENOTDIR, + }, { desc: "[success] out of cluster & in cluster, specify a fake kubeconfig, no credential file", createFakeKubeConfig: true, @@ -117,6 +124,12 @@ users: var kubeClient kubernetes.Interface for _, test := range tests { + if strings.HasPrefix(test.desc, "[linux]") && runtime.GOOS == "windows" { + continue + } + if strings.HasPrefix(test.desc, "[windows]") && runtime.GOOS == "linux" { + continue + } if test.createFakeKubeConfig { if err := createTestFile(fakeKubeConfig); err != nil { t.Error(err) From 23d05706e50467bdb1a4666d620901bb458b9228 Mon Sep 17 00:00:00 2001 From: weizhichen Date: Thu, 11 Jan 2024 04:30:02 +0000 Subject: [PATCH 050/157] fix --- pkg/blob/azure_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/blob/azure_test.go b/pkg/blob/azure_test.go index 8aab9dc51..3dda37553 100644 --- a/pkg/blob/azure_test.go +++ b/pkg/blob/azure_test.go @@ -124,10 +124,10 @@ users: var kubeClient kubernetes.Interface for _, test := range tests { - if strings.HasPrefix(test.desc, "[linux]") && runtime.GOOS == "windows" { + if strings.HasPrefix(test.desc, "[linux]") && runtime.GOOS != "linux" { continue } - if strings.HasPrefix(test.desc, "[windows]") && runtime.GOOS == "linux" { + if strings.HasPrefix(test.desc, "[windows]") && runtime.GOOS != "windows" { continue } if test.createFakeKubeConfig { From 5b8a6cf03a97561d332275290e2a5bec3424de89 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 11 Jan 2024 16:09:30 +0000 Subject: [PATCH 051/157] chore(deps): bump k8s.io/klog/v2 from 2.110.1 to 2.120.0 Bumps [k8s.io/klog/v2](https://github.com/kubernetes/klog) from 2.110.1 to 2.120.0. - [Release notes](https://github.com/kubernetes/klog/releases) - [Changelog](https://github.com/kubernetes/klog/blob/main/RELEASE.md) - [Commits](https://github.com/kubernetes/klog/compare/v2.110.1...v2.120.0) --- updated-dependencies: - dependency-name: k8s.io/klog/v2 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 5 +- vendor/github.com/go-logr/logr/slogr/slogr.go | 61 ------------------- vendor/k8s.io/klog/v2/OWNERS | 4 +- vendor/k8s.io/klog/v2/contextual_slog.go | 31 ++++++++++ vendor/k8s.io/klog/v2/klog.go | 23 ++++++- vendor/k8s.io/klog/v2/klogr_slog.go | 10 +-- vendor/k8s.io/klog/v2/safeptr.go | 34 +++++++++++ vendor/modules.txt | 5 +- 9 files changed, 98 insertions(+), 77 deletions(-) delete mode 100644 vendor/github.com/go-logr/logr/slogr/slogr.go create mode 100644 vendor/k8s.io/klog/v2/contextual_slog.go create mode 100644 vendor/k8s.io/klog/v2/safeptr.go diff --git a/go.mod b/go.mod index 98376a8ce..219bdfed9 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( k8s.io/apimachinery v0.29.0 k8s.io/client-go v0.29.0 k8s.io/component-base v0.29.0 - k8s.io/klog/v2 v2.110.1 + k8s.io/klog/v2 v2.120.0 k8s.io/kubernetes v1.29.0 k8s.io/mount-utils v0.29.0 k8s.io/utils v0.0.0-20231127182322-b307cd553661 diff --git a/go.sum b/go.sum index 34b6696f9..732fd44c9 100644 --- a/go.sum +++ b/go.sum @@ -105,7 +105,6 @@ github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyT github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -449,8 +448,8 @@ k8s.io/controller-manager v0.29.0 h1:kEv9sKLnjDkoSqeouWp2lZ8P33an5wrDJpOMqoyD7pc k8s.io/controller-manager v0.29.0/go.mod h1:UKtadWkULF5bfX7vu3hHppzY/hz88C03t70GItg/x08= k8s.io/csi-translation-lib v0.29.0 h1:we4X1yUlDikvm5Rv0dwMuPHNw6KwjwsQiAuOPWXha8M= k8s.io/csi-translation-lib v0.29.0/go.mod h1:Cp6t3CNBSm1dXS17V8IImUjkqfIB6KCj8Fs8wf6uyTA= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/klog/v2 v2.120.0 h1:z+q5mfovBj1fKFxiRzsa2DsJLPIVMk/KFL81LMOfK+8= +k8s.io/klog/v2 v2.120.0/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kms v0.29.0 h1:KJ1zaZt74CgvgV3NR7tnURJ/mJOKC5X3nwon/WdwgxI= k8s.io/kms v0.29.0/go.mod h1:mB0f9HLxRXeXUfHfn1A7rpwOlzXI1gIWu86z6buNoYA= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= diff --git a/vendor/github.com/go-logr/logr/slogr/slogr.go b/vendor/github.com/go-logr/logr/slogr/slogr.go deleted file mode 100644 index 36432c56f..000000000 --- a/vendor/github.com/go-logr/logr/slogr/slogr.go +++ /dev/null @@ -1,61 +0,0 @@ -//go:build go1.21 -// +build go1.21 - -/* -Copyright 2023 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Package slogr enables usage of a slog.Handler with logr.Logger as front-end -// API and of a logr.LogSink through the slog.Handler and thus slog.Logger -// APIs. -// -// See the README in the top-level [./logr] package for a discussion of -// interoperability. -// -// Deprecated: use the main logr package instead. -package slogr - -import ( - "log/slog" - - "github.com/go-logr/logr" -) - -// NewLogr returns a logr.Logger which writes to the slog.Handler. -// -// Deprecated: use [logr.FromSlogHandler] instead. -func NewLogr(handler slog.Handler) logr.Logger { - return logr.FromSlogHandler(handler) -} - -// NewSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger. -// -// Deprecated: use [logr.ToSlogHandler] instead. -func NewSlogHandler(logger logr.Logger) slog.Handler { - return logr.ToSlogHandler(logger) -} - -// ToSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger. -// -// Deprecated: use [logr.ToSlogHandler] instead. -func ToSlogHandler(logger logr.Logger) slog.Handler { - return logr.ToSlogHandler(logger) -} - -// SlogSink is an optional interface that a LogSink can implement to support -// logging through the slog.Logger or slog.Handler APIs better. -// -// Deprecated: use [logr.SlogSink] instead. -type SlogSink = logr.SlogSink diff --git a/vendor/k8s.io/klog/v2/OWNERS b/vendor/k8s.io/klog/v2/OWNERS index a2fe8f351..7500475a6 100644 --- a/vendor/k8s.io/klog/v2/OWNERS +++ b/vendor/k8s.io/klog/v2/OWNERS @@ -1,14 +1,16 @@ # See the OWNERS docs at https://go.k8s.io/owners reviewers: - harshanarayana + - mengjiao-liu - pohly approvers: - dims + - pohly - thockin - - serathius emeritus_approvers: - brancz - justinsb - lavalamp - piosz + - serathius - tallclair diff --git a/vendor/k8s.io/klog/v2/contextual_slog.go b/vendor/k8s.io/klog/v2/contextual_slog.go new file mode 100644 index 000000000..d3b562521 --- /dev/null +++ b/vendor/k8s.io/klog/v2/contextual_slog.go @@ -0,0 +1,31 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "log/slog" + + "github.com/go-logr/logr" +) + +// SetSlogLogger reconfigures klog to log through the slog logger. The logger must not be nil. +func SetSlogLogger(logger *slog.Logger) { + SetLoggerWithOptions(logr.FromSlogHandler(logger.Handler()), ContextualLogger(true)) +} diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 72502db3a..026be9e3b 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -14,9 +14,26 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. -// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as -// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. 
+// Package klog contains the following functionality: +// +// - output routing as defined via command line flags ([InitFlags]) +// - log formatting as text, either with a single, unstructured string ([Info], [Infof], etc.) +// or as a structured log entry with message and key/value pairs ([InfoS], etc.) +// - management of a go-logr [Logger] ([SetLogger], [Background], [TODO]) +// - helper functions for logging values ([Format]) and managing the state of klog ([CaptureState], [State.Restore]) +// - wrappers for [logr] APIs for contextual logging where the wrappers can +// be turned into no-ops ([EnableContextualLogging], [NewContext], [FromContext], +// [LoggerWithValues], [LoggerWithName]); if the ability to turn off +// contextual logging is not needed, then go-logr can also be used directly +// - type aliases for go-logr types to simplify imports in code which uses both (e.g. [Logger]) +// - [k8s.io/klog/v2/textlogger]: a logger which uses the same formatting as klog log with +// simpler output routing; beware that it comes with its own command line flags +// and does not use the ones from klog +// - [k8s.io/klog/v2/ktesting]: per-test output in Go unit tests +// - [k8s.io/klog/v2/klogr]: a deprecated, standalone [logr.Logger] on top of the main klog package; +// use [Background] instead if klog output routing is needed, [k8s.io/klog/v2/textlogger] if not +// - [k8s.io/klog/v2/examples]: demos of this functionality +// - [k8s.io/klog/v2/test]: reusable tests for [logr.Logger] implementations // // Basic examples: // diff --git a/vendor/k8s.io/klog/v2/klogr_slog.go b/vendor/k8s.io/klog/v2/klogr_slog.go index f7bf74030..c77d7baaf 100644 --- a/vendor/k8s.io/klog/v2/klogr_slog.go +++ b/vendor/k8s.io/klog/v2/klogr_slog.go @@ -25,7 +25,7 @@ import ( "strconv" "time" - "github.com/go-logr/logr/slogr" + "github.com/go-logr/logr" "k8s.io/klog/v2/internal/buffer" "k8s.io/klog/v2/internal/serialize" @@ -35,7 +35,7 @@ import ( func (l *klogger) Handle(ctx context.Context, record slog.Record) error { if logging.logger != nil { - if slogSink, ok := logging.logger.GetSink().(slogr.SlogSink); ok { + if slogSink, ok := logging.logger.GetSink().(logr.SlogSink); ok { // Let that logger do the work. return slogSink.Handle(ctx, record) } @@ -77,13 +77,13 @@ func slogOutput(file string, line int, now time.Time, err error, s severity.Seve buffer.PutBuffer(b) } -func (l *klogger) WithAttrs(attrs []slog.Attr) slogr.SlogSink { +func (l *klogger) WithAttrs(attrs []slog.Attr) logr.SlogSink { clone := *l clone.values = serialize.WithValues(l.values, sloghandler.Attrs2KVList(l.groups, attrs)) return &clone } -func (l *klogger) WithGroup(name string) slogr.SlogSink { +func (l *klogger) WithGroup(name string) logr.SlogSink { clone := *l if clone.groups != "" { clone.groups += "." + name @@ -93,4 +93,4 @@ func (l *klogger) WithGroup(name string) slogr.SlogSink { return &clone } -var _ slogr.SlogSink = &klogger{} +var _ logr.SlogSink = &klogger{} diff --git a/vendor/k8s.io/klog/v2/safeptr.go b/vendor/k8s.io/klog/v2/safeptr.go new file mode 100644 index 000000000..bbe24c2e8 --- /dev/null +++ b/vendor/k8s.io/klog/v2/safeptr.go @@ -0,0 +1,34 @@ +//go:build go1.18 +// +build go1.18 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +// SafePtr is a function that takes a pointer of any type (T) as an argument. +// If the provided pointer is not nil, it returns the same pointer. If it is nil, it returns nil instead. +// +// This function is particularly useful to prevent nil pointer dereferencing when: +// +// - The type implements interfaces that are called by the logger, such as `fmt.Stringer`. +// - And these interface implementations do not perform nil checks themselves. +func SafePtr[T any](p *T) any { + if p == nil { + return nil + } + return p +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 8c768bbc5..458010626 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -193,7 +193,6 @@ github.com/go-ini/ini ## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/funcr -github.com/go-logr/logr/slogr # github.com/go-logr/stdr v1.2.2 ## explicit; go 1.16 github.com/go-logr/stdr @@ -1410,8 +1409,8 @@ k8s.io/controller-manager/pkg/features k8s.io/controller-manager/pkg/features/register k8s.io/controller-manager/pkg/leadermigration/config k8s.io/controller-manager/pkg/leadermigration/options -# k8s.io/klog/v2 v2.110.1 -## explicit; go 1.13 +# k8s.io/klog/v2 v2.120.0 +## explicit; go 1.18 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer k8s.io/klog/v2/internal/clock From d367aa98a313d14f870f1e1bdfcf2dc53c50a619 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Jan 2024 16:26:39 +0000 Subject: [PATCH 052/157] chore(deps): bump github.com/onsi/ginkgo/v2 from 2.13.2 to 2.14.0 Bumps [github.com/onsi/ginkgo/v2](https://github.com/onsi/ginkgo) from 2.13.2 to 2.14.0. - [Release notes](https://github.com/onsi/ginkgo/releases) - [Changelog](https://github.com/onsi/ginkgo/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/ginkgo/compare/v2.13.2...v2.14.0) --- updated-dependencies: - dependency-name: github.com/onsi/ginkgo/v2 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 4 +- go.sum | 10 +- vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 25 ++ .../github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go | 105 ++++++++- .../v2/internal/output_interceptor_wasm.go | 7 + .../v2/internal/progress_report_wasm.go | 10 + .../onsi/ginkgo/v2/internal/suite.go | 2 +- vendor/github.com/onsi/ginkgo/v2/table_dsl.go | 113 +++++++-- .../github.com/onsi/ginkgo/v2/types/errors.go | 9 + .../onsi/ginkgo/v2/types/version.go | 2 +- .../x/tools/go/ast/inspector/typeof.go | 4 +- .../x/tools/internal/typeparams/common.go | 204 ---------------- .../x/tools/internal/typeparams/coretype.go | 122 ---------- .../internal/typeparams/enabled_go117.go | 12 - .../internal/typeparams/enabled_go118.go | 15 -- .../x/tools/internal/typeparams/normalize.go | 218 ------------------ .../x/tools/internal/typeparams/termlist.go | 163 ------------- .../internal/typeparams/typeparams_go117.go | 197 ---------------- .../internal/typeparams/typeparams_go118.go | 151 ------------ .../x/tools/internal/typeparams/typeterm.go | 169 -------------- vendor/modules.txt | 5 +- 21 files changed, 248 insertions(+), 1299 deletions(-) create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/common.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/coretype.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/normalize.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/termlist.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeterm.go diff --git a/go.mod b/go.mod index 219bdfed9..353db5019 100644 --- a/go.mod +++ b/go.mod @@ -99,7 +99,7 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/onsi/ginkgo/v2 v2.13.2 + github.com/onsi/ginkgo/v2 v2.14.0 github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/selinux v1.11.0 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect @@ -136,7 +136,7 @@ require ( golang.org/x/term v0.16.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.14.0 // indirect + golang.org/x/tools v0.16.1 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect diff --git a/go.sum b/go.sum index 732fd44c9..cc1b76cd2 100644 --- a/go.sum +++ b/go.sum @@ -209,8 +209,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod 
h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.13.2 h1:Bi2gGVkfn6gQcjNjZJVO8Gf0FHzMPf2phUei9tejVMs= -github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM= +github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY= +github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw= github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -333,8 +333,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= -golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= @@ -390,8 +388,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= +golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index ec91408f9..fbe515639 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,28 @@ +## 2.14.0 + +### Features +You can now use `GinkgoTB()` when you need an instance of `testing.TB` to pass to a library. + +Prior to this release table testing only supported generating individual `It`s for each test entry. `DescribeTableSubtree` extends table testing support to entire testing subtrees - under the hood `DescrieTableSubtree` generates a new container for each entry and invokes your function to fill our the container. See the [docs](https://onsi.github.io/ginkgo/#generating-subtree-tables) to learn more. 
+ +- Introduce DescribeTableSubtree [65ec56d] +- add GinkgoTB() to docs [4a2c832] +- Add GinkgoTB() function (#1333) [92b6744] + +### Fixes +- Fix typo in internal/suite.go (#1332) [beb9507] +- Fix typo in docs/index.md (#1319) [4ac3a13] +- allow wasm to compile with ginkgo present (#1311) [b2e5bc5] + +### Maintenance +- Bump golang.org/x/tools from 0.16.0 to 0.16.1 (#1316) [465a8ec] +- Bump actions/setup-go from 4 to 5 (#1313) [eab0e40] +- Bump github/codeql-action from 2 to 3 (#1317) [fbf9724] +- Bump golang.org/x/crypto (#1318) [3ee80ee] +- Bump golang.org/x/tools from 0.14.0 to 0.16.0 (#1306) [123e1d5] +- Bump github.com/onsi/gomega from 1.29.0 to 1.30.0 (#1297) [558f6e0] +- Bump golang.org/x/net from 0.17.0 to 0.19.0 (#1307) [84ff7f3] + ## 2.13.2 ### Fixes diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go index 28447ffdd..639541a16 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go @@ -1,7 +1,10 @@ package ginkgo import ( + "testing" + "github.com/onsi/ginkgo/v2/internal/testingtproxy" + "github.com/onsi/ginkgo/v2/types" ) /* @@ -15,7 +18,7 @@ correct line number associated with the failure - though you do not need to use You can learn more here: https://onsi.github.io/ginkgo/#using-third-party-libraries */ func GinkgoT(optionalOffset ...int) FullGinkgoTInterface { - offset := 3 + offset := 1 if len(optionalOffset) > 0 { offset = optionalOffset[0] } @@ -41,21 +44,21 @@ The portion of the interface returned by GinkgoT() that maps onto methods in the type GinkgoTInterface interface { Cleanup(func()) Setenv(kev, value string) - Error(args ...interface{}) - Errorf(format string, args ...interface{}) + Error(args ...any) + Errorf(format string, args ...any) Fail() FailNow() Failed() bool - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) + Fatal(args ...any) + Fatalf(format string, args ...any) Helper() - Log(args ...interface{}) - Logf(format string, args ...interface{}) + Log(args ...any) + Logf(format string, args ...any) Name() string Parallel() - Skip(args ...interface{}) + Skip(args ...any) SkipNow() - Skipf(format string, args ...interface{}) + Skipf(format string, args ...any) Skipped() bool TempDir() string } @@ -71,9 +74,9 @@ type FullGinkgoTInterface interface { AddReportEntryVisibilityNever(name string, args ...any) //Prints to the GinkgoWriter - Print(a ...interface{}) - Printf(format string, a ...interface{}) - Println(a ...interface{}) + Print(a ...any) + Printf(format string, a ...any) + Println(a ...any) //Provides access to Ginkgo's color formatting, correctly configured to match the color settings specified in the invocation of ginkgo F(format string, args ...any) string @@ -92,3 +95,81 @@ type FullGinkgoTInterface interface { AttachProgressReporter(func() string) func() } + +/* +GinkgoTB() implements a wrapper that exactly matches the testing.TB interface. + +In go 1.18 a new private() function was added to the testing.TB interface. Any function which accepts testing.TB as input needs to be passed in something that directly implements testing.TB. + +This wrapper satisfies the testing.TB interface and intended to be used as a drop-in replacement with third party libraries that accept testing.TB. 
+ +Similar to GinkgoT(), GinkgoTB() takes an optional offset argument that can be used to get the +correct line number associated with the failure - though you do not need to use this if you call GinkgoHelper() or GinkgoT().Helper() appropriately +*/ +func GinkgoTB(optionalOffset ...int) *GinkgoTBWrapper { + offset := 2 + if len(optionalOffset) > 0 { + offset = optionalOffset[0] + } + return &GinkgoTBWrapper{GinkgoT: GinkgoT(offset)} +} + +type GinkgoTBWrapper struct { + testing.TB + GinkgoT FullGinkgoTInterface +} + +func (g *GinkgoTBWrapper) Cleanup(f func()) { + g.GinkgoT.Cleanup(f) +} +func (g *GinkgoTBWrapper) Error(args ...any) { + g.GinkgoT.Error(args...) +} +func (g *GinkgoTBWrapper) Errorf(format string, args ...any) { + g.GinkgoT.Errorf(format, args...) +} +func (g *GinkgoTBWrapper) Fail() { + g.GinkgoT.Fail() +} +func (g *GinkgoTBWrapper) FailNow() { + g.GinkgoT.FailNow() +} +func (g *GinkgoTBWrapper) Failed() bool { + return g.GinkgoT.Failed() +} +func (g *GinkgoTBWrapper) Fatal(args ...any) { + g.GinkgoT.Fatal(args...) +} +func (g *GinkgoTBWrapper) Fatalf(format string, args ...any) { + g.GinkgoT.Fatalf(format, args...) +} +func (g *GinkgoTBWrapper) Helper() { + types.MarkAsHelper(1) +} +func (g *GinkgoTBWrapper) Log(args ...any) { + g.GinkgoT.Log(args...) +} +func (g *GinkgoTBWrapper) Logf(format string, args ...any) { + g.GinkgoT.Logf(format, args...) +} +func (g *GinkgoTBWrapper) Name() string { + return g.GinkgoT.Name() +} +func (g *GinkgoTBWrapper) Setenv(key, value string) { + g.GinkgoT.Setenv(key, value) +} +func (g *GinkgoTBWrapper) Skip(args ...any) { + g.GinkgoT.Skip(args...) +} +func (g *GinkgoTBWrapper) SkipNow() { + g.GinkgoT.SkipNow() +} +func (g *GinkgoTBWrapper) Skipf(format string, args ...any) { + g.GinkgoT.Skipf(format, args...) 
+} +func (g *GinkgoTBWrapper) Skipped() bool { + return g.GinkgoT.Skipped() +} +func (g *GinkgoTBWrapper) TempDir() string { + return g.GinkgoT.TempDir() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go new file mode 100644 index 000000000..4c374935b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go @@ -0,0 +1,7 @@ +//go:build wasm + +package internal + +func NewOutputInterceptor() OutputInterceptor { + return &NoopOutputInterceptor{} +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go new file mode 100644 index 000000000..8c53fe0ad --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go @@ -0,0 +1,10 @@ +//go:build wasm + +package internal + +import ( + "os" + "syscall" +) + +var PROGRESS_SIGNALS = []os.Signal{syscall.SIGUSR1} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index fe6e8288a..6746152ec 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -79,7 +79,7 @@ func NewSuite() *Suite { func (suite *Suite) Clone() (*Suite, error) { if suite.phase != PhaseBuildTopLevel { - return nil, fmt.Errorf("cnanot clone suite after tree has been built") + return nil, fmt.Errorf("cannot clone suite after tree has been built") } return &Suite{ tree: &TreeNode{}, diff --git a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go index ac9b7abb5..a3aef821b 100644 --- a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go @@ -46,7 +46,7 @@ And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-s */ func DescribeTable(description string, args ...interface{}) bool { GinkgoHelper() - generateTable(description, args...) + generateTable(description, false, args...) return true } @@ -56,7 +56,7 @@ You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`. func FDescribeTable(description string, args ...interface{}) bool { GinkgoHelper() args = append(args, internal.Focus) - generateTable(description, args...) + generateTable(description, false, args...) return true } @@ -66,7 +66,7 @@ You can mark a table as pending with `PDescribeTable`. This is equivalent to `P func PDescribeTable(description string, args ...interface{}) bool { GinkgoHelper() args = append(args, internal.Pending) - generateTable(description, args...) + generateTable(description, false, args...) return true } @@ -75,6 +75,71 @@ You can mark a table as pending with `XDescribeTable`. This is equivalent to `X */ var XDescribeTable = PDescribeTable +/* +DescribeTableSubtree describes a table-driven spec that generates a set of tests for each entry. 
+ +For example: + + DescribeTableSubtree("a subtree table", + func(url string, code int, message string) { + var resp *http.Response + BeforeEach(func() { + var err error + resp, err = http.Get(url) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(resp.Body.Close) + }) + + It("should return the expected status code", func() { + Expect(resp.StatusCode).To(Equal(code)) + }) + + It("should return the expected message", func() { + body, err := ioutil.ReadAll(resp.Body) + Expect(err).NotTo(HaveOccurred()) + Expect(string(body)).To(Equal(message)) + }) + }, + Entry("default response", "example.com/response", http.StatusOK, "hello world"), + Entry("missing response", "example.com/missing", http.StatusNotFound, "wat?"), + ) + +Note that you **must** place define an It inside the body function. + +You can learn more about DescribeTableSubtree here: https://onsi.github.io/ginkgo/#table-specs +And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns +*/ +func DescribeTableSubtree(description string, args ...interface{}) bool { + GinkgoHelper() + generateTable(description, true, args...) + return true +} + +/* +You can focus a table with `FDescribeTableSubtree`. This is equivalent to `FDescribe`. +*/ +func FDescribeTableSubtree(description string, args ...interface{}) bool { + GinkgoHelper() + args = append(args, internal.Focus) + generateTable(description, true, args...) + return true +} + +/* +You can mark a table as pending with `PDescribeTableSubtree`. This is equivalent to `PDescribe`. +*/ +func PDescribeTableSubtree(description string, args ...interface{}) bool { + GinkgoHelper() + args = append(args, internal.Pending) + generateTable(description, true, args...) + return true +} + +/* +You can mark a table as pending with `XDescribeTableSubtree`. This is equivalent to `XDescribe`. +*/ +var XDescribeTableSubtree = PDescribeTableSubtree + /* TableEntry represents an entry in a table test. You generally use the `Entry` constructor. */ @@ -131,14 +196,14 @@ var XEntry = PEntry var contextType = reflect.TypeOf(new(context.Context)).Elem() var specContextType = reflect.TypeOf(new(SpecContext)).Elem() -func generateTable(description string, args ...interface{}) { +func generateTable(description string, isSubtree bool, args ...interface{}) { GinkgoHelper() cl := types.NewCodeLocation(0) containerNodeArgs := []interface{}{cl} entries := []TableEntry{} - var itBody interface{} - var itBodyType reflect.Type + var internalBody interface{} + var internalBodyType reflect.Type var tableLevelEntryDescription interface{} tableLevelEntryDescription = func(args ...interface{}) string { @@ -166,11 +231,11 @@ func generateTable(description string, args ...interface{}) { case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""): tableLevelEntryDescription = arg case t.Kind() == reflect.Func: - if itBody != nil { + if internalBody != nil { exitIfErr(types.GinkgoErrors.MultipleEntryBodyFunctionsForTable(cl)) } - itBody = arg - itBodyType = reflect.TypeOf(itBody) + internalBody = arg + internalBodyType = reflect.TypeOf(internalBody) default: containerNodeArgs = append(containerNodeArgs, arg) } @@ -200,39 +265,47 @@ func generateTable(description string, args ...interface{}) { err = types.GinkgoErrors.InvalidEntryDescription(entry.codeLocation) } - itNodeArgs := []interface{}{entry.codeLocation} - itNodeArgs = append(itNodeArgs, entry.decorations...) 
+ internalNodeArgs := []interface{}{entry.codeLocation} + internalNodeArgs = append(internalNodeArgs, entry.decorations...) hasContext := false - if itBodyType.NumIn() > 0. { - if itBodyType.In(0).Implements(specContextType) { + if internalBodyType.NumIn() > 0. { + if internalBodyType.In(0).Implements(specContextType) { hasContext = true - } else if itBodyType.In(0).Implements(contextType) && (len(entry.parameters) == 0 || !reflect.TypeOf(entry.parameters[0]).Implements(contextType)) { + } else if internalBodyType.In(0).Implements(contextType) && (len(entry.parameters) == 0 || !reflect.TypeOf(entry.parameters[0]).Implements(contextType)) { hasContext = true } } if err == nil { - err = validateParameters(itBody, entry.parameters, "Table Body function", entry.codeLocation, hasContext) + err = validateParameters(internalBody, entry.parameters, "Table Body function", entry.codeLocation, hasContext) } if hasContext { - itNodeArgs = append(itNodeArgs, func(c SpecContext) { + internalNodeArgs = append(internalNodeArgs, func(c SpecContext) { if err != nil { panic(err) } - invokeFunction(itBody, append([]interface{}{c}, entry.parameters...)) + invokeFunction(internalBody, append([]interface{}{c}, entry.parameters...)) }) + if isSubtree { + exitIfErr(types.GinkgoErrors.ContextsCannotBeUsedInSubtreeTables(cl)) + } } else { - itNodeArgs = append(itNodeArgs, func() { + internalNodeArgs = append(internalNodeArgs, func() { if err != nil { panic(err) } - invokeFunction(itBody, entry.parameters) + invokeFunction(internalBody, entry.parameters) }) } - pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, description, itNodeArgs...)) + internalNodeType := types.NodeTypeIt + if isSubtree { + internalNodeType = types.NodeTypeContainer + } + + pushNode(internal.NewNode(deprecationTracker, internalNodeType, description, internalNodeArgs...)) } }) diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go index 4fbdc3e9b..6bb72d00c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/errors.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -505,6 +505,15 @@ func (g ginkgoErrors) IncorrectVariadicParameterTypeToTableFunction(expected, ac } } +func (g ginkgoErrors) ContextsCannotBeUsedInSubtreeTables(cl CodeLocation) error { + return GinkgoError{ + Heading: "Contexts cannot be used in subtree tables", + Message: "You''ve defined a subtree body function that accepts a context but did not provide one in the table entry. 
Ginkgo SpecContexts can only be passed in to subject and setup nodes - so if you are trying to implement a spec timeout you should request a context in the It function within your subtree body function, not in the subtree body function itself.", + CodeLocation: cl, + DocLink: "table-specs", + } +} + /* Parallel Synchronization errors */ func (g ginkgoErrors) AggregatedReportUnavailableDueToNodeDisappearing() error { diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index a4a1524b4..7015be128 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.13.2" +const VERSION = "2.14.0" diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go index 703c81395..2a872f89d 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -12,8 +12,6 @@ package inspector import ( "go/ast" "math" - - "golang.org/x/tools/internal/typeparams" ) const ( @@ -171,7 +169,7 @@ func typeOf(n ast.Node) uint64 { return 1 << nIncDecStmt case *ast.IndexExpr: return 1 << nIndexExpr - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: return 1 << nIndexListExpr case *ast.InterfaceType: return 1 << nInterfaceType diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go deleted file mode 100644 index d0d0649fe..000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package typeparams contains common utilities for writing tools that interact -// with generic Go code, as introduced with Go 1.18. -// -// Many of the types and functions in this package are proxies for the new APIs -// introduced in the standard library with Go 1.18. For example, the -// typeparams.Union type is an alias for go/types.Union, and the ForTypeSpec -// function returns the value of the go/ast.TypeSpec.TypeParams field. At Go -// versions older than 1.18 these helpers are implemented as stubs, allowing -// users of this package to write code that handles generic constructs inline, -// even if the Go version being used to compile does not support generics. -// -// Additionally, this package contains common utilities for working with the -// new generic constructs, to supplement the standard library APIs. Notably, -// the StructuralTerms API computes a minimal representation of the structural -// restrictions on a type parameter. -// -// An external version of these APIs is available in the -// golang.org/x/exp/typeparams module. -package typeparams - -import ( - "fmt" - "go/ast" - "go/token" - "go/types" -) - -// UnpackIndexExpr extracts data from AST nodes that represent index -// expressions. -// -// For an ast.IndexExpr, the resulting indices slice will contain exactly one -// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable -// number of index expressions. -// -// For nodes that don't represent index expressions, the first return value of -// UnpackIndexExpr will be nil. 
-func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) { - switch e := n.(type) { - case *ast.IndexExpr: - return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack - case *IndexListExpr: - return e.X, e.Lbrack, e.Indices, e.Rbrack - } - return nil, token.NoPos, nil, token.NoPos -} - -// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on -// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0 -// will panic. -func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr { - switch len(indices) { - case 0: - panic("empty indices") - case 1: - return &ast.IndexExpr{ - X: x, - Lbrack: lbrack, - Index: indices[0], - Rbrack: rbrack, - } - default: - return &IndexListExpr{ - X: x, - Lbrack: lbrack, - Indices: indices, - Rbrack: rbrack, - } - } -} - -// IsTypeParam reports whether t is a type parameter. -func IsTypeParam(t types.Type) bool { - _, ok := t.(*TypeParam) - return ok -} - -// OriginMethod returns the origin method associated with the method fn. -// For methods on a non-generic receiver base type, this is just -// fn. However, for methods with a generic receiver, OriginMethod returns the -// corresponding method in the method set of the origin type. -// -// As a special case, if fn is not a method (has no receiver), OriginMethod -// returns fn. -func OriginMethod(fn *types.Func) *types.Func { - recv := fn.Type().(*types.Signature).Recv() - if recv == nil { - return fn - } - base := recv.Type() - p, isPtr := base.(*types.Pointer) - if isPtr { - base = p.Elem() - } - named, isNamed := base.(*types.Named) - if !isNamed { - // Receiver is a *types.Interface. - return fn - } - if ForNamed(named).Len() == 0 { - // Receiver base has no type parameters, so we can avoid the lookup below. - return fn - } - orig := NamedTypeOrigin(named) - gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name()) - - // This is a fix for a gopls crash (#60628) due to a go/types bug (#60634). In: - // package p - // type T *int - // func (*T) f() {} - // LookupFieldOrMethod(T, true, p, f)=nil, but NewMethodSet(*T)={(*T).f}. - // Here we make them consistent by force. - // (The go/types bug is general, but this workaround is reached only - // for generic T thanks to the early return above.) - if gfn == nil { - mset := types.NewMethodSet(types.NewPointer(orig)) - for i := 0; i < mset.Len(); i++ { - m := mset.At(i) - if m.Obj().Id() == fn.Id() { - gfn = m.Obj() - break - } - } - } - - // In golang/go#61196, we observe another crash, this time inexplicable. - if gfn == nil { - panic(fmt.Sprintf("missing origin method for %s.%s; named == origin: %t, named.NumMethods(): %d, origin.NumMethods(): %d", named, fn, named == orig, named.NumMethods(), orig.NumMethods())) - } - - return gfn.(*types.Func) -} - -// GenericAssignableTo is a generalization of types.AssignableTo that -// implements the following rule for uninstantiated generic types: -// -// If V and T are generic named types, then V is considered assignable to T if, -// for every possible instantation of V[A_1, ..., A_N], the instantiation -// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N]. -// -// If T has structural constraints, they must be satisfied by V. 
-// -// For example, consider the following type declarations: -// -// type Interface[T any] interface { -// Accept(T) -// } -// -// type Container[T any] struct { -// Element T -// } -// -// func (c Container[T]) Accept(t T) { c.Element = t } -// -// In this case, GenericAssignableTo reports that instantiations of Container -// are assignable to the corresponding instantiation of Interface. -func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { - // If V and T are not both named, or do not have matching non-empty type - // parameter lists, fall back on types.AssignableTo. - - VN, Vnamed := V.(*types.Named) - TN, Tnamed := T.(*types.Named) - if !Vnamed || !Tnamed { - return types.AssignableTo(V, T) - } - - vtparams := ForNamed(VN) - ttparams := ForNamed(TN) - if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || NamedTypeArgs(VN).Len() != 0 || NamedTypeArgs(TN).Len() != 0 { - return types.AssignableTo(V, T) - } - - // V and T have the same (non-zero) number of type params. Instantiate both - // with the type parameters of V. This must always succeed for V, and will - // succeed for T if and only if the type set of each type parameter of V is a - // subset of the type set of the corresponding type parameter of T, meaning - // that every instantiation of V corresponds to a valid instantiation of T. - - // Minor optimization: ensure we share a context across the two - // instantiations below. - if ctxt == nil { - ctxt = NewContext() - } - - var targs []types.Type - for i := 0; i < vtparams.Len(); i++ { - targs = append(targs, vtparams.At(i)) - } - - vinst, err := Instantiate(ctxt, V, targs, true) - if err != nil { - panic("type parameters should satisfy their own constraints") - } - - tinst, err := Instantiate(ctxt, T, targs, true) - if err != nil { - return false - } - - return types.AssignableTo(vinst, tinst) -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go deleted file mode 100644 index 71248209e..000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/coretype.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typeparams - -import ( - "go/types" -) - -// CoreType returns the core type of T or nil if T does not have a core type. -// -// See https://go.dev/ref/spec#Core_types for the definition of a core type. -func CoreType(T types.Type) types.Type { - U := T.Underlying() - if _, ok := U.(*types.Interface); !ok { - return U // for non-interface types, - } - - terms, err := _NormalTerms(U) - if len(terms) == 0 || err != nil { - // len(terms) -> empty type set of interface. - // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set. - return nil // no core type. - } - - U = terms[0].Type().Underlying() - var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying()) - for identical = 1; identical < len(terms); identical++ { - if !types.Identical(U, terms[identical].Type().Underlying()) { - break - } - } - - if identical == len(terms) { - // https://go.dev/ref/spec#Core_types - // "There is a single type U which is the underlying type of all types in the type set of T" - return U - } - ch, ok := U.(*types.Chan) - if !ok { - return nil // no core type as identical < len(terms) and U is not a channel. 
- } - // https://go.dev/ref/spec#Core_types - // "the type chan E if T contains only bidirectional channels, or the type chan<- E or - // <-chan E depending on the direction of the directional channels present." - for chans := identical; chans < len(terms); chans++ { - curr, ok := terms[chans].Type().Underlying().(*types.Chan) - if !ok { - return nil - } - if !types.Identical(ch.Elem(), curr.Elem()) { - return nil // channel elements are not identical. - } - if ch.Dir() == types.SendRecv { - // ch is bidirectional. We can safely always use curr's direction. - ch = curr - } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() { - // ch and curr are not bidirectional and not the same direction. - return nil - } - } - return ch -} - -// _NormalTerms returns a slice of terms representing the normalized structural -// type restrictions of a type, if any. -// -// For all types other than *types.TypeParam, *types.Interface, and -// *types.Union, this is just a single term with Tilde() == false and -// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see -// below. -// -// Structural type restrictions of a type parameter are created via -// non-interface types embedded in its constraint interface (directly, or via a -// chain of interface embeddings). For example, in the declaration type -// T[P interface{~int; m()}] int the structural restriction of the type -// parameter P is ~int. -// -// With interface embedding and unions, the specification of structural type -// restrictions may be arbitrarily complex. For example, consider the -// following: -// -// type A interface{ ~string|~[]byte } -// -// type B interface{ int|string } -// -// type C interface { ~string|~int } -// -// type T[P interface{ A|B; C }] int -// -// In this example, the structural type restriction of P is ~string|int: A|B -// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, -// which when intersected with C (~string|~int) yields ~string|int. -// -// _NormalTerms computes these expansions and reductions, producing a -// "normalized" form of the embeddings. A structural restriction is normalized -// if it is a single union containing no interface terms, and is minimal in the -// sense that removing any term changes the set of types satisfying the -// constraint. It is left as a proof for the reader that, modulo sorting, there -// is exactly one such normalized form. -// -// Because the minimal representation always takes this form, _NormalTerms -// returns a slice of tilde terms corresponding to the terms of the union in -// the normalized structural restriction. An error is returned if the type is -// invalid, exceeds complexity bounds, or has an empty type set. In the latter -// case, _NormalTerms returns ErrEmptyTypeSet. -// -// _NormalTerms makes no guarantees about the order of terms, except that it -// is deterministic. -func _NormalTerms(typ types.Type) ([]*Term, error) { - switch typ := typ.(type) { - case *TypeParam: - return StructuralTerms(typ) - case *Union: - return UnionTermSet(typ) - case *types.Interface: - return InterfaceTermSet(typ) - default: - return []*Term{NewTerm(false, typ)}, nil - } -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go b/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go deleted file mode 100644 index 18212390e..000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package typeparams - -// Enabled reports whether type parameters are enabled in the current build -// environment. -const Enabled = false diff --git a/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go b/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go deleted file mode 100644 index d67148823..000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package typeparams - -// Note: this constant is in a separate file as this is the only acceptable -// diff between the <1.18 API of this package and the 1.18 API. - -// Enabled reports whether type parameters are enabled in the current build -// environment. -const Enabled = true diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go deleted file mode 100644 index 9c631b651..000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/normalize.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typeparams - -import ( - "errors" - "fmt" - "go/types" - "os" - "strings" -) - -//go:generate go run copytermlist.go - -const debug = false - -var ErrEmptyTypeSet = errors.New("empty type set") - -// StructuralTerms returns a slice of terms representing the normalized -// structural type restrictions of a type parameter, if any. -// -// Structural type restrictions of a type parameter are created via -// non-interface types embedded in its constraint interface (directly, or via a -// chain of interface embeddings). For example, in the declaration -// -// type T[P interface{~int; m()}] int -// -// the structural restriction of the type parameter P is ~int. -// -// With interface embedding and unions, the specification of structural type -// restrictions may be arbitrarily complex. For example, consider the -// following: -// -// type A interface{ ~string|~[]byte } -// -// type B interface{ int|string } -// -// type C interface { ~string|~int } -// -// type T[P interface{ A|B; C }] int -// -// In this example, the structural type restriction of P is ~string|int: A|B -// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, -// which when intersected with C (~string|~int) yields ~string|int. -// -// StructuralTerms computes these expansions and reductions, producing a -// "normalized" form of the embeddings. A structural restriction is normalized -// if it is a single union containing no interface terms, and is minimal in the -// sense that removing any term changes the set of types satisfying the -// constraint. It is left as a proof for the reader that, modulo sorting, there -// is exactly one such normalized form. -// -// Because the minimal representation always takes this form, StructuralTerms -// returns a slice of tilde terms corresponding to the terms of the union in -// the normalized structural restriction. An error is returned if the -// constraint interface is invalid, exceeds complexity bounds, or has an empty -// type set. 
In the latter case, StructuralTerms returns ErrEmptyTypeSet. -// -// StructuralTerms makes no guarantees about the order of terms, except that it -// is deterministic. -func StructuralTerms(tparam *TypeParam) ([]*Term, error) { - constraint := tparam.Constraint() - if constraint == nil { - return nil, fmt.Errorf("%s has nil constraint", tparam) - } - iface, _ := constraint.Underlying().(*types.Interface) - if iface == nil { - return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying()) - } - return InterfaceTermSet(iface) -} - -// InterfaceTermSet computes the normalized terms for a constraint interface, -// returning an error if the term set cannot be computed or is empty. In the -// latter case, the error will be ErrEmptyTypeSet. -// -// See the documentation of StructuralTerms for more information on -// normalization. -func InterfaceTermSet(iface *types.Interface) ([]*Term, error) { - return computeTermSet(iface) -} - -// UnionTermSet computes the normalized terms for a union, returning an error -// if the term set cannot be computed or is empty. In the latter case, the -// error will be ErrEmptyTypeSet. -// -// See the documentation of StructuralTerms for more information on -// normalization. -func UnionTermSet(union *Union) ([]*Term, error) { - return computeTermSet(union) -} - -func computeTermSet(typ types.Type) ([]*Term, error) { - tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) - if err != nil { - return nil, err - } - if tset.terms.isEmpty() { - return nil, ErrEmptyTypeSet - } - if tset.terms.isAll() { - return nil, nil - } - var terms []*Term - for _, term := range tset.terms { - terms = append(terms, NewTerm(term.tilde, term.typ)) - } - return terms, nil -} - -// A termSet holds the normalized set of terms for a given type. -// -// The name termSet is intentionally distinct from 'type set': a type set is -// all types that implement a type (and includes method restrictions), whereas -// a term set just represents the structural restrictions on a type. -type termSet struct { - complete bool - terms termlist -} - -func indentf(depth int, format string, args ...interface{}) { - fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...) -} - -func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) { - if t == nil { - panic("nil type") - } - - if debug { - indentf(depth, "%s", t.String()) - defer func() { - if err != nil { - indentf(depth, "=> %s", err) - } else { - indentf(depth, "=> %s", res.terms.String()) - } - }() - } - - const maxTermCount = 100 - if tset, ok := seen[t]; ok { - if !tset.complete { - return nil, fmt.Errorf("cycle detected in the declaration of %s", t) - } - return tset, nil - } - - // Mark the current type as seen to avoid infinite recursion. - tset := new(termSet) - defer func() { - tset.complete = true - }() - seen[t] = tset - - switch u := t.Underlying().(type) { - case *types.Interface: - // The term set of an interface is the intersection of the term sets of its - // embedded types. 
- tset.terms = allTermlist - for i := 0; i < u.NumEmbeddeds(); i++ { - embedded := u.EmbeddedType(i) - if _, ok := embedded.Underlying().(*TypeParam); ok { - return nil, fmt.Errorf("invalid embedded type %T", embedded) - } - tset2, err := computeTermSetInternal(embedded, seen, depth+1) - if err != nil { - return nil, err - } - tset.terms = tset.terms.intersect(tset2.terms) - } - case *Union: - // The term set of a union is the union of term sets of its terms. - tset.terms = nil - for i := 0; i < u.Len(); i++ { - t := u.Term(i) - var terms termlist - switch t.Type().Underlying().(type) { - case *types.Interface: - tset2, err := computeTermSetInternal(t.Type(), seen, depth+1) - if err != nil { - return nil, err - } - terms = tset2.terms - case *TypeParam, *Union: - // A stand-alone type parameter or union is not permitted as union - // term. - return nil, fmt.Errorf("invalid union term %T", t) - default: - if t.Type() == types.Typ[types.Invalid] { - continue - } - terms = termlist{{t.Tilde(), t.Type()}} - } - tset.terms = tset.terms.union(terms) - if len(tset.terms) > maxTermCount { - return nil, fmt.Errorf("exceeded max term count %d", maxTermCount) - } - } - case *TypeParam: - panic("unreachable") - default: - // For all other types, the term set is just a single non-tilde term - // holding the type itself. - if u != types.Typ[types.Invalid] { - tset.terms = termlist{{false, t}} - } - } - return tset, nil -} - -// under is a facade for the go/types internal function of the same name. It is -// used by typeterm.go. -func under(t types.Type) types.Type { - return t.Underlying() -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go deleted file mode 100644 index cbd12f801..000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/termlist.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by copytermlist.go DO NOT EDIT. - -package typeparams - -import ( - "bytes" - "go/types" -) - -// A termlist represents the type set represented by the union -// t1 ∪ y2 ∪ ... tn of the type sets of the terms t1 to tn. -// A termlist is in normal form if all terms are disjoint. -// termlist operations don't require the operands to be in -// normal form. -type termlist []*term - -// allTermlist represents the set of all types. -// It is in normal form. -var allTermlist = termlist{new(term)} - -// String prints the termlist exactly (without normalization). -func (xl termlist) String() string { - if len(xl) == 0 { - return "∅" - } - var buf bytes.Buffer - for i, x := range xl { - if i > 0 { - buf.WriteString(" | ") - } - buf.WriteString(x.String()) - } - return buf.String() -} - -// isEmpty reports whether the termlist xl represents the empty set of types. -func (xl termlist) isEmpty() bool { - // If there's a non-nil term, the entire list is not empty. - // If the termlist is in normal form, this requires at most - // one iteration. - for _, x := range xl { - if x != nil { - return false - } - } - return true -} - -// isAll reports whether the termlist xl represents the set of all types. -func (xl termlist) isAll() bool { - // If there's a 𓤠term, the entire list is ð“¤. - // If the termlist is in normal form, this requires at most - // one iteration. 
- for _, x := range xl { - if x != nil && x.typ == nil { - return true - } - } - return false -} - -// norm returns the normal form of xl. -func (xl termlist) norm() termlist { - // Quadratic algorithm, but good enough for now. - // TODO(gri) fix asymptotic performance - used := make([]bool, len(xl)) - var rl termlist - for i, xi := range xl { - if xi == nil || used[i] { - continue - } - for j := i + 1; j < len(xl); j++ { - xj := xl[j] - if xj == nil || used[j] { - continue - } - if u1, u2 := xi.union(xj); u2 == nil { - // If we encounter a 𓤠term, the entire list is ð“¤. - // Exit early. - // (Note that this is not just an optimization; - // if we continue, we may end up with a 𓤠term - // and other terms and the result would not be - // in normal form.) - if u1.typ == nil { - return allTermlist - } - xi = u1 - used[j] = true // xj is now unioned into xi - ignore it in future iterations - } - } - rl = append(rl, xi) - } - return rl -} - -// union returns the union xl ∪ yl. -func (xl termlist) union(yl termlist) termlist { - return append(xl, yl...).norm() -} - -// intersect returns the intersection xl ∩ yl. -func (xl termlist) intersect(yl termlist) termlist { - if xl.isEmpty() || yl.isEmpty() { - return nil - } - - // Quadratic algorithm, but good enough for now. - // TODO(gri) fix asymptotic performance - var rl termlist - for _, x := range xl { - for _, y := range yl { - if r := x.intersect(y); r != nil { - rl = append(rl, r) - } - } - } - return rl.norm() -} - -// equal reports whether xl and yl represent the same type set. -func (xl termlist) equal(yl termlist) bool { - // TODO(gri) this should be more efficient - return xl.subsetOf(yl) && yl.subsetOf(xl) -} - -// includes reports whether t ∈ xl. -func (xl termlist) includes(t types.Type) bool { - for _, x := range xl { - if x.includes(t) { - return true - } - } - return false -} - -// supersetOf reports whether y ⊆ xl. -func (xl termlist) supersetOf(y *term) bool { - for _, x := range xl { - if y.subsetOf(x) { - return true - } - } - return false -} - -// subsetOf reports whether xl ⊆ yl. -func (xl termlist) subsetOf(yl termlist) bool { - if yl.isEmpty() { - return xl.isEmpty() - } - - // each term x of xl must be a subset of yl - for _, x := range xl { - if !yl.supersetOf(x) { - return false // x is not a subset yl - } - } - return true -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go deleted file mode 100644 index 7ed86e171..000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package typeparams - -import ( - "go/ast" - "go/token" - "go/types" -) - -func unsupported() { - panic("type parameters are unsupported at this go version") -} - -// IndexListExpr is a placeholder type, as type parameters are not supported at -// this Go version. Its methods panic on use. -type IndexListExpr struct { - ast.Expr - X ast.Expr // expression - Lbrack token.Pos // position of "[" - Indices []ast.Expr // index expressions - Rbrack token.Pos // position of "]" -} - -// ForTypeSpec returns an empty field list, as type parameters on not supported -// at this Go version. 
-func ForTypeSpec(*ast.TypeSpec) *ast.FieldList { - return nil -} - -// ForFuncType returns an empty field list, as type parameters are not -// supported at this Go version. -func ForFuncType(*ast.FuncType) *ast.FieldList { - return nil -} - -// TypeParam is a placeholder type, as type parameters are not supported at -// this Go version. Its methods panic on use. -type TypeParam struct{ types.Type } - -func (*TypeParam) Index() int { unsupported(); return 0 } -func (*TypeParam) Constraint() types.Type { unsupported(); return nil } -func (*TypeParam) Obj() *types.TypeName { unsupported(); return nil } - -// TypeParamList is a placeholder for an empty type parameter list. -type TypeParamList struct{} - -func (*TypeParamList) Len() int { return 0 } -func (*TypeParamList) At(int) *TypeParam { unsupported(); return nil } - -// TypeList is a placeholder for an empty type list. -type TypeList struct{} - -func (*TypeList) Len() int { return 0 } -func (*TypeList) At(int) types.Type { unsupported(); return nil } - -// NewTypeParam is unsupported at this Go version, and panics. -func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam { - unsupported() - return nil -} - -// SetTypeParamConstraint is unsupported at this Go version, and panics. -func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { - unsupported() -} - -// NewSignatureType calls types.NewSignature, panicking if recvTypeParams or -// typeParams is non-empty. -func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { - if len(recvTypeParams) != 0 || len(typeParams) != 0 { - panic("signatures cannot have type parameters at this Go version") - } - return types.NewSignature(recv, params, results, variadic) -} - -// ForSignature returns an empty slice. -func ForSignature(*types.Signature) *TypeParamList { - return nil -} - -// RecvTypeParams returns a nil slice. -func RecvTypeParams(sig *types.Signature) *TypeParamList { - return nil -} - -// IsComparable returns false, as no interfaces are type-restricted at this Go -// version. -func IsComparable(*types.Interface) bool { - return false -} - -// IsMethodSet returns true, as no interfaces are type-restricted at this Go -// version. -func IsMethodSet(*types.Interface) bool { - return true -} - -// IsImplicit returns false, as no interfaces are implicit at this Go version. -func IsImplicit(*types.Interface) bool { - return false -} - -// MarkImplicit does nothing, because this Go version does not have implicit -// interfaces. -func MarkImplicit(*types.Interface) {} - -// ForNamed returns an empty type parameter list, as type parameters are not -// supported at this Go version. -func ForNamed(*types.Named) *TypeParamList { - return nil -} - -// SetForNamed panics if tparams is non-empty. -func SetForNamed(_ *types.Named, tparams []*TypeParam) { - if len(tparams) > 0 { - unsupported() - } -} - -// NamedTypeArgs returns nil. -func NamedTypeArgs(*types.Named) *TypeList { - return nil -} - -// NamedTypeOrigin is the identity method at this Go version. -func NamedTypeOrigin(named *types.Named) *types.Named { - return named -} - -// Term holds information about a structural type restriction. 
-type Term struct { - tilde bool - typ types.Type -} - -func (m *Term) Tilde() bool { return m.tilde } -func (m *Term) Type() types.Type { return m.typ } -func (m *Term) String() string { - pre := "" - if m.tilde { - pre = "~" - } - return pre + m.typ.String() -} - -// NewTerm is unsupported at this Go version, and panics. -func NewTerm(tilde bool, typ types.Type) *Term { - return &Term{tilde, typ} -} - -// Union is a placeholder type, as type parameters are not supported at this Go -// version. Its methods panic on use. -type Union struct{ types.Type } - -func (*Union) Len() int { return 0 } -func (*Union) Term(i int) *Term { unsupported(); return nil } - -// NewUnion is unsupported at this Go version, and panics. -func NewUnion(terms []*Term) *Union { - unsupported() - return nil -} - -// InitInstanceInfo is a noop at this Go version. -func InitInstanceInfo(*types.Info) {} - -// Instance is a placeholder type, as type parameters are not supported at this -// Go version. -type Instance struct { - TypeArgs *TypeList - Type types.Type -} - -// GetInstances returns a nil map, as type parameters are not supported at this -// Go version. -func GetInstances(info *types.Info) map[*ast.Ident]Instance { return nil } - -// Context is a placeholder type, as type parameters are not supported at -// this Go version. -type Context struct{} - -// NewContext returns a placeholder Context instance. -func NewContext() *Context { - return &Context{} -} - -// Instantiate is unsupported on this Go version, and panics. -func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { - unsupported() - return nil, nil -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go deleted file mode 100644 index cf301af1d..000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package typeparams - -import ( - "go/ast" - "go/types" -) - -// IndexListExpr is an alias for ast.IndexListExpr. -type IndexListExpr = ast.IndexListExpr - -// ForTypeSpec returns n.TypeParams. -func ForTypeSpec(n *ast.TypeSpec) *ast.FieldList { - if n == nil { - return nil - } - return n.TypeParams -} - -// ForFuncType returns n.TypeParams. -func ForFuncType(n *ast.FuncType) *ast.FieldList { - if n == nil { - return nil - } - return n.TypeParams -} - -// TypeParam is an alias for types.TypeParam -type TypeParam = types.TypeParam - -// TypeParamList is an alias for types.TypeParamList -type TypeParamList = types.TypeParamList - -// TypeList is an alias for types.TypeList -type TypeList = types.TypeList - -// NewTypeParam calls types.NewTypeParam. -func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam { - return types.NewTypeParam(name, constraint) -} - -// SetTypeParamConstraint calls tparam.SetConstraint(constraint). -func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { - tparam.SetConstraint(constraint) -} - -// NewSignatureType calls types.NewSignatureType. 
-func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { - return types.NewSignatureType(recv, recvTypeParams, typeParams, params, results, variadic) -} - -// ForSignature returns sig.TypeParams() -func ForSignature(sig *types.Signature) *TypeParamList { - return sig.TypeParams() -} - -// RecvTypeParams returns sig.RecvTypeParams(). -func RecvTypeParams(sig *types.Signature) *TypeParamList { - return sig.RecvTypeParams() -} - -// IsComparable calls iface.IsComparable(). -func IsComparable(iface *types.Interface) bool { - return iface.IsComparable() -} - -// IsMethodSet calls iface.IsMethodSet(). -func IsMethodSet(iface *types.Interface) bool { - return iface.IsMethodSet() -} - -// IsImplicit calls iface.IsImplicit(). -func IsImplicit(iface *types.Interface) bool { - return iface.IsImplicit() -} - -// MarkImplicit calls iface.MarkImplicit(). -func MarkImplicit(iface *types.Interface) { - iface.MarkImplicit() -} - -// ForNamed extracts the (possibly empty) type parameter object list from -// named. -func ForNamed(named *types.Named) *TypeParamList { - return named.TypeParams() -} - -// SetForNamed sets the type params tparams on n. Each tparam must be of -// dynamic type *types.TypeParam. -func SetForNamed(n *types.Named, tparams []*TypeParam) { - n.SetTypeParams(tparams) -} - -// NamedTypeArgs returns named.TypeArgs(). -func NamedTypeArgs(named *types.Named) *TypeList { - return named.TypeArgs() -} - -// NamedTypeOrigin returns named.Orig(). -func NamedTypeOrigin(named *types.Named) *types.Named { - return named.Origin() -} - -// Term is an alias for types.Term. -type Term = types.Term - -// NewTerm calls types.NewTerm. -func NewTerm(tilde bool, typ types.Type) *Term { - return types.NewTerm(tilde, typ) -} - -// Union is an alias for types.Union -type Union = types.Union - -// NewUnion calls types.NewUnion. -func NewUnion(terms []*Term) *Union { - return types.NewUnion(terms) -} - -// InitInstanceInfo initializes info to record information about type and -// function instances. -func InitInstanceInfo(info *types.Info) { - info.Instances = make(map[*ast.Ident]types.Instance) -} - -// Instance is an alias for types.Instance. -type Instance = types.Instance - -// GetInstances returns info.Instances. -func GetInstances(info *types.Info) map[*ast.Ident]Instance { - return info.Instances -} - -// Context is an alias for types.Context. -type Context = types.Context - -// NewContext calls types.NewContext. -func NewContext() *Context { - return types.NewContext() -} - -// Instantiate calls types.Instantiate. -func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { - return types.Instantiate(ctxt, typ, targs, validate) -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go deleted file mode 100644 index 7350bb702..000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by copytermlist.go DO NOT EDIT. 
- -package typeparams - -import "go/types" - -// A term describes elementary type sets: -// -// ∅: (*term)(nil) == ∅ // set of no types (empty set) -// ð“¤: &term{} == 𓤠// set of all types (ð“¤niverse) -// T: &term{false, T} == {T} // set of type T -// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t -type term struct { - tilde bool // valid if typ != nil - typ types.Type -} - -func (x *term) String() string { - switch { - case x == nil: - return "∅" - case x.typ == nil: - return "ð“¤" - case x.tilde: - return "~" + x.typ.String() - default: - return x.typ.String() - } -} - -// equal reports whether x and y represent the same type set. -func (x *term) equal(y *term) bool { - // easy cases - switch { - case x == nil || y == nil: - return x == y - case x.typ == nil || y.typ == nil: - return x.typ == y.typ - } - // ∅ ⊂ x, y ⊂ 𓤠- - return x.tilde == y.tilde && types.Identical(x.typ, y.typ) -} - -// union returns the union x ∪ y: zero, one, or two non-nil terms. -func (x *term) union(y *term) (_, _ *term) { - // easy cases - switch { - case x == nil && y == nil: - return nil, nil // ∅ ∪ ∅ == ∅ - case x == nil: - return y, nil // ∅ ∪ y == y - case y == nil: - return x, nil // x ∪ ∅ == x - case x.typ == nil: - return x, nil // 𓤠∪ y == 𓤠- case y.typ == nil: - return y, nil // x ∪ 𓤠== 𓤠- } - // ∅ ⊂ x, y ⊂ 𓤠- - if x.disjoint(y) { - return x, y // x ∪ y == (x, y) if x ∩ y == ∅ - } - // x.typ == y.typ - - // ~t ∪ ~t == ~t - // ~t ∪ T == ~t - // T ∪ ~t == ~t - // T ∪ T == T - if x.tilde || !y.tilde { - return x, nil - } - return y, nil -} - -// intersect returns the intersection x ∩ y. -func (x *term) intersect(y *term) *term { - // easy cases - switch { - case x == nil || y == nil: - return nil // ∅ ∩ y == ∅ and ∩ ∅ == ∅ - case x.typ == nil: - return y // 𓤠∩ y == y - case y.typ == nil: - return x // x ∩ 𓤠== x - } - // ∅ ⊂ x, y ⊂ 𓤠- - if x.disjoint(y) { - return nil // x ∩ y == ∅ if x ∩ y == ∅ - } - // x.typ == y.typ - - // ~t ∩ ~t == ~t - // ~t ∩ T == T - // T ∩ ~t == T - // T ∩ T == T - if !x.tilde || y.tilde { - return x - } - return y -} - -// includes reports whether t ∈ x. -func (x *term) includes(t types.Type) bool { - // easy cases - switch { - case x == nil: - return false // t ∈ ∅ == false - case x.typ == nil: - return true // t ∈ 𓤠== true - } - // ∅ ⊂ x ⊂ 𓤠- - u := t - if x.tilde { - u = under(u) - } - return types.Identical(x.typ, u) -} - -// subsetOf reports whether x ⊆ y. -func (x *term) subsetOf(y *term) bool { - // easy cases - switch { - case x == nil: - return true // ∅ ⊆ y == true - case y == nil: - return false // x ⊆ ∅ == false since x != ∅ - case y.typ == nil: - return true // x ⊆ 𓤠== true - case x.typ == nil: - return false // 𓤠⊆ y == false since y != 𓤠- } - // ∅ ⊂ x, y ⊂ 𓤠- - if x.disjoint(y) { - return false // x ⊆ y == false if x ∩ y == ∅ - } - // x.typ == y.typ - - // ~t ⊆ ~t == true - // ~t ⊆ T == false - // T ⊆ ~t == true - // T ⊆ T == true - return !x.tilde || y.tilde -} - -// disjoint reports whether x ∩ y == ∅. -// x.typ and y.typ must not be nil. 
-func (x *term) disjoint(y *term) bool { - if debug && (x.typ == nil || y.typ == nil) { - panic("invalid argument(s)") - } - ux := x.typ - if y.tilde { - ux = under(ux) - } - uy := y.typ - if x.tilde { - uy = under(uy) - } - return !types.Identical(ux, uy) -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 458010626..9872670e0 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -343,7 +343,7 @@ github.com/munnerz/goautoneg # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f ## explicit github.com/mxk/go-flowrate/flowrate -# github.com/onsi/ginkgo/v2 v2.13.2 +# github.com/onsi/ginkgo/v2 v2.14.0 ## explicit; go 1.18 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config @@ -623,10 +623,9 @@ golang.org/x/text/width # golang.org/x/time v0.5.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.14.0 +# golang.org/x/tools v0.16.1 ## explicit; go 1.18 golang.org/x/tools/go/ast/inspector -golang.org/x/tools/internal/typeparams # google.golang.org/appengine v1.6.7 ## explicit; go 1.11 google.golang.org/appengine/internal From 43851d4dccdaed5786678e631437d836a22c393c Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Wed, 17 Jan 2024 10:17:35 +0800 Subject: [PATCH 053/157] Update driver-parameters.md --- docs/driver-parameters.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/driver-parameters.md b/docs/driver-parameters.md index 0232e487c..74aadc4e8 100644 --- a/docs/driver-parameters.md +++ b/docs/driver-parameters.md @@ -126,7 +126,7 @@ kubectl create secret generic azure-secret --from-literal azurestoragespnclients - mounting blob storage NFSv3 does not need account key, NFS mount access is configured by following setting: - `Firewalls and virtual networks`: select `Enabled from selected virtual networks and IP addresses` with same vnet as agent node - blobfuse cache(`--tmp-path` [mount option](https://github.com/Azure/azure-storage-fuse/tree/blobfuse-1.4.5#mount-options)) - - blobfuse cache is on `/mnt` directory by default, `/mnt` is mounted on temp disk if VM sku provides temp disk, `/mnt` is mounted on os disk if VM sku does not provide temp disk + - By default, the blobfuse cache is located in the `/mnt` directory. If the VM SKU provides a temporary disk, the `/mnt` directory is mounted on the temporary disk. However, if the VM SKU does not provide a temporary disk, the `/mnt` directory is mounted on the OS disk. 
- with blobfuse-proxy deployment (default on AKS), user could set `--tmp-path=` mount option to specify a different cache directory - [Mount an azure blob storage with a dedicated user-assigned managed identity](https://github.com/qxsch/Azure-Aks/tree/master/aks-blobfuse-mi) - [Blobfuse Performance and caching](https://github.com/Azure/azure-storage-fuse/tree/blobfuse-1.4.5#performance-and-caching) From 9bc4f7e8866452b84b6a278b46745d5dd9a3b5f4 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Wed, 17 Jan 2024 13:09:05 +0000 Subject: [PATCH 054/157] chore: upgrade sidecar image versions --- charts/README.md | 8 ++++---- charts/latest/blob-csi-driver-v0.0.0.tgz | Bin 5969 -> 5969 bytes charts/latest/blob-csi-driver/values.yaml | 8 ++++---- deploy/csi-blob-controller.yaml | 6 +++--- deploy/csi-blob-node.yaml | 4 ++-- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/charts/README.md b/charts/README.md index 043b9b91b..d914f4c9b 100644 --- a/charts/README.md +++ b/charts/README.md @@ -66,16 +66,16 @@ The following table lists the configurable parameters of the latest Azure Blob S | `image.blob.tag` | blob-csi-driver docker image tag | `latest` | | `image.blob.pullPolicy` | blob-csi-driver image pull policy | `IfNotPresent` | | `image.csiProvisioner.repository` | csi-provisioner docker image | `mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner` | -| `image.csiProvisioner.tag` | csi-provisioner docker image tag | `v3.6.2` | +| `image.csiProvisioner.tag` | csi-provisioner docker image tag | `v4.0.0` | | `image.csiProvisioner.pullPolicy` | csi-provisioner image pull policy | `IfNotPresent` | | `image.livenessProbe.repository` | liveness-probe docker image | `mcr.microsoft.com/oss/kubernetes-csi/livenessprobe` | -| `image.livenessProbe.tag` | liveness-probe docker image tag | `v2.11.0` | +| `image.livenessProbe.tag` | liveness-probe docker image tag | `v2.12.0` | | `image.livenessProbe.pullPolicy` | liveness-probe image pull policy | `IfNotPresent` | | `image.nodeDriverRegistrar.repository` | csi-node-driver-registrar docker image | `mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar` | -| `image.nodeDriverRegistrar.tag` | csi-node-driver-registrar docker image tag | `v2.9.1` | +| `image.nodeDriverRegistrar.tag` | csi-node-driver-registrar docker image tag | `v2.10.0` | | `image.nodeDriverRegistrar.pullPolicy` | csi-node-driver-registrar image pull policy | `IfNotPresent` | | `image.csiResizer.repository` | csi-resizer docker image | `mcr.microsoft.com/oss/kubernetes-csi/csi-resizer` | -| `image.csiResizer.tag` | csi-resizer docker image tag | `v1.9.2` | +| `image.csiResizer.tag` | csi-resizer docker image tag | `v1.9.3` | | `image.csiResizer.pullPolicy` | csi-resizer image pull policy | `IfNotPresent` | | `imagePullSecrets` | Specify docker-registry secret names as an array | [] (does not add image pull secrets to deployed pods) | | `cloud` | the cloud environment the driver is running on | `AzurePublicCloud` | diff --git a/charts/latest/blob-csi-driver-v0.0.0.tgz b/charts/latest/blob-csi-driver-v0.0.0.tgz index 217a742e8016d55e189495e94ef47f7735c1beb1..3c231de5d9de32603ae219b3b162dfe399c62ef7 100644 GIT binary patch delta 5615 zcmVxQJ^j-V-2ZYUuFZHwY-vbWlt;;58EE&f$Ko{cyGQv}%*n=+0h*WK{!ryAN z8m-z(5Bbm%ExY~~*bPAJR{^YF|Lu0W-N~;1<4*gq{-2`}56_@mLui5pjKF7IAb_fu zwjkG>Er`+&zzYa4A)0r=1ULl=LXlAPEI;lPlUy8Y^|FDaOBO&uh-=O!P>o{L#aCSt z^g4}pXEQ;{9O&uAHG&xtlH)A~syc^D)8bM0Gz%5RYzs01L4SVVW{SD#kf7CW{Ht-a zo>a|tS>S}*#C0%niG87-)M~F@p%F}h1GNE3h8P9JIPw?=B*5k%g?c;z6bNw8mpM>P 
zhR_+(GAa}b4%050OX-VlGr@F&u%^Ro(Ue%+iKflyl*|GSmd_l^W>jKAfj1RCOCz&< zP8L`~G^fjEO@BYX4qSI^xap!_eyzPi7BZWfw+z4D@&hal$Pjx_s|nx>V#B#@vw%u9 zzcvRfp^GFB0KGy#tD&g>zeXV}!UkBNFT{1#m`xW4O&Lc^7BtWpq?(Q?b_F0ZRS(e@ zO!dP+fcl#w_JuYSJw_#Gt_!@YzO-c)XVZLLlzipio#@x;2wLv3#hC9 z&VY2$gTj3-lk5W)Q$OS9QH<

%Hgu8tAeNcG8d9>xzVtm`&F-qt zO8n0JvuJl`{saMIgi0QHN8E!#KxkF^j`tJ zq93u6hptgb(ZdVF&$Wvw#IIa7Q!?*%(TT=oBv}}8MH{~2%Og@me<|iYy*Hh-S|3UH zl*4ZUh$NKl2TBUOT(?ftt6;q4#M)0J7Mn=rG&)bDk=>Kr1Rx|Xs_UJCGZz|Z4drPG zs)rPruX-zN_5M8(QbaFcIt5#HQGLjhAO#?Qc0gT1;WKjs4?e*1+_pi8pNX6g9H3H* zL$t^}eE*5!1tzZ2PF>V4QaLBpRL)UOX#iDN-@0rv4Fp_scE4P4I!Qg%2niJuyKV%( z0*!%JK8$|uAMyQ_5A+>zVHkLco?hlf{a@xhXnKZ! z+WeF6U_v4&c2 z-n_}{-e=W@rKg@3R=`gsf`6w|@dZnJ?y%X{9nnroxKJ0F^>nUS|ENzXM(%1;z%t-k z7N+7oXMrD`>PCQ_pE;3mMQu*y6)=^$i0 zSL*Q(lqrQG{_z_1`foqIe~kvicUP~`&*!7zVEFzu>W@ZOBV~puTRK!PnhEtqApX1N z5*pmA(I{2wg6yK^0`sOzCe83kO|NOmSr<7lA(+-`%BXW0c2-JLt7!sM4tgP(SLhd} zZE)=ViUw0{M4zAc)RO`SHGe@5eIatL!M39sk{I-q{DrvmuwL4eb!fY^tp8Vn=WE|x zb9i;rA2;N^d@_wq@t@;XJLCU7IXyi-_WI^er(3EG*Wofe^n zY1C>r4B2zA?=~PaUJi+6AYLQi1uQ_FCA+my$=cFImPI2Y_Uz#SHGf8+;TkK`7Ck;H zmJBk>65*zJR>!`dvDX!*91`^SSVxwnck%yqwN<|UZ|C6pz(qs)PYeZZSpS`))8owg zKRr3bf1js4JT(7_7R2jn<6uf$kV_xVJT)!$9J=VA&Bw=DO@sCCeM}u~Gtkfrtj!Fa zGN9oe9xUX*lu$r*ZGZbIxH_`pS2%DbYM3koy%rmvO~g?_UM+xg;sE+X8K~4or$_p~ z#4|=As#`HKppO2(=B;+2{mX_I!<}L^3_UA@NQV_B%a~*1=WW!A8P^~&F`pAEr>Oon zVf{_i(*rR74^N8d5A8lvTB`p4>ZpEO-(@h&^O!Q%jA9dIOn*iI#vg6473;t*dU!z5 z+D?6&1}f~N569Ft9kbfAoMq!XFXPuu@Tp!BYP*3`HkuO7Z07ll>VreV)t-m*F_#vb zZawnQ;U2?08}=ShSxY8_Vc!?cMY|S-KV!#^T5FLqiGRg|hTDUI3f<6UCVUNryM=LG z=>X~`vcst+>k@12tYfPYd9{*#jH53CZA5OPruNI%B@b3n z6N5k0&VMB&S3&wngJ(9f`2Ct8BsPFF>O=-X+$WKI&@I?HGu{3`NvfFv839xewXdz< zBSkLW1o_D6o20#-Om5bSw@KBCHreb5bG_5VOFmsHqFpSJ36Qv7I(=rCRW9k*32ZTj zlwW-H;nOA<6J4a8xn+WuR0naqJ0VEx!HLRAoQRiHTH!kFqIzO-P5&rM z6MsEE=H9Kt-Q5H&@eDma*3AUC0xI&eKbq-%03pn0A}I!H+UwThtW(cD)b=ZO91cR9 zegDIGuQ%$C$Nwk*bOAfSEeNy3HbYc!MHV1YlF9;k*6y@gqToH;OO7p*5@$tw{8@T) zTmkDaKK6BpHq-EegL|CXKj)vPcE&_p2+`pw2JbG=K1p znqprRtLTZeMko_V1F65pYQ-Lj9Dqqzv3$nmS+QZ0<~B}mDVUkH@}I`Di}r~$gGOtf2dlmIO#=N{~m>mUsH~iJMJ8MxUa$y!k@%{tU@fo4=?E4=s z#)I4buy=hm7~T}^h+)#!YO&}Nt$#+VTTC{Q;#Ex9c#&37w-AGQ8XZZCHgN5bv`p&3 z45%#Vok<%hMigVPg!Zx*Aj)K#lyIao?Mx!IWt^W@c36ptDa^y$4s&c}R7AhV0MY4u z7I?l~rs$QRFU$?hU{?AsC!s6r*ktoPMLnX`{%jzA&%E{8nHVRp76X%$rdOa(65d%Nn++2@uuSZw^{!cOeycSE~%^)q@Up8=9g~9Fl+lIZg zP`$sp9rr%mUS7Swz3hMPUsk1>&l4<}2KX)35lZKwx=vgfOKjh1C4X2t6^6n7@bh4F zHT>8g-e|GEKEL^?kf#cO%L zUquLajj^jZ6T)rUz-3mwbHL6Ob(aPDVf{)QitS$Ir`Z0=ebiWj|t; zfPY7tHnt=WgluYWgnunFUu64<+2IVKWOn1xAz6)D&BYeh$n4#{R*1LI%@slJ?pAvu zt!n3ss*bZlx7%|#)BbMFF?C$9i~_R=4Fp)gIpb$K=lt(4>#vID{#!q+$E=ntH^XRw zle>^VeX`I+C=dDdTBzdZOqju>&CbmmhjIV+2-oq7gZ5KW=2Ug{D*J@sbG}ox82Yr7I9&qgH^1C~rqf}j= zg>c{y(229yC*5XKZ$sz38wK0Hx96D7;2Tn471lpx4m{~Xs6O@U|EP8PCVT(eo0F5y zVf{Zx3-kXlc#Mu=hx9)!9s5lD%OKoIZ3Mc&dMr7XFbk{1l}$|O%+y{-W>B_{lSU9K ze`+=W`fDRSWt{&xpMdyoc5(pKGyQU82a?Z@k**%*s&h?B) zD`e-nr{?kiWdQX8PUtMWON7wbU`E;MasM9dKws0DI^!i7yaDcsY@o$5x~hNga|myc zC*=uH_nwg6=*^S}#C{}bgiRG)se#qe{_p#SD!_>*SW1H!z zZmKWT5c8x@v{b%A>0J;PX6m4naoLx;Mtl9YgY)6-yV2F~ra$ZzyGZMlQ9>nfe{4&# z$R#NnOoQS0=KS*V_U+}>+jpPFg@NGo+-sRm-RIND+{shm5jMmkBDbIWqw(NsxD&C= z?Lei(vLoy4V`!a?W9#f?aGh;Ow=T?^qgiAQlo1;DZ*D)H|NHjpxu=VQJ<8Q@B9do3O+n>oV;#xwA38=SZsDGCqDt?EP~m_UeUMHD^uXS)6q$ z2j?{MU2|xQFN3ZiZv~gJ?N{nH639zJQI{Ph@M}=tUK->0c@8W*&#XS0e-4KGSkBMB z|DiV;eD05KNB#G=7Aags2tTn+EfSOUCy*kO+&%95^dR zl~M0I<~E*Gv3z$*3P|ku1;Z`Il@LA?2@Z#j9v_>&8_Wn5&C(D`5ffh}sUYs0`XcL5 zBtdu8fdxzPP)+a*)dXgrf7@3bLHg*L?3q4StVLo@6B%4No~xZX>Xz1P#5w!^hYz3L z_HTz*z5ea+{A0g}@~(9Y6{-X@&tIcHpd9g<%7AaL(xOVOg7M~6s8UD^(hOvjI3^^P z;;bMbTy&pl38VTdUTskdi9O%4u{{T+A@i)3-P&7>S*ujbkFW5}e<94?@w;v+iN3}; z8u%S7Ev#>Hw!RQKw}=C3OO^R`RqYPwwG*yy$2G$#S+=Rbz;5+OWVQi+nb8mf!ylz{7$$Z6*1_VNI_v$3v)54KiM z`iF*!I6TzKe2rR}e_dlN&rxHu?krh&PUFnSGi-jm@hx$58nxDPg)Hbk+Hn7Rr9Npy zUMeyccQ$!x%4*uLL>}sdUc64IIG$ggHxei0H>Fe-8eWa{ozLgg!Na^_Smr 
zwi2Zj^QA7f>igK<*WmMRZn+O%S^`xNS6y39lLXxuTit{y6JuQyU)@yVlfbbK^hxgO z^igs{>DC&Ftk{ol{hG6JkVdc#~ysW0vii~r2e{EV0jsRAJljRQGnocykLsYKo2+NvSl zGqf2jxR<~wc9WN>>Wgo8uOH-UX?OLH17M%%l0VT!f4|i0ziLbgyZM*8zpC*IEx-_H z0eqqXW+3aYQQaj%|Eo=C^&c}&>VJ;f_d)mDfuO#Fvocjv+f+m=UAtLCIfSrXEJVd| zRvZ&7fXW?aE*#BkRQJosZ61~_h}x|7I-n;Zv>D?Lp~)ptL_^oc?PbzxmSxrQtMx6R z4xzKJf3avYmmgb`kksxazrp0cV9RjTnxTwS)jPG=9N6ZH@G2#VO521k`@G6~ivEhk zV$HmleY~%#YQqb4{tRlif{NI)mVFY1rulihETJ7oQLI;=rcj?Et!x#Nbq*^@;|W0?j$HxWcuOZ zaC=Zet9t%_$|WEh&j0PBqfYkzpX1|`!} z6P`uc7YM^a&F{aJ>;F&yun~Z#c!D?S|5onzACFrH|L^m(TmU0&=*2rhCUdb+k*qzT zZEdh2x4$@Q3njSi-9**yB}!fXv#y&#ZxJUoA#1tV#IXd5t8i@KHj+-Fd)V~*xd+wy z*J<19|Lso#8}xQF{9XIx^yKu-QM>)-?7LR`r2VG#9ct~5h4K~%iTQV}Cx6q*4(`9CVV`^kE(oI? zv}o6`@2~!Ju>>J&4hWkQU+QNUe*_%RTbE7HSTc@hfG)>Ek zHCnZo9`d0jT6X;}up5BbuL4-V{@bc$*Z;{;`>_6>qY)3!pi@I=f(4AgXC36(yx|d> zGr^{^VKYy?b(2g13x6Hdy!%l!Z9$rm6ln;FXDj8Q{`Ldc9Ws~L%MR*K2TTq*2%zev zEy$3w1yT9|cmV+>MDs3~0H+{9C=!aE<;R_3l8fWUS>tFENtY~uf)GQ_CQyxH)Wug_ z67-H5?RKNJi6CVTboJsI!HfvW@fHJBox`PR@u+)>g`XPj3V*Xa0zv-RW`?=xkRa7= z{IqFyYqrY*C)_25!Neu@rFv4Uy?TX4Fa-|O1|%6`6cFRcV;qnGn}ZbU@dQvHz(HT; zKs6acXGn{vP$)P|J7_MYFFMTx(+$F!4zop5VsR&$HltHA3piLlb1a)ti3tVXRQN27 z%?$~hCLBIZ1dxb1y_B3x9e!S%eSQ?N4_Mlc1z!$`Z3)^M^ zm1=%%4p>45Nge=tg?>>(QU8C9LRf?ius~mk>#8xEE)JS9j+QKFpfN}_9aHQIKxC>O zqA!^0hk*d~H%IIXZ76z-O3qvtct?F{%dExKLrY-v3V$hP5u2iM$U?|(IOh?4#`W}v zLYqLR#c&5iH9o<1c2X4jm7$ID+@!R#*{LN)n8gBu;b`|To=hXAfGv+<}>Fa zhU%b)$2=MrPrzM=;YtIT&d9RRWgsN;GW`gZ40#~2gC(xn8YIyXp;zecM`2;#&l^69 zrz=>1HV~6A8gjxok;_XL3ymMBP}Y-S11bo%T(jmU`5EYwlLJE*8f=QfWQpJ&d%g>( zqyEl-bkKvseIb+V0~;-==PO>-9fXwvv-C@|*Er?S!pAmrkoq8&nZFuRvc|sjJYUW3 zs?SRN=S;`}lQaY=e{nzRUPDS^IkCl%aoIsfKb;+)6mNZVz^)jU z5^>aAb++`T|(hAa{~`P!1BVjL5N?7ocA4| zQj0^h$US`jiQ)w&uF_5&)Gksvr`1%>aZYIfRaoD;Y%&c53^}`Bt~i~fo@#`I3W;4e zf?t8gz$=$Vz1=uzoYsrF9ToIDiaQ=P+KnUAQGoo2@2`EJ?}!V7z)SS>GB4^snd6}8 z8GmZ?PridCbl>I%b&2gc46&SdP}9?PD;2?7n${Rp;-T@Pbo(3YE!^6 z;93@@;yq`9AD!w(fSq4Bk#J3IP%7Zv_i(80#x?9A(>a` zSEg-n?EZ%OQ*A_FTy)iw0tYpJeiwZqa<0L)qZ*Rvca{8wxb(1I+L3i=JGHF;SAyqj z-(7QXebXB^6ux0EOo$l*%GujMSCqm z71OBIZWyxXVBc*(X51VS%RszFz6)4@I!tzIp_a9!i!6&qR_yu118R(aK*Kdwwk>*m zR4f@}mLw2qv{ol^P^?{3q^q&|C+OYnQj?Ydq z>;LTZ5dVFi_VCdBCt47%qpgD}aX~J9IQP`F*mLNhe>NW)datmGA!r0h1lmLe}s9N3El?)Aas-)NZv7 z_W!fAJ3^ff>T1K@11f9DgfQ&;qPb|-qWEX**->jPQYNu~c+hZrFi@c%y3B;Hp>Ve_ zzAGI;-GmmQ{I=n)wu?UzCDZ{b(TM^VKIqtU9M397AH6puqFtyH`g5f{ze=!Knb9Iv z^E+J3@|o!vt-p|bV&b)@F~!Liyi}dIA9Gz|t)2C3H6piGvXAlfMWBtyZ`9O|`MTu6 zDr{o#huXP+gybqpA8GK+Mi#$cGlawjkVc(IA&C1VvJW~1TW6-*pD0N+Gaw^?>Y;YF z6?~+~$D1G@d3}?#*OST3TJbiiTG1w(9bv9_ns~{lONF$HB{BgL_eo=B8Y#v&52t^a>x3>j zVHx{ylb{u(j3ho*?rqYzjXrleU1P<%09zUML(bG0G=7edI*)eHD4>#f&^0H>wc|^K zMPkCKSjiS6j&0qTCo2)VO=cp7pLveOkb^$qHS6hR)kr5mG~!IWq|yr4X$RF4lWY1% zS(@m7@iF&q9q!I1Xo+X&@v&|uz!gxDpZ(EH?*j;7J`+hXP}5$w7UxIx+(T`@V#nbi z#QFC>Ty(pm-gx|v0zenA1KffzO>8qn1y^JN5+$iDkmv2AR!bDThkMDfWpd)IXpcWn zZ;mTq9mmH$v3~c(`Dtq%#3bNCp0`^?Kw+?d&zdRY{`_Y~86SQgf$d`AnXq4u0icWQ z7chb!&T#4-G7-2AYKAGK4Z-ZYtPBkksiT|3BTh3-OU6P+9(5!$Cm|*nZX)PWK62a) z_lhr&mP2^rbsQ{51$IWtS_}a*>?SOb_ED1IXwstiyJgadD=3Rp#D2fJVh!s2vP}bj z@2DyEMX`#WNNa>Lfi#f%Ypho6vB&|Kd=<-QT%H#jHfe6-{FZ{5Su6i(JiCbh8Ua_W ze0A&~{wZ!ZO25watswLIWU^OL&tuHXn}*p@uy?~>ZM(A;Wg-`*K@{&lKpmeED$l?F z;d0!+?G3ua>;B-TXh#gQwpNQpmuNMATAgCDi5#zD%EpVdin@gu%+u&dTC{;{hooh4 z4`x7RLGMi7NHL-qgC(?=y#P@r)uez z=d-}`n+VS;r=w?l-zfk z&2m#B+}bE)wN0F4hU)dK;6)7ld~-7#-wsFD|N3_^{k#@S;LRW{++Q|uScSpu`P+uQ zv{1djz8!Zz++JP3zrE^x?p;--nokrgnFjbh))7kQp}J078B1*6X(d>HIu(Y&-r#e8 zbUpak8{BBIA70%2Tyh=)X(!5cnwmwS=xD-?{|H7mPta!TaTR%cwlh<)KsyT;g6oC)DJZQwGi-Z^0Bn!3vZ{jh!|C79K|#fC`*MfE8-jN|8JHG$mn@3J2; 
zOTfP)O&eR12SPTrH^P>GnJ=>a#O!c}P%^vm=#VT&t>$72i)8lhUMs{~=;n$bcXz8j zkyf?yMODXHq1){_oN0fz=9oGzSVn9m-SagbN{U$)?-#nmYZR; zz{y?6pFUaWDwK!(dM#A(b0*AS(q`x8jl;PAyM*g_#qye)Os^Gx%uYK;u`1rkV_q+p zhcLI<9rngf`7W>#*@i7)Q_L|F*eQ?r6p3uMb73$y!NE%>P5gygh5Vn;wBG$nm`(Zr zr)OuG{GV34{pOJW^Bm2xtW?U6Fgg2;yB|&NV!LKCgf7Q@Q_r_P87S@P%cS`1kh_DW z#P)myzXyba6Bj*y`poImDZ8f0g0@^4TY|AL{MAT77#7XecSWXPFKrh-G6z=mbVD^S zLYjM2)PtTs2M;)Qb^YBP&{3+e&q6qG2~~Y{r1g1(2|$O6XjhMNz@qcT zHVK4^#a&Yeb2(>b{@YkeJmx3<)G(1nL1mr*3&SX*SmS}?dCV?@$5CTQO(Tdn(W5#eiSRZoW%0gVn5_? zl>1n79>dH%j8U_h=V9vP+_BB{R5#TZYKVE#Ct50Bq4X|@3o~_4%DC)HU8CLJ+y2Gi z_TA`uaMK%fi(RC3$|#`{IDfV!S>%!w4W|BJd~|?@CX}X5s}-^z0tUTJ=lp@=6aw~V%d=$?PF+18^?CEm%$xvJGymY-W<&$bD)gS zxOa2=@#0^%*TddG$4qYrpFZw1w%iR&r8E`2Cz0~1(dqWb7jLh6w|}38-HV%E_w7!! zGS@?8h;0rfDBH;j#JWI&0%>MJvi&WW*?3)yGQ!f#1Ez3+v^QaiLDps3XL4s_e9w_o zxn+F(kl6d*o!F}vX4RZEg=cYoR5>`Ok?)#ATYMRG1$isDjcvbDw~;_z5{kO)D1l#t z`u5To$Io+M;dy5D(SLL>+{bc${{0W#QU7yqbUW(3za3v+ez+Z8-29x!ke0DEnTK#6 zdl8~mXm1+K8!j2&??56f9&_Nl7*$5S@0iz4fN;@;rX`H(yLh!lDJ1rM%f|K`l!nZ+T6S%3 zF=nk&EkC})H-CpPd&%#*sU-Ry=V;(}u(Ytg%GvrtM zr)1ft0t36%Bbf;)twh?gP-QD~f435qL!ro_P$Y+{WLw;fHN86?Dp4x&w@W2T+G?mS zu2TZ8QzECCpWDj==+4Hv7CzWoJ?UQ>D&p`^EAusKWq)>!u{=kO&APK>;W>>nAJ4G) z@y55r(P`9L%N4Sq`)I@c>y`SX5qYV|SlrpUu+=+t{yXVOhbsPV{=q_xPdg9kJ z_N02cHP&0@=}6@^tbWugEgq$2-l3rHWeWPXH-F1ENMWV@p}IKc_77{VWww^JGGnhw zvFTr{3bMTZhflC=?d1~i4fTI#$7jdc`@i3`5BGmROUnqb`sfXFF{Qq^KP>(;Kl3wU zE~E;CG&Bz6oN@Ap-lYqD%fn z2Y>xqum7eoCG6&3>;9_7FSGzdpat-W2AF}YzeaVJ2>q`%q1AuPJgNUVYTpOlZwG?< z4$jI{O>I*Vt#s{X5#L8@HE9t67#+%dghAggS)IzJJD|&0KzLO+r$;m;45k|9~ySRcnSaPF3&J zVsl`dE5fUkBr0tay6p2R?_LLJo0tEAJ$5HSsUp)44~N@>3R>0k|5Gjj*>L`EA0HoO@BcYDIXj&HpQCL! z|3B+Spz9F6r7hrv6n}Caz7>_9yg1=mlzo9P9Mt^&TeiD8*=-Lqqb0j+ulu7?OvkP9~+bh f&41ARhjwU(c4&w8% Date: Wed, 17 Jan 2024 14:03:10 +0000 Subject: [PATCH 055/157] feat: upgrade azcopy version to 10.22.2 --- pkg/blobplugin/Dockerfile | 4 ++-- test/sanity/run-test.sh | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/blobplugin/Dockerfile b/pkg/blobplugin/Dockerfile index d7c89157f..6d35e7cf6 100644 --- a/pkg/blobplugin/Dockerfile +++ b/pkg/blobplugin/Dockerfile @@ -44,9 +44,9 @@ else \ RUN tar xvzf aznfs.tar.gz -C / --keep-directory-symlink && rm aznfs.tar.gz # install azcopy -ARG azcopyURL=https://azcopyvnext.azureedge.net/releases/release-10.22.1-20231220/azcopy_linux_amd64_10.22.1.tar.gz +ARG azcopyURL=https://azcopyvnext.azureedge.net/releases/release-10.22.2-20240110/azcopy_linux_amd64_10.22.2.tar.gz RUN if [ "$ARCH" == "arm64" ] ; then \ - azcopyURL=https://azcopyvnext.azureedge.net/releases/release-10.22.1-20231220/azcopy_linux_arm64_10.22.1.tar.gz; fi + azcopyURL=https://azcopyvnext.azureedge.net/releases/release-10.22.2-20240110/azcopy_linux_arm64_10.22.2.tar.gz; fi RUN wget -O azcopy.tar.gz ${azcopyURL} && \ tar xvzf azcopy.tar.gz -C . && rm azcopy.tar.gz && \ mv ./azcopy_linux_$ARCH_*/azcopy /usr/local/bin/azcopy && \ diff --git a/test/sanity/run-test.sh b/test/sanity/run-test.sh index e20a76fef..018cd1dbd 100755 --- a/test/sanity/run-test.sh +++ b/test/sanity/run-test.sh @@ -34,9 +34,9 @@ fi azcopyPath="/usr/local/bin/azcopy" if [ ! -f "$azcopyPath" ]; then - echo 'Downloading azcopy...' azcopyTarFile="azcopy.tar.gz" - wget -O $azcopyTarFile https://aka.ms/downloadazcopy-v10-linux + echo 'Downloading azcopy...' 
+ wget -O $azcopyTarFile azcopyvnext.azureedge.net/releases/release-10.22.2-20240110/azcopy_linux_arm64_10.22.2.tar.gz tar -zxvf $azcopyTarFile mv ./azcopy*/azcopy /usr/local/bin/azcopy rm -rf ./$azcopyTarFile From 5c8e59a9c46489dfdaf6e4c91fdb6aec3d287884 Mon Sep 17 00:00:00 2001 From: Fan Shang Xiang Date: Wed, 13 Dec 2023 17:33:17 +0800 Subject: [PATCH 056/157] refactor:migrate blobclient to track2 sdk Signed-off-by: Fan Shang Xiang --- go.mod | 20 +- pkg/blob/blob.go | 15 +- pkg/blob/blob_test.go | 21 +- pkg/blob/controllerserver.go | 42 +++- pkg/blob/controllerserver_test.go | 222 +++++++++--------- vendor/modules.txt | 2 + .../mock_accountclient/interface.go | 131 +++++++++++ .../mock_blobcontainerclient/interface.go | 116 +++++++++ 8 files changed, 424 insertions(+), 145 deletions(-) create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient/mock_accountclient/interface.go create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobcontainerclient/mock_blobcontainerclient/interface.go diff --git a/go.mod b/go.mod index 353db5019..dbf2256d7 100644 --- a/go.mod +++ b/go.mod @@ -6,18 +6,24 @@ toolchain go1.21.4 require ( github.com/Azure/azure-sdk-for-go v68.0.0+incompatible + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0 github.com/Azure/go-autorest/autorest v0.11.29 github.com/Azure/go-autorest/autorest/adal v0.9.23 - github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect github.com/container-storage-interface/spec v1.8.0 github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/golang/protobuf v1.5.3 - github.com/imdario/mergo v0.3.9 // indirect github.com/kubernetes-csi/csi-lib-utils v0.16.0 + github.com/onsi/ginkgo/v2 v2.14.0 github.com/onsi/gomega v1.30.0 github.com/pborman/uuid v1.2.1 github.com/pelletier/go-toml v1.9.5 + github.com/pkg/errors v0.9.1 + github.com/satori/go.uuid v1.2.0 github.com/stretchr/testify v1.8.4 go.uber.org/mock v0.4.0 golang.org/x/net v0.20.0 @@ -37,20 +43,16 @@ require ( ) require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.4.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.6.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.2.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0 github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/mocks v0.4.2 // indirect + 
github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect @@ -88,6 +90,7 @@ require ( github.com/google/uuid v1.5.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/imdario/mergo v0.3.9 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -99,17 +102,14 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/onsi/ginkgo/v2 v2.14.0 github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/selinux v1.11.0 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect - github.com/pkg/errors v0.9.1 github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.16.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/procfs v0.10.1 // indirect - github.com/satori/go.uuid v1.2.0 github.com/spf13/cobra v1.8.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect diff --git a/pkg/blob/blob.go b/pkg/blob/blob.go index a6c722a68..2f8bce6d7 100644 --- a/pkg/blob/blob.go +++ b/pkg/blob/blob.go @@ -27,7 +27,7 @@ import ( "sync" "time" - "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage" azstorage "github.com/Azure/azure-sdk-for-go/storage" az "github.com/Azure/go-autorest/autorest/azure" "github.com/container-storage-interface/spec/lib/go/csi" @@ -44,6 +44,7 @@ import ( csicommon "sigs.k8s.io/blob-csi-driver/pkg/csi-common" "sigs.k8s.io/blob-csi-driver/pkg/util" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/provider" azure "sigs.k8s.io/cloud-provider-azure/pkg/provider" @@ -203,6 +204,8 @@ type Driver struct { csicommon.CSIDriver cloud *azure.Cloud + clientFactory azclient.ClientFactory + networkClientFactory azclient.ClientFactory KubeClient kubernetes.Interface blobfuseProxyEndpoint string // enableBlobMockMount is only for testing, DO NOT set as true in non-testing scenario @@ -266,6 +269,10 @@ func NewDriver(options *DriverOptions, kubeClient kubernetes.Interface, cloud *p d.Name = options.DriverName d.Version = driverVersion d.NodeID = options.NodeID + if d.cloud != nil { + d.clientFactory = d.cloud.ComputeClientFactory + d.networkClientFactory = d.cloud.NetworkClientFactory + } var err error getter := func(key string) (interface{}, error) { return nil, nil } @@ -761,7 +768,7 @@ func isSupportedAccessTier(accessTier string) bool { if accessTier == "" { return true } - for _, tier := range storage.PossibleAccessTierValues() { + for _, tier := range armstorage.PossibleAccessTierValues() { if accessTier == string(tier) { return true } @@ -899,11 +906,11 @@ func (d *Driver) GetStorageAccesskey(ctx context.Context, accountOptions *azure. 
// GetInfoFromSecret get info from k8s secret // return func (d *Driver) GetInfoFromSecret(ctx context.Context, secretName, secretNamespace string) (string, string, string, string, string, string, string, error) { - if d.cloud.KubeClient == nil { + if d.KubeClient == nil { return "", "", "", "", "", "", "", fmt.Errorf("could not get account key from secret(%s): KubeClient is nil", secretName) } - secret, err := d.cloud.KubeClient.CoreV1().Secrets(secretNamespace).Get(ctx, secretName, metav1.GetOptions{}) + secret, err := d.KubeClient.CoreV1().Secrets(secretNamespace).Get(ctx, secretName, metav1.GetOptions{}) if err != nil { return "", "", "", "", "", "", "", fmt.Errorf("could not get secret(%v): %w", secretName, err) } diff --git a/pkg/blob/blob_test.go b/pkg/blob/blob_test.go index 86cc92459..4283f0bbd 100644 --- a/pkg/blob/blob_test.go +++ b/pkg/blob/blob_test.go @@ -957,7 +957,7 @@ func TestGetContainerReference(t *testing.T) { d := NewFakeDriver() d.cloud = azure.GetTestCloud(gomock.NewController(t)) - d.cloud.KubeClient = fake.NewSimpleClientset() + d.KubeClient = fake.NewSimpleClientset() for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { @@ -1076,8 +1076,7 @@ func TestGetStorageAccesskey(t *testing.T) { }, } d := NewFakeDriver() - d.cloud = &azure.Cloud{} - d.cloud.KubeClient = fake.NewSimpleClientset() + d.KubeClient = fake.NewSimpleClientset() secret := &v1api.Secret{ ObjectMeta: metav1.ObjectMeta{ Namespace: secretNamespace, @@ -1090,7 +1089,7 @@ func TestGetStorageAccesskey(t *testing.T) { Type: "Opaque", } secret.Namespace = secretNamespace - _, secretCreateErr := d.cloud.KubeClient.CoreV1().Secrets(secretNamespace).Create(context.TODO(), secret, metav1.CreateOptions{}) + _, secretCreateErr := d.KubeClient.CoreV1().Secrets(secretNamespace).Create(context.TODO(), secret, metav1.CreateOptions{}) if secretCreateErr != nil { t.Error("failed to create secret") } @@ -1120,7 +1119,7 @@ func TestGetInfoFromSecret(t *testing.T) { testFunc: func(t *testing.T) { d := NewFakeDriver() d.cloud = &azure.Cloud{} - d.cloud.KubeClient = nil + d.KubeClient = nil secretName := "foo" secretNamespace := "bar" _, _, _, _, _, _, _, err := d.GetInfoFromSecret(context.TODO(), secretName, secretNamespace) @@ -1134,8 +1133,7 @@ func TestGetInfoFromSecret(t *testing.T) { name: "Could not get secret", testFunc: func(t *testing.T) { d := NewFakeDriver() - d.cloud = &azure.Cloud{} - d.cloud.KubeClient = fakeClient + d.KubeClient = fakeClient secretName := "" secretNamespace := "" _, _, _, _, _, _, _, err := d.GetInfoFromSecret(context.TODO(), secretName, secretNamespace) @@ -1150,8 +1148,7 @@ func TestGetInfoFromSecret(t *testing.T) { name: "get account name from secret", testFunc: func(t *testing.T) { d := NewFakeDriver() - d.cloud = &azure.Cloud{} - d.cloud.KubeClient = fakeClient + d.KubeClient = fakeClient secretName := "store_account_name_key" secretNamespace := "namespace" accountName := "bar" @@ -1167,7 +1164,7 @@ func TestGetInfoFromSecret(t *testing.T) { }, Type: "Opaque", } - _, secretCreateErr := d.cloud.KubeClient.CoreV1().Secrets(secretNamespace).Create(context.TODO(), secret, metav1.CreateOptions{}) + _, secretCreateErr := d.KubeClient.CoreV1().Secrets(secretNamespace).Create(context.TODO(), secret, metav1.CreateOptions{}) if secretCreateErr != nil { t.Error("failed to create secret") } @@ -1187,7 +1184,7 @@ func TestGetInfoFromSecret(t *testing.T) { testFunc: func(t *testing.T) { d := NewFakeDriver() d.cloud = &azure.Cloud{} - d.cloud.KubeClient = fakeClient + d.KubeClient = 
fakeClient secretName := "store_other_info" secretNamespace := "namespace" accountName := "bar" @@ -1211,7 +1208,7 @@ func TestGetInfoFromSecret(t *testing.T) { }, Type: "Opaque", } - _, secretCreateErr := d.cloud.KubeClient.CoreV1().Secrets(secretNamespace).Create(context.TODO(), secret, metav1.CreateOptions{}) + _, secretCreateErr := d.KubeClient.CoreV1().Secrets(secretNamespace).Create(context.TODO(), secret, metav1.CreateOptions{}) if secretCreateErr != nil { t.Error("failed to create secret") } diff --git a/pkg/blob/controllerserver.go b/pkg/blob/controllerserver.go index 4c7b7ed80..b55f25f0c 100644 --- a/pkg/blob/controllerserver.go +++ b/pkg/blob/controllerserver.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/status" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" @@ -42,6 +43,7 @@ import ( "k8s.io/utils/pointer" "sigs.k8s.io/blob-csi-driver/pkg/util" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobcontainerclient" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/metrics" "sigs.k8s.io/cloud-provider-azure/pkg/provider" @@ -261,7 +263,7 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) return nil, status.Errorf(codes.InvalidArgument, "protocol(%s) is not supported, supported protocol list: %v", protocol, supportedProtocolList) } if !isSupportedAccessTier(accessTier) { - return nil, status.Errorf(codes.InvalidArgument, "accessTier(%s) is not supported, supported AccessTier list: %v", accessTier, storage.PossibleAccessTierValues()) + return nil, status.Errorf(codes.InvalidArgument, "accessTier(%s) is not supported, supported AccessTier list: %v", accessTier, armstorage.PossibleAccessTierValues()) } if containerName != "" && containerNamePrefix != "" { @@ -275,7 +277,7 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) if strings.EqualFold(networkEndpointType, privateEndpoint) { createPrivateEndpoint = pointer.BoolPtr(true) } - accountKind := string(storage.KindStorageV2) + accountKind := string(armstorage.KindStorageV2) if isNFSProtocol(protocol) { isHnsEnabled = pointer.Bool(true) enableNfsV3 = pointer.Bool(true) @@ -293,11 +295,11 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) } if strings.HasPrefix(strings.ToLower(storageAccountType), "premium") { - accountKind = string(storage.KindBlockBlobStorage) + accountKind = string(armstorage.KindBlockBlobStorage) } if IsAzureStackCloud(d.cloud) { - accountKind = string(storage.KindStorage) - if storageAccountType != "" && storageAccountType != string(storage.SkuNameStandardLRS) && storageAccountType != string(storage.SkuNamePremiumLRS) { + accountKind = string(armstorage.KindStorage) + if storageAccountType != "" && storageAccountType != string(armstorage.SKUNameStandardLRS) && storageAccountType != string(armstorage.SKUNamePremiumLRS) { return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("Invalid skuName value: %s, as Azure Stack only supports %s and %s Storage Account types.", storageAccountType, storage.SkuNamePremiumLRS, storage.SkuNameStandardLRS)) } } @@ -449,7 +451,7 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) } } - secretName, err := setAzureCredentials(ctx, d.cloud.KubeClient, 
accountName, accountKey, secretNamespace) + secretName, err := setAzureCredentials(ctx, d.KubeClient, accountName, accountKey, secretNamespace) if err != nil { return nil, status.Errorf(codes.Internal, "failed to store storage account key: %v", err) } @@ -569,8 +571,12 @@ func (d *Driver) ValidateVolumeCapabilities(ctx context.Context, req *csi.Valida if resourceGroupName == "" { resourceGroupName = d.cloud.ResourceGroup } - blobContainer, retryErr := d.cloud.BlobClient.GetContainer(ctx, subsID, resourceGroupName, accountName, containerName) - err = retryErr.Error() + blobClient, err := d.clientFactory.GetBlobContainerClientForSub(subsID) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + blobContainer, err := blobClient.Get(ctx, resourceGroupName, accountName, containerName) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } @@ -678,12 +684,17 @@ func (d *Driver) CreateBlobContainer(ctx context.Context, subsID, resourceGroupN } _, err = container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate}) } else { - blobContainer := storage.BlobContainer{ - ContainerProperties: &storage.ContainerProperties{ - PublicAccess: storage.PublicAccessNone, + blobContainer := armstorage.BlobContainer{ + ContainerProperties: &armstorage.ContainerProperties{ + PublicAccess: to.Ptr(armstorage.PublicAccessNone), }, } - err = d.cloud.BlobClient.CreateContainer(ctx, subsID, resourceGroupName, accountName, containerName, blobContainer).Error() + var blobClient blobcontainerclient.Interface + blobClient, err = d.clientFactory.GetBlobContainerClientForSub(subsID) + if err != nil { + return true, err + } + _, err = blobClient.CreateContainer(ctx, resourceGroupName, accountName, containerName, blobContainer) } if err != nil { if strings.Contains(err.Error(), containerBeingDeletedDataplaneAPIError) || @@ -710,7 +721,12 @@ func (d *Driver) DeleteBlobContainer(ctx context.Context, subsID, resourceGroupN } _, err = container.DeleteIfExists(nil) } else { - err = d.cloud.BlobClient.DeleteContainer(ctx, subsID, resourceGroupName, accountName, containerName).Error() + var blobClient blobcontainerclient.Interface + blobClient, err = d.clientFactory.GetBlobContainerClientForSub(subsID) + if err != nil { + return true, err + } + err = blobClient.DeleteContainer(ctx, resourceGroupName, accountName, containerName) } if err != nil { if strings.Contains(err.Error(), containerBeingDeletedDataplaneAPIError) || diff --git a/pkg/blob/controllerserver_test.go b/pkg/blob/controllerserver_test.go index abf82d459..d7961cc2e 100644 --- a/pkg/blob/controllerserver_test.go +++ b/pkg/blob/controllerserver_test.go @@ -19,11 +19,11 @@ package blob import ( "context" "fmt" - "net/http" "reflect" "strings" "testing" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage" "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage" "github.com/container-storage-interface/spec/lib/go/csi" "github.com/stretchr/testify/assert" @@ -31,10 +31,11 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/utils/pointer" "sigs.k8s.io/blob-csi-driver/pkg/util" "sigs.k8s.io/cloud-provider-azure/pkg/azclient" - "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/blobclient" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient/mock_accountclient" + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobcontainerclient/mock_blobcontainerclient" + 
"sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/mockstorageaccountclient" azure "sigs.k8s.io/cloud-provider-azure/pkg/provider" "sigs.k8s.io/cloud-provider-azure/pkg/provider/config" @@ -50,63 +51,6 @@ const ( NULL ) -var _ blobclient.Interface = &mockBlobClient{} - -// mock blobclient that returns the errortype for create/delete/get operations (default value nil) -type mockBlobClient struct { - // type of error returned - errorType *errType - // custom string for error type CUSTOM - custom *string - conProp *storage.ContainerProperties -} - -func (c *mockBlobClient) CreateContainer(_ context.Context, _, _, _, _ string, _ storage.BlobContainer) *retry.Error { - switch *c.errorType { - case DATAPLANE: - return retry.GetError(&http.Response{}, fmt.Errorf(containerBeingDeletedDataplaneAPIError)) - case MANAGEMENT: - return retry.GetError(&http.Response{}, fmt.Errorf(containerBeingDeletedManagementAPIError)) - case CUSTOM: - return retry.GetError(&http.Response{}, fmt.Errorf(*c.custom)) - } - return nil -} -func (c *mockBlobClient) DeleteContainer(_ context.Context, _, _, _, _ string) *retry.Error { - switch *c.errorType { - case DATAPLANE: - return retry.GetError(&http.Response{}, fmt.Errorf(containerBeingDeletedDataplaneAPIError)) - case MANAGEMENT: - return retry.GetError(&http.Response{}, fmt.Errorf(containerBeingDeletedManagementAPIError)) - case CUSTOM: - return retry.GetError(&http.Response{}, fmt.Errorf(*c.custom)) - } - return nil -} -func (c *mockBlobClient) GetContainer(_ context.Context, _, _, _, _ string) (storage.BlobContainer, *retry.Error) { - switch *c.errorType { - case DATAPLANE: - return storage.BlobContainer{ContainerProperties: c.conProp}, retry.GetError(&http.Response{}, fmt.Errorf(containerBeingDeletedDataplaneAPIError)) - case MANAGEMENT: - return storage.BlobContainer{ContainerProperties: c.conProp}, retry.GetError(&http.Response{}, fmt.Errorf(containerBeingDeletedManagementAPIError)) - case CUSTOM: - return storage.BlobContainer{ContainerProperties: c.conProp}, retry.GetError(&http.Response{}, fmt.Errorf(*c.custom)) - } - return storage.BlobContainer{ContainerProperties: c.conProp}, nil -} - -func (c *mockBlobClient) GetServiceProperties(_ context.Context, _, _, _ string) (storage.BlobServiceProperties, error) { - return storage.BlobServiceProperties{}, nil -} - -func (c *mockBlobClient) SetServiceProperties(_ context.Context, _, _, _ string, _ storage.BlobServiceProperties) (storage.BlobServiceProperties, error) { - return storage.BlobServiceProperties{}, nil -} - -func newMockBlobClient(errorType *errType, custom *string, conProp *storage.ContainerProperties) blobclient.Interface { - return &mockBlobClient{errorType: errorType, custom: custom, conProp: conProp} -} - // creates and returns mock storage account client func NewMockSAClient(_ context.Context, ctrl *gomock.Controller, _, _, _ string, keyList *[]storage.AccountKey) *mockstorageaccountclient.MockInterface { cl := mockstorageaccountclient.NewMockInterface(ctrl) @@ -651,9 +595,13 @@ func TestCreateVolume(t *testing.T) { Value: &fakeValue, }) d.cloud.StorageAccountClient = NewMockSAClient(context.Background(), gomock.NewController(t), "subID", "unit-test", "unit-test", &keyList) + controller := gomock.NewController(t) - errorType := DATAPLANE - d.cloud.BlobClient = &mockBlobClient{errorType: &errorType} + clientFactoryMock := mock_azclient.NewMockClientFactory(controller) + blobClientMock := 
mock_blobcontainerclient.NewMockInterface(controller) + blobClientMock.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("timed out waiting for the condition")) + clientFactoryMock.EXPECT().GetBlobContainerClientForSub(gomock.Any()).Return(blobClientMock, nil) + d.clientFactory = clientFactoryMock mp := make(map[string]string) mp[protocolField] = "fuse" @@ -679,6 +627,7 @@ func TestCreateVolume(t *testing.T) { if !reflect.DeepEqual(err, expectedErr) { t.Errorf("Unexpected error: %v", err) } + controller.Finish() }, }, { @@ -687,12 +636,14 @@ func TestCreateVolume(t *testing.T) { d := NewFakeDriver() d.cloud = &azure.Cloud{} d.cloud.SubscriptionID = "subID" - - keyList := make([]storage.AccountKey, 0) - d.cloud.StorageAccountClient = NewMockSAClient(context.Background(), gomock.NewController(t), "subID", "unit-test", "unit-test", &keyList) - - errorType := NULL - d.cloud.BlobClient = &mockBlobClient{errorType: &errorType} + controller := gomock.NewController(t) + var mockKeyList []storage.AccountKey + d.cloud.StorageAccountClient = NewMockSAClient(context.Background(), gomock.NewController(t), "subID", "unit-test", "unit-test", &mockKeyList) + clientFactoryMock := mock_azclient.NewMockClientFactory(controller) + blobClientMock := mock_blobcontainerclient.NewMockInterface(controller) + blobClientMock.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) + clientFactoryMock.EXPECT().GetBlobContainerClientForSub(gomock.Any()).Return(blobClientMock, nil) + d.clientFactory = clientFactoryMock mp := make(map[string]string) mp[storeAccountKeyField] = trueValue @@ -719,6 +670,7 @@ func TestCreateVolume(t *testing.T) { if !reflect.DeepEqual(err, expectedErr) { t.Errorf("Unexpected error: %v\nExpected error: %v", err, expectedErr) } + controller.Finish() }, }, { @@ -735,10 +687,15 @@ func TestCreateVolume(t *testing.T) { KeyName: &fakeKey, Value: &fakeValue, }) - d.cloud.StorageAccountClient = NewMockSAClient(context.Background(), gomock.NewController(t), "subID", "unit-test", "unit-test", &keyList) + controller := gomock.NewController(t) + + d.cloud.StorageAccountClient = NewMockSAClient(context.Background(), controller, "subID", "unit-test", "unit-test", &keyList) - errorType := NULL - d.cloud.BlobClient = &mockBlobClient{errorType: &errorType} + clientFactoryMock := mock_azclient.NewMockClientFactory(controller) + blobClientMock := mock_blobcontainerclient.NewMockInterface(controller) + blobClientMock.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) + clientFactoryMock.EXPECT().GetBlobContainerClientForSub(gomock.Any()).Return(blobClientMock, nil) + d.clientFactory = clientFactoryMock mp := make(map[string]string) mp[protocolField] = "fuse" @@ -763,6 +720,7 @@ func TestCreateVolume(t *testing.T) { if !reflect.DeepEqual(err, expectedErr) { t.Errorf("Unexpected error: %v", err) } + controller.Finish() }, }, //nolint:dupl @@ -783,8 +741,7 @@ func TestCreateVolume(t *testing.T) { d.cloud.StorageAccountClient = NewMockSAClient(context.Background(), gomock.NewController(t), "subID", "unit-test", "unit-test", &keyList) d.cloud.Config.AzureAuthConfig.UseManagedIdentityExtension = true - errorType := NULL - d.cloud.BlobClient = &mockBlobClient{errorType: &errorType} + d.clientFactory = mock_azclient.NewMockClientFactory(gomock.NewController(t)) mp := make(map[string]string) mp[protocolField] = "fuse" @@ -849,8 +806,7 @@ 
func TestCreateVolume(t *testing.T) { d.cloud.StorageAccountClient = NewMockSAClient(context.Background(), gomock.NewController(t), "subID", "unit-test", "unit-test", &keyList) d.cloud.Config.AzureAuthConfig.UseManagedIdentityExtension = true - errorType := NULL - d.cloud.BlobClient = &mockBlobClient{errorType: &errorType} + d.clientFactory = mock_azclient.NewMockClientFactory(gomock.NewController(t)) mp := make(map[string]string) mp[protocolField] = "fuse" @@ -1068,7 +1024,7 @@ func TestValidateVolumeCapabilities(t *testing.T) { name string req *csi.ValidateVolumeCapabilitiesRequest clientErr errType - containerProp *storage.ContainerProperties + containerProp *armstorage.ContainerProperties expectedRes *csi.ValidateVolumeCapabilitiesResponse expectedErr error }{ @@ -1147,9 +1103,9 @@ func TestValidateVolumeCapabilities(t *testing.T) { Secrets: map[string]string{}, }, clientErr: DATAPLANE, - containerProp: &storage.ContainerProperties{}, + containerProp: &armstorage.ContainerProperties{}, expectedRes: nil, - expectedErr: status.Errorf(codes.Internal, retry.GetError(&http.Response{}, fmt.Errorf(containerBeingDeletedDataplaneAPIError)).Error().Error()), + expectedErr: status.Errorf(codes.Internal, fmt.Errorf(containerBeingDeletedDataplaneAPIError).Error()), }, { name: "Requested Volume does not exist", @@ -1159,7 +1115,7 @@ func TestValidateVolumeCapabilities(t *testing.T) { Secrets: map[string]string{}, }, clientErr: NULL, - containerProp: &storage.ContainerProperties{}, + containerProp: &armstorage.ContainerProperties{}, expectedRes: nil, expectedErr: status.Errorf(codes.NotFound, fmt.Sprintf("requested volume(%s) does not exist", "unit#test#test")), }, @@ -1181,31 +1137,48 @@ func TestValidateVolumeCapabilities(t *testing.T) { expectedErr: nil, },*/ } - d := NewFakeDriver() - d.cloud = &azure.Cloud{} - d.Cap = []*csi.ControllerServiceCapability{ - controllerServiceCapability, - } - ctrl := gomock.NewController(t) - defer ctrl.Finish() - mockStorageAccountsClient := mockstorageaccountclient.NewMockInterface(ctrl) - d.cloud.StorageAccountClient = mockStorageAccountsClient - s := "unit-test" - accountkey := storage.AccountKey{ - Value: &s, - } - accountkeylist := []storage.AccountKey{} - accountkeylist = append(accountkeylist, accountkey) - list := storage.AccountListKeysResult{ - Keys: &accountkeylist, - } - mockStorageAccountsClient.EXPECT().ListKeys(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(list, nil).AnyTimes() for _, test := range testCases { + d := NewFakeDriver() + d.cloud = &azure.Cloud{} + d.Cap = []*csi.ControllerServiceCapability{ + controllerServiceCapability, + } + ctrl := gomock.NewController(t) + + mockStorageAccountsClient := mockstorageaccountclient.NewMockInterface(ctrl) + d.cloud.StorageAccountClient = mockStorageAccountsClient + s := "unit-test" + accountkey := storage.AccountKey{ + Value: &s, + } + accountkeylist := []storage.AccountKey{} + accountkeylist = append(accountkeylist, accountkey) + list := storage.AccountListKeysResult{ + Keys: &accountkeylist, + } + mockStorageAccountsClient.EXPECT().ListKeys(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(list, nil).AnyTimes() + + clientFactoryMock := mock_azclient.NewMockClientFactory(ctrl) + blobClientMock := mock_blobcontainerclient.NewMockInterface(ctrl) + clientFactoryMock.EXPECT().GetBlobContainerClientForSub(gomock.Any()).Return(blobClientMock, nil).AnyTimes() + blobClientMock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx 
context.Context, resourceGroupName string, parentResourceName string, resourceName string) (result *armstorage.BlobContainer, rerr error) { + switch test.clientErr { + case DATAPLANE: + return nil, fmt.Errorf(containerBeingDeletedDataplaneAPIError) + case MANAGEMENT: + return nil, fmt.Errorf(containerBeingDeletedManagementAPIError) + } + return &armstorage.BlobContainer{ + ContainerProperties: test.containerProp, + }, nil + }).AnyTimes() + d.clientFactory = clientFactoryMock res, err := d.ValidateVolumeCapabilities(context.Background(), test.req) - d.cloud.BlobClient = newMockBlobClient(&test.clientErr, pointer.String(""), test.containerProp) assert.Equal(t, test.expectedErr, err, "Error in testcase (%s): Errors must match", test.name) assert.Equal(t, test.expectedRes, res, "Error in testcase (%s): Response must match", test.name) + ctrl.Finish() } } @@ -1415,19 +1388,38 @@ func TestCreateBlobContainer(t *testing.T) { secrets: map[string]string{}, clientErr: CUSTOM, customErrStr: "foobar", - expectedErr: retry.GetError(&http.Response{}, fmt.Errorf("foobar")).Error(), + expectedErr: fmt.Errorf("foobar"), }, } d := NewFakeDriver() d.cloud = &azure.Cloud{} - conProp := &storage.ContainerProperties{} for _, test := range tests { - d.cloud.BlobClient = newMockBlobClient(&test.clientErr, &test.customErrStr, conProp) + controller := gomock.NewController(t) + clientFactoryMock := mock_azclient.NewMockClientFactory(controller) + blobClientMock := mock_blobcontainerclient.NewMockInterface(controller) + accountClientMock := mock_accountclient.NewMockInterface(controller) + clientFactoryMock.EXPECT().GetAccountClientForSub(gomock.Any()).Return(accountClientMock, nil).AnyTimes() + clientFactoryMock.EXPECT().GetBlobContainerClientForSub(gomock.Any()).Return(blobClientMock, nil).AnyTimes() + d.clientFactory = clientFactoryMock + blobClientMock.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, resourceGroupName, accountName, containerName string, parameters armstorage.BlobContainer) (*armstorage.BlobContainer, error) { + if test.clientErr == DATAPLANE { + return nil, fmt.Errorf(containerBeingDeletedDataplaneAPIError) + } + if test.clientErr == MANAGEMENT { + return nil, fmt.Errorf(containerBeingDeletedManagementAPIError) + } + if test.clientErr == CUSTOM { + return nil, fmt.Errorf(test.customErrStr) + } + return nil, nil + }).AnyTimes() err := d.CreateBlobContainer(context.Background(), test.subsID, test.rg, test.accountName, test.containerName, test.secrets) if !reflect.DeepEqual(err, test.expectedErr) { t.Errorf("test(%s), actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr) } + controller.Finish() } } @@ -1486,20 +1478,38 @@ func TestDeleteBlobContainer(t *testing.T) { clientErr: CUSTOM, customErrStr: "foobar", expectedErr: fmt.Errorf("failed to delete container(%s) on account(%s), error: %w", "containerName", "", - retry.GetError(&http.Response{}, fmt.Errorf("foobar")).Error()), + fmt.Errorf("foobar")), }, } d := NewFakeDriver() - d.cloud = &azure.Cloud{} - connProp := &storage.ContainerProperties{} for _, test := range tests { - d.cloud.BlobClient = newMockBlobClient(&test.clientErr, &test.customErrStr, connProp) + controller := gomock.NewController(t) + clientFactoryMock := mock_azclient.NewMockClientFactory(controller) + blobClientMock := mock_blobcontainerclient.NewMockInterface(controller) + accountClientMock := mock_accountclient.NewMockInterface(controller) + 
clientFactoryMock.EXPECT().GetAccountClientForSub(gomock.Any()).Return(accountClientMock, nil).AnyTimes() + clientFactoryMock.EXPECT().GetBlobContainerClientForSub(gomock.Any()).Return(blobClientMock, nil).AnyTimes() + d.clientFactory = clientFactoryMock + blobClientMock.EXPECT().DeleteContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, resourceGroupName, accountName, containerName string) error { + if test.clientErr == DATAPLANE { + return fmt.Errorf(containerBeingDeletedDataplaneAPIError) + } + if test.clientErr == MANAGEMENT { + return fmt.Errorf(containerBeingDeletedManagementAPIError) + } + if test.clientErr == CUSTOM { + return fmt.Errorf(test.customErrStr) + } + return nil + }).AnyTimes() err := d.DeleteBlobContainer(context.Background(), test.subsID, test.rg, test.accountName, test.containerName, test.secrets) if !reflect.DeepEqual(err, test.expectedErr) { t.Errorf("test(%s), actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr) } + controller.Finish() } } diff --git a/vendor/modules.txt b/vendor/modules.txt index 9872670e0..64dae92b6 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1613,8 +1613,10 @@ sigs.k8s.io/cloud-provider-azure/pkg/version ## explicit; go 1.20 sigs.k8s.io/cloud-provider-azure/pkg/azclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient +sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient/mock_accountclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/availabilitysetclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobcontainerclient +sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobcontainerclient/mock_blobcontainerclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobservicepropertiesclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/deploymentclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient/mock_accountclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient/mock_accountclient/interface.go new file mode 100644 index 000000000..3b05c8f5d --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient/mock_accountclient/interface.go @@ -0,0 +1,131 @@ +// /* +// Copyright The Kubernetes Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// */ + +// Code generated by MockGen. DO NOT EDIT. +// Source: accountclient/interface.go +// +// Generated by this command: +// +// mockgen -package mock_accountclient -source accountclient/interface.go +// + +// Package mock_accountclient is a generated GoMock package. +package mock_accountclient + +import ( + context "context" + reflect "reflect" + + armstorage "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage" + gomock "go.uber.org/mock/gomock" +) + +// MockInterface is a mock of Interface interface. 
+type MockInterface struct { + ctrl *gomock.Controller + recorder *MockInterfaceMockRecorder +} + +// MockInterfaceMockRecorder is the mock recorder for MockInterface. +type MockInterfaceMockRecorder struct { + mock *MockInterface +} + +// NewMockInterface creates a new mock instance. +func NewMockInterface(ctrl *gomock.Controller) *MockInterface { + mock := &MockInterface{ctrl: ctrl} + mock.recorder = &MockInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { + return m.recorder +} + +// Create mocks base method. +func (m *MockInterface) Create(ctx context.Context, resourceGroupName, accountName string, resource *armstorage.AccountCreateParameters) (*armstorage.Account, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Create", ctx, resourceGroupName, accountName, resource) + ret0, _ := ret[0].(*armstorage.Account) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Create indicates an expected call of Create. +func (mr *MockInterfaceMockRecorder) Create(ctx, resourceGroupName, accountName, resource any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockInterface)(nil).Create), ctx, resourceGroupName, accountName, resource) +} + +// Delete mocks base method. +func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, accountName string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, accountName) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, accountName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, accountName) +} + +// GetProperties mocks base method. +func (m *MockInterface) GetProperties(ctx context.Context, resourceGroupName, accountName string, options *armstorage.AccountsClientGetPropertiesOptions) (*armstorage.Account, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProperties", ctx, resourceGroupName, accountName, options) + ret0, _ := ret[0].(*armstorage.Account) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProperties indicates an expected call of GetProperties. +func (mr *MockInterfaceMockRecorder) GetProperties(ctx, resourceGroupName, accountName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProperties", reflect.TypeOf((*MockInterface)(nil).GetProperties), ctx, resourceGroupName, accountName, options) +} + +// List mocks base method. +func (m *MockInterface) List(ctx context.Context, resourceGroupName string) ([]*armstorage.Account, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "List", ctx, resourceGroupName) + ret0, _ := ret[0].([]*armstorage.Account) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// List indicates an expected call of List. +func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName) +} + +// ListKeys mocks base method. 
+func (m *MockInterface) ListKeys(ctx context.Context, resourceGroupName, accountName string) ([]*armstorage.AccountKey, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListKeys", ctx, resourceGroupName, accountName) + ret0, _ := ret[0].([]*armstorage.AccountKey) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListKeys indicates an expected call of ListKeys. +func (mr *MockInterfaceMockRecorder) ListKeys(ctx, resourceGroupName, accountName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListKeys", reflect.TypeOf((*MockInterface)(nil).ListKeys), ctx, resourceGroupName, accountName) +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobcontainerclient/mock_blobcontainerclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobcontainerclient/mock_blobcontainerclient/interface.go new file mode 100644 index 000000000..caa9c9420 --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobcontainerclient/mock_blobcontainerclient/interface.go @@ -0,0 +1,116 @@ +// /* +// Copyright The Kubernetes Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// */ + +// Code generated by MockGen. DO NOT EDIT. +// Source: blobcontainerclient/interface.go +// +// Generated by this command: +// +// mockgen -package mock_blobcontainerclient -source blobcontainerclient/interface.go +// + +// Package mock_blobcontainerclient is a generated GoMock package. +package mock_blobcontainerclient + +import ( + context "context" + reflect "reflect" + + armstorage "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage" + gomock "go.uber.org/mock/gomock" +) + +// MockInterface is a mock of Interface interface. +type MockInterface struct { + ctrl *gomock.Controller + recorder *MockInterfaceMockRecorder +} + +// MockInterfaceMockRecorder is the mock recorder for MockInterface. +type MockInterfaceMockRecorder struct { + mock *MockInterface +} + +// NewMockInterface creates a new mock instance. +func NewMockInterface(ctrl *gomock.Controller) *MockInterface { + mock := &MockInterface{ctrl: ctrl} + mock.recorder = &MockInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { + return m.recorder +} + +// CreateContainer mocks base method. +func (m *MockInterface) CreateContainer(ctx context.Context, resourceGroupName, accountName, containerName string, parameters armstorage.BlobContainer) (*armstorage.BlobContainer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateContainer", ctx, resourceGroupName, accountName, containerName, parameters) + ret0, _ := ret[0].(*armstorage.BlobContainer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateContainer indicates an expected call of CreateContainer. 
+func (mr *MockInterfaceMockRecorder) CreateContainer(ctx, resourceGroupName, accountName, containerName, parameters any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateContainer", reflect.TypeOf((*MockInterface)(nil).CreateContainer), ctx, resourceGroupName, accountName, containerName, parameters) +} + +// DeleteContainer mocks base method. +func (m *MockInterface) DeleteContainer(ctx context.Context, resourceGroupName, accountName, containerName string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteContainer", ctx, resourceGroupName, accountName, containerName) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteContainer indicates an expected call of DeleteContainer. +func (mr *MockInterfaceMockRecorder) DeleteContainer(ctx, resourceGroupName, accountName, containerName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteContainer", reflect.TypeOf((*MockInterface)(nil).DeleteContainer), ctx, resourceGroupName, accountName, containerName) +} + +// Get mocks base method. +func (m *MockInterface) Get(ctx context.Context, resourceGroupName, parentResourceName, resourceName string) (*armstorage.BlobContainer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, parentResourceName, resourceName) + ret0, _ := ret[0].(*armstorage.BlobContainer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, parentResourceName, resourceName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, parentResourceName, resourceName) +} + +// List mocks base method. +func (m *MockInterface) List(ctx context.Context, resourceGroupName, parentResourceName string) ([]*armstorage.ListContainerItem, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "List", ctx, resourceGroupName, parentResourceName) + ret0, _ := ret[0].([]*armstorage.ListContainerItem) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// List indicates an expected call of List. +func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName, parentResourceName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName, parentResourceName) +} From 9150185e82d3edb41d9eec7569be989820016aa3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 19 Jan 2024 09:11:09 +0000 Subject: [PATCH 057/157] chore(deps): bump github.com/onsi/ginkgo/v2 from 2.14.0 to 2.15.0 Bumps [github.com/onsi/ginkgo/v2](https://github.com/onsi/ginkgo) from 2.14.0 to 2.15.0. - [Release notes](https://github.com/onsi/ginkgo/releases) - [Changelog](https://github.com/onsi/ginkgo/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/ginkgo/compare/v2.14.0...v2.15.0) --- updated-dependencies: - dependency-name: github.com/onsi/ginkgo/v2 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 18 ++++++++++++++++++ .../v2/ginkgo/internal/profiles_and_reports.go | 4 ++-- .../onsi/ginkgo/v2/ginkgo/outline/ginkgo.go | 3 ++- .../onsi/ginkgo/v2/ginkgo/outline/import.go | 9 +-------- .../github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go | 5 +++++ .../onsi/ginkgo/v2/internal/spec_context.go | 4 ++-- .../onsi/ginkgo/v2/internal/suite.go | 6 +++--- .../onsi/ginkgo/v2/reporters/junit_report.go | 12 ++++++++++++ .../github.com/onsi/ginkgo/v2/types/version.go | 2 +- vendor/modules.txt | 4 ++-- 12 files changed, 51 insertions(+), 22 deletions(-) diff --git a/go.mod b/go.mod index dbf2256d7..9bca0ece1 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/golang/protobuf v1.5.3 github.com/kubernetes-csi/csi-lib-utils v0.16.0 - github.com/onsi/ginkgo/v2 v2.14.0 + github.com/onsi/ginkgo/v2 v2.15.0 github.com/onsi/gomega v1.30.0 github.com/pborman/uuid v1.2.1 github.com/pelletier/go-toml v1.9.5 diff --git a/go.sum b/go.sum index cc1b76cd2..ba95e786c 100644 --- a/go.sum +++ b/go.sum @@ -209,8 +209,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY= -github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw= +github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= +github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index fbe515639..9a65dd10c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,21 @@ +## 2.15.0 + +### Features + +- JUnit reports now interpret Label(owner:X) and set owner to X. 
[8f3bd70] +- include cancellation reason when cancelling spec context [96e915c] + +### Fixes + +- emit output of failed go tool cover invocation so users can try to debug things for themselves [c245d09] +- fix outline when using nodot in ginkgo v2 [dca77c8] +- Document areas where GinkgoT() behaves differently from testing.T [dbaf18f] +- bugfix(docs): use Unsetenv instead of Clearenv (#1337) [6f67a14] + +### Maintenance + +- Bump to go 1.20 [4fcd0b3] + ## 2.14.0 ### Features diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go index bd3c6d028..26de28b57 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ -144,7 +144,7 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC return messages, nil } -//loads each profile, combines them, deletes them, stores them in destination +// loads each profile, combines them, deletes them, stores them in destination func MergeAndCleanupCoverProfiles(profiles []string, destination string) error { combined := &bytes.Buffer{} modeRegex := regexp.MustCompile(`^mode: .*\n`) @@ -184,7 +184,7 @@ func GetCoverageFromCoverProfile(profile string) (float64, error) { cmd := exec.Command("go", "tool", "cover", "-func", profile) output, err := cmd.CombinedOutput() if err != nil { - return 0, fmt.Errorf("Could not process Coverprofile %s: %s", profile, err.Error()) + return 0, fmt.Errorf("Could not process Coverprofile %s: %s - %s", profile, err.Error(), string(output)) } re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`) matches := re.FindStringSubmatch(string(output)) diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go index 958daccbf..5d8d00bb1 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go @@ -1,10 +1,11 @@ package outline import ( - "github.com/onsi/ginkgo/v2/types" "go/ast" "go/token" "strconv" + + "github.com/onsi/ginkgo/v2/types" ) const ( diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go index 67ec5ab75..f0a6b5d26 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go @@ -28,14 +28,7 @@ func packageNameForImport(f *ast.File, path string) *string { } name := spec.Name.String() if name == "" { - // If the package name is not explicitly specified, - // make an educated guess. This is not guaranteed to be correct. - lastSlash := strings.LastIndex(path, "/") - if lastSlash == -1 { - name = path - } else { - name = path[lastSlash+1:] - } + name = "ginkgo" } if name == "." 
{ name = "" diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go index 639541a16..02c6739e5 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go @@ -15,6 +15,11 @@ GinkgoT() is analogous to *testing.T and implements the majority of *testing.T's GinkgoT() takes an optional offset argument that can be used to get the correct line number associated with the failure - though you do not need to use this if you call GinkgoHelper() or GinkgoT().Helper() appropriately +GinkgoT() attempts to mimic the behavior of `testing.T` with the exception of the following: + +- Error/Errorf: failures in Ginkgo always immediately stop execution and there is no mechanism to log a failure without aborting the test. As such Error/Errorf are equivalent to Fatal/Fatalf. +- Parallel() is a no-op as Ginkgo's multi-process parallelism model is substantially different from go test's in-process model. + You can learn more here: https://onsi.github.io/ginkgo/#using-third-party-libraries */ func GinkgoT(optionalOffset ...int) FullGinkgoTInterface { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go index 2515b84a1..2d2ea2fc3 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go @@ -17,7 +17,7 @@ type specContext struct { context.Context *ProgressReporterManager - cancel context.CancelFunc + cancel context.CancelCauseFunc suite *Suite } @@ -30,7 +30,7 @@ Note that while SpecContext is used to enforce deadlines by Ginkgo it is not con This is because Ginkgo needs finer control over when the context is canceled. Specifically, Ginkgo needs to generate a ProgressReport before it cancels the context to ensure progress is captured where the spec is currently running. The only way to avoid a race here is to manually control the cancellation. */ func NewSpecContext(suite *Suite) *specContext { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancelCause(context.Background()) sc := &specContext{ cancel: cancel, suite: suite, diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index 6746152ec..2b4db48af 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -858,7 +858,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ } sc := NewSpecContext(suite) - defer sc.cancel() + defer sc.cancel(fmt.Errorf("spec has finished")) suite.selectiveLock.Lock() suite.currentSpecContext = sc @@ -958,7 +958,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ // tell the spec to stop. 
it's important we generate the progress report first to make sure we capture where // the spec is actually stuck - sc.cancel() + sc.cancel(fmt.Errorf("%s timeout occurred", timeoutInPlay)) //and now we wait for the grace period gracePeriodChannel = time.After(gracePeriod) case <-interruptStatus.Channel: @@ -985,7 +985,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ } progressReport = progressReport.WithoutOtherGoroutines() - sc.cancel() + sc.cancel(fmt.Errorf(interruptStatus.Message())) if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut { if interruptStatus.ShouldIncludeProgressReport() { diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index 816042208..43244a9bd 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -15,6 +15,7 @@ import ( "fmt" "os" "path" + "regexp" "strings" "github.com/onsi/ginkgo/v2/config" @@ -104,6 +105,8 @@ type JUnitProperty struct { Value string `xml:"value,attr"` } +var ownerRE = regexp.MustCompile(`(?i)^owner:(.*)$`) + type JUnitTestCase struct { // Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()" Name string `xml:"name,attr"` @@ -113,6 +116,8 @@ type JUnitTestCase struct { Status string `xml:"status,attr"` // Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime Time float64 `xml:"time,attr"` + // Owner is the owner the spec - is set if a label matching Label("owner:X") is provided. The last matching label is used as the owner, thereby allowing specs to override owners specified in container nodes. + Owner string `xml:"owner,attr,omitempty"` //Skipped is populated with a message if the test was skipped or pending Skipped *JUnitSkipped `xml:"skipped,omitempty"` //Error is populated if the test panicked or was interrupted @@ -195,6 +200,12 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit if len(labels) > 0 && !config.OmitSpecLabels { name = name + " [" + strings.Join(labels, ", ") + "]" } + owner := "" + for _, label := range labels { + if matches := ownerRE.FindStringSubmatch(label); len(matches) == 2 { + owner = matches[1] + } + } name = strings.TrimSpace(name) test := JUnitTestCase{ @@ -202,6 +213,7 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit Classname: report.SuiteDescription, Status: spec.State.String(), Time: spec.RunTime.Seconds(), + Owner: owner, } if !spec.State.Is(config.OmitTimelinesForSpecState) { test.SystemErr = systemErrForUnstructuredReporters(spec) diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 7015be128..ed9346474 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.14.0" +const VERSION = "2.15.0" diff --git a/vendor/modules.txt b/vendor/modules.txt index 64dae92b6..a9a7d13fb 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -343,8 +343,8 @@ github.com/munnerz/goautoneg # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f ## explicit github.com/mxk/go-flowrate/flowrate -# github.com/onsi/ginkgo/v2 v2.14.0 -## explicit; go 1.18 +# github.com/onsi/ginkgo/v2 v2.15.0 +## explicit; go 1.20 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config 
github.com/onsi/ginkgo/v2/formatter From 63f637865c8bf4b846379f38bfcf2eaf5bba2af0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Jan 2024 16:56:02 +0000 Subject: [PATCH 058/157] chore(deps): bump github.com/onsi/gomega from 1.30.0 to 1.31.1 Bumps [github.com/onsi/gomega](https://github.com/onsi/gomega) from 1.30.0 to 1.31.1. - [Release notes](https://github.com/onsi/gomega/releases) - [Changelog](https://github.com/onsi/gomega/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/gomega/compare/v1.30.0...v1.31.1) --- updated-dependencies: - dependency-name: github.com/onsi/gomega dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 +-- vendor/github.com/onsi/gomega/CHANGELOG.md | 25 +++++++++++++++++++ vendor/github.com/onsi/gomega/gomega_dsl.go | 2 +- .../onsi/gomega/internal/async_assertion.go | 7 +++++- vendor/github.com/onsi/gomega/matchers.go | 2 +- .../matchers/be_comparable_to_matcher.go | 4 +-- vendor/modules.txt | 4 +-- 8 files changed, 40 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index 9bca0ece1..3cc6998cb 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/golang/protobuf v1.5.3 github.com/kubernetes-csi/csi-lib-utils v0.16.0 github.com/onsi/ginkgo/v2 v2.15.0 - github.com/onsi/gomega v1.30.0 + github.com/onsi/gomega v1.31.1 github.com/pborman/uuid v1.2.1 github.com/pelletier/go-toml v1.9.5 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index ba95e786c..47abed1f1 100644 --- a/go.sum +++ b/go.sum @@ -211,8 +211,8 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo= +github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index fe72a7b18..9a14b8151 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,28 @@ +## 1.31.1 + +### Fixes +- Inverted arguments order of FailureMessage of BeComparableToMatcher [e0dd999] +- Update test in case keeping msg is desired [ad1a367] + +### Maintenance +- Show how to import the format sub package [24e958d] +- tidy up go.sum [26661b8] +- bump dependencies [bde8f7a] + +## 1.31.0 + +### Features +- Async assertions include context cancellation cause if present [121c37f] + +### Maintenance +- Bump minimum go version [dee1e3c] +- docs: fix typo in example usage "occured" -> "occurred" [49005fe] +- Bump actions/setup-go from 4 to 5 (#714) [f1c8757] +- Bump github/codeql-action from 2 to 3 (#715) [9836e76] +- Bump 
github.com/onsi/ginkgo/v2 from 2.13.0 to 2.13.2 (#713) [54726f0] +- Bump golang.org/x/net from 0.17.0 to 0.19.0 (#711) [df97ecc] +- docs: fix `HaveExactElement` typo (#712) [a672c86] + ## 1.30.0 ### Features diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index c271a366a..5b46a1658 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.30.0" +const GOMEGA_VERSION = "1.31.1" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go index 1188b0bce..cde9e2ec8 100644 --- a/vendor/github.com/onsi/gomega/internal/async_assertion.go +++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -553,7 +553,12 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch lock.Unlock() } case <-contextDone: - fail("Context was cancelled") + err := context.Cause(assertion.ctx) + if err != nil && err != context.Canceled { + fail(fmt.Sprintf("Context was cancelled (cause: %s)", err)) + } else { + fail("Context was cancelled") + } return false case <-timeout: if assertion.asyncType == AsyncAssertionTypeEventually { diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index 43f994374..8860d677f 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -394,7 +394,7 @@ func ConsistOf(elements ...interface{}) types.GomegaMatcher { } } -// HaveExactElemets succeeds if actual contains elements that precisely match the elemets passed into the matcher. The ordering of the elements does matter. +// HaveExactElements succeeds if actual contains elements that precisely match the elemets passed into the matcher. The ordering of the elements does matter. // By default HaveExactElements() uses Equal() to match the elements, however custom matchers can be passed in instead. 
Here are some examples: // // Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", "FooBar")) diff --git a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go index 8ab4bb919..4e3897858 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go @@ -41,9 +41,9 @@ func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, m } func (matcher *BeComparableToMatcher) FailureMessage(actual interface{}) (message string) { - return cmp.Diff(matcher.Expected, actual, matcher.Options) + return fmt.Sprint("Expected object to be comparable, diff: ", cmp.Diff(actual, matcher.Expected, matcher.Options...)) } func (matcher *BeComparableToMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to equal", matcher.Expected) + return format.Message(actual, "not to be comparable to", matcher.Expected) } diff --git a/vendor/modules.txt b/vendor/modules.txt index a9a7d13fb..7b6315e15 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -365,8 +365,8 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.30.0 -## explicit; go 1.18 +# github.com/onsi/gomega v1.31.1 +## explicit; go 1.20 github.com/onsi/gomega github.com/onsi/gomega/format github.com/onsi/gomega/gcustom From d37a5092f12c89d381b62e0083cbff5cc422c29a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 Jan 2024 16:23:26 +0000 Subject: [PATCH 059/157] chore(deps): bump k8s.io/klog/v2 from 2.120.0 to 2.120.1 Bumps [k8s.io/klog/v2](https://github.com/kubernetes/klog) from 2.120.0 to 2.120.1. - [Release notes](https://github.com/kubernetes/klog/releases) - [Changelog](https://github.com/kubernetes/klog/blob/main/RELEASE.md) - [Commits](https://github.com/kubernetes/klog/compare/v2.120.0...v2.120.1) --- updated-dependencies: - dependency-name: k8s.io/klog/v2 dependency-type: direct:production update-type: version-update:semver-patch ... 
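The async_assertion.go hunk in the gomega bump above makes Eventually/Consistently include the cancellation cause in their failure message when the context was cancelled via context.WithCancelCause. A small sketch of the new behavior (illustrative only; the test name, cause message, and fail-handler wiring are invented, not taken from either patch):

package sample_test

import (
	"context"
	"errors"
	"strings"
	"testing"

	. "github.com/onsi/gomega"
)

func TestCancellationCause(t *testing.T) {
	// Capture the failure message instead of failing the test outright.
	var message string
	g := NewGomega(func(msg string, _ ...int) { message = msg })

	ctx, cancel := context.WithCancelCause(context.Background())
	// Cancel with an explicit cause; a plain context.Canceled would keep
	// the old bare "Context was cancelled" message.
	cancel(errors.New("azcopy wait loop gave up"))

	// The assertion can never succeed; with gomega v1.31+ the recorded
	// failure now reads "Context was cancelled (cause: azcopy wait loop gave up)".
	g.Eventually(func() bool { return false }).WithContext(ctx).Should(BeTrue())

	if !strings.Contains(message, "cause: azcopy wait loop gave up") {
		t.Fatalf("unexpected failure message: %s", message)
	}
}
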
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- vendor/modules.txt | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 3cc6998cb..47c26bbb0 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,7 @@ require ( k8s.io/apimachinery v0.29.0 k8s.io/client-go v0.29.0 k8s.io/component-base v0.29.0 - k8s.io/klog/v2 v2.120.0 + k8s.io/klog/v2 v2.120.1 k8s.io/kubernetes v1.29.0 k8s.io/mount-utils v0.29.0 k8s.io/utils v0.0.0-20231127182322-b307cd553661 diff --git a/go.sum b/go.sum index 47abed1f1..b2d44af03 100644 --- a/go.sum +++ b/go.sum @@ -446,8 +446,8 @@ k8s.io/controller-manager v0.29.0 h1:kEv9sKLnjDkoSqeouWp2lZ8P33an5wrDJpOMqoyD7pc k8s.io/controller-manager v0.29.0/go.mod h1:UKtadWkULF5bfX7vu3hHppzY/hz88C03t70GItg/x08= k8s.io/csi-translation-lib v0.29.0 h1:we4X1yUlDikvm5Rv0dwMuPHNw6KwjwsQiAuOPWXha8M= k8s.io/csi-translation-lib v0.29.0/go.mod h1:Cp6t3CNBSm1dXS17V8IImUjkqfIB6KCj8Fs8wf6uyTA= -k8s.io/klog/v2 v2.120.0 h1:z+q5mfovBj1fKFxiRzsa2DsJLPIVMk/KFL81LMOfK+8= -k8s.io/klog/v2 v2.120.0/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kms v0.29.0 h1:KJ1zaZt74CgvgV3NR7tnURJ/mJOKC5X3nwon/WdwgxI= k8s.io/kms v0.29.0/go.mod h1:mB0f9HLxRXeXUfHfn1A7rpwOlzXI1gIWu86z6buNoYA= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= diff --git a/vendor/modules.txt b/vendor/modules.txt index 7b6315e15..f2edf8dbe 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1408,7 +1408,7 @@ k8s.io/controller-manager/pkg/features k8s.io/controller-manager/pkg/features/register k8s.io/controller-manager/pkg/leadermigration/config k8s.io/controller-manager/pkg/leadermigration/options -# k8s.io/klog/v2 v2.120.0 +# k8s.io/klog/v2 v2.120.1 ## explicit; go 1.18 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer From 2c2344d83c3661cb8668c004a386f24209bcb0fe Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Sat, 20 Jan 2024 03:33:42 +0000 Subject: [PATCH 060/157] fix: increase volume cloning timeout and reduce time cost fix --- pkg/blob/blob.go | 5 +++++ pkg/blob/blob_test.go | 26 ++++++++++++++------------ pkg/blob/controllerserver.go | 7 +++---- 3 files changed, 22 insertions(+), 16 deletions(-) diff --git a/pkg/blob/blob.go b/pkg/blob/blob.go index 2f8bce6d7..702d63e61 100644 --- a/pkg/blob/blob.go +++ b/pkg/blob/blob.go @@ -176,6 +176,7 @@ type DriverOptions struct { EnableAznfsMount bool VolStatsCacheExpireInMinutes int SasTokenExpirationMinutes int + WaitForAzCopyTimeoutMinutes int EnableVolumeMountGroup bool FSGroupChangePolicy string } @@ -195,6 +196,7 @@ func (option *DriverOptions) AddFlags() { flag.BoolVar(&option.EnableAznfsMount, "enable-aznfs-mount", false, "replace nfs mount with aznfs mount") flag.IntVar(&option.VolStatsCacheExpireInMinutes, "vol-stats-cache-expire-in-minutes", 10, "The cache expire time in minutes for volume stats cache") flag.IntVar(&option.SasTokenExpirationMinutes, "sas-token-expiration-minutes", 1440, "sas token expiration minutes during volume cloning") + flag.IntVar(&option.WaitForAzCopyTimeoutMinutes, "wait-for-azcopy-timeout-minutes", 18, "timeout in minutes for waiting for azcopy to finish") flag.BoolVar(&option.EnableVolumeMountGroup, "enable-volume-mount-group", true, "indicates whether enabling VOLUME_MOUNT_GROUP") flag.StringVar(&option.FSGroupChangePolicy, "fsgroup-change-policy", "", "indicates how the volume's ownership 
will be changed by the driver, OnRootMismatch is the default value") } @@ -239,6 +241,8 @@ type Driver struct { azcopySasTokenCache azcache.Resource // sas expiry time for azcopy in volume clone sasTokenExpirationMinutes int + // timeout in minutes for waiting for azcopy to finish + waitForAzCopyTimeoutMinutes int // azcopy for provide exec mock for ut azcopy *util.Azcopy } @@ -261,6 +265,7 @@ func NewDriver(options *DriverOptions, kubeClient kubernetes.Interface, cloud *p mountPermissions: options.MountPermissions, enableAznfsMount: options.EnableAznfsMount, sasTokenExpirationMinutes: options.SasTokenExpirationMinutes, + waitForAzCopyTimeoutMinutes: options.WaitForAzCopyTimeoutMinutes, fsGroupChangePolicy: options.FSGroupChangePolicy, azcopy: &util.Azcopy{}, KubeClient: kubeClient, diff --git a/pkg/blob/blob_test.go b/pkg/blob/blob_test.go index 4283f0bbd..5c503e238 100644 --- a/pkg/blob/blob_test.go +++ b/pkg/blob/blob_test.go @@ -50,12 +50,13 @@ const ( func NewFakeDriver() *Driver { driverOptions := DriverOptions{ - NodeID: fakeNodeID, - DriverName: DefaultDriverName, - BlobfuseProxyEndpoint: "", - EnableBlobfuseProxy: false, - BlobfuseProxyConnTimout: 5, - EnableBlobMockMount: false, + NodeID: fakeNodeID, + DriverName: DefaultDriverName, + BlobfuseProxyEndpoint: "", + EnableBlobfuseProxy: false, + BlobfuseProxyConnTimout: 5, + WaitForAzCopyTimeoutMinutes: 1, + EnableBlobMockMount: false, } driver := NewDriver(&driverOptions, nil, &azure.Cloud{}) driver.Name = fakeDriverName @@ -79,12 +80,13 @@ func TestNewFakeDriver(t *testing.T) { func TestNewDriver(t *testing.T) { driverOptions := DriverOptions{ - NodeID: fakeNodeID, - DriverName: DefaultDriverName, - BlobfuseProxyEndpoint: "", - EnableBlobfuseProxy: false, - BlobfuseProxyConnTimout: 5, - EnableBlobMockMount: false, + NodeID: fakeNodeID, + DriverName: DefaultDriverName, + BlobfuseProxyEndpoint: "", + EnableBlobfuseProxy: false, + BlobfuseProxyConnTimout: 5, + WaitForAzCopyTimeoutMinutes: 1, + EnableBlobMockMount: false, } driver := NewDriver(&driverOptions, nil, &azure.Cloud{}) fakedriver := NewFakeDriver() diff --git a/pkg/blob/controllerserver.go b/pkg/blob/controllerserver.go index b55f25f0c..e5c503de0 100644 --- a/pkg/blob/controllerserver.go +++ b/pkg/blob/controllerserver.go @@ -62,8 +62,7 @@ const ( SPN = "SPN" authorizationPermissionMismatch = "AuthorizationPermissionMismatch" - waitForCopyInterval = 5 * time.Second - waitForCopyTimeout = 3 * time.Minute + waitForAzCopyInterval = 2 * time.Second ) // CreateVolume provisions a volume @@ -757,8 +756,8 @@ func (d *Driver) copyBlobContainer(req *csi.CreateVolumeRequest, accountSasToken return fmt.Errorf("srcContainerName(%s) or dstContainerName(%s) is empty", srcContainerName, dstContainerName) } - timeAfter := time.After(waitForCopyTimeout) - timeTick := time.Tick(waitForCopyInterval) + timeAfter := time.After(time.Duration(d.waitForAzCopyTimeoutMinutes) * time.Minute) + timeTick := time.Tick(waitForAzCopyInterval) srcPath := fmt.Sprintf("https://%s.blob.%s/%s%s", accountName, storageEndpointSuffix, srcContainerName, accountSasToken) dstPath := fmt.Sprintf("https://%s.blob.%s/%s%s", accountName, storageEndpointSuffix, dstContainerName, accountSasToken) From c7fd18a5b2c0ed92e9469c9452913b84d940f627 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 28 Jan 2024 17:53:04 +0000 Subject: [PATCH 061/157] chore(deps): bump build-image/debian-base in /pkg/blobplugin Bumps build-image/debian-base from bookworm-v1.0.0 to 
bookworm-v1.0.1. --- updated-dependencies: - dependency-name: build-image/debian-base dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- pkg/blobplugin/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/blobplugin/Dockerfile b/pkg/blobplugin/Dockerfile index 6d35e7cf6..b82c1978e 100644 --- a/pkg/blobplugin/Dockerfile +++ b/pkg/blobplugin/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM registry.k8s.io/build-image/debian-base:bookworm-v1.0.0 +FROM registry.k8s.io/build-image/debian-base:bookworm-v1.0.1 ARG ARCH=amd64 ARG binary=./_output/${ARCH}/blobplugin From 50ae95e2dbc89916c35e73f455d5bf3a333d3df0 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Thu, 1 Feb 2024 09:23:34 +0000 Subject: [PATCH 062/157] cleanup: update new chart versions --- README.md | 6 +- charts/README.md | 2 +- charts/index.yaml | 107 +++--- charts/v1.21.7/blob-csi-driver-v1.21.7.tgz | Bin 0 -> 5517 bytes charts/v1.21.7/blob-csi-driver/Chart.yaml | 5 + .../blob-csi-driver/templates/NOTES.txt | 5 + .../blob-csi-driver/templates/_helpers.tpl | 49 +++ .../templates/csi-blob-controller.yaml | 212 ++++++++++++ .../templates/csi-blob-driver.yaml | 16 + .../templates/csi-blob-node.yaml | 282 +++++++++++++++ .../templates/rbac-csi-blob-controller.yaml | 115 ++++++ .../templates/rbac-csi-blob-node.yaml | 29 ++ .../serviceaccount-csi-blob-controller.yaml | 9 + .../serviceaccount-csi-blob-node.yaml | 9 + charts/v1.21.7/blob-csi-driver/values.yaml | 167 +++++++++ charts/v1.22.5/blob-csi-driver-v1.22.5.tgz | Bin 0 -> 5869 bytes charts/v1.22.5/blob-csi-driver/Chart.yaml | 5 + .../blob-csi-driver/templates/NOTES.txt | 5 + .../blob-csi-driver/templates/_helpers.tpl | 49 +++ .../templates/csi-blob-controller.yaml | 215 ++++++++++++ .../templates/csi-blob-driver.yaml | 14 + .../templates/csi-blob-node.yaml | 323 +++++++++++++++++ .../templates/rbac-csi-blob-controller.yaml | 115 ++++++ .../templates/rbac-csi-blob-node.yaml | 29 ++ .../serviceaccount-csi-blob-controller.yaml | 17 + .../serviceaccount-csi-blob-node.yaml | 17 + charts/v1.22.5/blob-csi-driver/values.yaml | 180 ++++++++++ charts/v1.23.3/blob-csi-driver-v1.23.3.tgz | Bin 0 -> 5944 bytes charts/v1.23.3/blob-csi-driver/Chart.yaml | 5 + .../blob-csi-driver/templates/NOTES.txt | 5 + .../blob-csi-driver/templates/_helpers.tpl | 49 +++ .../templates/csi-blob-controller.yaml | 216 ++++++++++++ .../templates/csi-blob-driver.yaml | 16 + .../templates/csi-blob-node.yaml | 326 ++++++++++++++++++ .../templates/rbac-csi-blob-controller.yaml | 115 ++++++ .../templates/rbac-csi-blob-node.yaml | 29 ++ .../serviceaccount-csi-blob-controller.yaml | 17 + .../serviceaccount-csi-blob-node.yaml | 17 + charts/v1.23.3/blob-csi-driver/values.yaml | 180 ++++++++++ deploy/v1.21.7/csi-blob-controller.yaml | 143 ++++++++ deploy/v1.21.7/csi-blob-driver.yaml | 11 + deploy/v1.21.7/csi-blob-node.yaml | 203 +++++++++++ deploy/v1.21.7/kustomization.yaml | 10 + deploy/v1.21.7/rbac-csi-blob-controller.yaml | 108 ++++++ deploy/v1.21.7/rbac-csi-blob-node.yaml | 30 ++ deploy/v1.22.5/csi-blob-controller.yaml | 143 ++++++++ deploy/v1.22.5/csi-blob-driver.yaml | 12 + deploy/v1.22.5/csi-blob-node.yaml | 203 +++++++++++ deploy/v1.22.5/kustomization.yaml | 10 + deploy/v1.22.5/rbac-csi-blob-controller.yaml | 108 ++++++ deploy/v1.22.5/rbac-csi-blob-node.yaml | 30 ++ deploy/v1.23.3/csi-blob-controller.yaml | 144 ++++++++ deploy/v1.23.3/csi-blob-driver.yaml | 12 + 
deploy/v1.23.3/csi-blob-node.yaml | 208 +++++++++++ deploy/v1.23.3/kustomization.yaml | 10 + deploy/v1.23.3/rbac-csi-blob-controller.yaml | 108 ++++++ deploy/v1.23.3/rbac-csi-blob-node.yaml | 30 ++ docs/install-blob-csi-driver.md | 6 +- docs/install-csi-driver-v1.21.7.md | 47 +++ docs/install-csi-driver-v1.22.5.md | 47 +++ docs/install-csi-driver-v1.23.3.md | 47 +++ 61 files changed, 4580 insertions(+), 47 deletions(-) create mode 100644 charts/v1.21.7/blob-csi-driver-v1.21.7.tgz create mode 100644 charts/v1.21.7/blob-csi-driver/Chart.yaml create mode 100644 charts/v1.21.7/blob-csi-driver/templates/NOTES.txt create mode 100644 charts/v1.21.7/blob-csi-driver/templates/_helpers.tpl create mode 100644 charts/v1.21.7/blob-csi-driver/templates/csi-blob-controller.yaml create mode 100644 charts/v1.21.7/blob-csi-driver/templates/csi-blob-driver.yaml create mode 100644 charts/v1.21.7/blob-csi-driver/templates/csi-blob-node.yaml create mode 100644 charts/v1.21.7/blob-csi-driver/templates/rbac-csi-blob-controller.yaml create mode 100644 charts/v1.21.7/blob-csi-driver/templates/rbac-csi-blob-node.yaml create mode 100644 charts/v1.21.7/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml create mode 100644 charts/v1.21.7/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml create mode 100644 charts/v1.21.7/blob-csi-driver/values.yaml create mode 100644 charts/v1.22.5/blob-csi-driver-v1.22.5.tgz create mode 100644 charts/v1.22.5/blob-csi-driver/Chart.yaml create mode 100644 charts/v1.22.5/blob-csi-driver/templates/NOTES.txt create mode 100644 charts/v1.22.5/blob-csi-driver/templates/_helpers.tpl create mode 100644 charts/v1.22.5/blob-csi-driver/templates/csi-blob-controller.yaml create mode 100644 charts/v1.22.5/blob-csi-driver/templates/csi-blob-driver.yaml create mode 100644 charts/v1.22.5/blob-csi-driver/templates/csi-blob-node.yaml create mode 100644 charts/v1.22.5/blob-csi-driver/templates/rbac-csi-blob-controller.yaml create mode 100644 charts/v1.22.5/blob-csi-driver/templates/rbac-csi-blob-node.yaml create mode 100644 charts/v1.22.5/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml create mode 100644 charts/v1.22.5/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml create mode 100644 charts/v1.22.5/blob-csi-driver/values.yaml create mode 100644 charts/v1.23.3/blob-csi-driver-v1.23.3.tgz create mode 100644 charts/v1.23.3/blob-csi-driver/Chart.yaml create mode 100644 charts/v1.23.3/blob-csi-driver/templates/NOTES.txt create mode 100644 charts/v1.23.3/blob-csi-driver/templates/_helpers.tpl create mode 100644 charts/v1.23.3/blob-csi-driver/templates/csi-blob-controller.yaml create mode 100644 charts/v1.23.3/blob-csi-driver/templates/csi-blob-driver.yaml create mode 100644 charts/v1.23.3/blob-csi-driver/templates/csi-blob-node.yaml create mode 100644 charts/v1.23.3/blob-csi-driver/templates/rbac-csi-blob-controller.yaml create mode 100644 charts/v1.23.3/blob-csi-driver/templates/rbac-csi-blob-node.yaml create mode 100644 charts/v1.23.3/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml create mode 100644 charts/v1.23.3/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml create mode 100644 charts/v1.23.3/blob-csi-driver/values.yaml create mode 100644 deploy/v1.21.7/csi-blob-controller.yaml create mode 100644 deploy/v1.21.7/csi-blob-driver.yaml create mode 100644 deploy/v1.21.7/csi-blob-node.yaml create mode 100644 deploy/v1.21.7/kustomization.yaml create mode 100644 deploy/v1.21.7/rbac-csi-blob-controller.yaml create mode 100644 
deploy/v1.21.7/rbac-csi-blob-node.yaml create mode 100644 deploy/v1.22.5/csi-blob-controller.yaml create mode 100644 deploy/v1.22.5/csi-blob-driver.yaml create mode 100644 deploy/v1.22.5/csi-blob-node.yaml create mode 100644 deploy/v1.22.5/kustomization.yaml create mode 100644 deploy/v1.22.5/rbac-csi-blob-controller.yaml create mode 100644 deploy/v1.22.5/rbac-csi-blob-node.yaml create mode 100644 deploy/v1.23.3/csi-blob-controller.yaml create mode 100644 deploy/v1.23.3/csi-blob-driver.yaml create mode 100644 deploy/v1.23.3/csi-blob-node.yaml create mode 100644 deploy/v1.23.3/kustomization.yaml create mode 100644 deploy/v1.23.3/rbac-csi-blob-controller.yaml create mode 100644 deploy/v1.23.3/rbac-csi-blob-node.yaml create mode 100644 docs/install-csi-driver-v1.21.7.md create mode 100644 docs/install-csi-driver-v1.22.5.md create mode 100644 docs/install-csi-driver-v1.23.3.md diff --git a/README.md b/README.md index 9123aa2b8..ffcee25ad 100644 --- a/README.md +++ b/README.md @@ -18,9 +18,9 @@ Disclaimer: Deploying this driver manually is not an officially supported Micros |driver version |Image | supported k8s version | |----------------|------------------------------------------------------|-----------------------| |master branch |mcr.microsoft.com/k8s/csi/blob-csi:latest | 1.21+ | -|v1.23.2 |mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.23.2 | 1.21+ | -|v1.22.4 |mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.22.4 | 1.21+ | -|v1.21.6 |mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.21.6 | 1.21+ | +|v1.23.3 |mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.23.3 | 1.21+ | +|v1.22.5 |mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.22.5 | 1.21+ | +|v1.21.7 |mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.21.7 | 1.21+ | ### Driver parameters Please refer to `blob.csi.azure.com` [driver parameters](./docs/driver-parameters.md) diff --git a/charts/README.md b/charts/README.md index d914f4c9b..4ad7df3f3 100644 --- a/charts/README.md +++ b/charts/README.md @@ -5,7 +5,7 @@ ### Tips - configure with [blobfuse-proxy](../deploy/blobfuse-proxy) to make blobfuse mount still available after driver restart - > Note: [blobfuse-proxy](../deploy/blobfuse-proxy) is supported on CoreOS(OpenShift) from v1.23.2 + > Note: [blobfuse-proxy](../deploy/blobfuse-proxy) is supported on CoreOS(OpenShift) from v1.23.3 - specify `node.enableBlobfuseProxy=true` together with [blobfuse-proxy](../deploy/blobfuse-proxy) - run controller on control plane node: `--set controller.runOnControlPlane=true` - set replica of controller as `1`: `--set controller.replicas=1` diff --git a/charts/index.yaml b/charts/index.yaml index d285d584f..de298b5b9 100644 --- a/charts/index.yaml +++ b/charts/index.yaml @@ -1,54 +1,72 @@ apiVersion: v1 entries: blob-csi-driver: + - apiVersion: v1 + appVersion: v1.23.3 + created: "2024-02-01T09:23:18.012841151Z" + description: Azure Blob Storage CSI driver + digest: 7f9c641bf822c5421fc291dd65cbf274bec66ab363078fba618a9ce7dec400eb + name: blob-csi-driver + urls: + - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.23.3/blob-csi-driver-v1.23.3.tgz + version: v1.23.3 - apiVersion: v1 appVersion: v1.23.2 - created: "2023-11-24T09:01:28.995261524Z" + created: "2024-02-01T09:23:18.011599935Z" description: Azure Blob Storage CSI driver - digest: 9452372c032a09eefed4b4064a547143e6f62b7628992db22784844eb35bee7d + digest: 057d6658c5879ee7e564d59275366521dc0a2e311c0527e570eaccd544622e60 name: blob-csi-driver urls: - 
https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.23.2/blob-csi-driver-v1.23.2.tgz version: v1.23.2 - apiVersion: v1 appVersion: v1.23.1 - created: "2023-11-24T09:01:28.994649337Z" + created: "2024-02-01T09:23:18.010964434Z" description: Azure Blob Storage CSI driver - digest: d0345309791c22d197c44e5d74b7acd62ce652b3a8756d08bf878f85adb0de7b + digest: 66215f12a4e3acdcf09416d817b737e14546058b081a2cfd8bf9ef507229ca07 name: blob-csi-driver urls: - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.23.1/blob-csi-driver-v1.23.1.tgz version: v1.23.1 - apiVersion: v1 appVersion: v1.23.0 - created: "2023-11-24T09:01:28.994017634Z" + created: "2024-02-01T09:23:18.010356842Z" description: Azure Blob Storage CSI driver digest: 57151e21e33660522f25694bd8ae985e5e17c7ffe09904ad2af4025e8bf1da72 name: blob-csi-driver urls: - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.23.0/blob-csi-driver-v1.23.0.tgz version: v1.23.0 + - apiVersion: v1 + appVersion: v1.22.5 + created: "2024-02-01T09:23:18.009743904Z" + description: Azure Blob Storage CSI driver + digest: 653f8ea6121aa2e9d1bee8fc1e80f30960f0e81b40d356a019da9fa03c4af5e4 + name: blob-csi-driver + urls: + - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.22.5/blob-csi-driver-v1.22.5.tgz + version: v1.22.5 - apiVersion: v1 appVersion: v1.22.4 - created: "2023-11-24T09:01:28.993398586Z" + created: "2024-02-01T09:23:18.009098476Z" description: Azure Blob Storage CSI driver - digest: 20a3f200282472f27ab45eccf74c87a205cc647aafd5ca481762b5cd3bc5249b + digest: 6c38e79d2f50616daac0658cfa5b1a569e6ff8ce8f24ed40f563e87fb1d1340a name: blob-csi-driver urls: - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.22.4/blob-csi-driver-v1.22.4.tgz version: v1.22.4 - apiVersion: v1 appVersion: v1.22.3 - created: "2023-11-24T09:01:28.992740245Z" + created: "2024-02-01T09:23:18.008446506Z" description: Azure Blob Storage CSI driver - digest: d8dba916d9540447f2b7ba33e8f56dafe7459eeffb6a7323645f359b19b6a6eb + digest: 6cdee296d22ecd330f477f2ca6da51b07320c546c04ae46c23eef48146b772c1 name: blob-csi-driver urls: - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.22.3/blob-csi-driver-v1.22.3.tgz version: v1.22.3 - apiVersion: v1 appVersion: v1.22.2 - created: "2023-11-24T09:01:28.991669251Z" + created: "2024-02-01T09:23:18.007784088Z" description: Azure Blob Storage CSI driver digest: 259e66dc12db7310fe1c51e49c964398e0a6b7d511133916dd7d25f748f0b791 name: blob-csi-driver @@ -57,16 +75,25 @@ entries: version: v1.22.2 - apiVersion: v1 appVersion: v1.22.1 - created: "2023-11-24T09:01:28.990518632Z" + created: "2024-02-01T09:23:18.006937854Z" description: Azure Blob Storage CSI driver digest: 8329d477d55c82f97bb09fb172c5f39a1677bedc13c7410bd93b306194516438 name: blob-csi-driver urls: - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.22.1/blob-csi-driver-v1.22.1.tgz version: v1.22.1 + - apiVersion: v1 + appVersion: v1.21.7 + created: "2024-02-01T09:23:18.005346707Z" + description: Azure Blob Storage CSI driver + digest: 1095721182d611e2556c611dd330758d8130fe66493db4f9189586a9219896d3 + name: blob-csi-driver + urls: + - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.21.7/blob-csi-driver-v1.21.7.tgz + version: v1.21.7 - apiVersion: v1 appVersion: v1.21.6 - created: "2023-11-24T09:01:28.989918718Z" + created: 
"2024-02-01T09:23:18.004752018Z" description: Azure Blob Storage CSI driver digest: d5ba1f92795ec45970eb6e5fc54aa13a5684f9936216c064f8a3843bf722bf54 name: blob-csi-driver @@ -75,7 +102,7 @@ entries: version: v1.21.6 - apiVersion: v1 appVersion: v1.21.5 - created: "2023-11-24T09:01:28.989326165Z" + created: "2024-02-01T09:23:18.004171073Z" description: Azure Blob Storage CSI driver digest: b403e9d49abfe076ecd83d6dd50166347ee4305f33dc840019474b2876723b9b name: blob-csi-driver @@ -84,7 +111,7 @@ entries: version: v1.21.5 - apiVersion: v1 appVersion: v1.21.4 - created: "2023-11-24T09:01:28.988732801Z" + created: "2024-02-01T09:23:18.003597266Z" description: Azure Blob Storage CSI driver digest: e4fa13670caf6b0d3e9fefa55d100daa439cd7187dabd45318ab03c7d4b17710 name: blob-csi-driver @@ -93,7 +120,7 @@ entries: version: v1.21.4 - apiVersion: v1 appVersion: v1.20.3 - created: "2023-11-24T09:01:28.98813592Z" + created: "2024-02-01T09:23:18.002987933Z" description: Azure Blob Storage CSI driver digest: 8c2c20547b2e0e1b39d2f2efd04c1bd778f14af5feae2bda86d722dac3c02643 name: blob-csi-driver @@ -102,7 +129,7 @@ entries: version: v1.20.3 - apiVersion: v1 appVersion: v1.19.6 - created: "2023-11-24T09:01:28.986827359Z" + created: "2024-02-01T09:23:18.00174466Z" description: Azure Blob Storage CSI driver digest: 0007ef225b5658d3989aa6fdc3a91a4b33696a438eee46ad9a675af615cbdf21 name: blob-csi-driver @@ -111,7 +138,7 @@ entries: version: v1.19.6 - apiVersion: v1 appVersion: v1.19.5 - created: "2023-11-24T09:01:28.986187133Z" + created: "2024-02-01T09:23:18.001136542Z" description: Azure Blob Storage CSI driver digest: 183c3e5cd84b709f1455cc7c84ed5bd573e8a24149fd6442d38999835b0a1711 name: blob-csi-driver @@ -120,7 +147,7 @@ entries: version: v1.19.5 - apiVersion: v1 appVersion: v1.18.0 - created: "2023-11-24T09:01:28.984655313Z" + created: "2024-02-01T09:23:18.000481131Z" description: Azure Blob Storage CSI driver digest: 3eac15488da5be7d1e78431929f7cda35bceb1af3fe107ffbd84606e047c9204 name: blob-csi-driver @@ -129,7 +156,7 @@ entries: version: v1.18.0 - apiVersion: v1 appVersion: v1.17.0 - created: "2023-11-24T09:01:28.983826321Z" + created: "2024-02-01T09:23:17.998975288Z" description: Azure Blob Storage CSI driver digest: 22cfa17fc5e8d771ff8edd26729266a9a8ee55c0e150df85ef15698f7fe985e9 name: blob-csi-driver @@ -138,7 +165,7 @@ entries: version: v1.17.0 - apiVersion: v1 appVersion: v1.16.0 - created: "2023-11-24T09:01:28.983050388Z" + created: "2024-02-01T09:23:17.997778881Z" description: Azure Blob Storage CSI driver digest: bf6249c0e3e3d3d009d4c79ceb7fda9a56c0565b969de753628792ea3ea5ece8 name: blob-csi-driver @@ -147,7 +174,7 @@ entries: version: v1.16.0 - apiVersion: v1 appVersion: v1.15.0 - created: "2023-11-24T09:01:28.982324298Z" + created: "2024-02-01T09:23:17.996897911Z" description: Azure Blob Storage CSI driver digest: 8daa35cd4957695cb64b45da05a15b4020df5545a8ac44c4668dad4bba82c8a9 name: blob-csi-driver @@ -156,7 +183,7 @@ entries: version: v1.15.0 - apiVersion: v1 appVersion: v1.14.0 - created: "2023-11-24T09:01:28.981585971Z" + created: "2024-02-01T09:23:17.995995384Z" description: Azure Blob Storage CSI driver digest: 442bc579b231aab626b9e474e2c0ed3f101d47d61c99aa9a7f863af7ce268d9d name: blob-csi-driver @@ -165,7 +192,7 @@ entries: version: v1.14.0 - apiVersion: v1 appVersion: v1.13.0 - created: "2023-11-24T09:01:28.980866908Z" + created: "2024-02-01T09:23:17.995159855Z" description: Azure Blob Storage CSI driver digest: b577b0b771138109aa90eb09d56fc07273ca0b584a263ee8f789e35796279f31 name: 
blob-csi-driver @@ -174,7 +201,7 @@ entries: version: v1.13.0 - apiVersion: v1 appVersion: v1.12.0 - created: "2023-11-24T09:01:28.980076348Z" + created: "2024-02-01T09:23:17.99433658Z" description: Azure Blob Storage CSI driver digest: 124e87af2581b374b89a39940698620c23d3eae6dcee518d302461ffea93e9a8 name: blob-csi-driver @@ -183,7 +210,7 @@ entries: version: v1.12.0 - apiVersion: v1 appVersion: v1.11.0 - created: "2023-11-24T09:01:28.979289047Z" + created: "2024-02-01T09:23:17.993482048Z" description: Azure Blob Storage CSI driver digest: 07c4d76017491b3d0bdd70de90e814096938bf7916da0c149c3805294bd57560 name: blob-csi-driver @@ -192,7 +219,7 @@ entries: version: v1.11.0 - apiVersion: v1 appVersion: v1.10.0 - created: "2023-11-24T09:01:28.978511783Z" + created: "2024-02-01T09:23:17.99257752Z" description: Azure Blob Storage CSI driver digest: 79716efa958385adf57eb3570843e1b4512d8c801e8e070625e94264f3e917a9 name: blob-csi-driver @@ -201,7 +228,7 @@ entries: version: v1.10.0 - apiVersion: v1 appVersion: v1.9.0 - created: "2023-11-24T09:01:29.002027136Z" + created: "2024-02-01T09:23:18.019074348Z" description: Azure Blob Storage CSI driver digest: fca0b9215d3277346f68c643fb3ead75158971f0d1945ab01ec559196f3cf842 name: blob-csi-driver @@ -210,7 +237,7 @@ entries: version: v1.9.0 - apiVersion: v1 appVersion: v1.8.0 - created: "2023-11-24T09:01:29.001031812Z" + created: "2024-02-01T09:23:18.018176202Z" description: Azure Blob Storage CSI driver digest: 3b78e2ab4f33577c54d4f57276c824717d2ad2aa3741210e938fcaf927bc751f name: blob-csi-driver @@ -219,7 +246,7 @@ entries: version: v1.8.0 - apiVersion: v1 appVersion: v1.7.0 - created: "2023-11-24T09:01:29.000277142Z" + created: "2024-02-01T09:23:18.017448813Z" description: Azure Blob Storage CSI driver digest: 28da5b55c3d2689d6da85eb7da344385e9cb99bdb2af18c24fea93670abfe7ea name: blob-csi-driver @@ -228,7 +255,7 @@ entries: version: v1.7.0 - apiVersion: v1 appVersion: v1.6.0 - created: "2023-11-24T09:01:28.999499839Z" + created: "2024-02-01T09:23:18.016717326Z" description: Azure Blob Storage CSI driver digest: 6f24f2e6623f6f8862e47d4fbdf13b5f351ceec6bb9a4591ef7fc2fca9fc1eef name: blob-csi-driver @@ -237,7 +264,7 @@ entries: version: v1.6.0 - apiVersion: v1 appVersion: v1.5.0 - created: "2023-11-24T09:01:28.997945662Z" + created: "2024-02-01T09:23:18.016021449Z" description: Azure Blob Storage CSI driver digest: 95d14c9b70b319760d388ea47727c8c97e9287867a8852aeb67b7175b52fe8f5 name: blob-csi-driver @@ -246,7 +273,7 @@ entries: version: v1.5.0 - apiVersion: v1 appVersion: v1.4.1 - created: "2023-11-24T09:01:28.997274031Z" + created: "2024-02-01T09:23:18.015308202Z" description: Azure Blob Storage CSI driver digest: 5fcf69c449f065fa1d5722e5a7fed8a28000efa790907e9ff4b552c5fbd16d22 name: blob-csi-driver @@ -255,7 +282,7 @@ entries: version: v1.4.1 - apiVersion: v1 appVersion: v1.4.0 - created: "2023-11-24T09:01:28.996582891Z" + created: "2024-02-01T09:23:18.014628729Z" description: Azure Blob Storage CSI driver digest: b466543344a6411f6130ba87b093955d39ab8614c6b4ed8505a0a0c96073cb33 name: blob-csi-driver @@ -264,7 +291,7 @@ entries: version: v1.4.0 - apiVersion: v1 appVersion: v1.3.0 - created: "2023-11-24T09:01:28.995930012Z" + created: "2024-02-01T09:23:18.013782983Z" description: Azure Blob Storage CSI driver digest: 58d02cb70a3a966b349d62e880b7149fb06ac009474e35e580784fd3c98a5b07 name: blob-csi-driver @@ -273,7 +300,7 @@ entries: version: v1.3.0 - apiVersion: v1 appVersion: v1.2.0 - created: "2023-11-24T09:01:28.987468867Z" + created: 
"2024-02-01T09:23:18.002358744Z" description: Azure Blob Storage CSI driver digest: 27fb89f20b5fddc7329e6d7c2374857b22c1d61592e397a53f47121eea68c344 name: blob-csi-driver @@ -282,7 +309,7 @@ entries: version: v1.2.0 - apiVersion: v1 appVersion: v1.1.0 - created: "2023-11-24T09:01:28.977742046Z" + created: "2024-02-01T09:23:17.991741748Z" description: Azure Blob Storage CSI driver digest: a251a55243de207c69ef53f72abee45e93b72fa4fc43dc204b7f1cdfd459acdb name: blob-csi-driver @@ -291,7 +318,7 @@ entries: version: v1.1.0 - apiVersion: v1 appVersion: v1.0.0 - created: "2023-11-24T09:01:28.9770962Z" + created: "2024-02-01T09:23:17.990943476Z" description: Azure Blob Storage CSI driver digest: e83f037a165eafc83a978bd7e6bf6221b052ac34363aecb12e6a73607dc58b89 name: blob-csi-driver @@ -300,11 +327,11 @@ entries: version: v1.0.0 - apiVersion: v1 appVersion: latest - created: "2023-11-24T09:01:28.976577418Z" + created: "2024-02-01T09:23:17.99033949Z" description: Azure Blob Storage CSI driver - digest: 1cc9092f913ecc7342ae96c0b054408a3c199bea6998d8adb3b7d5bb9a3edc02 + digest: 7e2732acd8fe3e6b26407065daa11adc28cfd6d67c83f60b38546ecf4bb88704 name: blob-csi-driver urls: - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/latest/blob-csi-driver-v0.0.0.tgz version: v0.0.0 -generated: "2023-11-24T09:01:28.975686234Z" +generated: "2024-02-01T09:23:17.989245883Z" diff --git a/charts/v1.21.7/blob-csi-driver-v1.21.7.tgz b/charts/v1.21.7/blob-csi-driver-v1.21.7.tgz new file mode 100644 index 0000000000000000000000000000000000000000..bf0ac4f2e03df88a65c1762254303ea8dadae43d GIT binary patch literal 5517 zcmV;86>{nyiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PKBRbK5ww@I32Rpp@C#+5JM@e4AC3AAC`=t~j#Fa@ zIS%|EwpLoLR_pliQ2*a*wTl0@+Q&yfv=5JtI-SmO=j8B*R{OAheDVWoZI6WN5($a< z53N_@Dt7KqQgBE<0~dtRE}FG#I1DpC8=ZFJq~?KeISHkHc=mh50iFA7gocuFJOOku zyh4V1Y7__1MFpO!&597eb*J}bQHt_E!+r!}y9!{r{I^@3ql2RSA9fD+^1q8h0z83k z4WSVha0?;pqQK>ifVi9qHkJ*S1?sKOMyeJ<96~0DWPIL5O(sP1AsPXvAVDZTY9doWep)$kO5~iqErR&^^hsX4bw&$oCl4AMr##H zpUi-Q5ChIeupY*wNvN7+@O2tLuf|822bWsTZec=%S zngBi{E}Xe8i>OrN>vF&nx=8W}&>QqG)fM&kTNKwt*i;ti3-Nu`XVb((W5&^(MGZ6r zsfJ^WeF2D!)kE|JQ~fX!pnh}2zR-rk$Ef7Y_knlSOglyv7Y`kQ(Ho?QMQn_QF$pof z@t7z08P~%fGi?T(X2T5t)%Xaz#X(Wv7bb&djSEGot85Jg8dJ(7R=+e)V9(S4`96|U zKq2!)Eo9zVTC0nmp37i-JOY0iglk>FG)9htE+QdWP-sV}WGDcMJuGp})m1VL5qg6j zeijZ6!?FpaXu5_OXoCr(0Vj+TIlu6+(C~o@WmoCRR74>W@fn!`;viVWT|i>WxLlX} zT}E99WhwxAbec*C!Wgv^a)3Zvb%+3st)4Nx?o7pM+xdcB|D!&$V);=UOcz5({Rth=$@zrJgSIc%@R^MOY~?N53>n zjaOY;gxG~HQWN5s@v9-lYwSxe2-WB=+AP?A%7pBL{KELdLQlvk5sUG0S>$_4pgbb* z$Olg;fs}CMF;L?wkq04A4>5{94zH5tHKaI}6ITovmtEBP$MM0TedU`1_T@CKrq&V; zp-)^ax=8V!(Ak!IMOQ$iNPa2e z1HCpKwOSuZ{FK9Q5r}LpH;fb)1f^!3q?M(=)yUefM3x#$)i^q@#8F(xzt+&<@t7!a zj#Zq}{zFizmGStIWIpg%`)Vi-dVqNs)%8ljnGcP;h6*$X)nkk-RIL@Xdhd}4DUug3 z9)l~psNQEocM4wQLwzmyv2cWn8NWIf=M`vbdTA}ICH455DJ1>&iu#1YXXZx%e1Q3x z>w*yfB651=0hO8^5+M)p<0pz|nD|ONby0gg?Q&4k4?q=wbDxdIk$?ebj~OhZ++z)q zP$9AJCk+-INGmTq8TGoYMaR;lQ*0wZe#DPAA<%ckhkg`f8U=+H^?#VXpJ^HE>rbJF zC0w2_+9jwiusekTmeVe32HH5sq0-=YB!HpBL3l-Vl}nP*wt=#+2EYR%gt2#8CnqOm z?YdKaSGleE#wzx8NH)@Z76nin1C|ljvtW4q-S9nUQJCzTlDRc6--9%AHvw{d+dN{BXSx z$9f&TK?|8i#wVm6|3sORTJg8H=(2bI>HS-D)qi*M7X9n&wtv-s{}%ObZ*OjuS)**z zScWwb>OuOu<`Wt{s@^Eo>kS+7QN9|-D*yk%ai`$_J32mT?frkdD0fp>?$bv^ph>(J@gVfs9P}1iZ&l;n zVWU>NW5}I?`>+C-+1pAiBk>l6K41ZA2hpv?s=$>#avYkNE~igVsBsIrUSnazPoK(%58E{TKKz}G>nY!rsK>tYs zW4fTalL7Ay8?wF~W4G_)A%6sd9K5n*AR$)kBnvGMLTYIeePl@wXX36*11|BG<` zBI@}Ln7`wV1NuWdO_Y|Z|Gzq_-`6+k3=0BAQKHyH6_H5=Wb^dP0e8 
z=jNt?tc~N8z{3SIOh;uV*#6FCCe-W7(GoHG11+vd`m^n{qM9|t&{xv-)Wtk?BoAC zDG!8tU394p`v9n{WfOvM7>eesT}y&qsry06TB1x+_Mq|dU~0wA$0`%PhT_%2_)c`# zY!z69%FBkoST6oRlu!?-M2D)n2tfxN(;$W&eDdC)NLHb4?BbK|^g2W4+JxFjEw6AX z$;YN)B7ZUZ%)o0`SB{bw(6a8p{ak92YVAdwG*PF8RV97D1YSlP=Rs$JxM`bA{I_da z#|q80;vZ_7D?Yr2T(YjFy$nG~}3Jq~(#Nh3`Hy zHq~5RM3xO@u7NTa2CBfkjOnriRxs99F`6Z4pP8)E+pMLTOm{wDz93zItE}t+XKF*3 zK1WDx4!h_!qLKt~X|{7q`Q8k3XGVUf%YG!@pT7`hY#)4#bH%t81*`7qkFToXR43 z+U~T37r(wiPsR!eJC=*Ad`W&YL3%vz5MjfbinDO&O|HHXCjU!Oqry0ro~XN=mP0@ zglAr-5lLh*7IOAiT(LlI#*AopvIHlCmIOH+lZ?5B#OK1I+ZFgFpv$tCue_nSZwuo( z!k!oFDuy&Q^=vG}?m(0(m|OrSWL%!wt&=>pY3_h!WR~)O9?mB0zl6b6sg{ln*gr?@ zO77PwlWXw2w)MBl>Sc&!e$#a}WbDo0ueWA7vnrO0qu=&L1gIA>Lgnd?KV1y3?tA^q z!Od0w&bF+@`F0CoX*Kk}t{rVAJI{{U_+`|TX9+h?lTDs&71l03ClbINJOL^#tqbV_ zc0efvM`*vb1yCWwpn@Wuv1Afb9pg@PirvZ#j5VL>Wsp--VNLK$tO%VJ=rQ@)m$+Ex^K33YS&M&e+fRG55Z;B6J2!B%GHS% zxW(&s{&fhPFwfq&+6?^vpKg2i7q`93UjOdu?E3!Q)pgHi6u#puDE3wFkH%#35r-dO zehcGrg=AXd96}DBqjq?fdOG=0jh^Kl(^=!#&!oZ?JCaqBcB3|;(ng`E zZPMH#RIe8qFCpNccXxx~{owZI-~X}WFKe+3+yYSR`m%z;bqIVpeJ>#|&s6Vk?uVBj z?yqm&-(UAW_paB)TFwuv7zX$))e%bPp}I~58A|LvXeC%V6o$ZF|MS)DP5)!Bf2Y}g zaCZ04iv18sH*@NtsYw(Hk4D`1uV8fb0Ijwj7qP6J?U{;=3mvrrN@aPwvm%7Q#Mo7o z3E?+g;IgRRIbiRG`g4nZST0E!Vzq;?VKM+weGDGsc(OsHTlbM!;aYuFNzKiDW3R-u-u_};@0Zng zwhNEgU!5tYo)3;uU=E>?00%f{{8Z-}|MlDYtCF$*-Z$$hsTJa82(@^st@P=gg--Q< zD$A=?#ZQ?qoynV>s}~OA!@ne42OZ`&+~iIz>TI-f*j4dL8q0dwB5ND04qGFqdKFj+ zY{inWDsWhc&QynOY%E)BY>mifDEQWC2>bGl${P7U@kU_tD?wJ}{~vXZi}^q8_CEiA zC&h7`T*{9yIs1)=pH1#!yJj+kE{0d;h}ZIDpuAtQkm9pJ?hcj`yVG0vEg~Ge^sH3K zyem3pH&hz}3wyetLgSNH4a>1BP25=I4r0ACu8?fXfLX+^$r#+Z>%&Lp!Q!mfK#iG@ z=3))?pchUd0FM1Xkvd&N{wed|RTl!S$p1TPm+t>LIXc|u|L>&4`F|J!Mu)IL`k&^G zZ6^K|0B)o8{2c%eKhl4USS>q>STFUbj1^lh3A zSV|)^oVj^|T$;X_pjN=nN>9z{aAjazMx4+|d_x1FldB13i^si3a3g&SVs6KmCGZY- zAc|oZ$LNCIW5^*swQmWk#P>}o)na*@y;`Er`L~ zve~c}@y*v+EyCu(_nWe)`?IBF#TU!08pMkwxsCC~6;5(>gcGXv`{a*m=WNNmT}fu897O}^sz1CtyS~0ZzrHzt_i1R2Am_(k^Jwmpl}6#V6pJg_AWM+k zf9~B5uWtGqAuC+jQi-hCv(7fU)>+xN&Q?0tS=GB?@9zHN*}v~^2ED$Hf!_B&ecT9T z>Drn~oC@EoPzCGZxx5;lonQCvKMgL=?s}K!8{sP4sZ&6r=7K9Wg7UVDXsP^Zr9*9 z%MDc9<+_y!%A8gL9VGXg zb#7Z}{rL@Hw&h|2q_a7f0x@2I0<03vc3*)7oT^Q{(;t8O@aepF-@m!+-S^Kv_H39p zJw~ub(!QqoYozN}174D-|NR|&Sm)?~ai^_uXdur>6Oc(BmylSBEXaKDGk>jQ1)=)> zQEgEQiG$E_u{#B&Ax7SF0!ewGtbJ`-381+t?Q-P)kj!^}NT#qCUvfNVk+1*a@a@w0 z!qPPHY}F+(hE=KNwmq`5wAHIzHhBYY#c6^i>S%KkH=>|I8@+mK*1um7qcWo*V@>fz!XM{EMdm1e+4y21ox z{Vl5dMCkwZXC%}gW}MVto-zqQx7z@r>thnEjn(9~+i>O2?$|KLHC!JHv1UKl>=Vp@ z$_+*??#)|N53A6v?v^WvvPQPrp;rNP8RH(IS&^{e&}SpIGH5l&acX60Jtx#7bn-QX zws7C3Gm24de&j0}{TEyrORee3Fqd6kOHIm`+)BI7krbtEVjo*s<-JCKC2X-|+^Z(< ztE$@g`iDP*nq5OhY+1{`3PaO;-l&LZ!(Q0+>gyEh&b5=`?_KMr?J)TN0;NW1e4yN91ME&I#h*2)VDgx!f2|mhEpYJ5#^wLO8oLprti#h!PkTRvrL4RE zf6XEA75o48!9k~Z|HI+o!G8b0i?U+>zte?4r|EuAOTaf${Mm8%UQ~YeVDC;b*c6+b?cTq|Kp}e7&t^|ctBda1=xDc zVQyr3R8em|NM&qo0PH>6a@#nv^Q^BxDYLb+dqUlO$*ijU;8;pl6-RbiPGGL;{UDo>B)EPDi9tf9{Q0j;0zeOC-Tc3^5NHUJ6fG$P@ zWTdA?aR6OZkg3|di1DQxeJIOPod0v|M+~@HpQDffPoY~w zXp9BiK*+i%aCsvjE@y&GWW!~Fdh4^XfonS}^#m#Npr02d*GD zAjogq^e{ISawEEyRL%8SH?AsF$#!r6fh1*fXhJ& z^>_>@65ye)GoYFbp;M&ARVWl3CS5d>G8ElrhUo@jO^>;vDY5t>nl7UgGL1MmA@dxU zQHco!-c4LNge@ug?>^^QGdTiF)YFcSfH=O_f?xs6%S1qM++7;&bv znI~!?^UhPKE_!$@qw(<={8bn(HGrv%90y%QLb9Mxk5I`_01|sx;+m^LG8GYeg}(eK z92|ya8%XhV33JdE6GqpZFizy+!pB162P%|dr3X_ng+#>XWDbafU=eo#i5cT^Q|WgZ zbs?1L1kkAAuVwXf1DW zFVR$Ss``_KA>%Tl>ZN3eM-(k59l`=pMmb{=jTcCOlxj+2HR(0F3xiEj7%vdq;~?|_ zb=B_zNEbaQ+~?wxP@>*$wc6;hRt9>k)iP6JDQuRhq2yAjrwiR*>8$P|tQ44|Uz)YX ztA-XKcA<;ZfHA2O&@oF^WEp21)fAQWDFFE3O%rUDWx<>Cv%$>6-!el7|9^oBP9hvsahwgWod6UvGx;*rPfk4jm{Hk6c_Su)pd9>A!<6u 
zDo$zt5h&Hlc>G8*A9$>NH5LbbfkhY9^-95+4~@Kr3bX{(V~Q+PwH3B{|DFgbk{2+U zfGfMGK4c?z243VteIxwwhX&yB>i?keL~?g^P>Phz~bC> zL5QD-oDDpnQj0?}$pd`JN!!QamHG{&7`ajGz z(A11I_ovXq5_)f!^%8m)*qy;Oma{Hu2HH%=q0;5IB!H2`K{%iqq(?G(H&C|LHSmB4 zVJx54n>TODdiGg0U*!hpg_Z47iQwPpM0~~4oq25fZCkXH!nNuhGoj8E>mT)w#K>K3 z0$4^|%fdvw=PU}79o!9I?81>WarG`8|pz6LQ~0^{dgynSmdux4Qm&HtK!2T&jwC9lb(J zeS?g{NxA>HKCm7`}gv`ZqUMH_GHvc5|#>G!yDU z`n%>68r`ebD7EQ>?4srz^QKS6&G<=8&uPh77kMxynAU2_fb$vlmI_jCWLHL(*{tZ@5_hl=WJ=C z4Ivi#QEahmf>P}`h1x-CkU>)w@L@Cis>kQGmW>R9)vzyfcBthPerV1 z8nxOjL+%XRFKdt)*M`J060cF{0~Vk*klk9WZC&Xj$DxVkdiL;u8aJTf8cTZ?Jw7Uy z3^Klv~`=GFLrF#C~&A<Fk%AQvH=1*%)z8FbO#n~#sRng;9NhnRZWc%ZTCSKAvpWkAC{ zJUGaM388@M+6Yu|b>yV4aO6wWFj)qAYBoNbh@*nMngi#=1N6JHRH=(jkMy4;FxDZe zJ1H`tp8i|&R=dznWy6c%PO%zCo|R>!V+rF$%CT|oHfnapHAqS<UZ^B8pDEsDZ9-mHc`c70$`lb1{)5;f}_IN3HpN%yF^>FPnDUkEJT9)-HOu8i{)=TgQ0(63|BCKWb_ReO>Zs87eXOW9?i) zav7OVGK}vkR8S;tu zH%ogpnNqLWmq}G@i)^ulrP_Jo6+@Q`W7{P%1CrLu=gbU?$|e6ggDu67bC53wK5v3C z(M8&;TP0{&b&v+RGlH}_mKi&EbA3}S)kR|2;Q0n5bAiVS%BzGf+hGO!aGju8(nKZj<58r)Vr$7vL(x{+ctj2Th+Nr1qm-bQ4iY0_d3?7lk?vFC9R;_Uk$&U?L^{%G_! z3!o3!1MWba8nzx{16QO4iIP+n$+LE+B`mMuUUKZ1G&m<&;?MGH;}Teh?r}(*Uqf+r z(pm*Ej<}F#?N$y5=)BmCMtb9J%DqD-oYqCnIQO$5nERzD2-lgM&vj#*=7;8ocdo;T zIxJZlW)s;q;qWArI4y>I#TQ7&BRumujeH}Ev5+(QV!#5s88f2Y$ugXDS`timO#bf% zGd>r7-|vK9fx0~R^1wSz_kB^^WU}X{b(KIGNIe?MX?G+_04Dpy2^p7XcIzb1ZJMoO z>6w-CpU1O{_^%LfRjOyl4&tBVcCGa5B;5^UUT-hR>!F!3V}w=)i6#(d9YDW+v6W%wRsw@AsT=86u`ZL_^o zyMBWG9{6}M0+&%wbkR{O2NN%F%h&Dv>kv3$p1pFl8Th}SZu)l@H~n6Jcsn@1yn8pe z?AwgOcbo;qw(9-SgiJr;@B=JvU{VH1h9%Bg<={DLhi8G)>9(r(Ebo%MsZ7dcu_0L{ zX*X&kO76SOX0<61FKrZx+9u5yL-l%5@Dc|8d3$?3y1Tx)`q#hh^vhZ-gSUXxy1uO8 za1#c%=WiSK@TcBgaCdq2{_e8>xqrDS)pFKfMK{2&sg6)O57l+{$w*@Ni&lb_ zU11pP4?hoXu7)4`!&@!(*XOtYtk@5MbTj8{nwmwS=xD-?{|H9cchGw4aTz{)wmnm^ za-pGCMCn}K@2CjjuP}BMXF~W*7q~2{cMjOQqW;37A6Cz#471un*f4pWs6GLYas0fj zW{^AKm*OB634~ur)5VVDk&sR8Q*dQrM7Hmk9rh4PX4h^Vvgb%Q^OMD=TlVfFv*NY- zsuIZEePd6gwb}k+Q}376clI0}vA;SqOg$eQqre!2GZ{h`qk%csw>lXp?<6dw`0S9o zgQdjo>;`^~2nR1c{1`HCKqu^qW)HTNjlD5HLD| z9n$}_bnG+nuYhnTwGru))+5QWglT+ST$$5^PR$|g#DXdwUN7>If1hv^r=(;N?vgoJ zYUzvQ3|hE2WDCAnwxG3PFOkp}v%q*>P%M`;*Z|T)=lK-D>p?H)iCUdUm;v;tNLKYE z2Q1l|Y?DB!Sll&ru#|IVX3p-}bZM?9`;bF?m4qb%6JLiS&2<3flHNctWO6{QF!5P^>pMkZ zPPmM)@1&>fQz^+z2i@=4)LF}1&%>;r@8-cDH@{K0XHS`pw(hje>r~57A}>}XLdI9B zILUn`PN?c1GDWIAxh213;hM&zxGV@T^-50PX1=SNYJ?hMmi4?=iefD2bK=8P?WqcZ zfl3E#um5&%KD>K(b2Yr}4|{e;Y@ISns05B(N#>=5MuTZE9NnH@Uf#XEyn6fY)5scb z&iB2R(cB$Cjlw-X7QeG0mJqr7+`kzOu7*1iE8HklNvzni&OVyfS=+YGUK-a~*Sb;v z_U_~PzwWNC`$HWty&Hb|xD(0JT}qWS6}=~s3O1wD8;s81UiR-kUH8s!`@Oe2(JEZ@ zR3NrKj-ad`Clafo2nwWG1j*G0>8`72J&XP7Hz*$x0XOz5^Pm=l)!I6eS2w4W8WpPctf!qG#$te zv7DWK|3mL)@VS3;chi4=H@dp`aCd!v`_D3lyo{~L8if0F8=n-b6=yK7x!HTa4N0&B z%!4yKs)BmoF)#6C6MJ%}fPQAdwlvp{Da?ZfG(>6F^kQbyW z$Rz1YNG!#fB_eCPS+s&teJQTCD22p9=(yOOfzps=R;%vMwPV&Q)d|zfbV~?}x9zU# zN}?}MP8$9N77o_;Cp%w>oH@h;wWP}OyxQ#M<&_<-@2NG-shGAo&-@e4hHPb}?{+HW za5m#`Hlu{9Vl`WjHNU7G4u5RK-!6wg@{9fE;yNMVdlhn8_)J|HKvyPdf< z$Hw8|c*?gpp0aDe;W>`Atl9!vw^y8nc#6$858o0`XD(|kXJtXx(f0PYJ6)4U6y(B0 zX=RhorHq~ZO61{a&Wj(-vB$xy^DPp_v&^wjHkaS_Yiet;slR5GXDty#WlWY{=P~+Z z+zgl(`B3c5Byuoz?CRbobUw+BiN|bWU zS9-=aU&r=72cLIwD-Hbe)X~;?B`X7;D-XM5L01Gn*BzL#NwKrC){}h6`3>WmGf^wd zo84L5iGvF3=E)s(mws|{7q%+hWgo8KR?Ko^6Ki31K^P8BY&ccey-Pl(U>y!|IhVsdB!icWMiN?@UaG% zf~>zrb)N|RzupR{KTJQVzdU8xfUdU#LD$D5*qEw0+hn7aKf7t897DK17GlG8ZrCQ6 z1C=}UT-=)1s2*04Ti+~K5VaidwLwop=rYDVLbFrCMnj*C+e@d_9LK4ZXX{%+Jwm77 zV$v2aKX%43sojtKM&tj0E8|&fnlj3r-l?Sq&o)EP!W69vQMJWG@rLC656p9cD?#Eh5GE;Ny+zf>!$q(__WFPO zoxocCfAXfF|2rqghwuMCOL;NDc 
zVQyr3R8em|NM&qo0PH<&b0Rm=`OL3SvAMe3Jv)H$%jT%^2d`l_)Os-m-p$>UO1Z>L z13J%06G_9GIR5WXNt$8aVZf}t$%FDC>zP(}OVet#`bA7!HnD6$EQgZ?@aE+lbJ+HMM(aF)-@pq`TKNiYcASCAB zwVq5XJGehd!#?>8To6V(XmM1-zMuHB(LQb**BlTwC%)7VFMba=ptmlYps{2e&j4MH z`^ZR7jbaZvC?iv~MHb^r5BktnExY~~*bPAJR{^YF|3^nhtyXsZpB%Lg>;E|#@$d{f zHH0QuzzBTSL7vSU9tPAVDZOYDNO3 zfhC^Bt6n|u1J@lgm)Oe=>Q4tu4mk**vWAc?$dI!IQK|xXHDXF|{kT(t=5gb+(b`1P zB@3V+#E`QIRHGPm@l}@uy>{cMwV5Dg4s`WG8^MeS$?+BgRh`3SYVoLhiiICHb6^WH z0zrP?W`?=xkouw`V2eR(tgdjbI8Ks0~Om#3&%fk;gb70X7FI z)Z+=DK!Ah3%z_M$2 zfG>y*7q-m;D%Jej9I%8Ak~{$P3jM5xqW=9Fg|G-4V1d36*HvRST^uxJ94%SUKx2?< zI;PkafXGxmL|-t~4+8<}Z;sd(+EDZum7KXQ@Q(V@mRXFehnB$T6;jM1Hbvu*g^=HH z&LjGa>*)`LHi1ry;SPvue1h%lq$u<&n?Z}lrQ+09#)d+TDPeBg;aUfsoA0^dnR<UZJ}m zg@t`TZv!cwu3!P$V!~+13FAaAFI_A&exO1bR{CQqrjQ8uf-C@W5G>#}ATeiLR+WC6 zQ5$?2F91C|O|=O8khJA<08g8B&6=O&XQ01`SPBWAWxIipz%82-No?#Y>SM421+C=` z?j@QjPSt!e-)CF~RK1i8@qnV$qC;39$|z?{qRA2okWwvatQNgScWJOG3X>&*d+hlx zppN=G1JXec3ipNhB$TKhwOU8$v6csVtkn`rVkK+^OGC+}Qcst9zT#EgL0Bm;OTRRG zjZ+RSd~8DpsSjeA`Kuu%YwSzU^VRIG`mDr%&V(F*{KEL%N>9i+5v%#JS>QTqK{-U= zi3^TW0x{#rVW8$!A_sh+4q_C29QUK{HKZh#6I%=!mmSpp*V*w&@zys7?83y;ZqL31t5}8wjU@d@N(VSQLlpWmJ>UABC*(7DyPwYB8}`u{9Wv7SJtrnlC-T2hZ+Od;vFed-blpP3tY@Bx+= zwhco3Oys=p0F_!CqDAiE`%e@vFmaW3>Y$@)+T}Q>AAl+ZZ(TN-1_Fkh-7i;6O;S%a zLPCYat{cIxKx5#QC!>DUIBA^Li@LQ7`nBVZ?Z#1~Z8{2&AMyRQ5A+>zVGwwUo?hlf z{a@x3XnKZ<`;+fr3Ej83L0w{d4nr*G9n|!+m5zO-%5R7VV~IV#Pc=xFB-E~_Osyes zj|gE5pVpf1-|19*!P1^PZ1#0Ww3EIqR6AxpT`1N+>H~?9 zyV?}647iqssd&#>;713z5n$(MP9$7Y8`NrlH@;c=u)_5oq)`cDiPf>l$YRZyxSf?N~XiXJ*5RAF3?6r#-C}!%4#~}K9989O=zJu!5Xl1VdsQqyZ%a@Ii(ObDj6nzG+qhMkpu)Eb!pmHlo= z<`w#dX>%L9zoPzB+shXhUA5Sijb+X%`mn?;ECiWRaFB;7o++Cz#EXbA_6XJeg*t*s zpz1t`g??!gx1A$l){;XM^OiSmn}hiP_(Ba z)HRJ-?S>(H4))y!WX822u?)m(#ro;ug^x@o7(_+t|gZ|lke5}R1%GL3XBVX}-lHqPBft(b8Q5)<<|p>m4qe-qZ< zL_IwK^Y8G?i2l$%Go_{K|5Hcx+xjkpVV=j7;bs(@C}T1LFivQL4f}q^QDKbohX)j` z?bNqvpu$f2m`rWcF{@0=SvC&yGJf3zpXw!{wi`HQqbcFcW}eTeK0+k?>v=dIb7`^Z z)*}yXUu&{Vk3EWVM7&Rh)yEE+;6 zpN%9t;YvydCd_Lj*?+L-u7Ow)PJLOk?ES!XSAn@HK1Z2nif_2$jlVgG-Q zc1NhwL0xUwdq8C^nGlA3Uo;m-wJ7EpyLi-EicQE;He4DBLZK6H7-; zH=#u+ziqgy?cxta33Y%n|jqn0W1SOmVUWFI6Y*$6S|KYiC_tjmW>1>|{4Ixr)q38a%U+#qZY)A+Z6ZQ76&{;y#ItgHFNLnd$ZiN>a@X$Oxc%sJ(6lA1QM7 zCdfzb-z4qzWOB1syiKZBw8>^inCqP;Uh?TuVeDdwOn}7w(m6AOta3@ePGE~Mq&(!S z51%%{nCKww)-4mXq&kR$+zCNiAIl6qys5bCJHktYi1s zC)RJiI6rNzgO~(d$n&FC5l|TQvu4V;KmUnQ#)qFrV7r)jCjOUW0O%t71&rW_Gn{&d zOen5{nqk^#LooX;D?`IXy67hHh*L|`lCjVsNFCD5Nr;Jxn^1a`gd8`+z2Xa`AsGVdunzSeoZ<*BL3d$lCy5Fy^Sc5vhY}3FyYKnbPtfD8< z8lg-e4W#}Ws}*}JasVcA#qt@K=f#Fin%g+hrC?^(%6}TqF59Xp7BirbCS zuk(8=$h{ZnB81wR`VRjVk-SAi2?yNygq&|6)aq~1fk``^?+97F~B!n4IS}kt5KdtPr5))IHe77Cu*vhDgevJX5bNDRqe7Q`~D?wkF z5SYQN^j}UwSJtt~>wAiNM63PTK>VJ0=W`~R&D;*Uxg74oMp||YDTg2xDHmqenKs45 z4LID)IfyxP9h2Y~x4D%5dyt(hC8L^4CP0s^=pCU%66|)t#mh0+j5?x&j$0|1aD!XD zK1#p#ffMEwcP=*r|M%0VcY8VNb$f%G{>9bpyZ%+L$S8crnU@`_K3Yx5>?8I+z;XoB zJV5kW!h}~2j-?KV1#mi3SIu6*yC~->k*=8?NK#1_8?^x?_nl_5+>{8nHVRp76K9*D zdOa(65d%Nn+ziLJ!_oD>|5Hpquf-C0Ge`^fmkk_NVQ_o?wqY+VRPV2E$K4ONSJ&@v zuX>+*S5>LzGX_hh0e*{hgwlDat`k?r65DrL36@TUVX!y&+#g*JKK2GTTI`1xH$Rn} zhd|ni5}l@IQ7Af^aO1y%(ajUI*?L?>-k$Bul8N6ZrN??}_emgIquP3?`a zW#)@)KQTL;A(YH+JUS#xP^-Dv!orxnyVnZw7P`42$lcv)Po!1td{NbLR_Jzn4rkim ztvRNS3zkt}7NLOv3pi)|TxXyE{bl`C(cFLQhxM4%lI3O?EpT!d@~2N0x*X*pzg`Pf z{G16hn6%lsdE+qd|1RM=Ua`F9Cev#Lv(wH|tco}CnAgkYAfzE>DgH}|L5p%|I70<%d%1_ zKf>hfH|~Bkxr;|NlOc3D?wi8C^~peKPhTd*XNTM!EG4$*Bls;K9GtlN(PvJdPT4h0 zR<`Bp*%FL};jcyt!mwzzzAG{XduhAykvXs`tQ)F%5z^e9q8{}8Ie5Uas|)e&fR0iX zeip)kLqI3aVxM%HO}!0WbZ-=F@7|tcI)iUWfmK-llsWLE3!&Dp|KrwauKwrE>B%Ag 
z|5;j?|A)b2bPPMB|7q#iXX0N5;ZABJ(52R6$+3i4STC+@VnS!83Oh1`vc>CJKGN?a zj>43bB*I-X2XiBRk(|K-E)Lm(FP1G>IIx#U=nGk3JS`{%moyXs#8v0%6v1J?n{p?u z&m&9#I#fivdXxheoiDaYAXF^wnmU-vIWsF~_iVa2*E1rmkX_`Snrj4<0n`mRp|kJ; z5khDE8D*=-y?d|&eM@KRjF)8a2Dm4(ffmc?s@}cNA-qeXAdC`Tha$~=0QnYQK`>yl zPYYqfV*SE*io$|$8DQ6n%j{z*Nh}9F@5$8Jg0-H8*}UG(gKsy#QIBV@nT={@F4tr? zh8Lw+(JdvGuNM0uhojubn)4WD?p}`3j|XL0p)r zgHpz2U+Nm|_TKg{2Dk4<*Mpnhpj+%Bty4w`mB6tr$s(7eXfX8$%mUMGWP_P63dRPy^o=_H;%2nm%+8S z9o@PxZ;ob>IZ#Gu+`GB`c=7Ms>tS!8W2U!*Pak(0Tka~RQksh1lSp~h=ydzzi?>(3 z+fT#p#Z9mKb|+eyd!jPLHir_F?PLXFT_8b$G_xSt{+7#Zye>u=VQJ<8Q@B9do3O+n z>oV;#xwA38=SZsDGCqDt?EP~m_UeUMHD^uXS)8{k2j?{MU2|xQFMzHfZw1$~?N{nH z639zJQI{Ph@M}=tUK->0c@8W*&#XS04u<&p+f!;71r z@)*)GwkGos?qe@P)C%oQgL%Wn;`<#)gvDbHoEM|YsP`Rn8&9iPzB?raBzF9Q;TGdc z2%m`rheJn?k4@hVW`v4nX$Yl=iLa7W5O+>}k@YB&pu6h8f~9zoww>fB(aWPj7p-gX?bZc5v~rS44T&x`hf=0-EQq zQ6Er_cui%%w^wOVrB=as^D0y+qy=dPGD;j15=(Jj5D+f9*|daFeJQWDD22qHZ`s(M zgVKs^!O*`Q{L2Z~I+0l|N^hg9XV7byV+)}=}qlWKvIdnT?$Cjn|*b0 zof2@J5;@KMN?sm7cQ!_`@WIyVN&hHN5r>Bom#EHLUuNz$ic)ae!t)Hri@dgu&`;{`LmJ} z5&ina!R=A*_9zRirzWue@>|YUqLgC3)H7CnAKUvHeBRA1_u)$`mkPq6YpYIM#ta$;+HRN^YpQT4VU^&f-oSl-NB_o~YaSw?KDctJHzMp0OvD z!L700Do;l$C1LfWRB7=jHS-R2crR0jx4l`mK?*DF57osnw|`h`Ewi<(l^J_gYD5pk zi-oPc{)bPnZSCa}@D25UXUAv9nfkw@H?70{-_O!AD?xqqhPjwhU)&!S|Cyip88H`9 z1wtAc2Xf9h`CV^!iMqkHRYSUGXna_3FM(6+CNERf7vJt)KggAi=;$8@z&_C>f1-na zsn>tim=Xr~FLi%a;}@EeA+s%m0Htb|*opBGV5K-{t}QgL_pIzDb^@BcYDesehgKS$ee{(shu zK-VFBOIyGTDgNXvkw{ra-3IH|&@Dxw*CjH;a{r=-g>)`)= zo|cP*qz%1zC&;8g7AlgpC$tSVdz0 zaTSgY+(yz#bPt<;Klh+o|2l1Z{lEQ5V59y&eUs7u?bFl4{eREWUQGYT2IWEXA2k1= a9onHC+Mzvj`@aAH0RR8<&(ur+r~m-9Nu$C5 literal 0 HcmV?d00001 diff --git a/charts/v1.23.3/blob-csi-driver/Chart.yaml b/charts/v1.23.3/blob-csi-driver/Chart.yaml new file mode 100644 index 000000000..ecfd21292 --- /dev/null +++ b/charts/v1.23.3/blob-csi-driver/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: v1.23.3 +description: Azure Blob Storage CSI driver +name: blob-csi-driver +version: v1.23.3 diff --git a/charts/v1.23.3/blob-csi-driver/templates/NOTES.txt b/charts/v1.23.3/blob-csi-driver/templates/NOTES.txt new file mode 100644 index 000000000..9ad135dd4 --- /dev/null +++ b/charts/v1.23.3/blob-csi-driver/templates/NOTES.txt @@ -0,0 +1,5 @@ +The Azure Blob Storage CSI driver is getting deployed to your cluster. + +To check Azure Blob Storage CSI driver pods status, please run: + + kubectl --namespace={{ .Release.Namespace }} get pods --selector="release={{ .Release.Name }}" --watch diff --git a/charts/v1.23.3/blob-csi-driver/templates/_helpers.tpl b/charts/v1.23.3/blob-csi-driver/templates/_helpers.tpl new file mode 100644 index 000000000..d99392f32 --- /dev/null +++ b/charts/v1.23.3/blob-csi-driver/templates/_helpers.tpl @@ -0,0 +1,49 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* Expand the name of the chart.*/}} +{{- define "blob.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "blob.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common selectors. +*/}} +{{- define "blob.selectorLabels" -}} +app.kubernetes.io/name: {{ template "blob.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Common labels. +*/}} +{{- define "blob.labels" -}} +{{- include "blob.selectorLabels" . }} +app.kubernetes.io/component: csi-driver +app.kubernetes.io/part-of: {{ template "blob.name" . 
}} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +helm.sh/chart: {{ template "blob.chart" . }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels }} +{{- end }} +{{- end -}} + + +{{/* pull secrets for containers */}} +{{- define "blob.pullSecrets" -}} +{{- if .Values.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- end }} +{{- end -}} \ No newline at end of file diff --git a/charts/v1.23.3/blob-csi-driver/templates/csi-blob-controller.yaml b/charts/v1.23.3/blob-csi-driver/templates/csi-blob-controller.yaml new file mode 100644 index 000000000..9ece72de3 --- /dev/null +++ b/charts/v1.23.3/blob-csi-driver/templates/csi-blob-controller.yaml @@ -0,0 +1,216 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ .Values.controller.name }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.controller.name }} + {{- include "blob.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.controller.replicas }} + selector: + matchLabels: + app: {{ .Values.controller.name }} + {{- include "blob.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + app: {{ .Values.controller.name }} + {{- include "blob.labels" . | nindent 8 }} + {{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + {{- end }} + {{- if .Values.podLabels }} +{{- toYaml .Values.podLabels | nindent 8 }} + {{- end }} +{{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} + spec: +{{- with .Values.controller.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + hostNetwork: {{ .Values.controller.hostNetwork }} + serviceAccountName: {{ .Values.serviceAccount.controller }} + nodeSelector: + kubernetes.io/os: linux + {{- if .Values.controller.runOnMaster}} + node-role.kubernetes.io/master: "" + {{- end}} + {{- if .Values.controller.runOnControlPlane}} + node-role.kubernetes.io/control-plane: "" + {{- end}} +{{- with .Values.controller.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + priorityClassName: {{ .Values.priorityClassName | quote }} + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.controller.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} +{{- end }} + containers: + - name: csi-provisioner +{{- if hasPrefix "/" .Values.image.csiProvisioner.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" +{{- else }} + image: "{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" +{{- end }} + args: + - "-v=2" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "--timeout=1200s" + - "--extra-create-metadata=true" + - "--kube-api-qps=50" + - "--kube-api-burst=100" + - "--feature-gates=HonorPVReclaimPolicy=true" + env: + - name: ADDRESS + value: /csi/csi.sock + imagePullPolicy: {{ .Values.image.csiProvisioner.pullPolicy }} + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }} + - name: liveness-probe +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port={{ .Values.controller.livenessProbe.healthPort }} + imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }} + - name: blob +{{- if hasPrefix "/" .Values.image.blob.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- else }} + image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- end }} + args: + - "--v={{ .Values.controller.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:{{ .Values.controller.metricsPort }}" + - "--drivername={{ .Values.driver.name }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--cloud-config-secret-name={{ .Values.controller.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.controller.cloudConfigSecretNamespace }}" + - "--allow-empty-cloud-config={{ .Values.controller.allowEmptyCloudConfig }}" + ports: + - containerPort: {{ .Values.controller.livenessProbe.healthPort }} + name: healthz + protocol: TCP + - containerPort: {{ .Values.controller.metricsPort }} + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: /etc/kubernetes/azurestackcloud.json + {{- end }} + imagePullPolicy: {{ .Values.image.blob.pullPolicy }} + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: 
/etc/kubernetes/ + name: azure-cred + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + - name: ssl-pki + mountPath: /etc/pki/ca-trust/extracted + readOnly: true + {{- end }} + resources: {{- toYaml .Values.controller.resources.blob | nindent 12 }} + - name: csi-resizer +{{- if hasPrefix "/" .Values.image.csiResizer.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" +{{- else }} + image: "{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" +{{- end }} + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - '-handle-volume-inuse-error=false' + env: + - name: ADDRESS + value: /csi/csi.sock + imagePullPolicy: {{ .Values.image.csiResizer.pullPolicy }} + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.csiResizer | nindent 12 }} + volumes: + - name: socket-dir + emptyDir: {} + - name: azure-cred + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + - name: ssl-pki + hostPath: + path: /etc/pki/ca-trust/extracted + {{- end }} + {{- if .Values.securityContext }} + securityContext: {{- toYaml .Values.securityContext | nindent 8 }} + {{- end }} diff --git a/charts/v1.23.3/blob-csi-driver/templates/csi-blob-driver.yaml b/charts/v1.23.3/blob-csi-driver/templates/csi-blob-driver.yaml new file mode 100644 index 000000000..9c5de5b91 --- /dev/null +++ b/charts/v1.23.3/blob-csi-driver/templates/csi-blob-driver.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: {{ .Values.driver.name }} + labels: + {{- include "blob.labels" . | nindent 4 }} +spec: + attachRequired: false + podInfoOnMount: true + fsGroupPolicy: {{ .Values.feature.fsGroupPolicy }} + volumeLifecycleModes: + - Persistent + - Ephemeral + tokenRequests: + - audience: api://AzureADTokenExchange diff --git a/charts/v1.23.3/blob-csi-driver/templates/csi-blob-node.yaml b/charts/v1.23.3/blob-csi-driver/templates/csi-blob-node.yaml new file mode 100644 index 000000000..842f76e47 --- /dev/null +++ b/charts/v1.23.3/blob-csi-driver/templates/csi-blob-node.yaml @@ -0,0 +1,326 @@ +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.node.name }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.node.name }} + {{- include "blob.labels" . | nindent 4 }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.node.name }} + {{- include "blob.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + app: {{ .Values.node.name }} + {{- include "blob.labels" . 
| nindent 8 }} + {{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + {{- end }} + {{- if .Values.podLabels }} +{{- toYaml .Values.podLabels | nindent 8 }} + {{- end }} +{{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if or .Values.node.enableBlobfuseProxy .Values.node.enableAznfsMount }} + hostPID: true + {{- end }} + hostNetwork: true + dnsPolicy: Default + serviceAccountName: {{ .Values.serviceAccount.node }} + nodeSelector: + kubernetes.io/os: linux +{{- with .Values.node.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + {{- if .Values.node.affinity }} +{{- toYaml .Values.node.affinity | nindent 8 }} + {{- end }} + priorityClassName: {{ .Values.priorityClassName | quote }} + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.node.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + initContainers: + - name: install-blobfuse-proxy +{{- if hasPrefix "/" .Values.image.blob.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- else }} + image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- end }} + imagePullPolicy: IfNotPresent + command: + - "/blobfuse-proxy/init.sh" + securityContext: + privileged: true + env: + - name: DEBIAN_FRONTEND + value: "noninteractive" + - name: INSTALL_BLOBFUSE + value: "{{ .Values.node.blobfuseProxy.installBlobfuse }}" + - name: BLOBFUSE_VERSION + value: "{{ .Values.node.blobfuseProxy.blobfuseVersion }}" + - name: INSTALL_BLOBFUSE2 + value: "{{ .Values.node.blobfuseProxy.installBlobfuse2 }}" + - name: BLOBFUSE2_VERSION + value: "{{ .Values.node.blobfuseProxy.blobfuse2Version }}" + - name: INSTALL_BLOBFUSE_PROXY + value: "{{ .Values.node.enableBlobfuseProxy }}" + - name: SET_MAX_OPEN_FILE_NUM + value: "{{ .Values.node.blobfuseProxy.setMaxOpenFileNum }}" + - name: MAX_FILE_NUM + value: "{{ .Values.node.blobfuseProxy.maxOpenFileNum }}" + - name: DISABLE_UPDATEDB + value: "{{ .Values.node.blobfuseProxy.disableUpdateDB }}" + volumeMounts: + - name: host-usr + mountPath: /host/usr + - name: host-usr-local + mountPath: /host/usr/local + - name: host-etc + mountPath: /host/etc + containers: + - name: liveness-probe + imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} + volumeMounts: + - mountPath: /csi + name: socket-dir +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port={{ .Values.node.livenessProbe.healthPort }} + - --v=2 + resources: {{- toYaml .Values.node.resources.livenessProbe | nindent 12 }} + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ 
.Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }}/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: {{- toYaml .Values.node.resources.nodeDriverRegistrar | nindent 12 }} + - name: blob +{{- if hasPrefix "/" .Values.image.blob.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- else }} + image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- end }} + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--blobfuse-proxy-endpoint=$(BLOBFUSE_PROXY_ENDPOINT)" + - "--enable-blobfuse-proxy={{ .Values.node.enableBlobfuseProxy }}" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--drivername={{ .Values.driver.name }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--enable-get-volume-stats={{ .Values.feature.enableGetVolumeStats }}" + - "--append-timestamp-cache-dir={{ .Values.node.appendTimeStampInCacheDir }}" + - "--mount-permissions={{ .Values.node.mountPermissions }}" + - "--allow-inline-volume-key-access-with-idenitity={{ .Values.node.allowInlineVolumeKeyAccessWithIdentity }}" + - "--enable-aznfs-mount={{ .Values.node.enableAznfsMount }}" + ports: + - containerPort: {{ .Values.node.livenessProbe.healthPort }} + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: BLOBFUSE_PROXY_ENDPOINT + value: unix:///csi/blobfuse-proxy.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: /etc/kubernetes/azurestackcloud.json + {{- end }} + imagePullPolicy: {{ .Values.image.blob.pullPolicy }} + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: {{ .Values.linux.kubelet }}/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /mnt + name: blob-cache + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + mountPath: 
/etc/ssl/certs + readOnly: true + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + - name: ssl-pki + mountPath: /etc/pki/ca-trust/extracted + readOnly: true + {{- end }} + {{- if .Values.node.enableAznfsMount }} + - mountPath: /opt/microsoft/aznfs/data + name: aznfs-data + - mountPath: /lib/modules + name: lib-modules + readOnly: true + {{- end }} + resources: {{- toYaml .Values.node.resources.blob | nindent 12 }} +{{- if .Values.node.enableAznfsMount }} + - name: aznfswatchdog +{{- if hasPrefix "/" .Values.image.blob.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- else }} + image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- end }} + command: + - "aznfswatchdog" + imagePullPolicy: {{ .Values.image.blob.pullPolicy }} + securityContext: + privileged: true + resources: {{- toYaml .Values.node.resources.aznfswatchdog | nindent 12 }} + volumeMounts: + - mountPath: /opt/microsoft/aznfs/data + name: aznfs-data + - mountPath: {{ .Values.linux.kubelet }}/ + mountPropagation: Bidirectional + name: mountpoint-dir +{{- end }} + volumes: + - name: host-usr + hostPath: + path: /usr + - name: host-usr-local + hostPath: + path: /usr/local + - name: host-etc + hostPath: + path: /etc + - hostPath: + path: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }} + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: {{ .Values.linux.kubelet }}/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: {{ .Values.linux.kubelet }}/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + name: azure-cred + - hostPath: + path: {{ .Values.node.blobfuseCachePath }} + name: blob-cache + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + - name: ssl-pki + hostPath: + path: /etc/pki/ca-trust/extracted + {{- end }} + {{- if .Values.node.enableAznfsMount }} + - hostPath: + path: /opt/microsoft/aznfs/data + type: DirectoryOrCreate + name: aznfs-data + - name: lib-modules + hostPath: + path: /lib/modules + type: DirectoryOrCreate + {{- end }} + {{- if .Values.securityContext }} + securityContext: {{- toYaml .Values.securityContext | nindent 8 }} + {{- end }} diff --git a/charts/v1.23.3/blob-csi-driver/templates/rbac-csi-blob-controller.yaml b/charts/v1.23.3/blob-csi-driver/templates/rbac-csi-blob-controller.yaml new file mode 100644 index 000000000..833dcc640 --- /dev/null +++ b/charts/v1.23.3/blob-csi-driver/templates/rbac-csi-blob-controller.yaml @@ -0,0 +1,115 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-provisioner-role + labels: + {{- include "blob.labels" . 
| nindent 4 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-provisioner-binding + labels: + {{- include "blob.labels" . | nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-resizer-role + labels: + {{- include "blob.labels" . | nindent 4 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-resizer-role + labels: + {{- include "blob.labels" . | nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-controller-secret-role + labels: + {{- include "blob.labels" . | nindent 4 }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "create"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-controller-secret-binding + labels: + {{- include "blob.labels" . 
| nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-{{ .Values.rbac.name }}-controller-secret-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.23.3/blob-csi-driver/templates/rbac-csi-blob-node.yaml b/charts/v1.23.3/blob-csi-driver/templates/rbac-csi-blob-node.yaml new file mode 100644 index 000000000..c041cf8db --- /dev/null +++ b/charts/v1.23.3/blob-csi-driver/templates/rbac-csi-blob-node.yaml @@ -0,0 +1,29 @@ +{{- if .Values.rbac.create -}} +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-node-secret-role + labels: + {{- include "blob.labels" . | nindent 4 }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-node-secret-binding + labels: + {{- include "blob.labels" . | nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.node }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-{{ .Values.rbac.name }}-node-secret-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.23.3/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml b/charts/v1.23.3/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml new file mode 100644 index 000000000..7433bccf1 --- /dev/null +++ b/charts/v1.23.3/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml @@ -0,0 +1,17 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "blob.labels" . | nindent 4 }} +{{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + annotations: + azure.workload.identity/client-id: {{ .Values.workloadIdentity.clientID }} +{{- if .Values.workloadIdentity.tenantID }} + azure.workload.identity/tenant-id: {{ .Values.workloadIdentity.tenantID }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/v1.23.3/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml b/charts/v1.23.3/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml new file mode 100644 index 000000000..a25090e30 --- /dev/null +++ b/charts/v1.23.3/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml @@ -0,0 +1,17 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.node }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "blob.labels" . 
| nindent 4 }} +{{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + annotations: + azure.workload.identity/client-id: {{ .Values.workloadIdentity.clientID }} +{{- if .Values.workloadIdentity.tenantID }} + azure.workload.identity/tenant-id: {{ .Values.workloadIdentity.tenantID }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/v1.23.3/blob-csi-driver/values.yaml b/charts/v1.23.3/blob-csi-driver/values.yaml new file mode 100644 index 000000000..70d51e7a9 --- /dev/null +++ b/charts/v1.23.3/blob-csi-driver/values.yaml @@ -0,0 +1,180 @@ +image: + baseRepo: mcr.microsoft.com + blob: + repository: /oss/kubernetes-csi/blob-csi + tag: v1.23.3 + pullPolicy: IfNotPresent + csiProvisioner: + repository: /oss/kubernetes-csi/csi-provisioner + tag: v3.5.0 + pullPolicy: IfNotPresent + livenessProbe: + repository: /oss/kubernetes-csi/livenessprobe + tag: v2.10.0 + pullPolicy: IfNotPresent + nodeDriverRegistrar: + repository: /oss/kubernetes-csi/csi-node-driver-registrar + tag: v2.8.0 + pullPolicy: IfNotPresent + csiResizer: + repository: /oss/kubernetes-csi/csi-resizer + tag: v1.8.0 + pullPolicy: IfNotPresent + +cloud: AzurePublicCloud + +## Reference to one or more secrets to be used when pulling images +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: [] +# - name: myRegistryKeySecretName + +serviceAccount: + create: true # When true, service accounts will be created for you. Set to false if you want to use your own. + controller: csi-blob-controller-sa # Name of Service Account to be created or used + node: csi-blob-node-sa # Name of Service Account to be created or used + +rbac: + create: true + name: blob + +## Collection of annotations to add to all the pods +podAnnotations: {} +## Collection of labels to add to all the pods +podLabels: {} +# -- Custom labels to add into metadata +customLabels: {} + # k8s-app: blob-csi-driver + +## Leverage a PriorityClass to ensure your pods survive resource shortages +## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +priorityClassName: system-cluster-critical +## Security context give the opportunity to run container as nonroot by setting a securityContext +## by example : +## securityContext: { runAsUser: 1001 } +securityContext: {} + +controller: + name: csi-blob-controller + cloudConfigSecretName: azure-cloud-provider + cloudConfigSecretNamespace: kube-system + allowEmptyCloudConfig: true + hostNetwork: true # this setting could be disabled if controller does not depend on MSI setting + metricsPort: 29634 + livenessProbe: + healthPort: 29632 + replicas: 2 + runOnMaster: false + runOnControlPlane: false + logLevel: 5 + resources: + csiProvisioner: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + livenessProbe: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + blob: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + csiResizer: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + affinity: {} + nodeSelector: {} + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + +node: + name: csi-blob-node + cloudConfigSecretName: azure-cloud-provider + cloudConfigSecretNamespace: kube-system + allowEmptyCloudConfig: true + 
allowInlineVolumeKeyAccessWithIdentity: false + maxUnavailable: 1 + livenessProbe: + healthPort: 29633 + logLevel: 5 + enableBlobfuseProxy: true + blobfuseProxy: + installBlobfuse: true + blobfuseVersion: "1.4.5" + installBlobfuse2: true + blobfuse2Version: "2.1.2" + setMaxOpenFileNum: true + maxOpenFileNum: "9000000" + disableUpdateDB: true + blobfuseCachePath: /mnt + appendTimeStampInCacheDir: false + mountPermissions: 0777 + resources: + livenessProbe: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + nodeDriverRegistrar: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + blob: + limits: + memory: 2100Mi + requests: + cpu: 10m + memory: 20Mi + aznfswatchdog: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + affinity: {} + nodeSelector: {} + tolerations: + - operator: "Exists" + enableAznfsMount: true + +feature: + fsGroupPolicy: ReadWriteOnceWithFSType + enableGetVolumeStats: false + +driver: + name: blob.csi.azure.com + customUserAgent: "" + userAgentSuffix: "OSS-helm" + azureGoSDKLogLevel: "" # available values: ""(no logs), DEBUG, INFO, WARNING, ERROR + httpsProxy: "" + httpProxy: "" + +linux: + kubelet: /var/lib/kubelet + distro: debian + +workloadIdentity: + clientID: "" + # [optional] If the AAD application or user-assigned managed identity is not in the same tenant as the cluster + # then set tenantID with the application or user-assigned managed identity tenant ID + tenantID: "" diff --git a/deploy/v1.21.7/csi-blob-controller.yaml b/deploy/v1.21.7/csi-blob-controller.yaml new file mode 100644 index 000000000..2c5c19642 --- /dev/null +++ b/deploy/v1.21.7/csi-blob-controller.yaml @@ -0,0 +1,143 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-blob-controller + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app: csi-blob-controller + template: + metadata: + labels: + app: csi-blob-controller + spec: + hostNetwork: true + serviceAccountName: csi-blob-controller-sa + nodeSelector: + kubernetes.io/os: linux # add "kubernetes.io/role: master" to run controller on master node + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + containers: + - name: csi-provisioner + image: mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner:v3.3.0 + args: + - "-v=2" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + - "--leader-election-namespace=kube-system" + - "--timeout=120s" + - "--extra-create-metadata=true" + - "--kube-api-qps=50" + - "--kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: liveness-probe + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.8.0 + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=29632 + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: blob + image: mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.21.7 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:29634" + - 
"--user-agent-suffix=OSS-kubectl" + ports: + - containerPort: 29632 + name: healthz + protocol: TCP + - containerPort: 29634 + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-resizer + image: mcr.microsoft.com/oss/kubernetes-csi/csi-resizer:v1.5.0 + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - '-handle-volume-inuse-error=false' + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - name: socket-dir + emptyDir: {} + - name: azure-cred + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate diff --git a/deploy/v1.21.7/csi-blob-driver.yaml b/deploy/v1.21.7/csi-blob-driver.yaml new file mode 100644 index 000000000..7b216feab --- /dev/null +++ b/deploy/v1.21.7/csi-blob-driver.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: blob.csi.azure.com +spec: + attachRequired: false + podInfoOnMount: true + volumeLifecycleModes: + - Persistent + - Ephemeral diff --git a/deploy/v1.21.7/csi-blob-node.yaml b/deploy/v1.21.7/csi-blob-node.yaml new file mode 100644 index 000000000..263019f36 --- /dev/null +++ b/deploy/v1.21.7/csi-blob-node.yaml @@ -0,0 +1,203 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-blob-node + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-blob-node + template: + metadata: + labels: + app: csi-blob-node + spec: + hostNetwork: true + hostPID: true + dnsPolicy: Default + serviceAccountName: csi-blob-node-sa + nodeSelector: + kubernetes.io/os: linux + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - operator: "Exists" + initContainers: + - name: install-blobfuse-proxy + image: mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.21.7 + imagePullPolicy: IfNotPresent + command: + - "/blobfuse-proxy/init.sh" + securityContext: + privileged: true + env: + - name: DEBIAN_FRONTEND + value: "noninteractive" + - name: INSTALL_BLOBFUSE_PROXY + value: "true" + - name: INSTALL_BLOBFUSE + value: "true" + - name: BLOBFUSE_VERSION + value: "" + - name: INSTALL_BLOBFUSE2 + value: "true" + - name: BLOBFUSE2_VERSION + value: "" + - name: SET_MAX_OPEN_FILE_NUM + value: "true" + - name: MAX_FILE_NUM + value: "9000000" + - name: DISABLE_UPDATEDB + value: "true" + volumeMounts: + - name: host-usr + mountPath: /host/usr + - name: host-etc + mountPath: /host/etc + containers: + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.8.0 + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - 
--health-port=29633 + - --v=2 + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.6.2 + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/blob.csi.azure.com/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: blob + image: mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.21.7 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--enable-blobfuse-proxy=false" + - "--blobfuse-proxy-endpoint=$(BLOBFUSE_PROXY_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--user-agent-suffix=OSS-kubectl" + ports: + - containerPort: 29633 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: BLOBFUSE_PROXY_ENDPOINT + value: unix:///csi/blobfuse-proxy.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /var/lib/kubelet/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /mnt + name: blob-cache + resources: + limits: + memory: 2100Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - name: host-usr + hostPath: + path: /usr + - name: host-etc + hostPath: + path: /etc + - hostPath: + path: /var/lib/kubelet/plugins/blob.csi.azure.com + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: /var/lib/kubelet/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + name: azure-cred + - hostPath: + path: /mnt + type: DirectoryOrCreate + name: blob-cache +--- diff --git a/deploy/v1.21.7/kustomization.yaml b/deploy/v1.21.7/kustomization.yaml new file mode 100644 index 000000000..8b7f5fcac --- /dev/null +++ b/deploy/v1.21.7/kustomization.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - csi-blob-controller.yaml + - csi-blob-driver.yaml + - csi-blob-node.yaml + - rbac-csi-blob-controller.yaml + - rbac-csi-blob-node.yaml + - blobfuse-proxy.yaml diff --git a/deploy/v1.21.7/rbac-csi-blob-controller.yaml b/deploy/v1.21.7/rbac-csi-blob-controller.yaml new file mode 100644 index 000000000..89c2f1f38 --- /dev/null +++ b/deploy/v1.21.7/rbac-csi-blob-controller.yaml @@ -0,0 +1,108 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-blob-controller-sa + namespace: kube-system +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: 
blob-external-provisioner-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: blob-csi-provisioner-binding +subjects: + - kind: ServiceAccount + name: csi-blob-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: blob-external-provisioner-role + apiGroup: rbac.authorization.k8s.io +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: blob-external-resizer-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: blob-csi-resizer-role +subjects: + - kind: ServiceAccount + name: csi-blob-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: blob-external-resizer-role + apiGroup: rbac.authorization.k8s.io +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-blob-controller-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "create"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-blob-controller-secret-binding +subjects: + - kind: ServiceAccount + name: csi-blob-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-blob-controller-secret-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.21.7/rbac-csi-blob-node.yaml b/deploy/v1.21.7/rbac-csi-blob-node.yaml new file mode 100644 index 000000000..ce06d862c --- /dev/null +++ b/deploy/v1.21.7/rbac-csi-blob-node.yaml @@ -0,0 +1,30 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-blob-node-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-blob-node-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-blob-node-secret-binding +subjects: + - kind: ServiceAccount + name: csi-blob-node-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-blob-node-secret-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.22.5/csi-blob-controller.yaml b/deploy/v1.22.5/csi-blob-controller.yaml new file mode 100644 index 000000000..8834d6088 --- /dev/null +++ 
b/deploy/v1.22.5/csi-blob-controller.yaml @@ -0,0 +1,143 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-blob-controller + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app: csi-blob-controller + template: + metadata: + labels: + app: csi-blob-controller + spec: + hostNetwork: true + serviceAccountName: csi-blob-controller-sa + nodeSelector: + kubernetes.io/os: linux # add "kubernetes.io/role: master" to run controller on master node + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + containers: + - name: csi-provisioner + image: mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner:v3.5.0 + args: + - "-v=2" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + - "--leader-election-namespace=kube-system" + - "--timeout=120s" + - "--extra-create-metadata=true" + - "--kube-api-qps=50" + - "--kube-api-burst=100" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: liveness-probe + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.10.0 + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=29632 + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: blob + image: mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.22.5 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:29634" + - "--user-agent-suffix=OSS-kubectl" + ports: + - containerPort: 29632 + name: healthz + protocol: TCP + - containerPort: 29634 + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-resizer + image: mcr.microsoft.com/oss/kubernetes-csi/csi-resizer:v1.8.0 + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - '-handle-volume-inuse-error=false' + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - name: socket-dir + emptyDir: {} + - name: azure-cred + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate diff --git a/deploy/v1.22.5/csi-blob-driver.yaml b/deploy/v1.22.5/csi-blob-driver.yaml new file mode 100644 index 000000000..d2de725d8 --- /dev/null +++ b/deploy/v1.22.5/csi-blob-driver.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: blob.csi.azure.com +spec: + attachRequired: false + podInfoOnMount: true + fsGroupPolicy: ReadWriteOnceWithFSType + volumeLifecycleModes: + - 
Persistent + - Ephemeral diff --git a/deploy/v1.22.5/csi-blob-node.yaml b/deploy/v1.22.5/csi-blob-node.yaml new file mode 100644 index 000000000..bd5a35724 --- /dev/null +++ b/deploy/v1.22.5/csi-blob-node.yaml @@ -0,0 +1,203 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-blob-node + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-blob-node + template: + metadata: + labels: + app: csi-blob-node + spec: + hostNetwork: true + hostPID: true + dnsPolicy: Default + serviceAccountName: csi-blob-node-sa + nodeSelector: + kubernetes.io/os: linux + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - operator: "Exists" + initContainers: + - name: install-blobfuse-proxy + image: mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.22.5 + imagePullPolicy: IfNotPresent + command: + - "/blobfuse-proxy/init.sh" + securityContext: + privileged: true + env: + - name: DEBIAN_FRONTEND + value: "noninteractive" + - name: INSTALL_BLOBFUSE_PROXY + value: "true" + - name: INSTALL_BLOBFUSE + value: "true" + - name: BLOBFUSE_VERSION + value: "1.4.5" + - name: INSTALL_BLOBFUSE2 + value: "true" + - name: BLOBFUSE2_VERSION + value: "2.0.5" + - name: SET_MAX_OPEN_FILE_NUM + value: "true" + - name: MAX_FILE_NUM + value: "9000000" + - name: DISABLE_UPDATEDB + value: "true" + volumeMounts: + - name: host-usr + mountPath: /host/usr + - name: host-etc + mountPath: /host/etc + containers: + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.10.0 + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=29633 + - --v=2 + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.8.0 + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/blob.csi.azure.com/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: blob + image: mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.22.5 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--enable-blobfuse-proxy=false" + - "--blobfuse-proxy-endpoint=$(BLOBFUSE_PROXY_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--user-agent-suffix=OSS-kubectl" + ports: + - containerPort: 29633 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: 
BLOBFUSE_PROXY_ENDPOINT + value: unix:///csi/blobfuse-proxy.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /var/lib/kubelet/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /mnt + name: blob-cache + resources: + limits: + memory: 2100Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - name: host-usr + hostPath: + path: /usr + - name: host-etc + hostPath: + path: /etc + - hostPath: + path: /var/lib/kubelet/plugins/blob.csi.azure.com + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: /var/lib/kubelet/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + name: azure-cred + - hostPath: + path: /mnt + type: DirectoryOrCreate + name: blob-cache +--- diff --git a/deploy/v1.22.5/kustomization.yaml b/deploy/v1.22.5/kustomization.yaml new file mode 100644 index 000000000..8b7f5fcac --- /dev/null +++ b/deploy/v1.22.5/kustomization.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - csi-blob-controller.yaml + - csi-blob-driver.yaml + - csi-blob-node.yaml + - rbac-csi-blob-controller.yaml + - rbac-csi-blob-node.yaml + - blobfuse-proxy.yaml diff --git a/deploy/v1.22.5/rbac-csi-blob-controller.yaml b/deploy/v1.22.5/rbac-csi-blob-controller.yaml new file mode 100644 index 000000000..89c2f1f38 --- /dev/null +++ b/deploy/v1.22.5/rbac-csi-blob-controller.yaml @@ -0,0 +1,108 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-blob-controller-sa + namespace: kube-system +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: blob-external-provisioner-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: blob-csi-provisioner-binding +subjects: + - kind: ServiceAccount + name: csi-blob-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: blob-external-provisioner-role + apiGroup: rbac.authorization.k8s.io +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: blob-external-resizer-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", 
"patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: blob-csi-resizer-role +subjects: + - kind: ServiceAccount + name: csi-blob-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: blob-external-resizer-role + apiGroup: rbac.authorization.k8s.io +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-blob-controller-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "create"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-blob-controller-secret-binding +subjects: + - kind: ServiceAccount + name: csi-blob-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-blob-controller-secret-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.22.5/rbac-csi-blob-node.yaml b/deploy/v1.22.5/rbac-csi-blob-node.yaml new file mode 100644 index 000000000..ce06d862c --- /dev/null +++ b/deploy/v1.22.5/rbac-csi-blob-node.yaml @@ -0,0 +1,30 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-blob-node-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-blob-node-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-blob-node-secret-binding +subjects: + - kind: ServiceAccount + name: csi-blob-node-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-blob-node-secret-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.23.3/csi-blob-controller.yaml b/deploy/v1.23.3/csi-blob-controller.yaml new file mode 100644 index 000000000..06a11ae38 --- /dev/null +++ b/deploy/v1.23.3/csi-blob-controller.yaml @@ -0,0 +1,144 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-blob-controller + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app: csi-blob-controller + template: + metadata: + labels: + app: csi-blob-controller + spec: + hostNetwork: true + serviceAccountName: csi-blob-controller-sa + nodeSelector: + kubernetes.io/os: linux # add "kubernetes.io/role: master" to run controller on master node + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + containers: + - name: csi-provisioner + image: mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner:v3.5.0 + args: + - "-v=2" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + - "--leader-election-namespace=kube-system" + - "--timeout=600s" + - "--extra-create-metadata=true" + - "--kube-api-qps=50" + - "--kube-api-burst=100" + - "--feature-gates=HonorPVReclaimPolicy=true" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: liveness-probe + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.10.0 + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - 
--health-port=29632 + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: blob + image: mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.23.3 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:29634" + - "--user-agent-suffix=OSS-kubectl" + ports: + - containerPort: 29632 + name: healthz + protocol: TCP + - containerPort: 29634 + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-resizer + image: mcr.microsoft.com/oss/kubernetes-csi/csi-resizer:v1.8.0 + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - '-handle-volume-inuse-error=false' + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - name: socket-dir + emptyDir: {} + - name: azure-cred + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate diff --git a/deploy/v1.23.3/csi-blob-driver.yaml b/deploy/v1.23.3/csi-blob-driver.yaml new file mode 100644 index 000000000..d2de725d8 --- /dev/null +++ b/deploy/v1.23.3/csi-blob-driver.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: blob.csi.azure.com +spec: + attachRequired: false + podInfoOnMount: true + fsGroupPolicy: ReadWriteOnceWithFSType + volumeLifecycleModes: + - Persistent + - Ephemeral diff --git a/deploy/v1.23.3/csi-blob-node.yaml b/deploy/v1.23.3/csi-blob-node.yaml new file mode 100644 index 000000000..78e675987 --- /dev/null +++ b/deploy/v1.23.3/csi-blob-node.yaml @@ -0,0 +1,208 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-blob-node + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-blob-node + template: + metadata: + labels: + app: csi-blob-node + spec: + hostNetwork: true + hostPID: true + dnsPolicy: Default + serviceAccountName: csi-blob-node-sa + nodeSelector: + kubernetes.io/os: linux + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - operator: "Exists" + initContainers: + - name: install-blobfuse-proxy + image: mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.23.3 + imagePullPolicy: IfNotPresent + command: + - "/blobfuse-proxy/init.sh" + securityContext: + privileged: true + env: + - name: DEBIAN_FRONTEND + value: "noninteractive" + - name: INSTALL_BLOBFUSE_PROXY + value: "true" + - name: INSTALL_BLOBFUSE + value: "true" + - name: BLOBFUSE_VERSION + value: "1.4.5" + - name: INSTALL_BLOBFUSE2 + value: "true" + - name: BLOBFUSE2_VERSION + value: "2.1.2" + - name: SET_MAX_OPEN_FILE_NUM + value: "true" + - name: 
MAX_FILE_NUM + value: "9000000" + - name: DISABLE_UPDATEDB + value: "true" + volumeMounts: + - name: host-usr + mountPath: /host/usr + - name: host-usr-local + mountPath: /host/usr/local + - name: host-etc + mountPath: /host/etc + containers: + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.10.0 + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=29633 + - --v=2 + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.8.0 + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/blob.csi.azure.com/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: blob + image: mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.23.3 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--enable-blobfuse-proxy=false" + - "--blobfuse-proxy-endpoint=$(BLOBFUSE_PROXY_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--user-agent-suffix=OSS-kubectl" + ports: + - containerPort: 29633 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: BLOBFUSE_PROXY_ENDPOINT + value: unix:///csi/blobfuse-proxy.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /var/lib/kubelet/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /mnt + name: blob-cache + resources: + limits: + memory: 2100Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - name: host-usr + hostPath: + path: /usr + - name: host-usr-local + hostPath: + path: /usr/local + - name: host-etc + hostPath: + path: /etc + - hostPath: + path: /var/lib/kubelet/plugins/blob.csi.azure.com + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: /var/lib/kubelet/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + name: azure-cred + - hostPath: + path: /mnt + type: DirectoryOrCreate + name: blob-cache +--- diff --git a/deploy/v1.23.3/kustomization.yaml b/deploy/v1.23.3/kustomization.yaml new file mode 100644 index 000000000..8b7f5fcac --- /dev/null +++ b/deploy/v1.23.3/kustomization.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - csi-blob-controller.yaml + - csi-blob-driver.yaml + - csi-blob-node.yaml + - 
rbac-csi-blob-controller.yaml + - rbac-csi-blob-node.yaml + - blobfuse-proxy.yaml diff --git a/deploy/v1.23.3/rbac-csi-blob-controller.yaml b/deploy/v1.23.3/rbac-csi-blob-controller.yaml new file mode 100644 index 000000000..89c2f1f38 --- /dev/null +++ b/deploy/v1.23.3/rbac-csi-blob-controller.yaml @@ -0,0 +1,108 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-blob-controller-sa + namespace: kube-system +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: blob-external-provisioner-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: blob-csi-provisioner-binding +subjects: + - kind: ServiceAccount + name: csi-blob-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: blob-external-provisioner-role + apiGroup: rbac.authorization.k8s.io +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: blob-external-resizer-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: blob-csi-resizer-role +subjects: + - kind: ServiceAccount + name: csi-blob-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: blob-external-resizer-role + apiGroup: rbac.authorization.k8s.io +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-blob-controller-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "create"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-blob-controller-secret-binding +subjects: + - kind: ServiceAccount + name: csi-blob-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-blob-controller-secret-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.23.3/rbac-csi-blob-node.yaml b/deploy/v1.23.3/rbac-csi-blob-node.yaml new file mode 100644 index 000000000..ce06d862c --- /dev/null +++ b/deploy/v1.23.3/rbac-csi-blob-node.yaml @@ -0,0 +1,30 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-blob-node-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-blob-node-secret-role +rules: + - apiGroups: [""] + resources: 
["secrets"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-blob-node-secret-binding +subjects: + - kind: ServiceAccount + name: csi-blob-node-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-blob-node-secret-role + apiGroup: rbac.authorization.k8s.io diff --git a/docs/install-blob-csi-driver.md b/docs/install-blob-csi-driver.md index 702b04b51..839dc42bc 100644 --- a/docs/install-blob-csi-driver.md +++ b/docs/install-blob-csi-driver.md @@ -4,6 +4,6 @@ > - please use helm install method for more customization, e.g. Azure Stack, RedHat OpenShift support. > - [install CSI driver master version](./install-csi-driver-master.md) (only for testing purpose) - - [install v1.23.2 CSI driver](./install-csi-driver-v1.23.2.md) - - [install v1.22.4 CSI driver](./install-csi-driver-v1.22.4.md) - - [install v1.21.6 CSI driver](./install-csi-driver-v1.21.6.md) + - [install v1.23.3 CSI driver](./install-csi-driver-v1.23.3.md) + - [install v1.22.5 CSI driver](./install-csi-driver-v1.22.5.md) + - [install v1.21.7 CSI driver](./install-csi-driver-v1.21.7.md) diff --git a/docs/install-csi-driver-v1.21.7.md b/docs/install-csi-driver-v1.21.7.md new file mode 100644 index 000000000..7b19c8b34 --- /dev/null +++ b/docs/install-csi-driver-v1.21.7.md @@ -0,0 +1,47 @@ +# Install Azure Blob Storage CSI driver v1.21.7 version on a kubernetes cluster +> `blobfuse-proxy` is only available for debian based agent nodes, remove `blobfuse-proxy` parameter in installation steps if it's not applicable. +> +If you have already installed Helm, you can also use it to install this driver. Please check [Installation with Helm](../charts/README.md). + +## Install with kubectl + - Option#1. remote install +```console +curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/v1.21.7/deploy/install-driver.sh | bash -s v1.21.7 blobfuse-proxy -- +``` + + - Option#2. local install +```console +git clone https://github.com/kubernetes-sigs/blob-csi-driver.git +cd blob-csi-driver +./deploy/install-driver.sh v1.21.7 local,blobfuse-proxy +``` + +- check pods status: +```console +kubectl -n kube-system get pod -o wide -l app=csi-blob-controller +kubectl -n kube-system get pod -o wide -l app=csi-blob-node +``` + +example output: + +```console +NAME READY STATUS RESTARTS AGE IP NODE +csi-blob-controller-56bfddd689-dh5tk 4/4 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0 +csi-blob-controller-56bfddd689-8pgr4 4/4 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1 +csi-blob-node-cvgbs 3/3 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1 +csi-blob-node-dr4s4 3/3 Running 0 35s 10.240.0.4 k8s-agentpool-22533604-0 +``` + +### clean up Blob CSI driver +- Option#1. remote uninstall +```console +curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/v1.21.7/deploy/uninstall-driver.sh | bash -s v1.21.7 -- +``` + + - Option#2. 
local uninstall +```console +git clone https://github.com/kubernetes-sigs/blob-csi-driver.git +cd blob-csi-driver +git checkout v1.21.7 +./deploy/uninstall-driver.sh v1.21.7 local +``` diff --git a/docs/install-csi-driver-v1.22.5.md b/docs/install-csi-driver-v1.22.5.md new file mode 100644 index 000000000..045eb2ec7 --- /dev/null +++ b/docs/install-csi-driver-v1.22.5.md @@ -0,0 +1,47 @@ +# Install Azure Blob Storage CSI driver v1.22.5 version on a kubernetes cluster +> `blobfuse-proxy` is only available for debian based agent nodes, remove `blobfuse-proxy` parameter in installation steps if it's not applicable. +> +If you have already installed Helm, you can also use it to install this driver. Please check [Installation with Helm](../charts/README.md). + +## Install with kubectl + - Option#1. remote install +```console +curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/v1.22.5/deploy/install-driver.sh | bash -s v1.22.5 blobfuse-proxy -- +``` + + - Option#2. local install +```console +git clone https://github.com/kubernetes-sigs/blob-csi-driver.git +cd blob-csi-driver +./deploy/install-driver.sh v1.22.5 local,blobfuse-proxy +``` + +- check pods status: +```console +kubectl -n kube-system get pod -o wide -l app=csi-blob-controller +kubectl -n kube-system get pod -o wide -l app=csi-blob-node +``` + +example output: + +```console +NAME READY STATUS RESTARTS AGE IP NODE +csi-blob-controller-56bfddd689-dh5tk 4/4 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0 +csi-blob-controller-56bfddd689-8pgr4 4/4 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1 +csi-blob-node-cvgbs 3/3 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1 +csi-blob-node-dr4s4 3/3 Running 0 35s 10.240.0.4 k8s-agentpool-22533604-0 +``` + +### clean up Blob CSI driver +- Option#1. remote uninstall +```console +curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/v1.22.5/deploy/uninstall-driver.sh | bash -s v1.22.5 -- +``` + + - Option#2. local uninstall +```console +git clone https://github.com/kubernetes-sigs/blob-csi-driver.git +cd blob-csi-driver +git checkout v1.22.5 +./deploy/uninstall-driver.sh v1.22.5 local +``` diff --git a/docs/install-csi-driver-v1.23.3.md b/docs/install-csi-driver-v1.23.3.md new file mode 100644 index 000000000..ce1537060 --- /dev/null +++ b/docs/install-csi-driver-v1.23.3.md @@ -0,0 +1,47 @@ +# Install Azure Blob Storage CSI driver v1.23.3 version on a kubernetes cluster +> `blobfuse-proxy` is supported on CoreOS(OpenShift) from v1.23.2 +> +If you have already installed Helm, you can also use it to install this driver. Please check [Installation with Helm](../charts/README.md). + +## Install with kubectl + - Option#1. remote install +```console +curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/v1.23.3/deploy/install-driver.sh | bash -s v1.23.3 blobfuse-proxy -- +``` + + - Option#2. 
local install +```console +git clone https://github.com/kubernetes-sigs/blob-csi-driver.git +cd blob-csi-driver +./deploy/install-driver.sh v1.23.3 local,blobfuse-proxy +``` + +- check pods status: +```console +kubectl -n kube-system get pod -o wide -l app=csi-blob-controller +kubectl -n kube-system get pod -o wide -l app=csi-blob-node +``` + +example output: + +```console +NAME READY STATUS RESTARTS AGE IP NODE +csi-blob-controller-56bfddd689-dh5tk 4/4 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0 +csi-blob-controller-56bfddd689-8pgr4 4/4 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1 +csi-blob-node-cvgbs 3/3 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1 +csi-blob-node-dr4s4 3/3 Running 0 35s 10.240.0.4 k8s-agentpool-22533604-0 +``` + +### clean up Blob CSI driver +- Option#1. remote uninstall +```console +curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/v1.23.3/deploy/uninstall-driver.sh | bash -s v1.23.3 -- +``` + + - Option#2. local uninstall +```console +git clone https://github.com/kubernetes-sigs/blob-csi-driver.git +cd blob-csi-driver +git checkout v1.23.3 +./deploy/uninstall-driver.sh v1.23.3 local +``` From 7008dbb919d1e3f6b512ab88f076492e9df6e597 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Thu, 1 Feb 2024 09:32:54 +0000 Subject: [PATCH 063/157] test: fix sanity test failure --- test/sanity/run-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/sanity/run-test.sh b/test/sanity/run-test.sh index 018cd1dbd..7462ee4d4 100755 --- a/test/sanity/run-test.sh +++ b/test/sanity/run-test.sh @@ -36,7 +36,7 @@ azcopyPath="/usr/local/bin/azcopy" if [ ! -f "$azcopyPath" ]; then azcopyTarFile="azcopy.tar.gz" echo 'Downloading azcopy...' - wget -O $azcopyTarFile azcopyvnext.azureedge.net/releases/release-10.22.2-20240110/azcopy_linux_arm64_10.22.2.tar.gz + wget -O $azcopyTarFile azcopyvnext.azureedge.net/releases/release-10.22.2-20240110/azcopy_linux_amd64_10.22.2.tar.gz tar -zxvf $azcopyTarFile mv ./azcopy*/azcopy /usr/local/bin/azcopy rm -rf ./$azcopyTarFile From a43105e5bdd5cd7df74b561b436916149dbad404 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Fri, 2 Feb 2024 03:35:21 +0000 Subject: [PATCH 064/157] feat: install blobfuse 2.1.2 as default version --- charts/latest/blob-csi-driver-v0.0.0.tgz | Bin 5969 -> 5969 bytes charts/latest/blob-csi-driver/values.yaml | 2 +- charts/v1.22.5/blob-csi-driver-v1.22.5.tgz | Bin 5869 -> 5871 bytes charts/v1.22.5/blob-csi-driver/values.yaml | 2 +- charts/v1.23.3/blob-csi-driver-v1.23.3.tgz | Bin 5944 -> 5944 bytes charts/v1.23.3/blob-csi-driver/values.yaml | 2 +- deploy/csi-blob-node.yaml | 2 +- deploy/v1.22.5/csi-blob-node.yaml | 2 +- deploy/v1.23.3/csi-blob-node.yaml | 2 +- 9 files changed, 6 insertions(+), 6 deletions(-) diff --git a/charts/latest/blob-csi-driver-v0.0.0.tgz b/charts/latest/blob-csi-driver-v0.0.0.tgz index 3c231de5d9de32603ae219b3b162dfe399c62ef7..f0c8aae12aaf4f7d6a2b9a97513c39cfb8181f86 100644 GIT binary patch delta 5846 zcmV;{7Afh`F3~QKuYcrC>$H8;ZhzNmAGc55e1}^5W1+kSLSp`1>&djTgZq;-?32&H z1!1&{7VR4L{nei?mLO!!0bz6EOa1Kp_kaU>>#_+NOUCgG(8YLwjPTSb_MnS0B2`}3}Xrb8yLIS8QYr7g%cXA7eA1MmU@Oo--PFab_Mf>0zBJ5>Id z5aOD%2~?vPb${_ymjt~|qutp|kTM5)dU1_lMug;ei-D@n;nK8t)IH5Yg)!TLj6jgz zx0zyYIwWYd8~{T z$Fdofm{8zNh0oH+ET5AFmJrS9vRTv5uLIW|8*aMjmtSkIkcG^q<}JgoxBLK012V)O z)M^6wg4l3w+bp0`&9BV?OXwoW13<6P&uS>@|F2O9i?9I}=nHXOHD=SrK~u)jk_8Pk z2C1fFiho@Jh)mT(^aWG>Fc6^r=7@cv4MmSp$(ic{@2W3tnZ>wzXbFs7A;m0WQ#1}) z2>A`?JfhFIp8im36X>)Y?trMqC)mzTibB7#6trkuC{Arz2S$`I~2!v!_rXQh_ArB;Wu*5Z6gCsg4 
z^a|bmC@k#zdCN!fbO{U4b`nO{oG?!0^1{VJ;|D5~Ii)|QVhV|XFUSHA2f+ev0}^w_ zWmW078MVQe@dD7Z(^QMV4@p}-2k^8p*R1(Teunysh^3I=S+*Mp3EZ+dk;KNXqCN&Y zP=C-`-r!!Mnc`H!=0g5P>HyI7$h`j3bAEnpcS&@PRsrQS@;Yk_v9ahP{Z~M*=tr#Np=%UU^zg#)bM0aZ@hg|jl+3$bbfR$?Nfw4&(SL@o z`0|Jp(O-&rPw!19t=2~pKIQOR03r!x`+<@IFW0RT^(q)|IkEN=iNz*TIgQQ}X=FF@ zuQhdeIwfj3hbm5K|1l`l%6RxlG8cHLeKi&f-NCYp>UyW(%!Ni;LwQ<)>LEqutKJG* zy?;-H6wwQqPQjL4R3EajJqIUnp?_Wpf4De&C5&I4iuD9CHNCaw)slMrVhTyW9Z;80 z_{`kEgAcGgw`~yOXCmhV2dLEI5G`^K-+!Wbfr+cMQx~<1RL)5?m2;F+8bB4+w=SDZ z0|D2Z-7i<1PEt=bLPCYat{cIxKx5#QPov&$95+ttMcq0D{W@{SPNUOknSYJ~8a<174TDu;NR&~e8JM5J8brKN3@d? zF4RS4J)JAoKk8G8k-ORyuzw7=mW8Q!&spF{r@9ef=VwkNTu~d83V1iZS^BWT^&X_r z6Jv?hS;@#^&Dg!&l!4!{1!-vY75Wm44Xup0^BE{+>War8`gR02jH3!#I zZ*~9oY~1^BxzZH%I(mgxItUrhm3sUGWlEulf4oM${@YLQU!%eB-G9|<^z->>I2gWv zjrya})kv9P%9ak*i)KPSi2ts+ga-F&G)k4aAiJo!z`W^_Ni%#>(`#CC)78v_+4PiY0^0vP8Hkp4GAMXY6%_DTf3-KGu0SJPU2T=G|JymZ zK5)^H{u4t%8`gj4==3u_4EMD|HG3a`a`?Vlz*10|Gzq_-_~~-4D&pu%r&Ff zL>ZG2fbmBgY{fdTiyj_Ow6;^BBL#O~*e9WcArdy9Zw0Nz_GClSv;1TgYjlTT(Drce?;VS<@Vas*-1N{~- znVQwd$A6kS_cX-3sl~8p2%&s7lI(;lDH)?MuaRW`!JfMYVnsOhWzDkp1J_-}=%$Rv z&V7N3zq22Q(Gnj!<}e#;Q-pKzH{FS2I)k4Ckp&i$5J`Tk!JFWE6I zWc~i%X?0Fd())j>_2$jN{(qKsN2t?9J#E-~K!0T|nGlA3Uo;o(S`_|_9Xo2RMam== z4;pR{1}bzzmznT26z&$rb)^HSo6sVZ-!|OUcJT+IggQVaI#%Gq2OWBj!&$}XqxXhH zv z_J1*sz6i7txs96IFJG5DSVc_?{!lxYkX!}nBMqL}$l~{FhLG3*(x?*|1aY54@&$ff10|_u24ndNR3LE8ZqmE81kUBh2+q6EFF6sfc#5 zL?%Gue(Cg?VOF`MUnj7|7*c-m)rU`;V1G<>k#^>m30hJe#PRNgAgvE;hJM}D+>}do zmRK^5UV&sP23bORozNvGEMp&T610Mhk;KQ!y-gao(dSNQYphroU@OD^nlp6$HpNiODtnqbyDI_?UaQ4tIAGw8S&?_*gd+;0mb7&;Dqp_W^`3pNXUx zsA;cTi?dEW_fXrf*l{=rarXTW=e^#jKOX<10MG^O0Jk8_659+>!4+A6L`f%OO1RI*yg20y`sRErx&@b`uszyOU%%nt!w?{BD^v z;tI+l6|mp0u2_RQziiXMJ8Fu3QLLgT(i)*mAPuDc8mkq1Bys>IUB&VlmuJO>O`6*{ zy`^Ag*2;ex&o1J>M!;1oUmZJ$e~R0U(yvo}E6BV)ne0{6^BD8;reSsz?A`EJ+wQDI znaG7%5XJisP{(J4%Cql(xPKTAZu`UD_0?c_Q?w(7Nn5MMqD!dM2c52W#dI! 
zMcqOS=4o^!E!x1fL((#-2Q#3upm!#1q!>|*!4lfbUVtc*X;Q+G&a^X$)Ru96TG?SG zCZ;eCZ#&Gfl~ED>8UsY9^I72ea+#u6g1#^}FoRj?znp}wtYeeS_kR@ih*tZvf%rZ1 z&gV=ro4H+db8)>38)?}sq#S}&q+FO!XWA4KH{ftH=OE_Hbxc}g+~!jH??HC6l#FUF znE*YuqIZN6NwC`k7ca+PGwO&gI%=h0!VPZqx}ARQ11HQW?p$sL{{K&-{_Vx6-|G)= z2IrTz?*^CsBBSsfXMbLHtomp*C9{v%{{YJoO!ENIX9;s(IXIR&92UUoq+K<81@EFv zt3;A!b|6V5S!~n>l-zfl&2m#B+}bE)wN0F3hU)dK;6)7lbaQh(zP%n@{rf+~^z&LQ zfj5J+aDUmrVHF0q=WiSK(n9tA>UP}waC>?6{`Ru}xqn%eYJWaYuw)wGw^&Cgormf= zab+yAeW#UR=~Ng7`@_$J(be!{e|V$C{`&mpr;_s!NIOxj)6^^qMMo2E{8uo#d4e`u zkE_Vrvz?i;oihV1L=>;({eBf8+%?9o;!Fs)X#WWA~X74Vw zzpTG1n)`44upYBovfK=#1y1fl{`AR07oj}l*K47QpMNu929q{BH*Xxq{of;8$19ds z++=#KV0PL$idFGO9`ky+JcPN;?yxs@%6EZ{$Tn;Vn_`Zcz)pF@r$}V8oeP7x2@YO5 zY2q)`D&+r!ruFVu!feX_KRG?k=Kr)?r-%HX=V+E?rBZ%`$=Pq*{b+I*+clFRbTJ;7 za=!J+K!0gZUna$8huj@3CAQ}y_$?qDoVe!EXU>35*%eLJv*oJT5{!l6uSN>OuxPfv zOELv}X}j=|Ij}0HyH@ieq`5{#J?Q&$@PK1im*3q19i{5}EQAAxfKHsnKIt}_dK)_L z-6+`py*DXuDUk2e$Y9r7E)?>-BgjrZ6u54mLXQuW#GJ~>p>{&k2?<0=Fl$0dGT`~uABYlya z!2&K0*@7>YEm%0Pmq_RfSztUZCp?H&PFkNwm;iLBh<5cT2P`^Y zY=4tLs94-JbugE6W>(Jb*>rKPXGB^dJI_5emj@^Vs26ZTXW?BUgw6&t%2tp2_h1M5 zn$FZ2FUjBya8G0dEtb(${d=E7c!NYi7$v+JMVeax@-4o2V94Zv7Q%!j`i1Wlg$3a< zz^)Y+*T+(lSPpvLlc}=>YdsCKdA*wl-+yj?qaM#*GaJ>+T&&4%46jGAqPs~fUoG}S z4oA6%SeG z4{zU%u7)@LVXxRlTBnQ>DuH8Ll0_~_(O?=3$2aGfm$z>(uin1uelbXD@^6Y&*JjVcs0gB6FaO z(71nd`|7R*m`Z6XdQT$dRio1zjL+X*_HRF3_s(zny|+8j z%G?f>A+|Y`pll~A5bFX73Z$6@$$$2@TxR2SG0F%_GY^=;1=8MxB?eiSX`jiRjqyE4 zQstKM@k3(opF6QvFU+brYYNZetW!BSr;+cPLtA_qbOm`UxQuPTQn!&nUJ{DB>?na> zgZlQ;7{||ZVBvXY_0e=N+{bc$_WcjN(cp7`bUW(5za3v)e7L+(ICS** z*!10CMyP0(hER%__$o;Sap%+*S&t$Kx~mQ>Sc->gf@i2EF#FuT>Il+D*JRK1xneC6 zbDGHD%JE$7%u%MdF_Kx3mQ%UqS&e6c{U}<4}le6`O$hk!vP+O|ZuYap*cR;V5aD6+j z8BWQvO$7#at4A^uQd)_$WueMe=KgLaDu+UmL!n3xRmrxv8EblXJXE4o;%}Eql(f}Q zU0kOGT&F}%Ge5VN2hg33buE0bwR+M&G*ralp;qQ=)XMA{V|k7mn{{W&!gCsDKAvIo zzerLrV^h7j&-0h)hWri9)6OWj}9_=Of=2($n`(Eu}$_1CEG5`Urp)h4w1kC`X+KS%BRp!@AW zP~X8>nX0L6Dx#IH-7KOULf9@AqT)C!jtLe(x{N%+6&!X%LgyEp(_utC(e<%Rh2*6W3!JG7dEBE`4 z$E}0^_jy__fRQ%z;+-Iqxmc)3)}GKd*pS;_9JPfK-1cswYWEVQF8^8A&7il4lbVpV zTu*G`SOUdWI5uz_Nhi@gZ2JA&gKGWjwC(l(_9uaj`v2rjM*nwCjt{^8|19mr^nYwn g9yI?!^B>xw9onHC+B3KR3jhHB|DnFVDgdYe00g0)H2?qr delta 5846 zcmV;{7Afh`F3~QKuYaWd=A?CUa{OJZecV2M^BrpKkA?CU2#NW3ttZpU4(?CVuunb% z7lhF+TC{7}_g82ZYUuFZHwY-vbWlt;;58EE&f$Ko{cyGQv}%*n=+0h*WK{ z!ryAN8m-z(5Bbm%ExY~~*bPAJR{^YF|Lu0W-N~;1<4*gq{(qmN5f9IxTSI7q1&qLF zUF6xk;SrlN!KSidGf%yB*+fAh#KC8RNXD04)V%vqG;KkekrZhNiDxV2q5k#**S%&g zv6o#mm=2k|<{*Hom$o3+oGpmb55NluFd>?E!2~!32||%j^ejK_6q8&WYxS~$q)Qe+ zL5OS4CQyxH)PKcST@v&BTjI84;4>Ee5JOhfCAqQTH?p6~=4}G6F$< z-)4%r>5!n+Zv3lpw4PMWc3I$r+r)J+afyAQp44iuUZD|8fdjPxNro5nR82PDAe zAccB70Tc*u(3d$-O@`1J(lRO(3J%jQnoH@6ZZpAjgMYB5!)(!%Slo%G&FGZO0uGkX z9Lr`@VnTs86+TNNvwTh#SVA=ol^N2p#tT5tPE#!cKO}AW9Kh4YT(jmU`5EdjB9=meXW4EbByh{-L=qdjiuxGr zKz~7Nd4qe2W{OiapUn3emjP8TB||)*Xtn4N7Kk#+8Ix$TL;|E#OB$<1uhCr?Y>L8U ziQpc4z6+?U{?34O(SyQ$Eg`smjUH=xpvPJ*u_RW)Rv)MT1(89+ybdmZXmYKgAQnJRr^nX2H&F-qtO8n%|Lj<0<;3y>!GmabvYF;IBzz6CeM$yOdAnIO2N@6*&#Wmxyi#q>0JvuJl`{saM zIgi0QHN8E!#KxkF^j`tJq93u6hptgb(ZdVF&$Wvw#IIa7Q!?*%(TT=oBv}}8MSmN< z;>#maM1LvfJ-s)bv|1lY_>{wM0f;1&?FUK~;<;2=gBo>=U6EDD9I7~_{l}nGE92oK$z0%}_SIM{bO*~Ws_UJCGZz|Z4drPGs)rPruX-zN z_5M8(QbaFcIt5#HQGLk9_8gqRg@1Y_{Ndv8l`wvFD%KOo)b!SxS4-;gizy`ic0gT1 z;WKjs4?e*1+_pi8pNX6g9H3H*L$t^}eE*5!1tzZ2PF>V4QaLBpRL)UOX#iDN-@0rv z4Fp_scE4P4I!Qg%2niJuyKV%(0*!%JK8_v4&c2-n_}{-e=W@rKg@3R=`gsf`6w|@dZnJ?y%X{9nnro zxKJ0F^>nUS|ENzXM(%1;z<)B}S{A0_J!gR*o$5w_ou4_8a7ArUD&XDtX6eHU*L#pg zPmCp2XC)(xHDmX7QwDy+7NnuoSLjPHHncM0&S#*QsVg3X=*y6)=^$i0SL*Q(lqrQG{_z_1`foqIe~kvicYjx}(a-0j;b8dw 
zHR_K>S0iPHDO);JFPaJUApX1N5*pmA(I{2wg6yK^0`sOzCe83kO|NOmSr<7lA(+-` z%BXW0c2-JLt7!sM4tgP(SLhd}ZE)=ViUw0{M4zAc)M8hLmpQ=b6BM_w5M)NdK^~@f zrVPOlFCxYQBvkho>VI4&fvQ6y7Wz?W#cP66okE2=Rceqy4}BqWuEDmW8j=|Fl>CLb z^srvqlXYmjw50pRv~!rW_LV_*h4lrFZfFb+uK#{%_~t z`oKj)`cDi6ZCL-EqtoNe`aeB6#DAZsJv=o3i5A4`YU5x^T#!p2&O9|O_8hwCpUua| zT1|uX?|n=iZGSV+&}Y4_tQ@qnk1w zJNE@D{?2|JMoWC`n8R$WO%cw;-*hLA=?vEPL#q&-n%;@<@xicB^%;|DUDZ5$beNPaF0gP=8rVCWK+%7tKYx7KJ}!$BtTSkur(J zgNECKfePKwWhQ(Lg}a4uUFiVoCbS6Uw+(l-UHpM4p$<@qjup7@L5H5>a8@z;=)EBk z?Lytqoh$A6WrEGhj25w)-{E4GPff>Y{e|Qc6R#bODNeTFrRv1}nClX2?W|*~5qY(e zeSeIjF9K~uZlk95%hx3jR#6j!Kh(}8Bv(QDNP}lKviSX)AtW|{H0neKLEI;ie9$e} zIy2q=KuM~Z0T}^Q54EqY;3GvY-URu`>6@gzo=k4minmGCiZQeW<9;E8h`2h zhen);msDEeI_;u*VscIYC`%JPKIY!7!`kH`Ng0CWL6z%2-~#5O}za77j%QIg66dDiZ< zTB6`R+)IuvlM-h|d;D2?b6f%IFn>PwiS?T=&Q4nEASMA9@~qt|0t#b&)=U}qXFoB@ z`1R)z*e)iXiTdRj0J_M20VDX~45!{96M*ZYW|%?R5X`>I%FuNpb99q<#95|k$yn&X zqYh-|B*etRO#nU0M~<7}UhxIeatKenj$`Ghz|Kfniy>f!-Gl|w?j#wGCVwpozgs4a zxPr1s1?=~$E7qXSFWWTmj+$a$6szcov_>csNCTmwmWN4 zCURjGMDhLu)bSai^6dK`E`P>@+y1b3eKi=~6zzy%($;FR=n}0)t6NMqk>XWM*?5sw zQMV9-c^Vx_i#BlWkhDzd!3?M@=$%O$DMl1yu!Q!q7a+=Hnv`&)Gwn2R(4p4 zi7Cv(+YWPVWmH7J#sJajd=_}VT&C!ipfAh~%wSgfFDIcZ>)2%TJ%2?#qSgLvAb!uh z^Es2uW^NbVTwL$MMp||YDTg2xDHrC`nKs454LID)IfyxP9h24=x4D%5dyw5MC8L^4 zCP0s^=pCU%672TC#mh0+j5?x=j#??0aD!XDZl_=SzzK7TJC~b*|Nqmde|s_N_xi(| z!TIIwyTN6@$S8crnSYlZt3Fyy$?PNcKfrPX(>y@*S;E{`4vwV`hXrstX;;l&!MiBa zDv_j_9Y|7178|tzCHLKCv)q&jw>An{Z4>91p?WDI1|Ec+Q4O2y>r0M6?K;d`eFS_N-(Q^iw%i*ZLbuyC2?}?2x;IrNs7p1iuA@gA><0`pg;7DZ8S{dbV5@TY|AL{MAT77#7Xe zcS)vTFKrh-G6z=Wbk}NLgf!Qvs0V$24jypq>hilgprce>pM`MX5YUOU*eBg)Q*T4( zy&DDFzqjX@&fptTU=`LsWez;)La6oY|EP8PCVzYX+nbY<&SCvOM+@`+FnElPVTbfT zEgkzz{L3KRNo@qWzmM{ye#Fb4<=*-k!M`lpAjy=mq`hCPvn39r2xJ%|>Zlo`g zGg!dIAzSdpvIPqV_7VwwAq$MB1*PC>h9ZEt);ygecs=N)+)3;62orz~716F9<$y)! 
zi+^nr2o;OFrVi$E&dkc$J)17h^^8a>Waqi3=JEh#0QCY+=q$WTgwWYwM%n6d{~qi> zU(=a7<0Toq0q%)xpv5w}s(Y$Wy*_XOTd;PbA z^Wp8g(be##KkOB|Nb8hQLM3o)OR~r%DH=?J;rQnK^78iW<<;AFpT>oO;Pl*UnSV~* z=hMjC$y4AFHpC(#x1al?@!)E>6S2(gK&8a8BkSyAXq}B?>+EH4ooz?AF3g*wS!52B z5gPYzZa<#?`}XR(Kh!bP+u^5=JB=-O0aGbWMej+ZylQlMgYo&>%l_@B>)!cIzxQ@0 zTAACSGQ>8A5|r&^1!7$wL4h>0Ab;8Zmdk9sE=Cz)Y32b_xIo&Qu*4wiGVL?DvoXHs zNUGd2K7L5-{c|Vw>V;V~XHDT*oOLP(=QQ$Nb7+e%gRUTN1(&hySL!wr$V);|mmMYW zYf#@_8sqqR4lF#+tUj6!hWl8~&%XbmHyV8Ik8Vf(_qXG#ix0Qg=Qls)F@L0GY)$4N z+{a#os1@3q2J?nX#`imr2#d!YI4ee#QSUqEHl9?me0NF;NbL9p!!5>@5Iz$L4u_5& zADg}#%m@|D(hy1!6JI5%Anu&{BI{8kL3h=G1xxWzP4EoW1ZJPxR~RmiJqM*B^Q@NL z+FOiSt5nO6ukg(w%--?4ZYqhs#yJ}J9V{)ZZ*sQ25IMJq18Pf^`G0j)?GEU*6RvN^ zHNz=cwyD6tZuLlJLP{%7b8m%u4@lb5OLi*I+YALMFjclD10V4vuc zKhZ_M)a$=$ObNUBm%6{I@e3`$5NH8>q5)#tGWC4WNyt4(P2A2Uzte~#MsLHFB% zpuU5%GF4OCR75LXyIDjzgs@#KM8$Dd91|>n${l7d9L;M~_shs_9+oYL+N}0EpeG@; z8RHJ2$t6)lL)XUbWzuSvW!3Vl^(~s>Wh?qt<~g2=}yn5l)|62DiQKf zrhhG&kUdE7cJuN-u*dEsC{<+o;o)$5P(iDD{(pbUB_JEl|LvorPWJwv^{%aBScHpZE gJZS!d=0CJUJG4VPv}bPr7XSeN|Lz~lF#xCl08V13RsaA1 diff --git a/charts/latest/blob-csi-driver/values.yaml b/charts/latest/blob-csi-driver/values.yaml index dd732ebc5..07d8b214b 100644 --- a/charts/latest/blob-csi-driver/values.yaml +++ b/charts/latest/blob-csi-driver/values.yaml @@ -120,7 +120,7 @@ node: installBlobfuse: true blobfuseVersion: "1.4.5" installBlobfuse2: true - blobfuse2Version: "2.1.2" + blobfuse2Version: "2.2.0" setMaxOpenFileNum: true maxOpenFileNum: "9000000" disableUpdateDB: true diff --git a/charts/v1.22.5/blob-csi-driver-v1.22.5.tgz b/charts/v1.22.5/blob-csi-driver-v1.22.5.tgz index f8beaac8be0fa2c7f2683c26ec1ab28663099fe2..3f2d712c5b667e962eade50ee5c46d3e0e1432e3 100644 GIT binary patch delta 5455 zcmV-V6|m~05y8y>N8#}Hbj>pLyc=KY0xoj+O z;D5Kb(rUF@r*Gcq|68qA@&8u)^yItto0F5bt<(1LaqGKQ`+rUQ^yoX(+8+zmB@zS54l2l0ZC=Fq(v3cpWhu`8IrbwE`&9s|=YPB1ZncZ^|4qAfnE%gFNPwr% zsUbAR0}9n@q(H1DD@a0(KHlA~rK zP#Rd`X*%oGgD~>_A@hm5=%D^&z~qpF04i$;xq=Kin-irffNw@j32vBH%FsM+oHSbN zNcvBFJ3T)(by;Mg?dt}y?TYNVFDbe3rI4=C?LjBz&IcQ zE(a;p<1wH}fQP=$fNC;?PLUQ@p-^y`bkI!7P;{CZrW=GcJ?4t0#NtjgT}CHl8gXz! z<~c5-5<(LSys7Xx8krMvGRG245lX(I`0a25?0#X4ElMVwYf9`%14i3Yz4WxLwggI!7 z38Nt=j1#%I@UhVNfeK|<>A_S?ArbL8nFHb=Sj1gGV#c`KRQg>;T?l150d(&))g%aG z(oV<$0&UhcYhjk3fgTaD5E25%^&=sHJ1!@ZxY$?JN8m;ZTFV>UOEguSs{UkQ$heHC zdMO#=5k<>Me}}L@lu^!@MB@b#Af=kpSWSA3?!sVG6vhh#_c#cBKppkF0MbDZ3irAA zER?9XTdg*Ftd)TtYqiXjSPGkEYACr>>ghuFS30XZ2rC8V=$B@#@v5Okh+XI)H6V`Z zzZz1q#=iD~Q1$Mz&QknmOvnMquZ-U<^@N-evFsn0e?`8hCX`169{b=aC6F?XJO-*? zCGsEy>LEtar%^wtUPDS^IdR31aoIsf|2REC+2OZow*Lh#OK z<4Gi7$bZ>=hRQhiSYsqqNbLJbg=GU$;H4|0-fp~UoYZaAjx6<#(uzloqejbA6d*t0 z`>PP>d*Z_&3Nkf=!i)Mp%r?-}j5YV?(8ChC@0RrvdKcK8!4S(?2Q>q2rsGiQ@*5Js zNa7&uQw`E38NC}QTWbhBAVL_+r}g&j+p?a0R)5V`xxsm1W&2bj_%}KcU$Jy&9-DsK z7VV^Pt$N2ysB^{oC%q#va#x!GmJ!#oFcBX(i^60FcMaJ4i4zG|)CD!&-;ZtNMf}wGO}1RHf|?p%r{&?8d_DwE(BvQD}(KP3W}M!;xS5okD|$h+;>p@YBX|Y z;C~0|t*-xpjk+H%m#U&(N3YOQ-yq|#QjdS2OlcJHx7VoKd-wUnYt$dSzj}>+I=>$D z2OnOe-u3m>wKBPs-5l!|&4hZ8{;v6iM)#^UN^SZeJE%Fwyy=s1Gk#Ljb6RrNK^}|= zrnQMvUx}P)uG(? ze=#oIte19R9mb>pw|gtV2|HQe;3q{kP_=cA=fhh8M$~Vl|FDD}T#K#}dYi zlw;%EZPe_HYmk&!$O)AbRR4=`{vzu62AIF&EhG9}`^=PEMQ+AtC zY@&+E1i(0<4K^Hx1xJOk$sZn2GPiSM(?HfrdY4RHQ?aN_t64S<^D2J*44>*Hp{^f! 
zRjsMu%w<8ysNO+j{Oe`75Px&&u*udfk4;}^yvVmbiE<>oPm+;8U*=5M5iZjo6t+^8 z-_frTleu1fe5|PrP(#d`S`2MXh~=}9Whb6V*}#N(jU@Xw4*V4mOTxL4HPb$fe1933 zn=qbw^aZkybC|}?GJ~CRSPZZ!!lj^_uEa5&!peGR8JAPtThW?)!+%~{=l+pO_4;oK zlI)lkvU>eLYPH`M?Ej3136;YGHg>I%>KOEkfmG!(T2JzavVh2UMar3S5MszIdfHo5UQBynU>yk&yP>I1GYv&4*%gB79!3zsn`hGIEn1w~x+zaiCSIG(K8%NXhQ*MB-r+Ut`DxY2 zCp|RM?7OVeisxwu)ia%I>L*#6=<%`iZWZp%I%tWf=<%^`I=~mOAwT=0nO_GG!h9;S zVxYRc?#$1Q>ZOO;e#M^0L5Q>Oe>m@UuY04>-zyP_ZrGdZ8@#yHIn%@6NfhY@vHvNX&lvTefQNhWbx4EKsJkd8-q z=5-qRMt>G#A!qW%fCY9lW<>ia%W%?ZNif|p`M(>?_+0pXzY~51>hj#n1MfK9_eF7& z$)2CqRRU=s^=K@o-LWVEnCufLWL%!vt&=>rX|{@`XI9F89?ve~ze2!Osh%A>h<}dT zwbHMXbT^QBy}j70sFyL8E<-)zlt=6_Wp7w0tC7ZIRd$Ox5Z-~Vti>fiPT z-QiV#aARB6;tbE_WN9_@zmA=3CLh#J*|>t#kY^b;Pm_J6Z57sDMaLu(Oo2*E?LrcW z9Z`zG5!zL5K~%^aso+TG$e9#q$9OQEVzV+GW6hx34s&WMY>0k^6`>RH90)?W$kD4n zZ-33|D_~aouOy*M6W1i?Jw-jD<@#(Oek;5SIg`v~eh1xL40mB8FS}OCG029Lt=Vy= zOfc~y4mUFnV#a*WWGSX)CS~{mWT!~UsOE|e&~3B5Q@ehG{Vw=;F#?xSPjt|6D+d!V zaLd>2{Ob@nVV=EmwHf%opRaqj7uUURZyj*cKfkKjH8r zEUsabb`2$eJI!XbDG@Ji6pGp=%@{-VdQtEa2L5?-GaTIxudn{~Z#(_67R%r*AhoV9 zYdGA5!R`6mhP}K{eYmiZr{_4yy^?Y!Q0&@tB1USGs<7Ya9{I4(TuS)v< zkG@$?S*@6EhEa>B+RC5aS?D7r59Rr4Rq-<>Ok?t9=lX@i`0%@g>!8Epikoz;CCpAM zhg}t~<*}@ntK*Q{tPXo)r+O7wi)_u3ur6?aScuM4hiz;kTdZu2$YwZr>CA^i`EIlP zpV%tj{Ysd1`TuXq`9JN}A^-nbisLxBlpkSo_8WIUn%u>9&148&jQZwW-|A$bypyny z;ZZhN@qLG37wil*og&IJiK1yBmX|(C{9Vq zBHSf&u+-8Q$r-e8amW^Yv1~zW!(Jj4p)Y2E@w}i|E@`jFJ@RlSpe5EKJovQLwk5OsgI_pX0F zxP5wx^_owoia+O1dd%v=B0#2gQ-6l-JD-u-oCrMdiVbG$Qo|W_q~?U+#NuT z!aY6~zq28h5V`%*yB_tg20IZe+$dB@tk|-neKhT8ZQG9a(zv5_tsC`jZa>-K8c z8|Z-P?cnpLok*7MQmUk>=skalRInMHZhv(C?y`6LdDuO_>2=@jM5}PoQ-Rp}ID)c% zoJg#SA}Ek%5hPb1q`R)Z0@*6ny^!~sqJEBq$Sq^rhosy;cT%pNSDWUWDLnJDqm2V= z8u_mIul5_5H;^~WwP^d5y0rw#l3=T{qXd2n>f1|W8v8DR#T$y{py_`=c8KNd?E4?O z*ZnWO>)Y$zhuhKB#mC#>`OQDe81gc4eGJFblInJ8}DboBVx4E<sxx4w!5k;iM~8J zY4{y19IWq8cD@ohbBG6ONtNY!wb{+fD?41@Q)`-2F>Q06`6rwW*~&`a?NrF&Y{ubi zMhR8LYPKG0eo;Fd{@94WT@HWb7yHe{bwa@RD&(~AnYuE7u565A5rV7Llm04?jl;w7 zly7l7W!Heia~ywbS+xbUZm&2C@f4eH9=;=<&Ro`7&dP$WqwVc)ce*BzD9DA0(#j^E zOBp-+mB_=MZ=UXIXi~wcVLgNfr4<}Peiy2)2F_N0SqD{QolyCau3u>AB>UOXy|xx-1hmpLi7z45h1 z3O8B>HWz=#(kkJsw7*ueR%Oy{a*FeCyztLes?Y!MF?OB3Tmru4{NL&E>2cxwU;AzA zJ(=0om)kcP&QoH0&*)9Y~N+~CSnL%L>c6*+J( zfm7^fFH_YQ-|l{XkSpcY(H{rEA<;+vL#D?wMuuU)r zDtG9)xHYd)J**TN-6%VRfUkpGW~x`MThJ`g176J{|9UAPJ*(DOg}svJOh@p z>HhyImw>F<|F@5iOZWf0IX*q?|DU6*+5bQ5LZFX9{E?P`7gGG$e)yxP{OrXE&!X%t z!f;UY`!Ch{KNbLN1mG#2;C1@Hb$U|J|8H6c|L^mZQivvR=%p(`Ap_H@NLKF9*4S&1 z+g}{Dg%aHMZlcZZCCXj?v#Odw?+`E7AuG99$FT&8H{sa8ttFjB_pt8!xd+wy-&3~N z|J&~b*6RP0w*~!wbaHa|{{OR-7t{Z#L3z;p2hD#dhjJ)~awyMS{x1Lk|NqPJ&-nnT F007la?q&c0 delta 5418 zcmV+_71iqRE$uCkJb&AA+c>iGtgk>Rv$eB(Lfw4Htg8IrSV~qEM|N3GX7@}@xgfG7 zVM78O0935;_}@1v$#?DJlao%TbJ{xYeAj9pw|`Gt-=Wt2Sg0Di9tf9{Q0j;0zeOC-Tc3^5NHUJ6fG$P@ zWTdA?aR6OZkg3|di1DQxeJIOPod0v|M+~@HpQDffPoY~w zXp9BiK*+i%aDRCtATDQuO=QDmfqLt+v4TQ~L&yY?j4!&V$%JTriN?SwNDxYnnu$PZ zV2P*ctXB`h$oH?APuxWp4JJb-uQ>>yvWAc=$TeqkqErR&&4?+%4bw^)nn#V3Mr$2O zpUi=R5Z9cIVKa(Jl~7g5(CakXt@Q*c^Ps0E+6_#JkbfL+(NI-6o~8~@s^?hvaXkmF zAU7b$Z`<@RHx+Utx|USU^;zV_3&wRc_KAC;p44iuUZES900-&+q{USz6dWd9G?OwE-DZaA24PK)xuPkt_#&DvqZ2ZXI5;8m z9G6jv2|)$kRQMc?%n3P}V+l!@E}Av{{5taek>RF`e*UHQ3OUFuZ^0rSyu}At7?2?j zpjH#W=fs6`*JTlvs()P$SV9+*dICWKRg=5|QUNZL4+AKFzWgX09EN2ZNbz(DbI=wO zM%SD$PUPak$3o)=DwJWR2U9VHM8xN04v2$b5qANJ8RK$O>311*A(ZI^(7n@ClOT*q zJ0S-Mv{~1zg;{=vdPKxRNC+I)kAwv7xSUAhVqZ}ofg34kEpKoy(NuA&`jdqr<1(V^ zrDTXl6fGxz9l`=pMmb{=jTcCOlxj+2HR(0F3xiEj7%vdq;~?|_b=B_zNEbaQ+~?wx 
zP@>*$wc6;hRt9>k)iP6JDQuRhq2yAjrwiR*>8$P|tQ44|Uz)YXtA-XKcA<;ZfH5{9js{8f8d4I=i7T!dmtEBP$LZ0red(J4_T?-E>(un};1d^%F4DgudPP5C zB@bPrkdlWNhM#L6Q%GO=Y^r45@1hfp%Sf^?>P7uEGj!I=+@yoL(21l41T zEL61>wtD}b2q}^mFqwcWyQn^7BXXAOuHPF^aD_Z;H}Tb zlSsfde`og@D&yQ^jge3xvF|4pmJLXOm#&O@yK&q&soSb`EcH5R#ZIG@R1_dT;`^%* z=sV)WFbXm?gTjmYKg>4J)QmOvr_jR^dT*EY5_%WdoxwGhvo2}|+Dylx(&e`#fRV&O zIG`G&M>2XhP`1`J@PG(mET7h!H*d;%_E|Mwf8_?}g_Z47iQwPpM0~~4oq25fZCkXH z!nNuhGoj8E>mT)w#K>K30$4^|%fdvw=PU}79o!9I?81>WarG`8|pz6LQ~0^{dgynSmdu zf492+dp7EQxLm4=dL6w&OMQck!%98=fik60#NS?{UjOZ<_pi}l`0na8`sw^;I2gWv zjrun?S2xP!Qg(B!Uo;cyLHfJq6B^yC)+n{UQCYaW0 z%7F73_Ld4#t7HOH4tg<}SLkP^?QQJ;e}V=RZ7`po_ta!pMwZ#E=-m=`un=TQ!9xM2 zc&d!P7%w8m+9Oog7iteCfvOE57Wz?av1@`-?Kp+nL28ge4}B$aropzO8j~3Gl>EiG zbhBRCfpsWPDn1koH`-S$Tfc`u(#V_p2`=(JPt|DK$lwh#W_=P0*Ie+R9K z=_q|Ojkls6gg#q<_Mm7_MXYKXwc0I1?hM>7Ymgb&hQu-wuTkg&7N9ne-CC?|UFjpo zp^4>s_V9ojH=yAfOM4bQJ}QUdV?2C`HOJwDcv0o zh76w=3R*M&J4dI-h53Jaa&n0Oe>_Whcxe6|%}LPJR>Fk%AQvH=1*%)z8FbO#n~#sR zng;9NhnRZWc%ZTCSKAvpWkAC{JUGaM388@M+6Yu|b>yV4aO6wWFj)qAYBoNbh@*nM zngi#=1N6JHRH=(jkMy4;FxDZeJ1H`tp8i|&R=dznWy6c%PO%zCo|R>!e`5*bMar>p z?lx+6#x+PvEaZgB39A1^IDZlKd;`qi@s<(&u6<@oOV$6Uj_P;yT^hrJfGNApC^k{W zWCCEE&;}b0!-Av2*yIlnD4E;2v1uS{CA~|guBlj5rqwJPhj|sheuhu=l2F%=ysFkz zaOScgWK{1UGXC{4T!^`Jf7oQ}mdB>AGhXD|okU%J1mc zh{;^9K0eme2B;xsO)ZADCdBgD$g&g9q-Y^|WCh0F||DLKqH1(VVwyNz60#@Tj?#D3eq? zXuLcasMz;hWy04`yjmC^mX4aPLyJ&(+3=Uk#qWp`>H(GLSb>WWbi6r@VcF3q?+uA$ z73#)*UFps)Gi+|mf2fVs@(P!-d}=Bt^Dic!>3HpN%yF^>FPnDUkEJT9)-HOu8i{)= zTgQ0(63|BCKWb_ReO>Zs87eXOW9?i)av7OVGK}vkR8S;tuH%ogpnNqLWmq}G@e~WCfg{9hg;uS-e3uD_Q zG6Ry<%je7ti^?VcI)g36kaLhP2R?6tG0{cZt6L>#S#^*Gxif;aI+htbcyoPIE!9P0 z+2HvGBy)ks3d*a5F56)R`*5A0S<*x@gH?K)HExsP&ZlTBSr_0c!~U8xwFga~Bc%4D zU33#sNdoAZe;wq?_GQ8&DdAkqWQ!K3wrwSEL1rl2jJSvv#K? zEU)2Sa_pEiI44=+&+=>I5?F`saY&qBLveP}S_LtVxR7V3G`=uxd*O{Epbz_|7hvtWOuEU5rELj?66WKQ5@FbHsErxr=7f8n=Jo7q@ zd?Smoe~>fzV!#5s88f2Y$ugXDS`timO#bf%Gd>r7-|vK9fx0~R^1wSz_kB^^WU}X{ zb(KIGNIe?MX?G+_04Dpy2^p7XcIzb1ZJMoO>6w-CpU1O{_^%LfRjOyl4&tBVcCGa5 zB;5^UUT-hag8d%&crgN(QBQQyQIkIn6(w%^cNaJPUVnHyIKRAmH@NKE zjKX)E1;w`N{n3O>KjQEMEN)<>Q=ZmxzO`@>r;_SfgP|E$;#fpjzHY?_)yq3CGBjsFNn*LTo*>v0)Ad$v7O zv43)*p;kocT;A`f2;r|Vb`@tr_)QnMEUI@7*t??s!lEBm&!h~q+CkVbd7Y>}0grL~ zysT!BJK>k&AQlOPUr5u%j^vS$P3==~Wno0N@0cC-5K3m(ZXL4cNH_D7#iv{L?jy6} zwfd?O$lZNoPo%Zk{$f+_m(_Rn93HX1I)5`vJs%vSz#Kv&0S<7^_?gZi|Le>8tCGI| zqi@zzRx75PVbtQOw(_TU7WzoZLwUYhRs4(z)0n*3xqjg=KKvfxI_R*t;wD{d3A5A6 zVOPa#c`WPY>Nw;!tHa*dsa^%vB3rX0tP30#qBGTD8=J@$D_bM784g}L^Wjjw+kY(o zC$`FWzY=C${{L|~|EJwL!2GZ{h`qk%csw>lXp?<6dw z`0S9ogQdjo>;`^~2nR1c{1`HCKqu^qW)HTNjiK`vIyx%e|MceM=#c;SEG5qW z!w@h!f*sQTv~=t<@vnezC$$mjlhz~2v4m-STwIycgig&N?8Jg99$qiFJ@RsH*rLwuElB?1#)ha$~&0OgY2 zKrm!-K$Dsfb$_q_c5pttdv|j+yzLKrc1LWTGD@ffj$KLSrG!RiIDfiEv zl&j~}ruk+H&-|>jabQg&-!=c$egpFc@@BafZNE~tmOxn&Y*luYz;8i)dudE#-zBhk zL$Mq*9e>CUv7DWK|3mL)@VS3;chi4=H@dp`aCd!v`_D3lyo{~L8if0F8=n-b6=yK7 zx!HTa4N0&B%!4yKs)BmoF)#6C6MJ%}fPQAdwlvp{D1&$VOLD%Aexza$5LIT^T@E zHb$`s!PV+Xf0f6^;o*47w>X}%Yrx?-j(@eR+5%d)SDb}-ip@6<-x5z}E^94kWkJ`` z_V%|sU6V%?PG9!lD_|D0?`Bwv3xxIL=f9)DHg z>RbobUw+BiN|bWUS9-=aU&r=72cLIwD-Hbe)X~;?B`X7;D-XM5L01Gn*BzL#NwKrC z){}h6`3>WmGf^wdo84L5iGvF3=E)s(mws|{7q%+hBH{FScZ3pgHid2AG1ZzeaVR2>rj_3aCF!KdHYwW!ZqPw*x`f$0XR8syW+aqm@6q zX`>uNxIPwQ!**`iCYS@2JM>)Kn%AfvR*_rZELRY<9PhP3PeSN2#yvu_Q^H0=pN-o~ zr_~(Csg-BzTS7fTr{7}I7Jn{3cE&NO-H-f6h4PR(Vg+Ux`?(==Z9@`=+WkzKiG2pk_Bv5qs9MPomH?pSLR#+OZXOz4|nT`s~_C z$@g>Xru_)`e}Pgebi~FAX|MW7jY#EaEjuU4wt7CL6o1yLLdav8{(q&SL-ruS+x5%; zgEe+1LD@v6A07^#0ZZ9*|NoRrK-TR4+eb&G`+ts)jt~3)=O}CT|IfM*=wlFnq$S{m 
z6o0lK{wOLxdvU_ED0_=A9Mt^&OSS%w1ppfXc#0=@o&Il~o)q-|aqHm!eV$SZ(c}%i zbR{TcU|JQ)${pGo8*6g=i=(zsg4^CrwAsBxxyyf6RWs-v;^jJIB^T>BmO$|)92>Z` zq_gNA)_p(spj!WX%J%wy`<=jA{eSYNp#M83$A|C#KTCNr{hu0?2hD%b{D*QVhjJ)~ U^33J`0ssL2|7}6~C;+Gc0JKrW2><{9 diff --git a/charts/v1.22.5/blob-csi-driver/values.yaml b/charts/v1.22.5/blob-csi-driver/values.yaml index 7374e5b01..454c2dd26 100644 --- a/charts/v1.22.5/blob-csi-driver/values.yaml +++ b/charts/v1.22.5/blob-csi-driver/values.yaml @@ -119,7 +119,7 @@ node: installBlobfuse: true blobfuseVersion: "1.4.5" installBlobfuse2: true - blobfuse2Version: "2.0.5" + blobfuse2Version: "2.2.0" setMaxOpenFileNum: true maxOpenFileNum: "9000000" disableUpdateDB: true diff --git a/charts/v1.23.3/blob-csi-driver-v1.23.3.tgz b/charts/v1.23.3/blob-csi-driver-v1.23.3.tgz index 0b637f99383e0f1480e4d2b7be2df696f96e0e5f..7a633413c8ffadd040cf454632208c8f81e26e2d 100644 GIT binary patch delta 5820 zcmV;t7DMT{F1Rj`uz%mQ&W?`T$KSP%PL9q_zC*43u~6OuAu<21^<-Mv!Tm`Z_Q_}9 zf-u@ai=!I${luS*_HpC5=76v{@uhxv@q54ly>;0HjV0rF2Iz9!M@D*T6noG?8JVgr zvKU`_(1*5a+4aA`ZUADx3Sj;EKRP;UwX*B~ZKsAa{ z7hiQr&}%o2T7R1fQszKcFSHTNh>#p_F;LYxT&5O}x~Ew9aWeVjOvl0}^0!kU~A4015;+=*t|a zCPU~9X>k<_1&3(|&8753rYl3ZJEs zSw1HVEFqfGWwWNA4+Ga78*V!2mtSkIkcG_l<}JgIxBLK012VuK)M^6wg4l3j+bp0` z&9BV?OXwiU13<6P&uS>@->*>!i?9I}=nHXOHD=SrK~u)jk_8Pk2C1fFid_MSOw~j5 z1ylVn5PzWl=7@cv4MmSp$(ic{@2D?rnZ>wzXbFs7A;m0WQ#1})2>A`?JfhFIp8im1 z6X>)U?trMqC)mzTibB7#8MJ6zDo$->Y$(*2QYNwbOUndy9Q{AnMRE?vXO5`(%(;l6 zI_TjskH*ClaMxkD(g3D2vMh8N2+6!mKSCu#9)C#eV2NwC21#^8=oPyAQCQgb^EQy; z=?WI0EhdbHoG?!0^3ugZ;|D5~VWmH&VhV|XFUSHA2f+ev0}^w_WmW078MVQe@dD7Z z(^QMV4@p}-2k^96*R1(Teg^uBh^3I=S+*Mp3EZ+dk;KNXqCN&YP|#Z5;9jDc;#AEi z^M8HDWkA(S$q)}HS}i(+1)_{{#w407kpL;xlE!M$Yjl?eo1!pTBDlw%?*i(mzcU~m z^q_EGh)+U^`cbQOgdS^opvPJ*u_RW)Rv)4G~(89+y zbddTWmYKgAQnJRr^gLh9?yAp9{O3%_0e{FZjNh&Fgq#zxnjf15uA>%|Lj<0<;3y>! 
zGmabvYF;IBzz6CeM$yM{Kk8mXN@6*&#gK8?LG6E?9iJ3$eRIIBoX248n%*8+avJR?(#US)Uu){{bV}564pp4e z{$o(8mGSVAWG?Vf`)Vu}x`Sm0)%8xnnG21yhVrxo)kBKRSG^UsdhebHDWVrJoq{bp zs6Jq0dk#+ELcJ3HaB=ub7{59d>wgJkYI%7CD^-3&JQz#t`F*NEx+I}?J!NVQfqO&< zWB9b*ym^yXv(KvdO3ybhtZSc21piK_;tQ7c++nk?JEEQRZK2vR>*+$V{!t%DjNH|x zfMvk7EKJ3F&H_I=z>NSqKYw!~;hNf@R{Oj0&C-VzuJ<5~N*GJ5jzvZmYsSRwq-^IwkiVRKFgNtvR@!daL`tXXEaNtCgmx*U>Aq zQa8x>tJLEkC{qeW{Npw1_TGMa{~GlN@2+2?pD#v({^0#<)EkYiM}NxVQf70gUNjTx zLHu{kB{aBKqfuJZ1=&H(1?EkcOq$`7nqJeAvkr1#LNKk>l>O#1?5y;o*2n~?>~}*l zuh1_{o7>p^74@guUcR{Ks>QBsEOS=Thb3-dA;^q^gFH;}Oxb)PUPO$sN2u;E)DcVq zRp&u0^rO&V*94_HaDNJQfYc!UF8V^`T!U>#H6+pRD)|d>>0!OJ|LV}5*0TOz37+p_ z38Fc;zUhq{@?Jif#-{ks@mV|L|2;iBJ3aV+pQGIHtyKD*CgQ7hZ zp{{AvYBvnobFl9=ATzEFiDe*OBi{uqKph}EwNTyK(nXdWZBYJ)?k z3~0EA2MakcC4Ur9U0Z<)u8yqu6%Jg98Yas?FU`hh6LD0K*9+jBIDr08rYg13*|Gj7 z@r-$h>Q;;lsH6X_d8=J&r?TP2aHm)eBhSh-(y@feGUnJgcN?{0#x+Px%;$v4DXRZX zSbr1s^Z?Ai!!slLL;K8>ma6|x9o28^y9|bT9#e*!QGaZrjL8VVIH3(T?E4u z9#FKlQ{Se63OngzGPO;|tTHWU**MJ0_;nL}s+WY?Zs3%Sri3$_c|N212$Arw=iz+J zrNyROk36(}t;sSy_9)5`@ji{d{KYC~q8Q;S{Xt>Nb@>DR7BHEb)yK!0IsrArys5>o zXb7QvHh+@rgexf-m@u!AWdFgQy9Q!KIQ3=CviAemT?OW*jK>~*fr`JgAIHuTA3NqS z8(>p}b3r%ViDNp0wf)d4E~loqWHjl4y|m8#mRkA#Ul1hOF)d{M{@-r3v-w}`)|)qP z4*UOev^zqb4(e*d-UBLY$%HWM`=Yrxszou+*nh>N)>@=YV)3Bi_F$kw-*cG>Uqj(; zVVqbxYPtz6LiugOU2PYCAWEnMRH73FE_~4O<~W8`j6QmANJP6(Cv@yedw!K*vofPa ztmb#PnB_CmF6q9;U+;VNE1nXtlZnAaT|T^bc)7`bpf_A?1!AGGidxAA$1<@piw|2@t|u?kZZ@6 zh>ygCQ$dq0MjYF^F;7+^UYpE53_tT6iy;Sn!fV#k%c_x1dT7MicS)rcuG0>xCx0f_ z^pCPM(c@$8-8$TzP0$k0(Bos>On@t(B0u}1ncfEw!h9x@VxXqIZY|E+_1r^kzhcMX zAjJ9iKU{RXquzM@j{-m!umjwJFg0v5L&>1KCyoD#rbJ#9mFKyLVungwTghksGl`c#{KzEj50p_JObOr#53`~90NcX z*)L!OKb+yzJ7hv}9n=idMjL|JcUc)4CelSWiAS7TnwE@(4ngXWW==v(RNREpqa@_G z8SWKdAT5XR#OpX}jtcCIl(iTFX4p+wAV=*a!_lNgfq2WL4p&eXsnGp?b$`Vg)cIwb z2HsIq?2BR*J(1Q3Wddm+_19Rf*kh3cFo`Rc&$v7zzk-k|8f$#vW`t&-&52hTJ6sU;`huupEJp9=6`n3&E;?xHqx?N zNI3+lNVzbx&a^2eZouJY&OywX>zD+`xXq>X--GOADH+vVG68yQMehhDl3=$BE?$no zX4DZKblgh8gd5!I^-=n@51cTkxO2G~_`jb_C!Ave>8%D7o)6 zo8_iNxV2HpYMVIQ4Atvd!HXF9>E>oQz8#LP|NWn0`gtvuz?(r@xW8=RunL3Q^S2Fq zX`y<5eLL=cxV^f5e|y#Y+`FnuHJ>q9G7a!stRs}pLv@|FGJlrXzSByubSeyky}{@H z=z8$6H@MMaKfJj4spLEa(oU4DI1|Ec+Q4O2y>r0MHFcK-`eFS_N-(Q^iw%<#it1Bv7{|}cY67|C-(^2y zmVkdpnl`p14}XMgYHx%sGhbx;iP_-{p=5UB(IHuaTFu247RKz|y;g{~(9IP=?(SB5 zBCTrYi>i*ZLbuy`9DX8`(K`?S(cSb`4J{(zj61Y$z43EnGB)Jao-g7txpC@d-^gdK0D;@U@5UZ zAHi<{;eX)7)sH@N`gF>!X|l2{SI?GUEDV1&QV@nkv-Mq(DcDQfg^$dERbkyw&5Mxc z?iBT)=g+|dj$K`dcL#Kos_?TA4jckHaTfce(`@Q(=%Rb0V0-uW9Mc(mLkg_I`lrl+ zCtV1&e*GV}PIL7?Z%$7R`Tx(-!u&rB9;0K}A%Fc(OUFJF|1t=7QX7FTwH`~3CCtKl zab*(|Ix|(+kr|XNUeEH8ejjlZrlceh?vgo}8|jPW3>I*4$QFFDY{9~Ty+lG^$O7YO zK`FSTp$H(ZI!~tv4*T7dJ869$VFJ*hBHGoX9I)tou}uP@VsY2h!CcOnSvk9B)5W=- z5r1ig>>~HnTqB?ipl-kkorM>O5IXD6C|f=5-Gd$ITRKx`yd;A+z&()-v{*)0_3nKR z;aw61VU+MX6lv}Q$hY_kf&r6#S_l&s>leOL6c&Wb0J~OPW*9%2z183*y2|9h5RI`%>3vxA(SxF}QsfL@Cb}w#v-M2f@%G?u`A+|Y`pll~A5bFX73Z$6@ z$@aHgX5)1+$_Ps{517IQ(%yt627g(XX`jiRjqyE4QstKM@k3(opF6QvFU+brYYNZe zyj?jsr;+cPLtA_SbOm`UxR!0dQn!&nUJ{DB>?na>gZlQ;7{||ZVBvXY_0e=N+{bc$ z{{0W#QU7yqbUW(3za3v+ez+Z8-29Zske0DEnTK#6dl8~mXm1+K8!i^#?|(odEFN>< zycktRz3-UYcv{8s-6<&`vEvsEw-{GK_)H`?96EY@Z2E36BUCg?LnuW|e3hhvxO3`@ ztVfXq-Bkw`EX6}L!8247n0;PvaGMJXiqe9Ol69F&I4vs!k4Z!u=AQY}Be%r}QHd)x22sU-UD z=4jw|u(YtgzS;UhrhXRsH{OwXelHTmAi|dqt>y*f8=2!Ca0J^g=iiHohR!{mzfr>ah zl(>A25|>>=BhOJ9v+f93c)a4w$1`kxa`7#3boR5>a)m7DKHA>?dKEfpL|!VO6?Zo2 zWXjmtuS6codtSV}r++vMU!Lv}F`i_O`LenCX~Cwp7MuFVw9>3af~bti-0L((mrR-- za{?E#vl&GWCRXwL{gyXnoEn9NP20|&m7Iv^*C!5ck8-z1SztXif%TW)a<&qs6!WE? 
zvFiKS-q+ysZf>~`Us}0T5Dr~i1(O8b7!uutDH9r96BgZ6nSYbOu@3Y}Ugq>sazn+{ z8pCII7I)&H#O`_WMBT=}1-c7cr4IDaw}J3c$k)c+m5 zX&vtWewLP534iLNH_XM9`r`hu_|N>z&xpB@DiG4pIFNJ3$?tl*OVkants2rjL*v7O zdkLIkH+h+=zW8?c`a!OAL`VNP0QQM4`4b)VOTGT9#*{F?f2sSc8o$t#41pHFCmLV| zvi=&?T_W`VwJomxVdhEw%TWdd=zcp8)OT=JrfO=NihpROYd4E1hY+@lg{U~rierKW zP`Sg*g`;_m>V6ry&BL+-Xz1Fwy-ZrovaDKuwZ0|PA$0aN z7H#J8V`~zU+P&mAnEV%P8LnD0lyRzhrxx24+guS|r6f^lo6z^4S9wp-Uy)d>nfJ1f z_f=JGcz>bJpFz!5P!W69vQMJWG(T^bCA8xxiuLN#6za2UCnev{t(*2E;C}(7l$nUi z3TdzUNPUsgueF+-DBbD#lv4P!RwY6n%Ji=#6S4;h-fmw0AMCL^2}%{2et7sc58yA{ zs-FL!atX+W^Z(KDaXWke&&f&aaQ=Uew&DE$tbZGUu0!~iwtyE>{K`oER?{l}Bm!TMA)z()Ol`X;0Q+oz|8`~RM$y_o)w4a$S&KWP3#JG4VPv_pI5_J08Y0RR6XLQG2l Gr~m+(6OIP} delta 5820 zcmV;t7DMT{F1Rj`uz%a__UTFM?7P;{$o=ht{xIan5KKTq> z5Jo#_aa6;;pZK%UK5iV>91u1qzSIvdeh)aHw=SEYv1A<209}sz$Vg9(Vh=hfBU80S z7UN3~`p{M_yZ#s04M6Ny0jyvDM@L7kR(AcL9JLPX|2Z1*@P7tPAVDZOYDNO3fhC^Bt6n|u1J@lg zm)Oe=>Q4tu4mk**vWAc?$dI!IQK|xXHDXF|{kT(t=5gb+(b`1PB@3V+#E`QIRHGPm z@l}@uy>{cMwSSo)We#-pLL0%12+8pl167^FWoq%Ldy0i0H*;VMG6F$<-)4rn>5%%O z8%fn{mjzC^VGM(bOYBSaq*iAclPBd*sr+;J?aIk#lST>^)6AHYk@L3v} z<#V#Y5~3+xHf#F%FmTpXb z{MsC_gbtEC0Q3s|tcIfg{ThX^2peF5z7W?{V>VqJG-VtuSf(f@N@BUZJ}mg@t`TZv!cw zu3!P$V!~+13FAaAFI_A&exO1bR{CQqrjQ8uf-C@W5G>#}ATeiLR+WC6Q5$?2F91C| zO|=O8khJA<08g8B&6=O&XQ01`SPBWAWxIipz%82-No?#Y>SM421+C=`?j@QjPSt!e z-+yOZ22{P24Do=X)uKaKAj&9bOrpsW36N4PX{;8#Mt5njDGHM%f_v=wE})M3I|I@| z4+{5%_#~95AGKOX=&_audaTtFOJXH#1xrK8rBY9qdcNXS-9cC>FiXEQdyP{LEqrW4 z2dNKYnfa?BC2QlXl)c)7m@k#O4HwWy>c?{OB>FvQKHWnSE z{|e|e{fL!3bc{lZ9$p%LhAyTMzjE13$-LV^ry7@$WMRk^?e~f=k4O>yrI`2h-hXu3 zYJDW(Qx3lcAd*nFA1EpCa^2cduY&QG6FYh$vDjKFr_p{QjqFDLwWbbFr$jC1P{k?j zKL({*84n*x<^m73uf}4bJ6Lv5UGEf}xzI>!C{IgJJ*3Ed)mve!_wI?1B6H{{m=ime`)GOf+7l*He@vBp@o_|25rnlC-T2hZ+Od;vFed-blpP3tY@Bx+=whco3 zOys=p0F_!CqDAiE`%e@vFmaW3>Y$@)+T}Q>AAl+ZZ(TN-1_Fkh-7i;6O;S%aLPCYa zt{cIxKx5#QC!>DUIBA^Li@LQ7`nBVZ?Z#1~Z8{2&AMyRQ5A+>zVGwwUo_}8EMg3pq z6li*eiu;rAUAUH*fN4_E{BQ>G|e`b?sA$;NR&~e8JM5J8brKN3@f^EmS*ZJzXf)Kk5UCk-ORy zunf4Cg{gSYS>Q(pxDjCIXMav4TvHp=YJWGrS^BWT^&X^A31f-XvB=0`&6v2Ilr7(| z1!-ta6?zbixvcE9iy0_p>War8`geu73H3!#IZ*~9oY~1~DwbB&zI(mgx z>INBqm3sUGWlEulf4oNB-rG;_U!(ru-Sunq^TlY;AH08!dZW?xNPk&e%4`nRi)KPS zi2ts+ga-F&G)il_AUmkJz`W^_Ni%#>(`#CC)O6>reiRz)nxIq%PJf{ekQ$`lMPG=VYq0I8h9vr3C4V6?!gx1A$l){;XM^OiSmn}hiP_(Ba z)HRJ-?S>(H4))y!WX822u?)m(vkp2@xK^xY8`}pi6v;NOc z&yEi3|2f*jL-U_#LA;JO6Q;xkx%A=OQ`2J4p@aU}e0;3cG+6K6$JEi*0}Wlj+ThSB z0~+q(!9os934aAt*H)l{t0OCZg#%ZjhRHI}OSAFWL>v|5^#V924xm4jsY-2hcC7zN zJYycBx)mb>>ga!K-fEZHscd*L+$mPW$g?tybSz=Aj5#*W-A1jLaSaj^^Esh%it2w8 z*55=uJpl9X@XUz*&^|MzrRx7vNA=tKE`wp7$CTk_6n~p2V=@9TPH2M-`+mkzVT|&J z2NbRC)VFD%!cO{_Ol{LKt4zyTHV*SLe%%D0>LsDJ8#ra7DdEg!p3kU0LL~g_c{m?) zX|d_nBM)s~YqCs_J&JNfyicPqf3eD$C`PzSe^A(RUH(A71x%)9_3^Q$PCyMYZ)!0t z8bTK1Z2nif_2$jl zVgG-Qc1NhwL0xUwdq8C^nGlA3Uo;m-wJ7EpyMK7pT8orPEFLu69t>3IdoDBKYbe|; zj1x;oO*f%MD8FsEtL@?sLMXHj@Vo-aRN%3M@;aeQ zPFTi1+$3lPX(EY_m3x~sZllkgPSIGgF2GiX{g5+t292L1q|T!qGzzFB9(2tKa_#sM z@sXHtDrmCBh+|tf=E+LLYm?cB;b)#>G3206c+GlxSvAs04~;ncE~&J_b=pDo#DC

CJHktYi1sC)RJiI6rNzgO~(d$ba*rRuNDb^|NNmxIh1iQO1X#M_{{{cqaarV*uzP z`vr{PhclddhfFB0gPLL5XhSgjE-ORBM7roE@rYAP(~_~!AxIt4%t?reiknb+l!P2N z!@c4Qq~#EvcpXR0QGuP2vKB+Y47&*nf1P5oiB!^J%EpVNK6#RH^E5h=7H#0#A!(T; zgc(p-&^wbrQj93ZU{Urs_-*0IU!dy0BQtNqzP{GNH|b0(S1+9bpyZ%+L$S8crnU@`_ zK3Yx5>?8I+z;XoBJbys+S;B-@4vwV`hXrstQ&-Ji!MiBuDv_?49Y|7178|tzCHI|X zv)q&jw>An{Z4+mkp?WSER$*{^{cMjOOrtY#pKdfI#31+o#v0-vTQGE&y&&r%x8T9OWUuUJF(HoC!0SwAs0N<1p_3F5x;}v46bgCev#Lv(wH|tco}CnAgkY zAfzE z>DgH}|L5p%|I70<%d%1_Kf>hfH|~Bkxr;|NlOc3D?wi8C^~peKPhTd*XNTM!EG4$* zBls;K9Dkg+`q5`jpHA5|O;)z$>e&*Eh2gJ83c|2xw!SMe1$$|`@R2#NDy$o-c@fgw zouVG}{5g2Qv8xO5?tqR`6@C`NfkQwi&SIZ*noYe8U3700Z13KlV>*LxNP$&Y|CBlK zqzj?eum9uLX|Dd~&FRS@|NmK9nE!{tV{{BVq<{Zu>DXuDUk2e$Y9r94)?>-BgjrZG zu54mLXQm1}GJ~?k>sdb1?<0=Fl$0dGT`~uABYlya!2&K0*@7>YEm%0Pmq_RfSztUZ zC|-fOEC)UB$<*0`wVsCA zyxz@&Z#TbDk7uu$jcR5t*JL+_7o}LyEq^7JuNM0uhojubn)4WD?p}`3j|XL0p)rgHpz2U+Nm|_TKg{2Dk4<*Mpnhpj+%Bty4w`mB6tr z$s(7eXfX8$wm#c z#4`5;l@iO2ti6w+wKtBfy_dnYw;kQOFmH}#kvULCXxzKG{dn>3+v{O(pkt=DgHIoK z8e8rvrc#=U-jhgq)#!BlNXO{OF~hX9VPHVNKyZb!ZMx8v)}54XdMo1gL+(lWLt^APT1FGAD`?M;Ju!^PtJ9e+rK#bXYf z7o*Ck_Z@Q^PpeqIJ0%4qcKm|j7UN0?pNRyALr0H~P2UY>goP4-NmE7l@0r-=-%9M9Fx9Cb_UHR7Cq|HFq* zZ+o|c>u&FMaPhHMM0wY`g?|cF0-EQqQ6Er_cui%%w^wOVrB=as^D0y+qy=dPGD;j1 z5=(Jj5D+f9*|daFeJQWDD22qHZ`s(MgVKs^!O*`Q{L2Z~I+0l|AO{y9O^p` z^&L4>CA-;Xtm#edP(V_Nzg-GQ(wlvCah(!yof0|C{7POPKzBAqvGBpx>Pi17P!WfR z5|^)0;<9UK&VJTfu8;-YN88(9uRSR&x@D$6n}@|%hNp~#*@r3Up7}iE!fo7VpIQ^R+_a)5S1~Rd!5GUl1bBJ zPT)dzHlxVF#43Kj-}0u6Q=_o3Y1{d;k`odA`ozKQQSSCA3#_Lmu>SH}&Q_w7V!qTf zR(&7a`x<=S%`NxgODmTO!l7%cV3MF4L!z57WkREC!lIihbAJ*z)`33B%bY$+Zm76g zWBBaO;!Yft*ga34sN49rKzCuQ)PcU9u_u+mt+C!JPe&>xVfCX_Y4IpE^A2@*FH?uN zy;-(F3M=go)x|Nle^_fRv$d?18GBV~L=VM_g{{2)hflC=?d1~i4fTI#$7jcx`oE($ zt;7A_&(bn0L4SSphPjwhU)&!S|Cyip88H`91wtAc2Xf9h`CV^!iMqkHRYSUGXna_3 zFM(6+CNERf7vJt)KggAi=;$8@z&_C>f1-nasn>tim=Xr~FLi%a;}@EeA5W;q`5EaK+aZIoP zDtDNY)wK^yO;b1lmCJ(!&PgBGEP4ZV0L$fQ6PDw4G)v<)`o z_7_KOp#-+h6^;$uM$$=i51W2J_n=z;I&FLX zzcu|yV59y&eUs7u?bFl4{eREWUQGYT2IWEXA2k1=9onHC+Mzvj`@aAH0RR8<&(ur+ Gr~m-wE^5{Q diff --git a/charts/v1.23.3/blob-csi-driver/values.yaml b/charts/v1.23.3/blob-csi-driver/values.yaml index 70d51e7a9..35f88feca 100644 --- a/charts/v1.23.3/blob-csi-driver/values.yaml +++ b/charts/v1.23.3/blob-csi-driver/values.yaml @@ -119,7 +119,7 @@ node: installBlobfuse: true blobfuseVersion: "1.4.5" installBlobfuse2: true - blobfuse2Version: "2.1.2" + blobfuse2Version: "2.2.0" setMaxOpenFileNum: true maxOpenFileNum: "9000000" disableUpdateDB: true diff --git a/deploy/csi-blob-node.yaml b/deploy/csi-blob-node.yaml index d2f76c455..3203f029e 100644 --- a/deploy/csi-blob-node.yaml +++ b/deploy/csi-blob-node.yaml @@ -58,7 +58,7 @@ spec: - name: INSTALL_BLOBFUSE2 value: "true" - name: BLOBFUSE2_VERSION - value: "2.1.2" + value: "2.2.0" - name: SET_MAX_OPEN_FILE_NUM value: "true" - name: MAX_FILE_NUM diff --git a/deploy/v1.22.5/csi-blob-node.yaml b/deploy/v1.22.5/csi-blob-node.yaml index bd5a35724..673ae9439 100644 --- a/deploy/v1.22.5/csi-blob-node.yaml +++ b/deploy/v1.22.5/csi-blob-node.yaml @@ -58,7 +58,7 @@ spec: - name: INSTALL_BLOBFUSE2 value: "true" - name: BLOBFUSE2_VERSION - value: "2.0.5" + value: "2.2.0" - name: SET_MAX_OPEN_FILE_NUM value: "true" - name: MAX_FILE_NUM diff --git a/deploy/v1.23.3/csi-blob-node.yaml b/deploy/v1.23.3/csi-blob-node.yaml index 78e675987..03708091f 100644 --- a/deploy/v1.23.3/csi-blob-node.yaml +++ b/deploy/v1.23.3/csi-blob-node.yaml @@ -58,7 +58,7 @@ spec: - name: INSTALL_BLOBFUSE2 value: "true" - name: BLOBFUSE2_VERSION - value: "2.1.2" + value: "2.2.0" 
- name: SET_MAX_OPEN_FILE_NUM value: "true" - name: MAX_FILE_NUM From c4cb4df9704caa25a3247b25c837734c308ebd89 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Mon, 5 Feb 2024 21:18:03 +0800 Subject: [PATCH 065/157] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ffcee25ad..df9e404d8 100644 --- a/README.md +++ b/README.md @@ -42,7 +42,7 @@ This option does not depend on cloud provider config file, supports cross subscr > > Execute following command to install a specific version of blobfuse v2 once driver is running on the agent node: > ```console -> kubectl patch daemonset csi-blob-node -n kube-system -p '{"spec":{"template":{"spec":{"initContainers":[{"env":[{"name":"INSTALL_BLOBFUSE2","value":"true"},{"name":"BLOBFUSE2_VERSION","value":"2.1.2"}],"name":"install-blobfuse-proxy"}]}}}}' +> kubectl patch daemonset csi-blob-node -n kube-system -p '{"spec":{"template":{"spec":{"initContainers":[{"env":[{"name":"INSTALL_BLOBFUSE2","value":"true"},{"name":"BLOBFUSE2_VERSION","value":"2.2.0"}],"name":"install-blobfuse-proxy"}]}}}}' > ``` > > Execute following command to install a specific version of blobfuse v1 once driver is running on the agent node: From 045d5854df7c99e90c6d5fe8446914cf4d3ec247 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Tue, 6 Feb 2024 14:26:21 +0000 Subject: [PATCH 066/157] doc: add this repo to ArtifactHub --- README.md | 1 + charts/artifacthub-repo.yml | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 charts/artifacthub-repo.yml diff --git a/README.md b/README.md index df9e404d8..8c6e5be41 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,7 @@ ![linux build status](https://github.com/kubernetes-sigs/blob-csi-driver/actions/workflows/linux.yaml/badge.svg) [![Coverage Status](https://coveralls.io/repos/github/kubernetes-sigs/blob-csi-driver/badge.svg?branch=master)](https://coveralls.io/github/kubernetes-sigs/blob-csi-driver?branch=master) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fkubernetes-sigs%2Fblob-csi-driver.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fkubernetes-sigs%2Fblob-csi-driver?ref=badge_shield) +[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/blob-csi-driver)](https://artifacthub.io/packages/search?repo=blob-csi-driver) ### About This driver allows Kubernetes to access Azure Storage through one of following methods: diff --git a/charts/artifacthub-repo.yml b/charts/artifacthub-repo.yml new file mode 100644 index 000000000..a44bba0ea --- /dev/null +++ b/charts/artifacthub-repo.yml @@ -0,0 +1,4 @@ +repositoryID: 27852225-ee5d-4ad7-9ee8-0c1f0dc06d4e +owners: + - name: andyzhangx + email: xiazhang@microsoft.com From 0c9beec1f20ef1402368aa408e2c96679277b8a2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 8 Feb 2024 16:40:27 +0000 Subject: [PATCH 067/157] chore(deps): bump golang.org/x/net from 0.20.0 to 0.21.0 Bumps [golang.org/x/net](https://github.com/golang/net) from 0.20.0 to 0.21.0. - [Commits](https://github.com/golang/net/compare/v0.20.0...v0.21.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 8 +- go.sum | 16 +-- vendor/golang.org/x/net/html/token.go | 12 +- vendor/golang.org/x/net/http2/frame.go | 11 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 2 +- vendor/golang.org/x/sys/unix/zerrors_linux.go | 36 ++++- .../x/sys/unix/zerrors_linux_386.go | 3 + .../x/sys/unix/zerrors_linux_amd64.go | 3 + .../x/sys/unix/zerrors_linux_arm.go | 3 + .../x/sys/unix/zerrors_linux_arm64.go | 3 + .../x/sys/unix/zerrors_linux_loong64.go | 3 + .../x/sys/unix/zerrors_linux_mips.go | 3 + .../x/sys/unix/zerrors_linux_mips64.go | 3 + .../x/sys/unix/zerrors_linux_mips64le.go | 3 + .../x/sys/unix/zerrors_linux_mipsle.go | 3 + .../x/sys/unix/zerrors_linux_ppc.go | 3 + .../x/sys/unix/zerrors_linux_ppc64.go | 3 + .../x/sys/unix/zerrors_linux_ppc64le.go | 3 + .../x/sys/unix/zerrors_linux_riscv64.go | 3 + .../x/sys/unix/zerrors_linux_s390x.go | 3 + .../x/sys/unix/zerrors_linux_sparc64.go | 3 + .../x/sys/unix/zsysnum_linux_386.go | 4 + .../x/sys/unix/zsysnum_linux_amd64.go | 3 + .../x/sys/unix/zsysnum_linux_arm.go | 4 + .../x/sys/unix/zsysnum_linux_arm64.go | 4 + .../x/sys/unix/zsysnum_linux_loong64.go | 4 + .../x/sys/unix/zsysnum_linux_mips.go | 4 + .../x/sys/unix/zsysnum_linux_mips64.go | 4 + .../x/sys/unix/zsysnum_linux_mips64le.go | 4 + .../x/sys/unix/zsysnum_linux_mipsle.go | 4 + .../x/sys/unix/zsysnum_linux_ppc.go | 4 + .../x/sys/unix/zsysnum_linux_ppc64.go | 4 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 4 + .../x/sys/unix/zsysnum_linux_riscv64.go | 4 + .../x/sys/unix/zsysnum_linux_s390x.go | 4 + .../x/sys/unix/zsysnum_linux_sparc64.go | 4 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 125 +++++++++--------- .../golang.org/x/sys/windows/env_windows.go | 17 ++- .../x/sys/windows/syscall_windows.go | 3 +- vendor/modules.txt | 8 +- 40 files changed, 245 insertions(+), 97 deletions(-) diff --git a/go.mod b/go.mod index 47c26bbb0..e6d1999b1 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/satori/go.uuid v1.2.0 github.com/stretchr/testify v1.8.4 go.uber.org/mock v0.4.0 - golang.org/x/net v0.20.0 + golang.org/x/net v0.21.0 google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.32.0 k8s.io/api v0.29.0 @@ -128,12 +128,12 @@ require ( go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.19.0 // indirect - golang.org/x/crypto v0.18.0 // indirect + golang.org/x/crypto v0.19.0 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/oauth2 v0.11.0 // indirect golang.org/x/sync v0.6.0 - golang.org/x/sys v0.16.0 // indirect - golang.org/x/term v0.16.0 // indirect + golang.org/x/sys v0.17.0 // indirect + golang.org/x/term v0.17.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.16.1 // indirect diff --git a/go.sum b/go.sum index b2d44af03..6349770f2 100644 --- a/go.sum +++ b/go.sum @@ -325,8 +325,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= 
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -343,8 +343,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -364,13 +364,13 @@ golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go index de67f938a..3c57880d6 100644 --- a/vendor/golang.org/x/net/html/token.go +++ b/vendor/golang.org/x/net/html/token.go @@ -910,9 +910,6 @@ func (z *Tokenizer) readTagAttrKey() { return } switch c { - case ' ', '\n', '\r', '\t', '\f', '/': - z.pendingAttr[0].end = z.raw.end - 1 - return case '=': if z.pendingAttr[0].start+1 == z.raw.end { // WHATWG 13.2.5.32, if we see an equals sign before the attribute name @@ -920,7 +917,9 @@ func (z *Tokenizer) readTagAttrKey() { continue } fallthrough - case '>': + case ' ', '\n', '\r', 
'\t', '\f', '/', '>': + // WHATWG 13.2.5.33 Attribute name state + // We need to reconsume the char in the after attribute name state to support the / character z.raw.end-- z.pendingAttr[0].end = z.raw.end return @@ -939,6 +938,11 @@ func (z *Tokenizer) readTagAttrVal() { if z.err != nil { return } + if c == '/' { + // WHATWG 13.2.5.34 After attribute name state + // U+002F SOLIDUS (/) - Switch to the self-closing start tag state. + return + } if c != '=' { z.raw.end-- return diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index c1f6b90dc..e2b298d85 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1510,13 +1510,12 @@ func (mh *MetaHeadersFrame) checkPseudos() error { } func (fr *Framer) maxHeaderStringLen() int { - v := fr.maxHeaderListSize() - if uint32(int(v)) == v { - return int(v) + v := int(fr.maxHeaderListSize()) + if v < 0 { + // If maxHeaderListSize overflows an int, use no limit (0). + return 0 } - // They had a crazy big number for MaxHeaderBytes anyway, - // so give them unlimited header lengths: - return 0 + return v } // readMetaFrame returns 0 or more CONTINUATION frames from fr and diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index c6492020e..fdcaa974d 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -584,7 +584,7 @@ ccflags="$@" $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || $2 ~ /^KEYCTL_/ || $2 ~ /^PERF_/ || - $2 ~ /^SECCOMP_MODE_/ || + $2 ~ /^SECCOMP_/ || $2 ~ /^SEEK_/ || $2 ~ /^SCHED_/ || $2 ~ /^SPLICE_/ || diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index a5d3ff8df..36bf8399f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1785,6 +1785,8 @@ const ( LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20 LANDLOCK_ACCESS_FS_TRUNCATE = 0x4000 LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 + LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 + LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef @@ -2465,6 +2467,7 @@ const ( PR_MCE_KILL_GET = 0x22 PR_MCE_KILL_LATE = 0x0 PR_MCE_KILL_SET = 0x1 + PR_MDWE_NO_INHERIT = 0x2 PR_MDWE_REFUSE_EXEC_GAIN = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b @@ -2669,8 +2672,9 @@ const ( RTAX_FEATURES = 0xc RTAX_FEATURE_ALLFRAG = 0x8 RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_MASK = 0x1f RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TCP_USEC_TS = 0x10 RTAX_FEATURE_TIMESTAMP = 0x4 RTAX_HOPLIMIT = 0xa RTAX_INITCWND = 0xb @@ -2913,9 +2917,38 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d SC_LOG_FLUSH = 0x100000 + SECCOMP_ADDFD_FLAG_SEND = 0x2 + SECCOMP_ADDFD_FLAG_SETFD = 0x1 + SECCOMP_FILTER_FLAG_LOG = 0x2 + SECCOMP_FILTER_FLAG_NEW_LISTENER = 0x8 + SECCOMP_FILTER_FLAG_SPEC_ALLOW = 0x4 + SECCOMP_FILTER_FLAG_TSYNC = 0x1 + SECCOMP_FILTER_FLAG_TSYNC_ESRCH = 0x10 + SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV = 0x20 + SECCOMP_GET_ACTION_AVAIL = 0x2 + SECCOMP_GET_NOTIF_SIZES = 0x3 + SECCOMP_IOCTL_NOTIF_RECV = 0xc0502100 + SECCOMP_IOCTL_NOTIF_SEND = 0xc0182101 + SECCOMP_IOC_MAGIC = '!' 
SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECCOMP_RET_ACTION = 0x7fff0000 + SECCOMP_RET_ACTION_FULL = 0xffff0000 + SECCOMP_RET_ALLOW = 0x7fff0000 + SECCOMP_RET_DATA = 0xffff + SECCOMP_RET_ERRNO = 0x50000 + SECCOMP_RET_KILL = 0x0 + SECCOMP_RET_KILL_PROCESS = 0x80000000 + SECCOMP_RET_KILL_THREAD = 0x0 + SECCOMP_RET_LOG = 0x7ffc0000 + SECCOMP_RET_TRACE = 0x7ff00000 + SECCOMP_RET_TRAP = 0x30000 + SECCOMP_RET_USER_NOTIF = 0x7fc00000 + SECCOMP_SET_MODE_FILTER = 0x1 + SECCOMP_SET_MODE_STRICT = 0x0 + SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP = 0x1 + SECCOMP_USER_NOTIF_FLAG_CONTINUE = 0x1 SECRETMEM_MAGIC = 0x5345434d SECURITYFS_MAGIC = 0x73636673 SEEK_CUR = 0x1 @@ -3075,6 +3108,7 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_UDP = 0x11 + SOL_VSOCK = 0x11f SOL_X25 = 0x106 SOL_XDP = 0x11b SOMAXCONN = 0x1000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 4920821cf..42ff8c3c1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index a0c1e4112..dca436004 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -282,6 +282,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index c63985560..5cca668ac 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -288,6 +288,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 47cc62e25..d8cae6d15 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -278,6 +278,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 27ac4a09e..28e39afdc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -275,6 +275,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC 
= 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 54694642a..cd66e92cb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 3adb81d75..c1595eba7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 2dfe98f0d..ee9456b0d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index f5398f84f..8cfca81e1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index c54f152d6..60b0deb3a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -336,6 +336,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 76057dc72..f90aa7281 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -340,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index e0c3725e2..ba9e01503 100644 
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -340,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 18f2813ed..07cdfd6e9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -272,6 +272,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 11619d4ec..2f1dd214a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -344,6 +344,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 396d994da..f40519d90 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -335,6 +335,9 @@ const ( SCM_TIMESTAMPNS = 0x21 SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x400000 SFD_NONBLOCK = 0x4000 SF_FP = 0x38 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index fcf3ecbdd..0cc3ce496 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -448,4 +448,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f56dc2504..856d92d69 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -371,4 +371,7 @@ const ( SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 974bf2467..8d467094c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -412,4 +412,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 39a2739e2..edc173244 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -315,4 +315,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index cf9c9d77e..445eba206 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -309,4 +309,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 10b7362ef..adba01bca 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -432,4 +432,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index cd4d8b4fd..014c4e9c7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -362,4 +362,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 2c0efca81..ccc97d74d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -362,4 +362,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index a72e31d39..ec2b64a95 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -432,4 +432,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index c7d1e3747..21a839e33 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -439,4 +439,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index f4d4838c8..c11121ec3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -411,4 +411,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index b64f0e591..909b631fc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -411,4 +411,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 95711195a..e49bed16e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -316,4 +316,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index f94e943bc..66017d2d3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -377,4 +377,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index ba0c2bc51..47bab18dc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -390,4 +390,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index bbf8399ff..dc0c955ee 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -174,7 +174,8 @@ type FscryptPolicyV2 struct { Contents_encryption_mode uint8 Filenames_encryption_mode uint8 Flags uint8 - _ [4]uint8 + Log2_data_unit_size uint8 + _ [3]uint8 Master_key_identifier [16]uint8 } @@ -455,60 +456,63 @@ type Ucred struct { } type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 - Pacing_rate uint64 - Max_pacing_rate uint64 - Bytes_acked uint64 - Bytes_received uint64 - Segs_out uint32 - Segs_in uint32 - Notsent_bytes uint32 - Min_rtt uint32 - Data_segs_in uint32 - Data_segs_out uint32 - Delivery_rate uint64 - Busy_time uint64 - Rwnd_limited uint64 - Sndbuf_limited uint64 - Delivered uint32 - 
Delivered_ce uint32 - Bytes_sent uint64 - Bytes_retrans uint64 - Dsack_dups uint32 - Reord_seen uint32 - Rcv_ooopack uint32 - Snd_wnd uint32 - Rcv_wnd uint32 - Rehash uint32 + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 + Pacing_rate uint64 + Max_pacing_rate uint64 + Bytes_acked uint64 + Bytes_received uint64 + Segs_out uint32 + Segs_in uint32 + Notsent_bytes uint32 + Min_rtt uint32 + Data_segs_in uint32 + Data_segs_out uint32 + Delivery_rate uint64 + Busy_time uint64 + Rwnd_limited uint64 + Sndbuf_limited uint64 + Delivered uint32 + Delivered_ce uint32 + Bytes_sent uint64 + Bytes_retrans uint64 + Dsack_dups uint32 + Reord_seen uint32 + Rcv_ooopack uint32 + Snd_wnd uint32 + Rcv_wnd uint32 + Rehash uint32 + Total_rto uint16 + Total_rto_recoveries uint16 + Total_rto_time uint32 } type CanFilter struct { @@ -551,7 +555,7 @@ const ( SizeofIPv6MTUInfo = 0x20 SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc - SizeofTCPInfo = 0xf0 + SizeofTCPInfo = 0xf8 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -3399,7 +3403,7 @@ const ( DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 DEVLINK_PORT_FN_ATTR_CAPS = 0x4 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x4 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 ) type FsverityDigest struct { @@ -4183,7 +4187,8 @@ const ( ) type LandlockRulesetAttr struct { - Access_fs uint64 + Access_fs uint64 + Access_net uint64 } type LandlockPathBeneathAttr struct { @@ -5134,7 +5139,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1b + NL80211_FREQUENCY_ATTR_MAX = 0x1c NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5547,7 +5552,7 @@ const ( NL80211_REGDOM_TYPE_CUSTOM_WORLD = 0x2 NL80211_REGDOM_TYPE_INTERSECTION = 0x3 NL80211_REGDOM_TYPE_WORLD = 0x1 - NL80211_REG_RULE_ATTR_MAX = 0x7 + NL80211_REG_RULE_ATTR_MAX = 0x8 NL80211_REKEY_DATA_AKM = 0x4 NL80211_REKEY_DATA_KCK = 0x2 NL80211_REKEY_DATA_KEK = 0x1 diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go index b8ad19250..d4577a423 100644 --- a/vendor/golang.org/x/sys/windows/env_windows.go +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -37,14 +37,17 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) { return nil, err } defer DestroyEnvironmentBlock(block) - blockp := unsafe.Pointer(block) - for { - entry := UTF16PtrToString((*uint16)(blockp)) - if len(entry) == 0 { - break + size := unsafe.Sizeof(*block) + for *block != 0 { + // find NUL terminator + end := unsafe.Pointer(block) + for *(*uint16)(end) != 0 { + end = unsafe.Add(end, size) } - env = append(env, entry) - blockp = unsafe.Add(blockp, 2*(len(entry)+1)) + + entry := unsafe.Slice(block, (uintptr(end)-uintptr(unsafe.Pointer(block)))/size) + env = append(env, UTF16ToString(entry)) + block = (*uint16)(unsafe.Add(end, size)) } return env, nil } diff --git 
a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index ffb8708cc..6395a031d 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -125,8 +125,7 @@ func UTF16PtrToString(p *uint16) string { for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ { ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) } - - return string(utf16.Decode(unsafe.Slice(p, n))) + return UTF16ToString(unsafe.Slice(p, n)) } func Getpagesize() int { return 4096 } diff --git a/vendor/modules.txt b/vendor/modules.txt index f2edf8dbe..98fe6087a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -535,7 +535,7 @@ go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.18.0 +# golang.org/x/crypto v0.19.0 ## explicit; go 1.18 golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 @@ -556,7 +556,7 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/slices -# golang.org/x/net v0.20.0 +# golang.org/x/net v0.21.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html @@ -579,14 +579,14 @@ golang.org/x/oauth2/internal ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/singleflight -# golang.org/x/sys v0.16.0 +# golang.org/x/sys v0.17.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.16.0 +# golang.org/x/term v0.17.0 ## explicit; go 1.18 golang.org/x/term # golang.org/x/text v0.14.0 From 8cb72dbdb63d69762e03d4b8bfe9e1364464c0e2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 9 Feb 2024 16:40:08 +0000 Subject: [PATCH 068/157] chore(deps): bump golangci/golangci-lint-action from 3 to 4 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 3 to 4. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/v3...v4) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/static.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/static.yaml b/.github/workflows/static.yaml index 0445903bc..20a0524c4 100644 --- a/.github/workflows/static.yaml +++ b/.github/workflows/static.yaml @@ -13,7 +13,7 @@ jobs: go-version: ^1.19 - uses: actions/checkout@master - name: Run linter - uses: golangci/golangci-lint-action@v3 + uses: golangci/golangci-lint-action@v4 with: version: v1.54 args: -E=gofmt,unused,ineffassign,revive,misspell,exportloopref,asciicheck,bodyclose,depguard,dogsled,durationcheck,errname,forbidigo -D=staticcheck --timeout=30m0s From 756056ff4264c265b52e58709ac9b2a3e0a9d439 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Mon, 12 Feb 2024 03:10:02 +0000 Subject: [PATCH 069/157] feat: upgrade to aznfs 2.0.4 version for nfs mount --- pkg/blobplugin/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/blobplugin/Dockerfile b/pkg/blobplugin/Dockerfile index b82c1978e..5699eb4bb 100644 --- a/pkg/blobplugin/Dockerfile +++ b/pkg/blobplugin/Dockerfile @@ -36,7 +36,7 @@ RUN chmod +x /blobfuse-proxy/init.sh && \ RUN apt update && apt upgrade -y && apt-mark unhold libcap2 && clean-install ca-certificates uuid-dev util-linux mount udev wget e2fsprogs nfs-common netbase procps conntrack iptables bind9-host iproute2 bash netcat-traditional sysvinit-utils kmod # install aznfs -ARG aznfsVer=2.0.3 +ARG aznfsVer=2.0.4 RUN if [ "$ARCH" = "amd64" ] ; then \ wget -O aznfs.tar.gz https://github.com/Azure/AZNFS-mount/releases/download/${aznfsVer}/aznfs-${aznfsVer}-1.x86_64.tar.gz; \ else \ From c320a093bf8cec8b8c842b547cc01b83da4653e5 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Wed, 14 Feb 2024 03:54:06 +0000 Subject: [PATCH 070/157] cleanup: remove integration test --- Makefile | 4 - test/integration/README.md | 37 --------- test/integration/integration_test.go | 82 ------------------- test/integration/run-test.sh | 100 ----------------------- test/integration/run-tests-all-clouds.sh | 42 ---------- 5 files changed, 265 deletions(-) delete mode 100644 test/integration/README.md delete mode 100644 test/integration/integration_test.go delete mode 100755 test/integration/run-test.sh delete mode 100755 test/integration/run-tests-all-clouds.sh diff --git a/Makefile b/Makefile index 96e2f8c61..369d260f8 100644 --- a/Makefile +++ b/Makefile @@ -71,10 +71,6 @@ unit-test: sanity-test: blob go test -v -timeout=30m ./test/sanity -.PHONY: integration-test -integration-test: blob - go test -v -timeout=30m ./test/integration - .PHONY: e2e-test e2e-test: install-ginkgo if [ ! -z "$(EXTERNAL_E2E_TEST_BLOBFUSE)" ] || [ ! -z "$(EXTERNAL_E2E_TEST_BLOBFUSE_v2)" ] || [ ! -z "$(EXTERNAL_E2E_TEST_NFS)" ]; then \ diff --git a/test/integration/README.md b/test/integration/README.md deleted file mode 100644 index 5ec1db660..000000000 --- a/test/integration/README.md +++ /dev/null @@ -1,37 +0,0 @@ -## Integration Test -Integration test verifies the functionality of CSI driver as a standalone server outside Kubernetes. It exercises the lifecycle of the volume by creating, attaching, staging, mounting volumes and the reverse operations. 
- -## Run Integration Tests Locally -### Prerequisite - - Make sure `GOPATH` is set and [csc](https://github.com/rexray/gocsi/tree/master/csc) tool is installed under `$GOPATH/bin/csc` -``` -export set GOPATH=$HOME/go -go get github.com/rexray/gocsi/csc -cd $GOPATH/src/github.com/rexray/gocsi/csc -make build -export PATH=$PATH:$GOROOT/bin:$GOPATH/bin -``` - - - Set Azure credentials by environment variables - > You could get these variables from `/etc/kubernetes/azure.json` on a kubernetes cluster node -``` -# Required environment variables: -export set AZURE_TENANT_ID= -export set AZURE_SUBSCRIPTION_ID= -export set AZURE_CLIENT_ID= -export set AZURE_CLIENT_SECRET= - -# Optional environment variables: -# If the the test is not for the public Azure, e.g. Azure China Cloud, then you need to set AZURE_CLOUD_NAME and AZURE_LOCATION. -# For Azure Stack Clound, you need to set AZURE_ENVIRONMENT_FILEPATH for your cloud environment. -# If you have an existing resource group created for the test, then you need to set variable AZURE_RESOURCE_GROUP. -export set AZURE_CLOUD_NAME= -export set AZURE_LOCATION= -export set AZURE_ENVIRONMENT_FILEPATH= -export set AZURE_RESOURCE_GROUP= -``` - -### Run integration tests -``` -make test-integration -``` diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go deleted file mode 100644 index c55d7989c..000000000 --- a/test/integration/integration_test.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package integration - -import ( - "context" - "log" - "os" - "os/exec" - "strings" - "testing" - - "sigs.k8s.io/blob-csi-driver/test/utils/azure" - "sigs.k8s.io/blob-csi-driver/test/utils/credentials" - - "github.com/stretchr/testify/assert" -) - -func TestIntegration(t *testing.T) { - creds, err := credentials.CreateAzureCredentialFile() - defer func() { - err := credentials.DeleteAzureCredentialFile() - assert.NoError(t, err) - }() - assert.NoError(t, err) - assert.NotNil(t, creds) - - os.Setenv("AZURE_CREDENTIAL_FILE", credentials.TempAzureCredentialFilePath) - - azureClient, err := azure.GetClient(creds.Cloud, creds.SubscriptionID, creds.AADClientID, creds.TenantID, creds.AADClientSecret) - assert.NoError(t, err) - - ctx := context.Background() - // Create an empty resource group for integration test - log.Printf("Creating resource group %s in %s", creds.ResourceGroup, creds.Cloud) - _, err = azureClient.EnsureResourceGroup(ctx, creds.ResourceGroup, creds.Location, nil) - assert.NoError(t, err) - defer func() { - // Only delete resource group the test created - if strings.HasPrefix(creds.ResourceGroup, credentials.ResourceGroupPrefix) { - log.Printf("Deleting resource group %s", creds.ResourceGroup) - err := azureClient.DeleteResourceGroup(ctx, creds.ResourceGroup) - assert.NoError(t, err) - } - }() - - // Execute the script from project root - err = os.Chdir("../..") - assert.NoError(t, err) - // Change directory back to test/integration in preparation for next test - defer func() { - err := os.Chdir("test/integration") - assert.NoError(t, err) - }() - - cwd, err := os.Getwd() - assert.NoError(t, err) - assert.True(t, strings.HasSuffix(cwd, "blob-csi-driver")) - - // Pass in resource group name, storage account name and cloud type - cmd := exec.Command("./test/integration/run-tests-all-clouds.sh", creds.ResourceGroup, creds.Cloud) - cmd.Dir = cwd - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - t.Fatalf("Integration test failed %v", err) - } -} diff --git a/test/integration/run-test.sh b/test/integration/run-test.sh deleted file mode 100755 index 76ec62517..000000000 --- a/test/integration/run-test.sh +++ /dev/null @@ -1,100 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -set -euo pipefail - -function cleanup { - echo "pkill -f blobplugin" - pkill -f blobplugin -} - -t=$(date +%s) -readonly volname="citest-$t" -readonly volsize="2147483648" -readonly expanded_vol_size="2147483650" -readonly endpoint="$1" -readonly staging_target_path="$2" -readonly target_path="$3" -readonly resource_group="$4" -readonly cloud="$5" - -echo "Begin to run integration test on $cloud..." 
- -# Run CSI driver as a background service -_output/amd64/blobplugin --endpoint "$endpoint" --nodeid CSINode --enable-blob-mock-mount -v=5 & -trap cleanup EXIT - -if [[ "$cloud" == "AzureChinaCloud" ]]; then - sleep 25 -else - sleep 5 -fi - -# Begin to run CSI functions one by one -echo "Create volume test:" -value="$(csc controller new --endpoint "$endpoint" --cap MULTI_NODE_MULTI_WRITER,mount,xfs "$volname" --req-bytes "$volsize" --params skuname=Standard_LRS)" -sleep 15 - -volumeid="$(echo "$value" | awk '{print $1}' | sed 's/"//g')" -echo "Got volume id: $volumeid" -storage_account_name="$(echo "$volumeid" | awk -F# '{print $2}')" - -csc controller validate-volume-capabilities --endpoint "$endpoint" --cap MULTI_NODE_MULTI_WRITER,mount,xfs "$volumeid" - -if [[ "$cloud" != "AzureChinaCloud" ]]; then - echo "stage volume test:" - csc node stage --endpoint "$endpoint" --cap MULTI_NODE_MULTI_WRITER,mount,xfs --staging-target-path "$staging_target_path" "$volumeid" - - echo "publish volume test:" - csc node publish --endpoint "$endpoint" --cap MULTI_NODE_MULTI_WRITER,mount,xfs --staging-target-path "$staging_target_path" --target-path "$target_path" "$volumeid" - sleep 2 - - echo "node stats test:" - csc node stats --endpoint "$endpoint" "$volumeid:$target_path:$staging_target_path" - sleep 2 - - echo 'expand volume test' - csc controller expand-volume --endpoint "$endpoint" --req-bytes "$expanded_vol_size" "$volumeid" - - echo "unpublish volume test:" - csc node unpublish --endpoint "$endpoint" --target-path "$target_path" "$volumeid" - sleep 2 - - echo "unstage volume test:" - csc node unstage --endpoint "$endpoint" --staging-target-path "$staging_target_path" "$volumeid" - sleep 2 -fi - -echo "Delete volume test:" -csc controller del --endpoint "$endpoint" "$volumeid" -sleep 15 - -echo "Create volume in storage account($storage_account_name) under resource group($resource_group):" -value="$(csc controller new --endpoint "$endpoint" --cap MULTI_NODE_MULTI_WRITER,mount,xfs "$volname" --req-bytes "$volsize" --params skuname=Standard_LRS,storageAccount=$storage_account_name,resourceGroup=$resource_group)" -sleep 15 - -volumeid="$(echo "$value" | awk '{print $1}' | sed 's/"//g')" -echo "got volume id: $volumeid" - -echo "Delete volume test:" -csc controller del --endpoint "$endpoint" "$volumeid" -sleep 15 - -csc identity plugin-info --endpoint "$endpoint" -csc node get-info --endpoint "$endpoint" - -echo "Integration test on $cloud is completed." diff --git a/test/integration/run-tests-all-clouds.sh b/test/integration/run-tests-all-clouds.sh deleted file mode 100755 index 18d9fb5d4..000000000 --- a/test/integration/run-tests-all-clouds.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eo pipefail - -GO111MODULE=off go get github.com/rexray/gocsi/csc - -function install_blobfuse_bin { - echo 'Installing blobfuse...' 
- apt-get update && apt install ca-certificates pkg-config libfuse-dev cmake libcurl4-gnutls-dev libgnutls28-dev uuid-dev libgcrypt20-dev wget -y - wget https://packages.microsoft.com/config/ubuntu/16.04/packages-microsoft-prod.deb - dpkg -i packages-microsoft-prod.deb - apt-get update && apt install blobfuse fuse -y - rm -f packages-microsoft-prod.deb -} - -readonly resource_group="$1" -readonly cloud="$2" - -# copy blobfuse binary -if [[ "$cloud" == "AzureStackCloud" ]]; then - install_blobfuse_bin -else - mkdir -p /usr/blob - cp test/artifacts/blobfuse /usr/bin/blobfuse -fi - -apt update && apt install libfuse2 -y -test/integration/run-test.sh "tcp://127.0.0.1:10000" "/tmp/stagingtargetpath" "/tmp/targetpath" "$resource_group" "$cloud" From 2681d4e9529a56d465fcfaec37717f38b1c9333d Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Thu, 15 Feb 2024 03:07:01 +0000 Subject: [PATCH 071/157] test: fix goveralls --- .github/workflows/linux.yaml | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/.github/workflows/linux.yaml b/.github/workflows/linux.yaml index 011ac4c03..d8b6bc40e 100644 --- a/.github/workflows/linux.yaml +++ b/.github/workflows/linux.yaml @@ -25,7 +25,7 @@ jobs: sudo dpkg -i packages-microsoft-prod.deb sudo apt-get update sudo apt-get install blobfuse - go test -covermode=count -coverprofile=profile.cov ./pkg/... + go test -race -covermode=atomic -coverprofile=profile.cov ./pkg/... - name: Run build test run: | @@ -42,9 +42,10 @@ jobs: export PATH=$PATH:$HOME/.local/bin make blobfuse-proxy - - name: Get code coverage + - name: Install goveralls + run: go install github.com/mattn/goveralls@latest + + - name: Send coverage env: - COVERALLS_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - GO111MODULE=off go get github.com/mattn/goveralls - $(go env GOPATH)/bin/goveralls -coverprofile=profile.cov -service=github + COVERALLS_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: goveralls -coverprofile=profile.cov -service=github From d71250adde44882bd81c356a177b6cf3db20e1c9 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Sun, 18 Feb 2024 14:17:48 +0000 Subject: [PATCH 072/157] fix: panic when controller does not have cloud config --- pkg/blob/azure.go | 17 +++++++- pkg/blob/azure_test.go | 85 ++++++++++++++++++++++++++++++++++++ pkg/blob/controllerserver.go | 12 ++--- pkg/blob/nodeserver.go | 7 +-- 4 files changed, 106 insertions(+), 15 deletions(-) diff --git a/pkg/blob/azure.go b/pkg/blob/azure.go index a99a59653..13a2425bc 100644 --- a/pkg/blob/azure.go +++ b/pkg/blob/azure.go @@ -25,6 +25,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" "github.com/Azure/azure-sdk-for-go/storage" "github.com/Azure/go-autorest/autorest" + azure2 "github.com/Azure/go-autorest/autorest/azure" "golang.org/x/net/context" "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" @@ -159,7 +160,7 @@ func (d *Driver) initializeKvClient() (*kv.BaseClient, error) { // getKeyvaultToken retrieves a new service principal token to access keyvault func (d *Driver) getKeyvaultToken() (authorizer autorest.Authorizer, err error) { - env := d.cloud.Environment + env := d.getCloudEnvironment() kvEndPoint := strings.TrimSuffix(env.KeyVaultEndpoint, "/") servicePrincipalToken, err := providerconfig.GetServicePrincipalToken(&d.cloud.AzureAuthConfig, &env, kvEndPoint) if err != nil { @@ -236,3 +237,17 @@ func (d *Driver) updateSubnetServiceEndpoints(ctx context.Context, vnetResourceG return nil } + +func (d *Driver) getStorageEndPointSuffix() string { + if d.cloud == nil || 
d.cloud.Environment.StorageEndpointSuffix == "" { + return defaultStorageEndPointSuffix + } + return d.cloud.Environment.StorageEndpointSuffix +} + +func (d *Driver) getCloudEnvironment() azure2.Environment { + if d.cloud == nil { + return azure2.PublicCloud + } + return d.cloud.Environment +} diff --git a/pkg/blob/azure_test.go b/pkg/blob/azure_test.go index 3dda37553..9643c3a58 100644 --- a/pkg/blob/azure_test.go +++ b/pkg/blob/azure_test.go @@ -375,3 +375,88 @@ func TestUpdateSubnetServiceEndpoints(t *testing.T) { t.Run(tc.name, tc.testFunc) } } + +func TestGetStorageEndPointSuffix(t *testing.T) { + d := NewFakeDriver() + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + tests := []struct { + name string + cloud *azureprovider.Cloud + expectedSuffix string + }{ + { + name: "nil cloud", + cloud: nil, + expectedSuffix: "core.windows.net", + }, + { + name: "empty cloud", + cloud: &azureprovider.Cloud{}, + expectedSuffix: "core.windows.net", + }, + { + name: "cloud with storage endpoint suffix", + cloud: &azureprovider.Cloud{ + Environment: azure.Environment{ + StorageEndpointSuffix: "suffix", + }, + }, + expectedSuffix: "suffix", + }, + { + name: "public cloud", + cloud: &azureprovider.Cloud{ + Environment: azure.PublicCloud, + }, + expectedSuffix: "core.windows.net", + }, + { + name: "china cloud", + cloud: &azureprovider.Cloud{ + Environment: azure.ChinaCloud, + }, + expectedSuffix: "core.chinacloudapi.cn", + }, + } + + for _, test := range tests { + d.cloud = test.cloud + suffix := d.getStorageEndPointSuffix() + assert.Equal(t, test.expectedSuffix, suffix, test.name) + } +} + +func TestGetCloudEnvironment(t *testing.T) { + d := NewFakeDriver() + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + tests := []struct { + name string + cloud *azureprovider.Cloud + expectedEnv azure.Environment + }{ + { + name: "nil cloud", + cloud: nil, + expectedEnv: azure.PublicCloud, + }, + { + name: "cloud with environment", + cloud: &azureprovider.Cloud{ + Environment: azure.ChinaCloud, + }, + expectedEnv: azure.ChinaCloud, + }, + } + + for _, test := range tests { + d.cloud = test.cloud + env := d.getCloudEnvironment() + assert.Equal(t, test.expectedEnv, env, test.name) + } +} diff --git a/pkg/blob/controllerserver.go b/pkg/blob/controllerserver.go index e5c503de0..009396878 100644 --- a/pkg/blob/controllerserver.go +++ b/pkg/blob/controllerserver.go @@ -309,11 +309,7 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) } if strings.TrimSpace(storageEndpointSuffix) == "" { - if d.cloud.Environment.StorageEndpointSuffix != "" { - storageEndpointSuffix = d.cloud.Environment.StorageEndpointSuffix - } else { - storageEndpointSuffix = defaultStorageEndPointSuffix - } + storageEndpointSuffix = d.getStorageEndPointSuffix() } accountOptions := &azure.AccountOptions{ @@ -558,7 +554,7 @@ func (d *Driver) ValidateVolumeCapabilities(ctx context.Context, req *csi.Valida var exist bool secrets := req.GetSecrets() if len(secrets) > 0 { - container, err := getContainerReference(containerName, secrets, d.cloud.Environment) + container, err := getContainerReference(containerName, secrets, d.getCloudEnvironment()) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } @@ -677,7 +673,7 @@ func (d *Driver) CreateBlobContainer(ctx context.Context, subsID, resourceGroupN return wait.ExponentialBackoff(d.cloud.RequestBackoff(), func() (bool, error) { var err error if len(secrets) > 0 { - container, getErr := getContainerReference(containerName, secrets, 
d.cloud.Environment) + container, getErr := getContainerReference(containerName, secrets, d.getCloudEnvironment()) if getErr != nil { return true, getErr } @@ -714,7 +710,7 @@ func (d *Driver) DeleteBlobContainer(ctx context.Context, subsID, resourceGroupN return wait.ExponentialBackoff(d.cloud.RequestBackoff(), func() (bool, error) { var err error if len(secrets) > 0 { - container, getErr := getContainerReference(containerName, secrets, d.cloud.Environment) + container, getErr := getContainerReference(containerName, secrets, d.getCloudEnvironment()) if getErr != nil { return true, getErr } diff --git a/pkg/blob/nodeserver.go b/pkg/blob/nodeserver.go index 37c1f8860..94a98bdf3 100644 --- a/pkg/blob/nodeserver.go +++ b/pkg/blob/nodeserver.go @@ -30,7 +30,6 @@ import ( azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/metrics" - "github.com/Azure/azure-sdk-for-go/storage" "github.com/container-storage-interface/spec/lib/go/csi" "k8s.io/apimachinery/pkg/util/wait" @@ -334,11 +333,7 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe containerName = replaceWithMap(containerName, containerNameReplaceMap) if strings.TrimSpace(storageEndpointSuffix) == "" { - if d.cloud.Environment.StorageEndpointSuffix != "" { - storageEndpointSuffix = d.cloud.Environment.StorageEndpointSuffix - } else { - storageEndpointSuffix = storage.DefaultBaseURL - } + storageEndpointSuffix = d.getStorageEndPointSuffix() } if strings.TrimSpace(serverAddress) == "" { From cad8c38678046882a76d938e5a819798da4d4670 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Tue, 20 Feb 2024 06:25:41 +0000 Subject: [PATCH 073/157] fix: match tags issue in account search logic --- go.mod | 32 +- go.sum | 54 +- .../azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 10 + .../sdk/azcore/internal/exported/request.go | 86 ++- .../sdk/azcore/internal/shared/constants.go | 2 +- .../sdk/azcore/runtime/request.go | 6 +- .../sdk/azidentity/CHANGELOG.md | 39 +- .../sdk/azidentity/MIGRATION.md | 6 +- .../azure-sdk-for-go/sdk/azidentity/README.md | 40 +- .../sdk/azidentity/TOKEN_CACHING.MD | 70 ++ .../sdk/azidentity/TROUBLESHOOTING.md | 46 +- .../sdk/azidentity/assets.json | 2 +- .../sdk/azidentity/authentication_record.go | 95 +++ .../sdk/azidentity/azidentity.go | 51 +- .../sdk/azidentity/azure_cli_credential.go | 70 +- .../azure_developer_cli_credential.go | 169 +++++ .../azure-sdk-for-go/sdk/azidentity/ci.yml | 1 + .../azidentity/client_assertion_credential.go | 26 +- .../client_certificate_credential.go | 25 +- .../azidentity/client_secret_credential.go | 24 +- .../sdk/azidentity/confidential_client.go | 32 +- .../azidentity/default_azure_credential.go | 16 +- .../azidentity/developer_credential_util.go | 38 + .../sdk/azidentity/device_code_credential.go | 54 +- .../sdk/azidentity/environment_credential.go | 4 +- .../azure-sdk-for-go/sdk/azidentity/errors.go | 14 +- .../azure-sdk-for-go/sdk/azidentity/go.work | 6 + .../sdk/azidentity/go.work.sum | 39 ++ .../interactive_browser_credential.go | 50 +- .../sdk/azidentity/internal/exported.go | 18 + .../sdk/azidentity/internal/internal.go | 31 + .../sdk/azidentity/managed_identity_client.go | 54 +- .../azidentity/managed_identity_credential.go | 20 +- .../sdk/azidentity/on_behalf_of_credential.go | 17 +- .../sdk/azidentity/public_client.go | 133 +++- .../username_password_credential.go | 40 +- .../sdk/azidentity/version.go | 5 +- .../sdk/azidentity/workload_identity.go | 11 +- .../sdk/internal/exported/exported.go | 5 + 
.../compute/armcompute/v5/CHANGELOG.md | 8 + .../compute/armcompute/v5/README.md | 2 +- .../compute/armcompute/v5/assets.json | 2 +- .../compute/armcompute/v5/autorest.md | 8 +- .../compute/armcompute/v5/constants.go | 23 +- .../armcompute/v5/diskaccesses_client.go | 54 +- .../v5/diskencryptionsets_client.go | 34 +- .../armcompute/v5/diskrestorepoint_client.go | 20 +- .../compute/armcompute/v5/disks_client.go | 42 +- .../compute/armcompute/v5/models.go | 3 + .../compute/armcompute/v5/models_serde.go | 4 + .../compute/armcompute/v5/snapshots_client.go | 42 +- .../armcontainerservice/v4/CHANGELOG.md | 105 +++ .../armcontainerservice/v4/README.md | 6 +- .../v4/agentpools_client.go | 40 +- .../armcontainerservice/v4/assets.json | 2 +- .../armcontainerservice/v4/autorest.md | 8 +- .../armcontainerservice/v4/constants.go | 2 +- .../v4/maintenanceconfigurations_client.go | 16 +- .../v4/managedclusters_client.go | 130 ++-- .../armcontainerservice/v4/models.go | 4 + .../armcontainerservice/v4/models_serde.go | 4 + .../v4/operations_client.go | 4 +- .../v4/privateendpointconnections_client.go | 18 +- .../v4/privatelinkresources_client.go | 4 +- .../v4/resolveprivatelinkserviceid_client.go | 4 +- .../v4/snapshots_client.go | 24 +- .../v4/trustedaccessrolebindings_client.go | 20 +- .../v4/trustedaccessroles_client.go | 4 +- .../internal/base/internal/storage/storage.go | 90 ++- .../apps/public/public.go | 2 +- .../golang-jwt/jwt/v5/MIGRATION_GUIDE.md | 14 +- vendor/github.com/golang-jwt/jwt/v5/ecdsa.go | 4 +- .../github.com/golang-jwt/jwt/v5/ed25519.go | 7 +- .../golang-jwt/jwt/v5/errors_go_other.go | 2 +- vendor/github.com/golang-jwt/jwt/v5/hmac.go | 4 +- vendor/github.com/golang-jwt/jwt/v5/none.go | 2 +- vendor/github.com/golang-jwt/jwt/v5/parser.go | 81 ++- .../golang-jwt/jwt/v5/parser_option.go | 8 + vendor/github.com/golang-jwt/jwt/v5/rsa.go | 4 +- .../github.com/golang-jwt/jwt/v5/rsa_pss.go | 4 +- vendor/github.com/golang-jwt/jwt/v5/token.go | 14 + vendor/github.com/golang-jwt/jwt/v5/types.go | 5 +- .../github.com/golang-jwt/jwt/v5/validator.go | 47 +- vendor/github.com/google/uuid/CHANGELOG.md | 13 + vendor/github.com/google/uuid/hash.go | 6 + vendor/github.com/google/uuid/version7.go | 39 +- .../cloud-provider/volume/errors/errors.go | 77 --- .../cloud-provider/volume/helpers/rounding.go | 165 ----- .../cloud-provider/volume/helpers/zones.go | 313 --------- vendor/modules.txt | 41 +- .../pkg/azclient/auth_conf.go | 2 +- .../pkg/azclient/diskclient/custom.go | 45 ++ .../pkg/azclient/diskclient/interface.go | 3 + .../pkg/azclient/factory.go | 1 + .../pkg/azclient/factory_gen.go | 23 +- .../pkg/azclient/mock_azclient/interface.go | 15 + .../pkg/azclient/snapshotclient/interface.go | 2 +- .../virtualmachineclient/interface.go | 1 + .../fileclient/azure_fileclient.go | 7 +- .../cloud-provider-azure/pkg/consts/consts.go | 16 +- .../pkg/provider/azure.go | 122 ++-- .../pkg/provider/azure_controller_common.go | 651 +----------------- .../pkg/provider/azure_controller_standard.go | 34 +- .../pkg/provider/azure_controller_vmss.go | 28 +- .../pkg/provider/azure_controller_vmssflex.go | 31 +- .../pkg/provider/azure_fakes.go | 13 +- .../pkg/provider/azure_instances_v1.go | 8 +- .../pkg/provider/azure_loadbalancer.go | 135 ++-- .../azure_loadbalancer_backendpool.go | 29 +- .../pkg/provider/azure_loadbalancer_repo.go | 11 +- .../pkg/provider/azure_local_services.go | 15 +- .../provider/azure_managedDiskController.go | 461 ------------- .../pkg/provider/azure_mock_vmsets.go | 132 ++-- 
.../pkg/provider/azure_standard.go | 16 +- .../pkg/provider/azure_storageaccount.go | 23 +- .../pkg/provider/azure_utils.go | 79 ++- .../pkg/provider/azure_vmsets.go | 19 +- .../pkg/provider/azure_vmss.go | 52 +- .../pkg/provider/azure_vmss_cache.go | 23 +- .../pkg/provider/azure_vmss_repo.go | 2 +- .../pkg/provider/azure_vmssflex.go | 24 +- .../provider/virtualmachine/virtualmachine.go | 33 + .../pkg/util/sets/string.go | 100 +++ .../pkg/util/string/string.go} | 16 +- .../cloud-provider-azure/pkg/util/vm/vm.go | 55 ++ 125 files changed, 2555 insertions(+), 2692 deletions(-) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go delete mode 100644 vendor/k8s.io/cloud-provider/volume/errors/errors.go delete mode 100644 vendor/k8s.io/cloud-provider/volume/helpers/rounding.go delete mode 100644 vendor/k8s.io/cloud-provider/volume/helpers/zones.go create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/custom.go delete mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_managedDiskController.go create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/sets/string.go rename vendor/{k8s.io/cloud-provider/volume/constants.go => sigs.k8s.io/cloud-provider-azure/pkg/util/string/string.go} (59%) create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/vm/vm.go diff --git a/go.mod b/go.mod index e6d1999b1..40551ca2f 100644 --- a/go.mod +++ b/go.mod @@ -6,8 +6,8 @@ toolchain go1.21.4 require ( github.com/Azure/azure-sdk-for-go v68.0.0+incompatible - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 @@ -29,24 +29,24 @@ require ( golang.org/x/net v0.21.0 google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.32.0 - k8s.io/api v0.29.0 - k8s.io/apimachinery v0.29.0 - k8s.io/client-go v0.29.0 + k8s.io/api v0.29.2 + k8s.io/apimachinery v0.29.2 + k8s.io/client-go v0.29.2 k8s.io/component-base v0.29.0 k8s.io/klog/v2 v2.120.1 k8s.io/kubernetes v1.29.0 k8s.io/mount-utils v0.29.0 k8s.io/utils v0.0.0-20231127182322-b307cd553661 - sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240108181414-3a9c6084f8a5 - sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20231208022044-b9ede3fc98e9 + sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240220055818-b76605c9c708 + sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240220055818-b76605c9c708 sigs.k8s.io/yaml v1.4.0 ) require ( - 
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.4.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.6.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.7.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.2.0 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect @@ -56,7 +56,7 @@ require ( github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect @@ -68,7 +68,7 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch v5.7.0+incompatible // indirect + github.com/evanphx/json-patch v5.9.0+incompatible // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-ini/ini v1.67.0 @@ -80,14 +80,14 @@ require ( github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - github.com/golang-jwt/jwt/v5 v5.0.0 // indirect + github.com/golang-jwt/jwt/v5 v5.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/cel-go v0.17.7 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20230602010524-ada837c32108 // indirect - github.com/google/uuid v1.5.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/imdario/mergo v0.3.9 // indirect @@ -104,7 +104,7 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/selinux v1.11.0 // indirect - github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.16.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect @@ -156,7 +156,7 @@ require ( k8s.io/kubelet v0.29.0 // indirect k8s.io/pod-security-admission v0.29.0 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect - 
sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240105075710-c4d4895a970b + sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240218020800-ba9f211f36bf sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index 6349770f2..9e716da1b 100644 --- a/go.sum +++ b/go.sum @@ -5,18 +5,18 @@ cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGB cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.4.0 h1:QfV5XZt6iNa2aWMAt96CZEbfJ7kgG/qYIpq465Shr5E= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.4.0/go.mod h1:uYt4CfhkJA9o0FN7jfE5minm/i4nUE4MjGUJkzB6Zs8= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 h1:c4k2FIYIh4xtwqrQwV0Ct1v5+ehlNXj5NI/MWVsiTkQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2/go.mod h1:5FDJtLEO/GxwNgUxbwrY3LP0pEoThTQJtk2oysdXHxM= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0 h1:MxA59PGoCFb+vCwRQi3PhQEwHj4+r2dhuv9HG+vM7iM= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0/go.mod h1:uYt4CfhkJA9o0FN7jfE5minm/i4nUE4MjGUJkzB6Zs8= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0 h1:DWlwvVV5r/Wy1561nZ3wrpI1/vDIBRY/Wd1HWaRBZWA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0/go.mod h1:E7ltexgRDmeJ0fJWv0D/HLwY2xbDdN+uv+X2uZtOx3w= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.6.0 h1:AAIdAyPkFff6XTct2lQCxOWN/+LnA41S7kIkzKaMbyE= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.6.0/go.mod h1:noQIdW75SiQFB3mSFJBr4iRRH83S9skaFiBv4C0uEs0= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.7.0 h1:g65N4m1sAjm0BkjIJYtp5qnJlkoFtd6oqfa27KO9fI4= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.7.0/go.mod h1:noQIdW75SiQFB3mSFJBr4iRRH83S9skaFiBv4C0uEs0= 
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0 h1:HlZMUZW8S4P9oob1nCHxCCKrytxyLc+24nUJGssoEto= @@ -53,8 +53,8 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 h1:hVeq+yCyUi+MsoO/CU95yqCIcdzra5ovzk8Q2BBpV2M= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= @@ -96,8 +96,8 @@ github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxER github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= -github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= -github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= +github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= @@ -128,8 +128,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= -github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= +github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -155,8 +155,8 @@ github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/pprof v0.0.0-20230602010524-ada837c32108 h1:y+JfwMOPwQwIrnh3TUPwwtOAhONoppkHiSa4sQBoK2k= github.com/google/pprof v0.0.0-20230602010524-ada837c32108/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -221,8 +221,8 @@ github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -360,9 +360,9 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= @@ -466,12 +466,12 @@ k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6R k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= 
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= -sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240108181414-3a9c6084f8a5 h1:omR8VXOIPc20bbG2zxImx9WHLPnTXK2uo9q3LNPanMg= -sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240108181414-3a9c6084f8a5/go.mod h1:0WCrYlWxqk3/AptztkqPk1r9Gr3IULSHat7LipAA1sI= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240105075710-c4d4895a970b h1:onCsa2FoC9HGIgW+eQYJI8/IZnefwCcU9rF7ZKtD7f0= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240105075710-c4d4895a970b/go.mod h1:seH99Elt7KgWOOonCmzRcB1yLouqK7B7+l8RoSbqaYE= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20231208022044-b9ede3fc98e9 h1:0XsdZlKjVI0UZYhvg3VbXCPFRRQS5VL1idrTKgzJjnc= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20231208022044-b9ede3fc98e9/go.mod h1:dDc0Ixf5VI01TkTj83ENW1hH5jImGJsdKhQgFRyQsyA= +sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240220055818-b76605c9c708 h1:Hs8BBRzo/V4Qe8m1NG997vaavBb0WSPzKsxdXtjfUpg= +sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240220055818-b76605c9c708/go.mod h1:GJ+Crn1rYpKB21TNlnJQKsRWvIHva7oMApx2RhubFao= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240218020800-ba9f211f36bf h1:pyQe3MFQp1Hb8iya4LaZiBEh3M0RgZ/yv0/ws5memP4= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240218020800-ba9f211f36bf/go.mod h1:cyVxBbpj/UR4Vi4KhxO3vhzBB2JcN08TngoPQ0J0aWc= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240220055818-b76605c9c708 h1:dKAXbaoIRkknnvirDOb4BZLocx4UVtshb9zg3dgZxaE= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240220055818-b76605c9c708/go.mod h1:8Y2dnp0FlQcoFllRnUgTZjMykhMAUF7NrPLgw30Ae4I= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index 5c8411cb5..7a0a524e3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,15 @@ # Release History +## 1.9.2 (2024-02-06) + +### Bugs Fixed + +* `runtime.MarshalAsByteArray` and `runtime.MarshalAsJSON` will preserve the preexisting value of the `Content-Type` header. + +### Other Changes + +* Update to latest version of `internal`. + ## 1.9.1 (2023-12-11) ### Bugs Fixed diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go index 659f2a7d2..8d1ae213c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go @@ -125,46 +125,11 @@ func (req *Request) OperationValue(value interface{}) bool { // SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length // accordingly. If the ReadSeekCloser is nil or empty, Content-Length won't be set. If contentType is "", -// Content-Type won't be set. +// Content-Type won't be set, and if it was set, will be deleted. // Use streaming.NopCloser to turn an io.ReadSeeker into an io.ReadSeekCloser. 
func (req *Request) SetBody(body io.ReadSeekCloser, contentType string) error { - var err error - var size int64 - if body != nil { - size, err = body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size - if err != nil { - return err - } - } - if size == 0 { - // treat an empty stream the same as a nil one: assign req a nil body - body = nil - // RFC 9110 specifies a client shouldn't set Content-Length on a request containing no content - // (Del is a no-op when the header has no value) - req.req.Header.Del(shared.HeaderContentLength) - } else { - _, err = body.Seek(0, io.SeekStart) - if err != nil { - return err - } - req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10)) - req.Raw().GetBody = func() (io.ReadCloser, error) { - _, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream - return body, err - } - } - // keep a copy of the body argument. this is to handle cases - // where req.Body is replaced, e.g. httputil.DumpRequest and friends. - req.body = body - req.req.Body = body - req.req.ContentLength = size - if contentType == "" { - // Del is a no-op when the header has no value - req.req.Header.Del(shared.HeaderContentType) - } else { - req.req.Header.Set(shared.HeaderContentType, contentType) - } - return nil + // clobber the existing Content-Type to preserve behavior + return SetBody(req, body, contentType, true) } // RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation. @@ -211,3 +176,48 @@ type PolicyFunc func(*Request) (*http.Response, error) func (pf PolicyFunc) Do(req *Request) (*http.Response, error) { return pf(req) } + +// SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length accordingly. +// - req is the request to modify +// - body is the request body; if nil or empty, Content-Length won't be set +// - contentType is the value for the Content-Type header; if empty, Content-Type will be deleted +// - clobberContentType when true, will overwrite the existing value of Content-Type with contentType +func SetBody(req *Request, body io.ReadSeekCloser, contentType string, clobberContentType bool) error { + var err error + var size int64 + if body != nil { + size, err = body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size + if err != nil { + return err + } + } + if size == 0 { + // treat an empty stream the same as a nil one: assign req a nil body + body = nil + // RFC 9110 specifies a client shouldn't set Content-Length on a request containing no content + // (Del is a no-op when the header has no value) + req.req.Header.Del(shared.HeaderContentLength) + } else { + _, err = body.Seek(0, io.SeekStart) + if err != nil { + return err + } + req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10)) + req.Raw().GetBody = func() (io.ReadCloser, error) { + _, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream + return body, err + } + } + // keep a copy of the body argument. this is to handle cases + // where req.Body is replaced, e.g. httputil.DumpRequest and friends. 
+ req.body = body + req.req.Body = body + req.req.ContentLength = size + if contentType == "" { + // Del is a no-op when the header has no value + req.req.Header.Del(shared.HeaderContentType) + } else if req.req.Header.Get(shared.HeaderContentType) == "" || clobberContentType { + req.req.Header.Set(shared.HeaderContentType, contentType) + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index bb93daee6..8f749f48d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -40,5 +40,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.9.1" + Version = "v1.9.2" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go index e97223da2..5d1569c8d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go @@ -97,7 +97,8 @@ func EncodeByteArray(v []byte, format Base64Encoding) string { func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) error { // send as a JSON string encode := fmt.Sprintf("\"%s\"", EncodeByteArray(v, format)) - return req.SetBody(exported.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON) + // tsp generated code can set Content-Type so we must prefer that + return exported.SetBody(req, exported.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON, false) } // MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody. @@ -106,7 +107,8 @@ func MarshalAsJSON(req *policy.Request, v interface{}) error { if err != nil { return fmt.Errorf("error marshalling type %T: %s", v, err) } - return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON) + // tsp generated code can set Content-Type so we must prefer that + return exported.SetBody(req, exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON, false) } // MarshalAsXML calls xml.Marshal() to get the XML encoding of v then calls SetBody. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md index 7ea119ab3..71dcb5f3e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -1,5 +1,38 @@ # Release History +## 1.5.1 (2024-01-17) + +### Bugs Fixed +* `InteractiveBrowserCredential` handles `AdditionallyAllowedTenants` correctly + +## 1.5.0 (2024-01-16) + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.5.0-beta.1 +* Removed persistent token caching. It will return in v1.6.0-beta.1 + +### Bugs Fixed +* Credentials now preserve MSAL headers e.g. X-Client-Sku + +### Other Changes +* Upgraded dependencies + +## 1.5.0-beta.2 (2023-11-07) + +### Features Added +* `DefaultAzureCredential` and `ManagedIdentityCredential` support Azure ML managed identity +* Added spans for distributed tracing. + +## 1.5.0-beta.1 (2023-10-10) + +### Features Added +* Optional persistent token caching for most credentials. 
Set `TokenCachePersistenceOptions` + on a credential's options to enable and configure this. See the package documentation for + this version and [TOKEN_CACHING.md](https://aka.ms/azsdk/go/identity/caching) for more + details. +* `AzureDeveloperCLICredential` authenticates with the Azure Developer CLI (`azd`). This + credential is also part of the `DefaultAzureCredential` authentication flow. + ## 1.4.0 (2023-10-10) ### Bugs Fixed @@ -94,14 +127,14 @@ ### Features Added * By default, credentials set client capability "CP1" to enable support for [Continuous Access Evaluation (CAE)](https://docs.microsoft.com/azure/active-directory/develop/app-resilience-continuous-access-evaluation). - This indicates to Azure Active Directory that your application can handle CAE claims challenges. + This indicates to Microsoft Entra ID that your application can handle CAE claims challenges. You can disable this behavior by setting the environment variable "AZURE_IDENTITY_DISABLE_CP1" to "true". * `InteractiveBrowserCredentialOptions.LoginHint` enables pre-populating the login prompt with a username ([#15599](https://github.com/Azure/azure-sdk-for-go/pull/15599)) * Service principal and user credentials support ADFS authentication on Azure Stack. Specify "adfs" as the credential's tenant. * Applications running in private or disconnected clouds can prevent credentials from - requesting Azure AD instance metadata by setting the `DisableInstanceDiscovery` + requesting Microsoft Entra instance metadata by setting the `DisableInstanceDiscovery` field on credential options. * Many credentials can now be configured to authenticate in multiple tenants. The options types for these credentials have an `AdditionallyAllowedTenants` field @@ -454,4 +487,4 @@ ## 0.1.0 (2020-07-23) ### Features Added -* Initial Release. Azure Identity library that provides Azure Active Directory token authentication support for the SDK. +* Initial Release. Azure Identity library that provides Microsoft Entra token authentication support for the SDK. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md index 4ac53eb7b..1a6492023 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md @@ -1,6 +1,6 @@ # Migrating from autorest/adal to azidentity -`azidentity` provides Azure Active Directory (Azure AD) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead. +`azidentity` provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/azure/active-directory/fundamentals/new-name)) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead. This guide shows common authentication code using `autorest/adal` and its equivalent using `azidentity`. 
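For a concrete reference point before the side-by-side snippets, here is a minimal `azidentity` sketch for a service principal with a client secret. The placeholder IDs, secret, and the management scope are illustrative assumptions, not values taken from this patch.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// Illustrative placeholders; real values come from your app registration.
	cred, err := azidentity.NewClientSecretCredential("<tenant-id>", "<client-id>", "<client-secret>", nil)
	if err != nil {
		log.Fatal(err)
	}
	// azidentity takes OAuth 2 scopes rather than the resource identifiers autorest/adal expects.
	tk, err := cred.GetToken(context.TODO(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token expires:", tk.ExpiresOn)
}
```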
@@ -18,7 +18,7 @@ This guide shows common authentication code using `autorest/adal` and its equiva ### `autorest/adal` -Token providers require a token audience (resource identifier) and an instance of `adal.OAuthConfig`, which requires an Azure AD endpoint and tenant: +Token providers require a token audience (resource identifier) and an instance of `adal.OAuthConfig`, which requires a Microsoft Entra endpoint and tenant: ```go import "github.com/Azure/go-autorest/autorest/adal" @@ -284,7 +284,7 @@ if err == nil { } ``` -Note that `azidentity` credentials use the Azure AD v2.0 endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/v2-permissions-and-consent). +Note that `azidentity` credentials use the Microsoft Entra endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/permissions-consent-overview). ## Use azidentity credentials with older packages diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md index da0baa9ad..b6ad2d39f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md @@ -1,9 +1,9 @@ # Azure Identity Client Module for Go -The Azure Identity module provides Azure Active Directory (Azure AD) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication. +The Azure Identity module provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/azure/active-directory/fundamentals/new-name)) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication. [![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azidentity)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) -| [Azure Active Directory documentation](https://docs.microsoft.com/azure/active-directory/) +| [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/) | [Source code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity) # Getting started @@ -35,6 +35,12 @@ signed in to the [Azure CLI](https://docs.microsoft.com/cli/azure). To sign in t When no default browser is available, `az login` will use the device code authentication flow. This can also be selected manually by running `az login --use-device-code`. +#### Authenticate via the Azure Developer CLI + +Developers coding outside of an IDE can also use the [Azure Developer CLI](https://aka.ms/azure-dev) to authenticate. Applications using the `DefaultAzureCredential` or the `AzureDeveloperCLICredential` can use the account logged in to the Azure Developer CLI to authenticate calls in their application when running locally. + +To authenticate with the Azure Developer CLI, run `azd auth login`. On a system with a default web browser, `azd` will launch the browser to authenticate. On systems without a default web browser, run `azd auth login --use-device-code` to use the device code authentication flow. 
+ ## Key concepts ### Credentials @@ -44,9 +50,7 @@ service client to authenticate requests. Service clients across the Azure SDK accept a credential instance when they are constructed, and use that credential to authenticate requests. -The `azidentity` module focuses on OAuth authentication with Azure Active -Directory (AAD). It offers a variety of credential types capable of acquiring -an Azure AD access token. See [Credential Types](#credential-types "Credential Types") for a list of this module's credential types. +The `azidentity` module focuses on OAuth authentication with Microsoft Entra ID. It offers a variety of credential types capable of acquiring a Microsoft Entra access token. See [Credential Types](#credential-types "Credential Types") for a list of this module's credential types. ### DefaultAzureCredential @@ -58,6 +62,7 @@ an Azure AD access token. See [Credential Types](#credential-types "Credential T 1. **Workload Identity** - If the app is deployed on Kubernetes with environment variables set by the workload identity webhook, `DefaultAzureCredential` will authenticate the configured identity. 1. **Managed Identity** - If the app is deployed to an Azure host with managed identity enabled, `DefaultAzureCredential` will authenticate with it. 1. **Azure CLI** - If a user or service principal has authenticated via the Azure CLI `az login` command, `DefaultAzureCredential` will authenticate that identity. +1. **Azure Developer CLI** - If the developer has authenticated via the Azure Developer CLI `azd auth login` command, the `DefaultAzureCredential` will authenticate with that account. > Note: `DefaultAzureCredential` is intended to simplify getting started with the SDK by handling common scenarios with reasonable default behaviors. Developers who want more control or whose scenario isn't served by the default settings should use other credential types. 
@@ -152,6 +157,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) |Credential|Usage |-|- |[AzureCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureCLICredential)|Authenticate as the user signed in to the Azure CLI +|[`AzureDeveloperCLICredential`](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureDeveloperCLICredential)|Authenticates as the user signed in to the Azure Developer CLI ## Environment Variables @@ -161,16 +167,16 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) |variable name|value |-|- -|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application -|`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant +|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application +|`AZURE_TENANT_ID`|ID of the application's Microsoft Entra tenant |`AZURE_CLIENT_SECRET`|one of the application's client secrets #### Service principal with certificate |variable name|value |-|- -|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application -|`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant +|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application +|`AZURE_TENANT_ID`|ID of the application's Microsoft Entra tenant |`AZURE_CLIENT_CERTIFICATE_PATH`|path to a certificate file including private key |`AZURE_CLIENT_CERTIFICATE_PASSWORD`|password of the certificate file, if any @@ -178,22 +184,30 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) |variable name|value |-|- -|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application +|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application |`AZURE_USERNAME`|a username (usually an email address) |`AZURE_PASSWORD`|that user's password Configuration is attempted in the above order. For example, if values for a client secret and certificate are both present, the client secret will be used. +## Token caching + +Token caching is an `azidentity` feature that allows apps to: + +* Cache tokens in memory (default) or on disk (opt-in). +* Improve resilience and performance. +* Reduce the number of requests made to Microsoft Entra ID to obtain access tokens. + +For more details, see the [token caching documentation](https://aka.ms/azsdk/go/identity/caching). + ## Troubleshooting ### Error Handling Credentials return an `error` when they fail to authenticate or lack data they require to authenticate. For guidance on resolving errors from specific credential types, see the [troubleshooting guide](https://aka.ms/azsdk/go/identity/troubleshoot). -For more details on handling specific Azure Active Directory errors please refer to the -Azure Active Directory -[error code documentation](https://docs.microsoft.com/azure/active-directory/develop/reference-aadsts-error-codes). +For more details on handling specific Microsoft Entra errors, see the Microsoft Entra [error code documentation](https://learn.microsoft.com/azure/active-directory/develop/reference-error-codes). ### Logging diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD new file mode 100644 index 000000000..c0d660146 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD @@ -0,0 +1,70 @@ +## Token caching in the Azure Identity client module + +*Token caching* is a feature provided by the Azure Identity library that allows apps to: + +- Improve their resilience and performance. 
+- Reduce the number of requests made to Microsoft Entra ID to obtain access tokens. +- Reduce the number of times the user is prompted to authenticate. + +When an app needs to access a protected Azure resource, it typically needs to obtain an access token from Entra ID. Obtaining that token involves sending a request to Entra ID and may also involve prompting the user. Entra ID then validates the credentials provided in the request and issues an access token. + +Token caching, via the Azure Identity library, allows the app to store this access token [in memory](#in-memory-token-caching), where it's accessible to the current process, or [on disk](#persistent-token-caching) where it can be accessed across application or process invocations. The token can then be retrieved quickly and easily the next time the app needs to access the same resource. The app can avoid making another request to Entra ID, which reduces network traffic and improves resilience. Additionally, in scenarios where the app is authenticating users, token caching also avoids prompting the user each time new tokens are requested. + +### In-memory token caching + +*In-memory token caching* is the default option provided by the Azure Identity library. This caching approach allows apps to store access tokens in memory. With in-memory token caching, the library first determines if a valid access token for the requested resource is already stored in memory. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. If a valid token isn't found, the library will automatically acquire a token by sending a request to Entra ID. The in-memory token cache provided by the Azure Identity library is thread-safe. + +**Note:** When Azure Identity library credentials are used with Azure service libraries (for example, Azure Blob Storage), the in-memory token caching is active in the `Pipeline` layer as well. All `TokenCredential` implementations are supported there, including custom implementations external to the Azure Identity library. + +#### Caching cannot be disabled + +As there are many levels of caching, it's not possible disable in-memory caching. However, the in-memory cache may be cleared by creating a new credential instance. + +### Persistent token caching + +> Only azidentity v1.5.0-beta versions support persistent token caching + +*Persistent disk token caching* is an opt-in feature in the Azure Identity library. The feature allows apps to cache access tokens in an encrypted, persistent storage mechanism. As indicated in the following table, the storage mechanism differs across operating systems. + +| Operating system | Storage mechanism | +|------------------|---------------------------------------| +| Linux | kernel key retention service (keyctl) | +| macOS | Keychain | +| Windows | DPAPI | + +By default the token cache will protect any data which is persisted using the user data protection APIs available on the current platform. +However, there are cases where no data protection is available, and applications may choose to allow storing the token cache in an unencrypted state by setting `TokenCachePersistenceOptions.AllowUnencryptedStorage` to `true`. This allows a credential to fall back to unencrypted storage if it can't encrypt the cache. However, we do not recommend using this storage method due to its significantly lower security measures. 
In addition, tokens are not encrypted solely to the current user, which could potentially allow unauthorized access to the cache by individuals with machine access. + +With persistent disk token caching enabled, the library first determines if a valid access token for the requested resource is already stored in the persistent cache. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. Additionally, the tokens are preserved across app runs, which: + +- Makes the app more resilient to failures. +- Ensures the app can continue to function during an Entra ID outage or disruption. +- Avoids having to prompt users to authenticate each time the process is restarted. + +>IMPORTANT! The token cache contains sensitive data and **MUST** be protected to prevent compromising accounts. All application decisions regarding the persistence of the token cache must consider that a breach of its content will fully compromise all the accounts it contains. + +#### Example code + +See the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.5.0-beta.1#pkg-overview) for code examples demonstrating how to configure persistent caching and access cached data. + +### Credentials supporting token caching + +The following table indicates the state of in-memory and persistent caching in each credential type. + +**Note:** In-memory caching is activated by default. Persistent token caching needs to be enabled as shown in [this example](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.5.0-beta.1#example-package-PersistentCache). + +| Credential | In-memory token caching | Persistent token caching | +|--------------------------------|---------------------------------------------------------------------|--------------------------| +| `AzureCLICredential` | Not Supported | Not Supported | +| `AzureDeveloperCLICredential` | Not Supported | Not Supported | +| `ClientAssertionCredential` | Supported | Supported | +| `ClientCertificateCredential` | Supported | Supported | +| `ClientSecretCredential` | Supported | Supported | +| `DefaultAzureCredential` | Supported if the target credential in the default chain supports it | Not Supported | +| `DeviceCodeCredential` | Supported | Supported | +| `EnvironmentCredential` | Supported | Not Supported | +| `InteractiveBrowserCredential` | Supported | Supported | +| `ManagedIdentityCredential` | Supported | Not Supported | +| `OnBehalfOfCredential` | Supported | Supported | +| `UsernamePasswordCredential` | Supported | Supported | +| `WorkloadIdentityCredential` | Supported | Supported | diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md index fef099813..832c599eb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -8,7 +8,8 @@ This troubleshooting guide covers failure investigation techniques, common error - [Permission issues](#permission-issues) - [Find relevant information in errors](#find-relevant-information-in-errors) - [Enable and configure logging](#enable-and-configure-logging) -- [Troubleshoot AzureCliCredential authentication issues](#troubleshoot-azureclicredential-authentication-issues) +- [Troubleshoot AzureCLICredential authentication issues](#troubleshoot-azureclicredential-authentication-issues) +- [Troubleshoot 
AzureDeveloperCLICredential authentication issues](#troubleshoot-azuredeveloperclicredential-authentication-issues) - [Troubleshoot ClientCertificateCredential authentication issues](#troubleshoot-clientcertificatecredential-authentication-issues) - [Troubleshoot ClientSecretCredential authentication issues](#troubleshoot-clientsecretcredential-authentication-issues) - [Troubleshoot DefaultAzureCredential authentication issues](#troubleshoot-defaultazurecredential-authentication-issues) @@ -23,7 +24,7 @@ This troubleshooting guide covers failure investigation techniques, common error ## Handle azidentity errors -Any service client method that makes a request to the service may return an error due to authentication failure. This is because the credential authenticates on the first call to the service and on any subsequent call that needs to refresh an access token. Authentication errors include a description of the failure and possibly an error message from Azure Active Directory (Azure AD). Depending on the application, these errors may or may not be recoverable. +Any service client method that makes a request to the service may return an error due to authentication failure. This is because the credential authenticates on the first call to the service and on any subsequent call that needs to refresh an access token. Authentication errors include a description of the failure and possibly an error message from Microsoft Entra ID. Depending on the application, these errors may or may not be recoverable. ### Permission issues @@ -31,7 +32,7 @@ Service client errors with a status code of 401 or 403 often indicate that authe ## Find relevant information in errors -Authentication errors can include responses from Azure AD and often contain information helpful in diagnosis. Consider the following error message: +Authentication errors can include responses from Microsoft Entra ID and often contain information helpful in diagnosis. Consider the following error message: ``` ClientSecretCredential authentication failed @@ -57,9 +58,9 @@ This error contains several pieces of information: - __Failing Credential Type__: The type of credential that failed to authenticate. This can be helpful when diagnosing issues with chained credential types such as `DefaultAzureCredential` or `ChainedTokenCredential`. -- __Azure AD Error Code and Message__: The error code and message returned by Azure AD. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/reference-aadsts-error-codes#aadsts-error-codes) has more information on AADSTS error codes. +- __Microsoft Entra ID Error Code and Message__: The error code and message returned by Microsoft Entra ID. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/reference-error-codes#aadsts-error-codes) has more information on AADSTS error codes. -- __Correlation ID and Timestamp__: The correlation ID and timestamp identify the request in server-side logs. This information can be useful to support engineers diagnosing unexpected Azure AD failures. +- __Correlation ID and Timestamp__: The correlation ID and timestamp identify the request in server-side logs. 
This information can be useful to support engineers diagnosing unexpected Microsoft Entra failures. ### Enable and configure logging @@ -96,17 +97,17 @@ azlog.SetEvents(azidentity.EventAuthentication) | Error Code | Issue | Mitigation | |---|---|---| -|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| -|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| -|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| +|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| +|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| +|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| ## Troubleshoot ClientCertificateCredential authentication issues | Error Code | Description | Mitigation | |---|---|---| -|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-1-upload-a-certificate).| -|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. 
To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| +|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-1-upload-a-certificate).| +|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| ## Troubleshoot UsernamePasswordCredential authentication issues @@ -172,7 +173,7 @@ curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-versio |"no azure identity found for request clientID"|The application attempted to authenticate before an identity was assigned to its pod|Verify the pod is labeled correctly. This also occurs when a correctly labeled pod authenticates before the identity is ready. To prevent initialization races, configure NMI to set the Retry-After header in its responses as described in [Pod Identity documentation](https://azure.github.io/aad-pod-identity/docs/configure/feature_flags/#set-retry-after-header-in-nmi-response). -## Troubleshoot AzureCliCredential authentication issues +## Troubleshoot AzureCLICredential authentication issues | Error Message |Description| Mitigation | |---|---|---| @@ -195,6 +196,29 @@ az account get-access-token --output json --resource https://management.core.win > This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. + +## Troubleshoot AzureDeveloperCLICredential authentication issues + +| Error Message |Description| Mitigation | +|---|---|---| +|Azure Developer CLI not found on path|The Azure Developer CLI isn't installed or couldn't be found.|
<ul><li>Ensure the Azure Developer CLI is properly installed. See the installation instructions at [Install or update the Azure Developer CLI](https://learn.microsoft.com/azure/developer/azure-developer-cli/install-azd).</li><li>Validate the installation location has been added to the `PATH` environment variable.</li></ul>|
+|Please run "azd auth login"|No account is logged into the Azure Developer CLI, or the login has expired.|<ul><li>Log in to the Azure Developer CLI using the `azd auth login` command.</li><li>Validate that the Azure Developer CLI can obtain tokens. For instructions, see [Verify the Azure Developer CLI can obtain tokens](#verify-the-azure-developer-cli-can-obtain-tokens).</li></ul>
| + +#### Verify the Azure Developer CLI can obtain tokens + +You can manually verify that the Azure Developer CLI is properly authenticated and can obtain tokens. First, use the `config` command to verify the account that is currently logged in to the Azure Developer CLI. + +```sh +azd config list +``` + +Once you've verified the Azure Developer CLI is using correct account, you can validate that it's able to obtain tokens for this account. + +```sh +azd auth token --output json --scope https://management.core.windows.net/.default +``` +>Note that output of this command will contain a valid access token, and SHOULD NOT BE SHARED to avoid compromising account security. + ## Troubleshoot `WorkloadIdentityCredential` authentication issues diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json index 47e77f88e..173ce2b3c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/azidentity", - "Tag": "go/azidentity_6225ab0470" + "Tag": "go/azidentity_db4a26f583" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go new file mode 100644 index 000000000..ada4d6501 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go @@ -0,0 +1,95 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "encoding/json" + "errors" + "fmt" + "net/url" + "strings" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +var supportedAuthRecordVersions = []string{"1.0"} + +// authenticationRecord is non-secret account information about an authenticated user that user credentials such as +// [DeviceCodeCredential] and [InteractiveBrowserCredential] can use to access previously cached authentication +// data. Call these credentials' Authenticate method to get an authenticationRecord for a user. +type authenticationRecord struct { + // Authority is the URL of the authority that issued the token. + Authority string `json:"authority"` + + // ClientID is the ID of the application that authenticated the user. + ClientID string `json:"clientId"` + + // HomeAccountID uniquely identifies the account. + HomeAccountID string `json:"homeAccountId"` + + // TenantID identifies the tenant in which the user authenticated. + TenantID string `json:"tenantId"` + + // Username is the user's preferred username. + Username string `json:"username"` + + // Version of the AuthenticationRecord. + Version string `json:"version"` +} + +// UnmarshalJSON implements json.Unmarshaler for AuthenticationRecord +func (a *authenticationRecord) UnmarshalJSON(b []byte) error { + // Default unmarshaling is fine but we want to return an error if the record's version isn't supported i.e., we + // want to inspect the unmarshalled values before deciding whether to return an error. Unmarshaling a formally + // different type enables this by assigning all the fields without recursing into this method. 
+ type r authenticationRecord + err := json.Unmarshal(b, (*r)(a)) + if err != nil { + return err + } + if a.Version == "" { + return errors.New("AuthenticationRecord must have a version") + } + for _, v := range supportedAuthRecordVersions { + if a.Version == v { + return nil + } + } + return fmt.Errorf("unsupported AuthenticationRecord version %q. This module supports %v", a.Version, supportedAuthRecordVersions) +} + +// account returns the AuthenticationRecord as an MSAL Account. The account is zero-valued when the AuthenticationRecord is zero-valued. +func (a *authenticationRecord) account() public.Account { + return public.Account{ + Environment: a.Authority, + HomeAccountID: a.HomeAccountID, + PreferredUsername: a.Username, + } +} + +func newAuthenticationRecord(ar public.AuthResult) (authenticationRecord, error) { + u, err := url.Parse(ar.IDToken.Issuer) + if err != nil { + return authenticationRecord{}, fmt.Errorf("Authenticate expected a URL issuer but got %q", ar.IDToken.Issuer) + } + tenant := ar.IDToken.TenantID + if tenant == "" { + tenant = strings.Trim(u.Path, "/") + } + username := ar.IDToken.PreferredUsername + if username == "" { + username = ar.IDToken.UPN + } + return authenticationRecord{ + Authority: fmt.Sprintf("%s://%s", u.Scheme, u.Host), + ClientID: ar.IDToken.Audience, + HomeAccountID: ar.Account.HomeAccountID, + TenantID: tenant, + Username: username, + Version: "1.0", + }, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go index 10b742ce1..c3bcfb56c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -15,12 +15,12 @@ import ( "net/http" "net/url" "os" - "regexp" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" ) @@ -41,6 +41,10 @@ const ( organizationsTenantID = "organizations" developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46" defaultSuffix = "/.default" + + traceNamespace = "Microsoft.Entra" + traceOpGetToken = "GetToken" + traceOpAuthenticate = "Authenticate" ) var ( @@ -49,6 +53,9 @@ var ( errInvalidTenantID = errors.New("invalid tenantID. You can locate your tenantID by following the instructions listed here: https://learn.microsoft.com/partner-center/find-ids-and-domain-names") ) +// tokenCachePersistenceOptions contains options for persistent token caching +type tokenCachePersistenceOptions = internal.TokenCachePersistenceOptions + // setAuthorityHost initializes the authority host for credentials. Precedence is: // 1. cloud.Configuration.ActiveDirectoryAuthorityHost value set by user // 2. value of AZURE_AUTHORITY_HOST @@ -109,29 +116,20 @@ func resolveTenant(defaultTenant, specified, credName string, additionalTenants return "", fmt.Errorf(`%s isn't configured to acquire tokens for tenant %q. 
To enable acquiring tokens for this tenant add it to the AdditionallyAllowedTenants on the credential options, or add "*" to allow acquiring tokens for any tenant`, credName, specified) } -// validTenantID return true is it receives a valid tenantID, returns false otherwise -func validTenantID(tenantID string) bool { - match, err := regexp.MatchString("^[0-9a-zA-Z-.]+$", tenantID) - if err != nil { - return false - } - return match -} - -func newPipelineAdapter(opts *azcore.ClientOptions) pipelineAdapter { - pl := runtime.NewPipeline(component, version, runtime.PipelineOptions{}, opts) - return pipelineAdapter{pl: pl} -} - -type pipelineAdapter struct { - pl runtime.Pipeline +func alphanumeric(r rune) bool { + return ('0' <= r && r <= '9') || ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z') } -func (p pipelineAdapter) CloseIdleConnections() { - // do nothing +func validTenantID(tenantID string) bool { + for _, r := range tenantID { + if !(alphanumeric(r) || r == '.' || r == '-') { + return false + } + } + return true } -func (p pipelineAdapter) Do(r *http.Request) (*http.Response, error) { +func doForClient(client *azcore.Client, r *http.Request) (*http.Response, error) { req, err := runtime.NewRequest(r.Context(), r.Method, r.URL.String()) if err != nil { return nil, err @@ -153,7 +151,18 @@ func (p pipelineAdapter) Do(r *http.Request) (*http.Response, error) { return nil, err } } - resp, err := p.pl.Do(req) + + // copy headers to the new request, ignoring any for which the new request has a value + h := req.Raw().Header + for key, vals := range r.Header { + if _, has := h[key]; !has { + for _, val := range vals { + h.Add(key, val) + } + } + } + + resp, err := client.Pipeline().Do(req) if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go index 55a0d6543..43577ab3c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go @@ -14,7 +14,6 @@ import ( "fmt" "os" "os/exec" - "regexp" "runtime" "strings" "sync" @@ -25,13 +24,9 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/internal/log" ) -const ( - credNameAzureCLI = "AzureCLICredential" - timeoutCLIRequest = 10 * time.Second -) +const credNameAzureCLI = "AzureCLICredential" -// used by tests to fake invoking the CLI -type azureCLITokenProvider func(ctx context.Context, resource string, tenantID string) ([]byte, error) +type azTokenProvider func(ctx context.Context, scopes []string, tenant, subscription string) ([]byte, error) // AzureCLICredentialOptions contains optional parameters for AzureCLICredential. type AzureCLICredentialOptions struct { @@ -39,17 +34,25 @@ type AzureCLICredentialOptions struct { // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the // logged in account can access. AdditionallyAllowedTenants []string + + // subscription is the name or ID of a subscription. Set this to acquire tokens for an account other + // than the Azure CLI's current account. + subscription string + // TenantID identifies the tenant the credential should authenticate in. // Defaults to the CLI's default tenant, which is typically the home tenant of the logged in user. 
TenantID string - tokenProvider azureCLITokenProvider + // inDefaultChain is true when the credential is part of DefaultAzureCredential + inDefaultChain bool + // tokenProvider is used by tests to fake invoking az + tokenProvider azTokenProvider } // init returns an instance of AzureCLICredentialOptions initialized with default values. func (o *AzureCLICredentialOptions) init() { if o.tokenProvider == nil { - o.tokenProvider = defaultTokenProvider + o.tokenProvider = defaultAzTokenProvider } } @@ -65,6 +68,14 @@ func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredent if options != nil { cp = *options } + for _, r := range cp.subscription { + if !(alphanumeric(r) || r == '-' || r == '_' || r == ' ' || r == '.') { + return nil, fmt.Errorf("%s: invalid Subscription %q", credNameAzureCLI, cp.subscription) + } + } + if cp.TenantID != "" && !validTenantID(cp.TenantID) { + return nil, errInvalidTenantID + } cp.init() cp.AdditionallyAllowedTenants = resolveAdditionalTenants(cp.AdditionallyAllowedTenants) return &AzureCLICredential{mu: &sync.Mutex{}, opts: cp}, nil @@ -73,50 +84,51 @@ func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredent // GetToken requests a token from the Azure CLI. This credential doesn't cache tokens, so every call invokes the CLI. // This method is called automatically by Azure SDK clients. func (c *AzureCLICredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + at := azcore.AccessToken{} if len(opts.Scopes) != 1 { - return azcore.AccessToken{}, errors.New(credNameAzureCLI + ": GetToken() requires exactly one scope") + return at, errors.New(credNameAzureCLI + ": GetToken() requires exactly one scope") + } + if !validScope(opts.Scopes[0]) { + return at, fmt.Errorf("%s.GetToken(): invalid scope %q", credNameAzureCLI, opts.Scopes[0]) } tenant, err := resolveTenant(c.opts.TenantID, opts.TenantID, credNameAzureCLI, c.opts.AdditionallyAllowedTenants) if err != nil { - return azcore.AccessToken{}, err + return at, err } - // pass the CLI an AAD v1 resource because we don't know which CLI version is installed and older ones don't support v2 scopes - opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} c.mu.Lock() defer c.mu.Unlock() - b, err := c.opts.tokenProvider(ctx, opts.Scopes[0], tenant) - if err != nil { - return azcore.AccessToken{}, err + b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant, c.opts.subscription) + if err == nil { + at, err = c.createAccessToken(b) } - at, err := c.createAccessToken(b) if err != nil { - return azcore.AccessToken{}, err + err = unavailableIfInChain(err, c.opts.inDefaultChain) + return at, err } msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", credNameAzureCLI, strings.Join(opts.Scopes, ", ")) log.Write(EventAuthentication, msg) return at, nil } -var defaultTokenProvider azureCLITokenProvider = func(ctx context.Context, resource string, tenantID string) ([]byte, error) { - match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource) - if err != nil { - return nil, err - } - if !match { - return nil, fmt.Errorf(`%s: unexpected scope "%s". Only alphanumeric characters and ".", ";", "-", and "/" are allowed`, credNameAzureCLI, resource) - } - +// defaultAzTokenProvider invokes the Azure CLI to acquire a token. It assumes +// callers have verified that all string arguments are safe to pass to the CLI. 
+var defaultAzTokenProvider azTokenProvider = func(ctx context.Context, scopes []string, tenantID, subscription string) ([]byte, error) { + // pass the CLI a Microsoft Entra ID v1 resource because we don't know which CLI version is installed and older ones don't support v2 scopes + resource := strings.TrimSuffix(scopes[0], defaultSuffix) // set a default timeout for this authentication iff the application hasn't done so already var cancel context.CancelFunc if _, hasDeadline := ctx.Deadline(); !hasDeadline { - ctx, cancel = context.WithTimeout(ctx, timeoutCLIRequest) + ctx, cancel = context.WithTimeout(ctx, cliTimeout) defer cancel() } - commandLine := "az account get-access-token -o json --resource " + resource if tenantID != "" { commandLine += " --tenant " + tenantID } + if subscription != "" { + // subscription needs quotes because it may contain spaces + commandLine += ` --subscription "` + subscription + `"` + } var cliCmd *exec.Cmd if runtime.GOOS == "windows" { dir := os.Getenv("SYSTEMROOT") diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go new file mode 100644 index 000000000..cbe7c4c2d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go @@ -0,0 +1,169 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "runtime" + "strings" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +const credNameAzureDeveloperCLI = "AzureDeveloperCLICredential" + +type azdTokenProvider func(ctx context.Context, scopes []string, tenant string) ([]byte, error) + +// AzureDeveloperCLICredentialOptions contains optional parameters for AzureDeveloperCLICredential. +type AzureDeveloperCLICredentialOptions struct { + // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition + // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the + // logged in account can access. + AdditionallyAllowedTenants []string + + // TenantID identifies the tenant the credential should authenticate in. Defaults to the azd environment, + // which is the tenant of the selected Azure subscription. + TenantID string + + // inDefaultChain is true when the credential is part of DefaultAzureCredential + inDefaultChain bool + // tokenProvider is used by tests to fake invoking azd + tokenProvider azdTokenProvider +} + +// AzureDeveloperCLICredential authenticates as the identity logged in to the [Azure Developer CLI]. +// +// [Azure Developer CLI]: https://learn.microsoft.com/azure/developer/azure-developer-cli/overview +type AzureDeveloperCLICredential struct { + mu *sync.Mutex + opts AzureDeveloperCLICredentialOptions +} + +// NewAzureDeveloperCLICredential constructs an AzureDeveloperCLICredential. Pass nil to accept default options. 
+func NewAzureDeveloperCLICredential(options *AzureDeveloperCLICredentialOptions) (*AzureDeveloperCLICredential, error) { + cp := AzureDeveloperCLICredentialOptions{} + if options != nil { + cp = *options + } + if cp.TenantID != "" && !validTenantID(cp.TenantID) { + return nil, errInvalidTenantID + } + if cp.tokenProvider == nil { + cp.tokenProvider = defaultAzdTokenProvider + } + return &AzureDeveloperCLICredential{mu: &sync.Mutex{}, opts: cp}, nil +} + +// GetToken requests a token from the Azure Developer CLI. This credential doesn't cache tokens, so every call invokes azd. +// This method is called automatically by Azure SDK clients. +func (c *AzureDeveloperCLICredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + at := azcore.AccessToken{} + if len(opts.Scopes) == 0 { + return at, errors.New(credNameAzureDeveloperCLI + ": GetToken() requires at least one scope") + } + for _, scope := range opts.Scopes { + if !validScope(scope) { + return at, fmt.Errorf("%s.GetToken(): invalid scope %q", credNameAzureDeveloperCLI, scope) + } + } + tenant, err := resolveTenant(c.opts.TenantID, opts.TenantID, credNameAzureDeveloperCLI, c.opts.AdditionallyAllowedTenants) + if err != nil { + return at, err + } + c.mu.Lock() + defer c.mu.Unlock() + b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant) + if err == nil { + at, err = c.createAccessToken(b) + } + if err != nil { + err = unavailableIfInChain(err, c.opts.inDefaultChain) + return at, err + } + msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", credNameAzureDeveloperCLI, strings.Join(opts.Scopes, ", ")) + log.Write(EventAuthentication, msg) + return at, nil +} + +// defaultAzTokenProvider invokes the Azure Developer CLI to acquire a token. It assumes +// callers have verified that all string arguments are safe to pass to the CLI. 
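A hedged usage sketch for the new AzureDeveloperCLICredential; the helper name and scope are illustrative, and the sketch assumes the developer has already run `azd auth login`.

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// azdToken authenticates as the identity logged in to the Azure Developer CLI.
func azdToken(ctx context.Context) (azcore.AccessToken, error) {
	cred, err := azidentity.NewAzureDeveloperCLICredential(nil)
	if err != nil {
		return azcore.AccessToken{}, err
	}
	// Unlike AzureCLICredential, GetToken here accepts one or more scopes.
	return cred.GetToken(ctx, policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
}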
+var defaultAzdTokenProvider azdTokenProvider = func(ctx context.Context, scopes []string, tenant string) ([]byte, error) { + // set a default timeout for this authentication iff the application hasn't done so already + var cancel context.CancelFunc + if _, hasDeadline := ctx.Deadline(); !hasDeadline { + ctx, cancel = context.WithTimeout(ctx, cliTimeout) + defer cancel() + } + commandLine := "azd auth token -o json" + if tenant != "" { + commandLine += " --tenant-id " + tenant + } + for _, scope := range scopes { + commandLine += " --scope " + scope + } + var cliCmd *exec.Cmd + if runtime.GOOS == "windows" { + dir := os.Getenv("SYSTEMROOT") + if dir == "" { + return nil, newCredentialUnavailableError(credNameAzureDeveloperCLI, "environment variable 'SYSTEMROOT' has no value") + } + cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine) + cliCmd.Dir = dir + } else { + cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine) + cliCmd.Dir = "/bin" + } + cliCmd.Env = os.Environ() + var stderr bytes.Buffer + cliCmd.Stderr = &stderr + output, err := cliCmd.Output() + if err != nil { + msg := stderr.String() + var exErr *exec.ExitError + if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.HasPrefix(msg, "'azd' is not recognized") { + msg = "Azure Developer CLI not found on path" + } else if strings.Contains(msg, "azd auth login") { + msg = `please run "azd auth login" from a command prompt to authenticate before using this credential` + } + if msg == "" { + msg = err.Error() + } + return nil, newCredentialUnavailableError(credNameAzureDeveloperCLI, msg) + } + return output, nil +} + +func (c *AzureDeveloperCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) { + t := struct { + AccessToken string `json:"token"` + ExpiresOn string `json:"expiresOn"` + }{} + err := json.Unmarshal(tk, &t) + if err != nil { + return azcore.AccessToken{}, err + } + exp, err := time.Parse("2006-01-02T15:04:05Z", t.ExpiresOn) + if err != nil { + return azcore.AccessToken{}, fmt.Errorf("error parsing token expiration time %q: %v", t.ExpiresOn, err) + } + return azcore.AccessToken{ + ExpiresOn: exp.UTC(), + Token: t.AccessToken, + }, nil +} + +var _ azcore.TokenCredential = (*AzureDeveloperCLICredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml index 9002ea0b0..d077682c5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml @@ -25,6 +25,7 @@ stages: - template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml parameters: RunLiveTests: true + UsePipelineProxy: false ServiceDirectory: 'azidentity' CloudConfig: Public: diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go index 303d5fc09..fc3df68eb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go @@ -12,6 +12,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) @@ -20,9 +21,9 @@ const credNameAssertion = "ClientAssertionCredential" // ClientAssertionCredential 
authenticates an application with assertions provided by a callback function. // This credential is for advanced scenarios. [ClientCertificateCredential] has a more convenient API for // the most common assertion scenario, authenticating a service principal with a certificate. See -// [Azure AD documentation] for details of the assertion format. +// [Microsoft Entra ID documentation] for details of the assertion format. // -// [Azure AD documentation]: https://docs.microsoft.com/azure/active-directory/develop/active-directory-certificate-credentials#assertion-format +// [Microsoft Entra ID documentation]: https://learn.microsoft.com/azure/active-directory/develop/active-directory-certificate-credentials#assertion-format type ClientAssertionCredential struct { client *confidentialClient } @@ -35,11 +36,15 @@ type ClientAssertionCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions } // NewClientAssertionCredential constructs a ClientAssertionCredential. The getAssertion function must be thread safe. Pass nil for options to accept defaults. @@ -56,9 +61,10 @@ func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(c }, ) msalOpts := confidentialClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, } c, err := newConfidentialClient(tenantID, clientID, credNameAssertion, cred, msalOpts) if err != nil { @@ -67,9 +73,13 @@ func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(c return &ClientAssertionCredential{client: c}, nil } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. 
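A sketch of how an application might supply the assertion callback; fetchSignedAssertion is a stand-in for application-specific signing logic (for example, reading a federated token file), not an SDK function.

package example

import (
	"context"
	"errors"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// fetchSignedAssertion stands in for application code that produces a signed
// JWT assertion in the format Microsoft Entra ID expects.
func fetchSignedAssertion(ctx context.Context) (string, error) {
	return "", errors.New("not implemented: supply a signed client assertion")
}

// newAssertionCredential wires the callback into the credential; the SDK
// invokes the callback whenever it needs a fresh assertion.
func newAssertionCredential(tenantID, clientID string) (*azidentity.ClientAssertionCredential, error) {
	return azidentity.NewClientAssertionCredential(tenantID, clientID, fetchSignedAssertion, nil)
}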
func (c *ClientAssertionCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.client.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameAssertion+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*ClientAssertionCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go index d3300e305..607533f48 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go @@ -15,6 +15,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" "golang.org/x/crypto/pkcs12" ) @@ -29,15 +30,20 @@ type ClientCertificateCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + // SendCertificateChain controls whether the credential sends the public certificate chain in the x5c // header of each token request's JWT. This is required for Subject Name/Issuer (SNI) authentication. // Defaults to False. SendCertificateChain bool + + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions } // ClientCertificateCredential authenticates a service principal with a certificate. @@ -58,10 +64,11 @@ func NewClientCertificateCredential(tenantID string, clientID string, certs []*x return nil, err } msalOpts := confidentialClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, - SendX5C: options.SendCertificateChain, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + SendX5C: options.SendCertificateChain, + tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, } c, err := newConfidentialClient(tenantID, clientID, credNameCert, cred, msalOpts) if err != nil { @@ -70,9 +77,13 @@ func NewClientCertificateCredential(tenantID string, clientID string, certs []*x return &ClientCertificateCredential{client: c}, nil } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. 
+// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. func (c *ClientCertificateCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.client.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameCert+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err } // ParseCertificates loads certificates and a private key, in PEM or PKCS12 format, for use with NewClientCertificateCredential. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go index d2ff7582b..9e6772e9b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go @@ -11,6 +11,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) @@ -24,11 +25,15 @@ type ClientSecretCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions } // ClientSecretCredential authenticates an application with a client secret. @@ -46,20 +51,25 @@ func NewClientSecretCredential(tenantID string, clientID string, clientSecret st return nil, err } msalOpts := confidentialClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, } c, err := newConfidentialClient(tenantID, clientID, credNameSecret, cred, msalOpts) if err != nil { return nil, err } - return &ClientSecretCredential{c}, nil + return &ClientSecretCredential{client: c}, nil } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. 
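A minimal construction sketch for ClientSecretCredential; the environment variable names follow common convention and are illustrative, since this constructor does not read the environment itself.

package example

import (
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// secretCredential builds a service principal credential from configuration
// held in environment variables.
func secretCredential() (*azidentity.ClientSecretCredential, error) {
	return azidentity.NewClientSecretCredential(
		os.Getenv("AZURE_TENANT_ID"),
		os.Getenv("AZURE_CLIENT_ID"),
		os.Getenv("AZURE_CLIENT_SECRET"),
		nil, // *azidentity.ClientSecretCredentialOptions
	)
}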
func (c *ClientSecretCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.client.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameSecret+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*ClientSecretCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go index 4853a9a00..854267bdb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go @@ -10,6 +10,7 @@ import ( "context" "errors" "fmt" + "net/http" "os" "strings" "sync" @@ -17,6 +18,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal" "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) @@ -28,6 +30,7 @@ type confidentialClientOptions struct { // Assertion for on-behalf-of authentication Assertion string DisableInstanceDiscovery, SendX5C bool + tokenCachePersistenceOptions *tokenCachePersistenceOptions } // confidentialClient wraps the MSAL confidential client @@ -40,6 +43,7 @@ type confidentialClient struct { name string opts confidentialClientOptions region string + azClient *azcore.Client } func newConfidentialClient(tenantID, clientID, name string, cred confidential.Credential, opts confidentialClientOptions) (*confidentialClient, error) { @@ -50,6 +54,14 @@ func newConfidentialClient(tenantID, clientID, name string, cred confidential.Cr if err != nil { return nil, err } + client, err := azcore.NewClient(module, version, runtime.PipelineOptions{ + Tracing: runtime.TracingOptions{ + Namespace: traceNamespace, + }, + }, &opts.ClientOptions) + if err != nil { + return nil, err + } opts.AdditionallyAllowedTenants = resolveAdditionalTenants(opts.AdditionallyAllowedTenants) return &confidentialClient{ caeMu: &sync.Mutex{}, @@ -62,6 +74,7 @@ func newConfidentialClient(tenantID, clientID, name string, cred confidential.Cr opts: opts, region: os.Getenv(azureRegionalAuthorityName), tenantID: tenantID, + azClient: client, }, nil } @@ -132,10 +145,15 @@ func (c *confidentialClient) client(ctx context.Context, tro policy.TokenRequest } func (c *confidentialClient) newMSALClient(enableCAE bool) (msalConfidentialClient, error) { + cache, err := internal.NewCache(c.opts.tokenCachePersistenceOptions, enableCAE) + if err != nil { + return nil, err + } authority := runtime.JoinPaths(c.host, c.tenantID) o := []confidential.Option{ confidential.WithAzureRegion(c.region), - confidential.WithHTTPClient(newPipelineAdapter(&c.opts.ClientOptions)), + confidential.WithCache(cache), + confidential.WithHTTPClient(c), } if enableCAE { o = append(o, confidential.WithClientCapabilities(cp1)) @@ -149,8 +167,18 @@ func (c *confidentialClient) newMSALClient(enableCAE bool) (msalConfidentialClie return confidential.New(authority, c.clientID, c.cred, o...) 
} -// resolveTenant returns the correct tenant for a token request given the client's +// resolveTenant returns the correct WithTenantID() argument for a token request given the client's // configuration, or an error when that configuration doesn't allow the specified tenant func (c *confidentialClient) resolveTenant(specified string) (string, error) { return resolveTenant(c.tenantID, specified, c.name, c.opts.AdditionallyAllowedTenants) } + +// these methods satisfy the MSAL ops.HTTPClient interface + +func (c *confidentialClient) CloseIdleConnections() { + // do nothing +} + +func (c *confidentialClient) Do(r *http.Request) (*http.Response, error) { + return doForClient(c.azClient, r) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go index 7647c60b1..35aeef867 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go @@ -30,7 +30,7 @@ type DefaultAzureCredentialOptions struct { // set as a semicolon delimited list of tenants in the environment variable AZURE_ADDITIONALLY_ALLOWED_TENANTS. AdditionallyAllowedTenants []string // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool @@ -49,6 +49,7 @@ type DefaultAzureCredentialOptions struct { // more control over its configuration. // - [ManagedIdentityCredential] // - [AzureCLICredential] +// - [AzureDeveloperCLICredential] // // Consult the documentation for these credential types for more information on how they authenticate. // Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for @@ -117,6 +118,17 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureCLI, err: err}) } + azdCred, err := NewAzureDeveloperCLICredential(&AzureDeveloperCLICredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + TenantID: options.TenantID, + }) + if err == nil { + creds = append(creds, azdCred) + } else { + errorMessages = append(errorMessages, credNameAzureDeveloperCLI+": "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureDeveloperCLI, err: err}) + } + if len(errorMessages) > 0 { log.Writef(EventAuthentication, "NewDefaultAzureCredential failed to initialize some credentials:\n\t%s", strings.Join(errorMessages, "\n\t")) } @@ -129,7 +141,7 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default return &DefaultAzureCredential{chain: chain}, nil } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. 
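A usage sketch showing how applications consume the default chain, which now includes AzureDeveloperCLICredential as a final fallback; the helper name and scope parameter are illustrative.

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// defaultChainToken defers mechanism selection to DefaultAzureCredential,
// whose chain ends with AzureCLICredential and AzureDeveloperCLICredential.
func defaultChainToken(ctx context.Context, scope string) (azcore.AccessToken, error) {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		return azcore.AccessToken{}, err
	}
	return cred.GetToken(ctx, policy.TokenRequestOptions{Scopes: []string{scope}})
}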
func (c *DefaultAzureCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { return c.chain.GetToken(ctx, opts) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go new file mode 100644 index 000000000..d8b952f53 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go @@ -0,0 +1,38 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "errors" + "time" +) + +// cliTimeout is the default timeout for authentication attempts via CLI tools +const cliTimeout = 10 * time.Second + +// unavailableIfInChain returns err or, if the credential was invoked by DefaultAzureCredential, a +// credentialUnavailableError having the same message. This ensures DefaultAzureCredential will try +// the next credential in its chain (another developer credential). +func unavailableIfInChain(err error, inDefaultChain bool) error { + if err != nil && inDefaultChain { + var unavailableErr *credentialUnavailableError + if !errors.As(err, &unavailableErr) { + err = newCredentialUnavailableError(credNameAzureDeveloperCLI, err.Error()) + } + } + return err +} + +// validScope is for credentials authenticating via external tools. The authority validates scopes for all other credentials. +func validScope(scope string) bool { + for _, r := range scope { + if !(alphanumeric(r) || r == '.' || r == '-' || r == '_' || r == '/' || r == ':') { + return false + } + } + return true +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go index d245c269a..1b7a28370 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go @@ -12,6 +12,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" ) const credNameDeviceCode = "DeviceCodeCredential" @@ -23,19 +24,34 @@ type DeviceCodeCredentialOptions struct { // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. AdditionallyAllowedTenants []string + + // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // to enable the credential to use data from a previous authentication. + authenticationRecord authenticationRecord + // ClientID is the ID of the application users will authenticate to. // Defaults to the ID of an Azure development application. ClientID string + + // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. + // When this option is true, [DeviceCodeCredential.GetToken] will return [ErrAuthenticationRequired] when user + // interaction is necessary to acquire a token. + disableAutomaticAuthentication bool + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. 
It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool - // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the + + // TenantID is the Microsoft Entra tenant the credential authenticates in. Defaults to the // "organizations" tenant, which can authenticate work and school accounts. Required for single-tenant // applications. TenantID string + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions + // UserPrompt controls how the credential presents authentication instructions. The credential calls // this function with authentication details when it receives a device code. By default, the credential // prints these details to stdout. @@ -63,14 +79,14 @@ type DeviceCodeMessage struct { UserCode string `json:"user_code"` // VerificationURL is the URL at which the user must authenticate. VerificationURL string `json:"verification_uri"` - // Message is user instruction from Azure Active Directory. + // Message is user instruction from Microsoft Entra ID. Message string `json:"message"` } // DeviceCodeCredential acquires tokens for a user via the device code flow, which has the -// user browse to an Azure Active Directory URL, enter a code, and authenticate. It's useful +// user browse to a Microsoft Entra URL, enter a code, and authenticate. It's useful // for authenticating a user in an environment without a web browser, such as an SSH session. -// If a web browser is available, InteractiveBrowserCredential is more convenient because it +// If a web browser is available, [InteractiveBrowserCredential] is more convenient because it // automatically opens a browser to the login page. type DeviceCodeCredential struct { client *publicClient @@ -84,10 +100,13 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC } cp.init() msalOpts := publicClientOptions{ - AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants, - ClientOptions: cp.ClientOptions, - DeviceCodePrompt: cp.UserPrompt, - DisableInstanceDiscovery: cp.DisableInstanceDiscovery, + AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants, + ClientOptions: cp.ClientOptions, + DeviceCodePrompt: cp.UserPrompt, + DisableAutomaticAuthentication: cp.disableAutomaticAuthentication, + DisableInstanceDiscovery: cp.DisableInstanceDiscovery, + Record: cp.authenticationRecord, + TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions, } c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameDeviceCode, msalOpts) if err != nil { @@ -97,10 +116,23 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC return &DeviceCodeCredential{client: c}, nil } -// GetToken requests an access token from Azure Active Directory. It will begin the device code flow and poll until the user completes authentication. +// Authenticate a user via the device code flow. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. 
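A sketch of overriding UserPrompt so the device code instructions go somewhere other than stdout; printing here simply mirrors the documented default behavior, and the constructor wrapper is illustrative.

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// newDeviceCodeCredential overrides UserPrompt, which receives the device code
// details (verification URL, user code, and the combined message).
func newDeviceCodeCredential() (*azidentity.DeviceCodeCredential, error) {
	return azidentity.NewDeviceCodeCredential(&azidentity.DeviceCodeCredentialOptions{
		UserPrompt: func(ctx context.Context, dc azidentity.DeviceCodeMessage) error {
			// dc.Message already combines the verification URL and user code.
			fmt.Println(dc.Message)
			return nil
		},
	})
}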
+func (c *DeviceCodeCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameDeviceCode+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.Authenticate(ctx, opts) + return tk, err +} + +// GetToken requests an access token from Microsoft Entra ID. It will begin the device code flow and poll until the user completes authentication. // This method is called automatically by Azure SDK clients. func (c *DeviceCodeCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.client.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameDeviceCode+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*DeviceCodeCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go index 7ecd928e0..42f84875e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go @@ -25,7 +25,7 @@ type EnvironmentCredentialOptions struct { azcore.ClientOptions // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool @@ -156,7 +156,7 @@ func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*Environme return nil, errors.New("incomplete environment variable configuration. Only AZURE_TENANT_ID and AZURE_CLIENT_ID are set") } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. func (c *EnvironmentCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { return c.cred.GetToken(ctx, opts) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go index e1a21e003..335d2b7dc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go @@ -18,6 +18,10 @@ import ( msal "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" ) +// errAuthenticationRequired indicates a credential's Authenticate method must be called to acquire a token +// because user interaction is required and the credential is configured not to automatically prompt the user. +var errAuthenticationRequired error = &credentialUnavailableError{"can't acquire a token without user interaction. 
Call Authenticate to interactively authenticate a user"} + // getResponseFromError retrieves the response carried by // an AuthenticationFailedError or MSAL CallErr, if any func getResponseFromError(err error) *http.Response { @@ -53,7 +57,13 @@ func (e *AuthenticationFailedError) Error() string { } msg := &bytes.Buffer{} fmt.Fprintf(msg, e.credType+" authentication failed\n") - fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + if e.RawResponse.Request != nil { + fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + } else { + // this happens when the response is created from a custom HTTP transporter, + // which doesn't guarantee to bind the original request to the response + fmt.Fprintln(msg, "Request information not available") + } fmt.Fprintln(msg, "--------------------------------------------------------------------------------") fmt.Fprintf(msg, "RESPONSE %s\n", e.RawResponse.Status) fmt.Fprintln(msg, "--------------------------------------------------------------------------------") @@ -74,6 +84,8 @@ func (e *AuthenticationFailedError) Error() string { switch e.credType { case credNameAzureCLI: anchor = "azure-cli" + case credNameAzureDeveloperCLI: + anchor = "azd" case credNameCert: anchor = "client-cert" case credNameSecret: diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work new file mode 100644 index 000000000..04ea962b4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work @@ -0,0 +1,6 @@ +go 1.18 + +use ( + . 
+ ./cache +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum new file mode 100644 index 000000000..7cd86b001 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum @@ -0,0 +1,39 @@ +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1 h1:ODs3brnqQM99Tq1PffODpAViYv3Bf8zOg464MU7p5ew= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/tools 
v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go index 08f3efbf3..bd8296983 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -11,6 +11,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" ) const credNameBrowser = "InteractiveBrowserCredential" @@ -22,26 +23,40 @@ type InteractiveBrowserCredentialOptions struct { // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. AdditionallyAllowedTenants []string + + // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // to enable the credential to use data from a previous authentication. + authenticationRecord authenticationRecord + // ClientID is the ID of the application users will authenticate to. // Defaults to the ID of an Azure development application. ClientID string + // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. + // When this option is true, [InteractiveBrowserCredential.GetToken] will return [ErrAuthenticationRequired] when + // user interaction is necessary to acquire a token. + disableAutomaticAuthentication bool + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool // LoginHint pre-populates the account prompt with a username. Users may choose to authenticate a different account. LoginHint string - // RedirectURL is the URL Azure Active Directory will redirect to with the access token. This is required + + // RedirectURL is the URL Microsoft Entra ID will redirect to with the access token. This is required // only when setting ClientID, and must match a redirect URI in the application's registration. // Applications which have registered "http://localhost" as a redirect URI need not set this option. RedirectURL string - // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the + // TenantID is the Microsoft Entra tenant the credential authenticates in. Defaults to the // "organizations" tenant, which can authenticate work and school accounts. TenantID string + + // tokenCachePersistenceOptions enables persistent token caching when not nil. 
+ tokenCachePersistenceOptions *tokenCachePersistenceOptions } func (o *InteractiveBrowserCredentialOptions) init() { @@ -66,10 +81,14 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption } cp.init() msalOpts := publicClientOptions{ - ClientOptions: cp.ClientOptions, - DisableInstanceDiscovery: cp.DisableInstanceDiscovery, - LoginHint: cp.LoginHint, - RedirectURL: cp.RedirectURL, + AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants, + ClientOptions: cp.ClientOptions, + DisableAutomaticAuthentication: cp.disableAutomaticAuthentication, + DisableInstanceDiscovery: cp.DisableInstanceDiscovery, + LoginHint: cp.LoginHint, + Record: cp.authenticationRecord, + RedirectURL: cp.RedirectURL, + TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions, } c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameBrowser, msalOpts) if err != nil { @@ -78,9 +97,22 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption return &InteractiveBrowserCredential{client: c}, nil } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// Authenticate a user via the default browser. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. +func (c *InteractiveBrowserCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameBrowser+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.Authenticate(ctx, opts) + return tk, err +} + +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. func (c *InteractiveBrowserCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.client.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameBrowser+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*InteractiveBrowserCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go new file mode 100644 index 000000000..b1b4d5c8b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go @@ -0,0 +1,18 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package internal + +// TokenCachePersistenceOptions contains options for persistent token caching +type TokenCachePersistenceOptions struct { + // AllowUnencryptedStorage controls whether the cache should fall back to storing its data in plain text + // when encryption isn't possible. Setting this true doesn't disable encryption. The cache always attempts + // encryption before falling back to plaintext storage. + AllowUnencryptedStorage bool + + // Name identifies the cache. Set this to isolate data from other applications. 
+ Name string +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go new file mode 100644 index 000000000..c1498b464 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go @@ -0,0 +1,31 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package internal + +import ( + "errors" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" +) + +var errMissingImport = errors.New("import github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache to enable persistent caching") + +// NewCache constructs a persistent token cache when "o" isn't nil. Applications that intend to +// use a persistent cache must first import the cache module, which will replace this function +// with a platform-specific implementation. +var NewCache = func(o *TokenCachePersistenceOptions, enableCAE bool) (cache.ExportReplace, error) { + if o == nil { + return nil, nil + } + return nil, errMissingImport +} + +// CacheFilePath returns the path to the cache file for the given name. +// Defining it in this package makes it available to azidentity tests. +var CacheFilePath = func(name string) (string, error) { + return "", errMissingImport +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go index fdc3c1f67..7c25cb8bd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -28,12 +28,14 @@ import ( const ( arcIMDSEndpoint = "IMDS_ENDPOINT" + defaultIdentityClientID = "DEFAULT_IDENTITY_CLIENT_ID" identityEndpoint = "IDENTITY_ENDPOINT" identityHeader = "IDENTITY_HEADER" identityServerThumbprint = "IDENTITY_SERVER_THUMBPRINT" headerMetadata = "Metadata" imdsEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" msiEndpoint = "MSI_ENDPOINT" + msiSecret = "MSI_SECRET" imdsAPIVersion = "2018-02-01" azureArcAPIVersion = "2019-08-15" serviceFabricAPIVersion = "2019-07-01-preview" @@ -47,6 +49,7 @@ type msiType int const ( msiTypeAppService msiType = iota msiTypeAzureArc + msiTypeAzureML msiTypeCloudShell msiTypeIMDS msiTypeServiceFabric @@ -55,7 +58,7 @@ const ( // managedIdentityClient provides the base for authenticating in managed identity environments // This type includes an runtime.Pipeline and TokenCredentialOptions. 
type managedIdentityClient struct { - pipeline runtime.Pipeline + azClient *azcore.Client msiType msiType endpoint string id ManagedIDKind @@ -135,13 +138,27 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag c.msiType = msiTypeAzureArc } } else if endpoint, ok := os.LookupEnv(msiEndpoint); ok { - env = "Cloud Shell" c.endpoint = endpoint - c.msiType = msiTypeCloudShell + if _, ok := os.LookupEnv(msiSecret); ok { + env = "Azure ML" + c.msiType = msiTypeAzureML + } else { + env = "Cloud Shell" + c.msiType = msiTypeCloudShell + } } else { setIMDSRetryOptionDefaults(&cp.Retry) } - c.pipeline = runtime.NewPipeline(component, version, runtime.PipelineOptions{}, &cp) + + client, err := azcore.NewClient(module, version, runtime.PipelineOptions{ + Tracing: runtime.TracingOptions{ + Namespace: traceNamespace, + }, + }, &cp) + if err != nil { + return nil, err + } + c.azClient = client if log.Should(EventAuthentication) { log.Writef(EventAuthentication, "Managed Identity Credential will use %s managed identity", env) @@ -168,7 +185,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi return azcore.AccessToken{}, err } - resp, err := c.pipeline.Do(msg) + resp, err := c.azClient.Pipeline().Do(msg) if err != nil { return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil, err) } @@ -247,6 +264,8 @@ func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id Manage return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil, err) } return c.createAzureArcAuthRequest(ctx, id, scopes, key) + case msiTypeAzureML: + return c.createAzureMLAuthRequest(ctx, id, scopes) case msiTypeServiceFabric: return c.createServiceFabricAuthRequest(ctx, id, scopes) case msiTypeCloudShell: @@ -296,6 +315,29 @@ func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, return request, nil } +func (c *managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set("secret", os.Getenv(msiSecret)) + q := request.Raw().URL.Query() + q.Add("api-version", "2017-09-01") + q.Add("resource", strings.Join(scopes, " ")) + q.Add("clientid", os.Getenv(defaultIdentityClientID)) + if id != nil { + if id.idKind() == miResourceID { + log.Write(EventAuthentication, "WARNING: Azure ML doesn't support specifying a managed identity by resource ID") + q.Set("clientid", "") + q.Set(qpResID, id.String()) + } else { + q.Set("clientid", id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { @@ -330,7 +372,7 @@ func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resour q.Add("resource", strings.Join(resources, " ")) request.Raw().URL.RawQuery = q.Encode() // send the initial request to get the short-lived secret key - response, err := c.pipeline.Do(request) + response, err := c.azClient.Pipeline().Do(request) if err != nil { return "", err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go index 35c5e6725..dcd278bef 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go @@ -13,6 +13,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) @@ -67,8 +68,8 @@ type ManagedIdentityCredentialOptions struct { // ManagedIdentityCredential authenticates an Azure managed identity in any hosting environment supporting managed identities. // This credential authenticates a system-assigned identity by default. Use ManagedIdentityCredentialOptions.ID to specify a -// user-assigned identity. See Azure Active Directory documentation for more information about managed identities: -// https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview +// user-assigned identity. See Microsoft Entra ID documentation for more information about managed identities: +// https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview type ManagedIdentityCredential struct { client *confidentialClient mic *managedIdentityClient @@ -92,7 +93,9 @@ func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*M clientID = options.ID.String() } // similarly, it's okay to give MSAL an incorrect tenant because MSAL won't use the value - c, err := newConfidentialClient("common", clientID, credNameManagedIdentity, cred, confidentialClientOptions{}) + c, err := newConfidentialClient("common", clientID, credNameManagedIdentity, cred, confidentialClientOptions{ + ClientOptions: options.ClientOptions, + }) if err != nil { return nil, err } @@ -101,13 +104,18 @@ func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*M // GetToken requests an access token from the hosting environment. This method is called automatically by Azure SDK clients. func (c *ManagedIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameManagedIdentity+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + if len(opts.Scopes) != 1 { - err := fmt.Errorf("%s.GetToken() requires exactly one scope", credNameManagedIdentity) + err = fmt.Errorf("%s.GetToken() requires exactly one scope", credNameManagedIdentity) return azcore.AccessToken{}, err } - // managed identity endpoints require an AADv1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here + // managed identity endpoints require a Microsoft Entra ID v1 resource (i.e. 
token audience), not a v2 scope, so we remove "/.default" here opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} - return c.client.GetToken(ctx, opts) + tk, err := c.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*ManagedIdentityCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go index 2b360b681..5e67cf021 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go @@ -13,6 +13,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) @@ -21,9 +22,9 @@ const credNameOBO = "OnBehalfOfCredential" // OnBehalfOfCredential authenticates a service principal via the on-behalf-of flow. This is typically used by // middle-tier services that authorize requests to other services with a delegated user identity. Because this // is not an interactive authentication flow, an application using it must have admin consent for any delegated -// permissions before requesting tokens for them. See [Azure Active Directory documentation] for more details. +// permissions before requesting tokens for them. See [Microsoft Entra ID documentation] for more details. // -// [Azure Active Directory documentation]: https://docs.microsoft.com/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow +// [Microsoft Entra ID documentation]: https://learn.microsoft.com/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow type OnBehalfOfCredential struct { client *confidentialClient } @@ -36,11 +37,13 @@ type OnBehalfOfCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + // SendCertificateChain applies only when the credential is configured to authenticate with a certificate. // This setting controls whether the credential sends the public certificate chain in the x5c header of each // token request's JWT. This is required for, and only used in, Subject Name/Issuer (SNI) authentication. @@ -84,9 +87,13 @@ func newOnBehalfOfCredential(tenantID, clientID, userAssertion string, cred conf return &OnBehalfOfCredential{c}, nil } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. 
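A sketch of requesting a token for a user-assigned managed identity by client ID; the GUID and scope are placeholders.

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// userAssignedMIToken authenticates a user-assigned identity. Exactly one
// scope is required; the credential strips "/.default" before calling the
// managed identity endpoint, as noted above.
func userAssignedMIToken(ctx context.Context) (azcore.AccessToken, error) {
	cred, err := azidentity.NewManagedIdentityCredential(&azidentity.ManagedIdentityCredentialOptions{
		ID: azidentity.ClientID("00000000-0000-0000-0000-000000000000"),
	})
	if err != nil {
		return azcore.AccessToken{}, err
	}
	return cred.GetToken(ctx, policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
}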
func (o *OnBehalfOfCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return o.client.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameOBO+"."+traceOpGetToken, o.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := o.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*OnBehalfOfCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go index 6512d3e25..63c31190d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go @@ -8,38 +8,52 @@ package azidentity import ( "context" + "errors" "fmt" + "net/http" "strings" "sync" "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal" "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" + + // this import ensures well-known configurations in azcore/cloud have ARM audiences for Authenticate() + _ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" ) type publicClientOptions struct { azcore.ClientOptions - AdditionallyAllowedTenants []string - DeviceCodePrompt func(context.Context, DeviceCodeMessage) error - DisableInstanceDiscovery bool - LoginHint, RedirectURL string - Username, Password string + AdditionallyAllowedTenants []string + DeviceCodePrompt func(context.Context, DeviceCodeMessage) error + DisableAutomaticAuthentication bool + DisableInstanceDiscovery bool + LoginHint, RedirectURL string + Record authenticationRecord + TokenCachePersistenceOptions *tokenCachePersistenceOptions + Username, Password string } // publicClient wraps the MSAL public client type publicClient struct { - account public.Account cae, noCAE msalPublicClient caeMu, noCAEMu, clientMu *sync.Mutex clientID, tenantID string + defaultScope []string host string name string opts publicClientOptions + record authenticationRecord + azClient *azcore.Client } +var errScopeRequired = errors.New("authenticating in this environment requires specifying a scope in TokenRequestOptions") + func newPublicClient(tenantID, clientID, name string, o publicClientOptions) (*publicClient, error) { if !validTenantID(tenantID) { return nil, errInvalidTenantID @@ -48,19 +62,76 @@ func newPublicClient(tenantID, clientID, name string, o publicClientOptions) (*p if err != nil { return nil, err } + // if the application specified a cloud configuration, use its ARM audience as the default scope for Authenticate() + audience := o.Cloud.Services[cloud.ResourceManager].Audience + if audience == "" { + // no cloud configuration, or no ARM audience, specified; try to map the host to a well-known one (all of which have a trailing slash) + if !strings.HasSuffix(host, "/") { + host += "/" + } + switch host { + case cloud.AzureChina.ActiveDirectoryAuthorityHost: + audience = cloud.AzureChina.Services[cloud.ResourceManager].Audience + case cloud.AzureGovernment.ActiveDirectoryAuthorityHost: + audience = cloud.AzureGovernment.Services[cloud.ResourceManager].Audience + case cloud.AzurePublic.ActiveDirectoryAuthorityHost: + audience = 
cloud.AzurePublic.Services[cloud.ResourceManager].Audience + } + } + // if we didn't come up with an audience, the application will have to specify a scope for Authenticate() + var defaultScope []string + if audience != "" { + defaultScope = []string{audience + defaultSuffix} + } + client, err := azcore.NewClient(module, version, runtime.PipelineOptions{ + Tracing: runtime.TracingOptions{ + Namespace: traceNamespace, + }, + }, &o.ClientOptions) + if err != nil { + return nil, err + } o.AdditionallyAllowedTenants = resolveAdditionalTenants(o.AdditionallyAllowedTenants) return &publicClient{ - caeMu: &sync.Mutex{}, - clientID: clientID, - clientMu: &sync.Mutex{}, - host: host, - name: name, - noCAEMu: &sync.Mutex{}, - opts: o, - tenantID: tenantID, + caeMu: &sync.Mutex{}, + clientID: clientID, + clientMu: &sync.Mutex{}, + defaultScope: defaultScope, + host: host, + name: name, + noCAEMu: &sync.Mutex{}, + opts: o, + record: o.Record, + tenantID: tenantID, + azClient: client, }, nil } +func (p *publicClient) Authenticate(ctx context.Context, tro *policy.TokenRequestOptions) (authenticationRecord, error) { + if tro == nil { + tro = &policy.TokenRequestOptions{} + } + if len(tro.Scopes) == 0 { + if p.defaultScope == nil { + return authenticationRecord{}, errScopeRequired + } + tro.Scopes = p.defaultScope + } + client, mu, err := p.client(*tro) + if err != nil { + return authenticationRecord{}, err + } + mu.Lock() + defer mu.Unlock() + _, err = p.reqToken(ctx, client, *tro) + if err == nil { + scope := strings.Join(tro.Scopes, ", ") + msg := fmt.Sprintf("%s.Authenticate() acquired a token for scope %q", p.name, scope) + log.Write(EventAuthentication, msg) + } + return p.record, err +} + // GetToken requests an access token from MSAL, checking the cache first. 
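A short sketch of the audience-to-scope mapping newPublicClient performs for Authenticate(): each well-known cloud configuration carries a Resource Manager audience, and the default scope is that audience plus "/.default" (assumed here to be the value of azidentity's unexported defaultSuffix).

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
)

func main() {
	// The well-known cloud configurations each expose an ARM audience;
	// newPublicClient turns that audience into the default scope used by
	// Authenticate() when the caller does not supply one.
	for name, cfg := range map[string]cloud.Configuration{
		"AzurePublic":     cloud.AzurePublic,
		"AzureGovernment": cloud.AzureGovernment,
		"AzureChina":      cloud.AzureChina,
	} {
		audience := cfg.Services[cloud.ResourceManager].Audience
		// "/.default" is assumed to match azidentity's defaultSuffix constant.
		fmt.Printf("%s: default scope %s\n", name, audience+"/.default")
	}
}
```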
func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOptions) (azcore.AccessToken, error) { if len(tro.Scopes) < 1 { @@ -76,10 +147,13 @@ func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOpti } mu.Lock() defer mu.Unlock() - ar, err := client.AcquireTokenSilent(ctx, tro.Scopes, public.WithSilentAccount(p.account), public.WithClaims(tro.Claims), public.WithTenantID(tenant)) + ar, err := client.AcquireTokenSilent(ctx, tro.Scopes, public.WithSilentAccount(p.record.account()), public.WithClaims(tro.Claims), public.WithTenantID(tenant)) if err == nil { return p.token(ar, err) } + if p.opts.DisableAutomaticAuthentication { + return azcore.AccessToken{}, errAuthenticationRequired + } at, err := p.reqToken(ctx, client, tro) if err == nil { msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", p.name, strings.Join(ar.GrantedScopes, ", ")) @@ -148,9 +222,14 @@ func (p *publicClient) client(tro policy.TokenRequestOptions) (msalPublicClient, } func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) { + cache, err := internal.NewCache(p.opts.TokenCachePersistenceOptions, enableCAE) + if err != nil { + return nil, err + } o := []public.Option{ public.WithAuthority(runtime.JoinPaths(p.host, p.tenantID)), - public.WithHTTPClient(newPipelineAdapter(&p.opts.ClientOptions)), + public.WithCache(cache), + public.WithHTTPClient(p), } if enableCAE { o = append(o, public.WithClientCapabilities(cp1)) @@ -163,7 +242,7 @@ func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) { func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToken, error) { if err == nil { - p.account = ar.Account + p.record, err = newAuthenticationRecord(ar) } else { res := getResponseFromError(err) err = newAuthenticationFailedError(p.name, err.Error(), res, err) @@ -171,8 +250,24 @@ func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToke return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err } -// resolveTenant returns the correct tenant for a token request given the client's +// resolveTenant returns the correct WithTenantID() argument for a token request given the client's // configuration, or an error when that configuration doesn't allow the specified tenant func (p *publicClient) resolveTenant(specified string) (string, error) { - return resolveTenant(p.tenantID, specified, p.name, p.opts.AdditionallyAllowedTenants) + t, err := resolveTenant(p.tenantID, specified, p.name, p.opts.AdditionallyAllowedTenants) + if t == p.tenantID { + // callers pass this value to MSAL's WithTenantID(). 
There's no need to redundantly specify + // the client's default tenant and doing so is an error when that tenant is "organizations" + t = "" + } + return t, err +} + +// these methods satisfy the MSAL ops.HTTPClient interface + +func (p *publicClient) CloseIdleConnections() { + // do nothing +} + +func (p *publicClient) Do(r *http.Request) (*http.Response, error) { + return doForClient(p.azClient, r) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go index f787ec0ce..294ed81e9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go @@ -11,6 +11,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" ) const credNameUserPassword = "UsernamePasswordCredential" @@ -23,11 +24,19 @@ type UsernamePasswordCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string + + // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // to enable the credential to use data from a previous authentication. + authenticationRecord authenticationRecord + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions } // UsernamePasswordCredential authenticates a user with a password. Microsoft doesn't recommend this kind of authentication, @@ -45,11 +54,13 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st options = &UsernamePasswordCredentialOptions{} } opts := publicClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, - Password: password, - Username: username, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + Password: password, + Record: options.authenticationRecord, + TokenCachePersistenceOptions: options.tokenCachePersistenceOptions, + Username: username, } c, err := newPublicClient(tenantID, clientID, credNameUserPassword, opts) if err != nil { @@ -58,9 +69,22 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st return &UsernamePasswordCredential{client: c}, err } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// Authenticate the user. 
Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. +func (c *UsernamePasswordCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameUserPassword+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.Authenticate(ctx, opts) + return tk, err +} + +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. func (c *UsernamePasswordCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.client.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameUserPassword+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*UsernamePasswordCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go index 65e74e31e..e8caeea71 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -10,6 +10,9 @@ const ( // UserAgent is the string to be used in the user agent string when making requests. component = "azidentity" + // module is the fully qualified name of the module used in telemetry and distributed tracing. + module = "github.com/Azure/azure-sdk-for-go/sdk/" + component + // Version is the semantic version (see http://semver.org) of this module. - version = "v1.4.0" + version = "v1.5.1" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go index 7e016324d..3e43e788e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go @@ -15,6 +15,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" ) const credNameWorkloadIdentity = "WorkloadIdentityCredential" @@ -41,7 +42,7 @@ type WorkloadIdentityCredentialOptions struct { // ClientID of the service principal. Defaults to the value of the environment variable AZURE_CLIENT_ID. ClientID string // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool @@ -93,9 +94,13 @@ func NewWorkloadIdentityCredential(options *WorkloadIdentityCredentialOptions) ( return &w, nil } -// GetToken requests an access token from Azure Active Directory. Azure SDK clients call this method automatically. +// GetToken requests an access token from Microsoft Entra ID. Azure SDK clients call this method automatically. 
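A minimal usage sketch for WorkloadIdentityCredential, assuming the standard environment injected by the Azure workload identity webhook (AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_FEDERATED_TOKEN_FILE); the ARM scope is an example.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// With nil options the credential reads its client ID, tenant ID, and the
	// projected service account token file path from the environment.
	cred, err := azidentity.NewWorkloadIdentityCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	tk, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("acquired workload identity token, expires", tk.ExpiresOn)
}
```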
func (w *WorkloadIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return w.cred.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameWorkloadIdentity+"."+traceOpGetToken, w.cred.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := w.cred.GetToken(ctx, opts) + return tk, err } // getAssertion returns the specified file's content, which is expected to be a Kubernetes service account token. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go index d4ed6ccc8..9948f604b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go @@ -39,6 +39,11 @@ type PayloadOptions struct { // Subsequent reads will access the cached value. // Exported as runtime.Payload() WITHOUT the opts parameter. func Payload(resp *http.Response, opts *PayloadOptions) ([]byte, error) { + if resp.Body == nil { + // this shouldn't happen in real-world scenarios as a + // response with no body should set it to http.NoBody + return nil, nil + } modifyBytes := func(b []byte) []byte { return b } if opts != nil && opts.BytesModifier != nil { modifyBytes = opts.BytesModifier diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md index b19244d46..836f84817 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md @@ -1,5 +1,13 @@ # Release History +## 5.5.0 (2024-01-26) +### Features Added + +- New value `DiskSecurityTypesConfidentialVMNonPersistedTPM` added to enum type `DiskSecurityTypes` +- New enum type `ProvisionedBandwidthCopyOption` with values `ProvisionedBandwidthCopyOptionEnhanced`, `ProvisionedBandwidthCopyOptionNone` +- New field `ProvisionedBandwidthCopySpeed` in struct `CreationData` + + ## 5.4.0 (2023-12-22) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/README.md index 7c8d1e3e1..c4bf8ab34 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/README.md @@ -57,7 +57,7 @@ clientFactory, err := armcompute.NewClientFactory(, cred, &opti A client groups a set of related APIs, providing access to its functionality. Create one or more clients to access the APIs you require using client factory. 
```go -client := clientFactory.NewLogAnalyticsClient() +client := clientFactory.NewAvailabilitySetsClient() ``` ## Fakes diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json index 1feae4c5c..f8177c784 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/resourcemanager/compute/armcompute", - "Tag": "go/resourcemanager/compute/armcompute_323718962d" + "Tag": "go/resourcemanager/compute/armcompute_444bc7d3bc" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md index 9d88c8a6d..3e856c7d2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md @@ -5,9 +5,9 @@ ``` yaml azure-arm: true require: -- https://github.com/Azure/azure-rest-api-specs/blob/60679ee3db06e93eb73faa0587fed93ed843d6dc/specification/compute/resource-manager/readme.md -- https://github.com/Azure/azure-rest-api-specs/blob/60679ee3db06e93eb73faa0587fed93ed843d6dc/specification/compute/resource-manager/readme.go.md +- https://github.com/Azure/azure-rest-api-specs/blob/41e4538ed7bb3ceac3c1322c9455a0812ed110ac/specification/compute/resource-manager/readme.md +- https://github.com/Azure/azure-rest-api-specs/blob/41e4538ed7bb3ceac3c1322c9455a0812ed110ac/specification/compute/resource-manager/readme.go.md license-header: MICROSOFT_MIT_NO_VERSION -module-version: 5.4.0 -tag: package-2023-09-01 +module-version: 5.5.0 +tag: package-2023-10-02 ``` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go index d635d4f51..e4858a588 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go @@ -10,7 +10,7 @@ package armcompute const ( moduleName = "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute" - moduleVersion = "v5.4.0" + moduleVersion = "v5.5.0" ) type AccessLevel string @@ -543,6 +543,9 @@ const ( // DiskSecurityTypesConfidentialVMDiskEncryptedWithPlatformKey - Indicates Confidential VM disk with both OS disk and VM guest // state encrypted with a platform managed key DiskSecurityTypesConfidentialVMDiskEncryptedWithPlatformKey DiskSecurityTypes = "ConfidentialVM_DiskEncryptedWithPlatformKey" + // DiskSecurityTypesConfidentialVMNonPersistedTPM - Indicates Confidential VM disk with a ephemeral vTPM. vTPM state is not + // persisted across VM reboots. 
+ DiskSecurityTypesConfidentialVMNonPersistedTPM DiskSecurityTypes = "ConfidentialVM_NonPersistedTPM" // DiskSecurityTypesConfidentialVMVmguestStateOnlyEncryptedWithPlatformKey - Indicates Confidential VM disk with only VM guest // state encrypted DiskSecurityTypesConfidentialVMVmguestStateOnlyEncryptedWithPlatformKey DiskSecurityTypes = "ConfidentialVM_VMGuestStateOnlyEncryptedWithPlatformKey" @@ -556,6 +559,7 @@ func PossibleDiskSecurityTypesValues() []DiskSecurityTypes { return []DiskSecurityTypes{ DiskSecurityTypesConfidentialVMDiskEncryptedWithCustomerKey, DiskSecurityTypesConfidentialVMDiskEncryptedWithPlatformKey, + DiskSecurityTypesConfidentialVMNonPersistedTPM, DiskSecurityTypesConfidentialVMVmguestStateOnlyEncryptedWithPlatformKey, DiskSecurityTypesTrustedLaunch, } @@ -1500,6 +1504,23 @@ func PossibleProtocolTypesValues() []ProtocolTypes { } } +// ProvisionedBandwidthCopyOption - If this field is set on a snapshot and createOption is CopyStart, the snapshot will be +// copied at a quicker speed. +type ProvisionedBandwidthCopyOption string + +const ( + ProvisionedBandwidthCopyOptionEnhanced ProvisionedBandwidthCopyOption = "Enhanced" + ProvisionedBandwidthCopyOptionNone ProvisionedBandwidthCopyOption = "None" +) + +// PossibleProvisionedBandwidthCopyOptionValues returns the possible values for the ProvisionedBandwidthCopyOption const type. +func PossibleProvisionedBandwidthCopyOptionValues() []ProvisionedBandwidthCopyOption { + return []ProvisionedBandwidthCopyOption{ + ProvisionedBandwidthCopyOptionEnhanced, + ProvisionedBandwidthCopyOptionNone, + } +} + // ProximityPlacementGroupType - Specifies the type of the proximity placement group. Possible values are: Standard : Co-locate // resources within an Azure region or Availability Zone. Ultra : For future use. type ProximityPlacementGroupType string diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskaccesses_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskaccesses_client.go index b189a0340..0bea05f3e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskaccesses_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskaccesses_client.go @@ -47,7 +47,7 @@ func NewDiskAccessesClient(subscriptionID string, credential azcore.TokenCredent // BeginCreateOrUpdate - Creates or updates a disk access resource // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The @@ -75,7 +75,7 @@ func (client *DiskAccessesClient) BeginCreateOrUpdate(ctx context.Context, resou // CreateOrUpdate - Creates or updates a disk access resource // If the operation fails it returns an *azcore.ResponseError type. 
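A hypothetical disk payload using the new DiskSecurityTypesConfidentialVMNonPersistedTPM value; the Disk/DiskProperties/DiskSecurityProfile field names and the to.Ptr helper are assumed from the armcompute and azcore modules rather than shown in this hunk.

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	// Enumerate the security types, which now include ConfidentialVM_NonPersistedTPM.
	for _, v := range armcompute.PossibleDiskSecurityTypesValues() {
		fmt.Println(v)
	}

	// Hypothetical payload; field names are assumed from the generated models.
	disk := armcompute.Disk{
		Location: to.Ptr("eastus"),
		Properties: &armcompute.DiskProperties{
			SecurityProfile: &armcompute.DiskSecurityProfile{
				SecurityType: to.Ptr(armcompute.DiskSecurityTypesConfidentialVMNonPersistedTPM),
			},
		},
	}
	fmt.Println(*disk.Properties.SecurityProfile.SecurityType)
}
```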
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskAccessesClient) createOrUpdate(ctx context.Context, resourceGroupName string, diskAccessName string, diskAccess DiskAccess, options *DiskAccessesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "DiskAccessesClient.BeginCreateOrUpdate" @@ -117,7 +117,7 @@ func (client *DiskAccessesClient) createOrUpdateCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, diskAccess); err != nil { @@ -129,7 +129,7 @@ func (client *DiskAccessesClient) createOrUpdateCreateRequest(ctx context.Contex // BeginDelete - Deletes a disk access resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The @@ -156,7 +156,7 @@ func (client *DiskAccessesClient) BeginDelete(ctx context.Context, resourceGroup // Delete - Deletes a disk access resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskAccessesClient) deleteOperation(ctx context.Context, resourceGroupName string, diskAccessName string, options *DiskAccessesClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "DiskAccessesClient.BeginDelete" @@ -198,7 +198,7 @@ func (client *DiskAccessesClient) deleteCreateRequest(ctx context.Context, resou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -207,7 +207,7 @@ func (client *DiskAccessesClient) deleteCreateRequest(ctx context.Context, resou // BeginDeleteAPrivateEndpointConnection - Deletes a private endpoint connection under a disk access resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The @@ -235,7 +235,7 @@ func (client *DiskAccessesClient) BeginDeleteAPrivateEndpointConnection(ctx cont // DeleteAPrivateEndpointConnection - Deletes a private endpoint connection under a disk access resource. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskAccessesClient) deleteAPrivateEndpointConnection(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string, options *DiskAccessesClientBeginDeleteAPrivateEndpointConnectionOptions) (*http.Response, error) { var err error const operationName = "DiskAccessesClient.BeginDeleteAPrivateEndpointConnection" @@ -281,7 +281,7 @@ func (client *DiskAccessesClient) deleteAPrivateEndpointConnectionCreateRequest( return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -290,7 +290,7 @@ func (client *DiskAccessesClient) deleteAPrivateEndpointConnectionCreateRequest( // Get - Gets information about a disk access resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The @@ -338,7 +338,7 @@ func (client *DiskAccessesClient) getCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -356,7 +356,7 @@ func (client *DiskAccessesClient) getHandleResponse(resp *http.Response) (DiskAc // GetAPrivateEndpointConnection - Gets information about a private endpoint connection under a disk access resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The @@ -410,7 +410,7 @@ func (client *DiskAccessesClient) getAPrivateEndpointConnectionCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -428,7 +428,7 @@ func (client *DiskAccessesClient) getAPrivateEndpointConnectionHandleResponse(re // GetPrivateLinkResources - Gets the private link resources possible under disk access resource // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. 
The @@ -477,7 +477,7 @@ func (client *DiskAccessesClient) getPrivateLinkResourcesCreateRequest(ctx conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -494,7 +494,7 @@ func (client *DiskAccessesClient) getPrivateLinkResourcesHandleResponse(resp *ht // NewListPager - Lists all the disk access resources under a subscription. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - options - DiskAccessesClientListOptions contains the optional parameters for the DiskAccessesClient.NewListPager method. func (client *DiskAccessesClient) NewListPager(options *DiskAccessesClientListOptions) *runtime.Pager[DiskAccessesClientListResponse] { return runtime.NewPager(runtime.PagingHandler[DiskAccessesClientListResponse]{ @@ -531,7 +531,7 @@ func (client *DiskAccessesClient) listCreateRequest(ctx context.Context, options return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -548,7 +548,7 @@ func (client *DiskAccessesClient) listHandleResponse(resp *http.Response) (DiskA // NewListByResourceGroupPager - Lists all the disk access resources under a resource group. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - options - DiskAccessesClientListByResourceGroupOptions contains the optional parameters for the DiskAccessesClient.NewListByResourceGroupPager // method. @@ -591,7 +591,7 @@ func (client *DiskAccessesClient) listByResourceGroupCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -608,7 +608,7 @@ func (client *DiskAccessesClient) listByResourceGroupHandleResponse(resp *http.R // NewListPrivateEndpointConnectionsPager - List information about private endpoint connections under a disk access resource // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The @@ -658,7 +658,7 @@ func (client *DiskAccessesClient) listPrivateEndpointConnectionsCreateRequest(ct return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -676,7 +676,7 @@ func (client *DiskAccessesClient) listPrivateEndpointConnectionsHandleResponse(r // BeginUpdate - Updates (patches) a disk access resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. 
The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The @@ -704,7 +704,7 @@ func (client *DiskAccessesClient) BeginUpdate(ctx context.Context, resourceGroup // Update - Updates (patches) a disk access resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskAccessesClient) update(ctx context.Context, resourceGroupName string, diskAccessName string, diskAccess DiskAccessUpdate, options *DiskAccessesClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "DiskAccessesClient.BeginUpdate" @@ -746,7 +746,7 @@ func (client *DiskAccessesClient) updateCreateRequest(ctx context.Context, resou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, diskAccess); err != nil { @@ -759,7 +759,7 @@ func (client *DiskAccessesClient) updateCreateRequest(ctx context.Context, resou // can't be used to create a new private endpoint connection. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk encryption // set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The @@ -790,7 +790,7 @@ func (client *DiskAccessesClient) BeginUpdateAPrivateEndpointConnection(ctx cont // be used to create a new private endpoint connection. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskAccessesClient) updateAPrivateEndpointConnection(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string, privateEndpointConnection PrivateEndpointConnection, options *DiskAccessesClientBeginUpdateAPrivateEndpointConnectionOptions) (*http.Response, error) { var err error const operationName = "DiskAccessesClient.BeginUpdateAPrivateEndpointConnection" @@ -836,7 +836,7 @@ func (client *DiskAccessesClient) updateAPrivateEndpointConnectionCreateRequest( return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, privateEndpointConnection); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskencryptionsets_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskencryptionsets_client.go index 9306f9c0d..59d34d1e2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskencryptionsets_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskencryptionsets_client.go @@ -47,7 +47,7 @@ func NewDiskEncryptionSetsClient(subscriptionID string, credential azcore.TokenC // BeginCreateOrUpdate - Creates or updates a disk encryption set // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskEncryptionSetName - The name of the disk encryption set that is being created. The name can't be changed after the // disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum @@ -75,7 +75,7 @@ func (client *DiskEncryptionSetsClient) BeginCreateOrUpdate(ctx context.Context, // CreateOrUpdate - Creates or updates a disk encryption set // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskEncryptionSetsClient) createOrUpdate(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSet, options *DiskEncryptionSetsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "DiskEncryptionSetsClient.BeginCreateOrUpdate" @@ -117,7 +117,7 @@ func (client *DiskEncryptionSetsClient) createOrUpdateCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, diskEncryptionSet); err != nil { @@ -129,7 +129,7 @@ func (client *DiskEncryptionSetsClient) createOrUpdateCreateRequest(ctx context. // BeginDelete - Deletes a disk encryption set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskEncryptionSetName - The name of the disk encryption set that is being created. 
The name can't be changed after the // disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum @@ -156,7 +156,7 @@ func (client *DiskEncryptionSetsClient) BeginDelete(ctx context.Context, resourc // Delete - Deletes a disk encryption set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskEncryptionSetsClient) deleteOperation(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, options *DiskEncryptionSetsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "DiskEncryptionSetsClient.BeginDelete" @@ -198,7 +198,7 @@ func (client *DiskEncryptionSetsClient) deleteCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -207,7 +207,7 @@ func (client *DiskEncryptionSetsClient) deleteCreateRequest(ctx context.Context, // Get - Gets information about a disk encryption set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskEncryptionSetName - The name of the disk encryption set that is being created. The name can't be changed after the // disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum @@ -255,7 +255,7 @@ func (client *DiskEncryptionSetsClient) getCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -272,7 +272,7 @@ func (client *DiskEncryptionSetsClient) getHandleResponse(resp *http.Response) ( // NewListPager - Lists all the disk encryption sets under a subscription. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - options - DiskEncryptionSetsClientListOptions contains the optional parameters for the DiskEncryptionSetsClient.NewListPager // method. func (client *DiskEncryptionSetsClient) NewListPager(options *DiskEncryptionSetsClientListOptions) *runtime.Pager[DiskEncryptionSetsClientListResponse] { @@ -310,7 +310,7 @@ func (client *DiskEncryptionSetsClient) listCreateRequest(ctx context.Context, o return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -327,7 +327,7 @@ func (client *DiskEncryptionSetsClient) listHandleResponse(resp *http.Response) // NewListAssociatedResourcesPager - Lists all resources that are encrypted with this disk encryption set. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskEncryptionSetName - The name of the disk encryption set that is being created. The name can't be changed after the // disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. 
The maximum @@ -377,7 +377,7 @@ func (client *DiskEncryptionSetsClient) listAssociatedResourcesCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -394,7 +394,7 @@ func (client *DiskEncryptionSetsClient) listAssociatedResourcesHandleResponse(re // NewListByResourceGroupPager - Lists all the disk encryption sets under a resource group. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - options - DiskEncryptionSetsClientListByResourceGroupOptions contains the optional parameters for the DiskEncryptionSetsClient.NewListByResourceGroupPager // method. @@ -437,7 +437,7 @@ func (client *DiskEncryptionSetsClient) listByResourceGroupCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -455,7 +455,7 @@ func (client *DiskEncryptionSetsClient) listByResourceGroupHandleResponse(resp * // BeginUpdate - Updates (patches) a disk encryption set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskEncryptionSetName - The name of the disk encryption set that is being created. The name can't be changed after the // disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum @@ -483,7 +483,7 @@ func (client *DiskEncryptionSetsClient) BeginUpdate(ctx context.Context, resourc // Update - Updates (patches) a disk encryption set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskEncryptionSetsClient) update(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSetUpdate, options *DiskEncryptionSetsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "DiskEncryptionSetsClient.BeginUpdate" @@ -525,7 +525,7 @@ func (client *DiskEncryptionSetsClient) updateCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, diskEncryptionSet); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskrestorepoint_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskrestorepoint_client.go index 54b576957..43307ca04 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskrestorepoint_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskrestorepoint_client.go @@ -47,7 +47,7 @@ func NewDiskRestorePointClient(subscriptionID string, credential azcore.TokenCre // Get - Get disk restorePoint resource // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection that the disk restore point belongs. // - vmRestorePointName - The name of the vm restore point that the disk disk restore point belongs. @@ -103,7 +103,7 @@ func (client *DiskRestorePointClient) getCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -121,7 +121,7 @@ func (client *DiskRestorePointClient) getHandleResponse(resp *http.Response) (Di // BeginGrantAccess - Grants access to a diskRestorePoint. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection that the disk restore point belongs. // - vmRestorePointName - The name of the vm restore point that the disk disk restore point belongs. @@ -150,7 +150,7 @@ func (client *DiskRestorePointClient) BeginGrantAccess(ctx context.Context, reso // GrantAccess - Grants access to a diskRestorePoint. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskRestorePointClient) grantAccess(ctx context.Context, resourceGroupName string, restorePointCollectionName string, vmRestorePointName string, diskRestorePointName string, grantAccessData GrantAccessData, options *DiskRestorePointClientBeginGrantAccessOptions) (*http.Response, error) { var err error const operationName = "DiskRestorePointClient.BeginGrantAccess" @@ -200,7 +200,7 @@ func (client *DiskRestorePointClient) grantAccessCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, grantAccessData); err != nil { @@ -211,7 +211,7 @@ func (client *DiskRestorePointClient) grantAccessCreateRequest(ctx context.Conte // NewListByRestorePointPager - Lists diskRestorePoints under a vmRestorePoint. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection that the disk restore point belongs. // - vmRestorePointName - The name of the vm restore point that the disk disk restore point belongs. @@ -264,7 +264,7 @@ func (client *DiskRestorePointClient) listByRestorePointCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -282,7 +282,7 @@ func (client *DiskRestorePointClient) listByRestorePointHandleResponse(resp *htt // BeginRevokeAccess - Revokes access to a diskRestorePoint. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection that the disk restore point belongs. // - vmRestorePointName - The name of the vm restore point that the disk disk restore point belongs. @@ -310,7 +310,7 @@ func (client *DiskRestorePointClient) BeginRevokeAccess(ctx context.Context, res // RevokeAccess - Revokes access to a diskRestorePoint. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DiskRestorePointClient) revokeAccess(ctx context.Context, resourceGroupName string, restorePointCollectionName string, vmRestorePointName string, diskRestorePointName string, options *DiskRestorePointClientBeginRevokeAccessOptions) (*http.Response, error) { var err error const operationName = "DiskRestorePointClient.BeginRevokeAccess" @@ -360,7 +360,7 @@ func (client *DiskRestorePointClient) revokeAccessCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/disks_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/disks_client.go index 854a8ffd6..a0ffa752b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/disks_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/disks_client.go @@ -47,7 +47,7 @@ func NewDisksClient(subscriptionID string, credential azcore.TokenCredential, op // BeginCreateOrUpdate - Creates or updates a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskName - The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported // characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 @@ -75,7 +75,7 @@ func (client *DisksClient) BeginCreateOrUpdate(ctx context.Context, resourceGrou // CreateOrUpdate - Creates or updates a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DisksClient) createOrUpdate(ctx context.Context, resourceGroupName string, diskName string, disk Disk, options *DisksClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "DisksClient.BeginCreateOrUpdate" @@ -117,7 +117,7 @@ func (client *DisksClient) createOrUpdateCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, disk); err != nil { @@ -129,7 +129,7 @@ func (client *DisksClient) createOrUpdateCreateRequest(ctx context.Context, reso // BeginDelete - Deletes a disk. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskName - The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported // characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 @@ -155,7 +155,7 @@ func (client *DisksClient) BeginDelete(ctx context.Context, resourceGroupName st // Delete - Deletes a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DisksClient) deleteOperation(ctx context.Context, resourceGroupName string, diskName string, options *DisksClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "DisksClient.BeginDelete" @@ -197,7 +197,7 @@ func (client *DisksClient) deleteCreateRequest(ctx context.Context, resourceGrou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() return req, nil } @@ -205,7 +205,7 @@ func (client *DisksClient) deleteCreateRequest(ctx context.Context, resourceGrou // Get - Gets information about a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskName - The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported // characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 @@ -253,7 +253,7 @@ func (client *DisksClient) getCreateRequest(ctx context.Context, resourceGroupNa return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -271,7 +271,7 @@ func (client *DisksClient) getHandleResponse(resp *http.Response) (DisksClientGe // BeginGrantAccess - Grants access to a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskName - The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported // characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 @@ -299,7 +299,7 @@ func (client *DisksClient) BeginGrantAccess(ctx context.Context, resourceGroupNa // GrantAccess - Grants access to a disk. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DisksClient) grantAccess(ctx context.Context, resourceGroupName string, diskName string, grantAccessData GrantAccessData, options *DisksClientBeginGrantAccessOptions) (*http.Response, error) { var err error const operationName = "DisksClient.BeginGrantAccess" @@ -341,7 +341,7 @@ func (client *DisksClient) grantAccessCreateRequest(ctx context.Context, resourc return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, grantAccessData); err != nil { @@ -352,7 +352,7 @@ func (client *DisksClient) grantAccessCreateRequest(ctx context.Context, resourc // NewListPager - Lists all the disks under a subscription. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - options - DisksClientListOptions contains the optional parameters for the DisksClient.NewListPager method. func (client *DisksClient) NewListPager(options *DisksClientListOptions) *runtime.Pager[DisksClientListResponse] { return runtime.NewPager(runtime.PagingHandler[DisksClientListResponse]{ @@ -389,7 +389,7 @@ func (client *DisksClient) listCreateRequest(ctx context.Context, options *Disks return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -406,7 +406,7 @@ func (client *DisksClient) listHandleResponse(resp *http.Response) (DisksClientL // NewListByResourceGroupPager - Lists all the disks under a resource group. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - options - DisksClientListByResourceGroupOptions contains the optional parameters for the DisksClient.NewListByResourceGroupPager // method. @@ -449,7 +449,7 @@ func (client *DisksClient) listByResourceGroupCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -467,7 +467,7 @@ func (client *DisksClient) listByResourceGroupHandleResponse(resp *http.Response // BeginRevokeAccess - Revokes access to a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskName - The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported // characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 @@ -494,7 +494,7 @@ func (client *DisksClient) BeginRevokeAccess(ctx context.Context, resourceGroupN // RevokeAccess - Revokes access to a disk. // If the operation fails it returns an *azcore.ResponseError type. 
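A sketch of consuming the list endpoints above through the generated pager; the subscription ID is a placeholder, and the DisksClientListResponse.Value and Disk.Name fields are assumed from the generated models.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// "<subscription-id>" is a placeholder.
	client, err := armcompute.NewDisksClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	// NewListPager issues requests with the bumped 2023-10-02 api-version.
	pager := client.NewListPager(nil)
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		for _, d := range page.Value {
			fmt.Println(*d.Name)
		}
	}
}
```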
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DisksClient) revokeAccess(ctx context.Context, resourceGroupName string, diskName string, options *DisksClientBeginRevokeAccessOptions) (*http.Response, error) { var err error const operationName = "DisksClient.BeginRevokeAccess" @@ -536,7 +536,7 @@ func (client *DisksClient) revokeAccessCreateRequest(ctx context.Context, resour return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() return req, nil } @@ -544,7 +544,7 @@ func (client *DisksClient) revokeAccessCreateRequest(ctx context.Context, resour // BeginUpdate - Updates (patches) a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - diskName - The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported // characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 @@ -571,7 +571,7 @@ func (client *DisksClient) BeginUpdate(ctx context.Context, resourceGroupName st // Update - Updates (patches) a disk. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *DisksClient) update(ctx context.Context, resourceGroupName string, diskName string, disk DiskUpdate, options *DisksClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "DisksClient.BeginUpdate" @@ -613,7 +613,7 @@ func (client *DisksClient) updateCreateRequest(ctx context.Context, resourceGrou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, disk); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go index 5b5a3197b..ffba7145d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go @@ -1037,6 +1037,9 @@ type CreationData struct { // disabled after enabled. PerformancePlus *bool + // If this field is set on a snapshot and createOption is CopyStart, the snapshot will be copied at a quicker speed. + ProvisionedBandwidthCopySpeed *ProvisionedBandwidthCopyOption + // If createOption is ImportSecure, this is the URI of a blob to be imported into VM guest state. 
SecurityDataURI *string diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go index 3c63478d0..077f40dd3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go @@ -2404,6 +2404,7 @@ func (c CreationData) MarshalJSON() ([]byte, error) { populate(objectMap, "imageReference", c.ImageReference) populate(objectMap, "logicalSectorSize", c.LogicalSectorSize) populate(objectMap, "performancePlus", c.PerformancePlus) + populate(objectMap, "provisionedBandwidthCopySpeed", c.ProvisionedBandwidthCopySpeed) populate(objectMap, "securityDataUri", c.SecurityDataURI) populate(objectMap, "sourceResourceId", c.SourceResourceID) populate(objectMap, "sourceUri", c.SourceURI) @@ -2440,6 +2441,9 @@ func (c *CreationData) UnmarshalJSON(data []byte) error { case "performancePlus": err = unpopulate(val, "PerformancePlus", &c.PerformancePlus) delete(rawMsg, key) + case "provisionedBandwidthCopySpeed": + err = unpopulate(val, "ProvisionedBandwidthCopySpeed", &c.ProvisionedBandwidthCopySpeed) + delete(rawMsg, key) case "securityDataUri": err = unpopulate(val, "SecurityDataURI", &c.SecurityDataURI) delete(rawMsg, key) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/snapshots_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/snapshots_client.go index a0d263613..8c73f7709 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/snapshots_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/snapshots_client.go @@ -47,7 +47,7 @@ func NewSnapshotsClient(subscriptionID string, credential azcore.TokenCredential // BeginCreateOrUpdate - Creates or updates a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - snapshotName - The name of the snapshot that is being created. The name can't be changed after the snapshot is created. // Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 @@ -75,7 +75,7 @@ func (client *SnapshotsClient) BeginCreateOrUpdate(ctx context.Context, resource // CreateOrUpdate - Creates or updates a snapshot. // If the operation fails it returns an *azcore.ResponseError type. 
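[Editor's aside, not part of the patch] The models.go and models_serde.go hunks above add the `ProvisionedBandwidthCopySpeed` field to `CreationData`, which the doc comment says only takes effect on a snapshot whose createOption is CopyStart. A hypothetical sketch of using it from the SnapshotsClient is below; the enum constant name (`ProvisionedBandwidthCopyOptionEnhanced`), region, and all resource identifiers are assumptions for illustration and are not defined by this patch.

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := armcompute.NewSnapshotsClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Start a cross-region incremental snapshot copy and ask for the faster
	// provisioned-bandwidth copy path (enum value assumed; check the generated constants).
	poller, err := client.BeginCreateOrUpdate(context.Background(), "<resource-group>", "<snapshot-name>",
		armcompute.Snapshot{
			Location: to.Ptr("eastus2"),
			Properties: &armcompute.SnapshotProperties{
				Incremental: to.Ptr(true),
				CreationData: &armcompute.CreationData{
					CreateOption:                  to.Ptr(armcompute.DiskCreateOptionCopyStart),
					SourceResourceID:              to.Ptr("<source-snapshot-resource-id>"),
					ProvisionedBandwidthCopySpeed: to.Ptr(armcompute.ProvisionedBandwidthCopyOptionEnhanced),
				},
			},
		}, nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := poller.PollUntilDone(context.Background(), nil); err != nil {
		log.Fatal(err)
	}
	log.Println("snapshot copy started")
}
```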
// -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *SnapshotsClient) createOrUpdate(ctx context.Context, resourceGroupName string, snapshotName string, snapshot Snapshot, options *SnapshotsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "SnapshotsClient.BeginCreateOrUpdate" @@ -117,7 +117,7 @@ func (client *SnapshotsClient) createOrUpdateCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, snapshot); err != nil { @@ -129,7 +129,7 @@ func (client *SnapshotsClient) createOrUpdateCreateRequest(ctx context.Context, // BeginDelete - Deletes a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - snapshotName - The name of the snapshot that is being created. The name can't be changed after the snapshot is created. // Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 @@ -155,7 +155,7 @@ func (client *SnapshotsClient) BeginDelete(ctx context.Context, resourceGroupNam // Delete - Deletes a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *SnapshotsClient) deleteOperation(ctx context.Context, resourceGroupName string, snapshotName string, options *SnapshotsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "SnapshotsClient.BeginDelete" @@ -197,7 +197,7 @@ func (client *SnapshotsClient) deleteCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() return req, nil } @@ -205,7 +205,7 @@ func (client *SnapshotsClient) deleteCreateRequest(ctx context.Context, resource // Get - Gets information about a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - snapshotName - The name of the snapshot that is being created. The name can't be changed after the snapshot is created. // Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 @@ -253,7 +253,7 @@ func (client *SnapshotsClient) getCreateRequest(ctx context.Context, resourceGro return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -271,7 +271,7 @@ func (client *SnapshotsClient) getHandleResponse(resp *http.Response) (Snapshots // BeginGrantAccess - Grants access to a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - snapshotName - The name of the snapshot that is being created. 
The name can't be changed after the snapshot is created. // Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 @@ -300,7 +300,7 @@ func (client *SnapshotsClient) BeginGrantAccess(ctx context.Context, resourceGro // GrantAccess - Grants access to a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *SnapshotsClient) grantAccess(ctx context.Context, resourceGroupName string, snapshotName string, grantAccessData GrantAccessData, options *SnapshotsClientBeginGrantAccessOptions) (*http.Response, error) { var err error const operationName = "SnapshotsClient.BeginGrantAccess" @@ -342,7 +342,7 @@ func (client *SnapshotsClient) grantAccessCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, grantAccessData); err != nil { @@ -353,7 +353,7 @@ func (client *SnapshotsClient) grantAccessCreateRequest(ctx context.Context, res // NewListPager - Lists snapshots under a subscription. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - options - SnapshotsClientListOptions contains the optional parameters for the SnapshotsClient.NewListPager method. func (client *SnapshotsClient) NewListPager(options *SnapshotsClientListOptions) *runtime.Pager[SnapshotsClientListResponse] { return runtime.NewPager(runtime.PagingHandler[SnapshotsClientListResponse]{ @@ -390,7 +390,7 @@ func (client *SnapshotsClient) listCreateRequest(ctx context.Context, options *S return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -407,7 +407,7 @@ func (client *SnapshotsClient) listHandleResponse(resp *http.Response) (Snapshot // NewListByResourceGroupPager - Lists snapshots under a resource group. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - options - SnapshotsClientListByResourceGroupOptions contains the optional parameters for the SnapshotsClient.NewListByResourceGroupPager // method. @@ -450,7 +450,7 @@ func (client *SnapshotsClient) listByResourceGroupCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -468,7 +468,7 @@ func (client *SnapshotsClient) listByResourceGroupHandleResponse(resp *http.Resp // BeginRevokeAccess - Revokes access to a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - snapshotName - The name of the snapshot that is being created. The name can't be changed after the snapshot is created. // Supported characters for the name are a-z, A-Z, 0-9, _ and -. 
The max name length is 80 @@ -496,7 +496,7 @@ func (client *SnapshotsClient) BeginRevokeAccess(ctx context.Context, resourceGr // RevokeAccess - Revokes access to a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *SnapshotsClient) revokeAccess(ctx context.Context, resourceGroupName string, snapshotName string, options *SnapshotsClientBeginRevokeAccessOptions) (*http.Response, error) { var err error const operationName = "SnapshotsClient.BeginRevokeAccess" @@ -538,7 +538,7 @@ func (client *SnapshotsClient) revokeAccessCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() return req, nil } @@ -546,7 +546,7 @@ func (client *SnapshotsClient) revokeAccessCreateRequest(ctx context.Context, re // BeginUpdate - Updates (patches) a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 // - resourceGroupName - The name of the resource group. // - snapshotName - The name of the snapshot that is being created. The name can't be changed after the snapshot is created. // Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 @@ -573,7 +573,7 @@ func (client *SnapshotsClient) BeginUpdate(ctx context.Context, resourceGroupNam // Update - Updates (patches) a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-04-02 +// Generated from API version 2023-10-02 func (client *SnapshotsClient) update(ctx context.Context, resourceGroupName string, snapshotName string, snapshot SnapshotUpdate, options *SnapshotsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "SnapshotsClient.BeginUpdate" @@ -615,7 +615,7 @@ func (client *SnapshotsClient) updateCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-04-02") + reqQP.Set("api-version", "2023-10-02") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, snapshot); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/CHANGELOG.md index 837482870..acc2e104c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/CHANGELOG.md @@ -1,5 +1,110 @@ # Release History +## 4.7.0 (2024-01-26) +### Features Added + +- New field `NodeSoakDurationInMinutes` in struct `AgentPoolUpgradeSettings` + + +## 4.7.0-beta.1 (2023-12-22) +### Features Added + +- New value `AgentPoolTypeVirtualMachines` added to enum type `AgentPoolType` +- New value `NetworkPolicyNone` added to enum type `NetworkPolicy` +- New value `NodeOSUpgradeChannelSecurityPatch` added to enum type `NodeOSUpgradeChannel` +- New value `OSSKUMariner`, `OSSKUWindowsAnnual` added to enum type `OSSKU` +- New value `PublicNetworkAccessSecuredByPerimeter` added to enum type `PublicNetworkAccess` 
+- New value `SnapshotTypeManagedCluster` added to enum type `SnapshotType` +- New value `WorkloadRuntimeKataMshvVMIsolation` added to enum type `WorkloadRuntime` +- New enum type `AddonAutoscaling` with values `AddonAutoscalingDisabled`, `AddonAutoscalingEnabled` +- New enum type `AgentPoolSSHAccess` with values `AgentPoolSSHAccessDisabled`, `AgentPoolSSHAccessLocalUser` +- New enum type `GuardrailsSupport` with values `GuardrailsSupportPreview`, `GuardrailsSupportStable` +- New enum type `IpvsScheduler` with values `IpvsSchedulerLeastConnection`, `IpvsSchedulerRoundRobin` +- New enum type `Level` with values `LevelEnforcement`, `LevelOff`, `LevelWarning` +- New enum type `Mode` with values `ModeIPTABLES`, `ModeIPVS` +- New enum type `NodeProvisioningMode` with values `NodeProvisioningModeAuto`, `NodeProvisioningModeManual` +- New enum type `RestrictionLevel` with values `RestrictionLevelReadOnly`, `RestrictionLevelUnrestricted` +- New function `*AgentPoolsClient.BeginDeleteMachines(context.Context, string, string, string, AgentPoolDeleteMachinesParameter, *AgentPoolsClientBeginDeleteMachinesOptions) (*runtime.Poller[AgentPoolsClientDeleteMachinesResponse], error)` +- New function `*ClientFactory.NewMachinesClient() *MachinesClient` +- New function `*ClientFactory.NewManagedClusterSnapshotsClient() *ManagedClusterSnapshotsClient` +- New function `*ClientFactory.NewOperationStatusResultClient() *OperationStatusResultClient` +- New function `NewMachinesClient(string, azcore.TokenCredential, *arm.ClientOptions) (*MachinesClient, error)` +- New function `*MachinesClient.Get(context.Context, string, string, string, string, *MachinesClientGetOptions) (MachinesClientGetResponse, error)` +- New function `*MachinesClient.NewListPager(string, string, string, *MachinesClientListOptions) *runtime.Pager[MachinesClientListResponse]` +- New function `NewManagedClusterSnapshotsClient(string, azcore.TokenCredential, *arm.ClientOptions) (*ManagedClusterSnapshotsClient, error)` +- New function `*ManagedClusterSnapshotsClient.CreateOrUpdate(context.Context, string, string, ManagedClusterSnapshot, *ManagedClusterSnapshotsClientCreateOrUpdateOptions) (ManagedClusterSnapshotsClientCreateOrUpdateResponse, error)` +- New function `*ManagedClusterSnapshotsClient.Delete(context.Context, string, string, *ManagedClusterSnapshotsClientDeleteOptions) (ManagedClusterSnapshotsClientDeleteResponse, error)` +- New function `*ManagedClusterSnapshotsClient.Get(context.Context, string, string, *ManagedClusterSnapshotsClientGetOptions) (ManagedClusterSnapshotsClientGetResponse, error)` +- New function `*ManagedClusterSnapshotsClient.NewListByResourceGroupPager(string, *ManagedClusterSnapshotsClientListByResourceGroupOptions) *runtime.Pager[ManagedClusterSnapshotsClientListByResourceGroupResponse]` +- New function `*ManagedClusterSnapshotsClient.NewListPager(*ManagedClusterSnapshotsClientListOptions) *runtime.Pager[ManagedClusterSnapshotsClientListResponse]` +- New function `*ManagedClusterSnapshotsClient.UpdateTags(context.Context, string, string, TagsObject, *ManagedClusterSnapshotsClientUpdateTagsOptions) (ManagedClusterSnapshotsClientUpdateTagsResponse, error)` +- New function `*ManagedClustersClient.GetGuardrailsVersions(context.Context, string, string, *ManagedClustersClientGetGuardrailsVersionsOptions) (ManagedClustersClientGetGuardrailsVersionsResponse, error)` +- New function `*ManagedClustersClient.NewListGuardrailsVersionsPager(string, *ManagedClustersClientListGuardrailsVersionsOptions) 
*runtime.Pager[ManagedClustersClientListGuardrailsVersionsResponse]` +- New function `NewOperationStatusResultClient(string, azcore.TokenCredential, *arm.ClientOptions) (*OperationStatusResultClient, error)` +- New function `*OperationStatusResultClient.Get(context.Context, string, string, string, *OperationStatusResultClientGetOptions) (OperationStatusResultClientGetResponse, error)` +- New function `*OperationStatusResultClient.GetByAgentPool(context.Context, string, string, string, string, *OperationStatusResultClientGetByAgentPoolOptions) (OperationStatusResultClientGetByAgentPoolResponse, error)` +- New function `*OperationStatusResultClient.NewListPager(string, string, *OperationStatusResultClientListOptions) *runtime.Pager[OperationStatusResultClientListResponse]` +- New struct `AgentPoolArtifactStreamingProfile` +- New struct `AgentPoolDeleteMachinesParameter` +- New struct `AgentPoolGPUProfile` +- New struct `AgentPoolSecurityProfile` +- New struct `AgentPoolWindowsProfile` +- New struct `ErrorAdditionalInfo` +- New struct `ErrorDetail` +- New struct `GuardrailsAvailableVersion` +- New struct `GuardrailsAvailableVersionsList` +- New struct `GuardrailsAvailableVersionsProperties` +- New struct `GuardrailsProfile` +- New struct `Machine` +- New struct `MachineIPAddress` +- New struct `MachineListResult` +- New struct `MachineNetworkProperties` +- New struct `MachineProperties` +- New struct `ManagedClusterAIToolchainOperatorProfile` +- New struct `ManagedClusterAzureMonitorProfileAppMonitoring` +- New struct `ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryMetrics` +- New struct `ManagedClusterAzureMonitorProfileContainerInsights` +- New struct `ManagedClusterAzureMonitorProfileLogs` +- New struct `ManagedClusterAzureMonitorProfileWindowsHostLogs` +- New struct `ManagedClusterCostAnalysis` +- New struct `ManagedClusterIngressProfile` +- New struct `ManagedClusterIngressProfileWebAppRouting` +- New struct `ManagedClusterMetricsProfile` +- New struct `ManagedClusterNodeProvisioningProfile` +- New struct `ManagedClusterNodeResourceGroupProfile` +- New struct `ManagedClusterPropertiesForSnapshot` +- New struct `ManagedClusterSecurityProfileImageIntegrity` +- New struct `ManagedClusterSecurityProfileNodeRestriction` +- New struct `ManagedClusterSnapshot` +- New struct `ManagedClusterSnapshotListResult` +- New struct `ManagedClusterSnapshotProperties` +- New struct `ManualScaleProfile` +- New struct `NetworkMonitoring` +- New struct `NetworkProfileForSnapshot` +- New struct `NetworkProfileKubeProxyConfig` +- New struct `NetworkProfileKubeProxyConfigIpvsConfig` +- New struct `OperationStatusResult` +- New struct `OperationStatusResultList` +- New struct `ScaleProfile` +- New struct `VirtualMachineNodes` +- New struct `VirtualMachinesProfile` +- New field `NodeSoakDurationInMinutes` in struct `AgentPoolUpgradeSettings` +- New field `IgnorePodDisruptionBudget` in struct `AgentPoolsClientBeginDeleteOptions` +- New field `EnableVnetIntegration`, `SubnetID` in struct `ManagedClusterAPIServerAccessProfile` +- New field `ArtifactStreamingProfile`, `EnableCustomCATrust`, `GpuProfile`, `MessageOfTheDay`, `NodeInitializationTaints`, `SecurityProfile`, `VirtualMachineNodesStatus`, `VirtualMachinesProfile`, `WindowsProfile` in struct `ManagedClusterAgentPoolProfile` +- New field `ArtifactStreamingProfile`, `EnableCustomCATrust`, `GpuProfile`, `MessageOfTheDay`, `NodeInitializationTaints`, `SecurityProfile`, `VirtualMachineNodesStatus`, `VirtualMachinesProfile`, `WindowsProfile` in struct 
`ManagedClusterAgentPoolProfileProperties` +- New field `Logs` in struct `ManagedClusterAzureMonitorProfile` +- New field `AppMonitoringOpenTelemetryMetrics` in struct `ManagedClusterAzureMonitorProfileMetrics` +- New field `EffectiveNoProxy` in struct `ManagedClusterHTTPProxyConfig` +- New field `AiToolchainOperatorProfile`, `CreationData`, `EnableNamespaceResources`, `GuardrailsProfile`, `IngressProfile`, `MetricsProfile`, `NodeProvisioningProfile`, `NodeResourceGroupProfile` in struct `ManagedClusterProperties` +- New field `DaemonsetEvictionForEmptyNodes`, `DaemonsetEvictionForOccupiedNodes`, `IgnoreDaemonsetsUtilization` in struct `ManagedClusterPropertiesAutoScalerProfile` +- New field `CustomCATrustCertificates`, `ImageIntegrity`, `NodeRestriction` in struct `ManagedClusterSecurityProfile` +- New field `Version` in struct `ManagedClusterStorageProfileDiskCSIDriver` +- New field `AddonAutoscaling` in struct `ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler` +- New field `IgnorePodDisruptionBudget` in struct `ManagedClustersClientBeginDeleteOptions` +- New field `KubeProxyConfig`, `Monitoring` in struct `NetworkProfile` + + ## 4.6.0 (2023-11-24) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/README.md index 7cdd0c92e..44d307082 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/README.md @@ -1,6 +1,6 @@ # Azure Container Service Module for Go -[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4) The `armcontainerservice` module provides operations for working with Azure Container Service. @@ -20,7 +20,7 @@ This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for ve Install the Azure Container Service module: ```sh -go get github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2 +go get github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 ``` ## Authorization @@ -57,7 +57,7 @@ clientFactory, err := armcontainerservice.NewClientFactory(, cr A client groups a set of related APIs, providing access to its functionality. Create one or more clients to access the APIs you require using client factory. 
```go -client := clientFactory.NewSnapshotsClient() +client := clientFactory.NewAgentPoolsClient() ``` ## Fakes diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/agentpools_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/agentpools_client.go index bf2e9c712..ba8dcb84c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/agentpools_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/agentpools_client.go @@ -48,7 +48,7 @@ func NewAgentPoolsClient(subscriptionID string, credential azcore.TokenCredentia // before cancellation can take place, a 409 error code is returned. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - agentPoolName - The name of the agent pool. @@ -77,7 +77,7 @@ func (client *AgentPoolsClient) BeginAbortLatestOperation(ctx context.Context, r // before cancellation can take place, a 409 error code is returned. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 func (client *AgentPoolsClient) abortLatestOperation(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string, options *AgentPoolsClientBeginAbortLatestOperationOptions) (*http.Response, error) { var err error const operationName = "AgentPoolsClient.BeginAbortLatestOperation" @@ -123,7 +123,7 @@ func (client *AgentPoolsClient) abortLatestOperationCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -132,7 +132,7 @@ func (client *AgentPoolsClient) abortLatestOperationCreateRequest(ctx context.Co // BeginCreateOrUpdate - Creates or updates an agent pool in the specified managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - agentPoolName - The name of the agent pool. @@ -159,7 +159,7 @@ func (client *AgentPoolsClient) BeginCreateOrUpdate(ctx context.Context, resourc // CreateOrUpdate - Creates or updates an agent pool in the specified managed cluster. // If the operation fails it returns an *azcore.ResponseError type. 
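[Editor's aside, not part of the patch] The changelog above names `NodeSoakDurationInMinutes` on `AgentPoolUpgradeSettings` as the headline addition in armcontainerservice v4.7.0, and this vendor bump is what makes it reachable from the driver. A rough sketch of setting it through `AgentPoolsClient.BeginCreateOrUpdate` follows; the resource names, VM size, node count, and surge value are placeholders, not values taken from this repository.

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := armcontainerservice.NewAgentPoolsClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Upgrade settings now carry the node soak duration added in v4.7.0:
	// wait 10 minutes after draining each node before moving on to the next one.
	poller, err := client.BeginCreateOrUpdate(context.Background(), "<resource-group>", "<cluster-name>", "<pool-name>",
		armcontainerservice.AgentPool{
			Properties: &armcontainerservice.ManagedClusterAgentPoolProfileProperties{
				Mode:   to.Ptr(armcontainerservice.AgentPoolModeUser),
				Count:  to.Ptr[int32](3),
				VMSize: to.Ptr("Standard_DS2_v2"),
				UpgradeSettings: &armcontainerservice.AgentPoolUpgradeSettings{
					MaxSurge:                  to.Ptr("33%"),
					NodeSoakDurationInMinutes: to.Ptr[int32](10),
				},
			},
		}, nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := poller.PollUntilDone(context.Background(), nil); err != nil {
		log.Fatal(err)
	}
	log.Println("agent pool updated")
}
```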
// -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 func (client *AgentPoolsClient) createOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string, parameters AgentPool, options *AgentPoolsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "AgentPoolsClient.BeginCreateOrUpdate" @@ -205,7 +205,7 @@ func (client *AgentPoolsClient) createOrUpdateCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -217,7 +217,7 @@ func (client *AgentPoolsClient) createOrUpdateCreateRequest(ctx context.Context, // BeginDelete - Deletes an agent pool in the specified managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - agentPoolName - The name of the agent pool. @@ -242,7 +242,7 @@ func (client *AgentPoolsClient) BeginDelete(ctx context.Context, resourceGroupNa // Delete - Deletes an agent pool in the specified managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 func (client *AgentPoolsClient) deleteOperation(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string, options *AgentPoolsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "AgentPoolsClient.BeginDelete" @@ -288,7 +288,7 @@ func (client *AgentPoolsClient) deleteCreateRequest(ctx context.Context, resourc return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -297,7 +297,7 @@ func (client *AgentPoolsClient) deleteCreateRequest(ctx context.Context, resourc // Get - Gets the specified managed cluster agent pool. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - agentPoolName - The name of the agent pool. @@ -348,7 +348,7 @@ func (client *AgentPoolsClient) getCreateRequest(ctx context.Context, resourceGr return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -367,7 +367,7 @@ func (client *AgentPoolsClient) getHandleResponse(resp *http.Response) (AgentPoo // for more details about the version lifecycle. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. 
// - resourceName - The name of the managed cluster resource. // - options - AgentPoolsClientGetAvailableAgentPoolVersionsOptions contains the optional parameters for the AgentPoolsClient.GetAvailableAgentPoolVersions @@ -414,7 +414,7 @@ func (client *AgentPoolsClient) getAvailableAgentPoolVersionsCreateRequest(ctx c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -432,7 +432,7 @@ func (client *AgentPoolsClient) getAvailableAgentPoolVersionsHandleResponse(resp // GetUpgradeProfile - Gets the upgrade profile for an agent pool. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - agentPoolName - The name of the agent pool. @@ -484,7 +484,7 @@ func (client *AgentPoolsClient) getUpgradeProfileCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -501,7 +501,7 @@ func (client *AgentPoolsClient) getUpgradeProfileHandleResponse(resp *http.Respo // NewListPager - Gets a list of agent pools in the specified managed cluster. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - AgentPoolsClientListOptions contains the optional parameters for the AgentPoolsClient.NewListPager method. @@ -548,7 +548,7 @@ func (client *AgentPoolsClient) listCreateRequest(ctx context.Context, resourceG return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -568,7 +568,7 @@ func (client *AgentPoolsClient) listHandleResponse(resp *http.Response) (AgentPo // versions, see: https://docs.microsoft.com/azure/aks/node-image-upgrade // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - agentPoolName - The name of the agent pool. @@ -597,7 +597,7 @@ func (client *AgentPoolsClient) BeginUpgradeNodeImageVersion(ctx context.Context // versions, see: https://docs.microsoft.com/azure/aks/node-image-upgrade // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 func (client *AgentPoolsClient) upgradeNodeImageVersion(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string, options *AgentPoolsClientBeginUpgradeNodeImageVersionOptions) (*http.Response, error) { var err error const operationName = "AgentPoolsClient.BeginUpgradeNodeImageVersion" @@ -643,7 +643,7 @@ func (client *AgentPoolsClient) upgradeNodeImageVersionCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/assets.json index f0727b781..d8f19405a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/resourcemanager/containerservice/armcontainerservice", - "Tag": "go/resourcemanager/containerservice/armcontainerservice_2822db81dc" + "Tag": "go/resourcemanager/containerservice/armcontainerservice_d09553bfda" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/autorest.md index 9fccdcc2c..65197fe3f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/autorest.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/autorest.md @@ -5,9 +5,9 @@ ``` yaml azure-arm: true require: -- https://github.com/Azure/azure-rest-api-specs/blob/c1ba9df47907f9012ae14ca4616aed9e5665f9e5/specification/containerservice/resource-manager/Microsoft.ContainerService/aks/readme.md -- https://github.com/Azure/azure-rest-api-specs/blob/c1ba9df47907f9012ae14ca4616aed9e5665f9e5/specification/containerservice/resource-manager/Microsoft.ContainerService/aks/readme.go.md +- https://github.com/Azure/azure-rest-api-specs/blob/8b5618d760532b69d6f7434f57172ea52e109c79/specification/containerservice/resource-manager/Microsoft.ContainerService/aks/readme.md +- https://github.com/Azure/azure-rest-api-specs/blob/8b5618d760532b69d6f7434f57172ea52e109c79/specification/containerservice/resource-manager/Microsoft.ContainerService/aks/readme.go.md license-header: MICROSOFT_MIT_NO_VERSION -module-version: 4.6.0 -tag: package-2023-10 +module-version: 4.7.0 +tag: package-2023-11 ``` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/constants.go index 23f7875b1..a8f2d989b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/constants.go @@ -10,7 +10,7 @@ 
package armcontainerservice const ( moduleName = "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice" - moduleVersion = "v4.6.0" + moduleVersion = "v4.7.0" ) // AgentPoolMode - A cluster must have at least one 'System' Agent Pool at all times. For additional information on agent diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/maintenanceconfigurations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/maintenanceconfigurations_client.go index 6eb21b54b..9ddd6bdbf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/maintenanceconfigurations_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/maintenanceconfigurations_client.go @@ -46,7 +46,7 @@ func NewMaintenanceConfigurationsClient(subscriptionID string, credential azcore // CreateOrUpdate - Creates or updates a maintenance configuration in the specified managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - configName - The name of the maintenance configuration. @@ -99,7 +99,7 @@ func (client *MaintenanceConfigurationsClient) createOrUpdateCreateRequest(ctx c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -120,7 +120,7 @@ func (client *MaintenanceConfigurationsClient) createOrUpdateHandleResponse(resp // Delete - Deletes a maintenance configuration. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - configName - The name of the maintenance configuration. @@ -171,7 +171,7 @@ func (client *MaintenanceConfigurationsClient) deleteCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -180,7 +180,7 @@ func (client *MaintenanceConfigurationsClient) deleteCreateRequest(ctx context.C // Get - Gets the specified maintenance configuration of a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - configName - The name of the maintenance configuration. 
@@ -232,7 +232,7 @@ func (client *MaintenanceConfigurationsClient) getCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -249,7 +249,7 @@ func (client *MaintenanceConfigurationsClient) getHandleResponse(resp *http.Resp // NewListByManagedClusterPager - Gets a list of maintenance configurations in the specified managed cluster. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - MaintenanceConfigurationsClientListByManagedClusterOptions contains the optional parameters for the MaintenanceConfigurationsClient.NewListByManagedClusterPager @@ -297,7 +297,7 @@ func (client *MaintenanceConfigurationsClient) listByManagedClusterCreateRequest return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/managedclusters_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/managedclusters_client.go index 04dfeda1c..4bcbed44a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/managedclusters_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/managedclusters_client.go @@ -48,7 +48,7 @@ func NewManagedClustersClient(subscriptionID string, credential azcore.TokenCred // completes before cancellation can take place, a 409 error code is returned. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientBeginAbortLatestOperationOptions contains the optional parameters for the ManagedClustersClient.BeginAbortLatestOperation @@ -76,7 +76,7 @@ func (client *ManagedClustersClient) BeginAbortLatestOperation(ctx context.Conte // completes before cancellation can take place, a 409 error code is returned. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 func (client *ManagedClustersClient) abortLatestOperation(ctx context.Context, resourceGroupName string, resourceName string, options *ManagedClustersClientBeginAbortLatestOperationOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginAbortLatestOperation" @@ -118,7 +118,7 @@ func (client *ManagedClustersClient) abortLatestOperationCreateRequest(ctx conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -127,7 +127,7 @@ func (client *ManagedClustersClient) abortLatestOperationCreateRequest(ctx conte // BeginCreateOrUpdate - Creates or updates a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - parameters - The managed cluster to create or update. @@ -153,7 +153,7 @@ func (client *ManagedClustersClient) BeginCreateOrUpdate(ctx context.Context, re // CreateOrUpdate - Creates or updates a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 func (client *ManagedClustersClient) createOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedCluster, options *ManagedClustersClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginCreateOrUpdate" @@ -195,7 +195,7 @@ func (client *ManagedClustersClient) createOrUpdateCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -207,7 +207,7 @@ func (client *ManagedClustersClient) createOrUpdateCreateRequest(ctx context.Con // BeginDelete - Deletes a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientBeginDeleteOptions contains the optional parameters for the ManagedClustersClient.BeginDelete @@ -232,7 +232,7 @@ func (client *ManagedClustersClient) BeginDelete(ctx context.Context, resourceGr // Delete - Deletes a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 func (client *ManagedClustersClient) deleteOperation(ctx context.Context, resourceGroupName string, resourceName string, options *ManagedClustersClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginDelete" @@ -274,7 +274,7 @@ func (client *ManagedClustersClient) deleteCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -283,7 +283,7 @@ func (client *ManagedClustersClient) deleteCreateRequest(ctx context.Context, re // Get - Gets a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientGetOptions contains the optional parameters for the ManagedClustersClient.Get method. @@ -329,7 +329,7 @@ func (client *ManagedClustersClient) getCreateRequest(ctx context.Context, resou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -349,7 +349,7 @@ func (client *ManagedClustersClient) getHandleResponse(resp *http.Response) (Man // [https://docs.microsoft.com/rest/api/aks/managedclusters/listclusteradmincredentials] . // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - roleName - The name of the role for managed cluster accessProfile resource. @@ -401,7 +401,7 @@ func (client *ManagedClustersClient) getAccessProfileCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -419,7 +419,7 @@ func (client *ManagedClustersClient) getAccessProfileHandleResponse(resp *http.R // GetCommandResult - Gets the results of a command which has been run on the Managed Cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - commandID - Id of the command. 
@@ -471,7 +471,7 @@ func (client *ManagedClustersClient) getCommandResultCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -493,7 +493,7 @@ func (client *ManagedClustersClient) getCommandResultHandleResponse(resp *http.R // and available upgrades // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - location - The name of the Azure region. // - mode - The mode of the mesh. // - options - ManagedClustersClientGetMeshRevisionProfileOptions contains the optional parameters for the ManagedClustersClient.GetMeshRevisionProfile @@ -540,7 +540,7 @@ func (client *ManagedClustersClient) getMeshRevisionProfileCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -558,7 +558,7 @@ func (client *ManagedClustersClient) getMeshRevisionProfileHandleResponse(resp * // GetMeshUpgradeProfile - Gets available upgrades for a service mesh in a cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - mode - The mode of the mesh. @@ -610,7 +610,7 @@ func (client *ManagedClustersClient) getMeshUpgradeProfileCreateRequest(ctx cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -628,7 +628,7 @@ func (client *ManagedClustersClient) getMeshUpgradeProfileHandleResponse(resp *h // GetOSOptions - Gets supported OS options in the specified subscription. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - location - The name of the Azure region. // - options - ManagedClustersClientGetOSOptionsOptions contains the optional parameters for the ManagedClustersClient.GetOSOptions // method. @@ -670,7 +670,7 @@ func (client *ManagedClustersClient) getOSOptionsCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") if options != nil && options.ResourceType != nil { reqQP.Set("resource-type", *options.ResourceType) } @@ -691,7 +691,7 @@ func (client *ManagedClustersClient) getOSOptionsHandleResponse(resp *http.Respo // GetUpgradeProfile - Gets the upgrade profile of a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. 
// - options - ManagedClustersClientGetUpgradeProfileOptions contains the optional parameters for the ManagedClustersClient.GetUpgradeProfile @@ -738,7 +738,7 @@ func (client *ManagedClustersClient) getUpgradeProfileCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -755,7 +755,7 @@ func (client *ManagedClustersClient) getUpgradeProfileHandleResponse(resp *http. // NewListPager - Gets a list of managed clusters in the specified subscription. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - options - ManagedClustersClientListOptions contains the optional parameters for the ManagedClustersClient.NewListPager // method. func (client *ManagedClustersClient) NewListPager(options *ManagedClustersClientListOptions) *runtime.Pager[ManagedClustersClientListResponse] { @@ -793,7 +793,7 @@ func (client *ManagedClustersClient) listCreateRequest(ctx context.Context, opti return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -810,7 +810,7 @@ func (client *ManagedClustersClient) listHandleResponse(resp *http.Response) (Ma // NewListByResourceGroupPager - Lists managed clusters in the specified subscription and resource group. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - options - ManagedClustersClientListByResourceGroupOptions contains the optional parameters for the ManagedClustersClient.NewListByResourceGroupPager // method. @@ -853,7 +853,7 @@ func (client *ManagedClustersClient) listByResourceGroupCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -871,7 +871,7 @@ func (client *ManagedClustersClient) listByResourceGroupHandleResponse(resp *htt // ListClusterAdminCredentials - Lists the admin credentials of a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientListClusterAdminCredentialsOptions contains the optional parameters for the ManagedClustersClient.ListClusterAdminCredentials @@ -918,7 +918,7 @@ func (client *ManagedClustersClient) listClusterAdminCredentialsCreateRequest(ct return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") if options != nil && options.ServerFqdn != nil { reqQP.Set("server-fqdn", *options.ServerFqdn) } @@ -939,7 +939,7 @@ func (client *ManagedClustersClient) listClusterAdminCredentialsHandleResponse(r // ListClusterMonitoringUserCredentials - Lists the cluster monitoring user credentials of a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. 
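[Editor's aside, not part of the patch] The hunks above only retag the credential-listing operations to API version 2023-11-01. As a reference for how this surface is typically consumed, a minimal sketch of fetching an admin kubeconfig via `ManagedClustersClient.ListClusterAdminCredentials` is shown below; the subscription, resource group, cluster name, and output path are placeholders.

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := armcontainerservice.NewManagedClustersClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	// List the admin credentials and write the first returned kubeconfig to disk.
	res, err := client.ListClusterAdminCredentials(context.Background(), "<resource-group>", "<cluster-name>", nil)
	if err != nil {
		log.Fatal(err)
	}
	if len(res.Kubeconfigs) == 0 {
		log.Fatal("no kubeconfig returned")
	}
	if err := os.WriteFile("kubeconfig", res.Kubeconfigs[0].Value, 0o600); err != nil {
		log.Fatal(err)
	}
	log.Println("wrote kubeconfig for", *res.Kubeconfigs[0].Name)
}
```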
// -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientListClusterMonitoringUserCredentialsOptions contains the optional parameters for the ManagedClustersClient.ListClusterMonitoringUserCredentials @@ -986,7 +986,7 @@ func (client *ManagedClustersClient) listClusterMonitoringUserCredentialsCreateR return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") if options != nil && options.ServerFqdn != nil { reqQP.Set("server-fqdn", *options.ServerFqdn) } @@ -1007,7 +1007,7 @@ func (client *ManagedClustersClient) listClusterMonitoringUserCredentialsHandleR // ListClusterUserCredentials - Lists the user credentials of a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientListClusterUserCredentialsOptions contains the optional parameters for the ManagedClustersClient.ListClusterUserCredentials @@ -1054,7 +1054,7 @@ func (client *ManagedClustersClient) listClusterUserCredentialsCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") if options != nil && options.ServerFqdn != nil { reqQP.Set("server-fqdn", *options.ServerFqdn) } @@ -1079,7 +1079,7 @@ func (client *ManagedClustersClient) listClusterUserCredentialsHandleResponse(re // upgrades, and details on preview status of the version // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - location - The name of the Azure region. // - options - ManagedClustersClientListKubernetesVersionsOptions contains the optional parameters for the ManagedClustersClient.ListKubernetesVersions // method. @@ -1121,7 +1121,7 @@ func (client *ManagedClustersClient) listKubernetesVersionsCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1139,7 +1139,7 @@ func (client *ManagedClustersClient) listKubernetesVersionsHandleResponse(resp * // NewListMeshRevisionProfilesPager - Contains extra metadata on each revision, including supported revisions, cluster compatibility // and available upgrades // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - location - The name of the Azure region. // - options - ManagedClustersClientListMeshRevisionProfilesOptions contains the optional parameters for the ManagedClustersClient.NewListMeshRevisionProfilesPager // method. 
@@ -1182,7 +1182,7 @@ func (client *ManagedClustersClient) listMeshRevisionProfilesCreateRequest(ctx c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1199,7 +1199,7 @@ func (client *ManagedClustersClient) listMeshRevisionProfilesHandleResponse(resp // NewListMeshUpgradeProfilesPager - Lists available upgrades for all service meshes in a specific cluster. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientListMeshUpgradeProfilesOptions contains the optional parameters for the ManagedClustersClient.NewListMeshUpgradeProfilesPager @@ -1247,7 +1247,7 @@ func (client *ManagedClustersClient) listMeshUpgradeProfilesCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1265,7 +1265,7 @@ func (client *ManagedClustersClient) listMeshUpgradeProfilesHandleResponse(resp // NewListOutboundNetworkDependenciesEndpointsPager - Gets a list of egress endpoints (network endpoints of all outbound dependencies) // in the specified managed cluster. The operation returns properties of each egress endpoint. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientListOutboundNetworkDependenciesEndpointsOptions contains the optional parameters for the @@ -1313,7 +1313,7 @@ func (client *ManagedClustersClient) listOutboundNetworkDependenciesEndpointsCre return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1332,7 +1332,7 @@ func (client *ManagedClustersClient) listOutboundNetworkDependenciesEndpointsHan // [https://aka.ms/aks-managed-aad] to update your cluster with AKS-managed Azure AD. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - parameters - The AAD profile to set on the Managed Cluster @@ -1360,7 +1360,7 @@ func (client *ManagedClustersClient) BeginResetAADProfile(ctx context.Context, r // to update your cluster with AKS-managed Azure AD. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 func (client *ManagedClustersClient) resetAADProfile(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedClusterAADProfile, options *ManagedClustersClientBeginResetAADProfileOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginResetAADProfile" @@ -1402,7 +1402,7 @@ func (client *ManagedClustersClient) resetAADProfileCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -1414,7 +1414,7 @@ func (client *ManagedClustersClient) resetAADProfileCreateRequest(ctx context.Co // BeginResetServicePrincipalProfile - This action cannot be performed on a cluster that is not using a service principal // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - parameters - The service principal profile to set on the managed cluster. @@ -1441,7 +1441,7 @@ func (client *ManagedClustersClient) BeginResetServicePrincipalProfile(ctx conte // ResetServicePrincipalProfile - This action cannot be performed on a cluster that is not using a service principal // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 func (client *ManagedClustersClient) resetServicePrincipalProfile(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedClusterServicePrincipalProfile, options *ManagedClustersClientBeginResetServicePrincipalProfileOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginResetServicePrincipalProfile" @@ -1483,7 +1483,7 @@ func (client *ManagedClustersClient) resetServicePrincipalProfileCreateRequest(c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -1496,7 +1496,7 @@ func (client *ManagedClustersClient) resetServicePrincipalProfileCreateRequest(c // more details about rotating managed cluster certificates. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientBeginRotateClusterCertificatesOptions contains the optional parameters for the ManagedClustersClient.BeginRotateClusterCertificates @@ -1523,7 +1523,7 @@ func (client *ManagedClustersClient) BeginRotateClusterCertificates(ctx context. // details about rotating managed cluster certificates. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 func (client *ManagedClustersClient) rotateClusterCertificates(ctx context.Context, resourceGroupName string, resourceName string, options *ManagedClustersClientBeginRotateClusterCertificatesOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginRotateClusterCertificates" @@ -1565,7 +1565,7 @@ func (client *ManagedClustersClient) rotateClusterCertificatesCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1574,7 +1574,7 @@ func (client *ManagedClustersClient) rotateClusterCertificatesCreateRequest(ctx // BeginRotateServiceAccountSigningKeys - Rotates the service account signing keys of a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientBeginRotateServiceAccountSigningKeysOptions contains the optional parameters for the ManagedClustersClient.BeginRotateServiceAccountSigningKeys @@ -1600,7 +1600,7 @@ func (client *ManagedClustersClient) BeginRotateServiceAccountSigningKeys(ctx co // RotateServiceAccountSigningKeys - Rotates the service account signing keys of a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 func (client *ManagedClustersClient) rotateServiceAccountSigningKeys(ctx context.Context, resourceGroupName string, resourceName string, options *ManagedClustersClientBeginRotateServiceAccountSigningKeysOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginRotateServiceAccountSigningKeys" @@ -1642,7 +1642,7 @@ func (client *ManagedClustersClient) rotateServiceAccountSigningKeysCreateReques return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1653,7 +1653,7 @@ func (client *ManagedClustersClient) rotateServiceAccountSigningKeysCreateReques // [https://docs.microsoft.com/azure/aks/private-clusters#aks-run-command-preview]. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - requestPayload - The run command request @@ -1682,7 +1682,7 @@ func (client *ManagedClustersClient) BeginRunCommand(ctx context.Context, resour // [https://docs.microsoft.com/azure/aks/private-clusters#aks-run-command-preview]. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 func (client *ManagedClustersClient) runCommand(ctx context.Context, resourceGroupName string, resourceName string, requestPayload RunCommandRequest, options *ManagedClustersClientBeginRunCommandOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginRunCommand" @@ -1724,7 +1724,7 @@ func (client *ManagedClustersClient) runCommandCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, requestPayload); err != nil { @@ -1737,7 +1737,7 @@ func (client *ManagedClustersClient) runCommandCreateRequest(ctx context.Context // a cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientBeginStartOptions contains the optional parameters for the ManagedClustersClient.BeginStart @@ -1764,7 +1764,7 @@ func (client *ManagedClustersClient) BeginStart(ctx context.Context, resourceGro // a cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 func (client *ManagedClustersClient) start(ctx context.Context, resourceGroupName string, resourceName string, options *ManagedClustersClientBeginStartOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginStart" @@ -1806,7 +1806,7 @@ func (client *ManagedClustersClient) startCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1818,7 +1818,7 @@ func (client *ManagedClustersClient) startCreateRequest(ctx context.Context, res // for more details about stopping a cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientBeginStopOptions contains the optional parameters for the ManagedClustersClient.BeginStop @@ -1847,7 +1847,7 @@ func (client *ManagedClustersClient) BeginStop(ctx context.Context, resourceGrou // for more details about stopping a cluster. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 func (client *ManagedClustersClient) stop(ctx context.Context, resourceGroupName string, resourceName string, options *ManagedClustersClientBeginStopOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginStop" @@ -1889,7 +1889,7 @@ func (client *ManagedClustersClient) stopCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1898,7 +1898,7 @@ func (client *ManagedClustersClient) stopCreateRequest(ctx context.Context, reso // BeginUpdateTags - Updates tags on a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - parameters - Parameters supplied to the Update Managed Cluster Tags operation. @@ -1924,7 +1924,7 @@ func (client *ManagedClustersClient) BeginUpdateTags(ctx context.Context, resour // UpdateTags - Updates tags on a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 func (client *ManagedClustersClient) updateTags(ctx context.Context, resourceGroupName string, resourceName string, parameters TagsObject, options *ManagedClustersClientBeginUpdateTagsOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginUpdateTags" @@ -1966,7 +1966,7 @@ func (client *ManagedClustersClient) updateTagsCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models.go index 53ce459f7..e4480e0ba 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models.go @@ -144,6 +144,10 @@ type AgentPoolUpgradeSettings struct { // practices, see: // https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade MaxSurge *string + + // The amount of time (in minutes) to wait after draining a node and before reimaging it and moving on to next node. If not + // specified, the default is 0 minutes. + NodeSoakDurationInMinutes *int32 } // AzureKeyVaultKms - Azure Key Vault key management service settings for the security profile. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models_serde.go index 5a3eeac7a..da9690495 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models_serde.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models_serde.go @@ -394,6 +394,7 @@ func (a AgentPoolUpgradeSettings) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "drainTimeoutInMinutes", a.DrainTimeoutInMinutes) populate(objectMap, "maxSurge", a.MaxSurge) + populate(objectMap, "nodeSoakDurationInMinutes", a.NodeSoakDurationInMinutes) return json.Marshal(objectMap) } @@ -412,6 +413,9 @@ func (a *AgentPoolUpgradeSettings) UnmarshalJSON(data []byte) error { case "maxSurge": err = unpopulate(val, "MaxSurge", &a.MaxSurge) delete(rawMsg, key) + case "nodeSoakDurationInMinutes": + err = unpopulate(val, "NodeSoakDurationInMinutes", &a.NodeSoakDurationInMinutes) + delete(rawMsg, key) } if err != nil { return fmt.Errorf("unmarshalling type %T: %v", a, err) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/operations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/operations_client.go index 7275bdd49..4f3e48455 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/operations_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/operations_client.go @@ -39,7 +39,7 @@ func NewOperationsClient(credential azcore.TokenCredential, options *arm.ClientO // NewListPager - Gets a list of operations. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - options - OperationsClientListOptions contains the optional parameters for the OperationsClient.NewListPager method. func (client *OperationsClient) NewListPager(options *OperationsClientListOptions) *runtime.Pager[OperationsClientListResponse] { return runtime.NewPager(runtime.PagingHandler[OperationsClientListResponse]{ @@ -73,7 +73,7 @@ func (client *OperationsClient) listCreateRequest(ctx context.Context, options * return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privateendpointconnections_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privateendpointconnections_client.go index c82074711..22bd5990e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privateendpointconnections_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privateendpointconnections_client.go @@ -46,7 +46,7 @@ func NewPrivateEndpointConnectionsClient(subscriptionID string, credential azcor // BeginDelete - Deletes a private endpoint connection. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - privateEndpointConnectionName - The name of the private endpoint connection. @@ -72,7 +72,7 @@ func (client *PrivateEndpointConnectionsClient) BeginDelete(ctx context.Context, // Delete - Deletes a private endpoint connection. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 func (client *PrivateEndpointConnectionsClient) deleteOperation(ctx context.Context, resourceGroupName string, resourceName string, privateEndpointConnectionName string, options *PrivateEndpointConnectionsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "PrivateEndpointConnectionsClient.BeginDelete" @@ -118,7 +118,7 @@ func (client *PrivateEndpointConnectionsClient) deleteCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -127,7 +127,7 @@ func (client *PrivateEndpointConnectionsClient) deleteCreateRequest(ctx context. // Get - To learn more about private clusters, see: https://docs.microsoft.com/azure/aks/private-clusters // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - privateEndpointConnectionName - The name of the private endpoint connection. @@ -179,7 +179,7 @@ func (client *PrivateEndpointConnectionsClient) getCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -197,7 +197,7 @@ func (client *PrivateEndpointConnectionsClient) getHandleResponse(resp *http.Res // List - To learn more about private clusters, see: https://docs.microsoft.com/azure/aks/private-clusters // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - PrivateEndpointConnectionsClientListOptions contains the optional parameters for the PrivateEndpointConnectionsClient.List @@ -244,7 +244,7 @@ func (client *PrivateEndpointConnectionsClient) listCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -262,7 +262,7 @@ func (client *PrivateEndpointConnectionsClient) listHandleResponse(resp *http.Re // Update - Updates a private endpoint connection. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - privateEndpointConnectionName - The name of the private endpoint connection. @@ -315,7 +315,7 @@ func (client *PrivateEndpointConnectionsClient) updateCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privatelinkresources_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privatelinkresources_client.go index a9e0940b0..9a9f09811 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privatelinkresources_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privatelinkresources_client.go @@ -46,7 +46,7 @@ func NewPrivateLinkResourcesClient(subscriptionID string, credential azcore.Toke // List - To learn more about private clusters, see: https://docs.microsoft.com/azure/aks/private-clusters // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - PrivateLinkResourcesClientListOptions contains the optional parameters for the PrivateLinkResourcesClient.List @@ -93,7 +93,7 @@ func (client *PrivateLinkResourcesClient) listCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/resolveprivatelinkserviceid_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/resolveprivatelinkserviceid_client.go index bec89442e..f95952e40 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/resolveprivatelinkserviceid_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/resolveprivatelinkserviceid_client.go @@ -46,7 +46,7 @@ func NewResolvePrivateLinkServiceIDClient(subscriptionID string, credential azco // POST - Gets the private link service ID for the specified managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - parameters - Parameters required in order to resolve a private link service ID. 
@@ -94,7 +94,7 @@ func (client *ResolvePrivateLinkServiceIDClient) postCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/snapshots_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/snapshots_client.go index c0a7182fd..6043d81e1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/snapshots_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/snapshots_client.go @@ -46,7 +46,7 @@ func NewSnapshotsClient(subscriptionID string, credential azcore.TokenCredential // CreateOrUpdate - Creates or updates a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - parameters - The snapshot to create or update. @@ -94,7 +94,7 @@ func (client *SnapshotsClient) createOrUpdateCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -115,7 +115,7 @@ func (client *SnapshotsClient) createOrUpdateHandleResponse(resp *http.Response) // Delete - Deletes a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - SnapshotsClientDeleteOptions contains the optional parameters for the SnapshotsClient.Delete method. @@ -160,7 +160,7 @@ func (client *SnapshotsClient) deleteCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -169,7 +169,7 @@ func (client *SnapshotsClient) deleteCreateRequest(ctx context.Context, resource // Get - Gets a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - SnapshotsClientGetOptions contains the optional parameters for the SnapshotsClient.Get method. 
@@ -215,7 +215,7 @@ func (client *SnapshotsClient) getCreateRequest(ctx context.Context, resourceGro return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -232,7 +232,7 @@ func (client *SnapshotsClient) getHandleResponse(resp *http.Response) (Snapshots // NewListPager - Gets a list of snapshots in the specified subscription. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - options - SnapshotsClientListOptions contains the optional parameters for the SnapshotsClient.NewListPager method. func (client *SnapshotsClient) NewListPager(options *SnapshotsClientListOptions) *runtime.Pager[SnapshotsClientListResponse] { return runtime.NewPager(runtime.PagingHandler[SnapshotsClientListResponse]{ @@ -269,7 +269,7 @@ func (client *SnapshotsClient) listCreateRequest(ctx context.Context, options *S return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -286,7 +286,7 @@ func (client *SnapshotsClient) listHandleResponse(resp *http.Response) (Snapshot // NewListByResourceGroupPager - Lists snapshots in the specified subscription and resource group. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - options - SnapshotsClientListByResourceGroupOptions contains the optional parameters for the SnapshotsClient.NewListByResourceGroupPager // method. @@ -329,7 +329,7 @@ func (client *SnapshotsClient) listByResourceGroupCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -347,7 +347,7 @@ func (client *SnapshotsClient) listByResourceGroupHandleResponse(resp *http.Resp // UpdateTags - Updates tags on a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - parameters - Parameters supplied to the Update snapshot Tags operation. 
@@ -394,7 +394,7 @@ func (client *SnapshotsClient) updateTagsCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/trustedaccessrolebindings_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/trustedaccessrolebindings_client.go index 5dcd89d6a..8509aed09 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/trustedaccessrolebindings_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/trustedaccessrolebindings_client.go @@ -46,7 +46,7 @@ func NewTrustedAccessRoleBindingsClient(subscriptionID string, credential azcore // BeginCreateOrUpdate - Create or update a trusted access role binding // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - trustedAccessRoleBindingName - The name of trusted access role binding. @@ -73,7 +73,7 @@ func (client *TrustedAccessRoleBindingsClient) BeginCreateOrUpdate(ctx context.C // CreateOrUpdate - Create or update a trusted access role binding // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 func (client *TrustedAccessRoleBindingsClient) createOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, trustedAccessRoleBindingName string, trustedAccessRoleBinding TrustedAccessRoleBinding, options *TrustedAccessRoleBindingsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "TrustedAccessRoleBindingsClient.BeginCreateOrUpdate" @@ -119,7 +119,7 @@ func (client *TrustedAccessRoleBindingsClient) createOrUpdateCreateRequest(ctx c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, trustedAccessRoleBinding); err != nil { @@ -131,7 +131,7 @@ func (client *TrustedAccessRoleBindingsClient) createOrUpdateCreateRequest(ctx c // BeginDelete - Delete a trusted access role binding. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - trustedAccessRoleBindingName - The name of trusted access role binding. @@ -157,7 +157,7 @@ func (client *TrustedAccessRoleBindingsClient) BeginDelete(ctx context.Context, // Delete - Delete a trusted access role binding. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 func (client *TrustedAccessRoleBindingsClient) deleteOperation(ctx context.Context, resourceGroupName string, resourceName string, trustedAccessRoleBindingName string, options *TrustedAccessRoleBindingsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "TrustedAccessRoleBindingsClient.BeginDelete" @@ -203,7 +203,7 @@ func (client *TrustedAccessRoleBindingsClient) deleteCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -212,7 +212,7 @@ func (client *TrustedAccessRoleBindingsClient) deleteCreateRequest(ctx context.C // Get - Get a trusted access role binding. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - trustedAccessRoleBindingName - The name of trusted access role binding. @@ -264,7 +264,7 @@ func (client *TrustedAccessRoleBindingsClient) getCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -281,7 +281,7 @@ func (client *TrustedAccessRoleBindingsClient) getHandleResponse(resp *http.Resp // NewListPager - List trusted access role bindings. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - TrustedAccessRoleBindingsClientListOptions contains the optional parameters for the TrustedAccessRoleBindingsClient.NewListPager @@ -329,7 +329,7 @@ func (client *TrustedAccessRoleBindingsClient) listCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/trustedaccessroles_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/trustedaccessroles_client.go index 587e9b2bd..bfc856a4b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/trustedaccessroles_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/trustedaccessroles_client.go @@ -45,7 +45,7 @@ func NewTrustedAccessRolesClient(subscriptionID string, credential azcore.TokenC // NewListPager - List supported trusted access roles. // -// Generated from API version 2023-10-01 +// Generated from API version 2023-11-01 // - location - The name of the Azure region. // - options - TrustedAccessRolesClientListOptions contains the optional parameters for the TrustedAccessRolesClient.NewListPager // method. 
@@ -88,7 +88,7 @@ func (client *TrustedAccessRolesClient) listCreateRequest(ctx context.Context, l return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-10-01") + reqQP.Set("api-version", "2023-11-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go index 11263822b..2221e60c4 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go @@ -82,6 +82,39 @@ func isMatchingScopes(scopesOne []string, scopesTwo string) bool { return scopeCounter == len(scopesOne) } +// needsUpgrade returns true if the given key follows the v1.0 schema i.e., +// it contains an uppercase character (v1.1+ keys are all lowercase) +func needsUpgrade(key string) bool { + for _, r := range key { + if 'A' <= r && r <= 'Z' { + return true + } + } + return false +} + +// upgrade a v1.0 cache item by adding a v1.1+ item having the same value and deleting +// the v1.0 item. Callers must hold an exclusive lock on m. +func upgrade[T any](m map[string]T, k string) T { + v1_1Key := strings.ToLower(k) + v, ok := m[k] + if !ok { + // another goroutine did the upgrade while this one was waiting for the write lock + return m[v1_1Key] + } + if v2, ok := m[v1_1Key]; ok { + // cache has an equivalent v1.1+ item, which we prefer because we know it was added + // by a newer version of the module and is therefore more likely to remain valid. + // The v1.0 item may have expired because only v1.0 or earlier would update it. + v = v2 + } else { + // add an equivalent item according to the v1.1 schema + m[v1_1Key] = v + } + delete(m, k) + return v +} + // Read reads a storage token from the cache if it exists. func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { tr := TokenResponse{} @@ -255,21 +288,25 @@ func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info) func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string, tokenType, authnSchemeKeyID string) AccessToken { m.contractMu.RLock() - defer m.contractMu.RUnlock() // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens. // this shows up as the dominating node in a profile. for real-world scenarios this likely isn't // an issue, however if it does become a problem then we know where to look. 
- for _, at := range m.contract.AccessTokens { + for k, at := range m.contract.AccessTokens { if at.HomeAccountID == homeID && at.Realm == realm && at.ClientID == clientID { - if (at.TokenType == tokenType && at.AuthnSchemeKeyID == authnSchemeKeyID) || (at.TokenType == "" && (tokenType == "" || tokenType == "Bearer")) { - if checkAlias(at.Environment, envAliases) { - if isMatchingScopes(scopes, at.Scopes) { - return at + if (strings.EqualFold(at.TokenType, tokenType) && at.AuthnSchemeKeyID == authnSchemeKeyID) || (at.TokenType == "" && (tokenType == "" || tokenType == "Bearer")) { + if checkAlias(at.Environment, envAliases) && isMatchingScopes(scopes, at.Scopes) { + m.contractMu.RUnlock() + if needsUpgrade(k) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + at = upgrade(m.contract.AccessTokens, k) } + return at } } } } + m.contractMu.RUnlock() return AccessToken{} } @@ -310,15 +347,21 @@ func (m *Manager) readRefreshToken(homeID string, envAliases []string, familyID, // If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response). // https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95 m.contractMu.RLock() - defer m.contractMu.RUnlock() for _, matcher := range matchers { - for _, rt := range m.contract.RefreshTokens { + for k, rt := range m.contract.RefreshTokens { if matcher(rt) { + m.contractMu.RUnlock() + if needsUpgrade(k) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + rt = upgrade(m.contract.RefreshTokens, k) + } return rt, nil } } } + m.contractMu.RUnlock() return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found") } @@ -340,14 +383,20 @@ func (m *Manager) writeRefreshToken(refreshToken accesstokens.RefreshToken) erro func (m *Manager) readIDToken(homeID string, envAliases []string, realm, clientID string) (IDToken, error) { m.contractMu.RLock() - defer m.contractMu.RUnlock() - for _, idt := range m.contract.IDTokens { + for k, idt := range m.contract.IDTokens { if idt.HomeAccountID == homeID && idt.Realm == realm && idt.ClientID == clientID { if checkAlias(idt.Environment, envAliases) { + m.contractMu.RUnlock() + if needsUpgrade(k) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + idt = upgrade(m.contract.IDTokens, k) + } return idt, nil } } } + m.contractMu.RUnlock() return IDToken{}, fmt.Errorf("token not found") } @@ -386,7 +435,6 @@ func (m *Manager) Account(homeAccountID string) shared.Account { func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm string) (shared.Account, error) { m.contractMu.RLock() - defer m.contractMu.RUnlock() // You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key. // We only use a map because the storage contract shared between all language implementations says use a map. @@ -394,11 +442,18 @@ func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm s // a match in multiple envs (envAlias). That means we either need to hash each possible keyand do the lookup // or just statically check. Since the design is to have a storage.Manager per user, the amount of keys stored // is really low (say 2). Each hash is more expensive than the entire iteration. 
- for _, acc := range m.contract.Accounts { + for k, acc := range m.contract.Accounts { if acc.HomeAccountID == homeAccountID && checkAlias(acc.Environment, envAliases) && acc.Realm == realm { + m.contractMu.RUnlock() + if needsUpgrade(k) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + acc = upgrade(m.contract.Accounts, k) + } return acc, nil } } + m.contractMu.RUnlock() return shared.Account{}, fmt.Errorf("account not found") } @@ -412,13 +467,18 @@ func (m *Manager) writeAccount(account shared.Account) error { func (m *Manager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) { m.contractMu.RLock() - defer m.contractMu.RUnlock() - - for _, app := range m.contract.AppMetaData { + for k, app := range m.contract.AppMetaData { if checkAlias(app.Environment, envAliases) && app.ClientID == clientID { + m.contractMu.RUnlock() + if needsUpgrade(k) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + app = upgrade(m.contract.AppMetaData, k) + } return app, nil } } + m.contractMu.RUnlock() return AppMetaData{}, fmt.Errorf("not found") } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go index 2221b3d33..e346ff3df 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go @@ -51,7 +51,7 @@ type AuthenticationScheme = authority.AuthenticationScheme type Account = shared.Account -var errNoAccount = errors.New("no account was specified with public.WithAccount(), or the specified account is invalid") +var errNoAccount = errors.New("no account was specified with public.WithSilentAccount(), or the specified account is invalid") // clientOptions configures the Client's behavior. type clientOptions struct { diff --git a/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md index 6ad1c22bb..ff9c57e1d 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md +++ b/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md @@ -17,7 +17,7 @@ and corresponding updates for existing programs. ## Parsing and Validation Options -Under the hood, a new `validator` struct takes care of validating the claims. A +Under the hood, a new `Validator` struct takes care of validating the claims. A long awaited feature has been the option to fine-tune the validation of tokens. This is now possible with several `ParserOption` functions that can be appended to most `Parse` functions, such as `ParseWithClaims`. The most important options @@ -68,6 +68,16 @@ type Claims interface { } ``` +Users that previously directly called the `Valid` function on their claims, +e.g., to perform validation independently of parsing/verifying a token, can now +use the `jwt.NewValidator` function to create a `Validator` independently of the +`Parser`. + +```go +var v = jwt.NewValidator(jwt.WithLeeway(5*time.Second)) +v.Validate(myClaims) +``` + ### Supported Claim Types and Removal of `StandardClaims` The two standard claim types supported by this library, `MapClaims` and @@ -169,7 +179,7 @@ be a drop-in replacement, if you're having troubles migrating, please open an issue. 
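The migration-guide hunk above documents the exported `Validator` and the `ParserOption` functions accepted by the `Parse` family. A short sketch of that pattern against golang-jwt v5, assuming an HMAC-signed token; the secret and token string are illustrative only, and `WithExpirationRequired` is the option added elsewhere in this same dependency bump:

```go
package main

import (
	"log"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	secret := []byte("placeholder-secret")   // illustrative only
	tokenString := "<jwt-from-the-request>"  // placeholder

	claims := &jwt.RegisteredClaims{}
	_, err := jwt.ParseWithClaims(tokenString, claims,
		func(t *jwt.Token) (interface{}, error) { return secret, nil },
		jwt.WithValidMethods([]string{"HS256"}), // reject unexpected signing algorithms
		jwt.WithLeeway(5*time.Second),           // tolerate small clock skew
		jwt.WithExpirationRequired(),            // exp becomes mandatory (option added in this bump)
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("token subject:", claims.Subject)
}
```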
You can replace all occurrences of `github.com/dgrijalva/jwt-go` or -`github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v5`, either manually +`github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v4`, either manually or by using tools such as `sed` or `gofmt`. And then you'd typically run: diff --git a/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go index 4ccae2a85..ca85659ba 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go +++ b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go @@ -62,7 +62,7 @@ func (m *SigningMethodECDSA) Verify(signingString string, sig []byte, key interf case *ecdsa.PublicKey: ecdsaKey = k default: - return ErrInvalidKeyType + return newError("ECDSA verify expects *ecsda.PublicKey", ErrInvalidKeyType) } if len(sig) != 2*m.KeySize { @@ -96,7 +96,7 @@ func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) ([]byte case *ecdsa.PrivateKey: ecdsaKey = k default: - return nil, ErrInvalidKeyType + return nil, newError("ECDSA sign expects *ecsda.PrivateKey", ErrInvalidKeyType) } // Create the hasher diff --git a/vendor/github.com/golang-jwt/jwt/v5/ed25519.go b/vendor/github.com/golang-jwt/jwt/v5/ed25519.go index 3db00e4a2..c2138119e 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/ed25519.go +++ b/vendor/github.com/golang-jwt/jwt/v5/ed25519.go @@ -1,11 +1,10 @@ package jwt import ( - "errors" - "crypto" "crypto/ed25519" "crypto/rand" + "errors" ) var ( @@ -39,7 +38,7 @@ func (m *SigningMethodEd25519) Verify(signingString string, sig []byte, key inte var ok bool if ed25519Key, ok = key.(ed25519.PublicKey); !ok { - return ErrInvalidKeyType + return newError("Ed25519 verify expects ed25519.PublicKey", ErrInvalidKeyType) } if len(ed25519Key) != ed25519.PublicKeySize { @@ -61,7 +60,7 @@ func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) ([]by var ok bool if ed25519Key, ok = key.(crypto.Signer); !ok { - return nil, ErrInvalidKeyType + return nil, newError("Ed25519 sign expects crypto.Signer", ErrInvalidKeyType) } if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok { diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go b/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go index 3afb04e64..2ad542f00 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go +++ b/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go @@ -22,7 +22,7 @@ func (je joinedError) Is(err error) bool { // wrappedErrors is a workaround for wrapping multiple errors in environments // where Go 1.20 is not available. It basically uses the already implemented -// functionatlity of joinedError to handle multiple errors with supplies a +// functionality of joinedError to handle multiple errors with supplies a // custom error message that is identical to the one we produce in Go 1.20 using // multiple %w directives. type wrappedErrors struct { diff --git a/vendor/github.com/golang-jwt/jwt/v5/hmac.go b/vendor/github.com/golang-jwt/jwt/v5/hmac.go index 91b688ba9..96c62722d 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/hmac.go +++ b/vendor/github.com/golang-jwt/jwt/v5/hmac.go @@ -59,7 +59,7 @@ func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key interfa // Verify the key is the right type keyBytes, ok := key.([]byte) if !ok { - return ErrInvalidKeyType + return newError("HMAC verify expects []byte", ErrInvalidKeyType) } // Can we use the specified hashing method? 
@@ -91,7 +91,7 @@ func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key interfa func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) ([]byte, error) { if keyBytes, ok := key.([]byte); ok { if !m.Hash.Available() { - return nil, ErrHashUnavailable + return nil, newError("HMAC sign expects []byte", ErrInvalidKeyType) } hasher := hmac.New(m.Hash.New, keyBytes) diff --git a/vendor/github.com/golang-jwt/jwt/v5/none.go b/vendor/github.com/golang-jwt/jwt/v5/none.go index c93daa584..685c2ea30 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/none.go +++ b/vendor/github.com/golang-jwt/jwt/v5/none.go @@ -32,7 +32,7 @@ func (m *signingMethodNone) Verify(signingString string, sig []byte, key interfa return NoneSignatureTypeDisallowedError } // If signing method is none, signature must be an empty string - if string(sig) != "" { + if len(sig) != 0 { return newError("'none' signing method with non-empty signature", ErrTokenUnverifiable) } diff --git a/vendor/github.com/golang-jwt/jwt/v5/parser.go b/vendor/github.com/golang-jwt/jwt/v5/parser.go index f4386fbaa..ecf99af78 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/parser.go +++ b/vendor/github.com/golang-jwt/jwt/v5/parser.go @@ -18,7 +18,7 @@ type Parser struct { // Skip claims validation during token parsing. skipClaimsValidation bool - validator *validator + validator *Validator decodeStrict bool @@ -28,7 +28,7 @@ type Parser struct { // NewParser creates a new Parser with the specified options func NewParser(options ...ParserOption) *Parser { p := &Parser{ - validator: &validator{}, + validator: &Validator{}, } // Loop through our parsing options and apply them @@ -74,24 +74,40 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf } } - // Lookup key - var key interface{} + // Decode signature + token.Signature, err = p.DecodeSegment(parts[2]) + if err != nil { + return token, newError("could not base64 decode signature", ErrTokenMalformed, err) + } + text := strings.Join(parts[0:2], ".") + + // Lookup key(s) if keyFunc == nil { // keyFunc was not provided. short circuiting validation return token, newError("no keyfunc was provided", ErrTokenUnverifiable) } - if key, err = keyFunc(token); err != nil { - return token, newError("error while executing keyfunc", ErrTokenUnverifiable, err) - } - // Decode signature - token.Signature, err = p.DecodeSegment(parts[2]) + got, err := keyFunc(token) if err != nil { - return token, newError("could not base64 decode signature", ErrTokenMalformed, err) + return token, newError("error while executing keyfunc", ErrTokenUnverifiable, err) } - // Perform signature validation - if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + switch have := got.(type) { + case VerificationKeySet: + if len(have.Keys) == 0 { + return token, newError("keyfunc returned empty verification key set", ErrTokenUnverifiable) + } + // Iterate through keys and verify signature, skipping the rest when a match is found. + // Return the last error if no match is found. 
+ for _, key := range have.Keys { + if err = token.Method.Verify(text, token.Signature, key); err == nil { + break + } + } + default: + err = token.Method.Verify(text, token.Signature, have) + } + if err != nil { return token, newError("", ErrTokenSignatureInvalid, err) } @@ -99,7 +115,7 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf if !p.skipClaimsValidation { // Make sure we have at least a default validator if p.validator == nil { - p.validator = newValidator() + p.validator = NewValidator() } if err := p.validator.Validate(claims); err != nil { @@ -117,8 +133,8 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf // // WARNING: Don't use this method unless you know what you're doing. // -// It's only ever useful in cases where you know the signature is valid (because it has -// been checked previously in the stack) and you want to extract values from it. +// It's only ever useful in cases where you know the signature is valid (since it has already +// been or will be checked elsewhere in the stack) and you want to extract values from it. func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { parts = strings.Split(tokenString, ".") if len(parts) != 3 { @@ -130,9 +146,6 @@ func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Toke // parse Header var headerBytes []byte if headerBytes, err = p.DecodeSegment(parts[0]); err != nil { - if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { - return token, parts, newError("tokenstring should not contain 'bearer '", ErrTokenMalformed) - } return token, parts, newError("could not base64 decode header", ErrTokenMalformed, err) } if err = json.Unmarshal(headerBytes, &token.Header); err != nil { @@ -140,23 +153,33 @@ func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Toke } // parse Claims - var claimBytes []byte token.Claims = claims - if claimBytes, err = p.DecodeSegment(parts[1]); err != nil { + claimBytes, err := p.DecodeSegment(parts[1]) + if err != nil { return token, parts, newError("could not base64 decode claim", ErrTokenMalformed, err) } - dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) - if p.useJSONNumber { - dec.UseNumber() - } - // JSON Decode. Special case for map type to avoid weird pointer behavior - if c, ok := token.Claims.(MapClaims); ok { - err = dec.Decode(&c) + + // If `useJSONNumber` is enabled then we must use *json.Decoder to decode + // the claims. However, this comes with a performance penalty so only use + // it if we must and, otherwise, simple use json.Unmarshal. + if !p.useJSONNumber { + // JSON Unmarshal. Special case for map type to avoid weird pointer behavior. + if c, ok := token.Claims.(MapClaims); ok { + err = json.Unmarshal(claimBytes, &c) + } else { + err = json.Unmarshal(claimBytes, &claims) + } } else { - err = dec.Decode(&claims) + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + dec.UseNumber() + // JSON Decode. Special case for map type to avoid weird pointer behavior. 
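The claims-decoding rework above only pays for a *json.Decoder when UseNumber semantics were requested through the parser options. A short sketch of the observable difference, assuming the vendored jwt/v5 API; WithJSONNumber is a pre-existing option, not something added by this patch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	secret := []byte("secret")
	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256,
		jwt.MapClaims{"attempts": 3}).SignedString(secret)
	if err != nil {
		panic(err)
	}

	// Without WithJSONNumber the claim would come back as float64; with it,
	// the decoder path shown above yields json.Number.
	p := jwt.NewParser(jwt.WithJSONNumber())
	tok, err := p.Parse(signed, func(*jwt.Token) (interface{}, error) { return secret, nil })
	if err != nil {
		panic(err)
	}
	n := tok.Claims.(jwt.MapClaims)["attempts"].(json.Number)
	v, _ := n.Int64()
	fmt.Println(v) // 3
}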
+ if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } } - // Handle decode error if err != nil { return token, parts, newError("could not JSON decode claim", ErrTokenMalformed, err) } diff --git a/vendor/github.com/golang-jwt/jwt/v5/parser_option.go b/vendor/github.com/golang-jwt/jwt/v5/parser_option.go index 1b5af970f..88a780fbd 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/parser_option.go +++ b/vendor/github.com/golang-jwt/jwt/v5/parser_option.go @@ -58,6 +58,14 @@ func WithIssuedAt() ParserOption { } } +// WithExpirationRequired returns the ParserOption to make exp claim required. +// By default exp claim is optional. +func WithExpirationRequired() ParserOption { + return func(p *Parser) { + p.validator.requireExp = true + } +} + // WithAudience configures the validator to require the specified audience in // the `aud` claim. Validation will fail if the audience is not listed in the // token or the `aud` claim is missing. diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa.go b/vendor/github.com/golang-jwt/jwt/v5/rsa.go index daff09431..83cbee6ae 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/rsa.go +++ b/vendor/github.com/golang-jwt/jwt/v5/rsa.go @@ -51,7 +51,7 @@ func (m *SigningMethodRSA) Verify(signingString string, sig []byte, key interfac var ok bool if rsaKey, ok = key.(*rsa.PublicKey); !ok { - return ErrInvalidKeyType + return newError("RSA verify expects *rsa.PublicKey", ErrInvalidKeyType) } // Create hasher @@ -73,7 +73,7 @@ func (m *SigningMethodRSA) Sign(signingString string, key interface{}) ([]byte, // Validate type of key if rsaKey, ok = key.(*rsa.PrivateKey); !ok { - return nil, ErrInvalidKey + return nil, newError("RSA sign expects *rsa.PrivateKey", ErrInvalidKeyType) } // Create the hasher diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go index 9599f0a46..28c386ec4 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go +++ b/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go @@ -88,7 +88,7 @@ func (m *SigningMethodRSAPSS) Verify(signingString string, sig []byte, key inter case *rsa.PublicKey: rsaKey = k default: - return ErrInvalidKey + return newError("RSA-PSS verify expects *rsa.PublicKey", ErrInvalidKeyType) } // Create hasher @@ -115,7 +115,7 @@ func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) ([]byt case *rsa.PrivateKey: rsaKey = k default: - return nil, ErrInvalidKeyType + return nil, newError("RSA-PSS sign expects *rsa.PrivateKey", ErrInvalidKeyType) } // Create the hasher diff --git a/vendor/github.com/golang-jwt/jwt/v5/token.go b/vendor/github.com/golang-jwt/jwt/v5/token.go index c8ad7c783..352873a2d 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/token.go +++ b/vendor/github.com/golang-jwt/jwt/v5/token.go @@ -1,6 +1,7 @@ package jwt import ( + "crypto" "encoding/base64" "encoding/json" ) @@ -9,8 +10,21 @@ import ( // the key for verification. The function receives the parsed, but unverified // Token. This allows you to use properties in the Header of the token (such as // `kid`) to identify which key to use. +// +// The returned interface{} may be a single key or a VerificationKeySet containing +// multiple keys. type Keyfunc func(*Token) (interface{}, error) +// VerificationKey represents a public or secret key for verifying a token's signature. +type VerificationKey interface { + crypto.PublicKey | []uint8 +} + +// VerificationKeySet is a set of public or secret keys. 
It is used by the parser to verify a token. +type VerificationKeySet struct { + Keys []VerificationKey +} + // Token represents a JWT Token. Different fields will be used depending on // whether you're creating or parsing/verifying a token. type Token struct { diff --git a/vendor/github.com/golang-jwt/jwt/v5/types.go b/vendor/github.com/golang-jwt/jwt/v5/types.go index b82b38867..b2655a9e6 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/types.go +++ b/vendor/github.com/golang-jwt/jwt/v5/types.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "math" - "reflect" "strconv" "time" ) @@ -121,14 +120,14 @@ func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) { for _, vv := range v { vs, ok := vv.(string) if !ok { - return &json.UnsupportedTypeError{Type: reflect.TypeOf(vv)} + return ErrInvalidType } aud = append(aud, vs) } case nil: return nil default: - return &json.UnsupportedTypeError{Type: reflect.TypeOf(v)} + return ErrInvalidType } *s = aud diff --git a/vendor/github.com/golang-jwt/jwt/v5/validator.go b/vendor/github.com/golang-jwt/jwt/v5/validator.go index 385043893..008ecd871 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/validator.go +++ b/vendor/github.com/golang-jwt/jwt/v5/validator.go @@ -28,13 +28,12 @@ type ClaimsValidator interface { Validate() error } -// validator is the core of the new Validation API. It is automatically used by +// Validator is the core of the new Validation API. It is automatically used by // a [Parser] during parsing and can be modified with various parser options. // -// Note: This struct is intentionally not exported (yet) as we want to -// internally finalize its API. In the future, we might make it publicly -// available. -type validator struct { +// The [NewValidator] function should be used to create an instance of this +// struct. +type Validator struct { // leeway is an optional leeway that can be provided to account for clock skew. leeway time.Duration @@ -42,6 +41,9 @@ type validator struct { // validation. If unspecified, this defaults to time.Now. timeFunc func() time.Time + // requireExp specifies whether the exp claim is required + requireExp bool + // verifyIat specifies whether the iat (Issued At) claim will be verified. // According to https://www.rfc-editor.org/rfc/rfc7519#section-4.1.6 this // only specifies the age of the token, but no validation check is @@ -62,16 +64,28 @@ type validator struct { expectedSub string } -// newValidator can be used to create a stand-alone validator with the supplied +// NewValidator can be used to create a stand-alone validator with the supplied // options. This validator can then be used to validate already parsed claims. -func newValidator(opts ...ParserOption) *validator { +// +// Note: Under normal circumstances, explicitly creating a validator is not +// needed and can potentially be dangerous; instead functions of the [Parser] +// class should be used. +// +// The [Validator] is only checking the *validity* of the claims, such as its +// expiration time, but it does NOT perform *signature verification* of the +// token. +func NewValidator(opts ...ParserOption) *Validator { p := NewParser(opts...) return p.validator } // Validate validates the given claims. It will also perform any custom // validation if claims implements the [ClaimsValidator] interface. -func (v *validator) Validate(claims Claims) error { +// +// Note: It will NOT perform any *signature verification* on the token that +// contains the claims and expects that the [Claim] was already successfully +// verified. 
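The widened Keyfunc contract and the VerificationKeySet type above let a single keyfunc hand the parser several candidate keys, which the verification loop shown earlier tries in order. A sketch of the typical key-rotation use, assuming the vendored jwt/v5 API; the two secrets are illustrative:

package main

import (
	"fmt"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	oldKey := []byte("old-secret")
	newKey := []byte("new-secret")

	// A token that was signed before the rotation.
	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256,
		jwt.MapClaims{"sub": "rotation-demo"}).SignedString(oldKey)
	if err != nil {
		panic(err)
	}

	// The keyfunc may now return a VerificationKeySet; the parser accepts the
	// first key that verifies and only fails if none of them do.
	tok, err := jwt.Parse(signed, func(*jwt.Token) (interface{}, error) {
		return jwt.VerificationKeySet{
			Keys: []jwt.VerificationKey{newKey, oldKey},
		}, nil
	})
	fmt.Println(tok.Valid, err) // true <nil>
}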
+func (v *Validator) Validate(claims Claims) error { var ( now time.Time errs []error = make([]error, 0, 6) @@ -86,8 +100,9 @@ func (v *validator) Validate(claims Claims) error { } // We always need to check the expiration time, but usage of the claim - // itself is OPTIONAL. - if err = v.verifyExpiresAt(claims, now, false); err != nil { + // itself is OPTIONAL by default. requireExp overrides this behavior + // and makes the exp claim mandatory. + if err = v.verifyExpiresAt(claims, now, v.requireExp); err != nil { errs = append(errs, err) } @@ -149,7 +164,7 @@ func (v *validator) Validate(claims Claims) error { // // Additionally, if any error occurs while retrieving the claim, e.g., when its // the wrong type, an ErrTokenUnverifiable error will be returned. -func (v *validator) verifyExpiresAt(claims Claims, cmp time.Time, required bool) error { +func (v *Validator) verifyExpiresAt(claims Claims, cmp time.Time, required bool) error { exp, err := claims.GetExpirationTime() if err != nil { return err @@ -170,7 +185,7 @@ func (v *validator) verifyExpiresAt(claims Claims, cmp time.Time, required bool) // // Additionally, if any error occurs while retrieving the claim, e.g., when its // the wrong type, an ErrTokenUnverifiable error will be returned. -func (v *validator) verifyIssuedAt(claims Claims, cmp time.Time, required bool) error { +func (v *Validator) verifyIssuedAt(claims Claims, cmp time.Time, required bool) error { iat, err := claims.GetIssuedAt() if err != nil { return err @@ -191,7 +206,7 @@ func (v *validator) verifyIssuedAt(claims Claims, cmp time.Time, required bool) // // Additionally, if any error occurs while retrieving the claim, e.g., when its // the wrong type, an ErrTokenUnverifiable error will be returned. -func (v *validator) verifyNotBefore(claims Claims, cmp time.Time, required bool) error { +func (v *Validator) verifyNotBefore(claims Claims, cmp time.Time, required bool) error { nbf, err := claims.GetNotBefore() if err != nil { return err @@ -211,7 +226,7 @@ func (v *validator) verifyNotBefore(claims Claims, cmp time.Time, required bool) // // Additionally, if any error occurs while retrieving the claim, e.g., when its // the wrong type, an ErrTokenUnverifiable error will be returned. -func (v *validator) verifyAudience(claims Claims, cmp string, required bool) error { +func (v *Validator) verifyAudience(claims Claims, cmp string, required bool) error { aud, err := claims.GetAudience() if err != nil { return err @@ -247,7 +262,7 @@ func (v *validator) verifyAudience(claims Claims, cmp string, required bool) err // // Additionally, if any error occurs while retrieving the claim, e.g., when its // the wrong type, an ErrTokenUnverifiable error will be returned. -func (v *validator) verifyIssuer(claims Claims, cmp string, required bool) error { +func (v *Validator) verifyIssuer(claims Claims, cmp string, required bool) error { iss, err := claims.GetIssuer() if err != nil { return err @@ -267,7 +282,7 @@ func (v *validator) verifyIssuer(claims Claims, cmp string, required bool) error // // Additionally, if any error occurs while retrieving the claim, e.g., when its // the wrong type, an ErrTokenUnverifiable error will be returned. 
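With the validator exported and the requireExp switch wired into WithExpirationRequired, expiration can be made mandatory at parse time, and already-parsed claims can be re-checked on their own. As the doc comments above stress, the Validator only checks claim validity, never the signature. A small sketch under those assumptions:

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	secret := []byte("secret")

	// A token that carries no exp claim at all.
	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256,
		jwt.MapClaims{"sub": "no-expiry"}).SignedString(secret)
	if err != nil {
		panic(err)
	}

	// exp is optional by default; WithExpirationRequired turns its absence into an error.
	_, err = jwt.NewParser(jwt.WithExpirationRequired()).
		Parse(signed, func(*jwt.Token) (interface{}, error) { return secret, nil })
	fmt.Println(errors.Is(err, jwt.ErrTokenRequiredClaimMissing)) // true

	// The exported Validator re-checks claims that were parsed elsewhere.
	v := jwt.NewValidator(jwt.WithLeeway(30 * time.Second))
	exp := float64(time.Now().Add(time.Hour).Unix())
	fmt.Println(v.Validate(jwt.MapClaims{"exp": exp})) // <nil>
}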
-func (v *validator) verifySubject(claims Claims, cmp string, required bool) error { +func (v *Validator) verifySubject(claims Claims, cmp string, required bool) error { sub, err := claims.GetSubject() if err != nil { return err diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md index c9fb829dc..7ec5ac7ea 100644 --- a/vendor/github.com/google/uuid/CHANGELOG.md +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) + + +### Features + +* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) + + +### Bug Fixes + +* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) +* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) + ## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go index b404f4bec..dc60082d3 100644 --- a/vendor/github.com/google/uuid/hash.go +++ b/vendor/github.com/google/uuid/hash.go @@ -17,6 +17,12 @@ var ( NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) Nil UUID // empty UUID, all zeros + + // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1. + Max = UUID{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } ) // NewHash returns a new UUID derived from the hash of space concatenated with diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go index ba9dd5eb6..3167b643d 100644 --- a/vendor/github.com/google/uuid/version7.go +++ b/vendor/github.com/google/uuid/version7.go @@ -44,7 +44,7 @@ func NewV7FromReader(r io.Reader) (UUID, error) { // makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) // uuid[8] already has the right version number (Variant is 10) -// see function NewV7 and NewV7FromReader +// see function NewV7 and NewV7FromReader func makeV7(uuid []byte) { /* 0 1 2 3 @@ -52,7 +52,7 @@ func makeV7(uuid []byte) { +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | unix_ts_ms | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | unix_ts_ms | ver | rand_a | + | unix_ts_ms | ver | rand_a (12 bit seq) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |var| rand_b | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ @@ -61,7 +61,7 @@ func makeV7(uuid []byte) { */ _ = uuid[15] // bounds check - t := timeNow().UnixMilli() + t, s := getV7Time() uuid[0] = byte(t >> 40) uuid[1] = byte(t >> 32) @@ -70,6 +70,35 @@ func makeV7(uuid []byte) { uuid[4] = byte(t >> 8) uuid[5] = byte(t) - uuid[6] = 0x70 | (uuid[6] & 0x0F) - // uuid[8] has already has right version + uuid[6] = 0x70 | (0x0F & byte(s>>8)) + uuid[7] = byte(s) +} + +// lastV7time is the last time we returned stored as: +// +// 52 bits of time in milliseconds since epoch +// 12 bits of (fractional nanoseconds) >> 8 +var lastV7time int64 + +const nanoPerMilli = 1000000 + +// getV7Time returns the time in 
milliseconds and nanoseconds / 256. +// The returned (milli << 12 + seq) is guarenteed to be greater than +// (milli << 12 + seq) returned by any previous call to getV7Time. +func getV7Time() (milli, seq int64) { + timeMu.Lock() + defer timeMu.Unlock() + + nano := timeNow().UnixNano() + milli = nano / nanoPerMilli + // Sequence number is between 0 and 3906 (nanoPerMilli>>8) + seq = (nano - milli*nanoPerMilli) >> 8 + now := milli<<12 + seq + if now <= lastV7time { + now = lastV7time + 1 + milli = now >> 12 + seq = now & 0xfff + } + lastV7time = now + return milli, seq } diff --git a/vendor/k8s.io/cloud-provider/volume/errors/errors.go b/vendor/k8s.io/cloud-provider/volume/errors/errors.go deleted file mode 100644 index 2e5adb176..000000000 --- a/vendor/k8s.io/cloud-provider/volume/errors/errors.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package errors - -import ( - k8stypes "k8s.io/apimachinery/pkg/types" -) - -// NewDeletedVolumeInUseError returns a new instance of DeletedVolumeInUseError -// error. -func NewDeletedVolumeInUseError(message string) error { - return deletedVolumeInUseError(message) -} - -type deletedVolumeInUseError string - -var _ error = deletedVolumeInUseError("") - -// IsDeletedVolumeInUse returns true if an error returned from Delete() is -// deletedVolumeInUseError -func IsDeletedVolumeInUse(err error) bool { - switch err.(type) { - case deletedVolumeInUseError: - return true - default: - return false - } -} - -func (err deletedVolumeInUseError) Error() string { - return string(err) -} - -// DanglingAttachError indicates volume is attached to a different node -// than we expected. -type DanglingAttachError struct { - msg string - CurrentNode k8stypes.NodeName - DevicePath string -} - -func (err *DanglingAttachError) Error() string { - return err.msg -} - -// NewDanglingError create a new dangling error -func NewDanglingError(msg string, node k8stypes.NodeName, devicePath string) error { - return &DanglingAttachError{ - msg: msg, - CurrentNode: node, - DevicePath: devicePath, - } -} - -// IsDanglingError returns true if an error is DanglingAttachError -func IsDanglingError(err error) bool { - switch err.(type) { - case *DanglingAttachError: - return true - default: - return false - } -} diff --git a/vendor/k8s.io/cloud-provider/volume/helpers/rounding.go b/vendor/k8s.io/cloud-provider/volume/helpers/rounding.go deleted file mode 100644 index 101815738..000000000 --- a/vendor/k8s.io/cloud-provider/volume/helpers/rounding.go +++ /dev/null @@ -1,165 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
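Back to the google/uuid v1.6.0 hunks above: besides the new Max constant, getV7Time folds a 12-bit sequence into rand_a so that version-7 IDs created by one process never go backwards. A brief sketch, assuming the vendored github.com/google/uuid v1.6.0:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// The new Max constant is the counterpart of uuid.Nil.
	fmt.Println(uuid.Max) // ffffffff-ffff-ffff-ffff-ffffffffffff

	// NewV7 values from the same process are now strictly increasing, because
	// getV7Time above never returns a (milli, seq) pair it has handed out before.
	a, _ := uuid.NewV7()
	b, _ := uuid.NewV7()
	fmt.Println(a.String() < b.String()) // true
}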
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package helpers - -import ( - "fmt" - "math" - "math/bits" - - "k8s.io/apimachinery/pkg/api/resource" -) - -/* -The Cloud Provider's volume plugins provision disks for corresponding -PersistentVolumeClaims. Cloud Providers use different allocation unit for their -disk sizes. AWS allows you to specify the size as an integer amount of GiB, -while Portworx expects bytes for example. On AWS, if you want a volume of -1500MiB, the actual call to the AWS API should therefore be for a 2GiB disk. -This file contains functions that help rounding a storage request based on a -Cloud Provider's allocation unit. -*/ - -const ( - // GB - GigaByte size - GB = 1000 * 1000 * 1000 - // GiB - GibiByte size - GiB = 1024 * 1024 * 1024 - - // MB - MegaByte size - MB = 1000 * 1000 - // MiB - MebiByte size - MiB = 1024 * 1024 - - // KB - KiloByte size - KB = 1000 - // KiB - KibiByte size - KiB = 1024 -) - -// RoundUpToGiB rounds up given quantity upto chunks of GiB -func RoundUpToGiB(size resource.Quantity) (int64, error) { - return roundUpSizeInt64(size, GiB) -} - -// RoundUpToMB rounds up given quantity to chunks of MB -func RoundUpToMB(size resource.Quantity) (int64, error) { - return roundUpSizeInt64(size, MB) -} - -// RoundUpToMiB rounds up given quantity upto chunks of MiB -func RoundUpToMiB(size resource.Quantity) (int64, error) { - return roundUpSizeInt64(size, MiB) -} - -// RoundUpToKB rounds up given quantity to chunks of KB -func RoundUpToKB(size resource.Quantity) (int64, error) { - return roundUpSizeInt64(size, KB) -} - -// RoundUpToKiB rounds up given quantity to chunks of KiB -func RoundUpToKiB(size resource.Quantity) (int64, error) { - return roundUpSizeInt64(size, KiB) -} - -// RoundUpToB rounds up given quantity to chunks of bytes -func RoundUpToB(size resource.Quantity) (int64, error) { - return roundUpSizeInt64(size, 1) -} - -// RoundUpToGiBInt rounds up given quantity upto chunks of GiB. It returns an -// int instead of an int64 and an error if there's overflow -func RoundUpToGiBInt(size resource.Quantity) (int, error) { - return roundUpSizeInt(size, GiB) -} - -// RoundUpToMBInt rounds up given quantity to chunks of MB. It returns an -// int instead of an int64 and an error if there's overflow -func RoundUpToMBInt(size resource.Quantity) (int, error) { - return roundUpSizeInt(size, MB) -} - -// RoundUpToMiBInt rounds up given quantity upto chunks of MiB. It returns an -// int instead of an int64 and an error if there's overflow -func RoundUpToMiBInt(size resource.Quantity) (int, error) { - return roundUpSizeInt(size, MiB) -} - -// RoundUpToKBInt rounds up given quantity to chunks of KB. It returns an -// int instead of an int64 and an error if there's overflow -func RoundUpToKBInt(size resource.Quantity) (int, error) { - return roundUpSizeInt(size, KB) -} - -// RoundUpToKiBInt rounds up given quantity upto chunks of KiB. It returns an -// int instead of an int64 and an error if there's overflow -func RoundUpToKiBInt(size resource.Quantity) (int, error) { - return roundUpSizeInt(size, KiB) -} - -// RoundUpToGiBInt32 rounds up given quantity up to chunks of GiB. 
It returns an -// int32 instead of an int64 and an error if there's overflow -func RoundUpToGiBInt32(size resource.Quantity) (int32, error) { - return roundUpSizeInt32(size, GiB) -} - -// roundUpSizeInt calculates how many allocation units are needed to accommodate -// a volume of a given size. It returns an int and an error if there's overflow -func roundUpSizeInt(size resource.Quantity, allocationUnitBytes int64) (int, error) { - if bits.UintSize == 32 { - res, err := roundUpSizeInt32(size, allocationUnitBytes) - return int(res), err - } - res, err := roundUpSizeInt64(size, allocationUnitBytes) - return int(res), err -} - -// roundUpSizeInt32 calculates how many allocation units are needed to accommodate -// a volume of a given size. It returns an int32 and an error if there's overflow -func roundUpSizeInt32(size resource.Quantity, allocationUnitBytes int64) (int32, error) { - roundedUpInt32, err := roundUpSizeInt64(size, allocationUnitBytes) - if err != nil { - return 0, err - } - if roundedUpInt32 > math.MaxInt32 { - return 0, fmt.Errorf("quantity %s is too great, overflows int32", size.String()) - } - return int32(roundedUpInt32), nil -} - -// roundUpSizeInt64 calculates how many allocation units are needed to accommodate -// a volume of a given size. It returns an int64 and an error if there's overflow -func roundUpSizeInt64(size resource.Quantity, allocationUnitBytes int64) (int64, error) { - // Use CmpInt64() to find out if the value of "size" would overflow an - // int64 and therefore have Value() return a wrong result. Then, retrieve - // the value as int64 and perform the rounding. - // It's not convenient to use AsScale() and related functions as they don't - // support BinarySI format, nor can we use AsInt64() directly since it's - // only implemented for int64 scaled numbers (int64Amount). - - // CmpInt64() actually returns 0 when comparing an amount bigger than MaxInt64. - if size.CmpInt64(math.MaxInt64) >= 0 { - return 0, fmt.Errorf("quantity %s is too great, overflows int64", size.String()) - } - volumeSizeBytes := size.Value() - - roundedUp := volumeSizeBytes / allocationUnitBytes - if volumeSizeBytes%allocationUnitBytes > 0 { - roundedUp++ - } - return roundedUp, nil -} diff --git a/vendor/k8s.io/cloud-provider/volume/helpers/zones.go b/vendor/k8s.io/cloud-provider/volume/helpers/zones.go deleted file mode 100644 index ff3a39284..000000000 --- a/vendor/k8s.io/cloud-provider/volume/helpers/zones.go +++ /dev/null @@ -1,313 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package helpers - -import ( - "fmt" - "hash/fnv" - "math/rand" - "strconv" - "strings" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" - cloudvolume "k8s.io/cloud-provider/volume" - "k8s.io/klog/v2" -) - -// LabelZonesToSet converts a PV label value from string containing a delimited list of zones to set -func LabelZonesToSet(labelZonesValue string) (sets.String, error) { - return stringToSet(labelZonesValue, cloudvolume.LabelMultiZoneDelimiter) -} - -// ZonesSetToLabelValue converts zones set to label value -func ZonesSetToLabelValue(strSet sets.String) string { - return strings.Join(strSet.UnsortedList(), cloudvolume.LabelMultiZoneDelimiter) -} - -// ZonesToSet converts a string containing a comma separated list of zones to set -func ZonesToSet(zonesString string) (sets.String, error) { - zones, err := stringToSet(zonesString, ",") - if err != nil { - return nil, fmt.Errorf("error parsing zones %s, must be strings separated by commas: %v", zonesString, err) - } - return zones, nil -} - -// StringToSet converts a string containing list separated by specified delimiter to a set -func stringToSet(str, delimiter string) (sets.String, error) { - zonesSlice := strings.Split(str, delimiter) - zonesSet := make(sets.String) - for _, zone := range zonesSlice { - trimmedZone := strings.TrimSpace(zone) - if trimmedZone == "" { - return make(sets.String), fmt.Errorf( - "%q separated list (%q) must not contain an empty string", - delimiter, - str) - } - zonesSet.Insert(trimmedZone) - } - return zonesSet, nil -} - -// LabelZonesToList converts a PV label value from string containing a delimited list of zones to list -func LabelZonesToList(labelZonesValue string) ([]string, error) { - return stringToList(labelZonesValue, cloudvolume.LabelMultiZoneDelimiter) -} - -// StringToList converts a string containing list separated by specified delimiter to a list -func stringToList(str, delimiter string) ([]string, error) { - zonesSlice := make([]string, 0) - for _, zone := range strings.Split(str, delimiter) { - trimmedZone := strings.TrimSpace(zone) - if trimmedZone == "" { - return nil, fmt.Errorf( - "%q separated list (%q) must not contain an empty string", - delimiter, - str) - } - zonesSlice = append(zonesSlice, trimmedZone) - } - return zonesSlice, nil -} - -// SelectZoneForVolume is a wrapper around SelectZonesForVolume -// to select a single zone for a volume based on parameters -func SelectZoneForVolume(zoneParameterPresent, zonesParameterPresent bool, zoneParameter string, zonesParameter, zonesWithNodes sets.String, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm, pvcName string) (string, error) { - zones, err := SelectZonesForVolume(zoneParameterPresent, zonesParameterPresent, zoneParameter, zonesParameter, zonesWithNodes, node, allowedTopologies, pvcName, 1) - if err != nil { - return "", err - } - zone, ok := zones.PopAny() - if !ok { - return "", fmt.Errorf("could not determine a zone to provision volume in") - } - return zone, nil -} - -// SelectZonesForVolume selects zones for a volume based on several factors: -// node.zone, allowedTopologies, zone/zones parameters from storageclass, -// zones with active nodes from the cluster. The number of zones = replicas. 
-func SelectZonesForVolume(zoneParameterPresent, zonesParameterPresent bool, zoneParameter string, zonesParameter, zonesWithNodes sets.String, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm, pvcName string, numReplicas uint32) (sets.String, error) { - if zoneParameterPresent && zonesParameterPresent { - return nil, fmt.Errorf("both zone and zones StorageClass parameters must not be used at the same time") - } - - var zoneFromNode string - // pick one zone from node if present - if node != nil { - // VolumeScheduling implicit since node is not nil - if zoneParameterPresent || zonesParameterPresent { - return nil, fmt.Errorf("zone[s] cannot be specified in StorageClass if VolumeBindingMode is set to WaitForFirstConsumer. Please specify allowedTopologies in StorageClass for constraining zones") - } - - // pick node's zone for one of the replicas - var ok bool - zoneFromNode, ok = node.ObjectMeta.Labels[v1.LabelTopologyZone] - if !ok { - zoneFromNode, ok = node.ObjectMeta.Labels[v1.LabelFailureDomainBetaZone] - if !ok { - return nil, fmt.Errorf("Either %s or %s Label for node missing", v1.LabelTopologyZone, v1.LabelFailureDomainBetaZone) - } - } - // if single replica volume and node with zone found, return immediately - if numReplicas == 1 { - return sets.NewString(zoneFromNode), nil - } - } - - // pick zone from allowedZones if specified - allowedZones, err := ZonesFromAllowedTopologies(allowedTopologies) - if err != nil { - return nil, err - } - - if (len(allowedTopologies) > 0) && (allowedZones.Len() == 0) { - return nil, fmt.Errorf("no matchLabelExpressions with %s key found in allowedTopologies. Please specify matchLabelExpressions with %s key", v1.LabelTopologyZone, v1.LabelTopologyZone) - } - - if allowedZones.Len() > 0 { - // VolumeScheduling implicit since allowedZones present - if zoneParameterPresent || zonesParameterPresent { - return nil, fmt.Errorf("zone[s] cannot be specified in StorageClass if allowedTopologies specified") - } - // scheduler will guarantee if node != null above, zoneFromNode is member of allowedZones. - // so if zoneFromNode != "", we can safely assume it is part of allowedZones. - zones, err := chooseZonesForVolumeIncludingZone(allowedZones, pvcName, zoneFromNode, numReplicas) - if err != nil { - return nil, fmt.Errorf("cannot process zones in allowedTopologies: %v", err) - } - return zones, nil - } - - // pick zone from parameters if present - if zoneParameterPresent { - if numReplicas > 1 { - return nil, fmt.Errorf("zone cannot be specified if desired number of replicas for pv is greather than 1. Please specify zones or allowedTopologies to specify desired zones") - } - return sets.NewString(zoneParameter), nil - } - - if zonesParameterPresent { - if uint32(zonesParameter.Len()) < numReplicas { - return nil, fmt.Errorf("not enough zones found in zones parameter to provision a volume with %d replicas. 
Found %d zones, need %d zones", numReplicas, zonesParameter.Len(), numReplicas) - } - // directly choose from zones parameter; no zone from node need to be considered - return ChooseZonesForVolume(zonesParameter, pvcName, numReplicas), nil - } - - // pick zone from zones with nodes - if zonesWithNodes.Len() > 0 { - // If node != null (and thus zoneFromNode != ""), zoneFromNode will be member of zonesWithNodes - zones, err := chooseZonesForVolumeIncludingZone(zonesWithNodes, pvcName, zoneFromNode, numReplicas) - if err != nil { - return nil, fmt.Errorf("cannot process zones where nodes exist in the cluster: %v", err) - } - return zones, nil - } - return nil, fmt.Errorf("cannot determine zones to provision volume in") -} - -// ZonesFromAllowedTopologies returns a list of zones specified in allowedTopologies -func ZonesFromAllowedTopologies(allowedTopologies []v1.TopologySelectorTerm) (sets.String, error) { - zones := make(sets.String) - for _, term := range allowedTopologies { - for _, exp := range term.MatchLabelExpressions { - if exp.Key == v1.LabelTopologyZone || exp.Key == v1.LabelFailureDomainBetaZone { - for _, value := range exp.Values { - zones.Insert(value) - } - } else { - return nil, fmt.Errorf("unsupported key found in matchLabelExpressions: %s", exp.Key) - } - } - } - return zones, nil -} - -// chooseZonesForVolumeIncludingZone is a wrapper around ChooseZonesForVolume that ensures zoneToInclude is chosen -// zoneToInclude can either be empty in which case it is ignored. If non-empty, zoneToInclude is expected to be member of zones. -// numReplicas is expected to be > 0 and <= zones.Len() -func chooseZonesForVolumeIncludingZone(zones sets.String, pvcName, zoneToInclude string, numReplicas uint32) (sets.String, error) { - if numReplicas == 0 { - return nil, fmt.Errorf("invalid number of replicas passed") - } - if uint32(zones.Len()) < numReplicas { - return nil, fmt.Errorf("not enough zones found to provision a volume with %d replicas. Need at least %d distinct zones for a volume with %d replicas", numReplicas, numReplicas, numReplicas) - } - if zoneToInclude != "" && !zones.Has(zoneToInclude) { - return nil, fmt.Errorf("zone to be included: %s needs to be member of set: %v", zoneToInclude, zones) - } - if uint32(zones.Len()) == numReplicas { - return zones, nil - } - if zoneToInclude != "" { - zones.Delete(zoneToInclude) - numReplicas = numReplicas - 1 - } - zonesChosen := ChooseZonesForVolume(zones, pvcName, numReplicas) - if zoneToInclude != "" { - zonesChosen.Insert(zoneToInclude) - } - return zonesChosen, nil -} - -// ChooseZonesForVolume is identical to ChooseZoneForVolume, but selects a multiple zones, for multi-zone disks. -func ChooseZonesForVolume(zones sets.String, pvcName string, numZones uint32) sets.String { - // No zones available, return empty set. - replicaZones := sets.NewString() - if zones.Len() == 0 { - return replicaZones - } - - // We create the volume in a zone determined by the name - // Eventually the scheduler will coordinate placement into an available zone - hash, index := getPVCNameHashAndIndexOffset(pvcName) - - // Zones.List returns zones in a consistent order (sorted) - // We do have a potential failure case where volumes will not be properly spread, - // if the set of zones changes during StatefulSet volume creation. However, this is - // probably relatively unlikely because we expect the set of zones to be essentially - // static for clusters. 
- // Hopefully we can address this problem if/when we do full scheduler integration of - // PVC placement (which could also e.g. avoid putting volumes in overloaded or - // unhealthy zones) - zoneSlice := zones.List() - - startingIndex := index * numZones - for index = startingIndex; index < startingIndex+numZones; index++ { - zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))] - replicaZones.Insert(zone) - } - - klog.V(2).Infof("Creating volume for replicated PVC %q; chosen zones=%q from zones=%q", - pvcName, replicaZones.UnsortedList(), zoneSlice) - return replicaZones -} - -func getPVCNameHashAndIndexOffset(pvcName string) (hash uint32, index uint32) { - if pvcName == "" { - // We should always be called with a name; this shouldn't happen - klog.Warningf("No name defined during volume create; choosing random zone") - - hash = rand.Uint32() - } else { - hashString := pvcName - - // Heuristic to make sure that volumes in a StatefulSet are spread across zones - // StatefulSet PVCs are (currently) named ClaimName-StatefulSetName-Id, - // where Id is an integer index. - // Note though that if a StatefulSet pod has multiple claims, we need them to be - // in the same zone, because otherwise the pod will be unable to mount both volumes, - // and will be unschedulable. So we hash _only_ the "StatefulSetName" portion when - // it looks like `ClaimName-StatefulSetName-Id`. - // We continue to round-robin volume names that look like `Name-Id` also; this is a useful - // feature for users that are creating statefulset-like functionality without using statefulsets. - lastDash := strings.LastIndexByte(pvcName, '-') - if lastDash != -1 { - statefulsetIDString := pvcName[lastDash+1:] - statefulsetID, err := strconv.ParseUint(statefulsetIDString, 10, 32) - if err == nil { - // Offset by the statefulsetID, so we round-robin across zones - index = uint32(statefulsetID) - // We still hash the volume name, but only the prefix - hashString = pvcName[:lastDash] - - // In the special case where it looks like `ClaimName-StatefulSetName-Id`, - // hash only the StatefulSetName, so that different claims on the same StatefulSet - // member end up in the same zone. - // Note that StatefulSetName (and ClaimName) might themselves both have dashes. - // We actually just take the portion after the final - of ClaimName-StatefulSetName. - // For our purposes it doesn't much matter (just suboptimal spreading). 
- lastDash := strings.LastIndexByte(hashString, '-') - if lastDash != -1 { - hashString = hashString[lastDash+1:] - } - - klog.V(2).Infof("Detected StatefulSet-style volume name %q; index=%d", pvcName, index) - } - } - - // We hash the (base) volume name, so we don't bias towards the first N zones - h := fnv.New32() - h.Write([]byte(hashString)) - hash = h.Sum32() - } - - return hash, index -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 98fe6087a..73d4e5a92 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -11,7 +11,7 @@ github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage github.com/Azure/azure-sdk-for-go/storage github.com/Azure/azure-sdk-for-go/version -# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azcore github.com/Azure/azure-sdk-for-go/sdk/azcore/arm @@ -34,10 +34,11 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming github.com/Azure/azure-sdk-for-go/sdk/azcore/to github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing -# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 +# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azidentity -# github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 +github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal +# github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/internal/diag github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo @@ -46,13 +47,13 @@ github.com/Azure/azure-sdk-for-go/sdk/internal/log github.com/Azure/azure-sdk-for-go/sdk/internal/poller github.com/Azure/azure-sdk-for-go/sdk/internal/temporal github.com/Azure/azure-sdk-for-go/sdk/internal/uuid -# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.4.0 +# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 # github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry -# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.6.0 +# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.7.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 # github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0 @@ -113,7 +114,7 @@ github.com/Azure/go-autorest/logger # github.com/Azure/go-autorest/tracing v0.6.0 ## explicit; go 1.12 github.com/Azure/go-autorest/tracing -# github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 +# github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 ## explicit; go 1.18 github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential @@ -177,7 +178,7 @@ github.com/distribution/reference ## explicit; go 1.13 github.com/emicklei/go-restful/v3 github.com/emicklei/go-restful/v3/log -# github.com/evanphx/json-patch v5.7.0+incompatible +# 
github.com/evanphx/json-patch v5.9.0+incompatible ## explicit github.com/evanphx/json-patch # github.com/felixge/httpsnoop v1.0.4 @@ -221,7 +222,7 @@ github.com/gogo/protobuf/sortkeys # github.com/golang-jwt/jwt/v4 v4.5.0 ## explicit; go 1.16 github.com/golang-jwt/jwt/v4 -# github.com/golang-jwt/jwt/v5 v5.0.0 +# github.com/golang-jwt/jwt/v5 v5.2.0 ## explicit; go 1.18 github.com/golang-jwt/jwt/v5 # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da @@ -283,7 +284,7 @@ github.com/google/gofuzz/bytesource # github.com/google/pprof v0.0.0-20230602010524-ada837c32108 ## explicit; go 1.19 github.com/google/pprof/profile -# github.com/google/uuid v1.5.0 +# github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid # github.com/gorilla/websocket v1.5.0 @@ -392,7 +393,7 @@ github.com/pborman/uuid # github.com/pelletier/go-toml v1.9.5 ## explicit; go 1.12 github.com/pelletier/go-toml -# github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 +# github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c ## explicit; go 1.14 github.com/pkg/browser # github.com/pkg/errors v0.9.1 @@ -753,7 +754,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.29.0 => k8s.io/api v0.29.0 +# k8s.io/api v0.29.2 => k8s.io/api v0.29.0 ## explicit; go 1.21 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -814,7 +815,7 @@ k8s.io/api/storage/v1beta1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/features -# k8s.io/apimachinery v0.29.0 => k8s.io/apimachinery v0.29.0 +# k8s.io/apimachinery v0.29.2 => k8s.io/apimachinery v0.29.0 ## explicit; go 1.21 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -1025,7 +1026,7 @@ k8s.io/apiserver/plugin/pkg/audit/truncate k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/client-go v0.29.0 => k8s.io/client-go v0.29.0 +# k8s.io/client-go v0.29.2 => k8s.io/client-go v0.29.0 ## explicit; go 1.21 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -1364,9 +1365,6 @@ k8s.io/cloud-provider/names k8s.io/cloud-provider/node/helpers k8s.io/cloud-provider/options k8s.io/cloud-provider/service/helpers -k8s.io/cloud-provider/volume -k8s.io/cloud-provider/volume/errors -k8s.io/cloud-provider/volume/helpers # k8s.io/component-base v0.29.0 => k8s.io/component-base v0.29.0 ## explicit; go 1.21 k8s.io/component-base/cli/flag @@ -1554,7 +1552,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client -# sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240108181414-3a9c6084f8a5 +# sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240220055818-b76605c9c708 ## explicit; go 1.21 sigs.k8s.io/cloud-provider-azure/pkg/azureclients sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient @@ -1607,9 +1605,12 @@ sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/securitygroup sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine sigs.k8s.io/cloud-provider-azure/pkg/retry sigs.k8s.io/cloud-provider-azure/pkg/util/deepcopy +sigs.k8s.io/cloud-provider-azure/pkg/util/sets 
+sigs.k8s.io/cloud-provider-azure/pkg/util/string sigs.k8s.io/cloud-provider-azure/pkg/util/taints +sigs.k8s.io/cloud-provider-azure/pkg/util/vm sigs.k8s.io/cloud-provider-azure/pkg/version -# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240105075710-c4d4895a970b +# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240218020800-ba9f211f36bf ## explicit; go 1.20 sigs.k8s.io/cloud-provider-azure/pkg/azclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient @@ -1651,8 +1652,8 @@ sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient -# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20231208022044-b9ede3fc98e9 -## explicit; go 1.20 +# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240220055818-b76605c9c708 +## explicit; go 1.21 sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader # sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd ## explicit; go 1.18 diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth_conf.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth_conf.go index 4deeb1bb2..46a66e94f 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth_conf.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth_conf.go @@ -65,5 +65,5 @@ func (config *AzureAuthConfig) GetAzureFederatedTokenFile() (string, bool) { if clientCertPath := os.Getenv(utils.AzureFederatedTokenFile); clientCertPath != "" { return clientCertPath, true } - return config.AADClientCertPath, config.UseFederatedWorkloadIdentityExtension + return config.AADFederatedTokenFile, config.UseFederatedWorkloadIdentityExtension } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/custom.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/custom.go new file mode 100644 index 000000000..e8d8af834 --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/custom.go @@ -0,0 +1,45 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package diskclient + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" +) + +const PatchOperationName = "DisksClient.Patch" + +func (client *Client) Patch(ctx context.Context, resourceGroupName string, resourceName string, parameters armcompute.DiskUpdate) (result *armcompute.Disk, err error) { + ctx = utils.ContextWithClientName(ctx, "DisksClient") + ctx = utils.ContextWithRequestMethod(ctx, "Patch") + ctx = utils.ContextWithResourceGroupName(ctx, resourceGroupName) + ctx = utils.ContextWithSubscriptionID(ctx, client.subscriptionID) + ctx, endSpan := runtime.StartSpan(ctx, CreateOrUpdateOperationName, client.tracer, nil) + defer endSpan(err) + resp, err := utils.NewPollerWrapper(client.DisksClient.BeginUpdate(ctx, resourceGroupName, resourceName, parameters, nil)).WaitforPollerResp(ctx) + if err != nil { + return nil, err + } + if resp != nil { + return &resp.Disk, nil + } + return nil, nil +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/interface.go index af3d0a903..9b9f85e10 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/interface.go @@ -18,6 +18,8 @@ limitations under the License. package diskclient import ( + "context" + armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" @@ -29,4 +31,5 @@ type Interface interface { utils.CreateOrUpdateFunc[armcompute.Disk] utils.DeleteFunc[armcompute.Disk] utils.ListFunc[armcompute.Disk] + Patch(ctx context.Context, resourceGroupName string, resourceName string, parameters armcompute.DiskUpdate) (result *armcompute.Disk, err error) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory.go index d2b159e57..f51b85dc8 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory.go @@ -79,6 +79,7 @@ type ClientFactory interface { GetSecretClient() secretclient.Interface GetSecurityGroupClient() securitygroupclient.Interface GetSnapshotClient() snapshotclient.Interface + GetSnapshotClientForSub(subscriptionID string) (snapshotclient.Interface, error) GetSSHPublicKeyResourceClient() sshpublickeyresourceclient.Interface GetSubnetClient() subnetclient.Interface GetVaultClient() vaultclient.Interface diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_gen.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_gen.go index c1aed81d6..1c2f36ee1 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_gen.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_gen.go @@ -86,7 +86,7 @@ type ClientFactoryImpl struct { routetableclientInterface routetableclient.Interface secretclientInterface secretclient.Interface securitygroupclientInterface securitygroupclient.Interface - snapshotclientInterface snapshotclient.Interface + snapshotclientInterface sync.Map sshpublickeyresourceclientInterface sshpublickeyresourceclient.Interface subnetclientInterface subnetclient.Interface vaultclientInterface vaultclient.Interface @@ -247,7 +247,7 @@ func 
NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, c } //initialize snapshotclient - factory.snapshotclientInterface, err = factory.createSnapshotClient(config.SubscriptionID) + _, err = factory.GetSnapshotClientForSub(config.SubscriptionID) if err != nil { return nil, err } @@ -888,7 +888,24 @@ func (factory *ClientFactoryImpl) createSnapshotClient(subscription string) (sna } func (factory *ClientFactoryImpl) GetSnapshotClient() snapshotclient.Interface { - return factory.snapshotclientInterface + clientImp, _ := factory.snapshotclientInterface.Load(strings.ToLower(factory.facotryConfig.SubscriptionID)) + return clientImp.(snapshotclient.Interface) +} +func (factory *ClientFactoryImpl) GetSnapshotClientForSub(subscriptionID string) (snapshotclient.Interface, error) { + if subscriptionID == "" { + subscriptionID = factory.facotryConfig.SubscriptionID + } + clientImp, loaded := factory.snapshotclientInterface.Load(strings.ToLower(subscriptionID)) + if loaded { + return clientImp.(snapshotclient.Interface), nil + } + //It's not thread safe, but it's ok for now. because it will be called once. + clientImp, err := factory.createSnapshotClient(subscriptionID) + if err != nil { + return nil, err + } + factory.snapshotclientInterface.Store(strings.ToLower(subscriptionID), clientImp) + return clientImp.(snapshotclient.Interface), nil } func (factory *ClientFactoryImpl) createSSHPublicKeyResourceClient(subscription string) (sshpublickeyresourceclient.Interface, error) { diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient/interface.go index 763b5b338..8e0445440 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/mock_azclient/interface.go @@ -482,6 +482,21 @@ func (mr *MockClientFactoryMockRecorder) GetSnapshotClient() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSnapshotClient", reflect.TypeOf((*MockClientFactory)(nil).GetSnapshotClient)) } +// GetSnapshotClientForSub mocks base method. +func (m *MockClientFactory) GetSnapshotClientForSub(arg0 string) (snapshotclient.Interface, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSnapshotClientForSub", arg0) + ret0, _ := ret[0].(snapshotclient.Interface) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSnapshotClientForSub indicates an expected call of GetSnapshotClientForSub. +func (mr *MockClientFactoryMockRecorder) GetSnapshotClientForSub(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSnapshotClientForSub", reflect.TypeOf((*MockClientFactory)(nil).GetSnapshotClientForSub), arg0) +} + // GetSubnetClient mocks base method. 
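The factory changes above keep one snapshot client per subscription in a sync.Map and expose GetSnapshotClientForSub for cross-subscription snapshot operations. A hedged sketch of a caller; the Get call follows the generic GetFunc signature as vendored here, and the package, function, resource group, and snapshot names are illustrative only:

package snapshotutil

import (
	"context"
	"fmt"

	"sigs.k8s.io/cloud-provider-azure/pkg/azclient"
)

// PrintSnapshotID looks up a snapshot that may live in a different
// subscription than the factory's default one.
func PrintSnapshotID(ctx context.Context, factory azclient.ClientFactory, subID, resourceGroup, name string) error {
	// An empty subID falls back to the factory's default subscription; the
	// client is created once per subscription and then reused from the cache.
	client, err := factory.GetSnapshotClientForSub(subID)
	if err != nil {
		return err
	}
	snap, err := client.Get(ctx, resourceGroup, name)
	if err != nil {
		return err
	}
	fmt.Println(*snap.ID)
	return nil
}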
func (m *MockClientFactory) GetSubnetClient() subnetclient.Interface { m.ctrl.T.Helper() diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/interface.go index 1cae1fb80..b6452e6c3 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/interface.go @@ -23,7 +23,7 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils" ) -// +azure:client:verbs=get;createorupdate;delete,resource=Snapshot,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5,packageAlias=armcompute,clientName=SnapshotsClient,expand=false,rateLimitKey=snapshotRateLimit +// +azure:client:verbs=get;createorupdate;delete,resource=Snapshot,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5,packageAlias=armcompute,clientName=SnapshotsClient,expand=false,rateLimitKey=snapshotRateLimit,crossSubFactory=true type Interface interface { utils.GetFunc[armcompute.Snapshot] utils.CreateOrUpdateFunc[armcompute.Snapshot] diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/interface.go index 611a5ef5b..ade11e4e3 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/interface.go @@ -35,4 +35,5 @@ type Interface interface { InstanceView(ctx context.Context, resourceGroupName string, vmName string) (*armcompute.VirtualMachineInstanceView, error) ListVMInstanceView(ctx context.Context, resourceGroupName string) (result []*armcompute.VirtualMachine, rerr error) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, vmName string, parameters armcompute.VirtualMachine, options *armcompute.VirtualMachinesClientBeginCreateOrUpdateOptions) (*runtime.Poller[armcompute.VirtualMachinesClientCreateOrUpdateResponse], error) + BeginAttachDetachDataDisks(ctx context.Context, resourceGroupName string, vmName string, parameters armcompute.AttachDetachDataDisksRequest, options *armcompute.VirtualMachinesClientBeginAttachDetachDataDisksOptions) (*runtime.Poller[armcompute.VirtualMachinesClientAttachDetachDataDisksResponse], error) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/azure_fileclient.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/azure_fileclient.go index f66f1a40a..fc79f66c3 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/azure_fileclient.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/azure_fileclient.go @@ -93,9 +93,10 @@ func (c *Client) CreateFileShare(ctx context.Context, resourceGroupName, account if shareOptions == nil { return storage.FileShare{}, fmt.Errorf("share options is nil") } - quota := int32(shareOptions.RequestGiB) - fileShareProperties := &storage.FileShareProperties{ - ShareQuota: "a, + fileShareProperties := &storage.FileShareProperties{} + if shareOptions.RequestGiB > 0 { + quota := int32(shareOptions.RequestGiB) + fileShareProperties.ShareQuota = "a } if shareOptions.Protocol == storage.EnabledProtocolsNFS { fileShareProperties.EnabledProtocols = shareOptions.Protocol diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go 
b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go
index 3b2448658..75bd1708b 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go
@@ -113,8 +113,8 @@ const (
 	// StrRawVersion is the raw version string
 	StrRawVersion string = "raw"
 
-	// VirtualMachineScaleSetsDeallocating indicates VMSS instances are in Deallocating state.
-	VirtualMachineScaleSetsDeallocating = "Deallocating"
+	// ProvisionStateDeleting indicates VMSS instances are in Deleting state.
+	ProvisionStateDeleting = "Deleting"
 	// VmssMachineIDTemplate is the vmss manchine ID template
 	VmssMachineIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%s"
 	// VMSetCIDRIPV4TagKey specifies the node ipv4 CIDR mask of the instances on the VMSS or VMAS
@@ -132,6 +132,8 @@ const (
 	ProvisioningStateDeleting = "Deleting"
 	// ProvisioningStateSucceeded ...
 	ProvisioningStateSucceeded = "Succeeded"
+	// ProvisioningStateUnknown is the unknown provisioning state
+	ProvisioningStateUnknown = "Unknown"
 )
 
 // cache
@@ -572,3 +574,13 @@ const (
 	ClusterServiceLoadBalancerHealthProbeDefaultPath = "/healthz"
 	SharedProbeName = "cluster-service-shared-health-probe"
 )
+
+// VM power state
+const (
+	VMPowerStatePrefix = "PowerState/"
+	VMPowerStateStopped = "stopped"
+	VMPowerStateStopping = "stopping"
+	VMPowerStateDeallocated = "deallocated"
+	VMPowerStateDeallocating = "deallocating"
+	VMPowerStateUnknown = "unknown"
+)
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go
index 83e1b73fc..a44fccfbe 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go
@@ -31,10 +31,10 @@ import (
 	"github.com/Azure/go-autorest/autorest"
 	"github.com/Azure/go-autorest/autorest/adal"
 	"github.com/Azure/go-autorest/autorest/azure"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/informers"
 	clientset "k8s.io/client-go/kubernetes"
@@ -48,7 +48,6 @@ import (
 	cloudnodeutil "k8s.io/cloud-provider/node/helpers"
 	nodeutil "k8s.io/component-helpers/node/util"
 	"k8s.io/klog/v2"
-	"sigs.k8s.io/yaml"
 
 	"sigs.k8s.io/cloud-provider-azure/pkg/azclient"
 	"sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader"
@@ -76,10 +75,14 @@ import (
 	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient"
 	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient"
 	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/zoneclient"
+
+	"sigs.k8s.io/yaml"
+
 	azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
 	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
 	ratelimitconfig "sigs.k8s.io/cloud-provider-azure/pkg/provider/config"
 	"sigs.k8s.io/cloud-provider-azure/pkg/retry"
+	utilsets "sigs.k8s.io/cloud-provider-azure/pkg/util/sets"
 	"sigs.k8s.io/cloud-provider-azure/pkg/util/taints"
 )
 
@@ -331,11 +334,11 @@ type MultipleStandardLoadBalancerConfigurationSpec struct {
 // MultipleStandardLoadBalancerConfigurationStatus stores the properties regarding multiple standard load balancers.
 type MultipleStandardLoadBalancerConfigurationStatus struct {
 	// ActiveServices stores the services that are supposed to use the load balancer.
- ActiveServices sets.Set[string] `json:"activeServices" yaml:"activeServices"` + ActiveServices *utilsets.IgnoreCaseSet `json:"activeServices" yaml:"activeServices"` // ActiveNodes stores the nodes that are supposed to be in the load balancer. // It will be used in EnsureHostsInPool to make sure the given ones are in the backend pool. - ActiveNodes sets.Set[string] `json:"activeNodes" yaml:"activeNodes"` + ActiveNodes *utilsets.IgnoreCaseSet `json:"activeNodes" yaml:"activeNodes"` } // HasExtendedLocation returns true if extendedlocation prop are specified. @@ -349,7 +352,6 @@ var ( _ cloudprovider.LoadBalancer = (*Cloud)(nil) _ cloudprovider.Routes = (*Cloud)(nil) _ cloudprovider.Zones = (*Cloud)(nil) - _ cloudprovider.PVLabeler = (*Cloud)(nil) ) // Cloud holds the config and clients @@ -393,17 +395,17 @@ type Cloud struct { // Lock for access to node caches, includes nodeZones, nodeResourceGroups, and unmanagedNodes. nodeCachesLock sync.RWMutex // nodeNames holds current nodes for tracking added nodes in VM caches. - nodeNames sets.Set[string] + nodeNames *utilsets.IgnoreCaseSet // nodeZones is a mapping from Zone to a sets.Set[string] of Node's names in the Zone // it is updated by the nodeInformer - nodeZones map[string]sets.Set[string] + nodeZones map[string]*utilsets.IgnoreCaseSet // nodeResourceGroups holds nodes external resource groups nodeResourceGroups map[string]string // unmanagedNodes holds a list of nodes not managed by Azure cloud provider. - unmanagedNodes sets.Set[string] + unmanagedNodes *utilsets.IgnoreCaseSet // excludeLoadBalancerNodes holds a list of nodes that should be excluded from LoadBalancer. - excludeLoadBalancerNodes sets.Set[string] - nodePrivateIPs map[string]sets.Set[string] + excludeLoadBalancerNodes *utilsets.IgnoreCaseSet + nodePrivateIPs map[string]*utilsets.IgnoreCaseSet nodePrivateIPToNodeNameMap map[string]string // nodeInformerSynced is for determining if the informer has synced. nodeInformerSynced cache.InformerSynced @@ -441,8 +443,7 @@ type Cloud struct { // node-sync-loop routine and service-reconcile routine should not update LoadBalancer at the same time serviceReconcileLock sync.Mutex - *ManagedDiskController - + lockMap *LockMap // multipleStandardLoadBalancerConfigurationsSynced make sure the `reconcileMultipleStandardLoadBalancerConfigurations` // runs only once every time the cloud provide restarts. 
multipleStandardLoadBalancerConfigurationsSynced bool @@ -457,13 +458,13 @@ type Cloud struct { // NewCloud returns a Cloud with initialized clients func NewCloud(ctx context.Context, config *Config, callFromCCM bool) (cloudprovider.Interface, error) { az := &Cloud{ - nodeNames: sets.New[string](), - nodeZones: map[string]sets.Set[string]{}, + nodeNames: utilsets.NewString(), + nodeZones: map[string]*utilsets.IgnoreCaseSet{}, nodeResourceGroups: map[string]string{}, - unmanagedNodes: sets.New[string](), + unmanagedNodes: utilsets.NewString(), routeCIDRs: map[string]string{}, - excludeLoadBalancerNodes: sets.New[string](), - nodePrivateIPs: map[string]sets.Set[string]{}, + excludeLoadBalancerNodes: utilsets.NewString(), + nodePrivateIPs: map[string]*utilsets.IgnoreCaseSet{}, nodePrivateIPToNodeNameMap: map[string]string{}, } @@ -473,7 +474,9 @@ func NewCloud(ctx context.Context, config *Config, callFromCCM bool) (cloudprovi } az.ipv6DualStackEnabled = true - + if az.lockMap == nil { + az.lockMap = newLockMap() + } return az, nil } @@ -567,7 +570,7 @@ func (az *Cloud) InitializeCloudFromConfig(ctx context.Context, config *Config, // The default cloud config type is cloudConfigTypeMerge. config.CloudConfigType = configloader.CloudConfigTypeMerge } else { - supportedCloudConfigTypes := sets.New( + supportedCloudConfigTypes := utilsets.NewString( string(configloader.CloudConfigTypeMerge), string(configloader.CloudConfigTypeFile), string(configloader.CloudConfigTypeSecret)) @@ -581,7 +584,7 @@ func (az *Cloud) InitializeCloudFromConfig(ctx context.Context, config *Config, strings.EqualFold(config.LoadBalancerBackendPoolConfigurationType, consts.LoadBalancerBackendPoolConfigurationTypePODIP) { config.LoadBalancerBackendPoolConfigurationType = consts.LoadBalancerBackendPoolConfigurationTypeNodeIPConfiguration } else { - supportedLoadBalancerBackendPoolConfigurationTypes := sets.New( + supportedLoadBalancerBackendPoolConfigurationTypes := utilsets.NewString( strings.ToLower(consts.LoadBalancerBackendPoolConfigurationTypeNodeIPConfiguration), strings.ToLower(consts.LoadBalancerBackendPoolConfigurationTypeNodeIP), strings.ToLower(consts.LoadBalancerBackendPoolConfigurationTypePODIP)) @@ -593,7 +596,7 @@ func (az *Cloud) InitializeCloudFromConfig(ctx context.Context, config *Config, if config.ClusterServiceLoadBalancerHealthProbeMode == "" { config.ClusterServiceLoadBalancerHealthProbeMode = consts.ClusterServiceLoadBalancerHealthProbeModeServiceNodePort } else { - supportedClusterServiceLoadBalancerHealthProbeModes := sets.New( + supportedClusterServiceLoadBalancerHealthProbeModes := utilsets.NewString( strings.ToLower(consts.ClusterServiceLoadBalancerHealthProbeModeServiceNodePort), strings.ToLower(consts.ClusterServiceLoadBalancerHealthProbeModeShared), ) @@ -623,6 +626,7 @@ func (az *Cloud) InitializeCloudFromConfig(ctx context.Context, config *Config, return err } + az.lockMap = newLockMap() az.Config = *config az.Environment = *env az.ResourceRequestBackoff = resourceRequestBackoff @@ -728,9 +732,6 @@ func (az *Cloud) InitializeCloudFromConfig(ctx context.Context, config *Config, return err } - if err := InitDiskControllers(az); err != nil { - return err - } // Common controller contains the function // needed by both blob disk and managed disk controllers qps := float32(ratelimitconfig.DefaultAtachDetachDiskQPS) @@ -741,14 +742,6 @@ func (az *Cloud) InitializeCloudFromConfig(ctx context.Context, config *Config, } klog.V(2).Infof("attach/detach disk operation rate limit QPS: %f, Bucket: %d", 
qps, bucket) - common := &controllerCommon{ - cloud: az, - lockMap: newLockMap(), - AttachDetachInitialDelayInMs: defaultAttachDetachInitialDelayInMs, - } - - az.ManagedDiskController = &ManagedDiskController{common} - // updating routes and syncing zones only in CCM if callFromCCM { // start delayed route updater. @@ -795,8 +788,8 @@ func (az *Cloud) checkEnableMultipleStandardLoadBalancers() error { return fmt.Errorf("multiple standard load balancers cannot be used with backend pool type %s", consts.LoadBalancerBackendPoolConfigurationTypeNodeIPConfiguration) } - names := sets.New[string]() - primaryVMSets := sets.New[string]() + names := utilsets.NewString() + primaryVMSets := utilsets.NewString() for _, multiSLBConfig := range az.MultipleStandardLoadBalancerConfigurations { if names.Has(multiSLBConfig.Name) { return fmt.Errorf("duplicated multiple standard load balancer configuration name %s", multiSLBConfig.Name) @@ -1173,29 +1166,6 @@ func (az *Cloud) ProviderName() string { return consts.CloudProviderName } -func InitDiskControllers(az *Cloud) error { - // Common controller contains the function - // needed by both blob disk and managed disk controllers - - qps := float32(ratelimitconfig.DefaultAtachDetachDiskQPS) - bucket := ratelimitconfig.DefaultAtachDetachDiskBucket - if az.Config.AttachDetachDiskRateLimit != nil { - qps = az.Config.AttachDetachDiskRateLimit.CloudProviderRateLimitQPSWrite - bucket = az.Config.AttachDetachDiskRateLimit.CloudProviderRateLimitBucketWrite - } - klog.V(2).Infof("attach/detach disk operation rate limit QPS: %f, Bucket: %d", qps, bucket) - - common := &controllerCommon{ - cloud: az, - lockMap: newLockMap(), - AttachDetachInitialDelayInMs: defaultAttachDetachInitialDelayInMs, - } - - az.ManagedDiskController = &ManagedDiskController{common} - - return nil -} - // SetInformers sets informers for Azure cloud provider. func (az *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) { klog.Infof("Setting up informers for Azure cloud provider") @@ -1293,15 +1263,12 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) { if newNode != nil { // Add to nodeNames cache. - az.nodeNames.Insert(newNode.ObjectMeta.Name) + az.nodeNames = utilsets.SafeInsert(az.nodeNames, newNode.ObjectMeta.Name) // Add to nodeZones cache. newZone, ok := newNode.ObjectMeta.Labels[v1.LabelTopologyZone] if ok && az.isAvailabilityZone(newZone) { - if az.nodeZones[newZone] == nil { - az.nodeZones[newZone] = sets.New[string]() - } - az.nodeZones[newZone].Insert(newNode.ObjectMeta.Name) + az.nodeZones[newZone] = utilsets.SafeInsert(az.nodeZones[newZone], newNode.ObjectMeta.Name) } // Add to nodeResourceGroups cache. 
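Note on the set migration above: the provider code now relies on a case-insensitive string set whose SafeInsert helper tolerates a nil receiver, which is why the explicit nil checks around nodeZones and nodePrivateIPs could be dropped. The following is a rough standalone Go sketch of only those semantics; this toy IgnoreCaseSet illustrates the behavior the call sites assume and is not the actual pkg/util/sets implementation.

package main

import (
	"fmt"
	"strings"
)

// IgnoreCaseSet is a toy case-insensitive string set approximating the
// behavior the provider code above relies on.
type IgnoreCaseSet struct {
	m map[string]struct{}
}

// NewString builds a set from the given items.
func NewString(items ...string) *IgnoreCaseSet {
	s := &IgnoreCaseSet{m: map[string]struct{}{}}
	for _, item := range items {
		s.Insert(item)
	}
	return s
}

// Insert adds an item, normalized to lower case.
func (s *IgnoreCaseSet) Insert(item string) {
	s.m[strings.ToLower(item)] = struct{}{}
}

// Has reports membership ignoring case.
func (s *IgnoreCaseSet) Has(item string) bool {
	_, ok := s.m[strings.ToLower(item)]
	return ok
}

// Len returns the number of distinct (case-folded) items.
func (s *IgnoreCaseSet) Len() int {
	return len(s.m)
}

// SafeInsert allocates the set on first use, so callers can write
// set = SafeInsert(set, item) without nil checks, as updateNodeCaches now does.
func SafeInsert(s *IgnoreCaseSet, items ...string) *IgnoreCaseSet {
	if s == nil {
		s = NewString()
	}
	for _, item := range items {
		s.Insert(item)
	}
	return s
}

func main() {
	var nodes *IgnoreCaseSet
	nodes = SafeInsert(nodes, "Node-A")
	fmt.Println(nodes.Has("node-a"), nodes.Len()) // true 1
}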
@@ -1337,15 +1304,12 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) { // Add to nodePrivateIPs cache for _, address := range getNodePrivateIPAddresses(newNode) { - if az.nodePrivateIPs[strings.ToLower(newNode.Name)] == nil { - az.nodePrivateIPs[strings.ToLower(newNode.Name)] = sets.New[string]() - } if az.nodePrivateIPToNodeNameMap == nil { az.nodePrivateIPToNodeNameMap = make(map[string]string) } klog.V(6).Infof("adding IP address %s of the node %s", address, newNode.Name) - az.nodePrivateIPs[strings.ToLower(newNode.Name)].Insert(address) + az.nodePrivateIPs[strings.ToLower(newNode.Name)] = utilsets.SafeInsert(az.nodePrivateIPs[strings.ToLower(newNode.Name)], address) az.nodePrivateIPToNodeNameMap[address] = newNode.Name } } @@ -1381,7 +1345,7 @@ func (az *Cloud) updateNodeTaint(node *v1.Node) { } // GetActiveZones returns all the zones in which k8s nodes are currently running. -func (az *Cloud) GetActiveZones() (sets.Set[string], error) { +func (az *Cloud) GetActiveZones() (*utilsets.IgnoreCaseSet, error) { if az.nodeInformerSynced == nil { return nil, fmt.Errorf("azure cloud provider doesn't have informers set") } @@ -1392,9 +1356,9 @@ func (az *Cloud) GetActiveZones() (sets.Set[string], error) { return nil, fmt.Errorf("node informer is not synced when trying to GetActiveZones") } - zones := sets.New[string]() + zones := utilsets.NewString() for zone, nodes := range az.nodeZones { - if len(nodes) > 0 { + if nodes.Len() > 0 { zones.Insert(zone) } } @@ -1429,7 +1393,7 @@ func (az *Cloud) GetNodeResourceGroup(nodeName string) (string, error) { } // GetNodeNames returns a set of all node names in the k8s cluster. -func (az *Cloud) GetNodeNames() (sets.Set[string], error) { +func (az *Cloud) GetNodeNames() (*utilsets.IgnoreCaseSet, error) { // Kubelet won't set az.nodeInformerSynced, return nil. if az.nodeInformerSynced == nil { return nil, nil @@ -1441,14 +1405,14 @@ func (az *Cloud) GetNodeNames() (sets.Set[string], error) { return nil, fmt.Errorf("node informer is not synced when trying to GetNodeNames") } - return sets.New(az.nodeNames.UnsortedList()...), nil + return utilsets.NewString(az.nodeNames.UnsortedList()...), nil } // GetResourceGroups returns a set of resource groups that all nodes are running on. -func (az *Cloud) GetResourceGroups() (sets.Set[string], error) { +func (az *Cloud) GetResourceGroups() (*utilsets.IgnoreCaseSet, error) { // Kubelet won't set az.nodeInformerSynced, always return configured resourceGroup. if az.nodeInformerSynced == nil { - return sets.New(az.ResourceGroup), nil + return utilsets.NewString(az.ResourceGroup), nil } az.nodeCachesLock.RLock() @@ -1457,7 +1421,7 @@ func (az *Cloud) GetResourceGroups() (sets.Set[string], error) { return nil, fmt.Errorf("node informer is not synced when trying to GetResourceGroups") } - resourceGroups := sets.New(az.ResourceGroup) + resourceGroups := utilsets.NewString(az.ResourceGroup) for _, rg := range az.nodeResourceGroups { resourceGroups.Insert(rg) } @@ -1466,7 +1430,7 @@ func (az *Cloud) GetResourceGroups() (sets.Set[string], error) { } // GetUnmanagedNodes returns a list of nodes not managed by Azure cloud provider (e.g. on-prem nodes). -func (az *Cloud) GetUnmanagedNodes() (sets.Set[string], error) { +func (az *Cloud) GetUnmanagedNodes() (*utilsets.IgnoreCaseSet, error) { // Kubelet won't set az.nodeInformerSynced, always return nil. 
if az.nodeInformerSynced == nil { return nil, nil @@ -1478,7 +1442,7 @@ func (az *Cloud) GetUnmanagedNodes() (sets.Set[string], error) { return nil, fmt.Errorf("node informer is not synced when trying to GetUnmanagedNodes") } - return sets.New(az.unmanagedNodes.UnsortedList()...), nil + return utilsets.NewString(az.unmanagedNodes.UnsortedList()...), nil } // ShouldNodeExcludedFromLoadBalancer returns true if node is unmanaged, in external resource group or labeled with "node.kubernetes.io/exclude-from-external-load-balancers". @@ -1502,17 +1466,17 @@ func (az *Cloud) ShouldNodeExcludedFromLoadBalancer(nodeName string) (bool, erro return az.excludeLoadBalancerNodes.Has(nodeName), nil } -func (az *Cloud) getActiveNodesByLoadBalancerName(lbName string) sets.Set[string] { +func (az *Cloud) getActiveNodesByLoadBalancerName(lbName string) *utilsets.IgnoreCaseSet { az.multipleStandardLoadBalancersActiveNodesLock.Lock() defer az.multipleStandardLoadBalancersActiveNodesLock.Unlock() for _, multiSLBConfig := range az.MultipleStandardLoadBalancerConfigurations { - if strings.EqualFold(strings.TrimSuffix(lbName, consts.InternalLoadBalancerNameSuffix), multiSLBConfig.Name) { + if strings.EqualFold(trimSuffixIgnoreCase(lbName, consts.InternalLoadBalancerNameSuffix), multiSLBConfig.Name) { return multiSLBConfig.ActiveNodes } } - return sets.New[string]() + return utilsets.NewString() } func isNodeReady(node *v1.Node) bool { diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go index e1ba039a8..d4c966e85 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go @@ -18,35 +18,18 @@ package provider import ( "context" - "errors" "fmt" "net/http" "path" - "regexp" "strings" - "sync" - "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" - "github.com/Azure/go-autorest/autorest/azure" - - "k8s.io/apimachinery/pkg/types" - kwait "k8s.io/apimachinery/pkg/util/wait" - cloudprovider "k8s.io/cloud-provider" - volerr "k8s.io/cloud-provider/volume/errors" "k8s.io/klog/v2" - "k8s.io/utils/pointer" - azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" - "sigs.k8s.io/cloud-provider-azure/pkg/consts" - "sigs.k8s.io/cloud-provider-azure/pkg/retry" + "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient" ) const ( - // Disk Caching is not supported for disks 4 TiB and larger - // https://docs.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance#disk-caching - diskCachingLimit = 4096 // GiB - maxLUN = 64 // max number of LUNs per VM errStatusCode400 = "statuscode=400" errInvalidParameter = `code="invalidparameter"` @@ -56,68 +39,11 @@ const ( attachDiskMapKeySuffix = "attachdiskmap" detachDiskMapKeySuffix = "detachdiskmap" - updateVMRetryDuration = time.Duration(1) * time.Second - updateVMRetryFactor = 3.0 - updateVMRetrySteps = 5 - - // default initial delay in milliseconds for batch disk attach/detach - defaultAttachDetachInitialDelayInMs = 1000 - // WriteAcceleratorEnabled support for Azure Write Accelerator on Azure Disks // https://docs.microsoft.com/azure/virtual-machines/windows/how-to-enable-write-accelerator WriteAcceleratorEnabled = "writeacceleratorenabled" - - // see https://docs.microsoft.com/en-us/rest/api/compute/disks/createorupdate#create-a-managed-disk-by-copying-a-snapshot. 
- diskSnapshotPath = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/snapshots/%s" - - // see https://docs.microsoft.com/en-us/rest/api/compute/disks/createorupdate#create-a-managed-disk-from-an-existing-managed-disk-in-the-same-or-different-subscription. - managedDiskPath = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s" ) -var defaultBackOff = kwait.Backoff{ - Steps: 20, - Duration: 2 * time.Second, - Factor: 1.5, - Jitter: 0.0, -} - -var updateVMBackoff = kwait.Backoff{ - Duration: updateVMRetryDuration, - Factor: updateVMRetryFactor, - Steps: updateVMRetrySteps, -} - -var ( - managedDiskPathRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/disks/(.+)`) - diskSnapshotPathRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/snapshots/(.+)`) - errorCodeRE = regexp.MustCompile(`Code="(.*?)".*`) -) - -type controllerCommon struct { - diskStateMap sync.Map // - lockMap *lockMap - cloud *Cloud - // disk queue that is waiting for attach or detach on specific node - // > - attachDiskMap sync.Map - detachDiskMap sync.Map - // DisableUpdateCache whether disable update cache in disk attach/detach - DisableUpdateCache bool - // DisableDiskLunCheck whether disable disk lun check after disk attach/detach - DisableDiskLunCheck bool - // AttachDetachInitialDelayInMs determines initial delay in milliseconds for batch disk attach/detach - AttachDetachInitialDelayInMs int -} - -// AttachDiskOptions attach disk options -type AttachDiskOptions struct { - CachingMode compute.CachingTypes - DiskName string - DiskEncryptionSetID string - WriteAcceleratorEnabled bool - Lun int32 -} - // ExtendedLocation contains additional info about the location of resources. type ExtendedLocation struct { // Name - The name of the extended location. 
@@ -126,504 +52,13 @@ type ExtendedLocation struct { Type string `json:"type,omitempty"` } -// AttachDisk attaches a disk to vm -// occupiedLuns is used to avoid conflict with other disk attach in k8s VolumeAttachments -// return (lun, error) -func (c *controllerCommon) AttachDisk(ctx context.Context, diskName, diskURI string, nodeName types.NodeName, - cachingMode compute.CachingTypes, disk *compute.Disk, occupiedLuns []int) (int32, error) { - diskEncryptionSetID := "" - writeAcceleratorEnabled := false - - // there is possibility that disk is nil when GetDisk is throttled - // don't check disk state when GetDisk is throttled - if disk != nil { - if disk.ManagedBy != nil && (disk.MaxShares == nil || *disk.MaxShares <= 1) { - vmset, err := c.cloud.GetNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe) - if err != nil { - return -1, err - } - attachedNode, err := vmset.GetNodeNameByProviderID(*disk.ManagedBy) - if err != nil { - return -1, err - } - if strings.EqualFold(string(nodeName), string(attachedNode)) { - klog.Warningf("volume %s is actually attached to current node %s, invalidate vm cache and return error", diskURI, nodeName) - // update VM(invalidate vm cache) - if errUpdate := c.UpdateVM(ctx, nodeName); errUpdate != nil { - return -1, errUpdate - } - lun, _, err := c.GetDiskLun(diskName, diskURI, nodeName) - return lun, err - } - - attachErr := fmt.Sprintf( - "disk(%s) already attached to node(%s), could not be attached to node(%s)", - diskURI, *disk.ManagedBy, nodeName) - klog.V(2).Infof("found dangling volume %s attached to node %s, could not be attached to node(%s)", diskURI, attachedNode, nodeName) - return -1, volerr.NewDanglingError(attachErr, attachedNode, "") - } - - if disk.DiskProperties != nil { - if disk.DiskProperties.DiskSizeGB != nil && *disk.DiskProperties.DiskSizeGB >= diskCachingLimit && cachingMode != compute.CachingTypesNone { - // Disk Caching is not supported for disks 4 TiB and larger - // https://docs.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance#disk-caching - cachingMode = compute.CachingTypesNone - klog.Warningf("size of disk(%s) is %dGB which is bigger than limit(%dGB), set cacheMode as None", - diskURI, *disk.DiskProperties.DiskSizeGB, diskCachingLimit) - } - - if disk.DiskProperties.Encryption != nil && - disk.DiskProperties.Encryption.DiskEncryptionSetID != nil { - diskEncryptionSetID = *disk.DiskProperties.Encryption.DiskEncryptionSetID - } - - if disk.DiskProperties.DiskState != compute.Unattached && (disk.MaxShares == nil || *disk.MaxShares <= 1) { - return -1, fmt.Errorf("state of disk(%s) is %s, not in expected %s state", diskURI, disk.DiskProperties.DiskState, compute.Unattached) - } - } - - if v, ok := disk.Tags[WriteAcceleratorEnabled]; ok { - if v != nil && strings.EqualFold(*v, "true") { - writeAcceleratorEnabled = true - } - } - } - - options := AttachDiskOptions{ - Lun: -1, - DiskName: diskName, - CachingMode: cachingMode, - DiskEncryptionSetID: diskEncryptionSetID, - WriteAcceleratorEnabled: writeAcceleratorEnabled, - } - node := strings.ToLower(string(nodeName)) - diskuri := strings.ToLower(diskURI) - requestNum, err := c.insertAttachDiskRequest(diskuri, node, &options) - if err != nil { - return -1, err - } - - c.lockMap.LockEntry(node) - unlock := false - defer func() { - if !unlock { - c.lockMap.UnlockEntry(node) - } - }() - - if c.AttachDetachInitialDelayInMs > 0 && requestNum == 1 { - klog.V(2).Infof("wait %dms for more requests on node %s, current disk attach: %s", c.AttachDetachInitialDelayInMs, node, 
diskURI) - time.Sleep(time.Duration(c.AttachDetachInitialDelayInMs) * time.Millisecond) - } - - diskMap, err := c.cleanAttachDiskRequests(node) - if err != nil { - return -1, err - } - - lun, err := c.SetDiskLun(nodeName, diskuri, diskMap, occupiedLuns) - if err != nil { - return -1, err - } - - klog.V(2).Infof("Trying to attach volume %s lun %d to node %s, diskMap len:%d, %+v", diskURI, lun, nodeName, len(diskMap), diskMap) - if len(diskMap) == 0 { - if !c.DisableDiskLunCheck { - // always check disk lun after disk attach complete - diskLun, vmState, errGetLun := c.GetDiskLun(diskName, diskURI, nodeName) - if errGetLun != nil { - return -1, fmt.Errorf("disk(%s) could not be found on node(%s), vmState: %s, error: %w", diskURI, nodeName, pointer.StringDeref(vmState, ""), errGetLun) - } - lun = diskLun - } - return lun, nil - } - - vmset, err := c.cloud.GetNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe) - if err != nil { - return -1, err - } - c.diskStateMap.Store(disk, "attaching") - defer c.diskStateMap.Delete(disk) - - defer func() { - // invalidate the cache if there is error in disk attach - if err != nil { - _ = vmset.DeleteCacheForNode(string(nodeName)) - } - }() - - var future *azure.Future - future, err = vmset.AttachDisk(ctx, nodeName, diskMap) - if err != nil { - return -1, err - } - // err will be handled by waitForUpdateResult below - if err = c.waitForUpdateResult(ctx, vmset, nodeName, future, err); err != nil { - return -1, err - } - - if !c.DisableDiskLunCheck { - // always check disk lun after disk attach complete - diskLun, vmState, errGetLun := c.GetDiskLun(diskName, diskURI, nodeName) - if errGetLun != nil { - return -1, fmt.Errorf("disk(%s) could not be found on node(%s), vmState: %s, error: %w", diskURI, nodeName, pointer.StringDeref(vmState, ""), errGetLun) - } - lun = diskLun - } - return lun, nil -} - -// waitForUpdateResult handles asynchronous VM update operations and retries with backoff if OperationPreempted error is observed -func (c *controllerCommon) waitForUpdateResult(ctx context.Context, vmset VMSet, nodeName types.NodeName, future *azure.Future, updateErr error) (err error) { - err = updateErr - if err == nil { - err = vmset.WaitForUpdateResult(ctx, future, nodeName, "attach_disk") - } - - if vmUpdateRequired(future, err) { - if derr := kwait.ExponentialBackoffWithContext(ctx, updateVMBackoff, func(ctx context.Context) (bool, error) { - klog.Errorf("Retry VM Update on node (%s) due to error (%v)", nodeName, err) - future, err = vmset.UpdateVMAsync(ctx, nodeName) - if err == nil { - err = vmset.WaitForUpdateResult(ctx, future, nodeName, "attach_disk") - } - return !vmUpdateRequired(future, err), nil - }); derr != nil { - err = derr - return - } - } - - if err != nil && configAccepted(future) { - err = retry.NewPartialUpdateError(err.Error()) - } - return -} - -// insertAttachDiskRequest return (attachDiskRequestQueueLength, error) -func (c *controllerCommon) insertAttachDiskRequest(diskURI, nodeName string, options *AttachDiskOptions) (int, error) { - var diskMap map[string]*AttachDiskOptions - attachDiskMapKey := nodeName + attachDiskMapKeySuffix - c.lockMap.LockEntry(attachDiskMapKey) - defer c.lockMap.UnlockEntry(attachDiskMapKey) - v, ok := c.attachDiskMap.Load(nodeName) - if ok { - if diskMap, ok = v.(map[string]*AttachDiskOptions); !ok { - return -1, fmt.Errorf("convert attachDiskMap failure on node(%s)", nodeName) - } - } else { - diskMap = make(map[string]*AttachDiskOptions) - c.attachDiskMap.Store(nodeName, diskMap) - } - // insert attach disk 
request to queue - _, ok = diskMap[diskURI] - if ok { - klog.V(2).Infof("azureDisk - duplicated attach disk(%s) request on node(%s)", diskURI, nodeName) - } else { - diskMap[diskURI] = options - } - return len(diskMap), nil -} - -// clean up attach disk requests -// return original attach disk requests -func (c *controllerCommon) cleanAttachDiskRequests(nodeName string) (map[string]*AttachDiskOptions, error) { - var diskMap map[string]*AttachDiskOptions - - attachDiskMapKey := nodeName + attachDiskMapKeySuffix - c.lockMap.LockEntry(attachDiskMapKey) - defer c.lockMap.UnlockEntry(attachDiskMapKey) - v, ok := c.attachDiskMap.Load(nodeName) - if !ok { - return diskMap, nil - } - if diskMap, ok = v.(map[string]*AttachDiskOptions); !ok { - return diskMap, fmt.Errorf("convert attachDiskMap failure on node(%s)", nodeName) - } - c.attachDiskMap.Store(nodeName, make(map[string]*AttachDiskOptions)) - return diskMap, nil -} - -// DetachDisk detaches a disk from VM -func (c *controllerCommon) DetachDisk(ctx context.Context, diskName, diskURI string, nodeName types.NodeName) error { - if _, err := c.cloud.InstanceID(ctx, nodeName); err != nil { - if errors.Is(err, cloudprovider.InstanceNotFound) { - // if host doesn't exist, no need to detach - klog.Warningf("azureDisk - failed to get azure instance id(%s), DetachDisk(%s) will assume disk is already detached", - nodeName, diskURI) - return nil - } - klog.Warningf("failed to get azure instance id (%v)", err) - return fmt.Errorf("failed to get azure instance id for node %q: %w", nodeName, err) - } - - vmset, err := c.cloud.GetNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe) - if err != nil { - return err - } - - node := strings.ToLower(string(nodeName)) - disk := strings.ToLower(diskURI) - requestNum, err := c.insertDetachDiskRequest(diskName, disk, node) - if err != nil { - return err - } - - c.lockMap.LockEntry(node) - defer c.lockMap.UnlockEntry(node) - - if c.AttachDetachInitialDelayInMs > 0 && requestNum == 1 { - klog.V(2).Infof("wait %dms for more requests on node %s, current disk detach: %s", c.AttachDetachInitialDelayInMs, node, diskURI) - time.Sleep(time.Duration(c.AttachDetachInitialDelayInMs) * time.Millisecond) - } - diskMap, err := c.cleanDetachDiskRequests(node) - if err != nil { - return err - } - - klog.V(2).Infof("Trying to detach volume %s from node %s, diskMap len:%d, %s", diskURI, nodeName, len(diskMap), diskMap) - if len(diskMap) > 0 { - c.diskStateMap.Store(disk, "detaching") - defer c.diskStateMap.Delete(disk) - if err = vmset.DetachDisk(ctx, nodeName, diskMap); err != nil { - if isInstanceNotFoundError(err) { - // if host doesn't exist, no need to detach - klog.Warningf("azureDisk - got InstanceNotFoundError(%v), DetachDisk(%s) will assume disk is already detached", - err, diskURI) - return nil - } - } - } - - if err != nil { - klog.Errorf("azureDisk - detach disk(%s, %s) failed, err: %v", diskName, diskURI, err) - return err - } - - if !c.DisableDiskLunCheck { - // always check disk lun after disk detach complete - lun, vmState, errGetLun := c.GetDiskLun(diskName, diskURI, nodeName) - if errGetLun == nil || !strings.Contains(errGetLun.Error(), consts.CannotFindDiskLUN) { - return fmt.Errorf("disk(%s) is still attached to node(%s) on lun(%d), vmState: %s, error: %w", diskURI, nodeName, lun, pointer.StringDeref(vmState, ""), errGetLun) - } - } - - klog.V(2).Infof("azureDisk - detach disk(%s, %s) succeeded", diskName, diskURI) - return nil -} - -// UpdateVM updates a vm -func (c *controllerCommon) UpdateVM(ctx context.Context, 
nodeName types.NodeName) error { - vmset, err := c.cloud.GetNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe) - if err != nil { - return err - } - node := strings.ToLower(string(nodeName)) - c.lockMap.LockEntry(node) - defer c.lockMap.UnlockEntry(node) - - defer func() { - _ = vmset.DeleteCacheForNode(string(nodeName)) - }() - - klog.V(2).Infof("azureDisk - update: vm(%s)", nodeName) - return vmset.UpdateVM(ctx, nodeName) -} - -// insertDetachDiskRequest return (detachDiskRequestQueueLength, error) -func (c *controllerCommon) insertDetachDiskRequest(diskName, diskURI, nodeName string) (int, error) { - var diskMap map[string]string - detachDiskMapKey := nodeName + detachDiskMapKeySuffix - c.lockMap.LockEntry(detachDiskMapKey) - defer c.lockMap.UnlockEntry(detachDiskMapKey) - v, ok := c.detachDiskMap.Load(nodeName) - if ok { - if diskMap, ok = v.(map[string]string); !ok { - return -1, fmt.Errorf("convert detachDiskMap failure on node(%s)", nodeName) - } - } else { - diskMap = make(map[string]string) - c.detachDiskMap.Store(nodeName, diskMap) - } - // insert detach disk request to queue - _, ok = diskMap[diskURI] - if ok { - klog.V(2).Infof("azureDisk - duplicated detach disk(%s) request on node(%s)", diskURI, nodeName) - } else { - diskMap[diskURI] = diskName - } - return len(diskMap), nil -} - -// clean up detach disk requests -// return original detach disk requests -func (c *controllerCommon) cleanDetachDiskRequests(nodeName string) (map[string]string, error) { - var diskMap map[string]string - - detachDiskMapKey := nodeName + detachDiskMapKeySuffix - c.lockMap.LockEntry(detachDiskMapKey) - defer c.lockMap.UnlockEntry(detachDiskMapKey) - v, ok := c.detachDiskMap.Load(nodeName) - if !ok { - return diskMap, nil - } - if diskMap, ok = v.(map[string]string); !ok { - return diskMap, fmt.Errorf("convert detachDiskMap failure on node(%s)", nodeName) - } - // clean up original requests in disk map - c.detachDiskMap.Store(nodeName, make(map[string]string)) - return diskMap, nil -} - -// GetNodeDataDisks invokes vmSet interfaces to get data disks for the node. -func (c *controllerCommon) GetNodeDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, *string, error) { - vmset, err := c.cloud.GetNodeVMSet(nodeName, crt) - if err != nil { - return nil, nil, err - } - - return vmset.GetDataDisks(nodeName, crt) -} - -// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI. -func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, *string, error) { - // GetNodeDataDisks need to fetch the cached data/fresh data if cache expired here - // to ensure we get LUN based on latest entry. 
- disks, provisioningState, err := c.GetNodeDataDisks(nodeName, azcache.CacheReadTypeDefault) - if err != nil { - klog.Errorf("error of getting data disks for node %s: %v", nodeName, err) - return -1, provisioningState, err - } - - for _, disk := range disks { - if disk.Lun != nil && (disk.Name != nil && diskName != "" && strings.EqualFold(*disk.Name, diskName)) || - (disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && strings.EqualFold(*disk.Vhd.URI, diskURI)) || - (disk.ManagedDisk != nil && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) { - if disk.ToBeDetached != nil && *disk.ToBeDetached { - klog.Warningf("azureDisk - find disk(ToBeDetached): lun %d name %s uri %s", *disk.Lun, diskName, diskURI) - } else { - // found the disk - klog.V(2).Infof("azureDisk - find disk: lun %d name %s uri %s", *disk.Lun, diskName, diskURI) - return *disk.Lun, provisioningState, nil - } - } - } - return -1, provisioningState, fmt.Errorf("%s for disk %s", consts.CannotFindDiskLUN, diskName) -} - -// SetDiskLun find unused luns and allocate lun for every disk in diskMap. -// occupiedLuns is used to avoid conflict with other disk attach in k8s VolumeAttachments -// Return lun of diskURI, -1 if all luns are used. -func (c *controllerCommon) SetDiskLun(nodeName types.NodeName, diskURI string, diskMap map[string]*AttachDiskOptions, occupiedLuns []int) (int32, error) { - disks, _, err := c.GetNodeDataDisks(nodeName, azcache.CacheReadTypeDefault) - if err != nil { - klog.Errorf("error of getting data disks for node %s: %v", nodeName, err) - return -1, err - } - - lun := int32(-1) - _, isDiskInMap := diskMap[diskURI] - used := make([]bool, maxLUN) - for _, disk := range disks { - if disk.Lun != nil { - used[*disk.Lun] = true - if !isDiskInMap { - // find lun of diskURI since diskURI is not in diskMap - if disk.ManagedDisk != nil && strings.EqualFold(*disk.ManagedDisk.ID, diskURI) { - lun = *disk.Lun - } - } - } - } - if !isDiskInMap && lun < 0 { - return -1, fmt.Errorf("could not find disk(%s) in current disk list(len: %d) nor in diskMap(%v)", diskURI, len(disks), diskMap) - } - if len(diskMap) == 0 { - // attach disk request is empty, return directly - return lun, nil - } - - for _, lun := range occupiedLuns { - used[int32(lun)] = true - } - - // allocate lun for every disk in diskMap - var diskLuns []int32 - count := 0 - for k, v := range used { - if !v { - diskLuns = append(diskLuns, int32(k)) - count++ - if count >= len(diskMap) { - break - } - } - } - - if len(diskLuns) != len(diskMap) { - return -1, fmt.Errorf("could not find enough disk luns(current: %d) for diskMap(%v, len=%d), diskURI(%s)", - len(diskLuns), diskMap, len(diskMap), diskURI) - } - - count = 0 - for uri, opt := range diskMap { - if opt == nil { - return -1, fmt.Errorf("unexpected nil pointer in diskMap(%v), diskURI(%s)", diskMap, diskURI) - } - if strings.EqualFold(uri, diskURI) { - lun = diskLuns[count] - } - opt.Lun = diskLuns[count] - count++ - } - if lun < 0 { - return lun, fmt.Errorf("could not find lun of diskURI(%s), diskMap(%v)", diskURI, diskMap) - } - return lun, nil -} - -// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName. -func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) { - attached := make(map[string]bool) - for _, diskName := range diskNames { - attached[diskName] = false - } - - // doing stalled read for GetNodeDataDisks to ensure we don't call ARM - // for every reconcile call. 
The cache is invalidated after Attach/Detach - // disk. So the new entry will be fetched and cached the first time reconcile - // loop runs after the Attach/Disk OP which will reflect the latest model. - disks, _, err := c.GetNodeDataDisks(nodeName, azcache.CacheReadTypeUnsafe) - if err != nil { - if errors.Is(err, cloudprovider.InstanceNotFound) { - // if host doesn't exist, no need to detach - klog.Warningf("azureDisk - Cannot find node %s, DisksAreAttached will assume disks %v are not attached to it.", - nodeName, diskNames) - return attached, nil - } - - return attached, err - } - - for _, disk := range disks { - for _, diskName := range diskNames { - if disk.Name != nil && diskName != "" && strings.EqualFold(*disk.Name, diskName) { - attached[diskName] = true - } - } - } - - return attached, nil -} - -func (c *controllerCommon) filterNonExistingDisks(ctx context.Context, unfilteredDisks []compute.DataDisk) []compute.DataDisk { +func FilterNonExistingDisks(ctx context.Context, diskClient diskclient.Interface, unfilteredDisks []compute.DataDisk) []compute.DataDisk { filteredDisks := []compute.DataDisk{} for _, disk := range unfilteredDisks { filter := false if disk.ManagedDisk != nil && disk.ManagedDisk.ID != nil { diskURI := *disk.ManagedDisk.ID - exist, err := c.cloud.checkDiskExists(ctx, diskURI) + exist, err := checkDiskExists(ctx, diskClient, diskURI) if err != nil { klog.Errorf("checkDiskExists(%s) failed with error: %v", diskURI, err) } else { @@ -642,14 +77,14 @@ func (c *controllerCommon) filterNonExistingDisks(ctx context.Context, unfiltere return filteredDisks } -func (c *controllerCommon) checkDiskExists(ctx context.Context, diskURI string) (bool, error) { +func checkDiskExists(ctx context.Context, diskClient diskclient.Interface, diskURI string) (bool, error) { diskName := path.Base(diskURI) resourceGroup, subsID, err := getInfoFromDiskURI(diskURI) if err != nil { return false, err } - if _, rerr := c.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName); rerr != nil { + if _, rerr := diskClient.Get(ctx, subsID, resourceGroup, diskName); rerr != nil { if rerr.HTTPStatusCode == http.StatusNotFound { return false, nil } @@ -659,73 +94,13 @@ func (c *controllerCommon) checkDiskExists(ctx context.Context, diskURI string) return true, nil } -func vmUpdateRequired(future *azure.Future, err error) bool { - errCode := getAzureErrorCode(err) - return configAccepted(future) && errCode == consts.OperationPreemptedErrorCode -} - -func getValidCreationData(subscriptionID, resourceGroup string, options *ManagedDiskOptions) (compute.CreationData, error) { - if options.SourceResourceID == "" { - return compute.CreationData{ - CreateOption: compute.Empty, - PerformancePlus: options.PerformancePlus, - }, nil - } - - sourceResourceID := options.SourceResourceID - switch options.SourceType { - case sourceSnapshot: - if match := diskSnapshotPathRE.FindString(sourceResourceID); match == "" { - sourceResourceID = fmt.Sprintf(diskSnapshotPath, subscriptionID, resourceGroup, sourceResourceID) - } - - case sourceVolume: - if match := managedDiskPathRE.FindString(sourceResourceID); match == "" { - sourceResourceID = fmt.Sprintf(managedDiskPath, subscriptionID, resourceGroup, sourceResourceID) - } - default: - return compute.CreationData{ - CreateOption: compute.Empty, - PerformancePlus: options.PerformancePlus, - }, nil - } - - splits := strings.Split(sourceResourceID, "/") - if len(splits) > 9 { - if options.SourceType == sourceSnapshot { - return compute.CreationData{}, 
fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, diskSnapshotPathRE) - } - return compute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, managedDiskPathRE) - } - return compute.CreationData{ - CreateOption: compute.Copy, - SourceResourceID: &sourceResourceID, - PerformancePlus: options.PerformancePlus, - }, nil -} - -func isInstanceNotFoundError(err error) bool { - errMsg := strings.ToLower(err.Error()) - if strings.Contains(errMsg, strings.ToLower(consts.VmssVMNotActiveErrorMessage)) { - return true +// get resource group name, subs id from a managed disk URI, e.g. return {group-name}, {sub-id} according to +// /subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id} +// according to https://docs.microsoft.com/en-us/rest/api/compute/disks/get +func getInfoFromDiskURI(diskURI string) (string, string, error) { + fields := strings.Split(diskURI, "/") + if len(fields) != 9 || strings.ToLower(fields[3]) != "resourcegroups" { + return "", "", fmt.Errorf("invalid disk URI: %s", diskURI) } - return strings.Contains(errMsg, errStatusCode400) && strings.Contains(errMsg, errInvalidParameter) && strings.Contains(errMsg, errTargetInstanceIds) -} - -// getAzureErrorCode uses regex to parse out the error code encapsulated in the error string. -func getAzureErrorCode(err error) string { - if err == nil { - return "" - } - matches := errorCodeRE.FindStringSubmatch(err.Error()) - if matches == nil { - return "" - } - return matches[1] -} - -// configAccepted returns true if storage profile change had been committed (i.e. HTTP status code == 2xx) and returns false otherwise. -func configAccepted(future *azure.Future) bool { - // if status code indicates success, the storage profile change was committed - return future != nil && future.Response() != nil && future.Response().StatusCode/100 == 2 + return fields[4], fields[2], nil } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go index 229e061b5..e47b8296d 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go @@ -22,6 +22,7 @@ import ( "net/http" "strings" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" "github.com/Azure/go-autorest/autorest/azure" @@ -35,16 +36,16 @@ import ( ) // AttachDisk attaches a disk to vm -func (as *availabilitySet) AttachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) (*azure.Future, error) { +func (as *availabilitySet) AttachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) error { vm, err := as.getVirtualMachine(nodeName, azcache.CacheReadTypeDefault) if err != nil { - return nil, err + return err } vmName := mapNodeNameToVMName(nodeName) nodeResourceGroup, err := as.GetNodeResourceGroup(vmName) if err != nil { - return nil, err + return err } disks := make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks)) @@ -60,7 +61,7 @@ func (as *availabilitySet) AttachDisk(ctx context.Context, nodeName types.NodeNa attached = true break } - return nil, fmt.Errorf("disk(%s) already attached to node(%s) on LUN(%d), but target LUN is %d", diskURI, nodeName, *disk.Lun, 
opt.Lun) + return fmt.Errorf("disk(%s) already attached to node(%s) on LUN(%d), but target LUN is %d", diskURI, nodeName, *disk.Lun, opt.Lun) } } if attached { @@ -106,7 +107,7 @@ func (as *availabilitySet) AttachDisk(ctx context.Context, nodeName types.NodeNa klog.Errorf("azureDisk - attach disk list(%v) on rg(%s) vm(%s) failed, err: %+v", diskMap, nodeResourceGroup, vmName, rerr) if rerr.HTTPStatusCode == http.StatusNotFound { klog.Errorf("azureDisk - begin to filterNonExistingDisks(%v) on rg(%s) vm(%s)", diskMap, nodeResourceGroup, vmName) - disks := as.filterNonExistingDisks(ctx, *newVM.VirtualMachineProperties.StorageProfile.DataDisks) + disks := FilterNonExistingDisks(ctx, as.DisksClient, *newVM.VirtualMachineProperties.StorageProfile.DataDisks) newVM.VirtualMachineProperties.StorageProfile.DataDisks = &disks future, rerr = as.VirtualMachinesClient.UpdateAsync(ctx, nodeResourceGroup, vmName, newVM, "attach_disk") } @@ -114,13 +115,13 @@ func (as *availabilitySet) AttachDisk(ctx context.Context, nodeName types.NodeNa klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%v) returned with %v", nodeResourceGroup, vmName, diskMap, rerr) if rerr != nil { - return future, rerr.Error() + return rerr.Error() } - return future, nil + return as.WaitForUpdateResult(ctx, future, nodeName, "attach_disk") } func (as *availabilitySet) DeleteCacheForNode(nodeName string) error { - err := as.cloud.vmCache.Delete(nodeName) + err := as.vmCache.Delete(nodeName) if err == nil { klog.V(2).Infof("DeleteCacheForNode(%s) successfully", nodeName) } else { @@ -187,7 +188,7 @@ func (as *availabilitySet) DetachDisk(ctx context.Context, nodeName types.NodeNa // only log here, next action is to update VM status with original meta data klog.Warningf("detach azure disk on node(%s): disk list(%s) not found", nodeName, diskMap) } else { - if strings.EqualFold(as.cloud.Environment.Name, consts.AzureStackCloudName) && !as.Config.DisableAzureStackCloud { + if strings.EqualFold(as.Environment.Name, consts.AzureStackCloudName) && !as.Config.DisableAzureStackCloud { // Azure stack does not support ToBeDetached flag, use original way to detach disk newDisks := []compute.DataDisk{} for _, disk := range disks { @@ -226,7 +227,7 @@ func (as *availabilitySet) DetachDisk(ctx context.Context, nodeName types.NodeNa klog.Errorf("azureDisk - detach disk list(%s) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, vmName, rerr) if rerr.HTTPStatusCode == http.StatusNotFound { klog.Errorf("azureDisk - begin to filterNonExistingDisks(%v) on rg(%s) vm(%s)", diskMap, nodeResourceGroup, vmName) - disks := as.filterNonExistingDisks(ctx, *vm.StorageProfile.DataDisks) + disks := FilterNonExistingDisks(ctx, as.DisksClient, *vm.StorageProfile.DataDisks) newVM.VirtualMachineProperties.StorageProfile.DataDisks = &disks result, rerr = as.VirtualMachinesClient.Update(ctx, nodeResourceGroup, vmName, newVM, "detach_disk") } @@ -264,15 +265,12 @@ func (as *availabilitySet) UpdateVMAsync(ctx context.Context, nodeName types.Nod } func (as *availabilitySet) updateCache(nodeName string, vm *compute.VirtualMachine) { - if as.ManagedDiskController.DisableUpdateCache { - return - } - as.cloud.vmCache.Update(nodeName, vm) + as.vmCache.Update(nodeName, vm) klog.V(2).Infof("updateCache(%s) successfully", nodeName) } // GetDataDisks gets a list of data disks attached to the node. 
-func (as *availabilitySet) GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, *string, error) { +func (as *availabilitySet) GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]*armcompute.DataDisk, *string, error) { vm, err := as.getVirtualMachine(nodeName, crt) if err != nil { return nil, nil, err @@ -282,5 +280,9 @@ func (as *availabilitySet) GetDataDisks(nodeName types.NodeName, crt azcache.Azu return nil, nil, nil } - return *vm.StorageProfile.DataDisks, vm.ProvisioningState, nil + result, err := ToArmcomputeDisk(*vm.StorageProfile.DataDisks) + if err != nil { + return nil, nil, err + } + return result, vm.ProvisioningState, nil } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go index 650c13f62..fada0393f 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go @@ -22,6 +22,7 @@ import ( "net/http" "strings" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" "github.com/Azure/go-autorest/autorest/azure" @@ -35,16 +36,16 @@ import ( ) // AttachDisk attaches a disk to vm -func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) (*azure.Future, error) { +func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) error { vmName := mapNodeNameToVMName(nodeName) vm, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault) if err != nil { - return nil, err + return err } nodeResourceGroup, err := ss.GetNodeResourceGroup(vmName) if err != nil { - return nil, err + return err } var disks []compute.DataDisk @@ -66,7 +67,7 @@ func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, dis attached = true break } - return nil, fmt.Errorf("disk(%s) already attached to node(%s) on LUN(%d), but target LUN is %d", diskURI, nodeName, *disk.Lun, opt.Lun) + return fmt.Errorf("disk(%s) already attached to node(%s) on LUN(%d), but target LUN is %d", diskURI, nodeName, *disk.Lun, opt.Lun) } } @@ -113,7 +114,7 @@ func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, dis klog.Errorf("azureDisk - attach disk list(%+v) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, nodeName, rerr) if rerr.HTTPStatusCode == http.StatusNotFound { klog.Errorf("azureDisk - begin to filterNonExistingDisks(%v) on rg(%s) vm(%s)", diskMap, nodeResourceGroup, nodeName) - disks := ss.filterNonExistingDisks(ctx, *newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks) + disks := FilterNonExistingDisks(ctx, ss.DisksClient, *newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks) newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks = &disks future, rerr = ss.VirtualMachineScaleSetVMsClient.UpdateAsync(ctx, nodeResourceGroup, vm.VMSSName, vm.InstanceID, newVM, "attach_disk") } @@ -121,9 +122,9 @@ func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, dis klog.V(2).Infof("azureDisk - update: rg(%s) vm(%s) - attach disk list(%+v) returned with %v", nodeResourceGroup, nodeName, diskMap, rerr) if rerr != nil { - return future, rerr.Error() + return rerr.Error() } - return future, nil + return ss.WaitForUpdateResult(ctx, 
future, nodeName, "attach_disk") } // WaitForUpdateResult waits for the response of the update request @@ -201,7 +202,7 @@ func (ss *ScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, dis // only log here, next action is to update VM status with original meta data klog.Warningf("detach azure disk on node(%s): disk list(%s) not found", nodeName, diskMap) } else { - if strings.EqualFold(ss.cloud.Environment.Name, consts.AzureStackCloudName) && !ss.Config.DisableAzureStackCloud { + if strings.EqualFold(ss.Environment.Name, consts.AzureStackCloudName) && !ss.Config.DisableAzureStackCloud { // Azure stack does not support ToBeDetached flag, use original way to detach disk var newDisks []compute.DataDisk for _, disk := range disks { @@ -243,7 +244,7 @@ func (ss *ScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, dis klog.Errorf("azureDisk - detach disk list(%s) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, nodeName, rerr) if rerr.HTTPStatusCode == http.StatusNotFound { klog.Errorf("azureDisk - begin to filterNonExistingDisks(%v) on rg(%s) vm(%s)", diskMap, nodeResourceGroup, nodeName) - disks := ss.filterNonExistingDisks(ctx, *newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks) + disks := FilterNonExistingDisks(ctx, ss.DisksClient, *newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks) newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks = &disks result, rerr = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, vm.VMSSName, vm.InstanceID, newVM, "detach_disk") } @@ -286,7 +287,7 @@ func (ss *ScaleSet) UpdateVMAsync(ctx context.Context, nodeName types.NodeName) } // GetDataDisks gets a list of data disks attached to the node. -func (ss *ScaleSet) GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, *string, error) { +func (ss *ScaleSet) GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]*armcompute.DataDisk, *string, error) { vm, err := ss.getVmssVM(string(nodeName), crt) if err != nil { return nil, nil, err @@ -298,8 +299,11 @@ func (ss *ScaleSet) GetDataDisks(nodeName types.NodeName, crt azcache.AzureCache if storageProfile == nil || storageProfile.DataDisks == nil { return nil, nil, nil } - - return *storageProfile.DataDisks, vm.AsVirtualMachineScaleSetVM().ProvisioningState, nil + result, err := ToArmcomputeDisk(*storageProfile.DataDisks) + if err != nil { + return nil, nil, err + } + return result, vm.AsVirtualMachineScaleSetVM().ProvisioningState, nil } return nil, nil, nil diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go index 9fa834934..d34ea871e 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go @@ -23,6 +23,7 @@ import ( "strings" "sync" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" "github.com/Azure/go-autorest/autorest/azure" "k8s.io/apimachinery/pkg/types" @@ -35,16 +36,16 @@ import ( ) // AttachDisk attaches a disk to vm -func (fs *FlexScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) (*azure.Future, error) { +func (fs *FlexScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, diskMap 
map[string]*AttachDiskOptions) error { vmName := mapNodeNameToVMName(nodeName) vm, err := fs.getVmssFlexVM(vmName, azcache.CacheReadTypeDefault) if err != nil { - return nil, err + return err } nodeResourceGroup, err := fs.GetNodeResourceGroup(vmName) if err != nil { - return nil, err + return err } disks := make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks)) @@ -60,7 +61,7 @@ func (fs *FlexScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, attached = true break } - return nil, fmt.Errorf("disk(%s) already attached to node(%s) on LUN(%d), but target LUN is %d", diskURI, nodeName, *disk.Lun, opt.Lun) + return fmt.Errorf("disk(%s) already attached to node(%s) on LUN(%d), but target LUN is %d", diskURI, nodeName, *disk.Lun, opt.Lun) } } if attached { @@ -107,7 +108,7 @@ func (fs *FlexScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, klog.Errorf("azureDisk - attach disk list(%+v) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, vmName, rerr) if rerr.HTTPStatusCode == http.StatusNotFound { klog.Errorf("azureDisk - begin to filterNonExistingDisks(%v) on rg(%s) vm(%s)", diskMap, nodeResourceGroup, vmName) - disks := fs.filterNonExistingDisks(ctx, *newVM.VirtualMachineProperties.StorageProfile.DataDisks) + disks := FilterNonExistingDisks(ctx, fs.DisksClient, *newVM.VirtualMachineProperties.StorageProfile.DataDisks) newVM.VirtualMachineProperties.StorageProfile.DataDisks = &disks future, rerr = fs.VirtualMachinesClient.UpdateAsync(ctx, nodeResourceGroup, *vm.Name, newVM, "attach_disk") } @@ -115,9 +116,9 @@ func (fs *FlexScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%+v) returned with %v", nodeResourceGroup, vmName, diskMap, rerr) if rerr != nil { - return future, rerr.Error() + return rerr.Error() } - return future, nil + return fs.WaitForUpdateResult(ctx, future, nodeName, "attach_disk") } // DetachDisk detaches a disk from VM @@ -156,7 +157,7 @@ func (fs *FlexScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, // only log here, next action is to update VM status with original meta data klog.Warningf("detach azure disk on node(%s): disk list(%s) not found", nodeName, diskMap) } else { - if strings.EqualFold(fs.cloud.Environment.Name, consts.AzureStackCloudName) && !fs.Config.DisableAzureStackCloud { + if strings.EqualFold(fs.Environment.Name, consts.AzureStackCloudName) && !fs.Config.DisableAzureStackCloud { // Azure stack does not support ToBeDetached flag, use original way to detach disk newDisks := []compute.DataDisk{} for _, disk := range disks { @@ -197,7 +198,7 @@ func (fs *FlexScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, klog.Errorf("azureDisk - detach disk list(%s) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, vmName, rerr) if rerr.HTTPStatusCode == http.StatusNotFound { klog.Errorf("azureDisk - begin to filterNonExistingDisks(%v) on rg(%s) vm(%s)", diskMap, nodeResourceGroup, vmName) - disks := fs.filterNonExistingDisks(ctx, *vm.StorageProfile.DataDisks) + disks := FilterNonExistingDisks(ctx, fs.DisksClient, *vm.StorageProfile.DataDisks) newVM.VirtualMachineProperties.StorageProfile.DataDisks = &disks result, rerr = fs.VirtualMachinesClient.Update(ctx, nodeResourceGroup, *vm.Name, newVM, "detach_disk") } @@ -263,9 +264,6 @@ func (fs *FlexScaleSet) UpdateVMAsync(ctx context.Context, nodeName types.NodeNa } func (fs *FlexScaleSet) updateCache(nodeName string, vm *compute.VirtualMachine) error { - 
if fs.ManagedDiskController.DisableUpdateCache { - return nil - } if vm == nil { return fmt.Errorf("vm is nil") } @@ -300,7 +298,7 @@ func (fs *FlexScaleSet) updateCache(nodeName string, vm *compute.VirtualMachine) } // GetDataDisks gets a list of data disks attached to the node. -func (fs *FlexScaleSet) GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, *string, error) { +func (fs *FlexScaleSet) GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]*armcompute.DataDisk, *string, error) { vm, err := fs.getVmssFlexVM(string(nodeName), crt) if err != nil { return nil, nil, err @@ -309,6 +307,9 @@ func (fs *FlexScaleSet) GetDataDisks(nodeName types.NodeName, crt azcache.AzureC if vm.StorageProfile.DataDisks == nil { return nil, nil, nil } - - return *vm.StorageProfile.DataDisks, vm.ProvisioningState, nil + result, err := ToArmcomputeDisk(*vm.StorageProfile.DataDisks) + if err != nil { + return nil, nil, err + } + return result, vm.ProvisioningState, nil } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go index 8ba4ac31a..60d6ab70f 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go @@ -25,7 +25,6 @@ import ( "go.uber.org/mock/gomock" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/record" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient" @@ -42,6 +41,7 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient" "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient" "sigs.k8s.io/cloud-provider-azure/pkg/consts" + utilsets "sigs.k8s.io/cloud-provider-azure/pkg/util/sets" ) // NewTestScaleSet creates a fake ScaleSet for unit test @@ -96,14 +96,15 @@ func GetTestCloud(ctrl *gomock.Controller) (az *Cloud) { VMType: consts.VMTypeStandard, LoadBalancerBackendPoolConfigurationType: consts.LoadBalancerBackendPoolConfigurationTypeNodeIPConfiguration, }, - nodeZones: map[string]sets.Set[string]{}, + nodeZones: map[string]*utilsets.IgnoreCaseSet{}, nodeInformerSynced: func() bool { return true }, nodeResourceGroups: map[string]string{}, - unmanagedNodes: sets.New[string](), - excludeLoadBalancerNodes: sets.New[string](), - nodePrivateIPs: map[string]sets.Set[string]{}, + unmanagedNodes: utilsets.NewString(), + excludeLoadBalancerNodes: utilsets.NewString(), + nodePrivateIPs: map[string]*utilsets.IgnoreCaseSet{}, routeCIDRs: map[string]string{}, eventRecorder: &record.FakeRecorder{}, + lockMap: newLockMap(), } az.DisksClient = mockdiskclient.NewMockInterface(ctrl) az.SnapshotsClient = mocksnapshotclient.NewMockInterface(ctrl) @@ -129,8 +130,6 @@ func GetTestCloud(ctrl *gomock.Controller) (az *Cloud) { az.LoadBalancerBackendPool = NewMockBackendPool(ctrl) az.storageAccountCache, _ = az.newStorageAccountCache() - _ = InitDiskControllers(az) - az.regionZonesMap = map[string][]string{az.Location: {"1", "2", "3"}} return az diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instances_v1.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instances_v1.go index e8a4a76f4..2e5c9ebbf 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instances_v1.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instances_v1.go @@ -35,12 +35,6 @@ import ( var _ cloudprovider.Instances = (*Cloud)(nil) 
const ( - vmPowerStatePrefix = "PowerState/" - vmPowerStateStopped = "stopped" - vmPowerStateDeallocated = "deallocated" - vmPowerStateDeallocating = "deallocating" - vmPowerStateUnknown = "unknown" - // nodeNameEnvironmentName is the environment variable name for getting node name. // It is only used for out-of-tree cloud provider. nodeNameEnvironmentName = "NODE_NAME" @@ -266,7 +260,7 @@ func (az *Cloud) InstanceShutdownByProviderID(_ context.Context, providerID stri status := strings.ToLower(powerStatus) provisioningSucceeded := strings.EqualFold(strings.ToLower(provisioningState), strings.ToLower(string(consts.ProvisioningStateSucceeded))) - return provisioningSucceeded && (status == vmPowerStateStopped || status == vmPowerStateDeallocated || status == vmPowerStateDeallocating), nil + return provisioningSucceeded && (status == consts.VMPowerStateStopped || status == consts.VMPowerStateDeallocated || status == consts.VMPowerStateDeallocating), nil } func (az *Cloud) isCurrentInstance(name types.NodeName, metadataVMName string) (bool, error) { diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go index 168efc90c..c4a4d2c50 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go @@ -36,7 +36,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/sets" cloudprovider "k8s.io/cloud-provider" servicehelpers "k8s.io/cloud-provider/service/helpers" "k8s.io/klog/v2" @@ -50,6 +49,7 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer" "sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil" "sigs.k8s.io/cloud-provider-azure/pkg/retry" + utilsets "sigs.k8s.io/cloud-provider-azure/pkg/util/sets" ) var _ cloudprovider.LoadBalancer = (*Cloud)(nil) @@ -269,7 +269,7 @@ func (az *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, ser } if !serviceExists { isOperationSucceeded = true - klog.V(2).Infof("UpdateLoadBalancer: skipping service %s because service is going to be deleted", service.Name) + klog.V(2).Infof("UpdateLoadBalancer: skipping service %s because service is going to be deleted", serviceName) return nil } @@ -394,10 +394,10 @@ func (az *Cloud) shouldChangeLoadBalancer(service *v1.Service, currLBName, clust return false } - lbName := strings.TrimSuffix(currLBName, consts.InternalLoadBalancerNameSuffix) + lbName := trimSuffixIgnoreCase(currLBName, consts.InternalLoadBalancerNameSuffix) // change the LB from vmSet dedicated to primary if the vmSet becomes the primary one if strings.EqualFold(lbName, vmSetName) { - if lbName != clusterName && + if !strings.EqualFold(lbName, clusterName) && strings.EqualFold(az.VMSet.GetPrimaryVMSetName(), vmSetName) { klog.V(2).Infof("shouldChangeLoadBalancer(%s, %s, %s): change the LB to another one", service.Name, currLBName, clusterName) return true @@ -593,15 +593,13 @@ func (az *Cloud) safeDeleteLoadBalancer(lb network.LoadBalancer, clusterName, vm // Remove corresponding nodes in ActiveNodes and nodesWithCorrectLoadBalancerByPrimaryVMSet. 
for i := range az.MultipleStandardLoadBalancerConfigurations { if strings.EqualFold( - strings.TrimSuffix(pointer.StringDeref(lb.Name, ""), consts.InternalLoadBalancerNameSuffix), + trimSuffixIgnoreCase(pointer.StringDeref(lb.Name, ""), consts.InternalLoadBalancerNameSuffix), az.MultipleStandardLoadBalancerConfigurations[i].Name, ) { - if az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes != nil { - for nodeName := range az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes { - az.nodesWithCorrectLoadBalancerByPrimaryVMSet.Delete(strings.ToLower(nodeName)) - } + for _, nodeName := range az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes.UnsortedList() { + az.nodesWithCorrectLoadBalancerByPrimaryVMSet.Delete(nodeName) } - az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes = sets.New[string]() + az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes = utilsets.NewString() break } } @@ -1597,12 +1595,9 @@ func (az *Cloud) reconcileMultipleStandardLoadBalancerConfigurations( lbName, ruleName, svcName, ) for i := range az.MultipleStandardLoadBalancerConfigurations { - if strings.EqualFold(strings.TrimSuffix(lbName, consts.InternalLoadBalancerNameSuffix), az.MultipleStandardLoadBalancerConfigurations[i].Name) { + if strings.EqualFold(trimSuffixIgnoreCase(lbName, consts.InternalLoadBalancerNameSuffix), az.MultipleStandardLoadBalancerConfigurations[i].Name) { az.multipleStandardLoadBalancersActiveServicesLock.Lock() - if az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices == nil { - az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices = sets.New[string]() - } - az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices.Insert(strings.ToLower(svcName)) + az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices = utilsets.SafeInsert(az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices, svcName) az.multipleStandardLoadBalancersActiveServicesLock.Unlock() klog.V(2).Infof("reconcileMultipleStandardLoadBalancerConfigurations: service(%s) is active on lb(%s)", svcName, lbName) } @@ -1918,9 +1913,9 @@ func (az *Cloud) removeNodeFromLBConfig(nodeNameToLBConfigIDXMap map[string]int, // removeDeletedNodesFromLoadBalancerConfigurations removes the deleted nodes // that do not exist in nodes list from the load balancer configurations func (az *Cloud) removeDeletedNodesFromLoadBalancerConfigurations(nodes []*v1.Node) map[string]int { - nodeNamesSet := sets.New[string]() + nodeNamesSet := utilsets.NewString() for _, node := range nodes { - nodeNamesSet.Insert(strings.ToLower(node.Name)) + nodeNamesSet.Insert(node.Name) } az.multipleStandardLoadBalancersActiveNodesLock.Lock() @@ -1930,12 +1925,12 @@ func (az *Cloud) removeDeletedNodesFromLoadBalancerConfigurations(nodes []*v1.No nodeNameToLBConfigIDXMap := make(map[string]int) for i, multiSLBConfig := range az.MultipleStandardLoadBalancerConfigurations { if multiSLBConfig.ActiveNodes != nil { - for nodeName := range multiSLBConfig.ActiveNodes { + for _, nodeName := range multiSLBConfig.ActiveNodes.UnsortedList() { if nodeNamesSet.Has(nodeName) { nodeNameToLBConfigIDXMap[nodeName] = i } else { klog.V(4).Infof("reconcileMultipleStandardLoadBalancerBackendNodes: node(%s) is gone, remove it from lb(%s)", nodeName, multiSLBConfig.Name) - az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes, _ = safeRemoveKeyFromStringsSet(az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes, strings.ToLower(nodeName)) + 
az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes.Delete(nodeName) } } } @@ -1966,19 +1961,19 @@ func (az *Cloud) accommodateNodesByPrimaryVMSet( multiSLBConfig := az.MultipleStandardLoadBalancerConfigurations[i] if strings.EqualFold(multiSLBConfig.PrimaryVMSet, vmSetName) { foundPrimaryLB := isLBInList(lbs, multiSLBConfig.Name) - if !foundPrimaryLB && !strings.EqualFold(strings.TrimSuffix(lbName, consts.InternalLoadBalancerNameSuffix), multiSLBConfig.Name) { + if !foundPrimaryLB && !strings.EqualFold(trimSuffixIgnoreCase(lbName, consts.InternalLoadBalancerNameSuffix), multiSLBConfig.Name) { klog.V(4).Infof("accommodateNodesByPrimaryVMSet: node(%s) should be on lb(%s) because of primary vmSet (%s), but the lb is not found and will not be created this time, will ignore the primaryVMSet", node.Name, multiSLBConfig.Name, vmSetName) continue } - az.nodesWithCorrectLoadBalancerByPrimaryVMSet.Store(strings.ToLower(node.Name), sets.Empty{}) + az.nodesWithCorrectLoadBalancerByPrimaryVMSet.Store(strings.ToLower(node.Name), struct{}{}) if !multiSLBConfig.ActiveNodes.Has(node.Name) { klog.V(4).Infof("accommodateNodesByPrimaryVMSet: node(%s) should be on lb(%s) because of primary vmSet (%s)", node.Name, multiSLBConfig.Name, vmSetName) az.removeNodeFromLBConfig(nodeNameToLBConfigIDXMap, node.Name) az.multipleStandardLoadBalancersActiveNodesLock.Lock() - az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes = safeAddKeyToStringsSet(az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes, strings.ToLower(node.Name)) + az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes = utilsets.SafeInsert(az.MultipleStandardLoadBalancerConfigurations[i].ActiveNodes, node.Name) az.multipleStandardLoadBalancersActiveNodesLock.Unlock() } break @@ -2018,7 +2013,7 @@ func (az *Cloud) accommodateNodesByNodeSelector( if nodeSelector.Matches(labels.Set(node.Labels)) { klog.V(4).Infof("accommodateNodesByNodeSelector: lb(%s) matches node(%s) labels", multiSLBConfig.Name, node.Name) found := isLBInList(lbs, multiSLBConfig.Name) - if !found && !strings.EqualFold(strings.TrimSuffix(lbName, consts.InternalLoadBalancerNameSuffix), multiSLBConfig.Name) { + if !found && !strings.EqualFold(trimSuffixIgnoreCase(lbName, consts.InternalLoadBalancerNameSuffix), multiSLBConfig.Name) { klog.V(4).Infof("accommodateNodesByNodeSelector: but the lb is not found and will not be created this time, will ignore this load balancer") continue } @@ -2039,7 +2034,7 @@ func (az *Cloud) accommodateNodesByNodeSelector( for i := len(eligibleLBsIDX) - 1; i >= 0; i-- { multiSLBConfig := az.MultipleStandardLoadBalancerConfigurations[eligibleLBsIDX[i]] found := isLBInList(lbs, multiSLBConfig.Name) - if !found && !strings.EqualFold(strings.TrimSuffix(lbName, consts.InternalLoadBalancerNameSuffix), multiSLBConfig.Name) { + if !found && !strings.EqualFold(trimSuffixIgnoreCase(lbName, consts.InternalLoadBalancerNameSuffix), multiSLBConfig.Name) { klog.V(4).Infof("accommodateNodesByNodeSelector: the load balancer %s is a valid placement target for node %s, but the lb is not found and will not be created this time, ignore this load balancer", multiSLBConfig.Name, node.Name) eligibleLBsIDX = append(eligibleLBsIDX[:i], eligibleLBsIDX[i+1:]...) 
} @@ -2077,7 +2072,7 @@ func (az *Cloud) accommodateNodesByNodeSelector( klog.V(4).Infof("accommodateNodesByNodeSelector: node(%s) should be on lb(%s) it is the eligible LB with fewest number of nodes", node.Name, az.MultipleStandardLoadBalancerConfigurations[minNodesIDX].Name) az.multipleStandardLoadBalancersActiveNodesLock.Lock() - az.MultipleStandardLoadBalancerConfigurations[minNodesIDX].ActiveNodes = safeAddKeyToStringsSet(az.MultipleStandardLoadBalancerConfigurations[minNodesIDX].ActiveNodes, strings.ToLower(node.Name)) + az.MultipleStandardLoadBalancerConfigurations[minNodesIDX].ActiveNodes = utilsets.SafeInsert(az.MultipleStandardLoadBalancerConfigurations[minNodesIDX].ActiveNodes, node.Name) az.multipleStandardLoadBalancersActiveNodesLock.Unlock() } @@ -2088,7 +2083,7 @@ func (az *Cloud) accommodateNodesByNodeSelector( func isLBInList(lbs *[]network.LoadBalancer, lbConfigName string) bool { if lbs != nil { for _, lb := range *lbs { - if strings.EqualFold(strings.TrimSuffix(pointer.StringDeref(lb.Name, ""), consts.InternalLoadBalancerNameSuffix), lbConfigName) { + if strings.EqualFold(trimSuffixIgnoreCase(pointer.StringDeref(lb.Name, ""), consts.InternalLoadBalancerNameSuffix), lbConfigName) { return true } } @@ -2135,20 +2130,17 @@ func (az *Cloud) reconcileMultipleStandardLoadBalancerBackendNodes( } func (az *Cloud) reconcileMultipleStandardLoadBalancerConfigurationStatus(wantLb bool, svcName, lbName string) { - lbName = strings.TrimSuffix(lbName, consts.InternalLoadBalancerNameSuffix) + lbName = trimSuffixIgnoreCase(lbName, consts.InternalLoadBalancerNameSuffix) for i := range az.MultipleStandardLoadBalancerConfigurations { if strings.EqualFold(lbName, az.MultipleStandardLoadBalancerConfigurations[i].Name) { az.multipleStandardLoadBalancersActiveServicesLock.Lock() - if az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices == nil { - az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices = sets.New[string]() - } if wantLb { klog.V(4).Infof("reconcileMultipleStandardLoadBalancerConfigurationStatus: service(%s) is active on lb(%s)", svcName, lbName) - az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices.Insert(strings.ToLower(svcName)) + az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices = utilsets.SafeInsert(az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices, svcName) } else { klog.V(4).Infof("reconcileMultipleStandardLoadBalancerConfigurationStatus: service(%s) is not active on lb(%s) any more", svcName, lbName) - az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices, _ = safeRemoveKeyFromStringsSet(az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices, strings.ToLower(svcName)) + az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices.Delete(svcName) } az.multipleStandardLoadBalancersActiveServicesLock.Unlock() break @@ -3298,7 +3290,7 @@ func (az *Cloud) safeDeletePublicIP(service *v1.Service, pipResourceGroup string // Check whether there are still load balancer rules referring to it. 
if len(referencedLBRules) > 0 { - referencedLBRuleIDs := sets.New[string]() + referencedLBRuleIDs := utilsets.NewString() for _, refer := range referencedLBRules { referencedLBRuleIDs.Insert(pointer.StringDeref(refer.ID, "")) } @@ -3793,7 +3785,7 @@ func getMostEligibleLBForService( var found bool if existingLBs != nil { for _, existingLB := range *existingLBs { - if strings.EqualFold(pointer.StringDeref(existingLB.Name, ""), eligibleLB) { + if strings.EqualFold(trimSuffixIgnoreCase(pointer.StringDeref(existingLB.Name, ""), consts.InternalLoadBalancerNameSuffix), eligibleLB) { found = true break } @@ -3810,7 +3802,7 @@ func getMostEligibleLBForService( ruleCount := 301 if existingLBs != nil { for _, existingLB := range *existingLBs { - if StringInSlice(pointer.StringDeref(existingLB.Name, ""), eligibleLBs) { + if StringInSlice(trimSuffixIgnoreCase(pointer.StringDeref(existingLB.Name, ""), consts.InternalLoadBalancerNameSuffix), eligibleLBs) { if existingLB.LoadBalancerPropertiesFormat != nil && existingLB.LoadBalancingRules != nil { if len(*existingLB.LoadBalancingRules) < ruleCount { @@ -3826,7 +3818,7 @@ func getMostEligibleLBForService( klog.V(4).Infof("getMostEligibleLBForService: choose %s with fewest %d rules", expectedLBName, ruleCount) } - return expectedLBName + return trimSuffixIgnoreCase(expectedLBName, consts.InternalLoadBalancerNameSuffix) } func (az *Cloud) getServiceCurrentLoadBalancerName(service *v1.Service) string { @@ -3844,9 +3836,8 @@ func (az *Cloud) getServiceCurrentLoadBalancerName(service *v1.Service) string { // 2. AllowServicePlacement flag. Default to true, if set to false, the new services will not be put onto the LB. // But the existing services that is using the LB will not be affected. // 3. ServiceLabelSelector. The service will be put onto the LB only if the service has the labels specified in the selector. -// If there is no ServiceLabel selector on the LB, all services can be valid. // 4. ServiceNamespaceSelector. The service will be put onto the LB only if the service is in the namespaces specified in the selector. -// If there is no ServiceNamespace selector on the LB, all services can be valid. +// 5. If there is no label/namespace selector on the LB, it can be a valid placement target if and only if the service has no other choice. func (az *Cloud) getEligibleLoadBalancersForService(service *v1.Service) ([]string, error) { var ( eligibleLBs []MultipleStandardLoadBalancerConfiguration @@ -3857,14 +3848,19 @@ func (az *Cloud) getEligibleLoadBalancersForService(service *v1.Service) ([]stri lbFailedPlacementFlag []string ) + logger := klog.Background(). + WithName("getEligibleLoadBalancersForService"). + WithValues("service", service.Name) + // 1. Service selects LBs defined in the annotation. // If there is no annotation given, it selects all LBs. lbsFromAnnotation := consts.GetLoadBalancerConfigurationsNames(service) if len(lbsFromAnnotation) > 0 { - lbNamesSet := sets.New[string](lbsFromAnnotation...) + lbNamesSet := utilsets.NewString(lbsFromAnnotation...) 
for _, multiSLBConfig := range az.MultipleStandardLoadBalancerConfigurations { - if lbNamesSet.Has(strings.ToLower(multiSLBConfig.Name)) { - klog.V(4).Infof("getEligibleLoadBalancersForService: service %q selects load balancer %q by annotation", service.Name, multiSLBConfig.Name) + if lbNamesSet.Has(multiSLBConfig.Name) { + logger.V(4).Info("selects the load balancer by annotation", + "load balancer configuration name", multiSLBConfig.Name) eligibleLBs = append(eligibleLBs, multiSLBConfig) lbSelectedByAnnotation = append(lbSelectedByAnnotation, multiSLBConfig.Name) } @@ -3873,13 +3869,14 @@ func (az *Cloud) getEligibleLoadBalancersForService(service *v1.Service) ([]stri return nil, fmt.Errorf("service %q selects %d load balancers by annotation, but none of them is defined in cloud provider configuration", service.Name, len(lbsFromAnnotation)) } } else { - klog.V(4).Infof("getEligibleLoadBalancersForService: service %q does not select any load balancer by annotation, all load balancers are eligible", service.Name) + logger.V(4).Info("the service does not select any load balancer by annotation, all load balancers are eligible") eligibleLBs = append(eligibleLBs, az.MultipleStandardLoadBalancerConfigurations...) for _, eligibleLB := range eligibleLBs { lbSelectedByAnnotation = append(lbSelectedByAnnotation, eligibleLB.Name) } } + var selectorMatched bool for i := len(eligibleLBs) - 1; i >= 0; i-- { eligibleLB := eligibleLBs[i] @@ -3887,9 +3884,11 @@ func (az *Cloud) getEligibleLoadBalancersForService(service *v1.Service) ([]stri // unless the service is already using the LB. if !pointer.BoolDeref(eligibleLB.AllowServicePlacement, true) { if az.isLoadBalancerInUseByService(service, eligibleLB) { - klog.V(4).Infof("getEligibleLoadBalancersForService: although load balancer %q has AllowServicePlacement=false, service %q is allowed to be placed on load balancer %q because it is using the load balancer", eligibleLB.Name, service.Name, eligibleLB.Name) + logger.V(4).Info("although the load balancer has AllowServicePlacement=false, service is allowed to be placed on load balancer because it is using the load balancer", + "load balancer configuration name", eligibleLB.Name) } else { - klog.V(4).Infof("getEligibleLoadBalancersForService: service %q is not allowed to be placed on load balancer %q", service.Name, eligibleLB.Name) + logger.V(4).Info("the load balancer has AllowServicePlacement=false, service is not allowed to be placed on load balancer", + "load balancer configuration name", eligibleLB.Name) eligibleLBs = append(eligibleLBs[:i], eligibleLBs[i+1:]...) 
lbFailedPlacementFlag = append(lbFailedPlacementFlag, eligibleLB.Name) continue @@ -3901,15 +3900,23 @@ func (az *Cloud) getEligibleLoadBalancersForService(service *v1.Service) ([]stri if eligibleLB.ServiceLabelSelector != nil { serviceLabelSelector, err := metav1.LabelSelectorAsSelector(eligibleLB.ServiceLabelSelector) if err != nil { - klog.Errorf("Failed to parse label selector %q for load balancer %q: %s", eligibleLB.ServiceLabelSelector.String(), eligibleLB.Name, err.Error()) + logger.Error(err, "failed to parse label selector", + "label selector", eligibleLB.ServiceLabelSelector.String(), + "load balancer configuration name", eligibleLB.Name) return []string{}, err } if !serviceLabelSelector.Matches(labels.Set(service.Labels)) { - klog.V(2).Infof("getEligibleLoadBalancersForService: service %q does not match label selector %q for load balancer %q", service.Name, eligibleLB.ServiceLabelSelector.String(), eligibleLB.Name) + logger.V(2).Info("service does not match the label selector", + "label selector", eligibleLB.ServiceLabelSelector.String(), + "load balancer configuration name", eligibleLB.Name) eligibleLBs = append(eligibleLBs[:i], eligibleLBs[i+1:]...) lbFailedLabelSelector = append(lbFailedLabelSelector, eligibleLB.Name) continue } + logger.V(4).Info("service matches the label selector", + "label selector", eligibleLB.ServiceLabelSelector.String(), + "load balancer configuration name", eligibleLB.Name) + selectorMatched = true } // 4. Check the service namespace selector. The service can be migrated from one LB to another LB @@ -3917,20 +3924,32 @@ func (az *Cloud) getEligibleLoadBalancersForService(service *v1.Service) ([]stri if eligibleLB.ServiceNamespaceSelector != nil { serviceNamespaceSelector, err := metav1.LabelSelectorAsSelector(eligibleLB.ServiceNamespaceSelector) if err != nil { - klog.Errorf("Failed to parse namespace selector %q for load balancer %q: %s", eligibleLB.ServiceNamespaceSelector.String(), eligibleLB.Name, err.Error()) + logger.Error(err, "failed to parse namespace selector", + "namespace selector", eligibleLB.ServiceNamespaceSelector.String(), + "load balancer configuration name", eligibleLB.Name) return []string{}, err } ns, err := az.KubeClient.CoreV1().Namespaces().Get(context.Background(), service.Namespace, metav1.GetOptions{}) if err != nil { - klog.Errorf("Failed to get namespace %q for load balancer %q: %s", service.Namespace, eligibleLB.Name, err.Error()) + logger.Error(err, "failed to get namespace", + "namespace", service.Namespace, + "load balancer configuration name", eligibleLB.Name) return []string{}, err } if !serviceNamespaceSelector.Matches(labels.Set(ns.Labels)) { - klog.V(2).Infof("getEligibleLoadBalancersForService: namespace %q does not match namespace selector %q for load balancer %q", service.Namespace, eligibleLB.ServiceNamespaceSelector.String(), eligibleLB.Name) + logger.V(2).Info("namespace does not match the namespace selector", + "namespace", service.Namespace, + "namespace selector", eligibleLB.ServiceNamespaceSelector.String(), + "load balancer configuration name", eligibleLB.Name) eligibleLBs = append(eligibleLBs[:i], eligibleLBs[i+1:]...) 
lbFailedNamespaceSelector = append(lbFailedNamespaceSelector, eligibleLB.Name) continue } + logger.V(4).Info("namespace matches the namespace selector", + "namespace", service.Namespace, + "namespace selector", eligibleLB.ServiceNamespaceSelector.String(), + "load balancer configuration name", eligibleLB.Name) + selectorMatched = true } } @@ -3950,6 +3969,19 @@ func (az *Cloud) getEligibleLoadBalancersForService(service *v1.Service) ([]stri ) } + if selectorMatched { + for i := len(eligibleLBs) - 1; i >= 0; i-- { + eligibleLB := eligibleLBs[i] + if eligibleLB.ServiceLabelSelector == nil && eligibleLB.ServiceNamespaceSelector == nil { + logger.V(6).Info("service matches at least one label/namespace selector of the load balancer, so it should not be placed on the load balancer that does not have any label/namespace selector", + "load balancer configuration name", eligibleLB.Name) + eligibleLBs = append(eligibleLBs[:i], eligibleLBs[i+1:]...) + } + } + } else { + logger.V(4).Info("no load balancer that has label/namespace selector matches the service, so the service can be placed on the load balancers that do not have label/namespace selector") + } + for _, eligibleLB := range eligibleLBs { eligibleLBNames = append(eligibleLBNames, eligibleLB.Name) } @@ -3962,10 +3994,7 @@ func (az *Cloud) isLoadBalancerInUseByService(service *v1.Service, lbConfig Mult defer az.multipleStandardLoadBalancersActiveServicesLock.Unlock() serviceName := getServiceName(service) - if lbConfig.ActiveServices != nil { - return lbConfig.ActiveServices.Has(serviceName) - } - return false + return lbConfig.ActiveServices.Has(serviceName) } // There are two cases when a service owns the frontend IP config: diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go index 67f213c32..b33ee2e71 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go @@ -26,7 +26,6 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" cloudprovider "k8s.io/cloud-provider" "k8s.io/klog/v2" utilnet "k8s.io/utils/net" @@ -35,6 +34,7 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" "sigs.k8s.io/cloud-provider-azure/pkg/metrics" + utilsets "sigs.k8s.io/cloud-provider-azure/pkg/util/sets" ) type BackendPool interface { @@ -304,8 +304,8 @@ func getBackendIPConfigurationsToBeDeleted( return []network.InterfaceIPConfiguration{} } - bipConfigNotFoundIDSet := sets.New[string]() - bipConfigExcludeIDSet := sets.New[string]() + bipConfigNotFoundIDSet := utilsets.NewString() + bipConfigExcludeIDSet := utilsets.NewString() for _, ipConfig := range bipConfigNotFound { bipConfigNotFoundIDSet.Insert(pointer.StringDeref(ipConfig.ID, "")) } @@ -344,7 +344,7 @@ func (bc *backendPoolTypeNodeIPConfig) GetBackendPrivateIPs(clusterName string, return nil, nil } - backendPrivateIPv4s, backendPrivateIPv6s := sets.New[string](), sets.New[string]() + backendPrivateIPv4s, backendPrivateIPv6s := utilsets.NewString(), utilsets.NewString() for _, bp := range *lb.BackendAddressPools { found, _ := isLBBackendPoolsExisting(lbBackendPoolNames, bp.Name) if found { @@ -399,7 +399,7 @@ func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(service *v1.Service, nodes [] var ( 
changed bool numOfAdd, numOfDelete int - activeNodes sets.Set[string] + activeNodes *utilsets.IgnoreCaseSet err error ) if bi.useMultipleStandardLoadBalancers() { @@ -420,9 +420,6 @@ func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(service *v1.Service, nodes [] return err } } - if activeNodes == nil { - activeNodes = sets.New[string]() - } } lbBackendPoolName := bi.getBackendPoolNameForService(service, clusterName, isIPv6) @@ -437,7 +434,7 @@ func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(service *v1.Service, nodes [] backendPool.LoadBalancerBackendAddresses = &lbBackendPoolAddresses } - existingIPs := sets.New[string]() + existingIPs := utilsets.NewString() for _, loadBalancerBackendAddress := range *backendPool.LoadBalancerBackendAddresses { if loadBalancerBackendAddress.LoadBalancerBackendAddressPropertiesFormat != nil && loadBalancerBackendAddress.IPAddress != nil { @@ -447,7 +444,7 @@ func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(service *v1.Service, nodes [] } var nodeIPsToBeAdded []string - nodePrivateIPsSet := sets.New[string]() + nodePrivateIPsSet := utilsets.NewString() for _, node := range nodes { if isControlPlaneNode(node) { klog.V(4).Infof("bi.EnsureHostsInPool: skipping control plane node %s", node.Name) @@ -458,7 +455,7 @@ func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(service *v1.Service, nodes [] nodePrivateIPsSet.Insert(privateIP) if bi.useMultipleStandardLoadBalancers() { - if activeNodes == nil || !activeNodes.Has(node.Name) { + if !activeNodes.Has(node.Name) { klog.V(4).Infof("bi.EnsureHostsInPool: node %s should not be in load balancer %q", node.Name, lbName) continue } @@ -519,7 +516,7 @@ func (bi *backendPoolTypeNodeIP) CleanupVMSetFromBackendPoolByCondition(slb *net found, isIPv6 := isLBBackendPoolsExisting(lbBackendPoolNames, bp.Name) if found { klog.V(2).Infof("bi.CleanupVMSetFromBackendPoolByCondition: checking the backend pool %s from standard load balancer %s", pointer.StringDeref(bp.Name, ""), pointer.StringDeref(slb.Name, "")) - vmIPsToBeDeleted := sets.New[string]() + vmIPsToBeDeleted := utilsets.NewString() for _, node := range nodes { vmSetName, err := bi.VMSet.GetNodeVMSetName(node) if err != nil { @@ -662,8 +659,8 @@ func (bi *backendPoolTypeNodeIP) ReconcileBackendPools(clusterName string, servi for _, i := range bpIdxes { bp := newBackendPools[i] var nodeIPAddressesToBeDeleted []string - for nodeName := range bi.excludeLoadBalancerNodes { - for ip := range bi.nodePrivateIPs[strings.ToLower(nodeName)] { + for _, nodeName := range bi.excludeLoadBalancerNodes.UnsortedList() { + for _, ip := range bi.nodePrivateIPs[strings.ToLower(nodeName)].UnsortedList() { klog.V(2).Infof("bi.ReconcileBackendPools for service (%s): found unwanted node private IP %s, decouple it from the LB %s", serviceName, ip, lbName) nodeIPAddressesToBeDeleted = append(nodeIPAddressesToBeDeleted, ip) } @@ -744,7 +741,7 @@ func (bi *backendPoolTypeNodeIP) GetBackendPrivateIPs(clusterName string, servic return nil, nil } - backendPrivateIPv4s, backendPrivateIPv6s := sets.New[string](), sets.New[string]() + backendPrivateIPv4s, backendPrivateIPv6s := utilsets.NewString(), utilsets.NewString() for _, bp := range *lb.BackendAddressPools { found, _ := isLBBackendPoolsExisting(lbBackendPoolNames, bp.Name) if found { @@ -839,7 +836,7 @@ func removeNodeIPAddressesFromBackendPool( removeAll, useMultipleStandardLoadBalancers bool, ) bool { changed := false - nodeIPsSet := sets.New(nodeIPAddresses...) + nodeIPsSet := utilsets.NewString(nodeIPAddresses...) 
if backendPool.BackendAddressPoolPropertiesFormat == nil || backendPool.LoadBalancerBackendAddresses == nil { diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_repo.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_repo.go index 83ac58487..cfee45118 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_repo.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_repo.go @@ -26,8 +26,8 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" + v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" "k8s.io/utils/pointer" @@ -35,6 +35,7 @@ import ( azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" "sigs.k8s.io/cloud-provider-azure/pkg/retry" + utilsets "sigs.k8s.io/cloud-provider-azure/pkg/util/sets" ) // DeleteLB invokes az.LoadBalancerClient.Delete with exponential backoff retry @@ -87,7 +88,7 @@ func (az *Cloud) ListManagedLBs(service *v1.Service, nodes []*v1.Node, clusterNa return nil, nil } - managedLBNames := sets.New[string](strings.ToLower(clusterName)) + managedLBNames := utilsets.NewString(clusterName) managedLBs := make([]network.LoadBalancer, 0) if strings.EqualFold(az.LoadBalancerSku, consts.LoadBalancerSkuBasic) { // return early if wantLb=false @@ -121,7 +122,7 @@ func (az *Cloud) ListManagedLBs(service *v1.Service, nodes []*v1.Node, clusterNa } for _, lb := range allLBs { - if managedLBNames.Has(strings.ToLower(strings.TrimSuffix(pointer.StringDeref(lb.Name, ""), consts.InternalLoadBalancerNameSuffix))) { + if managedLBNames.Has(trimSuffixIgnoreCase(pointer.StringDeref(lb.Name, ""), consts.InternalLoadBalancerNameSuffix)) { managedLBs = append(managedLBs, lb) klog.V(4).Infof("ListManagedLBs: found managed LB %s", pointer.StringDeref(lb.Name, "")) } @@ -372,7 +373,7 @@ func isBackendPoolOnSameLB(newBackendPoolID string, existingBackendPools []strin } newLBName := matches[1] - newLBNameTrimmed := strings.TrimSuffix(newLBName, consts.InternalLoadBalancerNameSuffix) + newLBNameTrimmed := trimSuffixIgnoreCase(newLBName, consts.InternalLoadBalancerNameSuffix) for _, backendPool := range existingBackendPools { matches := backendPoolIDRE.FindStringSubmatch(backendPool) if len(matches) != 2 { @@ -380,7 +381,7 @@ func isBackendPoolOnSameLB(newBackendPoolID string, existingBackendPools []strin } lbName := matches[1] - if !strings.EqualFold(strings.TrimSuffix(lbName, consts.InternalLoadBalancerNameSuffix), newLBNameTrimmed) { + if !strings.EqualFold(trimSuffixIgnoreCase(lbName, consts.InternalLoadBalancerNameSuffix), newLBNameTrimmed) { return false, lbName, nil } } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_local_services.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_local_services.go index 25ff0e816..63640d7b4 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_local_services.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_local_services.go @@ -38,6 +38,7 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/consts" "sigs.k8s.io/cloud-provider-azure/pkg/retry" + utilsets "sigs.k8s.io/cloud-provider-azure/pkg/util/sets" ) // batchProcessor collects operations in a certain interval and then processes them in batches. 
@@ -329,11 +330,11 @@ func (az *Cloud) setUpEndpointSlicesInformer(informerFactory informers.SharedInf } for _, previousNodeName := range previousNodeNames { nodeIPsSet := az.nodePrivateIPs[strings.ToLower(previousNodeName)] - previousIPs = append(previousIPs, setToStrings(nodeIPsSet)...) + previousIPs = append(previousIPs, nodeIPsSet.UnsortedList()...) } for _, currentNodeName := range currentNodeNames { nodeIPsSet := az.nodePrivateIPs[strings.ToLower(currentNodeName)] - currentIPs = append(currentIPs, setToStrings(nodeIPsSet)...) + currentIPs = append(currentIPs, nodeIPsSet.UnsortedList()...) } if az.backendPoolUpdater != nil { @@ -462,7 +463,7 @@ func newServiceInfo(ipFamily, lbName string) *serviceInfo { } // getLocalServiceEndpointsNodeNames gets the node names that host all endpoints of the local service. -func (az *Cloud) getLocalServiceEndpointsNodeNames(service *v1.Service) (sets.Set[string], error) { +func (az *Cloud) getLocalServiceEndpointsNodeNames(service *v1.Service) (*utilsets.IgnoreCaseSet, error) { var ( ep *discovery_v1.EndpointSlice foundInCache bool @@ -505,7 +506,7 @@ func (az *Cloud) getLocalServiceEndpointsNodeNames(service *v1.Service) (sets.Se nodeNames = append(nodeNames, pointer.StringDeref(endpoint.NodeName, "")) } - return sets.New[string](nodeNames...), nil + return utilsets.NewString(nodeNames...), nil } // cleanupLocalServiceBackendPool cleans up the backend pool of @@ -553,11 +554,9 @@ func (az *Cloud) checkAndApplyLocalServiceBackendPoolUpdates(lb network.LoadBala return err } var expectedIPs []string - for nodeName := range endpointsNodeNames { + for _, nodeName := range endpointsNodeNames.UnsortedList() { ips := az.nodePrivateIPs[strings.ToLower(nodeName)] - for ip := range ips { - expectedIPs = append(expectedIPs, ip) - } + expectedIPs = append(expectedIPs, ips.UnsortedList()...) } currentIPsInBackendPools := make(map[string][]string) for _, bp := range *lb.BackendAddressPools { diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_managedDiskController.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_managedDiskController.go deleted file mode 100644 index 206be6034..000000000 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_managedDiskController.go +++ /dev/null @@ -1,461 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package provider - -import ( - "context" - "fmt" - "net/http" - "path" - "strconv" - "strings" - - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - kwait "k8s.io/apimachinery/pkg/util/wait" - cloudprovider "k8s.io/cloud-provider" - cloudvolume "k8s.io/cloud-provider/volume" - volumehelpers "k8s.io/cloud-provider/volume/helpers" - "k8s.io/klog/v2" - "k8s.io/utils/pointer" - - "sigs.k8s.io/cloud-provider-azure/pkg/consts" -) - -var _ cloudprovider.PVLabeler = (*Cloud)(nil) - -// ManagedDiskController : managed disk controller struct -type ManagedDiskController struct { - *controllerCommon -} - -// ManagedDiskOptions specifies the options of managed disks. -type ManagedDiskOptions struct { - // The SKU of storage account. - StorageAccountType compute.DiskStorageAccountTypes - // The name of the disk. - DiskName string - // The name of PVC. - PVCName string - // The name of resource group. - ResourceGroup string - // The AvailabilityZone to create the disk. - AvailabilityZone string - // The tags of the disk. - Tags map[string]string - // IOPS Caps for UltraSSD disk - DiskIOPSReadWrite string - // Throughput Cap (MBps) for UltraSSD disk - DiskMBpsReadWrite string - // if SourceResourceID is not empty, then it's a disk copy operation(for snapshot) - SourceResourceID string - // The type of source - SourceType string - // ResourceId of the disk encryption set to use for enabling encryption at rest. - DiskEncryptionSetID string - // DiskEncryption type, available values: EncryptionAtRestWithCustomerKey, EncryptionAtRestWithPlatformAndCustomerKeys - DiskEncryptionType string - // The size in GB. - SizeGB int - // The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time. - MaxShares int32 - // Logical sector size in bytes for Ultra disks - LogicalSectorSize int32 - // SkipGetDiskOperation indicates whether skip GetDisk operation(mainly due to throttling) - SkipGetDiskOperation bool - // PublicNetworkAccess - Possible values include: 'Enabled', 'Disabled' - PublicNetworkAccess compute.PublicNetworkAccess - // NetworkAccessPolicy - Possible values include: 'AllowAll', 'AllowPrivate', 'DenyAll' - NetworkAccessPolicy compute.NetworkAccessPolicy - // DiskAccessID - ARM id of the DiskAccess resource for using private endpoints on disks. - DiskAccessID *string - // BurstingEnabled - Set to true to enable bursting beyond the provisioned performance target of the disk. 
- BurstingEnabled *bool - // SubscriptionID - specify a different SubscriptionID - SubscriptionID string - // Location - specify a different location - Location string - // PerformancePlus - Set this flag to true to get a boost on the performance target of the disk deployed - PerformancePlus *bool -} - -// CreateManagedDisk: create managed disk -func (c *ManagedDiskController) CreateManagedDisk(ctx context.Context, options *ManagedDiskOptions) (string, error) { - var err error - klog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB) - - var createZones []string - if len(options.AvailabilityZone) > 0 { - requestedZone := c.cloud.GetZoneID(options.AvailabilityZone) - if requestedZone != "" { - createZones = append(createZones, requestedZone) - } - } - - // insert original tags to newTags - newTags := make(map[string]*string) - azureDDTag := "kubernetes-azure-dd" - newTags[consts.CreatedByTag] = &azureDDTag - if options.Tags != nil { - for k, v := range options.Tags { - // Azure won't allow / (forward slash) in tags - newKey := strings.Replace(k, "/", "-", -1) - newValue := strings.Replace(v, "/", "-", -1) - newTags[newKey] = &newValue - } - } - - diskSizeGB := int32(options.SizeGB) - diskSku := options.StorageAccountType - - rg := c.cloud.ResourceGroup - if options.ResourceGroup != "" { - rg = options.ResourceGroup - } - if options.SubscriptionID != "" && !strings.EqualFold(options.SubscriptionID, c.cloud.SubscriptionID) && options.ResourceGroup == "" { - return "", fmt.Errorf("resourceGroup must be specified when subscriptionID(%s) is not empty", options.SubscriptionID) - } - subsID := c.cloud.SubscriptionID - if options.SubscriptionID != "" { - subsID = options.SubscriptionID - } - - creationData, err := getValidCreationData(subsID, rg, options) - if err != nil { - return "", err - } - diskProperties := compute.DiskProperties{ - DiskSizeGB: &diskSizeGB, - CreationData: &creationData, - BurstingEnabled: options.BurstingEnabled, - } - - if options.PublicNetworkAccess != "" { - diskProperties.PublicNetworkAccess = options.PublicNetworkAccess - } - - if options.NetworkAccessPolicy != "" { - diskProperties.NetworkAccessPolicy = options.NetworkAccessPolicy - if options.NetworkAccessPolicy == compute.AllowPrivate { - if options.DiskAccessID == nil { - return "", fmt.Errorf("DiskAccessID should not be empty when NetworkAccessPolicy is AllowPrivate") - } - diskProperties.DiskAccessID = options.DiskAccessID - } else { - if options.DiskAccessID != nil { - return "", fmt.Errorf("DiskAccessID(%s) must be empty when NetworkAccessPolicy(%s) is not AllowPrivate", *options.DiskAccessID, options.NetworkAccessPolicy) - } - } - } - - if diskSku == compute.UltraSSDLRS || diskSku == compute.PremiumV2LRS { - if options.DiskIOPSReadWrite == "" { - if diskSku == compute.UltraSSDLRS { - diskIOPSReadWrite := int64(consts.DefaultDiskIOPSReadWrite) - diskProperties.DiskIOPSReadWrite = pointer.Int64(diskIOPSReadWrite) - } - } else { - v, err := strconv.Atoi(options.DiskIOPSReadWrite) - if err != nil { - return "", fmt.Errorf("AzureDisk - failed to parse DiskIOPSReadWrite: %w", err) - } - diskIOPSReadWrite := int64(v) - diskProperties.DiskIOPSReadWrite = pointer.Int64(diskIOPSReadWrite) - } - - if options.DiskMBpsReadWrite == "" { - if diskSku == compute.UltraSSDLRS { - diskMBpsReadWrite := int64(consts.DefaultDiskMBpsReadWrite) - diskProperties.DiskMBpsReadWrite = pointer.Int64(diskMBpsReadWrite) - } - } else { - v, err := 
strconv.Atoi(options.DiskMBpsReadWrite) - if err != nil { - return "", fmt.Errorf("AzureDisk - failed to parse DiskMBpsReadWrite: %w", err) - } - diskMBpsReadWrite := int64(v) - diskProperties.DiskMBpsReadWrite = pointer.Int64(diskMBpsReadWrite) - } - - if options.LogicalSectorSize != 0 { - klog.V(2).Infof("AzureDisk - requested LogicalSectorSize: %v", options.LogicalSectorSize) - diskProperties.CreationData.LogicalSectorSize = pointer.Int32(options.LogicalSectorSize) - } - } else { - if options.DiskIOPSReadWrite != "" { - return "", fmt.Errorf("AzureDisk - DiskIOPSReadWrite parameter is only applicable in UltraSSD_LRS disk type") - } - if options.DiskMBpsReadWrite != "" { - return "", fmt.Errorf("AzureDisk - DiskMBpsReadWrite parameter is only applicable in UltraSSD_LRS disk type") - } - if options.LogicalSectorSize != 0 { - return "", fmt.Errorf("AzureDisk - LogicalSectorSize parameter is only applicable in UltraSSD_LRS disk type") - } - } - - if options.DiskEncryptionSetID != "" { - if strings.Index(strings.ToLower(options.DiskEncryptionSetID), "/subscriptions/") != 0 { - return "", fmt.Errorf("AzureDisk - format of DiskEncryptionSetID(%s) is incorrect, correct format: %s", options.DiskEncryptionSetID, consts.DiskEncryptionSetIDFormat) - } - encryptionType := compute.EncryptionTypeEncryptionAtRestWithCustomerKey - if options.DiskEncryptionType != "" { - encryptionType = compute.EncryptionType(options.DiskEncryptionType) - klog.V(4).Infof("azureDisk - DiskEncryptionType: %s, DiskEncryptionSetID: %s", options.DiskEncryptionType, options.DiskEncryptionSetID) - } - diskProperties.Encryption = &compute.Encryption{ - DiskEncryptionSetID: &options.DiskEncryptionSetID, - Type: encryptionType, - } - } else { - if options.DiskEncryptionType != "" { - return "", fmt.Errorf("AzureDisk - DiskEncryptionType(%s) should be empty when DiskEncryptionSetID is not set", options.DiskEncryptionType) - } - } - - if options.MaxShares > 1 { - diskProperties.MaxShares = &options.MaxShares - } - - location := c.cloud.Location - if options.Location != "" { - location = options.Location - } - model := compute.Disk{ - Location: &location, - Tags: newTags, - Sku: &compute.DiskSku{ - Name: diskSku, - }, - DiskProperties: &diskProperties, - } - - if c.cloud.HasExtendedLocation() { - model.ExtendedLocation = &compute.ExtendedLocation{ - Name: pointer.String(c.cloud.ExtendedLocationName), - Type: compute.ExtendedLocationTypes(c.cloud.ExtendedLocationType), - } - } - - if len(createZones) > 0 { - model.Zones = &createZones - } - - if rerr := c.cloud.DisksClient.CreateOrUpdate(ctx, subsID, rg, options.DiskName, model); rerr != nil { - return "", rerr.Error() - } - - diskID := fmt.Sprintf(managedDiskPath, subsID, rg, options.DiskName) - - if options.SkipGetDiskOperation { - klog.Warningf("azureDisk - GetDisk(%s, StorageAccountType:%s) is throttled, unable to confirm provisioningState in poll process", options.DiskName, options.StorageAccountType) - } else { - err = kwait.ExponentialBackoffWithContext(ctx, defaultBackOff, func(ctx context.Context) (bool, error) { - provisionState, id, err := c.GetDisk(ctx, subsID, rg, options.DiskName) - if err == nil { - if id != "" { - diskID = id - } - } else { - // We are waiting for provisioningState==Succeeded - // We don't want to hand-off managed disks to k8s while they are - //still being provisioned, this is to avoid some race conditions - return false, err - } - if strings.ToLower(provisionState) == "succeeded" { - return true, nil - } - return false, nil - }) - - if err != nil { 
- klog.Warningf("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v but was unable to confirm provisioningState in poll process", options.DiskName, options.StorageAccountType, options.SizeGB) - } - } - - klog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB) - return diskID, nil -} - -// DeleteManagedDisk : delete managed disk -func (c *ManagedDiskController) DeleteManagedDisk(ctx context.Context, diskURI string) error { - resourceGroup, subsID, err := getInfoFromDiskURI(diskURI) - if err != nil { - return err - } - - if state, ok := c.diskStateMap.Load(strings.ToLower(diskURI)); ok { - return fmt.Errorf("failed to delete disk(%s) since it's in %s state", diskURI, state.(string)) - } - - diskName := path.Base(diskURI) - disk, rerr := c.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) - if rerr != nil { - if rerr.HTTPStatusCode == http.StatusNotFound { - klog.V(2).Infof("azureDisk - disk(%s) is already deleted", diskURI) - return nil - } - // ignore GetDisk throttling - if !rerr.IsThrottled() && !strings.Contains(rerr.RawError.Error(), consts.RateLimited) { - return rerr.Error() - } - } - if disk.ManagedBy != nil { - return fmt.Errorf("disk(%s) already attached to node(%s), could not be deleted", diskURI, *disk.ManagedBy) - } - - if rerr := c.cloud.DisksClient.Delete(ctx, subsID, resourceGroup, diskName); rerr != nil { - return rerr.Error() - } - // We don't need poll here, k8s will immediately stop referencing the disk - // the disk will be eventually deleted - cleanly - by ARM - - klog.V(2).Infof("azureDisk - deleted a managed disk: %s", diskURI) - - return nil -} - -// GetDisk return: disk provisionState, diskID, error -func (c *ManagedDiskController) GetDisk(ctx context.Context, subsID, resourceGroup, diskName string) (string, string, error) { - result, rerr := c.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) - if rerr != nil { - return "", "", rerr.Error() - } - - if result.DiskProperties != nil && (*result.DiskProperties).ProvisioningState != nil { - return *(*result.DiskProperties).ProvisioningState, *result.ID, nil - } - return "", "", nil -} - -// ResizeDisk Expand the disk to new size -func (c *ManagedDiskController) ResizeDisk(ctx context.Context, diskURI string, oldSize resource.Quantity, newSize resource.Quantity, supportOnlineResize bool) (resource.Quantity, error) { - diskName := path.Base(diskURI) - resourceGroup, subsID, err := getInfoFromDiskURI(diskURI) - if err != nil { - return oldSize, err - } - - result, rerr := c.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) - if rerr != nil { - return oldSize, rerr.Error() - } - - if result.DiskProperties == nil || result.DiskProperties.DiskSizeGB == nil { - return oldSize, fmt.Errorf("DiskProperties of disk(%s) is nil", diskName) - } - - // Azure resizes in chunks of GiB (not GB) - requestGiB, err := volumehelpers.RoundUpToGiBInt32(newSize) - if err != nil { - return oldSize, err - } - - newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGiB)) - - klog.V(2).Infof("azureDisk - begin to resize disk(%s) with new size(%d), old size(%v)", diskName, requestGiB, oldSize) - // If disk already of greater or equal size than requested we return - if *result.DiskProperties.DiskSizeGB >= requestGiB { - return newSizeQuant, nil - } - - if !supportOnlineResize && result.DiskProperties.DiskState != compute.Unattached { - return oldSize, fmt.Errorf("azureDisk - disk resize is only supported on 
Unattached disk, current disk state: %s, already attached to %s", result.DiskProperties.DiskState, pointer.StringDeref(result.ManagedBy, "")) - } - - diskParameter := compute.DiskUpdate{ - DiskUpdateProperties: &compute.DiskUpdateProperties{ - DiskSizeGB: &requestGiB, - }, - } - - if rerr := c.cloud.DisksClient.Update(ctx, subsID, resourceGroup, diskName, diskParameter); rerr != nil { - return oldSize, rerr.Error() - } - - klog.V(2).Infof("azureDisk - resize disk(%s) with new size(%d) completed", diskName, requestGiB) - return newSizeQuant, nil -} - -// get resource group name, subs id from a managed disk URI, e.g. return {group-name}, {sub-id} according to -// /subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id} -// according to https://docs.microsoft.com/en-us/rest/api/compute/disks/get -func getInfoFromDiskURI(diskURI string) (string, string, error) { - fields := strings.Split(diskURI, "/") - if len(fields) != 9 || strings.ToLower(fields[3]) != "resourcegroups" { - return "", "", fmt.Errorf("invalid disk URI: %s", diskURI) - } - return fields[4], fields[2], nil -} - -// PVLabeler is an abstract, pluggable interface for fetching labels for volumes -// GetLabelsForVolume implements PVLabeler.GetLabelsForVolume -func (c *Cloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) { - // Ignore if not AzureDisk. - if pv.Spec.AzureDisk == nil { - return nil, nil - } - - // Ignore any volumes that are being provisioned - if pv.Spec.AzureDisk.DiskName == cloudvolume.ProvisionedVolumeName { - return nil, nil - } - - return c.GetAzureDiskLabels(ctx, pv.Spec.AzureDisk.DataDiskURI) -} - -// GetAzureDiskLabels gets availability zone labels for Azuredisk. -func (c *Cloud) GetAzureDiskLabels(ctx context.Context, diskURI string) (map[string]string, error) { - // Get disk's resource group. - diskName := path.Base(diskURI) - resourceGroup, subsID, err := getInfoFromDiskURI(diskURI) - if err != nil { - klog.Errorf("Failed to get resource group for AzureDisk %s: %v", diskName, err) - return nil, err - } - - labels := map[string]string{ - consts.LabelFailureDomainBetaRegion: c.Location, - } - // no azure credential is set, return nil - if c.DisksClient == nil { - return labels, nil - } - disk, rerr := c.DisksClient.Get(ctx, subsID, resourceGroup, diskName) - if rerr != nil { - klog.Errorf("Failed to get information for AzureDisk %s: %v", diskName, rerr) - return nil, rerr.Error() - } - - // Check whether availability zone is specified. - if disk.Zones == nil || len(*disk.Zones) == 0 { - klog.V(4).Infof("Azure disk %s is not zoned", diskName) - return labels, nil - } - - zones := *disk.Zones - zoneID, err := strconv.Atoi(zones[0]) - if err != nil { - return nil, fmt.Errorf("failed to parse zone %v for AzureDisk %v: %w", zones, diskName, err) - } - - zone := c.makeZone(c.Location, zoneID) - klog.V(4).Infof("Got zone %s for Azure disk %s", zone, diskName) - labels[consts.LabelFailureDomainBetaZone] = zone - return labels, nil -} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go index 833fb38bc..78de8bf84 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go @@ -1,34 +1,42 @@ -/* -Copyright 2020 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - +// /* +// Copyright The Kubernetes Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// */ +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: azure_vmsets.go +// +// Generated by this command: +// +// mockgen -package provider -source azure_vmsets.go -self_package sigs.k8s.io/cloud-provider-azure/pkg/provider -copyright_file ../../hack/boilerplate/boilerplate.generatego.txt +// + +// Package provider is a generated GoMock package. package provider import ( context "context" reflect "reflect" + v5 "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" - azure "github.com/Azure/go-autorest/autorest/azure" gomock "go.uber.org/mock/gomock" - v1 "k8s.io/api/core/v1" types "k8s.io/apimachinery/pkg/types" cloud_provider "k8s.io/cloud-provider" - cache "sigs.k8s.io/cloud-provider-azure/pkg/cache" ) @@ -56,16 +64,15 @@ func (m *MockVMSet) EXPECT() *MockVMSetMockRecorder { } // AttachDisk mocks base method. -func (m *MockVMSet) AttachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) (*azure.Future, error) { +func (m *MockVMSet) AttachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "AttachDisk", ctx, nodeName, diskMap) - ret0, _ := ret[0].(*azure.Future) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret0, _ := ret[0].(error) + return ret0 } // AttachDisk indicates an expected call of AttachDisk. -func (mr *MockVMSetMockRecorder) AttachDisk(ctx, nodeName, diskMap interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) AttachDisk(ctx, nodeName, diskMap any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachDisk", reflect.TypeOf((*MockVMSet)(nil).AttachDisk), ctx, nodeName, diskMap) } @@ -79,7 +86,7 @@ func (m *MockVMSet) DeleteCacheForNode(nodeName string) error { } // DeleteCacheForNode indicates an expected call of DeleteCacheForNode. 
-func (mr *MockVMSetMockRecorder) DeleteCacheForNode(nodeName interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) DeleteCacheForNode(nodeName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCacheForNode", reflect.TypeOf((*MockVMSet)(nil).DeleteCacheForNode), nodeName) } @@ -93,7 +100,7 @@ func (m *MockVMSet) DetachDisk(ctx context.Context, nodeName types.NodeName, dis } // DetachDisk indicates an expected call of DetachDisk. -func (mr *MockVMSetMockRecorder) DetachDisk(ctx, nodeName, diskMap interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) DetachDisk(ctx, nodeName, diskMap any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetachDisk", reflect.TypeOf((*MockVMSet)(nil).DetachDisk), ctx, nodeName, diskMap) } @@ -108,7 +115,7 @@ func (m *MockVMSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolIDs } // EnsureBackendPoolDeleted indicates an expected call of EnsureBackendPoolDeleted. -func (mr *MockVMSetMockRecorder) EnsureBackendPoolDeleted(service, backendPoolIDs, vmSetName, backendAddressPools, deleteFromVMSet interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) EnsureBackendPoolDeleted(service, backendPoolIDs, vmSetName, backendAddressPools, deleteFromVMSet any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureBackendPoolDeleted", reflect.TypeOf((*MockVMSet)(nil).EnsureBackendPoolDeleted), service, backendPoolIDs, vmSetName, backendAddressPools, deleteFromVMSet) } @@ -122,7 +129,7 @@ func (m *MockVMSet) EnsureBackendPoolDeletedFromVMSets(vmSetNamesMap map[string] } // EnsureBackendPoolDeletedFromVMSets indicates an expected call of EnsureBackendPoolDeletedFromVMSets. -func (mr *MockVMSetMockRecorder) EnsureBackendPoolDeletedFromVMSets(vmSetNamesMap, backendPoolIDs interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) EnsureBackendPoolDeletedFromVMSets(vmSetNamesMap, backendPoolIDs any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureBackendPoolDeletedFromVMSets", reflect.TypeOf((*MockVMSet)(nil).EnsureBackendPoolDeletedFromVMSets), vmSetNamesMap, backendPoolIDs) } @@ -140,7 +147,7 @@ func (m *MockVMSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam } // EnsureHostInPool indicates an expected call of EnsureHostInPool. -func (mr *MockVMSetMockRecorder) EnsureHostInPool(service, nodeName, backendPoolID, vmSetName interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) EnsureHostInPool(service, nodeName, backendPoolID, vmSetName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureHostInPool", reflect.TypeOf((*MockVMSet)(nil).EnsureHostInPool), service, nodeName, backendPoolID, vmSetName) } @@ -154,7 +161,7 @@ func (m *MockVMSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac } // EnsureHostsInPool indicates an expected call of EnsureHostsInPool. 
-func (mr *MockVMSetMockRecorder) EnsureHostsInPool(service, nodes, backendPoolID, vmSetName interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) EnsureHostsInPool(service, nodes, backendPoolID, vmSetName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureHostsInPool", reflect.TypeOf((*MockVMSet)(nil).EnsureHostsInPool), service, nodes, backendPoolID, vmSetName) } @@ -169,23 +176,23 @@ func (m *MockVMSet) GetAgentPoolVMSetNames(nodes []*v1.Node) (*[]string, error) } // GetAgentPoolVMSetNames indicates an expected call of GetAgentPoolVMSetNames. -func (mr *MockVMSetMockRecorder) GetAgentPoolVMSetNames(nodes interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetAgentPoolVMSetNames(nodes any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAgentPoolVMSetNames", reflect.TypeOf((*MockVMSet)(nil).GetAgentPoolVMSetNames), nodes) } // GetDataDisks mocks base method. -func (m *MockVMSet) GetDataDisks(nodeName types.NodeName, crt cache.AzureCacheReadType) ([]compute.DataDisk, *string, error) { +func (m *MockVMSet) GetDataDisks(nodeName types.NodeName, crt cache.AzureCacheReadType) ([]*v5.DataDisk, *string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetDataDisks", nodeName, crt) - ret0, _ := ret[0].([]compute.DataDisk) + ret0, _ := ret[0].([]*v5.DataDisk) ret1, _ := ret[1].(*string) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } // GetDataDisks indicates an expected call of GetDataDisks. -func (mr *MockVMSetMockRecorder) GetDataDisks(nodeName, crt interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetDataDisks(nodeName, crt any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataDisks", reflect.TypeOf((*MockVMSet)(nil).GetDataDisks), nodeName, crt) } @@ -201,7 +208,7 @@ func (m *MockVMSet) GetIPByNodeName(name string) (string, string, error) { } // GetIPByNodeName indicates an expected call of GetIPByNodeName. -func (mr *MockVMSetMockRecorder) GetIPByNodeName(name interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetIPByNodeName(name any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIPByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetIPByNodeName), name) } @@ -216,7 +223,7 @@ func (m *MockVMSet) GetInstanceIDByNodeName(name string) (string, error) { } // GetInstanceIDByNodeName indicates an expected call of GetInstanceIDByNodeName. -func (mr *MockVMSetMockRecorder) GetInstanceIDByNodeName(name interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetInstanceIDByNodeName(name any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceIDByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetInstanceIDByNodeName), name) } @@ -231,7 +238,7 @@ func (m *MockVMSet) GetInstanceTypeByNodeName(name string) (string, error) { } // GetInstanceTypeByNodeName indicates an expected call of GetInstanceTypeByNodeName. 
-func (mr *MockVMSetMockRecorder) GetInstanceTypeByNodeName(name interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetInstanceTypeByNodeName(name any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceTypeByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetInstanceTypeByNodeName), name) } @@ -247,7 +254,7 @@ func (m *MockVMSet) GetNodeCIDRMasksByProviderID(providerID string) (int, int, e } // GetNodeCIDRMasksByProviderID indicates an expected call of GetNodeCIDRMasksByProviderID. -func (mr *MockVMSetMockRecorder) GetNodeCIDRMasksByProviderID(providerID interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetNodeCIDRMasksByProviderID(providerID any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeCIDRMasksByProviderID", reflect.TypeOf((*MockVMSet)(nil).GetNodeCIDRMasksByProviderID), providerID) } @@ -263,7 +270,7 @@ func (m *MockVMSet) GetNodeNameByIPConfigurationID(ipConfigurationID string) (st } // GetNodeNameByIPConfigurationID indicates an expected call of GetNodeNameByIPConfigurationID. -func (mr *MockVMSetMockRecorder) GetNodeNameByIPConfigurationID(ipConfigurationID interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetNodeNameByIPConfigurationID(ipConfigurationID any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeNameByIPConfigurationID", reflect.TypeOf((*MockVMSet)(nil).GetNodeNameByIPConfigurationID), ipConfigurationID) } @@ -278,7 +285,7 @@ func (m *MockVMSet) GetNodeNameByProviderID(providerID string) (types.NodeName, } // GetNodeNameByProviderID indicates an expected call of GetNodeNameByProviderID. -func (mr *MockVMSetMockRecorder) GetNodeNameByProviderID(providerID interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetNodeNameByProviderID(providerID any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeNameByProviderID", reflect.TypeOf((*MockVMSet)(nil).GetNodeNameByProviderID), providerID) } @@ -293,7 +300,7 @@ func (m *MockVMSet) GetNodeVMSetName(node *v1.Node) (string, error) { } // GetNodeVMSetName indicates an expected call of GetNodeVMSetName. -func (mr *MockVMSetMockRecorder) GetNodeVMSetName(node interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetNodeVMSetName(node any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeVMSetName", reflect.TypeOf((*MockVMSet)(nil).GetNodeVMSetName), node) } @@ -308,7 +315,7 @@ func (m *MockVMSet) GetPowerStatusByNodeName(name string) (string, error) { } // GetPowerStatusByNodeName indicates an expected call of GetPowerStatusByNodeName. -func (mr *MockVMSetMockRecorder) GetPowerStatusByNodeName(name interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetPowerStatusByNodeName(name any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPowerStatusByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetPowerStatusByNodeName), name) } @@ -323,7 +330,7 @@ func (m *MockVMSet) GetPrimaryInterface(nodeName string) (network.Interface, err } // GetPrimaryInterface indicates an expected call of GetPrimaryInterface. 
-func (mr *MockVMSetMockRecorder) GetPrimaryInterface(nodeName interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetPrimaryInterface(nodeName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPrimaryInterface", reflect.TypeOf((*MockVMSet)(nil).GetPrimaryInterface), nodeName) } @@ -352,7 +359,7 @@ func (m *MockVMSet) GetPrivateIPsByNodeName(name string) ([]string, error) { } // GetPrivateIPsByNodeName indicates an expected call of GetPrivateIPsByNodeName. -func (mr *MockVMSetMockRecorder) GetPrivateIPsByNodeName(name interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetPrivateIPsByNodeName(name any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPrivateIPsByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetPrivateIPsByNodeName), name) } @@ -367,7 +374,7 @@ func (m *MockVMSet) GetProvisioningStateByNodeName(name string) (string, error) } // GetProvisioningStateByNodeName indicates an expected call of GetProvisioningStateByNodeName. -func (mr *MockVMSetMockRecorder) GetProvisioningStateByNodeName(name interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetProvisioningStateByNodeName(name any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProvisioningStateByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetProvisioningStateByNodeName), name) } @@ -382,7 +389,7 @@ func (m *MockVMSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (*[]str } // GetVMSetNames indicates an expected call of GetVMSetNames. -func (mr *MockVMSetMockRecorder) GetVMSetNames(service, nodes interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetVMSetNames(service, nodes any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVMSetNames", reflect.TypeOf((*MockVMSet)(nil).GetVMSetNames), service, nodes) } @@ -397,7 +404,7 @@ func (m *MockVMSet) GetZoneByNodeName(name string) (cloud_provider.Zone, error) } // GetZoneByNodeName indicates an expected call of GetZoneByNodeName. -func (mr *MockVMSetMockRecorder) GetZoneByNodeName(name interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) GetZoneByNodeName(name any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetZoneByNodeName", reflect.TypeOf((*MockVMSet)(nil).GetZoneByNodeName), name) } @@ -411,36 +418,7 @@ func (m *MockVMSet) UpdateVM(ctx context.Context, nodeName types.NodeName) error } // UpdateVM indicates an expected call of UpdateVM. -func (mr *MockVMSetMockRecorder) UpdateVM(ctx, nodeName interface{}) *gomock.Call { +func (mr *MockVMSetMockRecorder) UpdateVM(ctx, nodeName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateVM", reflect.TypeOf((*MockVMSet)(nil).UpdateVM), ctx, nodeName) } - -// UpdateVMAsync mocks base method. -func (m *MockVMSet) UpdateVMAsync(ctx context.Context, nodeName types.NodeName) (*azure.Future, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateVMAsync", ctx, nodeName) - ret0, _ := ret[0].(*azure.Future) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UpdateVMAsync indicates an expected call of UpdateVMAsync. 
-func (mr *MockVMSetMockRecorder) UpdateVMAsync(ctx, nodeName interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateVMAsync", reflect.TypeOf((*MockVMSet)(nil).UpdateVMAsync), ctx, nodeName) -} - -// WaitForUpdateResult mocks base method. -func (m *MockVMSet) WaitForUpdateResult(ctx context.Context, future *azure.Future, nodeName types.NodeName, source string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WaitForUpdateResult", ctx, future, nodeName, source) - ret0, _ := ret[0].(error) - return ret0 -} - -// WaitForUpdateResult indicates an expected call of WaitForUpdateResult. -func (mr *MockVMSetMockRecorder) WaitForUpdateResult(ctx, future, nodeName, source interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForUpdateResult", reflect.TypeOf((*MockVMSet)(nil).WaitForUpdateResult), ctx, future, nodeName, source) -} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go index ca2a1bdae..1d1863b2c 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go @@ -38,10 +38,12 @@ import ( cloudprovider "k8s.io/cloud-provider" "k8s.io/klog/v2" "k8s.io/utils/pointer" + "k8s.io/utils/ptr" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" "sigs.k8s.io/cloud-provider-azure/pkg/metrics" + vmutil "sigs.k8s.io/cloud-provider-azure/pkg/util/vm" ) var ( @@ -121,7 +123,7 @@ func (az *Cloud) getNetworkResourceSubscriptionID() string { } func (az *Cloud) mapLoadBalancerNameToVMSet(lbName string, clusterName string) (vmSetName string) { - vmSetName = strings.TrimSuffix(lbName, consts.InternalLoadBalancerNameSuffix) + vmSetName = trimSuffixIgnoreCase(lbName, consts.InternalLoadBalancerNameSuffix) if strings.EqualFold(clusterName, vmSetName) { vmSetName = az.VMSet.GetPrimaryVMSetName() } @@ -501,19 +503,13 @@ func (as *availabilitySet) GetPowerStatusByNodeName(name string) (powerState str return powerState, err } - if vm.InstanceView != nil && vm.InstanceView.Statuses != nil { - statuses := *vm.InstanceView.Statuses - for _, status := range statuses { - state := pointer.StringDeref(status.Code, "") - if strings.HasPrefix(state, vmPowerStatePrefix) { - return strings.TrimPrefix(state, vmPowerStatePrefix), nil - } - } + if vm.InstanceView != nil { + return vmutil.GetVMPowerState(ptr.Deref(vm.Name, ""), vm.InstanceView.Statuses), nil } // vm.InstanceView or vm.InstanceView.Statuses are nil when the VM is under deleting. klog.V(3).Infof("InstanceView for node %q is nil, assuming it's deleting", name) - return vmPowerStateUnknown, nil + return consts.VMPowerStateUnknown, nil } // GetProvisioningStateByNodeName returns the provisioningState for the specified node. 
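Aside: the azure_standard.go hunk above swaps the open-coded power-state parsing for the vmutil/consts helpers that this same vendor bump adds under pkg/util/vm and pkg/util/string. Below is a minimal sketch of how those helpers behave, assuming the conventional "PowerState/<state>" status codes (the prefix constant's value is not shown in this diff) and the package paths taken from these hunks; the sample node name, status values, and main() wiring are illustrative only, not part of the vendored code.

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
	"k8s.io/utils/ptr"

	vmutil "sigs.k8s.io/cloud-provider-azure/pkg/util/vm"
)

func main() {
	// Instance-view statuses roughly as the compute API reports them for a running VM.
	statuses := []compute.InstanceViewStatus{
		{Code: ptr.To("ProvisioningState/succeeded")},
		{Code: ptr.To("PowerState/running")},
	}

	// GetVMPowerState strips the power-state prefix from the status code and
	// falls back to consts.VMPowerStateUnknown when no power-state status exists.
	power := vmutil.GetVMPowerState("node-0", &statuses)
	fmt.Println(power) // expected: "running", given the assumed "PowerState/<state>" format

	// IsNotActiveVMState reports whether callers should skip the VM because it is
	// deleting, stopped, stopping, deallocated/deallocating, or in an unknown state.
	fmt.Println(vmutil.IsNotActiveVMState("Succeeded", power)) // expected: false
}

The EnsureHostInPool and ensureBackendPoolDeletedFromNode hunks in azure_vmss.go later in this series pair these same two calls with vm.GetInstanceViewStatus() and vm.GetProvisioningState() to skip VMs that are not in an active state.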
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go index a8d683efc..a26719f80 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go @@ -285,7 +285,7 @@ func (az *Cloud) EnsureStorageAccount(ctx context.Context, accountOptions *Accou } if len(accountOptions.StorageEndpointSuffix) == 0 { - accountOptions.StorageEndpointSuffix = az.cloud.Environment.StorageEndpointSuffix + accountOptions.StorageEndpointSuffix = az.Environment.StorageEndpointSuffix } privateDNSZoneName = fmt.Sprintf(privateDNSZoneNameFmt, accountOptions.StorageType, accountOptions.StorageEndpointSuffix) } @@ -635,10 +635,10 @@ func (az *Cloud) createPrivateDNSZone(ctx context.Context, vnetResourceGroup, pr func (az *Cloud) createVNetLink(ctx context.Context, vNetLinkName, vnetResourceGroup, vnetName, privateDNSZoneName string) error { klog.V(2).Infof("Creating virtual link for vnet(%s) and DNS Zone(%s) in resourceGroup(%s)", vNetLinkName, privateDNSZoneName, vnetResourceGroup) var clientFactory azclient.ClientFactory - if az.cloud.NetworkClientFactory != nil { - clientFactory = az.cloud.NetworkClientFactory + if az.NetworkClientFactory != nil { + clientFactory = az.NetworkClientFactory } else { - clientFactory = az.cloud.ComputeClientFactory + clientFactory = az.ComputeClientFactory } vnetLinkClient := clientFactory.GetVirtualNetworkLinkClient() @@ -840,17 +840,16 @@ func isTagsEqual(account storage.Account, accountOptions *AccountOptions) bool { return true } - for k, v := range account.Tags { - var value string - // nil and empty value should be regarded as equal - if v != nil { - value = *v - } - if accountOptions.Tags[k] != value { + if len(account.Tags) < len(accountOptions.Tags) { + return false + } + + // ensure all tags in accountOptions are in account + for k, v := range accountOptions.Tags { + if pointer.StringDeref(account.Tags[k], "") != v { return false } } - return true } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go index 70ae36338..541c74b18 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go @@ -18,15 +18,17 @@ package provider import ( "context" + "encoding/json" "fmt" "net" "strings" "sync" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" utilnet "k8s.io/utils/net" "k8s.io/utils/pointer" @@ -44,21 +46,21 @@ var strToExtendedLocationType = map[string]network.ExtendedLocationTypes{ "edgezone": network.EdgeZone, } -// lockMap used to lock on entries -type lockMap struct { +// LockMap used to lock on entries +type LockMap struct { sync.Mutex mutexMap map[string]*sync.Mutex } // NewLockMap returns a new lock map -func newLockMap() *lockMap { - return &lockMap{ +func newLockMap() *LockMap { + return &LockMap{ mutexMap: make(map[string]*sync.Mutex), } } // LockEntry acquires a lock associated with the specific entry -func (lm *lockMap) LockEntry(entry string) { +func (lm *LockMap) LockEntry(entry string) { lm.Lock() 
// check if entry does not exists, then add entry mutex, exists := lm.mutexMap[entry] @@ -71,7 +73,7 @@ func (lm *lockMap) LockEntry(entry string) { } // UnlockEntry release the lock associated with the specific entry -func (lm *lockMap) UnlockEntry(entry string) { +func (lm *LockMap) UnlockEntry(entry string) { lm.Lock() defer lm.Unlock() @@ -502,36 +504,6 @@ func IntInSlice(i int, list []int) bool { return false } -func safeAddKeyToStringsSet(set sets.Set[string], key string) sets.Set[string] { - if set != nil { - set.Insert(key) - } else { - set = sets.New[string](key) - } - - return set -} - -func safeRemoveKeyFromStringsSet(set sets.Set[string], key string) (sets.Set[string], bool) { - var has bool - if set != nil { - if set.Has(key) { - has = true - } - set.Delete(key) - } - - return set, has -} - -func setToStrings(set sets.Set[string]) []string { - var res []string - for key := range set { - res = append(res, key) - } - return res -} - func isLocalService(service *v1.Service) bool { return service.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyLocal } @@ -566,3 +538,36 @@ func getResourceGroupAndNameFromNICID(ipConfigurationID string) (string, string, func isInternalLoadBalancer(lb *network.LoadBalancer) bool { return strings.HasSuffix(strings.ToLower(*lb.Name), consts.InternalLoadBalancerNameSuffix) } + +// trimSuffixIgnoreCase trims the suffix from the string, case-insensitive. +// It returns the original string if the suffix is not found. +// The returning string is in lower case. +func trimSuffixIgnoreCase(str, suf string) string { + str = strings.ToLower(str) + suf = strings.ToLower(suf) + if strings.HasSuffix(str, suf) { + return strings.TrimSuffix(str, suf) + } + return str +} + +// ToArmcomputeDisk converts compute.DataDisk to armcompute.DataDisk +// This is a workaround during track2 migration. 
+// TODO: remove this function after compute api is migrated to track2 +func ToArmcomputeDisk(disks []compute.DataDisk) ([]*armcompute.DataDisk, error) { + var result []*armcompute.DataDisk + for _, disk := range disks { + content, err := json.Marshal(disk) + if err != nil { + return nil, err + } + var dataDisk armcompute.DataDisk + err = json.Unmarshal(content, &dataDisk) + if err != nil { + return nil, err + } + result = append(result, &dataDisk) + } + + return result, nil +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go index e405ef3a7..d680f5ced 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go @@ -19,9 +19,9 @@ package provider import ( "context" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" - "github.com/Azure/go-autorest/autorest/azure" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -76,21 +76,17 @@ type VMSet interface { EnsureBackendPoolDeletedFromVMSets(vmSetNamesMap map[string]bool, backendPoolIDs []string) error // AttachDisk attaches a disk to vm - AttachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) (*azure.Future, error) + AttachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) error // DetachDisk detaches a disk from vm DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string) error // WaitForUpdateResult waits for the response of the update request - WaitForUpdateResult(ctx context.Context, future *azure.Future, nodeName types.NodeName, source string) error // GetDataDisks gets a list of data disks attached to the node. - GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, *string, error) + GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]*armcompute.DataDisk, *string, error) // UpdateVM updates a vm UpdateVM(ctx context.Context, nodeName types.NodeName) error - // UpdateVMAsync updates a vm asynchronously - UpdateVMAsync(ctx context.Context, nodeName types.NodeName) (*azure.Future, error) - // GetPowerStatusByNodeName returns the powerState for the specified node. GetPowerStatusByNodeName(name string) (string, error) @@ -112,3 +108,12 @@ type VMSet interface { // DeleteCacheForNode removes the node entry from cache. 
DeleteCacheForNode(nodeName string) error } + +// AttachDiskOptions attach disk options +type AttachDiskOptions struct { + CachingMode compute.CachingTypes + DiskName string + DiskEncryptionSetID string + WriteAcceleratorEnabled bool + Lun int32 +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go index d423a2dfd..3c84f767b 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go @@ -35,11 +35,13 @@ import ( cloudprovider "k8s.io/cloud-provider" "k8s.io/klog/v2" "k8s.io/utils/pointer" + "k8s.io/utils/ptr" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" "sigs.k8s.io/cloud-provider-azure/pkg/metrics" "sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine" + vmutil "sigs.k8s.io/cloud-provider-azure/pkg/util/vm" ) var ( @@ -101,7 +103,7 @@ type ScaleSet struct { nonVmssUniformNodesCache azcache.Resource // lockMap in cache refresh - lockMap *lockMap + lockMap *LockMap } // newScaleSet creates a new ScaleSet. @@ -144,6 +146,7 @@ func newScaleSet(ctx context.Context, az *Cloud) (VMSet, error) { return nil, err } + ss.lockMap = newLockMap() return ss, nil } @@ -281,20 +284,14 @@ func (ss *ScaleSet) GetPowerStatusByNodeName(name string) (powerState string, er if vm.IsVirtualMachineScaleSetVM() { v := vm.AsVirtualMachineScaleSetVM() - if v.InstanceView != nil && v.InstanceView.Statuses != nil { - statuses := *v.InstanceView.Statuses - for _, status := range statuses { - state := pointer.StringDeref(status.Code, "") - if strings.HasPrefix(state, vmPowerStatePrefix) { - return strings.TrimPrefix(state, vmPowerStatePrefix), nil - } - } + if v.InstanceView != nil { + return vmutil.GetVMPowerState(ptr.Deref(v.Name, ""), v.InstanceView.Statuses), nil } } // vm.InstanceView or vm.InstanceView.Statuses are nil when the VM is under deleting. klog.V(3).Infof("InstanceView for node %q is nil, assuming it's deleting", name) - return vmPowerStateUnknown, nil + return consts.VMPowerStateUnknown, nil } // GetProvisioningStateByNodeName returns the provisioningState for the specified node. @@ -1028,6 +1025,8 @@ func getPrimaryIPConfigFromVMSSNetworkConfig(config *compute.VirtualMachineScale // EnsureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is // participating in the specified LoadBalancer Backend Pool, which returns (resourceGroup, vmasName, instanceID, vmssVM, error). func (ss *ScaleSet) EnsureHostInPool(_ *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetNameOfLB string) (string, string, string, *compute.VirtualMachineScaleSetVM, error) { + logger := klog.Background().WithName("EnsureHostInPool"). 
+ WithValues("nodeName", nodeName, "backendPoolID", backendPoolID, "vmSetNameOfLB", vmSetNameOfLB) vmName := mapNodeNameToVMName(nodeName) vm, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault) if err != nil { @@ -1036,13 +1035,20 @@ func (ss *ScaleSet) EnsureHostInPool(_ *v1.Service, nodeName types.NodeName, bac return "", "", "", nil, nil } - klog.Errorf("EnsureHostInPool: failed to get VMSS VM %s: %v", vmName, err) + logger.Error(err, "failed to get vmss vm", "vmName", vmName) if !errors.Is(err, ErrorNotVmssInstance) { return "", "", "", nil, err } } + statuses := vm.GetInstanceViewStatus() + vmPowerState := vmutil.GetVMPowerState(vm.Name, statuses) + provisioningState := vm.GetProvisioningState() + if vmutil.IsNotActiveVMState(provisioningState, vmPowerState) { + logger.V(2).Info("skip updating the node because it is not in an active state", "vmName", vmName, "provisioningState", provisioningState, "vmPowerState", vmPowerState) + return "", "", "", nil, nil + } - klog.V(2).Infof("ensuring node %q of scaleset %q in LB backendpool %q", nodeName, vm.VMSSName, backendPoolID) + logger.V(2).Info("ensuring the vmss node in LB backendpool", "vmss name", vm.VMSSName) // Check scale set name: // - For basic SKU load balancer, return nil if the node's scale set is mismatched with vmSetNameOfLB. @@ -1056,14 +1062,13 @@ func (ss *ScaleSet) EnsureHostInPool(_ *v1.Service, nodeName types.NodeName, bac } if vmSetNameOfLB != "" && needCheck && !strings.EqualFold(vmSetNameOfLB, vm.VMSSName) { - klog.V(3).Infof("EnsureHostInPool skips node %s because it is not in the ScaleSet %s", vmName, vmSetNameOfLB) + logger.V(3).Info("skips the node %s because it is not in the ScaleSet %s", vmName, vmSetNameOfLB) return "", "", "", nil, nil } // Find primary network interface configuration. if vm.VirtualMachineScaleSetVMProperties.NetworkProfileConfiguration.NetworkInterfaceConfigurations == nil { - klog.V(4).Infof("EnsureHostInPool: cannot obtain the primary network interface configuration, of vm %s, "+ - "probably because the vm's being deleted", vmName) + logger.V(4).Info("cannot obtain the primary network interface configuration, of the vm, probably because the vm's being deleted", "vmName", vmName) return "", "", "", nil, nil } @@ -1113,7 +1118,7 @@ func (ss *ScaleSet) EnsureHostInPool(_ *v1.Service, nodeName types.NodeName, bac return "", "", "", nil, err } if !isSameLB { - klog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, oldLBName) + logger.V(4).Info("The node has already been added to an LB, omit adding it to a new one", "lbName", oldLBName) return "", "", "", nil, nil } } @@ -1220,7 +1225,7 @@ func (ss *ScaleSet) ensureVMSSInPool(_ *v1.Service, nodes []*v1.Node, backendPoo // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error. // Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it. 
- if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.VirtualMachineScaleSetsDeallocating) { + if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.ProvisionStateDeleting) { klog.V(3).Infof("ensureVMSSInPool: found vmss %s being deleted, skipping", vmssName) continue } @@ -1492,6 +1497,7 @@ func (ss *ScaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac // ensureBackendPoolDeletedFromNode ensures the loadBalancer backendAddressPools deleted // from the specified node, which returns (resourceGroup, vmasName, instanceID, vmssVM, error). func (ss *ScaleSet) ensureBackendPoolDeletedFromNode(nodeName string, backendPoolIDs []string) (string, string, string, *compute.VirtualMachineScaleSetVM, error) { + logger := klog.Background().WithName("ensureBackendPoolDeletedFromNode").WithValues("nodeName", nodeName, "backendPoolIDs", backendPoolIDs) vm, err := ss.getVmssVM(nodeName, azcache.CacheReadTypeDefault) if err != nil { if errors.Is(err, cloudprovider.InstanceNotFound) { @@ -1502,6 +1508,14 @@ func (ss *ScaleSet) ensureBackendPoolDeletedFromNode(nodeName string, backendPoo return "", "", "", nil, err } + statuses := vm.GetInstanceViewStatus() + vmPowerState := vmutil.GetVMPowerState(vm.Name, statuses) + provisioningState := vm.GetProvisioningState() + if vmutil.IsNotActiveVMState(provisioningState, vmPowerState) { + logger.V(2).Info("skip updating the node because it is not in an active state", "provisioningState", provisioningState, "vmPowerState", vmPowerState) + return "", "", "", nil, nil + } + // Find primary network interface configuration. if vm.VirtualMachineScaleSetVMProperties.NetworkProfileConfiguration.NetworkInterfaceConfigurations == nil { klog.V(4).Infof("ensureBackendPoolDeletedFromNode: cannot obtain the primary network interface configuration, of vm %s, "+ @@ -1678,7 +1692,7 @@ func (ss *ScaleSet) ensureBackendPoolDeletedFromVmssUniform(backendPoolIDs []str // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error. // Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it. - if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.VirtualMachineScaleSetsDeallocating) { + if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.ProvisionStateDeleting) { klog.V(3).Infof("ensureBackendPoolDeletedFromVMSS: found vmss %s being deleted, skipping", pointer.StringDeref(vmss.Name, "")) return true } @@ -2085,7 +2099,7 @@ func (ss *ScaleSet) EnsureBackendPoolDeletedFromVMSets(vmssNamesMap map[string]b // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error. // Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it. 
- if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.VirtualMachineScaleSetsDeallocating) { + if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.ProvisionStateDeleting) { klog.V(3).Infof("EnsureBackendPoolDeletedFromVMSets: found vmss %s being deleted, skipping", vmssName) continue } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_cache.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_cache.go index 592e05d16..11949faf9 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_cache.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_cache.go @@ -25,12 +25,12 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" "k8s.io/utils/pointer" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" + utilsets "sigs.k8s.io/cloud-provider-azure/pkg/util/sets" ) type VMSSVirtualMachineEntry struct { @@ -48,11 +48,11 @@ type VMSSEntry struct { } type NonVmssUniformNodesEntry struct { - VMSSFlexVMNodeNames sets.Set[string] - VMSSFlexVMProviderIDs sets.Set[string] - AvSetVMNodeNames sets.Set[string] - AvSetVMProviderIDs sets.Set[string] - ClusterNodeNames sets.Set[string] + VMSSFlexVMNodeNames *utilsets.IgnoreCaseSet + VMSSFlexVMProviderIDs *utilsets.IgnoreCaseSet + AvSetVMNodeNames *utilsets.IgnoreCaseSet + AvSetVMProviderIDs *utilsets.IgnoreCaseSet + ClusterNodeNames *utilsets.IgnoreCaseSet } type VMManagementType string @@ -288,9 +288,6 @@ func (ss *ScaleSet) DeleteCacheForNode(nodeName string) error { } func (ss *ScaleSet) updateCache(nodeName, resourceGroupName, vmssName, instanceID string, updatedVM *compute.VirtualMachineScaleSetVM) error { - if ss.ManagedDiskController.DisableUpdateCache { - return nil - } // lock the VMSS entry to ensure a consistent view of the VM map when there are concurrent updates. 
cacheKey := getVMSSVMCacheKey(resourceGroupName, vmssName) ss.lockMap.LockEntry(cacheKey) @@ -327,10 +324,10 @@ func (ss *ScaleSet) updateCache(nodeName, resourceGroupName, vmssName, instanceI func (ss *ScaleSet) newNonVmssUniformNodesCache() (azcache.Resource, error) { getter := func(key string) (interface{}, error) { - vmssFlexVMNodeNames := sets.New[string]() - vmssFlexVMProviderIDs := sets.New[string]() - avSetVMNodeNames := sets.New[string]() - avSetVMProviderIDs := sets.New[string]() + vmssFlexVMNodeNames := utilsets.NewString() + vmssFlexVMProviderIDs := utilsets.NewString() + avSetVMNodeNames := utilsets.NewString() + avSetVMProviderIDs := utilsets.NewString() resourceGroups, err := ss.GetResourceGroups() if err != nil { return nil, err diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_repo.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_repo.go index 6721dceab..89b5a900f 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_repo.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss_repo.go @@ -39,7 +39,7 @@ func (az *Cloud) CreateOrUpdateVMSS(resourceGroupName string, VMScaleSetName str klog.Errorf("CreateOrUpdateVMSS: error getting vmss(%s): %v", VMScaleSetName, rerr) return rerr } - if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.VirtualMachineScaleSetsDeallocating) { + if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.ProvisionStateDeleting) { klog.V(3).Infof("CreateOrUpdateVMSS: found vmss %s being deleted, skipping", VMScaleSetName) return nil } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex.go index c1a4f8893..646785c75 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmssflex.go @@ -27,16 +27,19 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" cloudprovider "k8s.io/cloud-provider" "k8s.io/klog/v2" "k8s.io/utils/pointer" + "k8s.io/utils/ptr" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" "sigs.k8s.io/cloud-provider-azure/pkg/metrics" + vmutil "sigs.k8s.io/cloud-provider-azure/pkg/util/vm" ) var ( @@ -48,14 +51,13 @@ var ( type FlexScaleSet struct { *Cloud - vmssFlexCache azcache.Resource - + vmssFlexCache azcache.Resource vmssFlexVMNameToVmssID *sync.Map vmssFlexVMNameToNodeName *sync.Map vmssFlexVMCache azcache.Resource // lockMap in cache refresh - lockMap *lockMap + lockMap *LockMap } func newFlexScaleSet(ctx context.Context, az *Cloud) (VMSet, error) { @@ -268,19 +270,13 @@ func (fs *FlexScaleSet) GetPowerStatusByNodeName(name string) (powerState string return powerState, err } - if vm.InstanceView != nil && vm.InstanceView.Statuses != nil { - statuses := *vm.InstanceView.Statuses - for _, status := range statuses { - state := pointer.StringDeref(status.Code, "") - if strings.HasPrefix(state, vmPowerStatePrefix) { - return strings.TrimPrefix(state, vmPowerStatePrefix), nil - } - } + if vm.InstanceView != nil { + return vmutil.GetVMPowerState(ptr.Deref(vm.Name, ""), vm.InstanceView.Statuses), nil } // vm.InstanceView or 
vm.InstanceView.Statuses are nil when the VM is under deleting. klog.V(3).Infof("InstanceView for node %q is nil, assuming it's deleting", name) - return vmPowerStateUnknown, nil + return consts.VMPowerStateUnknown, nil } // GetPrimaryInterface gets machine primary network interface by node name. @@ -617,7 +613,7 @@ func (fs *FlexScaleSet) ensureVMSSFlexInPool(_ *v1.Service, nodes []*v1.Node, ba // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error. // Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it. - if vmssFlex.ProvisioningState != nil && strings.EqualFold(*vmssFlex.ProvisioningState, consts.VirtualMachineScaleSetsDeallocating) { + if vmssFlex.ProvisioningState != nil && strings.EqualFold(*vmssFlex.ProvisioningState, consts.ProvisionStateDeleting) { klog.V(3).Infof("ensureVMSSFlexInPool: found vmss %s being deleted, skipping", vmssFlexID) continue } @@ -791,7 +787,7 @@ func (fs *FlexScaleSet) EnsureBackendPoolDeletedFromVMSets(vmssNamesMap map[stri // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error. // Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it. - if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.VirtualMachineScaleSetsDeallocating) { + if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, consts.ProvisionStateDeleting) { klog.V(3).Infof("fs.EnsureBackendPoolDeletedFromVMSets: found vmss %s being deleted, skipping", vmssName) continue } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine/virtualmachine.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine/virtualmachine.go index bd6b77535..924f08c8e 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine/virtualmachine.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine/virtualmachine.go @@ -18,7 +18,10 @@ package virtualmachine import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "k8s.io/utils/pointer" + + "sigs.k8s.io/cloud-provider-azure/pkg/consts" ) type Variant string @@ -145,6 +148,36 @@ func (vm *VirtualMachine) AsVirtualMachineScaleSetVM() *compute.VirtualMachineSc return vm.vmssVM } +func (vm *VirtualMachine) GetInstanceViewStatus() *[]compute.InstanceViewStatus { + if vm.IsVirtualMachine() && vm.vm != nil && + vm.vm.VirtualMachineProperties != nil && + vm.vm.VirtualMachineProperties.InstanceView != nil { + return vm.vm.VirtualMachineProperties.InstanceView.Statuses + } + if vm.IsVirtualMachineScaleSetVM() && + vm.vmssVM != nil && + vm.vmssVM.VirtualMachineScaleSetVMProperties != nil && + vm.vmssVM.VirtualMachineScaleSetVMProperties.InstanceView != nil { + return vm.vmssVM.VirtualMachineScaleSetVMProperties.InstanceView.Statuses + } + return nil +} + +func (vm *VirtualMachine) GetProvisioningState() string { + if vm.IsVirtualMachine() && vm.vm != nil && + vm.vm.VirtualMachineProperties != nil && + vm.vm.VirtualMachineProperties.ProvisioningState != nil { + return *vm.vm.VirtualMachineProperties.ProvisioningState + } + if vm.IsVirtualMachineScaleSetVM() && + vm.vmssVM != nil && + vm.vmssVM.VirtualMachineScaleSetVMProperties != nil && + vm.vmssVM.VirtualMachineScaleSetVMProperties.ProvisioningState != nil { + return *vm.vmssVM.VirtualMachineScaleSetVMProperties.ProvisioningState + } + return consts.ProvisioningStateUnknown +} + // StringMap returns a map of strings built from the 
map of string pointers. The empty string is // used for nil pointers. func stringMap(msp map[string]*string) map[string]string { diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/sets/string.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/sets/string.go new file mode 100644 index 000000000..2562fcf6e --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/sets/string.go @@ -0,0 +1,100 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sets + +import ( + "strings" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// IgnoreCaseSet is a set of strings that is case-insensitive. +type IgnoreCaseSet struct { + set sets.Set[string] +} + +// NewString creates a new IgnoreCaseSet with the given items. +func NewString(items ...string) *IgnoreCaseSet { + var lowerItems []string + for _, item := range items { + lowerItems = append(lowerItems, strings.ToLower(item)) + } + set := sets.New[string](lowerItems...) + return &IgnoreCaseSet{set: set} +} + +// Insert adds the given items to the set. It only works if the set is initialized. +func (s *IgnoreCaseSet) Insert(items ...string) { + var lowerItems []string + for _, item := range items { + lowerItems = append(lowerItems, strings.ToLower(item)) + } + for _, item := range lowerItems { + s.set.Insert(item) + } +} + +// SafeInsert creates a new IgnoreCaseSet with the given items if the set is not initialized. +// This is the recommended way to insert elements into the set. +func SafeInsert(s *IgnoreCaseSet, items ...string) *IgnoreCaseSet { + if s.Initialized() { + s.Insert(items...) + return s + } + return NewString(items...) +} + +// Delete removes the given item from the set. +// It will be a no-op if the set is not initialized or the item is not in the set. +func (s *IgnoreCaseSet) Delete(item string) bool { + var has bool + item = strings.ToLower(item) + if s.Initialized() && s.Has(item) { + s.set.Delete(item) + has = true + } + return has +} + +// Has returns true if the given item is in the set, and the set is initialized. +func (s *IgnoreCaseSet) Has(item string) bool { + if !s.Initialized() { + return false + } + return s.set.Has(strings.ToLower(item)) +} + +// Initialized returns true if the set is initialized. +func (s *IgnoreCaseSet) Initialized() bool { + return s != nil && s.set != nil +} + +// UnsortedList returns the items in the set in an arbitrary order. +func (s *IgnoreCaseSet) UnsortedList() []string { + if !s.Initialized() { + return []string{} + } + return s.set.UnsortedList() +} + +// Len returns the number of items in the set. 
+func (s *IgnoreCaseSet) Len() int { + if !s.Initialized() { + return 0 + } + return s.set.Len() +} diff --git a/vendor/k8s.io/cloud-provider/volume/constants.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/string/string.go similarity index 59% rename from vendor/k8s.io/cloud-provider/volume/constants.go rename to vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/string/string.go index d05f64ae2..10510ecc7 100644 --- a/vendor/k8s.io/cloud-provider/volume/constants.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/string/string.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Kubernetes Authors. +Copyright 2024 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,13 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -package volume +package stringutils -const ( - // ProvisionedVolumeName is the name of a volume in an external cloud - // that is being provisioned and thus should be ignored by rest of Kubernetes. - ProvisionedVolumeName = "placeholder-for-provisioning" +import "strings" - // LabelMultiZoneDelimiter separates zones for volumes - LabelMultiZoneDelimiter = "__" -) +// HasPrefixCaseInsensitive returns true if the string has the prefix, case-insensitive. +func HasPrefixCaseInsensitive(s string, prefix string) bool { + return strings.HasPrefix(strings.ToLower(s), strings.ToLower(prefix)) +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/vm/vm.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/vm/vm.go new file mode 100644 index 000000000..a5084f30b --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/util/vm/vm.go @@ -0,0 +1,55 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vm + +import ( + "strings" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + + "k8s.io/klog/v2" + "k8s.io/utils/ptr" + + "sigs.k8s.io/cloud-provider-azure/pkg/consts" + stringutils "sigs.k8s.io/cloud-provider-azure/pkg/util/string" +) + +// GetVMPowerState returns the power state of the VM +func GetVMPowerState(vmName string, vmStatuses *[]compute.InstanceViewStatus) string { + logger := klog.Background().WithName("getVMSSVMPowerState").WithValues("vmName", vmName) + if vmStatuses != nil { + for _, status := range *vmStatuses { + state := ptr.Deref(status.Code, "") + if stringutils.HasPrefixCaseInsensitive(state, consts.VMPowerStatePrefix) { + return strings.TrimPrefix(state, consts.VMPowerStatePrefix) + } + } + } + logger.V(3).Info("vm status is nil in the instance view or there is no power state in the status") + return consts.VMPowerStateUnknown +} + +// IsNotActiveVMState checks if the VM is in the active states +func IsNotActiveVMState(provisioningState, powerState string) bool { + return strings.EqualFold(provisioningState, consts.ProvisioningStateDeleting) || + strings.EqualFold(provisioningState, consts.ProvisioningStateUnknown) || + strings.EqualFold(powerState, consts.VMPowerStateUnknown) || + strings.EqualFold(powerState, consts.VMPowerStateStopped) || + strings.EqualFold(powerState, consts.VMPowerStateStopping) || + strings.EqualFold(powerState, consts.VMPowerStateDeallocated) || + strings.EqualFold(powerState, consts.VMPowerStateDeallocating) +} From 300c29256d2b770181400c2226ad64e45cba47c3 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Thu, 22 Feb 2024 13:38:17 +0000 Subject: [PATCH 074/157] doc: cut v1.24.0 release --- README.md | 2 +- charts/index.yaml | 100 +++--- charts/latest/blob-csi-driver-v1.24.0.tgz | Bin 0 -> 5958 bytes charts/latest/blob-csi-driver/Chart.yaml | 4 +- charts/latest/blob-csi-driver/values.yaml | 4 +- charts/v1.24.0/blob-csi-driver-v1.24.0.tgz | Bin 0 -> 5959 bytes charts/v1.24.0/blob-csi-driver/Chart.yaml | 5 + .../blob-csi-driver/templates/NOTES.txt | 5 + .../blob-csi-driver/templates/_helpers.tpl | 49 +++ .../templates/csi-blob-controller.yaml | 216 ++++++++++++ .../templates/csi-blob-driver.yaml | 16 + .../templates/csi-blob-node.yaml | 327 ++++++++++++++++++ .../templates/rbac-csi-blob-controller.yaml | 115 ++++++ .../templates/rbac-csi-blob-node.yaml | 29 ++ .../serviceaccount-csi-blob-controller.yaml | 17 + .../serviceaccount-csi-blob-node.yaml | 17 + charts/v1.24.0/blob-csi-driver/values.yaml | 181 ++++++++++ deploy/csi-blob-controller.yaml | 2 +- deploy/csi-blob-node.yaml | 6 +- deploy/v1.24.0/csi-blob-controller.yaml | 144 ++++++++ deploy/v1.24.0/csi-blob-driver.yaml | 12 + deploy/v1.24.0/csi-blob-node.yaml | 242 +++++++++++++ deploy/v1.24.0/kustomization.yaml | 10 + deploy/v1.24.0/rbac-csi-blob-controller.yaml | 108 ++++++ deploy/v1.24.0/rbac-csi-blob-node.yaml | 30 ++ docs/install-blob-csi-driver.md | 2 +- docs/install-csi-driver-v1.24.0.md | 47 +++ 27 files changed, 1639 insertions(+), 51 deletions(-) create mode 100644 charts/latest/blob-csi-driver-v1.24.0.tgz create mode 100644 charts/v1.24.0/blob-csi-driver-v1.24.0.tgz create mode 100644 charts/v1.24.0/blob-csi-driver/Chart.yaml create mode 100644 charts/v1.24.0/blob-csi-driver/templates/NOTES.txt create mode 100644 charts/v1.24.0/blob-csi-driver/templates/_helpers.tpl create mode 100644 charts/v1.24.0/blob-csi-driver/templates/csi-blob-controller.yaml create mode 100644 charts/v1.24.0/blob-csi-driver/templates/csi-blob-driver.yaml create 
mode 100644 charts/v1.24.0/blob-csi-driver/templates/csi-blob-node.yaml create mode 100644 charts/v1.24.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml create mode 100644 charts/v1.24.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml create mode 100644 charts/v1.24.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml create mode 100644 charts/v1.24.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml create mode 100644 charts/v1.24.0/blob-csi-driver/values.yaml create mode 100644 deploy/v1.24.0/csi-blob-controller.yaml create mode 100644 deploy/v1.24.0/csi-blob-driver.yaml create mode 100644 deploy/v1.24.0/csi-blob-node.yaml create mode 100644 deploy/v1.24.0/kustomization.yaml create mode 100644 deploy/v1.24.0/rbac-csi-blob-controller.yaml create mode 100644 deploy/v1.24.0/rbac-csi-blob-node.yaml create mode 100644 docs/install-csi-driver-v1.24.0.md diff --git a/README.md b/README.md index 8c6e5be41..d8020729f 100644 --- a/README.md +++ b/README.md @@ -19,9 +19,9 @@ Disclaimer: Deploying this driver manually is not an officially supported Micros |driver version |Image | supported k8s version | |----------------|------------------------------------------------------|-----------------------| |master branch |mcr.microsoft.com/k8s/csi/blob-csi:latest | 1.21+ | +|v1.24.0 |mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.24.0 | 1.21+ | |v1.23.3 |mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.23.3 | 1.21+ | |v1.22.5 |mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.22.5 | 1.21+ | -|v1.21.7 |mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.21.7 | 1.21+ | ### Driver parameters Please refer to `blob.csi.azure.com` [driver parameters](./docs/driver-parameters.md) diff --git a/charts/index.yaml b/charts/index.yaml index de298b5b9..e22a9b0d7 100644 --- a/charts/index.yaml +++ b/charts/index.yaml @@ -1,18 +1,36 @@ apiVersion: v1 entries: blob-csi-driver: + - apiVersion: v1 + appVersion: v1.24.0 + created: "2024-02-22T13:37:24.074374181Z" + description: Azure Blob Storage CSI driver + digest: 5cb79d7ed2379e34d84af10240e40bc6461ca5eb317f96aaf83984e4c144a29c + name: blob-csi-driver + urls: + - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/latest/blob-csi-driver-v1.24.0.tgz + version: v1.24.0 + - apiVersion: v1 + appVersion: v1.24.0 + created: "2024-02-22T13:37:24.09898356Z" + description: Azure Blob Storage CSI driver + digest: c9ecca2dde77d9557f3917513c8f5adcb861c2cfa9fe1e08f5bef056ab583efd + name: blob-csi-driver + urls: + - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.24.0/blob-csi-driver-v1.24.0.tgz + version: v1.24.0 - apiVersion: v1 appVersion: v1.23.3 - created: "2024-02-01T09:23:18.012841151Z" + created: "2024-02-22T13:37:24.097977419Z" description: Azure Blob Storage CSI driver - digest: 7f9c641bf822c5421fc291dd65cbf274bec66ab363078fba618a9ce7dec400eb + digest: 4acef5e84bcbc01fa624b73f14d1331ee14b14e52b4db018f0037fd2ada015c7 name: blob-csi-driver urls: - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.23.3/blob-csi-driver-v1.23.3.tgz version: v1.23.3 - apiVersion: v1 appVersion: v1.23.2 - created: "2024-02-01T09:23:18.011599935Z" + created: "2024-02-22T13:37:24.097000052Z" description: Azure Blob Storage CSI driver digest: 057d6658c5879ee7e564d59275366521dc0a2e311c0527e570eaccd544622e60 name: blob-csi-driver @@ -21,7 +39,7 @@ entries: version: v1.23.2 - apiVersion: v1 appVersion: v1.23.1 - created: "2024-02-01T09:23:18.010964434Z" + created: 
"2024-02-22T13:37:24.096228034Z" description: Azure Blob Storage CSI driver digest: 66215f12a4e3acdcf09416d817b737e14546058b081a2cfd8bf9ef507229ca07 name: blob-csi-driver @@ -30,7 +48,7 @@ entries: version: v1.23.1 - apiVersion: v1 appVersion: v1.23.0 - created: "2024-02-01T09:23:18.010356842Z" + created: "2024-02-22T13:37:24.095605967Z" description: Azure Blob Storage CSI driver digest: 57151e21e33660522f25694bd8ae985e5e17c7ffe09904ad2af4025e8bf1da72 name: blob-csi-driver @@ -39,16 +57,16 @@ entries: version: v1.23.0 - apiVersion: v1 appVersion: v1.22.5 - created: "2024-02-01T09:23:18.009743904Z" + created: "2024-02-22T13:37:24.094798019Z" description: Azure Blob Storage CSI driver - digest: 653f8ea6121aa2e9d1bee8fc1e80f30960f0e81b40d356a019da9fa03c4af5e4 + digest: ff3c2c2e05dd048dd0af3e5c7d002eae2928a5d17fb269a1e4d5cadd30e8ab51 name: blob-csi-driver urls: - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.22.5/blob-csi-driver-v1.22.5.tgz version: v1.22.5 - apiVersion: v1 appVersion: v1.22.4 - created: "2024-02-01T09:23:18.009098476Z" + created: "2024-02-22T13:37:24.094172861Z" description: Azure Blob Storage CSI driver digest: 6c38e79d2f50616daac0658cfa5b1a569e6ff8ce8f24ed40f563e87fb1d1340a name: blob-csi-driver @@ -57,7 +75,7 @@ entries: version: v1.22.4 - apiVersion: v1 appVersion: v1.22.3 - created: "2024-02-01T09:23:18.008446506Z" + created: "2024-02-22T13:37:24.093547882Z" description: Azure Blob Storage CSI driver digest: 6cdee296d22ecd330f477f2ca6da51b07320c546c04ae46c23eef48146b772c1 name: blob-csi-driver @@ -66,7 +84,7 @@ entries: version: v1.22.3 - apiVersion: v1 appVersion: v1.22.2 - created: "2024-02-01T09:23:18.007784088Z" + created: "2024-02-22T13:37:24.092935133Z" description: Azure Blob Storage CSI driver digest: 259e66dc12db7310fe1c51e49c964398e0a6b7d511133916dd7d25f748f0b791 name: blob-csi-driver @@ -75,7 +93,7 @@ entries: version: v1.22.2 - apiVersion: v1 appVersion: v1.22.1 - created: "2024-02-01T09:23:18.006937854Z" + created: "2024-02-22T13:37:24.092139192Z" description: Azure Blob Storage CSI driver digest: 8329d477d55c82f97bb09fb172c5f39a1677bedc13c7410bd93b306194516438 name: blob-csi-driver @@ -84,7 +102,7 @@ entries: version: v1.22.1 - apiVersion: v1 appVersion: v1.21.7 - created: "2024-02-01T09:23:18.005346707Z" + created: "2024-02-22T13:37:24.091321566Z" description: Azure Blob Storage CSI driver digest: 1095721182d611e2556c611dd330758d8130fe66493db4f9189586a9219896d3 name: blob-csi-driver @@ -93,7 +111,7 @@ entries: version: v1.21.7 - apiVersion: v1 appVersion: v1.21.6 - created: "2024-02-01T09:23:18.004752018Z" + created: "2024-02-22T13:37:24.090397855Z" description: Azure Blob Storage CSI driver digest: d5ba1f92795ec45970eb6e5fc54aa13a5684f9936216c064f8a3843bf722bf54 name: blob-csi-driver @@ -102,7 +120,7 @@ entries: version: v1.21.6 - apiVersion: v1 appVersion: v1.21.5 - created: "2024-02-01T09:23:18.004171073Z" + created: "2024-02-22T13:37:24.089053236Z" description: Azure Blob Storage CSI driver digest: b403e9d49abfe076ecd83d6dd50166347ee4305f33dc840019474b2876723b9b name: blob-csi-driver @@ -111,7 +129,7 @@ entries: version: v1.21.5 - apiVersion: v1 appVersion: v1.21.4 - created: "2024-02-01T09:23:18.003597266Z" + created: "2024-02-22T13:37:24.088260575Z" description: Azure Blob Storage CSI driver digest: e4fa13670caf6b0d3e9fefa55d100daa439cd7187dabd45318ab03c7d4b17710 name: blob-csi-driver @@ -120,7 +138,7 @@ entries: version: v1.21.4 - apiVersion: v1 appVersion: v1.20.3 - created: "2024-02-01T09:23:18.002987933Z" + 
created: "2024-02-22T13:37:24.087502317Z" description: Azure Blob Storage CSI driver digest: 8c2c20547b2e0e1b39d2f2efd04c1bd778f14af5feae2bda86d722dac3c02643 name: blob-csi-driver @@ -129,7 +147,7 @@ entries: version: v1.20.3 - apiVersion: v1 appVersion: v1.19.6 - created: "2024-02-01T09:23:18.00174466Z" + created: "2024-02-22T13:37:24.086185619Z" description: Azure Blob Storage CSI driver digest: 0007ef225b5658d3989aa6fdc3a91a4b33696a438eee46ad9a675af615cbdf21 name: blob-csi-driver @@ -138,7 +156,7 @@ entries: version: v1.19.6 - apiVersion: v1 appVersion: v1.19.5 - created: "2024-02-01T09:23:18.001136542Z" + created: "2024-02-22T13:37:24.085599248Z" description: Azure Blob Storage CSI driver digest: 183c3e5cd84b709f1455cc7c84ed5bd573e8a24149fd6442d38999835b0a1711 name: blob-csi-driver @@ -147,7 +165,7 @@ entries: version: v1.19.5 - apiVersion: v1 appVersion: v1.18.0 - created: "2024-02-01T09:23:18.000481131Z" + created: "2024-02-22T13:37:24.084845596Z" description: Azure Blob Storage CSI driver digest: 3eac15488da5be7d1e78431929f7cda35bceb1af3fe107ffbd84606e047c9204 name: blob-csi-driver @@ -156,7 +174,7 @@ entries: version: v1.18.0 - apiVersion: v1 appVersion: v1.17.0 - created: "2024-02-01T09:23:17.998975288Z" + created: "2024-02-22T13:37:24.084099665Z" description: Azure Blob Storage CSI driver digest: 22cfa17fc5e8d771ff8edd26729266a9a8ee55c0e150df85ef15698f7fe985e9 name: blob-csi-driver @@ -165,7 +183,7 @@ entries: version: v1.17.0 - apiVersion: v1 appVersion: v1.16.0 - created: "2024-02-01T09:23:17.997778881Z" + created: "2024-02-22T13:37:24.083286845Z" description: Azure Blob Storage CSI driver digest: bf6249c0e3e3d3d009d4c79ceb7fda9a56c0565b969de753628792ea3ea5ece8 name: blob-csi-driver @@ -174,7 +192,7 @@ entries: version: v1.16.0 - apiVersion: v1 appVersion: v1.15.0 - created: "2024-02-01T09:23:17.996897911Z" + created: "2024-02-22T13:37:24.081520549Z" description: Azure Blob Storage CSI driver digest: 8daa35cd4957695cb64b45da05a15b4020df5545a8ac44c4668dad4bba82c8a9 name: blob-csi-driver @@ -183,7 +201,7 @@ entries: version: v1.15.0 - apiVersion: v1 appVersion: v1.14.0 - created: "2024-02-01T09:23:17.995995384Z" + created: "2024-02-22T13:37:24.080610198Z" description: Azure Blob Storage CSI driver digest: 442bc579b231aab626b9e474e2c0ed3f101d47d61c99aa9a7f863af7ce268d9d name: blob-csi-driver @@ -192,7 +210,7 @@ entries: version: v1.14.0 - apiVersion: v1 appVersion: v1.13.0 - created: "2024-02-01T09:23:17.995159855Z" + created: "2024-02-22T13:37:24.079727411Z" description: Azure Blob Storage CSI driver digest: b577b0b771138109aa90eb09d56fc07273ca0b584a263ee8f789e35796279f31 name: blob-csi-driver @@ -201,7 +219,7 @@ entries: version: v1.13.0 - apiVersion: v1 appVersion: v1.12.0 - created: "2024-02-01T09:23:17.99433658Z" + created: "2024-02-22T13:37:24.078746878Z" description: Azure Blob Storage CSI driver digest: 124e87af2581b374b89a39940698620c23d3eae6dcee518d302461ffea93e9a8 name: blob-csi-driver @@ -210,7 +228,7 @@ entries: version: v1.12.0 - apiVersion: v1 appVersion: v1.11.0 - created: "2024-02-01T09:23:17.993482048Z" + created: "2024-02-22T13:37:24.07789702Z" description: Azure Blob Storage CSI driver digest: 07c4d76017491b3d0bdd70de90e814096938bf7916da0c149c3805294bd57560 name: blob-csi-driver @@ -219,7 +237,7 @@ entries: version: v1.11.0 - apiVersion: v1 appVersion: v1.10.0 - created: "2024-02-01T09:23:17.99257752Z" + created: "2024-02-22T13:37:24.077044763Z" description: Azure Blob Storage CSI driver digest: 79716efa958385adf57eb3570843e1b4512d8c801e8e070625e94264f3e917a9 
name: blob-csi-driver @@ -228,7 +246,7 @@ entries: version: v1.10.0 - apiVersion: v1 appVersion: v1.9.0 - created: "2024-02-01T09:23:18.019074348Z" + created: "2024-02-22T13:37:24.106306714Z" description: Azure Blob Storage CSI driver digest: fca0b9215d3277346f68c643fb3ead75158971f0d1945ab01ec559196f3cf842 name: blob-csi-driver @@ -237,7 +255,7 @@ entries: version: v1.9.0 - apiVersion: v1 appVersion: v1.8.0 - created: "2024-02-01T09:23:18.018176202Z" + created: "2024-02-22T13:37:24.10548327Z" description: Azure Blob Storage CSI driver digest: 3b78e2ab4f33577c54d4f57276c824717d2ad2aa3741210e938fcaf927bc751f name: blob-csi-driver @@ -246,7 +264,7 @@ entries: version: v1.8.0 - apiVersion: v1 appVersion: v1.7.0 - created: "2024-02-01T09:23:18.017448813Z" + created: "2024-02-22T13:37:24.104677087Z" description: Azure Blob Storage CSI driver digest: 28da5b55c3d2689d6da85eb7da344385e9cb99bdb2af18c24fea93670abfe7ea name: blob-csi-driver @@ -255,7 +273,7 @@ entries: version: v1.7.0 - apiVersion: v1 appVersion: v1.6.0 - created: "2024-02-01T09:23:18.016717326Z" + created: "2024-02-22T13:37:24.102887936Z" description: Azure Blob Storage CSI driver digest: 6f24f2e6623f6f8862e47d4fbdf13b5f351ceec6bb9a4591ef7fc2fca9fc1eef name: blob-csi-driver @@ -264,7 +282,7 @@ entries: version: v1.6.0 - apiVersion: v1 appVersion: v1.5.0 - created: "2024-02-01T09:23:18.016021449Z" + created: "2024-02-22T13:37:24.102110093Z" description: Azure Blob Storage CSI driver digest: 95d14c9b70b319760d388ea47727c8c97e9287867a8852aeb67b7175b52fe8f5 name: blob-csi-driver @@ -273,7 +291,7 @@ entries: version: v1.5.0 - apiVersion: v1 appVersion: v1.4.1 - created: "2024-02-01T09:23:18.015308202Z" + created: "2024-02-22T13:37:24.101126167Z" description: Azure Blob Storage CSI driver digest: 5fcf69c449f065fa1d5722e5a7fed8a28000efa790907e9ff4b552c5fbd16d22 name: blob-csi-driver @@ -282,7 +300,7 @@ entries: version: v1.4.1 - apiVersion: v1 appVersion: v1.4.0 - created: "2024-02-01T09:23:18.014628729Z" + created: "2024-02-22T13:37:24.100368414Z" description: Azure Blob Storage CSI driver digest: b466543344a6411f6130ba87b093955d39ab8614c6b4ed8505a0a0c96073cb33 name: blob-csi-driver @@ -291,7 +309,7 @@ entries: version: v1.4.0 - apiVersion: v1 appVersion: v1.3.0 - created: "2024-02-01T09:23:18.013782983Z" + created: "2024-02-22T13:37:24.099690475Z" description: Azure Blob Storage CSI driver digest: 58d02cb70a3a966b349d62e880b7149fb06ac009474e35e580784fd3c98a5b07 name: blob-csi-driver @@ -300,7 +318,7 @@ entries: version: v1.3.0 - apiVersion: v1 appVersion: v1.2.0 - created: "2024-02-01T09:23:18.002358744Z" + created: "2024-02-22T13:37:24.086774045Z" description: Azure Blob Storage CSI driver digest: 27fb89f20b5fddc7329e6d7c2374857b22c1d61592e397a53f47121eea68c344 name: blob-csi-driver @@ -309,7 +327,7 @@ entries: version: v1.2.0 - apiVersion: v1 appVersion: v1.1.0 - created: "2024-02-01T09:23:17.991741748Z" + created: "2024-02-22T13:37:24.076165071Z" description: Azure Blob Storage CSI driver digest: a251a55243de207c69ef53f72abee45e93b72fa4fc43dc204b7f1cdfd459acdb name: blob-csi-driver @@ -318,7 +336,7 @@ entries: version: v1.1.0 - apiVersion: v1 appVersion: v1.0.0 - created: "2024-02-01T09:23:17.990943476Z" + created: "2024-02-22T13:37:24.075267487Z" description: Azure Blob Storage CSI driver digest: e83f037a165eafc83a978bd7e6bf6221b052ac34363aecb12e6a73607dc58b89 name: blob-csi-driver @@ -327,11 +345,11 @@ entries: version: v1.0.0 - apiVersion: v1 appVersion: latest - created: "2024-02-01T09:23:17.99033949Z" + created: 
"2024-02-22T13:37:24.073697222Z" description: Azure Blob Storage CSI driver - digest: 7e2732acd8fe3e6b26407065daa11adc28cfd6d67c83f60b38546ecf4bb88704 + digest: 2bd22695556c9625b6f11056cdd0058af1e02d6e85917944efde0bfae1bba3d1 name: blob-csi-driver urls: - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/latest/blob-csi-driver-v0.0.0.tgz version: v0.0.0 -generated: "2024-02-01T09:23:17.989245883Z" +generated: "2024-02-22T13:37:24.072874067Z" diff --git a/charts/latest/blob-csi-driver-v1.24.0.tgz b/charts/latest/blob-csi-driver-v1.24.0.tgz new file mode 100644 index 0000000000000000000000000000000000000000..63765c77af83794521818c89655d5240649206a2 GIT binary patch literal 5958 zcmV-M7rE#kiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PKBjbK5rZcwg&NV3gim@BX2l{E(|={KB!6R1-(`NKSh9?{qv6 zxsq@u0TuuitFFKMZvc?uvqZ^?o%V#jXiP447vN&C*av9hvWaC2VmX{FfHyDZn9Ifz zd+v98YpqtRb$WcP|KDo0vj1R~`+nljM(4QEsyQHRPJF2!p8pM9`vEDT6X;}up5BbuL4-V{@d+#=QO+ikB{1i_5U1=cz6cg z8bT8+U<5wvBG2XxkJy|EHkA#VdFrjpCJG864n7k^GQRAhCKIB07fgUtkRX&CH6wx2 zz!J~mRj(fSf$Lr~m)OfL8cc^wUULvYWep))kZaBsM5zkk)rcv<_2W(nn#YaSW|A&h z00kkgIh#N=icuF|bxF|cG}@ib1SxZ%rx)4?W<*Giw-~7E94=FfN8QsbR2Z`@$Or`a zeVZxfrbFtJ{@6HLPpW3SEO5dd<2snQ#J*5ZYPDCd&Skqy)Xi6;ZMAK$;N@f8E%V&;d zGb%Bmz?%x6rIA@aCkregn$u;ork`I2t~)l|bkQ%r)?OhCnf=XMhF@>_0hR`2h&`y) z1n>p1;oP=aK&6^rn*)~6MUn@AUZJ1WP}IL)qYxHh11!)N;<{?gri+87jH4wB8fXks zO~(|w0uY(1hv*BY`e7hI{ml{kLK})6qmnb%1>RL(+A^zg_0SR+y+Vpv#HMH*vJmnc z&Ur+iaXtN^)F#krIottJjZd(hofL(BWi)8fxKNzh%Gyw^S;=u8ZUx zkk1@Z^OEGzvn6;ntAd_fj~I0zPS8<3bYE~`qv z&8Q8&j2D2Oou*m@en{H#Ie@3_x@OH!@-x(5L@b2_&$8V>NZ^*ui6l0574coCt@`}HVa%wEhvWw zJaNHMN+4z&ISkaiO5}hK)Ip4*kK;kqy@r&;a$<{X#$^|Eemp%oF5dg*fL%F{!8$d) zJ-EciqKouj0llIhv66?bQAp9l3&YQ~iz&pfTsBiO?{?9N#$_Z~7;;4$zT(RxQbd0# z<~_YPowQmXN%)k*ZvluTlV4QaLBpRL)UOX#iDN-@0rv4Fp_scE4P4 zI!Qg%2niJuyKV%(0*!%J{*8LOaojkm7j^3t^y|bOJB?1GWjYFwAMyQ_5A+>zVHkLc zo?hlf{YP^)G(AIY{>gW+gx=fSpf0dIhife7UDWin>5hG+-*1QqV~IU~Ks88@By_Q- z?5}I!9udM=L#;P&-sE-fvueZAQ_l-4;HMJ7ztgGsf~7ro*zD_$XeT9HsEf>cI#;ZJ z)Tb09ceN>C8E`ENQ}Ld&z>iLKBf!qjoJhE$HYgSFZhW)!VTJ2GNTVml605V4k;R&^ zd%GzEzhMi~(CRDnD;OJE8FA+`P|Va7k3sbJIG9e!eHYcQ#$#&^uBYDW{_oki_u+D- zDe86f3axYyGF~k8_y@|ALJ|LXje7mJpWeSlgW6GEe?^0-HlokZdup*O!^<3C^a+YvSO_ws;2;lEJX3~Xh!+uK0TQbF z3w17&K-D1;3;igx;x$33PN71bDmBQUhrSRw*I?UG4M_}oO8!DzdRQ;*)jG7QE$jc4 z;Q20=AezIgoBp^V@8y$eY>NLJx7r#1@5$-un}h%NIogd&mXLaoR85><;${|6Ik9A~OdguOMS5@Wf|8@?p4_q{)|HM$xhV|b$ zIz7&;|I?EP6D`s4S#Ke3~ zsGOqu--PuyQBM!R{5w1yqCd3DOlhh5|Er_=ZGD%)FwbMkL^Fy_lrb3r7$3C3R;&ZN z=-~lHYdiIA8mO?7J`hvebj)hca+Zz1yo_Hr!KZpjsO<($*=R~Qvzg~Jst*eZ_j(@A z$6Q)$y7kCIE7zJV(_@c<91-u+=*yq4awduquF@YAwp^D#&~E{gsabt|tf{k3L(H36 z42y;k%4Z|VPPmeiaS8JpN%kM?xoaR+gi~MEEPFq2-Bny}%6RP17pV9<`*G+j@v&nL zv+*@WI2Uu%oj9g5SlbV+!g6YQOGc9(*h}l&Z>g2<{{=CU9n(VA@Bf`vC!7D(X}4Mj z`~O+m9idJa^|WE{0hP66LKyaa(Ok4^QOGlP=BTw6DU(<{Xt+HXsL=IXX2RD{xLX)^ zl@6M2LW@v-+i+Lg#UF?g>HwAKSb+;4bhtSVVHKl~-Ww9pF4PTOxYC|qCfKaZXc4RV z9WG}1)O3v2Ur0VN@!GkV;$#b6s!rU$a$RDropok4B2QMbk8$orppD2?)YQKCy5zws zQeyCj+PQ?}Dli{u@XSUQzh5(i#0HQ?oyZ!9`y>($x&>QjrrRGVNi{PdBY^6m_OBIu zq{ywCARjq;leE{9$<12vHmO?CCYv2$u6LSv$)`(2v5O@#0TTC1r_2nq$|e0efi1?6 z@`bNHeA)zKqKmWxw@lEI>L8ABCj@DIC^Pisrsk$xsLS`Aj6mKuvqyTAX$2xrf?*#g4;4h_mm1 zIPdjF{qgu81%NJK2e<`cX4qzk3a-ckBuY|QAkW&JR!bDThkMDfWzyiRXpcWjZ;mTq z9lFOpv3~Q#*-2|1#3bNCp0!&=Kw*5(nknP{>?cMUzy3S|+r`8)k-i)QKo{9BU<5y$ z;nX{1f^J>Z46{ZXg4uUj8M;nni*6E+II}b@84De3)WOS~gqZlZ37SVq$Z<2=E51Nl z4&jN{aeN#V*cmBnF$B!8o3KFIog~B2q(vcj%cKrhP!_2m{eE@D8r1n^n+D!dQ|yak z6+MyG2xS6kAobT+t=J=x128Enme066D>iJ>+{P&_1v9f&{?mAN5&ty;u3Gu(*g^bL 
z+-{V9o!(nP=Jm;BucDsEn3p#Vv!h_|hQHc&XD!M^F3fx=-hY5PJ|k3~egDJ7cyQYv z_O7o6!<(WVF-+82EtXb8|JN-hn@H;_rfj@OtEgLu!90zQq(vLJc1T(#-Czb(7WB@f ziWDP?F<3%-*$WV5vP()h(%E$;dD=40Pb)jD#KaWl-ff3DwlXTBUt@si6g~?)UoKPh zO3)YP17M`J72+Gq;OwF0OZBBQ3jyltYk;lnZm{ zOq*ij1{`kY9K@Wtj!9*V+gwWjJ;-jBl2Oei6QIXd^o~#>33hwn;^i1@Mjg>bN39f0 zxWTPnx6`kE;DkBFoy*O@|Nm*!zr7gsd;Q_f;QaFT-QcocWE8&R%*&2dAFZZj_7VFZ zU^#+m9w7QGVZJK|$5MyG0yv$ht7fm@U6fswNX*O*B&j5ejoN^c`);#YZc2n(8-=X4 ziSx=(y`B}kh=HGOZm!3-*Q2X{|EHLKUW+C0W{?){FB>?l!r=D&ZNpw#sNP@Qj(Z<& zFR$L;UiLrtFRN0`=LVKc1N;{22&MB-T_>)LCAROh5-gnx!(e~-c`&*fe(Vo#wAf#t z-~3c^9s+46Ds-BfMWN_u!j1n5MmJB;X6tbkd3&}qQ?_$vpoNIywY=Z2B80of*j1bf z;WlmHGOOM>VCRau%L4tdekCQC)xO1s$^Jz3DL9Pd=Vdj4-16_TA2Cb7zavc>TapJt zHnlgxmYFZI{lx5WhEOuQ@#v7OK&|Ft3+rO`?p`azTj=JBAa{4GJ&{(m^F>w1S)tqQ zIh<*Kx8|5SE?7o^S%d}xEa05+Go5Gt_m}loMRWhHAJ$`5OO~5qw7|(-$e%u0=)#kS z{CX`^@pC53VA5vi=8eO+|9gb%c*XLHn@q12%uYK;u`1rkV_q+phcLI<9rngf`7W># z*@i7)Q_L|F*eQ?r6p3uMb73$y!NE%>P5gygh5Vn;wBG$nm`(ZrC#RYFUplRJ=Wzea zvoy=HQYk;ew&x@GEg&46xX#gM z&VWwY6;0N#L7?;7J!ktzZ8~tTpSF*=4F(*Lw{>@)E%gK#Ic5$K}ovE*37EUXMyHZh?yQ*Rxa zLD|~%EFbCj5l3N4N)q8NnS;5JzDUks0T+jC!57OGEF9QNB=m(WFrF5ag6kNH0OI=c zbc*2hpqFwdtWr6U@CLXivVj)M=&Js`&mp`uq9BYC-i9Jg z<$u1#mkkV=9MD3Tuq?muouaTHTn5;+;)42EN)pRK&wDa;wqUKNVK%RK^WfXfZ`9-2 zYi6UGnTs{qjp3CjR&*bU<*UVh$l)mWvF1F6nX4D0W;4&j)XBMHo9U@;sxQY$Wy*_XOTd;PbA^Wp8g(be##KkOB|Nb8hQLM3o)OR~r%DH=?J;rQnK z^78iW<<;AFpT>oO;Pl*UnNHo|)5zSzQ{WLc#3CZMpZlZn;A*%NvCNG?rNpu$>+EA_ zosDDb>}7DBZAZ5*%$uWGWDb-O8uxE*Kc4^l_UgJn)G^cB;ir!~jV*WSQYlSE?@6S* zYIJ&o@%h`!{_Ush-uX?x_jV^*nH!-p#5RW#l7Aags2tTn+EfSi^calkO+&%95^dRl~M0I<~E*Gv3z$* z3P|ku1;Z`Il@LA?2@Z#j9v_>&8_Wn5&C(D`5ffh}sUYs0`XcL5Btdu8fdxzPP)+a* z)dXgr+gBYy`skYMnLbynMPg198C*G@tDQOOmey;;Is5*H51-!lZ--aC{_XJmW50;< zu5}9)ssuF8U!y*t9PygUfN!tTqDrlT@#a;iQb-HZ3}lozCM1^PtRNsRmiJqM*B^Q@NL)LV>Mt5nO6ukFnt%--v}ZYqhsx;Yy79V{)ZZ*R7~5IMJq z18Pf^`E^z8-siOwu5ZLO!zo#|sldQ)^+;wyN-L4JEL7Rb+~2K4alhf0)6{OwYSlC~PEi|dqt>y*f8=I8eE0J^iWu7wY_R!{oJg^D;l)XIE~TA5vA zEYDG6v+gWecuwQY$1`kxyYVe?bQ-nRa)m7DKH6~qdZj*TL|!T~7I!vzY07HauS6c| zgkHQ(s5qWqo;MOPo@9>svbp+=!lpJmoBBt&(yT>-sI1c5>oi7}Oqw2Z0vEEg8HFGw zR`L7&)=6c&BZU=G+s>broQUWLD-QmXa{ozLgg!Na^_Smrwi2Zj^QA7f>igK<*WmMR zZn+O%S^`xNS6y39lLXxuTit{y6JuQyU)@yVlfbbK^hxgO^igs{>DC&7b8m%u4@lb5OLi*I+Y zALMFjclD10V4vucKhZ_M)a$=$ObNUBm%6{I@e3`$5NH8>q5)#tGWB|`sSo6zbX zW}ei)9JTL*?zaO$eFtY{s;0K7h*r9GvxssCVY^s}isP&}CRhNKJIq`-n%AiAmyz2% zEL#w@S?zT|PeN!j#vMYFOQMK|u8rHvq}43Ts^wSfTS6T|XJ2E{W-dRrCLyWaOMZjN zf5DdFsx?Cyr>b{qu{p5K72#D%5|y?IUG{mE_Z0mViN%_EFZ+03Rn>+U>iikhYy}mu zXD$0A3QhC#c3DC@j-ptvK24!MyLM9Y{oJ}~KLY+2P)eDJsH~9os*ltcDg9cj$%)dP zo=+)-KWkMY6P`uc7YM^a&F{aJ>;F&yun~Z# zc!D?S|5onzACFrH|L^m(TmU0&=*2rhCUdb+k*qzTZLlG?zc^|OCAjU~MAhykN?rc5 zuA4z`5hpbvYq{9Ou>^{%aBScHpZEJZS!d=0CJUJG4VPv}bPr7XSeN|3bj(t^lY20RI8M?EnA( literal 0 HcmV?d00001 diff --git a/charts/latest/blob-csi-driver/Chart.yaml b/charts/latest/blob-csi-driver/Chart.yaml index 70bc7226c..09f926b1c 100644 --- a/charts/latest/blob-csi-driver/Chart.yaml +++ b/charts/latest/blob-csi-driver/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v1 -appVersion: latest +appVersion: v1.24.0 description: Azure Blob Storage CSI driver name: blob-csi-driver -version: v0.0.0 +version: v1.24.0 diff --git a/charts/latest/blob-csi-driver/values.yaml b/charts/latest/blob-csi-driver/values.yaml index 07d8b214b..f11e252d4 100644 --- a/charts/latest/blob-csi-driver/values.yaml +++ b/charts/latest/blob-csi-driver/values.yaml @@ -1,8 +1,8 @@ image: baseRepo: mcr.microsoft.com blob: - repository: /k8s/csi/blob-csi - tag: latest + repository: /oss/kubernetes-csi/blob-csi + tag: v1.24.0 
pullPolicy: IfNotPresent csiProvisioner: repository: /oss/kubernetes-csi/csi-provisioner diff --git a/charts/v1.24.0/blob-csi-driver-v1.24.0.tgz b/charts/v1.24.0/blob-csi-driver-v1.24.0.tgz new file mode 100644 index 0000000000000000000000000000000000000000..7916f52c6486657b4dd387c3ad0d18bc8e3f09a7 GIT binary patch literal 5959 zcmV-N7r5vjiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PKBjb0RnLcwX}njI0Xqp$x$;B zC=D#}EME2MfgiZ;kh#QOc2IvhU~XC;hQ;yq;9ec3I$rJH{}WxWv9xPinPSuh0mlz=7I;Btwh>VjOvl0}^0! zkU~A4015;+=*t|aCPU~9X?Yb21&3(|&8753rGQb|x zY6AFz*l=OnETB@&ugw8V=pe}hK(ElxYAEX8uTcn#umKk63vpdFX4Az%Q^wJf1r0O? zsitF!T>*$p)kE|JQ~fXyp#J8FeW49Sk5S2)>jLknFKwCCxO!*_j9wwdEMikM4p|8K z4d*fh$sKzJQ&Q6L#zcL!MXk02zZDnmJ)R@;W3ZK#S?JXVYt!&rZciEbQuWAyi7krB|{!a>|lv&wgyRbMCcW| z`AP#~B+y*4(jLWLh zZ!>CxFXIKEXQ!zafgh5#d=B7gyRKRDll%Yv|4ls3q%>^j7c3P1I-Bq8J_|KV;1CU=Bzgy`EIVWN@KQ;?oM=dCa z2t0AYQA!|Y961csyh`MN57a@7qL1T#)V+q3#BySbA>*=x+CQEhpA_$XbHJ{g$6)Q6 z-X2_HW6?qSuYg|Dk66h=$0(%e;ichc=wb@-E0@ib%)1?Qs&N@f7KU8WhOhYYh!oLZ zig{1(O{cBaM-o2e@LK>P31$0%k^(Q+tsV6$7;ibTqbCxJ&82c0?I+U6ZscEU>hN?* z)N&40oYMYdP^y*j@R4LL@KF0|EEc+hWe3&uPQjTAjkJdHv;@^dip*EN6}Ecso(L(T z7ciZIEjy?_U}Jj@PT)el68>;;_(~YRIu+{)WNLbA&8sE#_{9{Ge%q%mq41fxfd?O8 zd12ci#Lq;|`wmd4#UWbc9=`uX@d6W9X{Qc4DpEP8)l|-LPH6yDSl_yAG7SU_IlEu3 zIGv=PYJ`LeiCs5>UxCKJEB{9QsBzLbtrvA`7xZh#9ovm|qh&e@kRS2=wGZ?iabXa6 ziJo5OMg2!}HZ(m$ZT`u3u!Qd0+@LP8J%=Hd^A2iy+H}Xh((gCKgR#V(-=`X+OA@-+ zQ})*oxJQIA)==xsn>Tsg`>fiq^wjgh3izo+@b7dgzF=w39X9*ABicy`7wRIjo-P#Y zAN484$X#s;SO#3n!c@HHEbyaK-3YMrGba+RsSQd6yc^#veOTdo57Ov~vBc`EWMr{s z?A}hwz;D=sG_?8({R+m0Rz}>#3=}hU#bXftJr1T*a^FGq>+#r{gX^icy8nAN?tZvh zX^MIsy+SJ;gp3zUJ^q0*rBK8_UZZaB?WgyzQGf96`ZfCbVl?Ov-oHk@(dc@l%rIq3 zhw4Q$p&rD4*IYt_do>!RN?nj0)LdZRbjhR{KB?(7EjjBT2POp5T1^>sF2l}BX=*i1 zfXaS1B=ZXW!n6&J-Ct3Es*UK2i>_Mi%J4D=7=41`78ZicC^*Q&6wj0)7~)05Sb&7; z{z9G0Bv5ro#6mv`t$0mPs#B;?r%Dad@1ie6&NbL}R6`Q|u9CkHmmbzjd$kU&YRme6 zC3wDzC5YzW`ldH-$b0!@8k^!jC#|E5|M&Fl?DXLOeU5gc)Xln>j#4eN@FePh@3JLm z2a0x7gj%IhtKBeU&%wUifXsL{B$k1AjeHld0Cj%s)I!B-OBY!djSScGhX>Rcfre|W z%vtpKs8}+{EK7tN;dveVe#S;um~u$a<6|9JmfpGl*Hu;d`oEon>jM`J=|3?Pv|;_X zkIzmr>;LTZP3y4!pQAlIH2;Yf#Or8-U`kw&OCQcXH7)iWI_RIx$H!VtgZ1uxOdV|} z(9rX%O$(hepy3`KEabqHP(XEU?C| z)JA8=`k%xzh9Ih2F*2Zz{cBy^Jh8M$~Vl|9BDRx)QTC`ATcqY z6Dp^u{x@O$P1Mr^F#iruhv*OOGE-Wr{{QNzep}yVFwFCqGSQ4;6J<<;g!0))vJI)N?5 zkn)AEK785)W1@q!1Gh}jlIkFiaVG?6eJC^Z<)-GQT&lCglCkp&BvWz663Xj@E;(Tt z`*4$>6=aDdK348+(zuO2cRE93#kv4n8TLcY)EP8>j*vQ!cF-uGl6cTHC&;zqOGH9q z!m04c79);r-IymU5w1;UABLZKj>V9JKH)X%>1EYO=R7pxISiqJk^30Ev=R7Rd9XcB>@{-ow4**fMEwR7;{3F=4q_5;A(4Z-ZYtPBkk*`k}oBhD;MOU6P68+Gt9Cm|;OZGz@e5^~%O_lhr& zmP2^rbsQf@1$IWtS_}a*>?SObqjr+vXwssPyJb>`D=3Rpkbb|qVh!s2vP}c;s44bE zv5KBZYlJd^G?4mhtXAx?$N`uX70YK_o);T7X>Q|`mV%jCEB|RcyNLf90avYjb?hMi zDQ-7PzfSM1AoKcUvR6^hW6aB&hS^cDcf((8yR#N$A{S;p6z@Mk9iI^@&%giSa@@b| z4Z6eY{@|u)M+_6SR*R+8(EoLc$tKdeiYXf}(kki}VlYpmBWckFt{sw=NjI1Ql?A;s zsUpRQVhonhUiJb+ne37hj&ydNNuIWh^V7->D={&Jxp&)Pj;)M}=+_t^I)%>y&zH*- zy%O|=`G6VBO8?~~bY&fz48EtRN3`0X4aD!6cRpv5+05;ro6F%YY@}tkka7r8k#b=U zooQ1{+!b8*A2?x7ap!U~@c(}r^=>amy>4%C)4#a7eb>M06&Z!^IPZ;i*co$_?B@#2U14$~$Vxu;o^SYrE5E5XvKFbwtvpZlZh!N=anX;WT11&@pujT!I6(QU;#;)Q_ z2)Ahims$1B0Xx^!T^8tv^(!gCtoAK7O!gqXkaxLjLs0LKmJq zuk;!(|H2wjf*ri5>OGEmynmr3#2A$JE$iS79aehUZ(C$4k! 
znbW6Jc1@EtY`My{1Y=?NtC4~*ESjzFicG;?+Ae%#4y;P(hH74fG*_pn2R(ld9&qgH zlDj*gqf{-Qg>c{y(229yC!JPN;h53INJVwW`L;9bVj(sNnWf1P9HUeE#J(e6xn1z+$$|fdsX6mgY zGbmfTp5-I`KH?}$Nl7ByC37%0(ih1YEa2jhE%;*Df`tQniG;q81;*2YQg9tZ5kOpD zo=y=Q_PZ%}()v8Y1fWAjw5vxsVA1(vn*>6|;;yNKxtueza(2(Ai*r3A(hAu{?y0#j zKp8;YfD<|k?++n#)}K+fdfdAQJJ7durp|at25*3SA{%J2jIQe4`y9esBMQPO;cY0= zRQ~5%eA&Q&$v!QF3Cr>e-zf?U!exM6D=w&yr6jQ&^t>liXA9PP8fNo)HxItu{6;;V zy=FG5nYmn(-56epVnz3nSiV~9ha8S_A8XEIn7Mi}YBuvcOr4xNwwa#lrusq+F;DtL zOXVw+-UV@CrVdIOmwl;gwA*{zzZl%U8(j}>dV_AUi?mJ|B~${(wj_&OlA^)XAB=A< zuC8w1UR}R^_i0=h2u{zvmg&?TK8?&hJOv(MLo6b4`?)t7_pb*#5zE{NR7xy6vi3fP z*4{X__Fe|p-gb2B!n`?}Mdm;mp>gl#_T$CBZ?A{FfsUEp4nBR{X>7Smmr7|WdQT$d zRio4Gk1yU{^=>~6yB9aT?%SPcWp0GZ5ZfF|P_~m5h;@Mk1=7rdWcyn#v+=qZWrU@f z2Tb7tX>Y<3gRIN6&*aX=_?{!Fa?AMmA+h(*o!F}vX4RZEg=cZzt{j}x$al@5ExrJ{ zg1i-6z_wqh+ejcU2}NCYl)$e+eS2w)v(n{)g_U|G7819rfPd zj;}92+zu~pe#&D=%h;OCL%5H<2vIAvHx1?u7mM$AAQ2XiIdEQ#Dx=aDA|}2{QbF7~^+ndBNP_OF0}Gbop_AgA3nY9-43q1z1zXX$6gWT zUF#MqR0(LFzeasPIpQ^y0pDJwMU`3wXFQZlvW~bS*Ws=xxZV9%Aru?P$-f^RkAH^ z#+u$850xmD_}irtC2ciS7uP8P*C~*(=rn4ro33MY$Zx5=1W~{)%UTzufgZt z+;Shjv;?XkuDZ6ICJDMRwz>&ZCdRrZzPhQzCxK%f=#$*l>7(R^(ycXy&+aVl#6gMO z^W=%Tjek>g7q&`0@#`6TQa#-o>#g#1q;eZpzhjjak5V)5P|)`>1%2C_WgDch(*96g z9CQ1Jwbn9Q%UYSSSEbnWuT=$EUjM@<*tYg^3HXNkzq8}BDvq<_m|y`^?l5!VXkMebUq){8 zuxvrpX0_J=Jqe-B7oqdf(o4NehnuMfwFZm57 z{{>rytJVxkz)BE#QR|e{vqa6_ua7IN@28eSt6>)cpQix&99Y02={# ziYIuJ{%_@e|M8@C@c%wf%LOpfhF-iAWHJ{E70KEY+6EhP`-`KtP=eduO;qh(qSWO- z>$(~A7I9J&vX+ZY97~|M3daU+Bk3f%hfTkqdr+-^owmLH-~J@9QU9O5$>{&~>Dl4; p|DUD3nEsCq%7f-VX#PVxv_m_zLwn}-e*pjh|NkAq7xe(B002zNxGDeu literal 0 HcmV?d00001 diff --git a/charts/v1.24.0/blob-csi-driver/Chart.yaml b/charts/v1.24.0/blob-csi-driver/Chart.yaml new file mode 100644 index 000000000..09f926b1c --- /dev/null +++ b/charts/v1.24.0/blob-csi-driver/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: v1.24.0 +description: Azure Blob Storage CSI driver +name: blob-csi-driver +version: v1.24.0 diff --git a/charts/v1.24.0/blob-csi-driver/templates/NOTES.txt b/charts/v1.24.0/blob-csi-driver/templates/NOTES.txt new file mode 100644 index 000000000..c75dafbb5 --- /dev/null +++ b/charts/v1.24.0/blob-csi-driver/templates/NOTES.txt @@ -0,0 +1,5 @@ +The Azure Blob Storage CSI driver is getting deployed to your cluster. + +To check Azure Blob Storage CSI driver pods status, please run: + + kubectl --namespace={{ .Release.Namespace }} get pods --selector="app.kubernetes.io/name={{ .Release.Name }}" --watch diff --git a/charts/v1.24.0/blob-csi-driver/templates/_helpers.tpl b/charts/v1.24.0/blob-csi-driver/templates/_helpers.tpl new file mode 100644 index 000000000..d99392f32 --- /dev/null +++ b/charts/v1.24.0/blob-csi-driver/templates/_helpers.tpl @@ -0,0 +1,49 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* Expand the name of the chart.*/}} +{{- define "blob.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "blob.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common selectors. +*/}} +{{- define "blob.selectorLabels" -}} +app.kubernetes.io/name: {{ template "blob.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Common labels. +*/}} +{{- define "blob.labels" -}} +{{- include "blob.selectorLabels" . }} +app.kubernetes.io/component: csi-driver +app.kubernetes.io/part-of: {{ template "blob.name" . 
}} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +helm.sh/chart: {{ template "blob.chart" . }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels }} +{{- end }} +{{- end -}} + + +{{/* pull secrets for containers */}} +{{- define "blob.pullSecrets" -}} +{{- if .Values.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- end }} +{{- end -}} \ No newline at end of file diff --git a/charts/v1.24.0/blob-csi-driver/templates/csi-blob-controller.yaml b/charts/v1.24.0/blob-csi-driver/templates/csi-blob-controller.yaml new file mode 100644 index 000000000..9ece72de3 --- /dev/null +++ b/charts/v1.24.0/blob-csi-driver/templates/csi-blob-controller.yaml @@ -0,0 +1,216 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ .Values.controller.name }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.controller.name }} + {{- include "blob.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.controller.replicas }} + selector: + matchLabels: + app: {{ .Values.controller.name }} + {{- include "blob.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + app: {{ .Values.controller.name }} + {{- include "blob.labels" . | nindent 8 }} + {{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + {{- end }} + {{- if .Values.podLabels }} +{{- toYaml .Values.podLabels | nindent 8 }} + {{- end }} +{{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} + spec: +{{- with .Values.controller.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + hostNetwork: {{ .Values.controller.hostNetwork }} + serviceAccountName: {{ .Values.serviceAccount.controller }} + nodeSelector: + kubernetes.io/os: linux + {{- if .Values.controller.runOnMaster}} + node-role.kubernetes.io/master: "" + {{- end}} + {{- if .Values.controller.runOnControlPlane}} + node-role.kubernetes.io/control-plane: "" + {{- end}} +{{- with .Values.controller.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + priorityClassName: {{ .Values.priorityClassName | quote }} + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.controller.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} +{{- end }} + containers: + - name: csi-provisioner +{{- if hasPrefix "/" .Values.image.csiProvisioner.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" +{{- else }} + image: "{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" +{{- end }} + args: + - "-v=2" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "--timeout=1200s" + - "--extra-create-metadata=true" + - "--kube-api-qps=50" + - "--kube-api-burst=100" + - "--feature-gates=HonorPVReclaimPolicy=true" + env: + - name: ADDRESS + value: /csi/csi.sock + imagePullPolicy: {{ .Values.image.csiProvisioner.pullPolicy }} + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }} + - name: liveness-probe +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port={{ .Values.controller.livenessProbe.healthPort }} + imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }} + - name: blob +{{- if hasPrefix "/" .Values.image.blob.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- else }} + image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- end }} + args: + - "--v={{ .Values.controller.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:{{ .Values.controller.metricsPort }}" + - "--drivername={{ .Values.driver.name }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--cloud-config-secret-name={{ .Values.controller.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.controller.cloudConfigSecretNamespace }}" + - "--allow-empty-cloud-config={{ .Values.controller.allowEmptyCloudConfig }}" + ports: + - containerPort: {{ .Values.controller.livenessProbe.healthPort }} + name: healthz + protocol: TCP + - containerPort: {{ .Values.controller.metricsPort }} + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: /etc/kubernetes/azurestackcloud.json + {{- end }} + imagePullPolicy: {{ .Values.image.blob.pullPolicy }} + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: 
/etc/kubernetes/ + name: azure-cred + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + - name: ssl-pki + mountPath: /etc/pki/ca-trust/extracted + readOnly: true + {{- end }} + resources: {{- toYaml .Values.controller.resources.blob | nindent 12 }} + - name: csi-resizer +{{- if hasPrefix "/" .Values.image.csiResizer.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" +{{- else }} + image: "{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" +{{- end }} + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - '-handle-volume-inuse-error=false' + env: + - name: ADDRESS + value: /csi/csi.sock + imagePullPolicy: {{ .Values.image.csiResizer.pullPolicy }} + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.csiResizer | nindent 12 }} + volumes: + - name: socket-dir + emptyDir: {} + - name: azure-cred + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + - name: ssl-pki + hostPath: + path: /etc/pki/ca-trust/extracted + {{- end }} + {{- if .Values.securityContext }} + securityContext: {{- toYaml .Values.securityContext | nindent 8 }} + {{- end }} diff --git a/charts/v1.24.0/blob-csi-driver/templates/csi-blob-driver.yaml b/charts/v1.24.0/blob-csi-driver/templates/csi-blob-driver.yaml new file mode 100644 index 000000000..9c5de5b91 --- /dev/null +++ b/charts/v1.24.0/blob-csi-driver/templates/csi-blob-driver.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: {{ .Values.driver.name }} + labels: + {{- include "blob.labels" . | nindent 4 }} +spec: + attachRequired: false + podInfoOnMount: true + fsGroupPolicy: {{ .Values.feature.fsGroupPolicy }} + volumeLifecycleModes: + - Persistent + - Ephemeral + tokenRequests: + - audience: api://AzureADTokenExchange diff --git a/charts/v1.24.0/blob-csi-driver/templates/csi-blob-node.yaml b/charts/v1.24.0/blob-csi-driver/templates/csi-blob-node.yaml new file mode 100644 index 000000000..328a911e9 --- /dev/null +++ b/charts/v1.24.0/blob-csi-driver/templates/csi-blob-node.yaml @@ -0,0 +1,327 @@ +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.node.name }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.node.name }} + {{- include "blob.labels" . | nindent 4 }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.node.name }} + {{- include "blob.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + app: {{ .Values.node.name }} + {{- include "blob.labels" . 
| nindent 8 }} + {{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + {{- end }} + {{- if .Values.podLabels }} +{{- toYaml .Values.podLabels | nindent 8 }} + {{- end }} +{{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if or .Values.node.enableBlobfuseProxy .Values.node.enableAznfsMount }} + hostPID: true + {{- end }} + hostNetwork: true + dnsPolicy: Default + serviceAccountName: {{ .Values.serviceAccount.node }} + nodeSelector: + kubernetes.io/os: linux +{{- with .Values.node.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + {{- if .Values.node.affinity }} +{{- toYaml .Values.node.affinity | nindent 8 }} + {{- end }} + priorityClassName: {{ .Values.priorityClassName | quote }} + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.node.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + initContainers: + - name: install-blobfuse-proxy +{{- if hasPrefix "/" .Values.image.blob.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- else }} + image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- end }} + imagePullPolicy: IfNotPresent + command: + - "/blobfuse-proxy/init.sh" + securityContext: + privileged: true + env: + - name: DEBIAN_FRONTEND + value: "noninteractive" + - name: INSTALL_BLOBFUSE + value: "{{ .Values.node.blobfuseProxy.installBlobfuse }}" + - name: BLOBFUSE_VERSION + value: "{{ .Values.node.blobfuseProxy.blobfuseVersion }}" + - name: INSTALL_BLOBFUSE2 + value: "{{ .Values.node.blobfuseProxy.installBlobfuse2 }}" + - name: BLOBFUSE2_VERSION + value: "{{ .Values.node.blobfuseProxy.blobfuse2Version }}" + - name: INSTALL_BLOBFUSE_PROXY + value: "{{ .Values.node.enableBlobfuseProxy }}" + - name: SET_MAX_OPEN_FILE_NUM + value: "{{ .Values.node.blobfuseProxy.setMaxOpenFileNum }}" + - name: MAX_FILE_NUM + value: "{{ .Values.node.blobfuseProxy.maxOpenFileNum }}" + - name: DISABLE_UPDATEDB + value: "{{ .Values.node.blobfuseProxy.disableUpdateDB }}" + volumeMounts: + - name: host-usr + mountPath: /host/usr + - name: host-usr-local + mountPath: /host/usr/local + - name: host-etc + mountPath: /host/etc + containers: + - name: liveness-probe + imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} + volumeMounts: + - mountPath: /csi + name: socket-dir +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port={{ .Values.node.livenessProbe.healthPort }} + - --v=2 + resources: {{- toYaml .Values.node.resources.livenessProbe | nindent 12 }} + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ 
.Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }}/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: {{- toYaml .Values.node.resources.nodeDriverRegistrar | nindent 12 }} + - name: blob +{{- if hasPrefix "/" .Values.image.blob.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- else }} + image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- end }} + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--blobfuse-proxy-endpoint=$(BLOBFUSE_PROXY_ENDPOINT)" + - "--enable-blobfuse-proxy={{ .Values.node.enableBlobfuseProxy }}" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--drivername={{ .Values.driver.name }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--enable-get-volume-stats={{ .Values.feature.enableGetVolumeStats }}" + - "--append-timestamp-cache-dir={{ .Values.node.appendTimeStampInCacheDir }}" + - "--mount-permissions={{ .Values.node.mountPermissions }}" + - "--allow-inline-volume-key-access-with-idenitity={{ .Values.node.allowInlineVolumeKeyAccessWithIdentity }}" + - "--enable-aznfs-mount={{ .Values.node.enableAznfsMount }}" + - "--metrics-address=0.0.0.0:{{ .Values.node.metricsPort }}" + ports: + - containerPort: {{ .Values.node.livenessProbe.healthPort }} + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: BLOBFUSE_PROXY_ENDPOINT + value: unix:///csi/blobfuse-proxy.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: /etc/kubernetes/azurestackcloud.json + {{- end }} + imagePullPolicy: {{ .Values.image.blob.pullPolicy }} + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: {{ .Values.linux.kubelet }}/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /mnt + name: blob-cache + {{- if eq 
.Values.cloud "AzureStackCloud" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + - name: ssl-pki + mountPath: /etc/pki/ca-trust/extracted + readOnly: true + {{- end }} + {{- if .Values.node.enableAznfsMount }} + - mountPath: /opt/microsoft/aznfs/data + name: aznfs-data + - mountPath: /lib/modules + name: lib-modules + readOnly: true + {{- end }} + resources: {{- toYaml .Values.node.resources.blob | nindent 12 }} +{{- if .Values.node.enableAznfsMount }} + - name: aznfswatchdog +{{- if hasPrefix "/" .Values.image.blob.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- else }} + image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- end }} + command: + - "aznfswatchdog" + imagePullPolicy: {{ .Values.image.blob.pullPolicy }} + securityContext: + privileged: true + resources: {{- toYaml .Values.node.resources.aznfswatchdog | nindent 12 }} + volumeMounts: + - mountPath: /opt/microsoft/aznfs/data + name: aznfs-data + - mountPath: {{ .Values.linux.kubelet }}/ + mountPropagation: Bidirectional + name: mountpoint-dir +{{- end }} + volumes: + - name: host-usr + hostPath: + path: /usr + - name: host-usr-local + hostPath: + path: /usr/local + - name: host-etc + hostPath: + path: /etc + - hostPath: + path: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }} + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: {{ .Values.linux.kubelet }}/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: {{ .Values.linux.kubelet }}/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + name: azure-cred + - hostPath: + path: {{ .Values.node.blobfuseCachePath }} + name: blob-cache + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + - name: ssl-pki + hostPath: + path: /etc/pki/ca-trust/extracted + {{- end }} + {{- if .Values.node.enableAznfsMount }} + - hostPath: + path: /opt/microsoft/aznfs/data + type: DirectoryOrCreate + name: aznfs-data + - name: lib-modules + hostPath: + path: /lib/modules + type: DirectoryOrCreate + {{- end }} + {{- if .Values.securityContext }} + securityContext: {{- toYaml .Values.securityContext | nindent 8 }} + {{- end }} diff --git a/charts/v1.24.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml b/charts/v1.24.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml new file mode 100644 index 000000000..833dcc640 --- /dev/null +++ b/charts/v1.24.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml @@ -0,0 +1,115 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-provisioner-role + labels: + {{- include "blob.labels" . 
| nindent 4 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-provisioner-binding + labels: + {{- include "blob.labels" . | nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-resizer-role + labels: + {{- include "blob.labels" . | nindent 4 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-resizer-role + labels: + {{- include "blob.labels" . | nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-controller-secret-role + labels: + {{- include "blob.labels" . | nindent 4 }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "create"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-controller-secret-binding + labels: + {{- include "blob.labels" . 
| nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-{{ .Values.rbac.name }}-controller-secret-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.24.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml b/charts/v1.24.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml new file mode 100644 index 000000000..c041cf8db --- /dev/null +++ b/charts/v1.24.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml @@ -0,0 +1,29 @@ +{{- if .Values.rbac.create -}} +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-node-secret-role + labels: + {{- include "blob.labels" . | nindent 4 }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-node-secret-binding + labels: + {{- include "blob.labels" . | nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.node }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-{{ .Values.rbac.name }}-node-secret-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.24.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml b/charts/v1.24.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml new file mode 100644 index 000000000..7433bccf1 --- /dev/null +++ b/charts/v1.24.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml @@ -0,0 +1,17 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "blob.labels" . | nindent 4 }} +{{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + annotations: + azure.workload.identity/client-id: {{ .Values.workloadIdentity.clientID }} +{{- if .Values.workloadIdentity.tenantID }} + azure.workload.identity/tenant-id: {{ .Values.workloadIdentity.tenantID }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/v1.24.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml b/charts/v1.24.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml new file mode 100644 index 000000000..a25090e30 --- /dev/null +++ b/charts/v1.24.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml @@ -0,0 +1,17 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.node }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "blob.labels" . 
| nindent 4 }} +{{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + annotations: + azure.workload.identity/client-id: {{ .Values.workloadIdentity.clientID }} +{{- if .Values.workloadIdentity.tenantID }} + azure.workload.identity/tenant-id: {{ .Values.workloadIdentity.tenantID }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/v1.24.0/blob-csi-driver/values.yaml b/charts/v1.24.0/blob-csi-driver/values.yaml new file mode 100644 index 000000000..f11e252d4 --- /dev/null +++ b/charts/v1.24.0/blob-csi-driver/values.yaml @@ -0,0 +1,181 @@ +image: + baseRepo: mcr.microsoft.com + blob: + repository: /oss/kubernetes-csi/blob-csi + tag: v1.24.0 + pullPolicy: IfNotPresent + csiProvisioner: + repository: /oss/kubernetes-csi/csi-provisioner + tag: v4.0.0 + pullPolicy: IfNotPresent + livenessProbe: + repository: /oss/kubernetes-csi/livenessprobe + tag: v2.12.0 + pullPolicy: IfNotPresent + nodeDriverRegistrar: + repository: /oss/kubernetes-csi/csi-node-driver-registrar + tag: v2.10.0 + pullPolicy: IfNotPresent + csiResizer: + repository: /oss/kubernetes-csi/csi-resizer + tag: v1.9.3 + pullPolicy: IfNotPresent + +cloud: AzurePublicCloud + +## Reference to one or more secrets to be used when pulling images +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: [] +# - name: myRegistryKeySecretName + +serviceAccount: + create: true # When true, service accounts will be created for you. Set to false if you want to use your own. + controller: csi-blob-controller-sa # Name of Service Account to be created or used + node: csi-blob-node-sa # Name of Service Account to be created or used + +rbac: + create: true + name: blob + +## Collection of annotations to add to all the pods +podAnnotations: {} +## Collection of labels to add to all the pods +podLabels: {} +# -- Custom labels to add into metadata +customLabels: {} + # k8s-app: blob-csi-driver + +## Leverage a PriorityClass to ensure your pods survive resource shortages +## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +priorityClassName: system-cluster-critical +## Security context give the opportunity to run container as nonroot by setting a securityContext +## by example : +## securityContext: { runAsUser: 1001 } +securityContext: {} + +controller: + name: csi-blob-controller + cloudConfigSecretName: azure-cloud-provider + cloudConfigSecretNamespace: kube-system + allowEmptyCloudConfig: true + hostNetwork: true # this setting could be disabled if controller does not depend on MSI setting + metricsPort: 29634 + livenessProbe: + healthPort: 29632 + replicas: 2 + runOnMaster: false + runOnControlPlane: false + logLevel: 5 + resources: + csiProvisioner: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + livenessProbe: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + blob: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + csiResizer: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + affinity: {} + nodeSelector: {} + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + +node: + name: csi-blob-node + cloudConfigSecretName: azure-cloud-provider + cloudConfigSecretNamespace: kube-system + allowEmptyCloudConfig: true + 
allowInlineVolumeKeyAccessWithIdentity: false + maxUnavailable: 1 + metricsPort: 29635 + livenessProbe: + healthPort: 29633 + logLevel: 5 + enableBlobfuseProxy: true + blobfuseProxy: + installBlobfuse: true + blobfuseVersion: "1.4.5" + installBlobfuse2: true + blobfuse2Version: "2.2.0" + setMaxOpenFileNum: true + maxOpenFileNum: "9000000" + disableUpdateDB: true + blobfuseCachePath: /mnt + appendTimeStampInCacheDir: false + mountPermissions: 0777 + resources: + livenessProbe: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + nodeDriverRegistrar: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + blob: + limits: + memory: 2100Mi + requests: + cpu: 10m + memory: 20Mi + aznfswatchdog: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + affinity: {} + nodeSelector: {} + tolerations: + - operator: "Exists" + enableAznfsMount: true + +feature: + fsGroupPolicy: ReadWriteOnceWithFSType + enableGetVolumeStats: false + +driver: + name: blob.csi.azure.com + customUserAgent: "" + userAgentSuffix: "OSS-helm" + azureGoSDKLogLevel: "" # available values: ""(no logs), DEBUG, INFO, WARNING, ERROR + httpsProxy: "" + httpProxy: "" + +linux: + kubelet: /var/lib/kubelet + distro: debian + +workloadIdentity: + clientID: "" + # [optional] If the AAD application or user-assigned managed identity is not in the same tenant as the cluster + # then set tenantID with the application or user-assigned managed identity tenant ID + tenantID: "" diff --git a/deploy/csi-blob-controller.yaml b/deploy/csi-blob-controller.yaml index 9dc5738f9..415d5a055 100644 --- a/deploy/csi-blob-controller.yaml +++ b/deploy/csi-blob-controller.yaml @@ -73,7 +73,7 @@ spec: cpu: 10m memory: 20Mi - name: blob - image: mcr.microsoft.com/k8s/csi/blob-csi:latest + image: mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.24.0 imagePullPolicy: IfNotPresent args: - "--v=5" diff --git a/deploy/csi-blob-node.yaml b/deploy/csi-blob-node.yaml index 3203f029e..d4879c70e 100644 --- a/deploy/csi-blob-node.yaml +++ b/deploy/csi-blob-node.yaml @@ -40,7 +40,7 @@ spec: - operator: "Exists" initContainers: - name: install-blobfuse-proxy - image: mcr.microsoft.com/k8s/csi/blob-csi:latest + image: mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.24.0 imagePullPolicy: IfNotPresent command: - "/blobfuse-proxy/init.sh" @@ -120,7 +120,7 @@ spec: cpu: 10m memory: 20Mi - name: blob - image: mcr.microsoft.com/k8s/csi/blob-csi:latest + image: mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.24.0 imagePullPolicy: IfNotPresent args: - "--v=5" @@ -183,7 +183,7 @@ spec: cpu: 10m memory: 20Mi - name: aznfswatchdog - image: mcr.microsoft.com/k8s/csi/blob-csi:latest + image: mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.24.0 command: - "aznfswatchdog" imagePullPolicy: IfNotPresent diff --git a/deploy/v1.24.0/csi-blob-controller.yaml b/deploy/v1.24.0/csi-blob-controller.yaml new file mode 100644 index 000000000..415d5a055 --- /dev/null +++ b/deploy/v1.24.0/csi-blob-controller.yaml @@ -0,0 +1,144 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-blob-controller + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app: csi-blob-controller + template: + metadata: + labels: + app: csi-blob-controller + spec: + hostNetwork: true + serviceAccountName: csi-blob-controller-sa + nodeSelector: + kubernetes.io/os: linux # add "kubernetes.io/role: master" to run controller on master node + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault + 
tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + containers: + - name: csi-provisioner + image: mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner:v4.0.0 + args: + - "-v=2" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + - "--leader-election-namespace=kube-system" + - "--timeout=1200s" + - "--extra-create-metadata=true" + - "--kube-api-qps=50" + - "--kube-api-burst=100" + - "--feature-gates=HonorPVReclaimPolicy=true" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + - name: liveness-probe + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.12.0 + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=29632 + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: blob + image: mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.24.0 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:29634" + - "--user-agent-suffix=OSS-kubectl" + ports: + - containerPort: 29632 + name: healthz + protocol: TCP + - containerPort: 29634 + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + resources: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + - name: csi-resizer + image: mcr.microsoft.com/oss/kubernetes-csi/csi-resizer:v1.9.3 + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - "--leader-election-namespace=kube-system" + - '-handle-volume-inuse-error=false' + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + volumes: + - name: socket-dir + emptyDir: {} + - name: azure-cred + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate diff --git a/deploy/v1.24.0/csi-blob-driver.yaml b/deploy/v1.24.0/csi-blob-driver.yaml new file mode 100644 index 000000000..d2de725d8 --- /dev/null +++ b/deploy/v1.24.0/csi-blob-driver.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: blob.csi.azure.com +spec: + attachRequired: false + podInfoOnMount: true + fsGroupPolicy: ReadWriteOnceWithFSType + volumeLifecycleModes: + - Persistent + - Ephemeral diff --git a/deploy/v1.24.0/csi-blob-node.yaml b/deploy/v1.24.0/csi-blob-node.yaml new file mode 100644 index 000000000..d4879c70e --- /dev/null +++ b/deploy/v1.24.0/csi-blob-node.yaml @@ -0,0 +1,242 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-blob-node + namespace: kube-system +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: csi-blob-node + template: + metadata: + labels: + app: csi-blob-node + spec: + hostNetwork: true 
+ hostPID: true + dnsPolicy: Default + serviceAccountName: csi-blob-node-sa + nodeSelector: + kubernetes.io/os: linux + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - operator: "Exists" + initContainers: + - name: install-blobfuse-proxy + image: mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.24.0 + imagePullPolicy: IfNotPresent + command: + - "/blobfuse-proxy/init.sh" + securityContext: + privileged: true + env: + - name: DEBIAN_FRONTEND + value: "noninteractive" + - name: INSTALL_BLOBFUSE_PROXY + value: "true" + - name: INSTALL_BLOBFUSE + value: "true" + - name: BLOBFUSE_VERSION + value: "1.4.5" + - name: INSTALL_BLOBFUSE2 + value: "true" + - name: BLOBFUSE2_VERSION + value: "2.2.0" + - name: SET_MAX_OPEN_FILE_NUM + value: "true" + - name: MAX_FILE_NUM + value: "9000000" + - name: DISABLE_UPDATEDB + value: "true" + volumeMounts: + - name: host-usr + mountPath: /host/usr + - name: host-usr-local + mountPath: /host/usr/local + - name: host-etc + mountPath: /host/etc + containers: + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.12.0 + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port=29633 + - --v=2 + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: node-driver-registrar + image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.10.0 + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/blob.csi.azure.com/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + - name: blob + image: mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.24.0 + imagePullPolicy: IfNotPresent + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--enable-blobfuse-proxy=false" + - "--blobfuse-proxy-endpoint=$(BLOBFUSE_PROXY_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--user-agent-suffix=OSS-kubectl" + - "--metrics-address=0.0.0.0:29635" + - "--enable-aznfs-mount=true" + ports: + - containerPort: 29633 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: BLOBFUSE_PROXY_ENDPOINT + value: unix:///csi/blobfuse-proxy.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /var/lib/kubelet/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /mnt + name: 
blob-cache + - mountPath: /opt/microsoft/aznfs/data + name: aznfs-data + - mountPath: /lib/modules + name: lib-modules + readOnly: true + resources: + limits: + memory: 2100Mi + requests: + cpu: 10m + memory: 20Mi + - name: aznfswatchdog + image: mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.24.0 + command: + - "aznfswatchdog" + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + resources: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + volumeMounts: + - mountPath: /opt/microsoft/aznfs/data + name: aznfs-data + - mountPath: /var/lib/kubelet/ + mountPropagation: Bidirectional + name: mountpoint-dir + volumes: + - name: host-usr + hostPath: + path: /usr + - name: host-usr-local + hostPath: + path: /usr/local + - name: host-etc + hostPath: + path: /etc + - hostPath: + path: /var/lib/kubelet/plugins/blob.csi.azure.com + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: /var/lib/kubelet/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + name: azure-cred + - hostPath: + path: /mnt + type: DirectoryOrCreate + name: blob-cache + - hostPath: + path: /opt/microsoft/aznfs/data + type: DirectoryOrCreate + name: aznfs-data + - name: lib-modules + hostPath: + path: /lib/modules + type: DirectoryOrCreate +--- diff --git a/deploy/v1.24.0/kustomization.yaml b/deploy/v1.24.0/kustomization.yaml new file mode 100644 index 000000000..8b7f5fcac --- /dev/null +++ b/deploy/v1.24.0/kustomization.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - csi-blob-controller.yaml + - csi-blob-driver.yaml + - csi-blob-node.yaml + - rbac-csi-blob-controller.yaml + - rbac-csi-blob-node.yaml + - blobfuse-proxy.yaml diff --git a/deploy/v1.24.0/rbac-csi-blob-controller.yaml b/deploy/v1.24.0/rbac-csi-blob-controller.yaml new file mode 100644 index 000000000..89c2f1f38 --- /dev/null +++ b/deploy/v1.24.0/rbac-csi-blob-controller.yaml @@ -0,0 +1,108 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-blob-controller-sa + namespace: kube-system +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: blob-external-provisioner-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: blob-csi-provisioner-binding +subjects: + - kind: ServiceAccount + name: csi-blob-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: blob-external-provisioner-role + apiGroup: rbac.authorization.k8s.io +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: blob-external-resizer-role +rules: + - apiGroups: [""] + 
resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: blob-csi-resizer-role +subjects: + - kind: ServiceAccount + name: csi-blob-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: blob-external-resizer-role + apiGroup: rbac.authorization.k8s.io +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-blob-controller-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "create"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-blob-controller-secret-binding +subjects: + - kind: ServiceAccount + name: csi-blob-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-blob-controller-secret-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/v1.24.0/rbac-csi-blob-node.yaml b/deploy/v1.24.0/rbac-csi-blob-node.yaml new file mode 100644 index 000000000..ce06d862c --- /dev/null +++ b/deploy/v1.24.0/rbac-csi-blob-node.yaml @@ -0,0 +1,30 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-blob-node-sa + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-blob-node-secret-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-blob-node-secret-binding +subjects: + - kind: ServiceAccount + name: csi-blob-node-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-blob-node-secret-role + apiGroup: rbac.authorization.k8s.io diff --git a/docs/install-blob-csi-driver.md b/docs/install-blob-csi-driver.md index 839dc42bc..95868291d 100644 --- a/docs/install-blob-csi-driver.md +++ b/docs/install-blob-csi-driver.md @@ -4,6 +4,6 @@ > - please use helm install method for more customization, e.g. Azure Stack, RedHat OpenShift support. > - [install CSI driver master version](./install-csi-driver-master.md) (only for testing purpose) + - [install v1.24.0 CSI driver](./install-csi-driver-v1.24.0.md) - [install v1.23.3 CSI driver](./install-csi-driver-v1.23.3.md) - [install v1.22.5 CSI driver](./install-csi-driver-v1.22.5.md) - - [install v1.21.7 CSI driver](./install-csi-driver-v1.21.7.md) diff --git a/docs/install-csi-driver-v1.24.0.md b/docs/install-csi-driver-v1.24.0.md new file mode 100644 index 000000000..c2e6d36b7 --- /dev/null +++ b/docs/install-csi-driver-v1.24.0.md @@ -0,0 +1,47 @@ +# Install Azure Blob Storage CSI driver v1.24.0 version on a kubernetes cluster +> `blobfuse-proxy` is supported on CoreOS(OpenShift) from v1.23.2 +> +If you have already installed Helm, you can also use it to install this driver. Please check [Installation with Helm](../charts/README.md). + +## Install with kubectl + - Option#1. 
remote install +```console +curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/v1.24.0/deploy/install-driver.sh | bash -s v1.24.0 blobfuse-proxy -- +``` + + - Option#2. local install +```console +git clone https://github.com/kubernetes-sigs/blob-csi-driver.git +cd blob-csi-driver +./deploy/install-driver.sh v1.24.0 local,blobfuse-proxy +``` + +- check pods status: +```console +kubectl -n kube-system get pod -o wide -l app=csi-blob-controller +kubectl -n kube-system get pod -o wide -l app=csi-blob-node +``` + +example output: + +```console +NAME READY STATUS RESTARTS AGE IP NODE +csi-blob-controller-56bfddd689-dh5tk 4/4 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0 +csi-blob-controller-56bfddd689-8pgr4 4/4 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1 +csi-blob-node-cvgbs 3/3 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1 +csi-blob-node-dr4s4 3/3 Running 0 35s 10.240.0.4 k8s-agentpool-22533604-0 +``` + +### clean up Blob CSI driver +- Option#1. remote uninstall +```console +curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/v1.24.0/deploy/uninstall-driver.sh | bash -s v1.24.0 -- +``` + + - Option#2. local uninstall +```console +git clone https://github.com/kubernetes-sigs/blob-csi-driver.git +cd blob-csi-driver +git checkout v1.24.0 +./deploy/uninstall-driver.sh v1.24.0 local +``` From 6c75764aa886eda01c92b50b027b2fe6ef511225 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Thu, 22 Feb 2024 13:38:50 +0000 Subject: [PATCH 075/157] doc: use latest version for master branch --- charts/index.yaml | 89 ++++++++++------------ charts/latest/blob-csi-driver-v0.0.0.tgz | Bin 5969 -> 5970 bytes charts/latest/blob-csi-driver-v1.24.0.tgz | Bin 5958 -> 0 bytes charts/latest/blob-csi-driver/Chart.yaml | 4 +- charts/latest/blob-csi-driver/values.yaml | 4 +- deploy/csi-blob-controller.yaml | 2 +- deploy/csi-blob-node.yaml | 6 +- 7 files changed, 48 insertions(+), 57 deletions(-) delete mode 100644 charts/latest/blob-csi-driver-v1.24.0.tgz diff --git a/charts/index.yaml b/charts/index.yaml index e22a9b0d7..0ec29f462 100644 --- a/charts/index.yaml +++ b/charts/index.yaml @@ -3,16 +3,7 @@ entries: blob-csi-driver: - apiVersion: v1 appVersion: v1.24.0 - created: "2024-02-22T13:37:24.074374181Z" - description: Azure Blob Storage CSI driver - digest: 5cb79d7ed2379e34d84af10240e40bc6461ca5eb317f96aaf83984e4c144a29c - name: blob-csi-driver - urls: - - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/latest/blob-csi-driver-v1.24.0.tgz - version: v1.24.0 - - apiVersion: v1 - appVersion: v1.24.0 - created: "2024-02-22T13:37:24.09898356Z" + created: "2024-02-22T13:38:43.33899957Z" description: Azure Blob Storage CSI driver digest: c9ecca2dde77d9557f3917513c8f5adcb861c2cfa9fe1e08f5bef056ab583efd name: blob-csi-driver @@ -21,7 +12,7 @@ entries: version: v1.24.0 - apiVersion: v1 appVersion: v1.23.3 - created: "2024-02-22T13:37:24.097977419Z" + created: "2024-02-22T13:38:43.338374858Z" description: Azure Blob Storage CSI driver digest: 4acef5e84bcbc01fa624b73f14d1331ee14b14e52b4db018f0037fd2ada015c7 name: blob-csi-driver @@ -30,7 +21,7 @@ entries: version: v1.23.3 - apiVersion: v1 appVersion: v1.23.2 - created: "2024-02-22T13:37:24.097000052Z" + created: "2024-02-22T13:38:43.33777157Z" description: Azure Blob Storage CSI driver digest: 057d6658c5879ee7e564d59275366521dc0a2e311c0527e570eaccd544622e60 name: blob-csi-driver @@ -39,7 +30,7 @@ entries: version: v1.23.2 - apiVersion: v1 appVersion: v1.23.1 - created: 
"2024-02-22T13:37:24.096228034Z" + created: "2024-02-22T13:38:43.337162249Z" description: Azure Blob Storage CSI driver digest: 66215f12a4e3acdcf09416d817b737e14546058b081a2cfd8bf9ef507229ca07 name: blob-csi-driver @@ -48,7 +39,7 @@ entries: version: v1.23.1 - apiVersion: v1 appVersion: v1.23.0 - created: "2024-02-22T13:37:24.095605967Z" + created: "2024-02-22T13:38:43.336524751Z" description: Azure Blob Storage CSI driver digest: 57151e21e33660522f25694bd8ae985e5e17c7ffe09904ad2af4025e8bf1da72 name: blob-csi-driver @@ -57,7 +48,7 @@ entries: version: v1.23.0 - apiVersion: v1 appVersion: v1.22.5 - created: "2024-02-22T13:37:24.094798019Z" + created: "2024-02-22T13:38:43.335903602Z" description: Azure Blob Storage CSI driver digest: ff3c2c2e05dd048dd0af3e5c7d002eae2928a5d17fb269a1e4d5cadd30e8ab51 name: blob-csi-driver @@ -66,7 +57,7 @@ entries: version: v1.22.5 - apiVersion: v1 appVersion: v1.22.4 - created: "2024-02-22T13:37:24.094172861Z" + created: "2024-02-22T13:38:43.335229669Z" description: Azure Blob Storage CSI driver digest: 6c38e79d2f50616daac0658cfa5b1a569e6ff8ce8f24ed40f563e87fb1d1340a name: blob-csi-driver @@ -75,7 +66,7 @@ entries: version: v1.22.4 - apiVersion: v1 appVersion: v1.22.3 - created: "2024-02-22T13:37:24.093547882Z" + created: "2024-02-22T13:38:43.333709892Z" description: Azure Blob Storage CSI driver digest: 6cdee296d22ecd330f477f2ca6da51b07320c546c04ae46c23eef48146b772c1 name: blob-csi-driver @@ -84,7 +75,7 @@ entries: version: v1.22.3 - apiVersion: v1 appVersion: v1.22.2 - created: "2024-02-22T13:37:24.092935133Z" + created: "2024-02-22T13:38:43.333082873Z" description: Azure Blob Storage CSI driver digest: 259e66dc12db7310fe1c51e49c964398e0a6b7d511133916dd7d25f748f0b791 name: blob-csi-driver @@ -93,7 +84,7 @@ entries: version: v1.22.2 - apiVersion: v1 appVersion: v1.22.1 - created: "2024-02-22T13:37:24.092139192Z" + created: "2024-02-22T13:38:43.332470917Z" description: Azure Blob Storage CSI driver digest: 8329d477d55c82f97bb09fb172c5f39a1677bedc13c7410bd93b306194516438 name: blob-csi-driver @@ -102,7 +93,7 @@ entries: version: v1.22.1 - apiVersion: v1 appVersion: v1.21.7 - created: "2024-02-22T13:37:24.091321566Z" + created: "2024-02-22T13:38:43.331849146Z" description: Azure Blob Storage CSI driver digest: 1095721182d611e2556c611dd330758d8130fe66493db4f9189586a9219896d3 name: blob-csi-driver @@ -111,7 +102,7 @@ entries: version: v1.21.7 - apiVersion: v1 appVersion: v1.21.6 - created: "2024-02-22T13:37:24.090397855Z" + created: "2024-02-22T13:38:43.33123788Z" description: Azure Blob Storage CSI driver digest: d5ba1f92795ec45970eb6e5fc54aa13a5684f9936216c064f8a3843bf722bf54 name: blob-csi-driver @@ -120,7 +111,7 @@ entries: version: v1.21.6 - apiVersion: v1 appVersion: v1.21.5 - created: "2024-02-22T13:37:24.089053236Z" + created: "2024-02-22T13:38:43.330670488Z" description: Azure Blob Storage CSI driver digest: b403e9d49abfe076ecd83d6dd50166347ee4305f33dc840019474b2876723b9b name: blob-csi-driver @@ -129,7 +120,7 @@ entries: version: v1.21.5 - apiVersion: v1 appVersion: v1.21.4 - created: "2024-02-22T13:37:24.088260575Z" + created: "2024-02-22T13:38:43.330079576Z" description: Azure Blob Storage CSI driver digest: e4fa13670caf6b0d3e9fefa55d100daa439cd7187dabd45318ab03c7d4b17710 name: blob-csi-driver @@ -138,7 +129,7 @@ entries: version: v1.21.4 - apiVersion: v1 appVersion: v1.20.3 - created: "2024-02-22T13:37:24.087502317Z" + created: "2024-02-22T13:38:43.329453983Z" description: Azure Blob Storage CSI driver digest: 
8c2c20547b2e0e1b39d2f2efd04c1bd778f14af5feae2bda86d722dac3c02643 name: blob-csi-driver @@ -147,7 +138,7 @@ entries: version: v1.20.3 - apiVersion: v1 appVersion: v1.19.6 - created: "2024-02-22T13:37:24.086185619Z" + created: "2024-02-22T13:38:43.32837523Z" description: Azure Blob Storage CSI driver digest: 0007ef225b5658d3989aa6fdc3a91a4b33696a438eee46ad9a675af615cbdf21 name: blob-csi-driver @@ -156,7 +147,7 @@ entries: version: v1.19.6 - apiVersion: v1 appVersion: v1.19.5 - created: "2024-02-22T13:37:24.085599248Z" + created: "2024-02-22T13:38:43.327646408Z" description: Azure Blob Storage CSI driver digest: 183c3e5cd84b709f1455cc7c84ed5bd573e8a24149fd6442d38999835b0a1711 name: blob-csi-driver @@ -165,7 +156,7 @@ entries: version: v1.19.5 - apiVersion: v1 appVersion: v1.18.0 - created: "2024-02-22T13:37:24.084845596Z" + created: "2024-02-22T13:38:43.32640302Z" description: Azure Blob Storage CSI driver digest: 3eac15488da5be7d1e78431929f7cda35bceb1af3fe107ffbd84606e047c9204 name: blob-csi-driver @@ -174,7 +165,7 @@ entries: version: v1.18.0 - apiVersion: v1 appVersion: v1.17.0 - created: "2024-02-22T13:37:24.084099665Z" + created: "2024-02-22T13:38:43.325246938Z" description: Azure Blob Storage CSI driver digest: 22cfa17fc5e8d771ff8edd26729266a9a8ee55c0e150df85ef15698f7fe985e9 name: blob-csi-driver @@ -183,7 +174,7 @@ entries: version: v1.17.0 - apiVersion: v1 appVersion: v1.16.0 - created: "2024-02-22T13:37:24.083286845Z" + created: "2024-02-22T13:38:43.32465342Z" description: Azure Blob Storage CSI driver digest: bf6249c0e3e3d3d009d4c79ceb7fda9a56c0565b969de753628792ea3ea5ece8 name: blob-csi-driver @@ -192,7 +183,7 @@ entries: version: v1.16.0 - apiVersion: v1 appVersion: v1.15.0 - created: "2024-02-22T13:37:24.081520549Z" + created: "2024-02-22T13:38:43.324067023Z" description: Azure Blob Storage CSI driver digest: 8daa35cd4957695cb64b45da05a15b4020df5545a8ac44c4668dad4bba82c8a9 name: blob-csi-driver @@ -201,7 +192,7 @@ entries: version: v1.15.0 - apiVersion: v1 appVersion: v1.14.0 - created: "2024-02-22T13:37:24.080610198Z" + created: "2024-02-22T13:38:43.323452746Z" description: Azure Blob Storage CSI driver digest: 442bc579b231aab626b9e474e2c0ed3f101d47d61c99aa9a7f863af7ce268d9d name: blob-csi-driver @@ -210,7 +201,7 @@ entries: version: v1.14.0 - apiVersion: v1 appVersion: v1.13.0 - created: "2024-02-22T13:37:24.079727411Z" + created: "2024-02-22T13:38:43.322864744Z" description: Azure Blob Storage CSI driver digest: b577b0b771138109aa90eb09d56fc07273ca0b584a263ee8f789e35796279f31 name: blob-csi-driver @@ -219,7 +210,7 @@ entries: version: v1.13.0 - apiVersion: v1 appVersion: v1.12.0 - created: "2024-02-22T13:37:24.078746878Z" + created: "2024-02-22T13:38:43.322285362Z" description: Azure Blob Storage CSI driver digest: 124e87af2581b374b89a39940698620c23d3eae6dcee518d302461ffea93e9a8 name: blob-csi-driver @@ -228,7 +219,7 @@ entries: version: v1.12.0 - apiVersion: v1 appVersion: v1.11.0 - created: "2024-02-22T13:37:24.07789702Z" + created: "2024-02-22T13:38:43.321685494Z" description: Azure Blob Storage CSI driver digest: 07c4d76017491b3d0bdd70de90e814096938bf7916da0c149c3805294bd57560 name: blob-csi-driver @@ -237,7 +228,7 @@ entries: version: v1.11.0 - apiVersion: v1 appVersion: v1.10.0 - created: "2024-02-22T13:37:24.077044763Z" + created: "2024-02-22T13:38:43.32108312Z" description: Azure Blob Storage CSI driver digest: 79716efa958385adf57eb3570843e1b4512d8c801e8e070625e94264f3e917a9 name: blob-csi-driver @@ -246,7 +237,7 @@ entries: version: v1.10.0 - apiVersion: v1 
appVersion: v1.9.0 - created: "2024-02-22T13:37:24.106306714Z" + created: "2024-02-22T13:38:43.344668206Z" description: Azure Blob Storage CSI driver digest: fca0b9215d3277346f68c643fb3ead75158971f0d1945ab01ec559196f3cf842 name: blob-csi-driver @@ -255,7 +246,7 @@ entries: version: v1.9.0 - apiVersion: v1 appVersion: v1.8.0 - created: "2024-02-22T13:37:24.10548327Z" + created: "2024-02-22T13:38:43.344048475Z" description: Azure Blob Storage CSI driver digest: 3b78e2ab4f33577c54d4f57276c824717d2ad2aa3741210e938fcaf927bc751f name: blob-csi-driver @@ -264,7 +255,7 @@ entries: version: v1.8.0 - apiVersion: v1 appVersion: v1.7.0 - created: "2024-02-22T13:37:24.104677087Z" + created: "2024-02-22T13:38:43.343397477Z" description: Azure Blob Storage CSI driver digest: 28da5b55c3d2689d6da85eb7da344385e9cb99bdb2af18c24fea93670abfe7ea name: blob-csi-driver @@ -273,7 +264,7 @@ entries: version: v1.7.0 - apiVersion: v1 appVersion: v1.6.0 - created: "2024-02-22T13:37:24.102887936Z" + created: "2024-02-22T13:38:43.342587905Z" description: Azure Blob Storage CSI driver digest: 6f24f2e6623f6f8862e47d4fbdf13b5f351ceec6bb9a4591ef7fc2fca9fc1eef name: blob-csi-driver @@ -282,7 +273,7 @@ entries: version: v1.6.0 - apiVersion: v1 appVersion: v1.5.0 - created: "2024-02-22T13:37:24.102110093Z" + created: "2024-02-22T13:38:43.34103753Z" description: Azure Blob Storage CSI driver digest: 95d14c9b70b319760d388ea47727c8c97e9287867a8852aeb67b7175b52fe8f5 name: blob-csi-driver @@ -291,7 +282,7 @@ entries: version: v1.5.0 - apiVersion: v1 appVersion: v1.4.1 - created: "2024-02-22T13:37:24.101126167Z" + created: "2024-02-22T13:38:43.340523852Z" description: Azure Blob Storage CSI driver digest: 5fcf69c449f065fa1d5722e5a7fed8a28000efa790907e9ff4b552c5fbd16d22 name: blob-csi-driver @@ -300,7 +291,7 @@ entries: version: v1.4.1 - apiVersion: v1 appVersion: v1.4.0 - created: "2024-02-22T13:37:24.100368414Z" + created: "2024-02-22T13:38:43.340000695Z" description: Azure Blob Storage CSI driver digest: b466543344a6411f6130ba87b093955d39ab8614c6b4ed8505a0a0c96073cb33 name: blob-csi-driver @@ -309,7 +300,7 @@ entries: version: v1.4.0 - apiVersion: v1 appVersion: v1.3.0 - created: "2024-02-22T13:37:24.099690475Z" + created: "2024-02-22T13:38:43.339467398Z" description: Azure Blob Storage CSI driver digest: 58d02cb70a3a966b349d62e880b7149fb06ac009474e35e580784fd3c98a5b07 name: blob-csi-driver @@ -318,7 +309,7 @@ entries: version: v1.3.0 - apiVersion: v1 appVersion: v1.2.0 - created: "2024-02-22T13:37:24.086774045Z" + created: "2024-02-22T13:38:43.328869209Z" description: Azure Blob Storage CSI driver digest: 27fb89f20b5fddc7329e6d7c2374857b22c1d61592e397a53f47121eea68c344 name: blob-csi-driver @@ -327,7 +318,7 @@ entries: version: v1.2.0 - apiVersion: v1 appVersion: v1.1.0 - created: "2024-02-22T13:37:24.076165071Z" + created: "2024-02-22T13:38:43.320462033Z" description: Azure Blob Storage CSI driver digest: a251a55243de207c69ef53f72abee45e93b72fa4fc43dc204b7f1cdfd459acdb name: blob-csi-driver @@ -336,7 +327,7 @@ entries: version: v1.1.0 - apiVersion: v1 appVersion: v1.0.0 - created: "2024-02-22T13:37:24.075267487Z" + created: "2024-02-22T13:38:43.32000298Z" description: Azure Blob Storage CSI driver digest: e83f037a165eafc83a978bd7e6bf6221b052ac34363aecb12e6a73607dc58b89 name: blob-csi-driver @@ -345,11 +336,11 @@ entries: version: v1.0.0 - apiVersion: v1 appVersion: latest - created: "2024-02-22T13:37:24.073697222Z" + created: "2024-02-22T13:38:43.319662364Z" description: Azure Blob Storage CSI driver - digest: 
2bd22695556c9625b6f11056cdd0058af1e02d6e85917944efde0bfae1bba3d1 + digest: a8a337da1fb52e80e8bf9e7cd1d6da9179de91c7577a50822547e476324efdfe name: blob-csi-driver urls: - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/latest/blob-csi-driver-v0.0.0.tgz version: v0.0.0 -generated: "2024-02-22T13:37:24.072874067Z" +generated: "2024-02-22T13:38:43.318844561Z" diff --git a/charts/latest/blob-csi-driver-v0.0.0.tgz b/charts/latest/blob-csi-driver-v0.0.0.tgz index f0c8aae12aaf4f7d6a2b9a97513c39cfb8181f86..f8b67aeaa48d507933b1abc8c7a6b0305d7f023d 100644 GIT binary patch delta 4406 zcmV-65y|e+F48WLuYdI9^!QEt==8f*`?&q)b9}DFz5EAq6T2H2x9o(O!VV`^k zE(oJtv}o6`@2~!Ju>>J&4hWkQU+QP)zXu%9TbE7HSTc@hfG)-ZWQ3EkHCnZo9`d0jT6X;}up5BbuL4-V{@d+#yOUl2$DKEa^?&~yjd*wl-5Np@EMNpa z>mtwQ4UgEI2{x4tn|bQ3%O(m6Ar3whL^8hYqUPO?qG=1#jHF0INIY995B0Ynxb8J` ziM{Ni!F0&vH3tDyy|e|n=4?TfegIxTfCXM+>X|y|=2~y@jPcNs4!+*kP!&- z`!-X|O@{=ncH>`-qxGa}w#x!1+$OGriA(GY^`usN^$Lw(3LK~nNHWAIAjXl$I3NKw z2PxF!37|lLgTBmxYBGe*kd{%QP;i)b(OgPjbejpL8-Ii~9cGKB#NtjgZAPbL7I3h9 z=2$kP5)%r%sqk4EndNh`z!IW4T{dg_`E}sBW5Z1s{qk$=6|#`o)VyW*^_CxCX+Vb9 zgIY}hUl1G4ZJPyDs`<4!Uw4Y`dJM{{r@!zVG%aK0(~K_tHx})IB3c^TC$*l z#vs*nOnLJq z3n9PZoJaH-*V7+LZ33N^!yORS_ypV8Nm1xmmVy?I3&p9eObmq@Q_3V(e`%S(j-&r^ zT_op#eCCLn&z$oZs*4^T^JrW=0e2mSOATN;BY(?67lDw>%k(2uGUS284wkrPYmh`o zgkGV$ABBZ|KX3Uco-Sbl+D^jgniIx}Twb_XX#7BhGN<&%R7@ce@C8`_;viVSZ9rnq zxU4GuHlsH9GF||BcA9Dt_#tV_=K!8I=9)D>$+rOiyjp2bMZ+iQE#_eZS+{n13lJii6yZTwt}UhUv93t?<1xG1?m~rGVQ1dF013pj(F^WEp2T}JLQWDFFEv^}tUDWy4>CtiV-ZuyA z%6Sadsp;*(B{mjar2h)&75#{nJammhiXL7Vey&|iA%5kunUZ<8i%v8yBgw*$D}UPX z6<;2aBKk`)@9Dkiq}BRJ!lxX53qT~HY(G#^;N`k?qFx2#EhpA~BC*&+DyPwTB8}`u z{ zuY92IhzrBOOZ4icrcdO z^9NLe^hiP%d&>U02JR6dj5XAH^X5%n_dcsOEIswSumXN65&S!yiZ58&bBE2o?ud3$ z!iBoXtfzCu`bT|AF>+U%0)LhP*Rn7b?>P(n=u|fX?EK7$gez);QUUMAH%lK@xZZ;_ zdSWcGIx87jtQotvn=6F}eQT=K>w&vh^ z>aFhoo{f7SE?1hOUPrIcN(Uk1xl)gRpiC(g@sHQ2*MIxz{cAKBzJI%Vjeb5K4F|*b zuTg(Ax*91nOxe<*deKa%2l3xEm(bu|jYg?b7i1ST7nnC)GHHfSYI;pe&br8f3Bj~h zQ%0T3u(MK{T1^w6a?lIOyh6V)ZG&U?S2UPvBl`Tjrxv?1yvzYcpP;yfg&;Eu4)QR? zGi3;dco8ucAfdXyP=Dt#2~-^tvCxk~D_#?n>J%!}sZxUsdgu$0a}Bl~)sV!Xr{pih zrHA#>o~%RLrDgrU5NLJx7r#1@5$+#&cXlt9PLJ_o^?ST zrDkT~snh}AWlPY`6z#MKHB6&cyJ5(lgMGIFnelQ+ECcZx`F}280qQK-t%XX~mM*d^ z8X2)?4-cp@0u9$#nYQThQL$u@S(XSl#j`s0{fxb?Fy)Y-$HzLdEWL~WudA){^?y4D z*9R^d(tlzoXv6yN9GxC#*8l0rA^!V3?ct&MPqZLjR~rXY;(}cIaOSCLvFFf5|7<=! z)@mB8fA3@JXn&i5hF)N8X6Tdw4fpV1AqS>}0;+4YN z8$WNOR?N5tiHZ4~P&q~QzX|JaqMjar`G0s)M1N@anSat!_5W8#_1pR`gJGV>l(}XU zn}O+cZ#NCw(}kw&|GFp5-hX-+39oZh}wsl2F?XoU+lB zaAq^lXH*{?60Y_tIdfKq}fPczbG9e86zGyDmwJ7`cbE;He4DBLZK>q-YuH=#u+ziqgy?cxta33Y%&NAC@Z zXcy{+?p$flFB5E5X0(Xa{0n|jqn0W1IOmVUWFI6Y*$6S|KYiAu>jmWE& z?0;h%eGzCQavL?ZU%oDRu!@=({GoO(A-M|DM;bh{k;U)V3?Z=rq){g_2;x48ME1s^GL@g~SePTwT$^<;9hR=iEBRn>izfNF_F{J$Bs}G+x!GDyGFz*dI+HD~G!8b3!! 
zokzQ96i`V#=$RAb+VLf#A~E4qsAP)~$F^?Fla&bFCNmMk&pgNCnu9*!HS6hR)qhCm zKQ!V*yrj|!*J&5k6O(KDM_HQa@iF&q9q#TXXo+X&@v&|uz!gxDpZ(EH?*j;7J`+hX zP}5$w7H6G$?xD6{vEy(M;_Uk$&U?L4e?0z20iX-m0d7H?SObb|=YjG=FJP_}wyT z#1)i9Dqz1~U9kpre%YphchnU7qF6;wq%}gBKpIH>HC8M3NaO%ax{Bp9F3*Y$n>4p^ zdP~8~td;*Xo?XO$jex6GzB+ag{}i_yrC+D|R*-ppGTE!B=P~BxO~dRc*t_Acw%u8a zGLZ|jAd2@NppMT7m1p1oaDOo#-1djP>#M==rf5eDleSiiMVDwbTHRu@i4?D5%EpVd zin@gu%+u&dTC{;{hoogv4`x7RLGMi3NHL-qgC(?=y#P@r)1-tWooQzhsV(FDw6eoW zOiW=O-gcN{E2AR%H3o=I=d-}`n+VS;r=u?|&)k5v}%T1Mz$2 zozIzMHgmh^=Hhx6Hqx?NNI3+lNVzbd&a^2eZouJY&OywX>zK60xXq>X--GOCDH+vV zG68yQMehhDl3=$7E?$noX4DZ~bks`0gd5!Ibvymq2Tqt%+_~Hg{QsXu{o9LCzt_C!Ave>8%D7o)8o8_iNxV2HpYMVI64Atvd!HXF9>E`Boe0x2*`uBf|>F2dr z0&fOs;r_CL!zv7J&)+uerG@JK)$O?V;r8uWJ`%Wvt(y1^E_J^MbqpRV^{_sYN{q_0HPbKFekanV4r>R*KijF4S_^)7e^8{_S z9#@gKXFD@xJ7)%3h$vpm`~50HxND4E#hDOp(*`cH>YW32uBf{#&=2cZQi56STWpw2 zP*k6S!#I9kR(})7E&ne25wisRJJPhVC3zrZQ+p$9nfW5yPs|Qy2qm){j}FOd)M_ra zutsL@?zKX^g>J40a(B1d6KPdDUsQFR6}sJ?!dpV ze_4N3H22^7VLfKGWVsne3!L1A{OOa0E<$<8uh&8qKVWCV3?^-MZr(VI`@cuHj#n(N zxXJWd!R)ki6szKmJm&Rsc?ffx-C=L+lft~V*Pm#!GI~N9X6CAvB z(!^h=RmlGdP3zsSgxQq;e{y=7$^U7!JI9l852FH{wzC!x{RDrHviE<#=^XC=ewLOI zVD-@(=3+{Haer9+XMW~q#9T-f2x({>$T{QWcfCs`>IT)e~s!c5&BDvq<_m|y`^?l5!VXkMeb zUq){8uxvrpX0_J=Jqe-B7oqdf(o4NehnuMfw zFZm57{{>rytJVxXru_)`UqC5kCZe)J+N(ZNU!?SF zttKZ*cX~dh6#lGLiI9gf{cFjD>_LLJo0tEAJ$5HSsUp)44~N@>3R>0k|5Gjj*>L`E zA02hF_y2z!pBx>||Ig7jod2J7BhYmS-_jQFLW(~*58sN)PhOnxEXuw>7!GQF|E*mA zhXR0&06fJLyh;DJa=-t0+&cJwpQq&l7->T<-U%|9i-n40?Fnsz4Y~crQCld%ZSN+k zb}v!t@}G6x40?+=sR>!j#U_p=P+Wy$1GkZM65T?>rr*y!sMfzu+g|@~e-hZJ|4-gz w^nd5%^zi%t&(dB@|HlU9LGvFp|Dhe)p&i$H8;ZhzNmAGc55e1}^5W1+kSLSp`1>&djTgZq;-?32&H z1!1&{7VR4L{nei?mLO!!0bz6EOa1Kp_kaU>>#_+NOUCgG(8YLwjPTSb_MnS0B2`}3}Xrb8yLIS8QYr7g%cXA7eA1MmU@Oo--PFab_Mf>0zBJ5>Id z5aOD%2~?vPb${_ymjt~|qutp|kTM5)dU1_lMug;ei-D@n;nK8t)IH5Yg)!TLj6jgz zx0zyYIwWYd8~{T z$Fdofm{8zNh0oH+ET5AFmJrS9vRTv5uLIW|8*aMjmtSkIkcG^q<}JgoxBLK012V)O z)M^6wg4l3w+bp0`&9BV?OXwoW13<6P&uS>@|F2O9i?9I}=nHXOHD=SrK~u)jk_8Pk z2C1fFiho@Jh)mT(^aWG>Fc6^r=7@cv4MmSp$(ic{@2W3tnZ>wzXbFs7A;m0WQ#1}) z2>A`?JfhFIp8im36X>)Y?trMqC)mzTibB7#6trkuC{Arz2S$`I~2!v!_rXQh_ArB;Wu*5Z6gCsg4 z^a|bmC@k#zdCN!fbO{U4b`nO{oG?!0^1{VJ;|D5~Ii)|QVhV|XFUSHA2f+ev0}^w_ zWmW078MVQe@dD7Z(^QMV4@p}-2k^8p*R1(Teunysh^3I=S+*Mp3EZ+dk;KNXqCN&Y zP=C-`-r!!Mnc`H!=0g5P>HyI7$h`j3bAEnpcS&@PRsrQS@;Yk_v9ahP{Z~M*=tr#Np=%UU^zg#)bM0aZ@hg|jl+3$bbfR$?Nfw4&(SL@o z`0|Jp(O-&rPw!19t=2~pKIQOR03r!x`+<@IFW0RT^(q)|IkEN=iNz*TIgQQ}X=FF@ zuQhdeIwfj3hbm5K|1l`l%6RxlG8cHLeKi&f-NCYp>UyW(%!Ni;LwQ<)>LEqutKJG* zy?;-H6wwQqPQjL4R3EajJqIUnp?_Wpf4De&C5&I4iuD9CHNCaw)slMrVhTyW9Z;80 z_{`kEgAcGgw`~yOXCmhV2dLEI5G`^K-+!Wbfr+cMQx~<1RL)5?m2;F+8bB4+w=SDZ z0|D2Z-7i<1PEt=bLPCYat{cIxKx5#QPov&$95+ttMcq0D{W@{SPNUOknSYJ~8a<174TDu;NR&~e8JM5J8brKN3@d? 
zF4RS4J)JAoKk8G8k-ORyuzw7=mW8Q!&spF{r@9ef=VwkNTu~d83V1iZS^BWT^&X_r z6Jv?hS;@#^&Dg!&l!4!{1!-vY75Wm44Xup0^BE{+>War8`gR02jH3!#I zZ*~9oY~1^BxzZH%I(mgxItUrhm3sUGWlEulf4oM${@YLQU!%eB-G9|<^z->>I2gWv zjrya})kv9P%9ak*i)KPSi2ts+ga-F&G)k4aAiJo!z`W^_Ni%#>(`#CC)78v_+4PiY0^0vP8Hkp4GAMXY6%_DTf3-KGu0SJPU2T=G|JymZ zK5)^H{u4t%8`gj4==3u_4EMD|HG3a`a`?Vlz*10|Gzq_-_~~-4D&pu%r&Ff zL>ZG2fbmBgY{fdTiyj_Ow6;^BBL#O~*e9WcArdy9Zw0Nz_GClSv;1TgYjlTT(Drce?;VS<@Vas*-1N{~- znVQwd$A6kS_cX-3sl~8p2%&s7lI(;lDH)?MuaRW`!JfMYVnsOhWzDkp1J_-}=%$Rv z&V7N3zq22Q(Gnj!<}e#;Q-pKzH{FS2I)k4Ckp&i$5J`Tk!JFWE6I zWc~i%X?0Fd())j>_2$jN{(qKsN2t?9J#E-~K!0T|nGlA3Uo;o(S`_|_9Xo2RMam== z4;pR{1}bzzmznT26z&$rb)^HSo6sVZ-!|OUcJT+IggQVaI#%Gq2OWBj!&$}XqxXhH zv z_J1*sz6i7txs96IFJG5DSVc_?{!lxYkX!}nBMqL}$l~{FhLG3*(x?*|1aY54@&$ff10|_u24ndNR3LE8ZqmE81kUBh2+q6EFF6sfc#5 zL?%Gue(Cg?VOF`MUnj7|7*c-m)rU`;V1G<>k#^>m30hJe#PRNgAgvE;hJM}D+>}do zmRK^5UV&sP23bORozNvGEMp&T610Mhk;KQ!y-gao(dSNQYphroU@OD^nlp6$HpNiODtnqbyDI_?UaQ4tIAGw8S&?_*gd+;0mb7&;Dqp_W^`3pNXUx zsA;cTi?dEW_fXrf*l{=rarXTW=e^#jKOX<10MG^O0Jk8_659+>!4+A6L`f%OO1RI*yg20y`sRErx&@b`uszyOU%%nt!w?{BD^v z;tI+l6|mp0u2_RQziiXMJ8Fu3QLLgT(i)*mAPuDc8mkq1Bys>IUB&VlmuJO>O`6*{ zy`^Ag*2;ex&o1J>M!;1oUmZJ$e~R0U(yvo}E6BV)ne0{6^BD8;reSsz?A`EJ+wQDI znaG7%5XJisP{(J4%Cql(xPKTAZu`UD_0?c_Q?w(7Nn5MMqD!dM2c52W#dI! zMcqOS=4o^!E!x1fL((#-2Q#3upm!#1q!>|*!4lfbUVtc*X;Q+G&a^X$)Ru96TG?SG zCZ;eCZ#&Gfl~ED>8UsY9^I72ea+#u6g1#^}FoRj?znp}wtYeeS_kR@ih*tZvf%rZ1 z&gV=ro4H+db8)>38)?}sq#S}&q+FO!XWA4KH{ftH=OE_Hbxc}g+~!jH??HC6l#FUF znE*YuqIZN6NwC`k7ca+PGwO&gI%=h0!VPZqx}ARQ11HQW?p$sL{{K&-{_Vx6-|G)= z2IrTz?*^CsBBSsfXMbLHtomp*C9{v%{{YJoO!ENIX9;s(IXIR&92UUoq+K<81@EFv zt3;A!b|6V5S!~n>l-zfl&2m#B+}bE)wN0F3hU)dK;6)7lbaQh(zP%n@{rf+~^z&LQ zfj5J+aDUmrVHF0q=WiSK(n9tA>UP}waC>?6{`Ru}xqn%eYJWaYuw)wGw^&Cgormf= zab+yAeW#UR=~Ng7`@_$J(be!{e|V$C{`&mpr;_s!NIOxj)6^^qMMo2E{8uo#d4e`u zkE_Vrvz?i;oihV1L=>;({eBf8+%?9o;!Fs)X#WWA~X74Vw zzpTG1n)`44upYBovfK=#1y1fl{`AR07oj}l*K47QpIMr=;1s*b%T)En zx4YL5a<#O(`o{sVPjtzj=%Qch^v)g*tx* zHCsVN>{-h`i9*x-yj_;ij-x2nt4~v?&#s-6d_T8t+K+($1(Z@|A}TASz3LaJW6FpjAEpKjjjT4d?&% z(NQOR|9{W%(aGWb{~T?@`Ttor0$qpjEo}iWr1+Eb@U5u)t%Lvfd0H-jkv8<=ogkCBSg1(Wp3pYfklSAzwS^Mg_HLqT z_Y$Qp|5?|~ptp#Vnvk_zY~ok~#Z@>qa2rV{(Lz0J`u*I4YW?f9?e+imCxMOn|Kv?Z v|94J~55NEaEbYbge{4`5H2*>KAKIZE+MylVGq?W>00960p}xK<0H^=}TA{w6 diff --git a/charts/latest/blob-csi-driver-v1.24.0.tgz b/charts/latest/blob-csi-driver-v1.24.0.tgz deleted file mode 100644 index 63765c77af83794521818c89655d5240649206a2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 5958 zcmV-M7rE#kiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PKBjbK5rZcwg&NV3gim@BX2l{E(|={KB!6R1-(`NKSh9?{qv6 zxsq@u0TuuitFFKMZvc?uvqZ^?o%V#jXiP447vN&C*av9hvWaC2VmX{FfHyDZn9Ifz zd+v98YpqtRb$WcP|KDo0vj1R~`+nljM(4QEsyQHRPJF2!p8pM9`vEDT6X;}up5BbuL4-V{@d+#=QO+ikB{1i_5U1=cz6cg z8bT8+U<5wvBG2XxkJy|EHkA#VdFrjpCJG864n7k^GQRAhCKIB07fgUtkRX&CH6wx2 zz!J~mRj(fSf$Lr~m)OfL8cc^wUULvYWep))kZaBsM5zkk)rcv<_2W(nn#YaSW|A&h z00kkgIh#N=icuF|bxF|cG}@ib1SxZ%rx)4?W<*Giw-~7E94=FfN8QsbR2Z`@$Or`a zeVZxfrbFtJ{@6HLPpW3SEO5dd<2snQ#J*5ZYPDCd&Skqy)Xi6;ZMAK$;N@f8E%V&;d zGb%Bmz?%x6rIA@aCkregn$u;ork`I2t~)l|bkQ%r)?OhCnf=XMhF@>_0hR`2h&`y) z1n>p1;oP=aK&6^rn*)~6MUn@AUZJ1WP}IL)qYxHh11!)N;<{?gri+87jH4wB8fXks zO~(|w0uY(1hv*BY`e7hI{ml{kLK})6qmnb%1>RL(+A^zg_0SR+y+Vpv#HMH*vJmnc z&Ur+iaXtN^)F#krIottJjZd(hofL(BWi)8fxKNzh%Gyw^S;=u8ZUx zkk1@Z^OEGzvn6;ntAd_fj~I0zPS8<3bYE~`qv z&8Q8&j2D2Oou*m@en{H#Ie@3_x@OH!@-x(5L@b2_&$8V>NZ^*ui6l0574coCt@`}HVa%wEhvWw zJaNHMN+4z&ISkaiO5}hK)Ip4*kK;kqy@r&;a$<{X#$^|Eemp%oF5dg*fL%F{!8$d) 
zJ-EciqKouj0llIhv66?bQAp9l3&YQ~iz&pfTsBiO?{?9N#$_Z~7;;4$zT(RxQbd0# z<~_YPowQmXN%)k*ZvluTlV4QaLBpRL)UOX#iDN-@0rv4Fp_scE4P4 zI!Qg%2niJuyKV%(0*!%J{*8LOaojkm7j^3t^y|bOJB?1GWjYFwAMyQ_5A+>zVHkLc zo?hlf{YP^)G(AIY{>gW+gx=fSpf0dIhife7UDWin>5hG+-*1QqV~IU~Ks88@By_Q- z?5}I!9udM=L#;P&-sE-fvueZAQ_l-4;HMJ7ztgGsf~7ro*zD_$XeT9HsEf>cI#;ZJ z)Tb09ceN>C8E`ENQ}Ld&z>iLKBf!qjoJhE$HYgSFZhW)!VTJ2GNTVml605V4k;R&^ zd%GzEzhMi~(CRDnD;OJE8FA+`P|Va7k3sbJIG9e!eHYcQ#$#&^uBYDW{_oki_u+D- zDe86f3axYyGF~k8_y@|ALJ|LXje7mJpWeSlgW6GEe?^0-HlokZdup*O!^<3C^a+YvSO_ws;2;lEJX3~Xh!+uK0TQbF z3w17&K-D1;3;igx;x$33PN71bDmBQUhrSRw*I?UG4M_}oO8!DzdRQ;*)jG7QE$jc4 z;Q20=AezIgoBp^V@8y$eY>NLJx7r#1@5$-un}h%NIogd&mXLaoR85><;${|6Ik9A~OdguOMS5@Wf|8@?p4_q{)|HM$xhV|b$ zIz7&;|I?EP6D`s4S#Ke3~ zsGOqu--PuyQBM!R{5w1yqCd3DOlhh5|Er_=ZGD%)FwbMkL^Fy_lrb3r7$3C3R;&ZN z=-~lHYdiIA8mO?7J`hvebj)hca+Zz1yo_Hr!KZpjsO<($*=R~Qvzg~Jst*eZ_j(@A z$6Q)$y7kCIE7zJV(_@c<91-u+=*yq4awduquF@YAwp^D#&~E{gsabt|tf{k3L(H36 z42y;k%4Z|VPPmeiaS8JpN%kM?xoaR+gi~MEEPFq2-Bny}%6RP17pV9<`*G+j@v&nL zv+*@WI2Uu%oj9g5SlbV+!g6YQOGc9(*h}l&Z>g2<{{=CU9n(VA@Bf`vC!7D(X}4Mj z`~O+m9idJa^|WE{0hP66LKyaa(Ok4^QOGlP=BTw6DU(<{Xt+HXsL=IXX2RD{xLX)^ zl@6M2LW@v-+i+Lg#UF?g>HwAKSb+;4bhtSVVHKl~-Ww9pF4PTOxYC|qCfKaZXc4RV z9WG}1)O3v2Ur0VN@!GkV;$#b6s!rU$a$RDropok4B2QMbk8$orppD2?)YQKCy5zws zQeyCj+PQ?}Dli{u@XSUQzh5(i#0HQ?oyZ!9`y>($x&>QjrrRGVNi{PdBY^6m_OBIu zq{ywCARjq;leE{9$<12vHmO?CCYv2$u6LSv$)`(2v5O@#0TTC1r_2nq$|e0efi1?6 z@`bNHeA)zKqKmWxw@lEI>L8ABCj@DIC^Pisrsk$xsLS`Aj6mKuvqyTAX$2xrf?*#g4;4h_mm1 zIPdjF{qgu81%NJK2e<`cX4qzk3a-ckBuY|QAkW&JR!bDThkMDfWzyiRXpcWjZ;mTq z9lFOpv3~Q#*-2|1#3bNCp0!&=Kw*5(nknP{>?cMUzy3S|+r`8)k-i)QKo{9BU<5y$ z;nX{1f^J>Z46{ZXg4uUj8M;nni*6E+II}b@84De3)WOS~gqZlZ37SVq$Z<2=E51Nl z4&jN{aeN#V*cmBnF$B!8o3KFIog~B2q(vcj%cKrhP!_2m{eE@D8r1n^n+D!dQ|yak z6+MyG2xS6kAobT+t=J=x128Enme066D>iJ>+{P&_1v9f&{?mAN5&ty;u3Gu(*g^bL z+-{V9o!(nP=Jm;BucDsEn3p#Vv!h_|hQHc&XD!M^F3fx=-hY5PJ|k3~egDJ7cyQYv z_O7o6!<(WVF-+82EtXb8|JN-hn@H;_rfj@OtEgLu!90zQq(vLJc1T(#-Czb(7WB@f ziWDP?F<3%-*$WV5vP()h(%E$;dD=40Pb)jD#KaWl-ff3DwlXTBUt@si6g~?)UoKPh zO3)YP17M`J72+Gq;OwF0OZBBQ3jyltYk;lnZm{ zOq*ij1{`kY9K@Wtj!9*V+gwWjJ;-jBl2Oei6QIXd^o~#>33hwn;^i1@Mjg>bN39f0 zxWTPnx6`kE;DkBFoy*O@|Nm*!zr7gsd;Q_f;QaFT-QcocWE8&R%*&2dAFZZj_7VFZ zU^#+m9w7QGVZJK|$5MyG0yv$ht7fm@U6fswNX*O*B&j5ejoN^c`);#YZc2n(8-=X4 ziSx=(y`B}kh=HGOZm!3-*Q2X{|EHLKUW+C0W{?){FB>?l!r=D&ZNpw#sNP@Qj(Z<& zFR$L;UiLrtFRN0`=LVKc1N;{22&MB-T_>)LCAROh5-gnx!(e~-c`&*fe(Vo#wAf#t z-~3c^9s+46Ds-BfMWN_u!j1n5MmJB;X6tbkd3&}qQ?_$vpoNIywY=Z2B80of*j1bf z;WlmHGOOM>VCRau%L4tdekCQC)xO1s$^Jz3DL9Pd=Vdj4-16_TA2Cb7zavc>TapJt zHnlgxmYFZI{lx5WhEOuQ@#v7OK&|Ft3+rO`?p`azTj=JBAa{4GJ&{(m^F>w1S)tqQ zIh<*Kx8|5SE?7o^S%d}xEa05+Go5Gt_m}loMRWhHAJ$`5OO~5qw7|(-$e%u0=)#kS z{CX`^@pC53VA5vi=8eO+|9gb%c*XLHn@q12%uYK;u`1rkV_q+phcLI<9rngf`7W># z*@i7)Q_L|F*eQ?r6p3uMb73$y!NE%>P5gygh5Vn;wBG$nm`(ZrC#RYFUplRJ=Wzea zvoy=HQYk;ew&x@GEg&46xX#gM z&VWwY6;0N#L7?;7J!ktzZ8~tTpSF*=4F(*Lw{>@)E%gK#Ic5$K}ovE*37EUXMyHZh?yQ*Rxa zLD|~%EFbCj5l3N4N)q8NnS;5JzDUks0T+jC!57OGEF9QNB=m(WFrF5ag6kNH0OI=c zbc*2hpqFwdtWr6U@CLXivVj)M=&Js`&mp`uq9BYC-i9Jg z<$u1#mkkV=9MD3Tuq?muouaTHTn5;+;)42EN)pRK&wDa;wqUKNVK%RK^WfXfZ`9-2 zYi6UGnTs{qjp3CjR&*bU<*UVh$l)mWvF1F6nX4D0W;4&j)XBMHo9U@;sxQY$Wy*_XOTd;PbA^Wp8g(be##KkOB|Nb8hQLM3o)OR~r%DH=?J;rQnK z^78iW<<;AFpT>oO;Pl*UnNHo|)5zSzQ{WLc#3CZMpZlZn;A*%NvCNG?rNpu$>+EA_ zosDDb>}7DBZAZ5*%$uWGWDb-O8uxE*Kc4^l_UgJn)G^cB;ir!~jV*WSQYlSE?@6S* zYIJ&o@%h`!{_Ush-uX?x_jV^*nH!-p#5RW#l7Aags2tTn+EfSi^calkO+&%95^dRl~M0I<~E*Gv3z$* z3P|ku1;Z`Il@LA?2@Z#j9v_>&8_Wn5&C(D`5ffh}sUYs0`XcL5Btdu8fdxzPP)+a* z)dXgr+gBYy`skYMnLbynMPg198C*G@tDQOOmey;;Is5*H51-!lZ--aC{_XJmW50;< 
zu5}9)ssuF8U!y*t9PygUfN!tTqDrlT@#a;iQb-HZ3}lozCM1^PtRNsRmiJqM*B^Q@NL)LV>Mt5nO6ukFnt%--v}ZYqhsx;Yy79V{)ZZ*R7~5IMJq z18Pf^`E^z8-siOwu5ZLO!zo#|sldQ)^+;wyN-L4JEL7Rb+~2K4alhf0)6{OwYSlC~PEi|dqt>y*f8=I8eE0J^iWu7wY_R!{oJg^D;l)XIE~TA5vA zEYDG6v+gWecuwQY$1`kxyYVe?bQ-nRa)m7DKH6~qdZj*TL|!T~7I!vzY07HauS6c| zgkHQ(s5qWqo;MOPo@9>svbp+=!lpJmoBBt&(yT>-sI1c5>oi7}Oqw2Z0vEEg8HFGw zR`L7&)=6c&BZU=G+s>broQUWLD-QmXa{ozLgg!Na^_Smrwi2Zj^QA7f>igK<*WmMR zZn+O%S^`xNS6y39lLXxuTit{y6JuQyU)@yVlfbbK^hxgO^igs{>DC&7b8m%u4@lb5OLi*I+Y zALMFjclD10V4vucKhZ_M)a$=$ObNUBm%6{I@e3`$5NH8>q5)#tGWB|`sSo6zbX zW}ei)9JTL*?zaO$eFtY{s;0K7h*r9GvxssCVY^s}isP&}CRhNKJIq`-n%AiAmyz2% zEL#w@S?zT|PeN!j#vMYFOQMK|u8rHvq}43Ts^wSfTS6T|XJ2E{W-dRrCLyWaOMZjN zf5DdFsx?Cyr>b{qu{p5K72#D%5|y?IUG{mE_Z0mViN%_EFZ+03Rn>+U>iikhYy}mu zXD$0A3QhC#c3DC@j-ptvK24!MyLM9Y{oJ}~KLY+2P)eDJsH~9os*ltcDg9cj$%)dP zo=+)-KWkMY6P`uc7YM^a&F{aJ>;F&yun~Z# zc!D?S|5onzACFrH|L^m(TmU0&=*2rhCUdb+k*qzTZLlG?zc^|OCAjU~MAhykN?rc5 zuA4z`5hpbvYq{9Ou>^{%aBScHpZEJZS!d=0CJUJG4VPv}bPr7XSeN|3bj(t^lY20RI8M?EnA( diff --git a/charts/latest/blob-csi-driver/Chart.yaml b/charts/latest/blob-csi-driver/Chart.yaml index 09f926b1c..70bc7226c 100644 --- a/charts/latest/blob-csi-driver/Chart.yaml +++ b/charts/latest/blob-csi-driver/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v1 -appVersion: v1.24.0 +appVersion: latest description: Azure Blob Storage CSI driver name: blob-csi-driver -version: v1.24.0 +version: v0.0.0 diff --git a/charts/latest/blob-csi-driver/values.yaml b/charts/latest/blob-csi-driver/values.yaml index f11e252d4..07d8b214b 100644 --- a/charts/latest/blob-csi-driver/values.yaml +++ b/charts/latest/blob-csi-driver/values.yaml @@ -1,8 +1,8 @@ image: baseRepo: mcr.microsoft.com blob: - repository: /oss/kubernetes-csi/blob-csi - tag: v1.24.0 + repository: /k8s/csi/blob-csi + tag: latest pullPolicy: IfNotPresent csiProvisioner: repository: /oss/kubernetes-csi/csi-provisioner diff --git a/deploy/csi-blob-controller.yaml b/deploy/csi-blob-controller.yaml index 415d5a055..9dc5738f9 100644 --- a/deploy/csi-blob-controller.yaml +++ b/deploy/csi-blob-controller.yaml @@ -73,7 +73,7 @@ spec: cpu: 10m memory: 20Mi - name: blob - image: mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.24.0 + image: mcr.microsoft.com/k8s/csi/blob-csi:latest imagePullPolicy: IfNotPresent args: - "--v=5" diff --git a/deploy/csi-blob-node.yaml b/deploy/csi-blob-node.yaml index d4879c70e..3203f029e 100644 --- a/deploy/csi-blob-node.yaml +++ b/deploy/csi-blob-node.yaml @@ -40,7 +40,7 @@ spec: - operator: "Exists" initContainers: - name: install-blobfuse-proxy - image: mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.24.0 + image: mcr.microsoft.com/k8s/csi/blob-csi:latest imagePullPolicy: IfNotPresent command: - "/blobfuse-proxy/init.sh" @@ -120,7 +120,7 @@ spec: cpu: 10m memory: 20Mi - name: blob - image: mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.24.0 + image: mcr.microsoft.com/k8s/csi/blob-csi:latest imagePullPolicy: IfNotPresent args: - "--v=5" @@ -183,7 +183,7 @@ spec: cpu: 10m memory: 20Mi - name: aznfswatchdog - image: mcr.microsoft.com/oss/kubernetes-csi/blob-csi:v1.24.0 + image: mcr.microsoft.com/k8s/csi/blob-csi:latest command: - "aznfswatchdog" imagePullPolicy: IfNotPresent From fbd784a1a22b1f6b252323ceeeafee0087724a52 Mon Sep 17 00:00:00 2001 From: weizhi Date: Fri, 23 Feb 2024 11:00:21 +0800 Subject: [PATCH 076/157] Update csi-blob-driver.yaml --- deploy/csi-blob-driver.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/deploy/csi-blob-driver.yaml b/deploy/csi-blob-driver.yaml index d2de725d8..d13ff5f91 100644 --- 
a/deploy/csi-blob-driver.yaml +++ b/deploy/csi-blob-driver.yaml @@ -10,3 +10,5 @@ spec: volumeLifecycleModes: - Persistent - Ephemeral + tokenRequests: + - audience: api://AzureADTokenExchange From f3035333a664c7cbc9fbc095160603b26b1ee24c Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Fri, 23 Feb 2024 09:20:49 +0000 Subject: [PATCH 077/157] fix: add tokenRequests on v1.23.3, v1.24.0 in kubectl install --- deploy/v1.23.3/csi-blob-driver.yaml | 2 ++ deploy/v1.24.0/csi-blob-driver.yaml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/deploy/v1.23.3/csi-blob-driver.yaml b/deploy/v1.23.3/csi-blob-driver.yaml index d2de725d8..d13ff5f91 100644 --- a/deploy/v1.23.3/csi-blob-driver.yaml +++ b/deploy/v1.23.3/csi-blob-driver.yaml @@ -10,3 +10,5 @@ spec: volumeLifecycleModes: - Persistent - Ephemeral + tokenRequests: + - audience: api://AzureADTokenExchange diff --git a/deploy/v1.24.0/csi-blob-driver.yaml b/deploy/v1.24.0/csi-blob-driver.yaml index d2de725d8..d13ff5f91 100644 --- a/deploy/v1.24.0/csi-blob-driver.yaml +++ b/deploy/v1.24.0/csi-blob-driver.yaml @@ -10,3 +10,5 @@ spec: volumeLifecycleModes: - Persistent - Ephemeral + tokenRequests: + - audience: api://AzureADTokenExchange From 91c11c62cbbabd1122df322e84e291644c2e1d77 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 23 Feb 2024 22:04:57 +0800 Subject: [PATCH 078/157] chore: update master branch build version --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 369d260f8..fcb0bdabe 100644 --- a/Makefile +++ b/Makefile @@ -17,7 +17,7 @@ GIT_COMMIT ?= $(shell git rev-parse HEAD) REGISTRY ?= andyzhangx REGISTRY_NAME ?= $(shell echo $(REGISTRY) | sed "s/.azurecr.io//g") IMAGE_NAME ?= blob-csi -IMAGE_VERSION ?= v1.24.0 +IMAGE_VERSION ?= v1.25.0 CLOUD ?= AzurePublicCloud # Use a custom version for E2E tests if we are in Prow ifdef CI From 8693844502aeac2455b0d5dec5272f3f71caa272 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Fri, 23 Feb 2024 14:17:02 +0000 Subject: [PATCH 079/157] feat: allow multiple subnets in updating service endpoints --- docs/driver-parameters.md | 2 +- pkg/blob/controllerserver.go | 17 ++++++++++++----- pkg/blob/controllerserver_test.go | 24 ++++++++++++++++++++++++ 3 files changed, 37 insertions(+), 6 deletions(-) diff --git a/docs/driver-parameters.md b/docs/driver-parameters.md index 74aadc4e8..d03af01ea 100644 --- a/docs/driver-parameters.md +++ b/docs/driver-parameters.md @@ -47,7 +47,7 @@ isHnsEnabled | enable `Hierarchical namespace` for Azure DataLake storage accoun mountPermissions | mounted folder permissions. 
The default is `0777`, if set as `0`, driver will not perform `chmod` after mount | `0777` | No | vnetResourceGroup | specify vnet resource group where virtual network is | existing resource group name | No | if empty, driver will use the `vnetResourceGroup` value in azure cloud config file vnetName | virtual network name | existing virtual network name | No | if empty, driver will use the `vnetName` value in azure cloud config file -subnetName | subnet name | existing subnet name of the agent node | No | if empty, driver will use the `subnetName` value in azure cloud config file +subnetName | subnet name | existing subnet name(s) of the agent node, if you want to update service endpoints on multiple subnets, separate them using a comma (`,`) | No | if empty, driver will use the `subnetName` value in azure cloud config file softDeleteBlobs | Enable [soft delete for blobs](https://learn.microsoft.com/en-us/azure/storage/blobs/soft-delete-blob-overview), specify the days to retain deleted blobs | "7" | No | Soft Delete Blobs is disabled if empty softDeleteContainers | Enable [soft delete for containers](https://learn.microsoft.com/en-us/azure/storage/blobs/soft-delete-container-overview), specify the days to retain deleted containers | "7" | No | Soft Delete Containers is disabled if empty enableBlobVersioning | Enable [blob versioning](https://learn.microsoft.com/en-us/azure/storage/blobs/versioning-overview), can't enabled when `protocol` is `nfs` or `isHnsEnabled` is `true` | `true`,`false` | No | versioning for blobs is disabled if empty diff --git a/pkg/blob/controllerserver.go b/pkg/blob/controllerserver.go index 009396878..5dbe00477 100644 --- a/pkg/blob/controllerserver.go +++ b/pkg/blob/controllerserver.go @@ -274,6 +274,9 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) enableHTTPSTrafficOnly := true if strings.EqualFold(networkEndpointType, privateEndpoint) { + if strings.Contains(subnetName, ",") { + return nil, status.Errorf(codes.InvalidArgument, "subnetName(%s) can only contain one subnet for private endpoint", subnetName) + } createPrivateEndpoint = pointer.BoolPtr(true) } accountKind := string(armstorage.KindStorageV2) @@ -284,11 +287,15 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) storeAccountKey = false if !pointer.BoolDeref(createPrivateEndpoint, false) { // set VirtualNetworkResourceIDs for storage account firewall setting - vnetResourceID := d.getSubnetResourceID(vnetResourceGroup, vnetName, subnetName) - klog.V(2).Infof("set vnetResourceID(%s) for NFS protocol", vnetResourceID) - vnetResourceIDs = []string{vnetResourceID} - if err := d.updateSubnetServiceEndpoints(ctx, vnetResourceGroup, vnetName, subnetName); err != nil { - return nil, status.Errorf(codes.Internal, "update service endpoints failed with error: %v", err) + subnets := strings.Split(subnetName, ",") + for _, subnet := range subnets { + subnet = strings.TrimSpace(subnet) + vnetResourceID := d.getSubnetResourceID(vnetResourceGroup, vnetName, subnet) + klog.V(2).Infof("set vnetResourceID(%s) for NFS protocol", vnetResourceID) + vnetResourceIDs = []string{vnetResourceID} + if err := d.updateSubnetServiceEndpoints(ctx, vnetResourceGroup, vnetName, subnet); err != nil { + return nil, status.Errorf(codes.Internal, "update service endpoints failed with error: %v", err) + } } } } diff --git a/pkg/blob/controllerserver_test.go b/pkg/blob/controllerserver_test.go index d7961cc2e..3009ab856 100644 --- a/pkg/blob/controllerserver_test.go +++ 
b/pkg/blob/controllerserver_test.go @@ -407,6 +407,30 @@ func TestCreateVolume(t *testing.T) { } }, }, + { + name: "invalid privateEndpoint and subnetName combination", + testFunc: func(t *testing.T) { + d := NewFakeDriver() + mp := map[string]string{ + networkEndpointTypeField: "privateendpoint", + subnetNameField: "subnet1,subnet2", + } + req := &csi.CreateVolumeRequest{ + Name: "unit-test", + VolumeCapabilities: stdVolumeCapabilities, + Parameters: mp, + } + d.Cap = []*csi.ControllerServiceCapability{ + controllerServiceCapability, + } + + expectedErr := status.Errorf(codes.InvalidArgument, "subnetName(subnet1,subnet2) can only contain one subnet for private endpoint") + _, err := d.CreateVolume(context.Background(), req) + if !reflect.DeepEqual(err, expectedErr) { + t.Errorf("Unexpected error: %v", err) + } + }, + }, { name: "NFS not supported by cross subscription", testFunc: func(t *testing.T) { From 87277020dd48798e8953057daed005e2591a113f Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Sat, 24 Feb 2024 12:03:18 +0800 Subject: [PATCH 080/157] Update driver-parameters.md --- docs/driver-parameters.md | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/docs/driver-parameters.md b/docs/driver-parameters.md index 74aadc4e8..7bf4833a1 100644 --- a/docs/driver-parameters.md +++ b/docs/driver-parameters.md @@ -1,14 +1,27 @@ ## Driver Parameters > parameter names are case-insensitive -
-required permissions for CSI driver
+required permissions for CSI driver controller
-    - Microsoft.Storage/storageAccounts/blobServices/read
-    - Microsoft.Storage/storageAccounts/blobServices/containers/write
-    - Microsoft.Storage/storageAccounts/listKeys/action
-    - Microsoft.Storage/storageAccounts/read
-    - Microsoft.Storage/storageAccounts/write
-    - Microsoft.Storage/storageAccounts/delete
+Microsoft.Storage/storageAccounts/write
+Microsoft.Storage/storageAccounts/read
+Microsoft.Storage/storageAccounts/listKeys/action
+Microsoft.Storage/storageAccounts/*/delete
+Microsoft.Storage/storageAccounts/blobServices/containers/write
+Microsoft.Storage/storageAccounts/blobServices/containers/read
+Microsoft.Storage/storageAccounts/blobServices/containers/delete
+Microsoft.Storage/operations/read
+Microsoft.Network/virtualNetworks/subnets/write
+Microsoft.Network/virtualNetworks/subnets/read
+Microsoft.Network/privateEndpoints/write
+Microsoft.Network/privateEndpoints/read
+Microsoft.Network/privateEndpoints/privateDnsZoneGroups/write
+Microsoft.Network/privateDnsZones/write
+Microsoft.Network/privateDnsZones/virtualNetworkLinks/write
+Microsoft.Network/privateDnsZones/virtualNetworkLinks/read
+Microsoft.Network/privateDnsZones/read
+Microsoft.Network/privateDnsOperationStatuses/read
+Microsoft.Network/locations/operations/read
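Not part of this patch: purely as an illustration, the controller permissions listed above could be granted through a custom Azure role, assuming the `az` CLI is available. The role name and the `<subscription-id>` scope below are placeholders introduced for this sketch, not values taken from the repository.

```console
# hypothetical custom role covering the controller permissions listed above
az role definition create --role-definition '{
  "Name": "blob-csi-driver-controller (example)",
  "IsCustom": true,
  "Description": "Example custom role for the blob CSI driver controller",
  "Actions": [
    "Microsoft.Storage/storageAccounts/write",
    "Microsoft.Storage/storageAccounts/read",
    "Microsoft.Storage/storageAccounts/listKeys/action",
    "Microsoft.Storage/storageAccounts/*/delete",
    "Microsoft.Storage/storageAccounts/blobServices/containers/write",
    "Microsoft.Storage/storageAccounts/blobServices/containers/read",
    "Microsoft.Storage/storageAccounts/blobServices/containers/delete",
    "Microsoft.Storage/operations/read",
    "Microsoft.Network/virtualNetworks/subnets/write",
    "Microsoft.Network/virtualNetworks/subnets/read",
    "Microsoft.Network/privateEndpoints/write",
    "Microsoft.Network/privateEndpoints/read",
    "Microsoft.Network/privateEndpoints/privateDnsZoneGroups/write",
    "Microsoft.Network/privateDnsZones/write",
    "Microsoft.Network/privateDnsZones/virtualNetworkLinks/write",
    "Microsoft.Network/privateDnsZones/virtualNetworkLinks/read",
    "Microsoft.Network/privateDnsZones/read",
    "Microsoft.Network/privateDnsOperationStatuses/read",
    "Microsoft.Network/locations/operations/read"
  ],
  "AssignableScopes": ["/subscriptions/<subscription-id>"]
}'
```

Such a role would then be assigned to whichever identity the controller runs as (service principal or managed identity); the exact identity setup is outside the scope of this patch.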
 
From a6fbb05ce93a5cfaa315b6fb79513128b37ae816 Mon Sep 17 00:00:00 2001 From: weizhi Date: Mon, 26 Feb 2024 13:59:51 +0800 Subject: [PATCH 081/157] Update workload-identity-static-pv-mount.md --- docs/workload-identity-static-pv-mount.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/workload-identity-static-pv-mount.md b/docs/workload-identity-static-pv-mount.md index e94b74a5b..2794eb817 100644 --- a/docs/workload-identity-static-pv-mount.md +++ b/docs/workload-identity-static-pv-mount.md @@ -1,5 +1,5 @@ # workload identity support on static provisioning - - supported from v1.23.3 + - supported from v1.30.0 This feature is specifically designed for blobfuse mount and is not available for NFS mount as NFS mount does not require credentials. There is a standalone blobfuse mount for every pod, it may cause performance issues when multiple pods are present on a single node. From d9183c6cc3110dbd835cccfb5888ec874a837620 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Wed, 28 Feb 2024 10:12:38 +0000 Subject: [PATCH 082/157] feat: install blobfuse 2.2.1 as default version --- charts/latest/blob-csi-driver-v0.0.0.tgz | Bin 5970 -> 5967 bytes charts/latest/blob-csi-driver/values.yaml | 2 +- charts/v1.23.3/blob-csi-driver-v1.23.3.tgz | Bin 5944 -> 5944 bytes charts/v1.23.3/blob-csi-driver/values.yaml | 2 +- charts/v1.24.0/blob-csi-driver-v1.24.0.tgz | Bin 5959 -> 5959 bytes charts/v1.24.0/blob-csi-driver/values.yaml | 2 +- deploy/csi-blob-node.yaml | 2 +- deploy/v1.23.3/csi-blob-node.yaml | 2 +- deploy/v1.24.0/csi-blob-node.yaml | 2 +- 9 files changed, 6 insertions(+), 6 deletions(-) diff --git a/charts/latest/blob-csi-driver-v0.0.0.tgz b/charts/latest/blob-csi-driver-v0.0.0.tgz index f8b67aeaa48d507933b1abc8c7a6b0305d7f023d..46130d98b0e1f304fd7d1b7ed972c46bdb5e2ed2 100644 GIT binary patch delta 2140 zcmV-i2&4DXF3&EIuz$^I>*%=E`L5MIR*z6?e=L-@KuFBLYdx7(c5r`^hJErGxFC#n z(V|_$zQ6j@#S(<9IUsCKe5s$E{~mBaZ(TM)W63z40lF9ukP)65#U6A~Mx<(s75-ME z)o9gTddP>CXxa6@z-|CyzY1Xe`fs<}?M`<6AGc2q>;E|#@qh3Px;2C*SilH;)Ma6KpCQHuKb5mrWEDLL7W1h-7@(Ma{b(Mbj3f8A*|bka)II9_nvDaNTR>5_{Q2 zgXxgTYYqaadT9%C&Dnw|{Q$gx0288l7fgUtkRTKZMbGl%PBF>Fu~sh|NV;SJ6ok0u zYy#CNMqPZ>C4WJ$(`a`#6Qs<6o?cudm=Pg4-eRDtbGS4u9(7N%P+`orAR`du_id(_ zn+^$D?Z&?vN9#$|Y?lR2xJ_IK6PMT*>PfBk>J=Kn6gW^DkYtEaK#U`gaXqPdj5=r$8fHwbGw%zqY5iN&30+Kf)gEZ|`I%&}}n zB_xr>n%UP(tr%H2eq02 zz92T7+cpcRRP$?dz!JJh@&M2)^s^d@`u}Sb!Xj*d1^Pl8TV^q?9$EsUS4c67*c6RJ7D9f* zIgjWwuBShg+5|c+hdUsu@d>uGlcLbCECnqZ7m8C`nHUN+rj$vn{?amm9Y_D;x=79e z`OFbDpE>6-R2Mxw=Fzx#0`58tmm0uyMwW#x0)HWym+42SWXJ=F9V~Ip)*y+F2)#mg zKMD)`e%|s?JYB*9w4H>}H7AS{xx8?((D;E0WlrgjshC0{;0v+<#6hrt+knKJaamRR zZANYIWxN3N>@?LP@I%s;&jCDb%r$F%lAodeB4Q~dc$V!3LISsJP9(9htEi8`4ivPO zH-EU7Xr?$-^T~XlaT!qcQZmE?idKsbVSy;4oH2hBCl7dzJVww4?Ath_HMti*rLgdBqW!uZ`vPslkDtNF26;5uqS zIYi)z3yx9(G2_T#pypK~2YjFoVibKG52Efhq$HLTTU;|PyQuT8)1%|!y>AZKmGc;^ zQ`6gnOKdE(M7;{eTTZO~L}IatR8FJwL>k$R{A*1e zo=%Bc&Y_A^+J6j6wK5()lFS7jYF~}TLU*w2qPpHGICG(q)=-|7pn6D=`Kq_VR`1^v zAw~28rcB9(JeP30Wrlm<|R^{vY$(?GyA zXZOn$r<2rEjgU|wvFk?gE6^Bt<}a z5f_Gmm+0wbUey0(&V#0BsLen54wle+n;X;xw&!q-<-CiUo;KaFuk`y3@n9^m=MSg` z>5+sk_LTi~4csF_7;C8Y=FOYD?tNBmSbFMtVFmnDBKUVY6<@Hl=MI~F-4X4igbQ_% zSx@JR^^f|LV&twi1uO%uWq)BR-g6fC(W!0(*!h_g30Kqxr2^iKZg<^u$}R=g%C)hSe{Q>6wO^w1X~=NfD~sv(I%Psv}1OAqU% zJz0mgOUwFyC3wE}-8F|-H~n!#-peP`*cAUcZnZQ1-;>kR*0XU390CT_>%;#446~C7 z-35Q!okRZ5b2Q7cQYk;eI259j~qXdBM|&$uKz;;z(xR`;tAfQ|694=e>`p-{J+oBasiCAp%?E2 znassPMY8sUw!wzn{^F=Dl;E~^6IHvHD0TVIx^4!&MV!=xtmR@8#}X*6!m)wdNIHq` zVL{XH=N?q+U#D%a|F=I0Y}EfJZ!-G7b8>R{{r_iaFQ)%vgYuyH51Rkb4(-qm?a-dN 
S{a*k80RR6S@1cMIr~m-l>L23( delta 2143 zcmV-l2%z`RF48WLuz%$A_)Yuh^t)F3xc%njJJi}A3*{{k67%m`Po|X}+@GXjpL_-` z2%}xJXxFgsul{tg1R-k<2%8gM>SyP_2OQ8_mrc-EGLC0}F2)07gr`Qa2VImAsoG+N zztw0pTD6xR@}VVKcKt7~8-Uoa0$9KP+wFF{lU@JEoi~T||9>2fcz6cg8bT8+U<5wv zBG2XxkJy|EHkA#VdFrjpCJG864n7k^GQRAh=G~8?X$#Vfq)0DNXZdlbnB?MEtCtNVU9tcQ zLR@n;foc?^E`Pr2lAzaVv^$##QszKUFRl^Hh>#p_F;LYxT$&b-x~EyFFlJkj5eV}8 zHdD+^hXk#5<6n)V^`vUH%K|6dCa!~tOY95vq*i^)6AHYk@L3v}<#V#Y5~4X>Hf#F%b>O;V!%Y|c@@wrCvXI%-yk+?HmLFhgK!(_Z zT1^075F5^In*~&=`L#J<30)+40O%F^Sq(+~|1}C>5jMaAeIc%^#%#JcXv#QRvY>&+ zAk}nCv41N7k*Ru!zF?{!1_IRI9I-F7q3AIxIdfg$UG=3cvlv$oErHQ1q?ko)ipC)e zA;00ANAwxj(;rH00-ct_9T3&{1l!q3QRr8ef)%|qyKST zB zUZJ}mg@t`TZ}}*mE@1)MPQvJ#6UK>LUbt9j{6K{=r}W2EOd%2Q1z7;%AXva{Kw{3g ztSbFBqc-?5UI2P_nradFA!*C!0G>AHnl(Sk&rp96u@n+K%XR}Hfm=2wlGxZ))W={4 z3V&M58{A7YQ=F>#WWLY145)f38R7v&t3`*fK$KC=m_(B$5+J2o(pW8ejqbu=Qxqmk z1oznUT|iy+cLt=39u)3#@kuCAZ?{@)^jOOSJ=SW8C9x8=f~BG4QmLm4Jzw#v?jo!d zn5AEu&BiH*7CyG2i_{0P%>31mk~Q|F?|=Dfc2|8?;y-6X4ncll{BETu zc?{O6>FvQKHWpo^{|e|8{fL!3bd5ra9$pxJu3bzae&w>6l6kj_PBbnf$-AmTs)%r-nryPC@KqR4TKTuNO<+^pEUIpVVC)R!!C{IgJJ*3Ed)mve! z_wR|2B6O(fR=ime`)PF1C4;P28gz>9Wv7SJtrnlC-T2hZ+Od;vF1L_hA zpP3tY@BxY{d$$~mc~a*lFJ1E|9K)@74v zAmEy_`{jz$N$ROaNT`t5btCu{XbimaY1G?|=Mb(|=Kb{D|+b ze4y`$3&X%m^z>i;t5LDMtT=AV2AOX$7L4eA2hbGXKG-bGDMo9@_G`u&D@FqYW! z2ULUfNJ1BT%Ko|r?hzr3HPm|Z=1pGrKC3n?J@ve>0)8qH{5zeBFId`hht0n3h;~xK zg}TVBr*p;nM}0~$a#x!ImVW`)vM?3zISc&gR5t?b{LG1jD{6yM0q@2)OCMIa-h(uH zVl1&bD;Zg=8N0WeGVmL=APud)LSKThp_LJLJ_E%}UGW%1e~*Ldl-zew{c1e6=HPnj zt?vJxje8$1SDK<;N3YOI2O;CRQjdS2OeqxckJqTzfBWhEYcv?XyMKC(em)-!2gCQT zQGYbL8YweO+0vnU(M+fZ@!vI<(BNK;MyXO4WEV9Tm^WQAX@*Z~dQD5ty2ybE!L(LW zMxD#Bvr?K`O%tGU&qr(2qhZUK5n+6e`rIQiBY7=nIi^4YnQCki?*;{gZ=+3 zvychh1%Erohy0)CXqIKAQhtQV*>BwaXmS_ZHIpH9F&>z5zV*pKX-{7!#b<}y9V{ib z=Og$nARL^y=Fw-)fKJ&JP1dvJs@M{Yh2gJ83c|2xw!TX;1$$|`@R2#NDyO?v^CG0V zMnyg7`*ZMsV^^2o-2ok?>iR5%1BZZ4oW(xrHYuBW8#?dZDA@kJJ;!tg-;e^Uu>L7? z;7J!ktzZ8~tUL@GQ!{Ko|~ce*djp|AzvAjQ~8w6TC_Pw{pM#c-%Vpf1ju20vKsS zFWw0hhm;-3)q*IH?I)%f%*+B~V<2V*|I5 zbQ0aeL8jl&J*d{dPTOApZ+{ZlsQ*viWb}XMNA`t?d6>?X%PG+HX$J-k!Bi-n8C+*J{6Mzkls~hgyeYp}Yk`V*Xw0 z$+WVA`wwZ@C!c`}!e|#Q+coU_i9Z{ilg3HS0bz6EOa1WTkAMSu=dvl9NXGFT(B))+ zjP%qf_MnS0GF4kwk&e0K{Pxz~=SeZns;l?D~JxetTU1&(Vm7=g_So zG{pkO;Il6BY=7SHh|QT`Gug12r{213s-O_!;4?ua>)pf(`M5Tk$?M;_yV1lSy; zP>-j80s#*CvH+^d5IRR%T!lixVb(@PDSlXK7@X&&d)?h^BPatm)^Yz;!2vn=bnGx7sUYA+x=CtMKEkKETR=46z5b zngG5eHeA>?3#e4{YjeO7x=8W>&@1$d8jAY&YZSsFY=8y&LR?pk*>rKxjB&JLK?6-d zs_B?vR{$b2^$>l*R6h&^sJ}U4UuZ+oV^nhHx_`jC>PuT@F|Hq40;5+*F^kv?O+pqz ze#1GB=rgXTKNQ*oIxU8KAgb{xwzHF>(64O5r+HLL%TxvIN9Iuz=fu#DZ~I zRr+m4ZSZBh0QBrM)gtgi(w5HwJZ;uBYkrcSq5dLbB_w#3?FK>uw`@)%v9YVDPrwco zw3auxmuRjyRrASwpK%#b^-?m#1B%v*4u4^RD5IP)iKZ(gKuWcwv0C&R-KD{%C`?xf z?y={)fV%4M3`iF}DBKs~lTf1GZnfIzv6csVtkn`rVl8Y1OGC+}Qcst9zT#EgMOY~? 
[GIT binary patch data (base85-encoded delta blocks, e.g. "delta 5685") garbled in extraction and not recoverable]
z`+mkoSD120(BoqrS$~$^x&PNyRr&hAorCKG7Y*q@F%-06{kMPN; zJv=o3i5A4`XoFx%T#!p2&OJ3P_8dCspUua|T1|uX?tM%hZ70ys^Q%n@oid=|9v&>@ zz?4uxb#3n{xH_`pS2%DbYM3koy%rmvO~g?_UN3-i;sE+X8GohJMrX(RpTskUAgWt2 zGN6wBx8|*OseQ_Z7sH)mHH<;g!0))vJ;@<@*sRQG5v%zf zE@t`6lky2q0XmaH3O*CE$|e0efi1?6@`bNHeA)zKqJxu@3PXQG&eRz+evXhjk9N=~ zpptmdH7Cfm<4Z(BV#2BL$QC1xZQYnBD-o_uW*>&1d5*=9gFfLk>*;0HNas8>;^e!e z(hApU2h|gkYx+l7n&|N{_ii2T&L(JyXXx>eAw$DgM+#}%*+-D96vzxm?)w6zXm5^y2Uk6J}QVSLY;DdYb9Cq@|`ejb7C zV&a)dUycEwi|iLLf*;Os>K!scw+?ECS)&cX?7OTC4HJLaqMO7c&MZw!#zF@hb?`DL zAtwH9g62^Ya@-8}iZ76sLwMqK93Mvoc1Frt3;{FjCM=Mnc9P*}(xQ;NWm1PLD2r5( ze!se64eI=|O#| z|7kqCi2r{Y0avYjb?hMiDQ-7PzfSM1AoKcUvR6^hW6aB&hS^cDcf((8yR#N$A{S;p z6z@Mk9iI^@&%giSa@@b|4Z6eY{@|u)M+_6SR*R+8(EoLc$tKdeiYXf}(kki}VlYpm zBWckFt{sw=NjI1Ql?A;ssUpRQVhonhUiJb+ne2a(5{`6sok^ayjPujV4l6M+g}Haz zVUDegis;uEAUcK50?(Jr6ulDkh53LP%u4^|By?pRn+(3Es7JKgpAE$CnRh;ClG)7d zpqtC#E^MS_w~%rOQjv0D4xMRJOx%FO&76anGuJVxjB%Sw>Awfr$xkkh3`1?vSZapt0|d%#Qq0Zj$oPxh(1f0@5;fk)ZwrIPABTB*(-P#WmhED_HlXCb(`=TT65-ZHA**fTyfRd;X9a&RV&JEno8kC&IJ*A#e~RhnwO9gg25I5` zvVp@Y3~tZgHteN^>izZYxclMu>iYfdRqu1}sw&ldZeYnYz;CgRP&yCQb>hlcV*5@j z!P2QP4E6?}`=jf@$KK#Zi~aE8=BJYL5J)>wq0`hX3Pnc~Zv0m;x_N>&TaT;A+p~Y2 znX;WT11&@pujT!I6(QU;#;)Q_2)Ahims$1B0Xx^!T^8tv^(!gCtoAK7O!gF-#e-3{haO~=myE~wxR4t!{aNrQoiL=-zon}*SLl@l}1>3u~ z=a|mm8&Y5u)<0zqJn2HH_3Qt*b@t{slmGwb^z`_!{-2|T`F|KZM#r#2`k$7LeJ1{8 z5bmTl0$o%+mK;l%g_YsTCMI-d>a8O)C|kRpNg~`Ob1;85(ih1YEa2jh zE%;*Df`tQniG;q81;*2YQg9tZ5kOpDo=y=Q_PZ%}()v8Y1fWAjw5vxsVA1(vn*>6| z;;yNKxtueza(2(Ai*r3A(hAu{?y0#jKp8;YfD<|k?++n#)}K+fdfdAQJJ7durp|at z25*3SA{%J2jIQe4`y7A5TO$g>DB*1=(p3KETYTBTfXO~BgbB;?3*RXU3&Lf9T`MlA zkEJBB9Q3>=Q)dg-dKzZ)dN&Wg-TX#9p1o!^s+qZ5lie6ziDE_fkyyT3?1vnVavy8X zW0<*mF={sRJWQROJGPmg>ZbZa4KYvpL`&r>l->n#VWtjB8JB;3scW>`d)vPl+`b!K z4{mycZn2BBP8lUs0>`!_i(HbT!PFm&Z!WH`Zr@&AzkT; z9$`Z)B69n=HyZb^2RjkV+z3=kEIYFHK8DubIJWj)2G`zpbnC*rIhsZ0KpCNN@8oN*sB+2)tohjXK~)H9Guh0cg>+Kz5u#{ycJx)wqJj#+ejcU2}NCYl)$e+eS2w) zv(n{)g_U|G7819rfPdj;}92+zu~pe#&D=%h;OCL%5H<2vIAv zHx1?u7mM$AAQ2XiIdEQ#Dx=aDA|`*nN>V}GIrT->qez18ssjs_;-Q-08LA1)KDVzrg7ncf*)x5vSc}A*CNj8k zJXbq&)Ge*oh;#n^4}N#m8O|y&grA3um z1>?=DP^FL-q#4L4aZE@o#d$$Mxabbk5=Qm)yV`%E6cT&BWn+5|N<-#ZExW0=7_(NX zmLFf+n?snr*LU4i5`A@ZH1IoET3FxSY<(eeZV?C6mMZh>s@lEJYbRXah--#ZvTRd< zf!*qn%!HIyB5hfyvX!~NTZzh{P~=c3l0#LpEpEn|-W?B>D3$ozr4l7=HB=YZDFN3h zk<)+7&+X*_bZ28-3m)f|mH8UAGP}lDo}K5I^^bC;S&IZwS*5wxX^bwJG(F}7E@XdaGYUaWtm60kt&_@lM+z&Xww*sKIT6th zRvi2%<^Geh2z_b->o33MY$Zx5=1W~{)%UTzufgZt+;Shjv;?XkuDZ6ICJDMRwz>&Z zCdRrZzPhQzCxK%f=#$*l>7(R^(ycXy&+aVl#6gMO^W=%Tjek>g7q&`0@#`6TQayj& z8tbj{bfj_{R=;DF7LQUh?@-Y9G6j9xn`IlMu+sieT^w`!hqcx+TgzIRu~((o^siL~ zSziCcC)l?3atZi``oFW|v*Yaj-*1i%_kTZ2%LuUg=nZo*rM|d7EdDb;^D|;DqzZ&I zG!EpPaq_#~r4n_6YpaHI&(LPD;9h?Mr`Szirm8Q#-MxO0tEJu1KMsI>qD%fn2mMm7 z|Ee)1?B-wU{;I|=v;ae(1@MUmn1QUnMs=46{eNvjtAChzQvY()z7M+J4g~cboRz7X z+NL5}>DtXA${~d9Vj(Jyv*MUw0aWfVbKz)Sqq<*4Zu78gLDXio*8x2Vq0N67cL+@` zi6R=hHf}GIRr zgjXp^RN5wV+2>W>Q}kCP7Hj6c?Bjh^RU2NY^Jh@A6;#BYwd|89G|kW3WeM##iekO` zG==)?+DXaxbL*!42>4$>DP?~qqOwBTt3Fa+r1Wd8CMQaFdOoET{;XArkcTq;YsrM{ zL4voNm;Zr1b|*opBGV5Khueb+TGjLaQ!WA7aQ;6!K5l34|2a81JDmTYqis0kz)BE#QR|e{vqa6_ua7IN@28eSt6>)cpQix&99Y02={#iYIuJ{%?Qfe*f{Lb@2Z_ zPs;@`(uQ8V6J#~^cHba6S9_zO&m+0 zxC+MxZX@X=x`$1_pL Date: Thu, 29 Feb 2024 07:46:34 +0000 Subject: [PATCH 083/157] fix: ensure azure cloud config could be loaded from secret --- pkg/blob/azure.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/blob/azure.go b/pkg/blob/azure.go index 13a2425bc..4de7c370b 100644 --- a/pkg/blob/azure.go +++ b/pkg/blob/azure.go @@ -60,7 +60,7 @@ func 
GetCloudProvider(ctx context.Context, kubeClient kubernetes.Interface, node if kubeClient != nil { az.KubeClient = kubeClient klog.V(2).Infof("reading cloud config from secret %s/%s", secretNamespace, secretName) - config, err := configloader.Load[azure.Config](ctx, &configloader.K8sSecretLoaderConfig{ + config, err = configloader.Load[azure.Config](ctx, &configloader.K8sSecretLoaderConfig{ K8sSecretConfig: configloader.K8sSecretConfig{ SecretName: secretName, SecretNamespace: secretNamespace, From a8e18539a0eb0aad9c974093bce965d71e34ec77 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 1 Mar 2024 19:01:56 +0800 Subject: [PATCH 084/157] Update workload-identity-static-pv-mount.md --- docs/workload-identity-static-pv-mount.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/workload-identity-static-pv-mount.md b/docs/workload-identity-static-pv-mount.md index 2794eb817..c494cd643 100644 --- a/docs/workload-identity-static-pv-mount.md +++ b/docs/workload-identity-static-pv-mount.md @@ -1,5 +1,5 @@ # workload identity support on static provisioning - - supported from v1.30.0 + - supported from v1.24.0 This feature is specifically designed for blobfuse mount and is not available for NFS mount as NFS mount does not require credentials. There is a standalone blobfuse mount for every pod, it may cause performance issues when multiple pods are present on a single node. From db4d8c92452322b2ada5352e6745338bca0f889a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Mar 2024 16:16:58 +0000 Subject: [PATCH 085/157] chore(deps): bump github.com/stretchr/testify from 1.8.4 to 1.9.0 Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.8.4 to 1.9.0. - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.8.4...v1.9.0) --- updated-dependencies: - dependency-name: github.com/stretchr/testify dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 +- .../testify/assert/assertion_compare.go | 28 ++- .../assert/assertion_compare_can_convert.go | 16 -- .../assert/assertion_compare_legacy.go | 16 -- .../testify/assert/assertion_format.go | 32 ++- .../testify/assert/assertion_forward.go | 59 +++-- .../stretchr/testify/assert/assertions.go | 207 +++++++++++------- .../testify/assert/http_assertions.go | 27 ++- .../stretchr/testify/require/require.go | 65 ++++-- .../testify/require/require_forward.go | 59 +++-- vendor/modules.txt | 4 +- 12 files changed, 324 insertions(+), 195 deletions(-) delete mode 100644 vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go delete mode 100644 vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go diff --git a/go.mod b/go.mod index 40551ca2f..84564a87a 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( github.com/pelletier/go-toml v1.9.5 github.com/pkg/errors v0.9.1 github.com/satori/go.uuid v1.2.0 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 go.uber.org/mock v0.4.0 golang.org/x/net v0.21.0 google.golang.org/grpc v1.59.0 diff --git a/go.sum b/go.sum index 9e716da1b..51fc88311 100644 --- a/go.sum +++ b/go.sum @@ -263,8 +263,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index b774da88d..4d4b4aad6 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -28,6 +28,8 @@ var ( uint32Type = reflect.TypeOf(uint32(1)) uint64Type = reflect.TypeOf(uint64(1)) + uintptrType = reflect.TypeOf(uintptr(1)) + float32Type = reflect.TypeOf(float32(1)) float64Type = reflect.TypeOf(float64(1)) @@ -308,11 +310,11 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { case reflect.Struct: { // All structs enter here. We're not interested in most types. - if !canConvert(obj1Value, timeType) { + if !obj1Value.CanConvert(timeType) { break } - // time.Time can compared! + // time.Time can be compared! timeObj1, ok := obj1.(time.Time) if !ok { timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time) @@ -328,7 +330,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { case reflect.Slice: { // We only care about the []byte type. 
- if !canConvert(obj1Value, bytesType) { + if !obj1Value.CanConvert(bytesType) { break } @@ -345,6 +347,26 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true } + case reflect.Uintptr: + { + uintptrObj1, ok := obj1.(uintptr) + if !ok { + uintptrObj1 = obj1Value.Convert(uintptrType).Interface().(uintptr) + } + uintptrObj2, ok := obj2.(uintptr) + if !ok { + uintptrObj2 = obj2Value.Convert(uintptrType).Interface().(uintptr) + } + if uintptrObj1 > uintptrObj2 { + return compareGreater, true + } + if uintptrObj1 == uintptrObj2 { + return compareEqual, true + } + if uintptrObj1 < uintptrObj2 { + return compareLess, true + } + } } return compareEqual, false diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go deleted file mode 100644 index da867903e..000000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build go1.17 -// +build go1.17 - -// TODO: once support for Go 1.16 is dropped, this file can be -// merged/removed with assertion_compare_go1.17_test.go and -// assertion_compare_legacy.go - -package assert - -import "reflect" - -// Wrapper around reflect.Value.CanConvert, for compatibility -// reasons. -func canConvert(value reflect.Value, to reflect.Type) bool { - return value.CanConvert(to) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go deleted file mode 100644 index 1701af2a3..000000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !go1.17 -// +build !go1.17 - -// TODO: once support for Go 1.16 is dropped, this file can be -// merged/removed with assertion_compare_go1.17_test.go and -// assertion_compare_can_convert.go - -package assert - -import "reflect" - -// Older versions of Go does not have the reflect.Value.CanConvert -// method. -func canConvert(value reflect.Value, to reflect.Type) bool { - return false -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 84dbd6c79..3ddab109a 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package assert @@ -107,7 +104,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") @@ -616,6 +613,16 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...) } +// NotImplementsf asserts that an object does not implement the specified interface. 
+// +// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotImplements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) +} + // NotNilf asserts that the specified object is not nil. // // assert.NotNilf(t, err, "error message %s", "formatted") @@ -660,10 +667,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -747,10 +756,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg return Same(t, expected, actual, append([]interface{}{msg}, args...)...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index b1d94aec5..a84e09bd4 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package assert @@ -189,7 +186,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface return EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValues(uint32(123), int32(123)) @@ -200,7 +197,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn return EqualValues(a.t, expected, actual, msgAndArgs...) 
} -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") @@ -1221,6 +1218,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in return NotErrorIsf(a.t, err, target, msg, args...) } +// NotImplements asserts that an object does not implement the specified interface. +// +// a.NotImplements((*MyInterface)(nil), new(MyObject)) +func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotImplements(a.t, interfaceObject, object, msgAndArgs...) +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotImplementsf(a.t, interfaceObject, object, msg, args...) +} + // NotNil asserts that the specified object is not nil. // // a.NotNil(err) @@ -1309,10 +1326,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri return NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// a.NotSubset([1, 3, 4], [1, 2]) +// a.NotSubset({"x": 1, "y": 2}, {"z": 3}) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1320,10 +1339,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs return NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1483,10 +1504,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, return Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. 
// -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// a.Subset([1, 2, 3], [1, 2]) +// a.Subset({"x": 1, "y": 2}, {"x": 1}) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1494,10 +1516,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... return Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index a55d1bba9..0b7570f21 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -19,7 +19,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" - yaml "gopkg.in/yaml.v3" + "gopkg.in/yaml.v3" ) //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" @@ -110,7 +110,12 @@ func copyExportedFields(expected interface{}) interface{} { return result.Interface() case reflect.Array, reflect.Slice: - result := reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) + var result reflect.Value + if expectedKind == reflect.Array { + result = reflect.New(reflect.ArrayOf(expectedValue.Len(), expectedType.Elem())).Elem() + } else { + result = reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) + } for i := 0; i < expectedValue.Len(); i++ { index := expectedValue.Index(i) if isNil(index) { @@ -140,6 +145,8 @@ func copyExportedFields(expected interface{}) interface{} { // structures. // // This function does no assertion of any kind. +// +// Deprecated: Use [EqualExportedValues] instead. 
func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool { expectedCleaned := copyExportedFields(expected) actualCleaned := copyExportedFields(actual) @@ -153,17 +160,40 @@ func ObjectsAreEqualValues(expected, actual interface{}) bool { return true } - actualType := reflect.TypeOf(actual) - if actualType == nil { + expectedValue := reflect.ValueOf(expected) + actualValue := reflect.ValueOf(actual) + if !expectedValue.IsValid() || !actualValue.IsValid() { return false } - expectedValue := reflect.ValueOf(expected) - if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { + + expectedType := expectedValue.Type() + actualType := actualValue.Type() + if !expectedType.ConvertibleTo(actualType) { + return false + } + + if !isNumericType(expectedType) || !isNumericType(actualType) { // Attempt comparison after type conversion - return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) + return reflect.DeepEqual( + expectedValue.Convert(actualType).Interface(), actual, + ) } - return false + // If BOTH values are numeric, there are chances of false positives due + // to overflow or underflow. So, we need to make sure to always convert + // the smaller type to a larger type before comparing. + if expectedType.Size() >= actualType.Size() { + return actualValue.Convert(expectedType).Interface() == expected + } + + return expectedValue.Convert(actualType).Interface() == actual +} + +// isNumericType returns true if the type is one of: +// int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, +// float32, float64, complex64, complex128 +func isNumericType(t reflect.Type) bool { + return t.Kind() >= reflect.Int && t.Kind() <= reflect.Complex128 } /* CallerInfo is necessary because the assert functions use the testing object @@ -266,7 +296,7 @@ func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { // Aligns the provided message so that all lines after the first line start at the same location as the first line. // Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). -// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the +// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the // basis on which the alignment occurs). func indentMessageLines(message string, longestLabelLen int) string { outBuf := new(bytes.Buffer) @@ -382,6 +412,25 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg return true } +// NotImplements asserts that an object does not implement the specified interface. +// +// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + interfaceType := reflect.TypeOf(interfaceObject).Elem() + + if object == nil { + return Fail(t, fmt.Sprintf("Cannot check if nil does not implement %v", interfaceType), msgAndArgs...) + } + if reflect.TypeOf(object).Implements(interfaceType) { + return Fail(t, fmt.Sprintf("%T implements %v", object, interfaceType), msgAndArgs...) + } + + return true +} + // IsType asserts that the specified objects are of the same type. 
func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -496,7 +545,7 @@ func samePointers(first, second interface{}) bool { // representations appropriate to be presented to the user. // // If the values are not of like type, the returned strings will be prefixed -// with the type name, and the value will be enclosed in parenthesis similar +// with the type name, and the value will be enclosed in parentheses similar // to a type conversion in the Go grammar. func formatUnequalValues(expected, actual interface{}) (e string, a string) { if reflect.TypeOf(expected) != reflect.TypeOf(actual) { @@ -523,7 +572,7 @@ func truncatingFormat(data interface{}) string { return value } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValues(t, uint32(123), int32(123)) @@ -566,12 +615,19 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs .. return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) } + if aType.Kind() == reflect.Ptr { + aType = aType.Elem() + } + if bType.Kind() == reflect.Ptr { + bType = bType.Elem() + } + if aType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) + return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) } if bType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) + return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) } expected = copyExportedFields(expected) @@ -620,17 +676,6 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { return Fail(t, "Expected value not to be nil.", msgAndArgs...) } -// containsKind checks if a specified kind in the slice of kinds. -func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool { - for i := 0; i < len(kinds); i++ { - if kind == kinds[i] { - return true - } - } - - return false -} - // isNil checks if a specified object is nil or not, without Failing. func isNil(object interface{}) bool { if object == nil { @@ -638,16 +683,13 @@ func isNil(object interface{}) bool { } value := reflect.ValueOf(object) - kind := value.Kind() - isNilableKind := containsKind( - []reflect.Kind{ - reflect.Chan, reflect.Func, - reflect.Interface, reflect.Map, - reflect.Ptr, reflect.Slice, reflect.UnsafePointer}, - kind) - - if isNilableKind && value.IsNil() { - return true + switch value.Kind() { + case + reflect.Chan, reflect.Func, + reflect.Interface, reflect.Map, + reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + + return value.IsNil() } return false @@ -731,16 +773,14 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } -// getLen try to get length of object. -// return (false, 0) if impossible. -func getLen(x interface{}) (ok bool, length int) { +// getLen tries to get the length of an object. +// It returns (0, false) if impossible. 
+func getLen(x interface{}) (length int, ok bool) { v := reflect.ValueOf(x) defer func() { - if e := recover(); e != nil { - ok = false - } + ok = recover() == nil }() - return true, v.Len() + return v.Len(), true } // Len asserts that the specified object has specific length. @@ -751,13 +791,13 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - ok, l := getLen(object) + l, ok := getLen(object) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%v\" could not be applied builtin len()", object), msgAndArgs...) } if l != length { - return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%v\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) } return true } @@ -919,10 +959,11 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// assert.Subset(t, [1, 2, 3], [1, 2]) +// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -975,10 +1016,12 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// assert.NotSubset(t, [1, 3, 4], [1, 2]) +// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1439,7 +1482,7 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd h.Helper() } if math.IsNaN(epsilon) { - return Fail(t, "epsilon must not be NaN") + return Fail(t, "epsilon must not be NaN", msgAndArgs...) } actualEpsilon, err := calcRelativeError(expected, actual) if err != nil { @@ -1458,19 +1501,26 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m if h, ok := t.(tHelper); ok { h.Helper() } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { + + if expected == nil || actual == nil { return Fail(t, "Parameters must be slice", msgAndArgs...) } - actualSlice := reflect.ValueOf(actual) expectedSlice := reflect.ValueOf(expected) + actualSlice := reflect.ValueOf(actual) - for i := 0; i < actualSlice.Len(); i++ { - result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) - if !result { - return result + if expectedSlice.Type().Kind() != reflect.Slice { + return Fail(t, "Expected value must be slice", msgAndArgs...) 
+ } + + expectedLen := expectedSlice.Len() + if !IsType(t, expected, actual) || !Len(t, actual, expectedLen) { + return false + } + + for i := 0; i < expectedLen; i++ { + if !InEpsilon(t, expectedSlice.Index(i).Interface(), actualSlice.Index(i).Interface(), epsilon, "at index %d", i) { + return false } } @@ -1870,23 +1920,18 @@ func (c *CollectT) Errorf(format string, args ...interface{}) { } // FailNow panics. -func (c *CollectT) FailNow() { +func (*CollectT) FailNow() { panic("Assertion failed") } -// Reset clears the collected errors. -func (c *CollectT) Reset() { - c.errors = nil +// Deprecated: That was a method for internal usage that should not have been published. Now just panics. +func (*CollectT) Reset() { + panic("Reset() is deprecated") } -// Copy copies the collected errors to the supplied t. -func (c *CollectT) Copy(t TestingT) { - if tt, ok := t.(tHelper); ok { - tt.Helper() - } - for _, err := range c.errors { - t.Errorf("%v", err) - } +// Deprecated: That was a method for internal usage that should not have been published. Now just panics. +func (*CollectT) Copy(TestingT) { + panic("Copy() is deprecated") } // EventuallyWithT asserts that given condition will be met in waitFor time, @@ -1912,8 +1957,8 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time h.Helper() } - collect := new(CollectT) - ch := make(chan bool, 1) + var lastFinishedTickErrs []error + ch := make(chan []error, 1) timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1924,19 +1969,25 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time for tick := ticker.C; ; { select { case <-timer.C: - collect.Copy(t) + for _, err := range lastFinishedTickErrs { + t.Errorf("%v", err) + } return Fail(t, "Condition never satisfied", msgAndArgs...) case <-tick: tick = nil - collect.Reset() go func() { + collect := new(CollectT) + defer func() { + ch <- collect.errors + }() condition(collect) - ch <- len(collect.errors) == 0 }() - case v := <-ch: - if v { + case errs := <-ch: + if len(errs) == 0 { return true } + // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. + lastFinishedTickErrs = errs tick = ticker.C } } diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go index d8038c28a..861ed4b7c 100644 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -12,7 +12,7 @@ import ( // an error if building a new request fails. func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { w := httptest.NewRecorder() - req, err := http.NewRequest(method, url, nil) + req, err := http.NewRequest(method, url, http.NoBody) if err != nil { return -1, err } @@ -32,12 +32,12 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent if !isSuccessCode { - Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) 
} return isSuccessCode @@ -54,12 +54,12 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect if !isRedirectCode { - Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isRedirectCode @@ -76,12 +76,12 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isErrorCode := code >= http.StatusBadRequest if !isErrorCode { - Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isErrorCode @@ -98,12 +98,12 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } successful := code == statuscode if !successful { - Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code), msgAndArgs...) } return successful @@ -113,7 +113,10 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va // empty string if building a new request fails. func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { w := httptest.NewRecorder() - req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if len(values) > 0 { + url += "?" + values.Encode() + } + req, err := http.NewRequest(method, url, http.NoBody) if err != nil { return "" } @@ -135,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, contains := strings.Contains(body, fmt.Sprint(str)) if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) } return contains @@ -155,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin contains := strings.Contains(body, fmt.Sprint(str)) if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) 
} return !contains diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 63f852147..506a82f80 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package require @@ -235,7 +232,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, t.FailNow() } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValues(t, uint32(123), int32(123)) @@ -249,7 +246,7 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg t.FailNow() } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") @@ -1546,6 +1543,32 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf t.FailNow() } +// NotImplements asserts that an object does not implement the specified interface. +// +// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotImplements(t, interfaceObject, object, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotImplementsf(t, interfaceObject, object, msg, args...) { + return + } + t.FailNow() +} + // NotNil asserts that the specified object is not nil. // // assert.NotNil(t, err) @@ -1658,10 +1681,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, t.FailNow() } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// assert.NotSubset(t, [1, 3, 4], [1, 2]) +// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1672,10 +1697,12 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i t.FailNow() } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. 
// -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1880,10 +1907,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg t.FailNow() } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// assert.Subset(t, [1, 2, 3], [1, 2]) +// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1894,10 +1922,11 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte t.FailNow() } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index 3b5b09330..eee8310a5 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package require @@ -190,7 +187,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValues(uint32(123), int32(123)) @@ -201,7 +198,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn EqualValues(a.t, expected, actual, msgAndArgs...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") @@ -1222,6 +1219,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in NotErrorIsf(a.t, err, target, msg, args...) } +// NotImplements asserts that an object does not implement the specified interface. 
+// +// a.NotImplements((*MyInterface)(nil), new(MyObject)) +func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotImplements(a.t, interfaceObject, object, msgAndArgs...) +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotImplementsf(a.t, interfaceObject, object, msg, args...) +} + // NotNil asserts that the specified object is not nil. // // a.NotNil(err) @@ -1310,10 +1327,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// a.NotSubset([1, 3, 4], [1, 2]) +// a.NotSubset({"x": 1, "y": 2}, {"z": 3}) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1321,10 +1340,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1484,10 +1505,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// a.Subset([1, 2, 3], [1, 2]) +// a.Subset({"x": 1, "y": 2}, {"x": 1}) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1495,10 +1517,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) 
or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/modules.txt b/vendor/modules.txt index 73d4e5a92..65bf3ac5f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -435,8 +435,8 @@ github.com/spf13/pflag # github.com/stoewer/go-strcase v1.2.0 ## explicit; go 1.11 github.com/stoewer/go-strcase -# github.com/stretchr/testify v1.8.4 -## explicit; go 1.20 +# github.com/stretchr/testify v1.9.0 +## explicit; go 1.17 github.com/stretchr/testify/assert github.com/stretchr/testify/require # go.etcd.io/etcd/api/v3 v3.5.10 From e7388d74a13c9665ccd1ba82d8ab98b0cdba6a2e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Mar 2024 16:40:27 +0000 Subject: [PATCH 086/157] chore(deps): bump github.com/onsi/ginkgo/v2 from 2.15.0 to 2.16.0 Bumps [github.com/onsi/ginkgo/v2](https://github.com/onsi/ginkgo) from 2.15.0 to 2.16.0. - [Release notes](https://github.com/onsi/ginkgo/releases) - [Changelog](https://github.com/onsi/ginkgo/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/ginkgo/compare/v2.15.0...v2.16.0) --- updated-dependencies: - dependency-name: github.com/onsi/ginkgo/v2 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 4 +- go.sum | 8 +- vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 19 ++ vendor/github.com/onsi/ginkgo/v2/core_dsl.go | 13 +- .../ginkgo/v2/ginkgo/internal/gocovmerge.go | 129 +++++++++ .../ginkgo/internal/profiles_and_reports.go | 42 +-- .../onsi/ginkgo/v2/internal/node.go | 33 ++- .../onsi/ginkgo/v2/internal/suite.go | 14 +- .../onsi/ginkgo/v2/reporting_dsl.go | 59 +++- .../onsi/ginkgo/v2/types/version.go | 2 +- vendor/golang.org/x/tools/cover/profile.go | 266 ++++++++++++++++++ vendor/modules.txt | 5 +- 12 files changed, 527 insertions(+), 67 deletions(-) create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go create mode 100644 vendor/golang.org/x/tools/cover/profile.go diff --git a/go.mod b/go.mod index 84564a87a..198c4df23 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/golang/protobuf v1.5.3 github.com/kubernetes-csi/csi-lib-utils v0.16.0 - github.com/onsi/ginkgo/v2 v2.15.0 + github.com/onsi/ginkgo/v2 v2.16.0 github.com/onsi/gomega v1.31.1 github.com/pborman/uuid v1.2.1 github.com/pelletier/go-toml v1.9.5 @@ -136,7 +136,7 @@ require ( golang.org/x/term v0.17.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.16.1 // indirect + golang.org/x/tools v0.17.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect diff --git a/go.sum b/go.sum index 51fc88311..d2e4d7b79 100644 --- a/go.sum +++ b/go.sum @@ -209,8 +209,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg 
v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= -github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= +github.com/onsi/ginkgo/v2 v2.16.0 h1:7q1w9frJDzninhXxjZd+Y/x54XNjG/UlRLIYPZafsPM= +github.com/onsi/ginkgo/v2 v2.16.0/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo= github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -388,8 +388,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= -golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= +golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index 9a65dd10c..b79ee9c5a 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,22 @@ +## 2.16.0 + +### Features +- add SpecContext to reporting nodes + +### Fixes +- merge coverages instead of combining them (#1329) (#1340) [23f0cc5] +- core_dsl: disable Getwd() with environment variable (#1357) [cd418b7] + +### Maintenance +- docs/index.md: Typo [2cebe8d] +- fix docs [06de431] +- chore: test with Go 1.22 (#1352) [898cba9] +- Bump golang.org/x/tools from 0.16.1 to 0.17.0 (#1336) [17ae120] +- Bump golang.org/x/sys from 0.15.0 to 0.16.0 (#1327) [5a179ed] +- Bump github.com/go-logr/logr from 1.3.0 to 1.4.1 (#1321) [a1e6b69] +- Bump github-pages and jekyll-feed in /docs (#1351) [d52951d] +- Fix docs for handling failures in goroutines (#1339) [4471b2e] + ## 2.15.0 ### Features diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go index 2d7a70ecc..0e633f309 100644 --- a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go @@ -292,7 +292,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool { err = global.Suite.BuildTree() exitIfErr(err) - suitePath, err := os.Getwd() + suitePath, err := getwd() exitIfErr(err) suitePath, err = filepath.Abs(suitePath) exitIfErr(err) @@ -345,6 +345,15 @@ func extractSuiteConfiguration(args []interface{}) Labels { return suiteLabels } 
+func getwd() (string, error) { + if !strings.EqualFold(os.Getenv("GINKGO_PRESERVE_CACHE"), "true") { + // Getwd calls os.Getenv("PWD"), which breaks test caching if the cache + // is shared between two different directories with the same test code. + return os.Getwd() + } + return "", nil +} + /* PreviewSpecs walks the testing tree and produces a report without actually invoking the specs. See http://onsi.github.io/ginkgo/#previewing-specs for more information. @@ -369,7 +378,7 @@ func PreviewSpecs(description string, args ...any) Report { err = global.Suite.BuildTree() exitIfErr(err) - suitePath, err := os.Getwd() + suitePath, err := getwd() exitIfErr(err) suitePath, err = filepath.Abs(suitePath) exitIfErr(err) diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go new file mode 100644 index 000000000..3c5079ff4 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go @@ -0,0 +1,129 @@ +// Copyright (c) 2015, Wade Simmons +// All rights reserved. + +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: + +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. + +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// Package gocovmerge takes the results from multiple `go test -coverprofile` +// runs and merges them into one profile + +// this file was originally taken from the gocovmerge project +// see also: https://go.shabbyrobe.org/gocovmerge +package internal + +import ( + "fmt" + "io" + "sort" + + "golang.org/x/tools/cover" +) + +func AddCoverProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile { + i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName }) + if i < len(profiles) && profiles[i].FileName == p.FileName { + MergeCoverProfiles(profiles[i], p) + } else { + profiles = append(profiles, nil) + copy(profiles[i+1:], profiles[i:]) + profiles[i] = p + } + return profiles +} + +func DumpCoverProfiles(profiles []*cover.Profile, out io.Writer) error { + if len(profiles) == 0 { + return nil + } + if _, err := fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode); err != nil { + return err + } + for _, p := range profiles { + for _, b := range p.Blocks { + if _, err := fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count); err != nil { + return err + } + } + } + return nil +} + +func MergeCoverProfiles(into *cover.Profile, merge *cover.Profile) error { + if into.Mode != merge.Mode { + return fmt.Errorf("cannot merge profiles with different modes") + } + // Since the blocks are sorted, we can keep track of where the last block + // was inserted and only look at the blocks after that as targets for merge + startIndex := 0 + for _, b := range merge.Blocks { + var err error + startIndex, err = mergeProfileBlock(into, b, startIndex) + if err != nil { + return err + } + } + return nil +} + +func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) (int, error) { + sortFunc := func(i int) bool { + pi := p.Blocks[i+startIndex] + return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol) + } + + i := 0 + if sortFunc(i) != true { + i = sort.Search(len(p.Blocks)-startIndex, sortFunc) + } + + i += startIndex + if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol { + if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol { + return i, fmt.Errorf("gocovmerge: overlapping merge %v %v %v", p.FileName, p.Blocks[i], pb) + } + switch p.Mode { + case "set": + p.Blocks[i].Count |= pb.Count + case "count", "atomic": + p.Blocks[i].Count += pb.Count + default: + return i, fmt.Errorf("gocovmerge: unsupported covermode '%s'", p.Mode) + } + + } else { + if i > 0 { + pa := p.Blocks[i-1] + if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) { + return i, fmt.Errorf("gocovmerge: overlap before %v %v %v", p.FileName, pa, pb) + } + } + if i < len(p.Blocks)-1 { + pa := p.Blocks[i+1] + if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) { + return i, fmt.Errorf("gocovmerge: overlap after %v %v %v", p.FileName, pa, pb) + } + } + p.Blocks = append(p.Blocks, cover.ProfileBlock{}) + copy(p.Blocks[i+1:], p.Blocks[i:]) + p.Blocks[i] = pb + } + + return i + 1, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go index 26de28b57..5f35864dd 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ -1,7 
+1,6 @@ package internal import ( - "bytes" "fmt" "os" "os/exec" @@ -12,6 +11,7 @@ import ( "github.com/google/pprof/profile" "github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/ginkgo/v2/types" + "golang.org/x/tools/cover" ) func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string { @@ -144,38 +144,26 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC return messages, nil } -// loads each profile, combines them, deletes them, stores them in destination +// loads each profile, merges them, deletes them, stores them in destination func MergeAndCleanupCoverProfiles(profiles []string, destination string) error { - combined := &bytes.Buffer{} - modeRegex := regexp.MustCompile(`^mode: .*\n`) - for i, profile := range profiles { - contents, err := os.ReadFile(profile) + var merged []*cover.Profile + for _, file := range profiles { + parsedProfiles, err := cover.ParseProfiles(file) if err != nil { - return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error()) + return err } - os.Remove(profile) - - // remove the cover mode line from every file - // except the first one - if i > 0 { - contents = modeRegex.ReplaceAll(contents, []byte{}) - } - - _, err = combined.Write(contents) - - // Add a newline to the end of every file if missing. - if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' { - _, err = combined.Write([]byte("\n")) - } - - if err != nil { - return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error()) + os.Remove(file) + for _, p := range parsedProfiles { + merged = AddCoverProfile(merged, p) } } - - err := os.WriteFile(destination, combined.Bytes(), 0666) + dst, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return err + } + err = DumpCoverProfiles(merged, dst) if err != nil { - return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error()) + return err } return nil } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go index 16f0dc227..6a15f19ae 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/node.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go @@ -5,9 +5,8 @@ import ( "fmt" "reflect" "sort" - "time" - "sync" + "time" "github.com/onsi/ginkgo/v2/types" ) @@ -16,8 +15,8 @@ var _global_node_id_counter = uint(0) var _global_id_mutex = &sync.Mutex{} func UniqueNodeID() uint { - //There's a reace in the internal integration tests if we don't make - //accessing _global_node_id_counter safe across goroutines. + // There's a reace in the internal integration tests if we don't make + // accessing _global_node_id_counter safe across goroutines. 
_global_id_mutex.Lock() defer _global_id_mutex.Unlock() _global_node_id_counter += 1 @@ -44,8 +43,8 @@ type Node struct { SynchronizedAfterSuiteProc1Body func(SpecContext) SynchronizedAfterSuiteProc1BodyHasContext bool - ReportEachBody func(types.SpecReport) - ReportSuiteBody func(types.Report) + ReportEachBody func(SpecContext, types.SpecReport) + ReportSuiteBody func(SpecContext, types.Report) MarkedFocus bool MarkedPending bool @@ -209,7 +208,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy args = unrollInterfaceSlice(args) remainingArgs := []interface{}{} - //First get the CodeLocation up-to-date + // First get the CodeLocation up-to-date for _, arg := range args { switch v := arg.(type) { case Offset: @@ -225,11 +224,11 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy trackedFunctionError := false args = remainingArgs remainingArgs = []interface{}{} - //now process the rest of the args + // now process the rest of the args for _, arg := range args { switch t := reflect.TypeOf(arg); { case t == reflect.TypeOf(float64(0)): - break //ignore deprecated timeouts + break // ignore deprecated timeouts case t == reflect.TypeOf(Focus): node.MarkedFocus = bool(arg.(focusType)) if !nodeType.Is(types.NodeTypesForContainerAndIt) { @@ -325,7 +324,12 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy node.Body = func(SpecContext) { body() } } else if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) { if node.ReportEachBody == nil { - node.ReportEachBody = arg.(func(types.SpecReport)) + if fn, ok := arg.(func(types.SpecReport)); ok { + node.ReportEachBody = func(_ SpecContext, r types.SpecReport) { fn(r) } + } else { + node.ReportEachBody = arg.(func(SpecContext, types.SpecReport)) + node.HasContext = true + } } else { appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) trackedFunctionError = true @@ -333,7 +337,12 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy } } else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) { if node.ReportSuiteBody == nil { - node.ReportSuiteBody = arg.(func(types.Report)) + if fn, ok := arg.(func(types.Report)); ok { + node.ReportSuiteBody = func(_ SpecContext, r types.Report) { fn(r) } + } else { + node.ReportSuiteBody = arg.(func(SpecContext, types.Report)) + node.HasContext = true + } } else { appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) trackedFunctionError = true @@ -395,7 +404,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy } } - //validations + // validations if node.MarkedPending && node.MarkedFocus { appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType)) } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index 2b4db48af..a994ee3d6 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -594,8 +594,8 @@ func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) { suite.writer.Truncate() suite.outputInterceptor.StartInterceptingOutput() report := suite.currentSpecReport - nodes[i].Body = func(SpecContext) { - nodes[i].ReportEachBody(report) + nodes[i].Body = func(ctx SpecContext) { + nodes[i].ReportEachBody(ctx, report) } state, failure := suite.runNode(nodes[i], time.Time{}, 
spec.Nodes.BestTextFor(nodes[i])) @@ -762,7 +762,7 @@ func (suite *Suite) runReportSuiteNode(node Node, report types.Report) { report = report.Add(aggregatedReport) } - node.Body = func(SpecContext) { node.ReportSuiteBody(report) } + node.Body = func(ctx SpecContext) { node.ReportSuiteBody(ctx, report) } suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") suite.currentSpecReport.EndTime = time.Now() @@ -840,7 +840,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ timeoutInPlay = "node" } if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() { - //we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't + // we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't if node.NodeTimeout > 0 { deadline = now.Add(node.NodeTimeout) timeoutInPlay = "node" @@ -918,9 +918,9 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ if outcomeFromRun != types.SpecStatePassed { additionalFailure := types.AdditionalFailure{ State: outcomeFromRun, - Failure: failure, //we make a copy - this will include all the configuration set up above... + Failure: failure, // we make a copy - this will include all the configuration set up above... } - //...and then we update the failure with the details from failureFromRun + // ...and then we update the failure with the details from failureFromRun additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation additionalFailure.Failure.ProgressReport = types.ProgressReport{} if outcome == types.SpecStateTimedout { @@ -959,7 +959,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ // tell the spec to stop. it's important we generate the progress report first to make sure we capture where // the spec is actually stuck sc.cancel(fmt.Errorf("%s timeout occurred", timeoutInPlay)) - //and now we wait for the grace period + // and now we wait for the grace period gracePeriodChannel = time.After(gracePeriod) case <-interruptStatus.Channel: interruptStatus = suite.interruptHandler.Status() diff --git a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go index f33786a2d..aa1a35176 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go @@ -74,12 +74,21 @@ func AddReportEntry(name string, args ...interface{}) { /* ReportBeforeEach nodes are run for each spec, even if the spec is skipped or pending. ReportBeforeEach nodes take a function that -receives a SpecReport. They are called before the spec starts. +receives a SpecReport or both SpecContext and Report for interruptible behavior. They are called before the spec starts. + +Example: + + ReportBeforeEach(func(report SpecReport) { // process report }) + ReportBeforeEach(func(ctx SpecContext, report SpecReport) { + // process report + }), NodeTimeout(1 * time.Minute)) You cannot nest any other Ginkgo nodes within a ReportBeforeEach node's closure. 
You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically + +You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportBeforeEach(body func(SpecReport), args ...interface{}) bool { +func ReportBeforeEach(body any, args ...any) bool { combinedArgs := []interface{}{body} combinedArgs = append(combinedArgs, args...) @@ -87,13 +96,23 @@ func ReportBeforeEach(body func(SpecReport), args ...interface{}) bool { } /* -ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending. ReportAfterEach nodes take a function that -receives a SpecReport. They are called after the spec has completed and receive the final report for the spec. +ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending. +ReportAfterEach nodes take a function that receives a SpecReport or both SpecContext and Report for interruptible behavior. +They are called after the spec has completed and receive the final report for the spec. + +Example: + + ReportAfterEach(func(report SpecReport) { // process report }) + ReportAfterEach(func(ctx SpecContext, report SpecReport) { + // process report + }), NodeTimeout(1 * time.Minute)) You cannot nest any other Ginkgo nodes within a ReportAfterEach node's closure. You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically + +You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportAfterEach(body func(SpecReport), args ...interface{}) bool { +func ReportAfterEach(body any, args ...any) bool { combinedArgs := []interface{}{body} combinedArgs = append(combinedArgs, args...) @@ -101,7 +120,15 @@ func ReportAfterEach(body func(SpecReport), args ...interface{}) bool { } /* -ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function that receives a suite Report. +ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function +that can either receive Report or both SpecContext and Report for interruptible behavior. + +Example Usage: + + ReportBeforeSuite(func(r Report) { // process report }) + ReportBeforeSuite(func(ctx SpecContext, r Report) { + // process report + }, NodeTimeout(1 * time.Minute)) They are called at the beginning of the suite, before any specs have run and any BeforeSuite or SynchronizedBeforeSuite nodes, and are passed in the initial report for the suite. ReportBeforeSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node) @@ -112,18 +139,28 @@ You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure. You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports + +You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportBeforeSuite(body func(Report), args ...interface{}) bool { +func ReportBeforeSuite(body any, args ...any) bool { combinedArgs := []interface{}{body} combinedArgs = append(combinedArgs, args...) 
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...)) } /* -ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes take a function that receives a suite Report. +ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes execute at the suite's conclusion, +and accept a function that can either receive Report or both SpecContext and Report for interruptible behavior. + +Example Usage: + + ReportAfterSuite("Non-interruptible ReportAfterSuite", func(r Report) { // process report }) + ReportAfterSuite("Interruptible ReportAfterSuite", func(ctx SpecContext, r Report) { + // process report + }, NodeTimeout(1 * time.Minute)) They are called at the end of the suite, after all specs have run and any AfterSuite or SynchronizedAfterSuite nodes, and are passed in the final report for the suite. -ReportAftersuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node) +ReportAfterSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node) When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportAfterSuite and that it is passed a report that is aggregated across all parallel nodes @@ -134,8 +171,10 @@ You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure. You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports + +You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportAfterSuite(text string, body func(Report), args ...interface{}) bool { +func ReportAfterSuite(text string, body any, args ...interface{}) bool { combinedArgs := []interface{}{body} combinedArgs = append(combinedArgs, args...) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...)) diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index ed9346474..675f8db2f 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.15.0" +const VERSION = "2.16.0" diff --git a/vendor/golang.org/x/tools/cover/profile.go b/vendor/golang.org/x/tools/cover/profile.go new file mode 100644 index 000000000..47a9a5411 --- /dev/null +++ b/vendor/golang.org/x/tools/cover/profile.go @@ -0,0 +1,266 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cover provides support for parsing coverage profiles +// generated by "go test -coverprofile=cover.out". +package cover // import "golang.org/x/tools/cover" + +import ( + "bufio" + "errors" + "fmt" + "io" + "math" + "os" + "sort" + "strconv" + "strings" +) + +// Profile represents the profiling data for a specific file. +type Profile struct { + FileName string + Mode string + Blocks []ProfileBlock +} + +// ProfileBlock represents a single block of profiling data. 
+type ProfileBlock struct { + StartLine, StartCol int + EndLine, EndCol int + NumStmt, Count int +} + +type byFileName []*Profile + +func (p byFileName) Len() int { return len(p) } +func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName } +func (p byFileName) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// ParseProfiles parses profile data in the specified file and returns a +// Profile for each source file described therein. +func ParseProfiles(fileName string) ([]*Profile, error) { + pf, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer pf.Close() + return ParseProfilesFromReader(pf) +} + +// ParseProfilesFromReader parses profile data from the Reader and +// returns a Profile for each source file described therein. +func ParseProfilesFromReader(rd io.Reader) ([]*Profile, error) { + // First line is "mode: foo", where foo is "set", "count", or "atomic". + // Rest of file is in the format + // encoding/base64/base64.go:34.44,37.40 3 1 + // where the fields are: name.go:line.column,line.column numberOfStatements count + files := make(map[string]*Profile) + s := bufio.NewScanner(rd) + mode := "" + for s.Scan() { + line := s.Text() + if mode == "" { + const p = "mode: " + if !strings.HasPrefix(line, p) || line == p { + return nil, fmt.Errorf("bad mode line: %v", line) + } + mode = line[len(p):] + continue + } + fn, b, err := parseLine(line) + if err != nil { + return nil, fmt.Errorf("line %q doesn't match expected format: %v", line, err) + } + p := files[fn] + if p == nil { + p = &Profile{ + FileName: fn, + Mode: mode, + } + files[fn] = p + } + p.Blocks = append(p.Blocks, b) + } + if err := s.Err(); err != nil { + return nil, err + } + for _, p := range files { + sort.Sort(blocksByStart(p.Blocks)) + // Merge samples from the same location. + j := 1 + for i := 1; i < len(p.Blocks); i++ { + b := p.Blocks[i] + last := p.Blocks[j-1] + if b.StartLine == last.StartLine && + b.StartCol == last.StartCol && + b.EndLine == last.EndLine && + b.EndCol == last.EndCol { + if b.NumStmt != last.NumStmt { + return nil, fmt.Errorf("inconsistent NumStmt: changed from %d to %d", last.NumStmt, b.NumStmt) + } + if mode == "set" { + p.Blocks[j-1].Count |= b.Count + } else { + p.Blocks[j-1].Count += b.Count + } + continue + } + p.Blocks[j] = b + j++ + } + p.Blocks = p.Blocks[:j] + } + // Generate a sorted slice. + profiles := make([]*Profile, 0, len(files)) + for _, profile := range files { + profiles = append(profiles, profile) + } + sort.Sort(byFileName(profiles)) + return profiles, nil +} + +// parseLine parses a line from a coverage file. 
+// It is equivalent to the regex +// ^(.+):([0-9]+)\.([0-9]+),([0-9]+)\.([0-9]+) ([0-9]+) ([0-9]+)$ +// +// However, it is much faster: https://golang.org/cl/179377 +func parseLine(l string) (fileName string, block ProfileBlock, err error) { + end := len(l) + + b := ProfileBlock{} + b.Count, end, err = seekBack(l, ' ', end, "Count") + if err != nil { + return "", b, err + } + b.NumStmt, end, err = seekBack(l, ' ', end, "NumStmt") + if err != nil { + return "", b, err + } + b.EndCol, end, err = seekBack(l, '.', end, "EndCol") + if err != nil { + return "", b, err + } + b.EndLine, end, err = seekBack(l, ',', end, "EndLine") + if err != nil { + return "", b, err + } + b.StartCol, end, err = seekBack(l, '.', end, "StartCol") + if err != nil { + return "", b, err + } + b.StartLine, end, err = seekBack(l, ':', end, "StartLine") + if err != nil { + return "", b, err + } + fn := l[0:end] + if fn == "" { + return "", b, errors.New("a FileName cannot be blank") + } + return fn, b, nil +} + +// seekBack searches backwards from end to find sep in l, then returns the +// value between sep and end as an integer. +// If seekBack fails, the returned error will reference what. +func seekBack(l string, sep byte, end int, what string) (value int, nextSep int, err error) { + // Since we're seeking backwards and we know only ASCII is legal for these values, + // we can ignore the possibility of non-ASCII characters. + for start := end - 1; start >= 0; start-- { + if l[start] == sep { + i, err := strconv.Atoi(l[start+1 : end]) + if err != nil { + return 0, 0, fmt.Errorf("couldn't parse %q: %v", what, err) + } + if i < 0 { + return 0, 0, fmt.Errorf("negative values are not allowed for %s, found %d", what, i) + } + return i, start, nil + } + } + return 0, 0, fmt.Errorf("couldn't find a %s before %s", string(sep), what) +} + +type blocksByStart []ProfileBlock + +func (b blocksByStart) Len() int { return len(b) } +func (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b blocksByStart) Less(i, j int) bool { + bi, bj := b[i], b[j] + return bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol +} + +// Boundary represents the position in a source file of the beginning or end of a +// block as reported by the coverage profile. In HTML mode, it will correspond to +// the opening or closing of a tag and will be used to colorize the source +type Boundary struct { + Offset int // Location as a byte offset in the source file. + Start bool // Is this the start of a block? + Count int // Event count from the cover profile. + Norm float64 // Count normalized to [0..1]. + Index int // Order in input file. +} + +// Boundaries returns a Profile as a set of Boundary objects within the provided src. +func (p *Profile) Boundaries(src []byte) (boundaries []Boundary) { + // Find maximum count. + max := 0 + for _, b := range p.Blocks { + if b.Count > max { + max = b.Count + } + } + // Divisor for normalization. + divisor := math.Log(float64(max)) + + // boundary returns a Boundary, populating the Norm field with a normalized Count. + index := 0 + boundary := func(offset int, start bool, count int) Boundary { + b := Boundary{Offset: offset, Start: start, Count: count, Index: index} + index++ + if !start || count == 0 { + return b + } + if max <= 1 { + b.Norm = 0.8 // Profile is in"set" mode; we want a heat map. Use cov8 in the CSS. + } else if count > 0 { + b.Norm = math.Log(float64(count)) / divisor + } + return b + } + + line, col := 1, 2 // TODO: Why is this 2? 
+ for si, bi := 0, 0; si < len(src) && bi < len(p.Blocks); { + b := p.Blocks[bi] + if b.StartLine == line && b.StartCol == col { + boundaries = append(boundaries, boundary(si, true, b.Count)) + } + if b.EndLine == line && b.EndCol == col || line > b.EndLine { + boundaries = append(boundaries, boundary(si, false, 0)) + bi++ + continue // Don't advance through src; maybe the next block starts here. + } + if src[si] == '\n' { + line++ + col = 0 + } + col++ + si++ + } + sort.Sort(boundariesByPos(boundaries)) + return +} + +type boundariesByPos []Boundary + +func (b boundariesByPos) Len() int { return len(b) } +func (b boundariesByPos) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b boundariesByPos) Less(i, j int) bool { + if b[i].Offset == b[j].Offset { + // Boundaries at the same offset should be ordered according to + // their original position. + return b[i].Index < b[j].Index + } + return b[i].Offset < b[j].Offset +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 65bf3ac5f..81bef22f3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -344,7 +344,7 @@ github.com/munnerz/goautoneg # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f ## explicit github.com/mxk/go-flowrate/flowrate -# github.com/onsi/ginkgo/v2 v2.15.0 +# github.com/onsi/ginkgo/v2 v2.16.0 ## explicit; go 1.20 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config @@ -624,8 +624,9 @@ golang.org/x/text/width # golang.org/x/time v0.5.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.16.1 +# golang.org/x/tools v0.17.0 ## explicit; go 1.18 +golang.org/x/tools/cover golang.org/x/tools/go/ast/inspector # google.golang.org/appengine v1.6.7 ## explicit; go 1.11 From 540f9fd3ec1f47c20ece4e027b2e963997aa694d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Mar 2024 16:11:24 +0000 Subject: [PATCH 087/157] chore(deps): bump golang.org/x/net from 0.21.0 to 0.22.0 Bumps [golang.org/x/net](https://github.com/golang/net) from 0.21.0 to 0.22.0. - [Commits](https://github.com/golang/net/compare/v0.21.0...v0.22.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 8 +- go.sum | 16 +-- .../x/crypto/internal/poly1305/sum_ppc64le.s | 14 ++- vendor/golang.org/x/net/http2/transport.go | 9 ++ vendor/golang.org/x/net/websocket/client.go | 55 ++++++++--- vendor/golang.org/x/net/websocket/dial.go | 11 ++- vendor/golang.org/x/sys/unix/aliases.go | 2 +- .../x/sys/unix/syscall_darwin_libSystem.go | 2 +- .../golang.org/x/sys/unix/syscall_freebsd.go | 12 ++- vendor/golang.org/x/sys/unix/syscall_linux.go | 99 +++++++++++++++++++ .../golang.org/x/sys/unix/zsyscall_linux.go | 10 ++ vendor/golang.org/x/sys/unix/ztypes_linux.go | 60 +++++++++++ vendor/modules.txt | 8 +- 13 files changed, 261 insertions(+), 45 deletions(-) diff --git a/go.mod b/go.mod index 198c4df23..c1e966273 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/satori/go.uuid v1.2.0 github.com/stretchr/testify v1.9.0 go.uber.org/mock v0.4.0 - golang.org/x/net v0.21.0 + golang.org/x/net v0.22.0 google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.32.0 k8s.io/api v0.29.2 @@ -128,12 +128,12 @@ require ( go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.19.0 // indirect - golang.org/x/crypto v0.19.0 // indirect + golang.org/x/crypto v0.21.0 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/oauth2 v0.11.0 // indirect golang.org/x/sync v0.6.0 - golang.org/x/sys v0.17.0 // indirect - golang.org/x/term v0.17.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.17.0 // indirect diff --git a/go.sum b/go.sum index d2e4d7b79..f92992336 100644 --- a/go.sum +++ b/go.sum @@ -325,8 +325,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -343,8 +343,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.11.0 
h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -364,13 +364,13 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s index d2ca5deeb..b3c1699bf 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s @@ -19,15 +19,14 @@ #define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ MULLD r0, h0, t0; \ - MULLD r0, h1, t4; \ MULHDU r0, h0, t1; \ + MULLD r0, h1, t4; \ MULHDU r0, h1, t5; \ ADDC t4, t1, t1; \ MULLD r0, h2, t2; \ - ADDZE t5; \ MULHDU r1, h0, t4; \ MULLD r1, h0, h0; \ - ADD t5, t2, t2; \ + ADDE t5, t2, t2; \ ADDC h0, t1, t1; \ MULLD h2, r1, t3; \ ADDZE t4, h0; \ @@ -37,13 +36,11 @@ ADDE t5, t3, t3; \ ADDC h0, t2, t2; \ MOVD $-4, t4; \ - MOVD t0, h0; \ - MOVD t1, h1; \ ADDZE t3; \ - ANDCC $3, t2, h2; \ - AND t2, t4, t0; \ + RLDICL $0, t2, $62, h2; \ + AND t2, t4, h0; \ ADDC t0, h0, h0; \ - ADDE t3, h1, h1; \ + ADDE t3, t1, h1; \ SLD $62, t3, t4; \ SRD $2, t2; \ ADDZE h2; \ @@ -75,6 +72,7 @@ TEXT ·update(SB), $0-32 loop: POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) + PCALIGN $16 multiply: POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) ADD $-16, R5 diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index df578b86c..c2a5b44b3 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -2911,6 +2911,15 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { fl = &cs.flow } if !fl.add(int32(f.Increment)) { + // For stream, the sender sends RST_STREAM with an error code of FLOW_CONTROL_ERROR + if cs != nil { + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeFlowControl, + }) + return nil 
+ } + return ConnectionError(ErrCodeFlowControl) } cc.cond.Broadcast() diff --git a/vendor/golang.org/x/net/websocket/client.go b/vendor/golang.org/x/net/websocket/client.go index 69a4ac7ee..1e64157f3 100644 --- a/vendor/golang.org/x/net/websocket/client.go +++ b/vendor/golang.org/x/net/websocket/client.go @@ -6,10 +6,12 @@ package websocket import ( "bufio" + "context" "io" "net" "net/http" "net/url" + "time" ) // DialError is an error that occurs while dialling a websocket server. @@ -79,28 +81,59 @@ func parseAuthority(location *url.URL) string { // DialConfig opens a new client connection to a WebSocket with a config. func DialConfig(config *Config) (ws *Conn, err error) { - var client net.Conn + return config.DialContext(context.Background()) +} + +// DialContext opens a new client connection to a WebSocket, with context support for timeouts/cancellation. +func (config *Config) DialContext(ctx context.Context) (*Conn, error) { if config.Location == nil { return nil, &DialError{config, ErrBadWebSocketLocation} } if config.Origin == nil { return nil, &DialError{config, ErrBadWebSocketOrigin} } + dialer := config.Dialer if dialer == nil { dialer = &net.Dialer{} } - client, err = dialWithDialer(dialer, config) - if err != nil { - goto Error - } - ws, err = NewClient(config, client) + + client, err := dialWithDialer(ctx, dialer, config) if err != nil { - client.Close() - goto Error + return nil, &DialError{config, err} } - return -Error: - return nil, &DialError{config, err} + // Cleanup the connection if we fail to create the websocket successfully + success := false + defer func() { + if !success { + _ = client.Close() + } + }() + + var ws *Conn + var wsErr error + doneConnecting := make(chan struct{}) + go func() { + defer close(doneConnecting) + ws, err = NewClient(config, client) + if err != nil { + wsErr = &DialError{config, err} + } + }() + + // The websocket.NewClient() function can block indefinitely, make sure that we + // respect the deadlines specified by the context. 
+ select { + case <-ctx.Done(): + // Force the pending operations to fail, terminating the pending connection attempt + _ = client.SetDeadline(time.Now()) + <-doneConnecting // Wait for the goroutine that tries to establish the connection to finish + return nil, &DialError{config, ctx.Err()} + case <-doneConnecting: + if wsErr == nil { + success = true // Disarm the deferred connection cleanup + } + return ws, wsErr + } } diff --git a/vendor/golang.org/x/net/websocket/dial.go b/vendor/golang.org/x/net/websocket/dial.go index 2dab943a4..8a2d83c47 100644 --- a/vendor/golang.org/x/net/websocket/dial.go +++ b/vendor/golang.org/x/net/websocket/dial.go @@ -5,18 +5,23 @@ package websocket import ( + "context" "crypto/tls" "net" ) -func dialWithDialer(dialer *net.Dialer, config *Config) (conn net.Conn, err error) { +func dialWithDialer(ctx context.Context, dialer *net.Dialer, config *Config) (conn net.Conn, err error) { switch config.Location.Scheme { case "ws": - conn, err = dialer.Dial("tcp", parseAuthority(config.Location)) + conn, err = dialer.DialContext(ctx, "tcp", parseAuthority(config.Location)) case "wss": - conn, err = tls.DialWithDialer(dialer, "tcp", parseAuthority(config.Location), config.TlsConfig) + tlsDialer := &tls.Dialer{ + NetDialer: dialer, + Config: config.TlsConfig, + } + conn, err = tlsDialer.DialContext(ctx, "tcp", parseAuthority(config.Location)) default: err = ErrBadScheme } diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go index e7d3df4bd..b0e419857 100644 --- a/vendor/golang.org/x/sys/unix/aliases.go +++ b/vendor/golang.org/x/sys/unix/aliases.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index 16dc69937..2f0fa76e4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin && go1.12 +//go:build darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 64d1bb4db..2b57e0f73 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -13,6 +13,7 @@ package unix import ( + "errors" "sync" "unsafe" ) @@ -169,25 +170,26 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) - if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil { + // Suppress ENOMEM errors to be compatible with the C library __xuname() implementation. 
+ if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_HOSTNAME} n = unsafe.Sizeof(uname.Nodename) - if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_OSRELEASE} n = unsafe.Sizeof(uname.Release) - if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_VERSION} n = unsafe.Sizeof(uname.Version) - if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } @@ -205,7 +207,7 @@ func Uname(uname *Utsname) error { mib = []_C_int{CTL_HW, HW_MACHINE} n = unsafe.Sizeof(uname.Machine) - if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 0f85e29e6..5682e2628 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1849,6 +1849,105 @@ func Dup2(oldfd, newfd int) error { //sys Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) //sys Fsopen(fsName string, flags int) (fd int, err error) //sys Fspick(dirfd int, pathName string, flags int) (fd int, err error) + +//sys fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) + +func fsconfigCommon(fd int, cmd uint, key string, value *byte, aux int) (err error) { + var keyp *byte + if keyp, err = BytePtrFromString(key); err != nil { + return + } + return fsconfig(fd, cmd, keyp, value, aux) +} + +// FsconfigSetFlag is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FLAG. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +func FsconfigSetFlag(fd int, key string) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FLAG, key, nil, 0) +} + +// FsconfigSetString is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_STRING. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is the parameter value to set. +func FsconfigSetString(fd int, key string, value string) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(value); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_STRING, key, valuep, 0) +} + +// FsconfigSetBinary is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_BINARY. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is the parameter value to set. +func FsconfigSetBinary(fd int, key string, value []byte) (err error) { + if len(value) == 0 { + return EINVAL + } + return fsconfigCommon(fd, FSCONFIG_SET_BINARY, key, &value[0], len(value)) +} + +// FsconfigSetPath is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// path is a non-empty path for specified key. +// atfd is a file descriptor at which to start lookup from or AT_FDCWD. 
+func FsconfigSetPath(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH, key, valuep, atfd) +} + +// FsconfigSetPathEmpty is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH_EMPTY. The same as +// FconfigSetPath but with AT_PATH_EMPTY implied. +func FsconfigSetPathEmpty(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH_EMPTY, key, valuep, atfd) +} + +// FsconfigSetFd is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FD. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is a file descriptor to be assigned to specified key. +func FsconfigSetFd(fd int, key string, value int) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FD, key, nil, value) +} + +// FsconfigCreate is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_CREATE. +// +// fd is the filesystem context to act upon. +func FsconfigCreate(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_CREATE, nil, nil, 0) +} + +// FsconfigReconfigure is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_RECONFIGURE. +// +// fd is the filesystem context to act upon. +func FsconfigReconfigure(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_RECONFIGURE, nil, nil, 0) +} + //sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64 //sysnb Getpgid(pid int) (pgid int, err error) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1488d2712..87d8612a1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -906,6 +906,16 @@ func Fspick(dirfd int, pathName string, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) { + _, _, e1 := Syscall6(SYS_FSCONFIG, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(value)), uintptr(aux), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index dc0c955ee..eff6bcdef 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -836,6 +836,15 @@ const ( FSPICK_EMPTY_PATH = 0x8 FSMOUNT_CLOEXEC = 0x1 + + FSCONFIG_SET_FLAG = 0x0 + FSCONFIG_SET_STRING = 0x1 + FSCONFIG_SET_BINARY = 0x2 + FSCONFIG_SET_PATH = 0x3 + FSCONFIG_SET_PATH_EMPTY = 0x4 + FSCONFIG_SET_FD = 0x5 + FSCONFIG_CMD_CREATE = 0x6 + FSCONFIG_CMD_RECONFIGURE = 0x7 ) type OpenHow struct { @@ -1550,6 +1559,7 @@ const ( IFLA_DEVLINK_PORT = 0x3e IFLA_GSO_IPV4_MAX_SIZE = 0x3f IFLA_GRO_IPV4_MAX_SIZE = 0x40 + IFLA_DPLL_PIN = 0x41 IFLA_PROTO_DOWN_REASON_UNSPEC = 0x0 IFLA_PROTO_DOWN_REASON_MASK = 0x1 IFLA_PROTO_DOWN_REASON_VALUE = 0x2 @@ -1565,6 +1575,7 @@ const ( IFLA_INET6_ICMP6STATS = 0x6 IFLA_INET6_TOKEN = 0x7 IFLA_INET6_ADDR_GEN_MODE = 0x8 + IFLA_INET6_RA_MTU = 0x9 IFLA_BR_UNSPEC = 0x0 IFLA_BR_FORWARD_DELAY = 0x1 IFLA_BR_HELLO_TIME = 0x2 @@ -1612,6 +1623,9 @@ const ( 
IFLA_BR_MCAST_MLD_VERSION = 0x2c IFLA_BR_VLAN_STATS_PER_PORT = 0x2d IFLA_BR_MULTI_BOOLOPT = 0x2e + IFLA_BR_MCAST_QUERIER_STATE = 0x2f + IFLA_BR_FDB_N_LEARNED = 0x30 + IFLA_BR_FDB_MAX_LEARNED = 0x31 IFLA_BRPORT_UNSPEC = 0x0 IFLA_BRPORT_STATE = 0x1 IFLA_BRPORT_PRIORITY = 0x2 @@ -1649,6 +1663,14 @@ const ( IFLA_BRPORT_BACKUP_PORT = 0x22 IFLA_BRPORT_MRP_RING_OPEN = 0x23 IFLA_BRPORT_MRP_IN_OPEN = 0x24 + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT = 0x25 + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT = 0x26 + IFLA_BRPORT_LOCKED = 0x27 + IFLA_BRPORT_MAB = 0x28 + IFLA_BRPORT_MCAST_N_GROUPS = 0x29 + IFLA_BRPORT_MCAST_MAX_GROUPS = 0x2a + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS = 0x2b + IFLA_BRPORT_BACKUP_NHID = 0x2c IFLA_INFO_UNSPEC = 0x0 IFLA_INFO_KIND = 0x1 IFLA_INFO_DATA = 0x2 @@ -1670,6 +1692,9 @@ const ( IFLA_MACVLAN_MACADDR = 0x4 IFLA_MACVLAN_MACADDR_DATA = 0x5 IFLA_MACVLAN_MACADDR_COUNT = 0x6 + IFLA_MACVLAN_BC_QUEUE_LEN = 0x7 + IFLA_MACVLAN_BC_QUEUE_LEN_USED = 0x8 + IFLA_MACVLAN_BC_CUTOFF = 0x9 IFLA_VRF_UNSPEC = 0x0 IFLA_VRF_TABLE = 0x1 IFLA_VRF_PORT_UNSPEC = 0x0 @@ -1693,9 +1718,22 @@ const ( IFLA_XFRM_UNSPEC = 0x0 IFLA_XFRM_LINK = 0x1 IFLA_XFRM_IF_ID = 0x2 + IFLA_XFRM_COLLECT_METADATA = 0x3 IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 + IFLA_NETKIT_UNSPEC = 0x0 + IFLA_NETKIT_PEER_INFO = 0x1 + IFLA_NETKIT_PRIMARY = 0x2 + IFLA_NETKIT_POLICY = 0x3 + IFLA_NETKIT_PEER_POLICY = 0x4 + IFLA_NETKIT_MODE = 0x5 IFLA_VXLAN_UNSPEC = 0x0 IFLA_VXLAN_ID = 0x1 IFLA_VXLAN_GROUP = 0x2 @@ -1726,6 +1764,8 @@ const ( IFLA_VXLAN_GPE = 0x1b IFLA_VXLAN_TTL_INHERIT = 0x1c IFLA_VXLAN_DF = 0x1d + IFLA_VXLAN_VNIFILTER = 0x1e + IFLA_VXLAN_LOCALBYPASS = 0x1f IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1740,6 +1780,7 @@ const ( IFLA_GENEVE_LABEL = 0xb IFLA_GENEVE_TTL_INHERIT = 0xc IFLA_GENEVE_DF = 0xd + IFLA_GENEVE_INNER_PROTO_INHERIT = 0xe IFLA_BAREUDP_UNSPEC = 0x0 IFLA_BAREUDP_PORT = 0x1 IFLA_BAREUDP_ETHERTYPE = 0x2 @@ -1752,6 +1793,8 @@ const ( IFLA_GTP_FD1 = 0x2 IFLA_GTP_PDP_HASHSIZE = 0x3 IFLA_GTP_ROLE = 0x4 + IFLA_GTP_CREATE_SOCKETS = 0x5 + IFLA_GTP_RESTART_COUNT = 0x6 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1781,6 +1824,9 @@ const ( IFLA_BOND_AD_ACTOR_SYSTEM = 0x1a IFLA_BOND_TLB_DYNAMIC_LB = 0x1b IFLA_BOND_PEER_NOTIF_DELAY = 0x1c + IFLA_BOND_AD_LACP_ACTIVE = 0x1d + IFLA_BOND_MISSED_MAX = 0x1e + IFLA_BOND_NS_IP6_TARGET = 0x1f IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1796,6 +1842,7 @@ const ( IFLA_BOND_SLAVE_AD_AGGREGATOR_ID = 0x6 IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE = 0x7 IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE = 0x8 + IFLA_BOND_SLAVE_PRIO = 0x9 IFLA_VF_INFO_UNSPEC = 0x0 IFLA_VF_INFO = 0x1 IFLA_VF_UNSPEC = 0x0 @@ -1854,8 +1901,16 @@ const ( IFLA_STATS_LINK_XSTATS_SLAVE = 0x3 IFLA_STATS_LINK_OFFLOAD_XSTATS = 0x4 IFLA_STATS_AF_SPEC = 0x5 + IFLA_STATS_GETSET_UNSPEC = 0x0 + IFLA_STATS_GET_FILTERS = 0x1 + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 0x2 IFLA_OFFLOAD_XSTATS_UNSPEC = 0x0 IFLA_OFFLOAD_XSTATS_CPU_HIT = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO = 0x2 + IFLA_OFFLOAD_XSTATS_L3_STATS = 0x3 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC = 0x0 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED = 0x2 IFLA_XDP_UNSPEC = 0x0 IFLA_XDP_FD = 0x1 IFLA_XDP_ATTACHED = 0x2 @@ -1885,6 +1940,11 @@ const ( IFLA_RMNET_UNSPEC = 0x0 IFLA_RMNET_MUX_ID = 0x1 
IFLA_RMNET_FLAGS = 0x2 + IFLA_MCTP_UNSPEC = 0x0 + IFLA_MCTP_NET = 0x1 + IFLA_DSA_UNSPEC = 0x0 + IFLA_DSA_CONDUIT = 0x1 + IFLA_DSA_MASTER = 0x1 ) const ( diff --git a/vendor/modules.txt b/vendor/modules.txt index 81bef22f3..0cb119805 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -536,7 +536,7 @@ go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.19.0 +# golang.org/x/crypto v0.21.0 ## explicit; go 1.18 golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 @@ -557,7 +557,7 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/slices -# golang.org/x/net v0.21.0 +# golang.org/x/net v0.22.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html @@ -580,14 +580,14 @@ golang.org/x/oauth2/internal ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/singleflight -# golang.org/x/sys v0.17.0 +# golang.org/x/sys v0.18.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.17.0 +# golang.org/x/term v0.18.0 ## explicit; go 1.18 golang.org/x/term # golang.org/x/text v0.14.0 From df475475d931d7b151d3ad44b57e51d2e0e9c3bb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 7 Mar 2024 16:51:00 +0000 Subject: [PATCH 088/157] chore(deps): bump github.com/golang/protobuf from 1.5.3 to 1.5.4 Bumps [github.com/golang/protobuf](https://github.com/golang/protobuf) from 1.5.3 to 1.5.4. - [Release notes](https://github.com/golang/protobuf/releases) - [Commits](https://github.com/golang/protobuf/compare/v1.5.3...v1.5.4) --- updated-dependencies: - dependency-name: github.com/golang/protobuf dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 4 +- go.sum | 12 +- .../golang/protobuf/jsonpb/decode.go | 1 + .../golang/protobuf/jsonpb/encode.go | 1 + .../protoc-gen-go/descriptor/descriptor.pb.go | 128 +- .../github.com/golang/protobuf/ptypes/any.go | 7 +- .../encoding/protojson/well_known_types.go | 4 + .../internal/editiondefaults/defaults.go | 12 + .../editiondefaults}/editions_defaults.binpb | 2 +- .../protobuf/internal/encoding/json/decode.go | 2 +- .../protobuf/internal/filedesc/desc.go | 67 +- .../protobuf/internal/filedesc/desc_init.go | 52 + .../protobuf/internal/filedesc/desc_lazy.go | 28 + .../protobuf/internal/filedesc/editions.go | 142 ++ .../protobuf/internal/genid/descriptor_gen.go | 152 +- .../internal/genid/go_features_gen.go | 31 + .../protobuf/internal/genid/struct_gen.go | 5 + .../protobuf/internal/genid/type_gen.go | 38 + .../protobuf/internal/impl/codec_extension.go | 22 +- .../protobuf/internal/impl/codec_tables.go | 2 +- .../internal/impl/message_reflect_field.go | 2 +- .../protobuf/internal/strs/strings.go | 2 +- .../protobuf/internal/version/version.go | 2 +- .../protobuf/reflect/protodesc/desc_init.go | 42 +- .../reflect/protodesc/desc_resolve.go | 4 +- .../reflect/protodesc/desc_validate.go | 6 +- .../protobuf/reflect/protodesc/editions.go | 131 +- .../protobuf/reflect/protoreflect/proto.go | 2 + .../reflect/protoreflect/source_gen.go | 2 - .../types/descriptorpb/descriptor.pb.go | 1254 ++++++++--------- .../protobuf/types/dynamicpb/dynamic.go | 2 +- .../types/gofeaturespb/go_features.pb.go | 177 +++ .../types/gofeaturespb/go_features.proto | 28 + vendor/modules.txt | 8 +- 34 files changed, 1587 insertions(+), 787 deletions(-) create mode 100644 vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go rename vendor/google.golang.org/protobuf/{reflect/protodesc => internal/editiondefaults}/editions_defaults.binpb (69%) create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/editions.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go create mode 100644 vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto diff --git a/go.mod b/go.mod index c1e966273..83fa381a8 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/Azure/go-autorest/autorest/adal v0.9.23 github.com/container-storage-interface/spec v1.8.0 github.com/gofrs/uuid v4.4.0+incompatible // indirect - github.com/golang/protobuf v1.5.3 + github.com/golang/protobuf v1.5.4 github.com/kubernetes-csi/csi-lib-utils v0.16.0 github.com/onsi/ginkgo/v2 v2.16.0 github.com/onsi/gomega v1.31.1 @@ -28,7 +28,7 @@ require ( go.uber.org/mock v0.4.0 golang.org/x/net v0.22.0 google.golang.org/grpc v1.59.0 - google.golang.org/protobuf v1.32.0 + google.golang.org/protobuf v1.33.0 k8s.io/api v0.29.2 k8s.io/apimachinery v0.29.2 k8s.io/client-go v0.29.2 diff --git a/go.sum b/go.sum index f92992336..0f33d2c7a 100644 --- a/go.sum +++ b/go.sum @@ -136,16 +136,14 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ= github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -404,10 +402,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go index 6c16c255f..c6f66f103 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/decode.go +++ b/vendor/github.com/golang/protobuf/jsonpb/decode.go @@ -56,6 +56,7 @@ type Unmarshaler struct { // implement JSONPBMarshaler so that the custom format can be produced. // // The JSON unmarshaling must follow the JSON to proto specification: +// // https://developers.google.com/protocol-buffers/docs/proto3#json // // Deprecated: Custom types should implement protobuf reflection instead. diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go index 685c80a62..e9438a93f 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/encode.go +++ b/vendor/github.com/golang/protobuf/jsonpb/encode.go @@ -55,6 +55,7 @@ type Marshaler struct { // implement JSONPBUnmarshaler so that the custom format can be parsed. 
// // The JSON marshaling must follow the proto to JSON specification: +// // https://developers.google.com/protocol-buffers/docs/proto3#json // // Deprecated: Custom types should implement protobuf reflection instead. diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go index 63dc05785..a5a138613 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -12,6 +12,31 @@ import ( // Symbols defined in public import of google/protobuf/descriptor.proto. +type Edition = descriptorpb.Edition + +const Edition_EDITION_UNKNOWN = descriptorpb.Edition_EDITION_UNKNOWN +const Edition_EDITION_PROTO2 = descriptorpb.Edition_EDITION_PROTO2 +const Edition_EDITION_PROTO3 = descriptorpb.Edition_EDITION_PROTO3 +const Edition_EDITION_2023 = descriptorpb.Edition_EDITION_2023 +const Edition_EDITION_2024 = descriptorpb.Edition_EDITION_2024 +const Edition_EDITION_1_TEST_ONLY = descriptorpb.Edition_EDITION_1_TEST_ONLY +const Edition_EDITION_2_TEST_ONLY = descriptorpb.Edition_EDITION_2_TEST_ONLY +const Edition_EDITION_99997_TEST_ONLY = descriptorpb.Edition_EDITION_99997_TEST_ONLY +const Edition_EDITION_99998_TEST_ONLY = descriptorpb.Edition_EDITION_99998_TEST_ONLY +const Edition_EDITION_99999_TEST_ONLY = descriptorpb.Edition_EDITION_99999_TEST_ONLY +const Edition_EDITION_MAX = descriptorpb.Edition_EDITION_MAX + +var Edition_name = descriptorpb.Edition_name +var Edition_value = descriptorpb.Edition_value + +type ExtensionRangeOptions_VerificationState = descriptorpb.ExtensionRangeOptions_VerificationState + +const ExtensionRangeOptions_DECLARATION = descriptorpb.ExtensionRangeOptions_DECLARATION +const ExtensionRangeOptions_UNVERIFIED = descriptorpb.ExtensionRangeOptions_UNVERIFIED + +var ExtensionRangeOptions_VerificationState_name = descriptorpb.ExtensionRangeOptions_VerificationState_name +var ExtensionRangeOptions_VerificationState_value = descriptorpb.ExtensionRangeOptions_VerificationState_value + type FieldDescriptorProto_Type = descriptorpb.FieldDescriptorProto_Type const FieldDescriptorProto_TYPE_DOUBLE = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE @@ -39,8 +64,8 @@ var FieldDescriptorProto_Type_value = descriptorpb.FieldDescriptorProto_Type_val type FieldDescriptorProto_Label = descriptorpb.FieldDescriptorProto_Label const FieldDescriptorProto_LABEL_OPTIONAL = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL -const FieldDescriptorProto_LABEL_REQUIRED = descriptorpb.FieldDescriptorProto_LABEL_REQUIRED const FieldDescriptorProto_LABEL_REPEATED = descriptorpb.FieldDescriptorProto_LABEL_REPEATED +const FieldDescriptorProto_LABEL_REQUIRED = descriptorpb.FieldDescriptorProto_LABEL_REQUIRED var FieldDescriptorProto_Label_name = descriptorpb.FieldDescriptorProto_Label_name var FieldDescriptorProto_Label_value = descriptorpb.FieldDescriptorProto_Label_value @@ -72,6 +97,31 @@ const FieldOptions_JS_NUMBER = descriptorpb.FieldOptions_JS_NUMBER var FieldOptions_JSType_name = descriptorpb.FieldOptions_JSType_name var FieldOptions_JSType_value = descriptorpb.FieldOptions_JSType_value +type FieldOptions_OptionRetention = descriptorpb.FieldOptions_OptionRetention + +const FieldOptions_RETENTION_UNKNOWN = descriptorpb.FieldOptions_RETENTION_UNKNOWN +const FieldOptions_RETENTION_RUNTIME = descriptorpb.FieldOptions_RETENTION_RUNTIME +const FieldOptions_RETENTION_SOURCE = 
descriptorpb.FieldOptions_RETENTION_SOURCE + +var FieldOptions_OptionRetention_name = descriptorpb.FieldOptions_OptionRetention_name +var FieldOptions_OptionRetention_value = descriptorpb.FieldOptions_OptionRetention_value + +type FieldOptions_OptionTargetType = descriptorpb.FieldOptions_OptionTargetType + +const FieldOptions_TARGET_TYPE_UNKNOWN = descriptorpb.FieldOptions_TARGET_TYPE_UNKNOWN +const FieldOptions_TARGET_TYPE_FILE = descriptorpb.FieldOptions_TARGET_TYPE_FILE +const FieldOptions_TARGET_TYPE_EXTENSION_RANGE = descriptorpb.FieldOptions_TARGET_TYPE_EXTENSION_RANGE +const FieldOptions_TARGET_TYPE_MESSAGE = descriptorpb.FieldOptions_TARGET_TYPE_MESSAGE +const FieldOptions_TARGET_TYPE_FIELD = descriptorpb.FieldOptions_TARGET_TYPE_FIELD +const FieldOptions_TARGET_TYPE_ONEOF = descriptorpb.FieldOptions_TARGET_TYPE_ONEOF +const FieldOptions_TARGET_TYPE_ENUM = descriptorpb.FieldOptions_TARGET_TYPE_ENUM +const FieldOptions_TARGET_TYPE_ENUM_ENTRY = descriptorpb.FieldOptions_TARGET_TYPE_ENUM_ENTRY +const FieldOptions_TARGET_TYPE_SERVICE = descriptorpb.FieldOptions_TARGET_TYPE_SERVICE +const FieldOptions_TARGET_TYPE_METHOD = descriptorpb.FieldOptions_TARGET_TYPE_METHOD + +var FieldOptions_OptionTargetType_name = descriptorpb.FieldOptions_OptionTargetType_name +var FieldOptions_OptionTargetType_value = descriptorpb.FieldOptions_OptionTargetType_value + type MethodOptions_IdempotencyLevel = descriptorpb.MethodOptions_IdempotencyLevel const MethodOptions_IDEMPOTENCY_UNKNOWN = descriptorpb.MethodOptions_IDEMPOTENCY_UNKNOWN @@ -81,10 +131,77 @@ const MethodOptions_IDEMPOTENT = descriptorpb.MethodOptions_IDEMPOTENT var MethodOptions_IdempotencyLevel_name = descriptorpb.MethodOptions_IdempotencyLevel_name var MethodOptions_IdempotencyLevel_value = descriptorpb.MethodOptions_IdempotencyLevel_value +type FeatureSet_FieldPresence = descriptorpb.FeatureSet_FieldPresence + +const FeatureSet_FIELD_PRESENCE_UNKNOWN = descriptorpb.FeatureSet_FIELD_PRESENCE_UNKNOWN +const FeatureSet_EXPLICIT = descriptorpb.FeatureSet_EXPLICIT +const FeatureSet_IMPLICIT = descriptorpb.FeatureSet_IMPLICIT +const FeatureSet_LEGACY_REQUIRED = descriptorpb.FeatureSet_LEGACY_REQUIRED + +var FeatureSet_FieldPresence_name = descriptorpb.FeatureSet_FieldPresence_name +var FeatureSet_FieldPresence_value = descriptorpb.FeatureSet_FieldPresence_value + +type FeatureSet_EnumType = descriptorpb.FeatureSet_EnumType + +const FeatureSet_ENUM_TYPE_UNKNOWN = descriptorpb.FeatureSet_ENUM_TYPE_UNKNOWN +const FeatureSet_OPEN = descriptorpb.FeatureSet_OPEN +const FeatureSet_CLOSED = descriptorpb.FeatureSet_CLOSED + +var FeatureSet_EnumType_name = descriptorpb.FeatureSet_EnumType_name +var FeatureSet_EnumType_value = descriptorpb.FeatureSet_EnumType_value + +type FeatureSet_RepeatedFieldEncoding = descriptorpb.FeatureSet_RepeatedFieldEncoding + +const FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN = descriptorpb.FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN +const FeatureSet_PACKED = descriptorpb.FeatureSet_PACKED +const FeatureSet_EXPANDED = descriptorpb.FeatureSet_EXPANDED + +var FeatureSet_RepeatedFieldEncoding_name = descriptorpb.FeatureSet_RepeatedFieldEncoding_name +var FeatureSet_RepeatedFieldEncoding_value = descriptorpb.FeatureSet_RepeatedFieldEncoding_value + +type FeatureSet_Utf8Validation = descriptorpb.FeatureSet_Utf8Validation + +const FeatureSet_UTF8_VALIDATION_UNKNOWN = descriptorpb.FeatureSet_UTF8_VALIDATION_UNKNOWN +const FeatureSet_VERIFY = descriptorpb.FeatureSet_VERIFY +const FeatureSet_NONE = descriptorpb.FeatureSet_NONE + +var 
FeatureSet_Utf8Validation_name = descriptorpb.FeatureSet_Utf8Validation_name +var FeatureSet_Utf8Validation_value = descriptorpb.FeatureSet_Utf8Validation_value + +type FeatureSet_MessageEncoding = descriptorpb.FeatureSet_MessageEncoding + +const FeatureSet_MESSAGE_ENCODING_UNKNOWN = descriptorpb.FeatureSet_MESSAGE_ENCODING_UNKNOWN +const FeatureSet_LENGTH_PREFIXED = descriptorpb.FeatureSet_LENGTH_PREFIXED +const FeatureSet_DELIMITED = descriptorpb.FeatureSet_DELIMITED + +var FeatureSet_MessageEncoding_name = descriptorpb.FeatureSet_MessageEncoding_name +var FeatureSet_MessageEncoding_value = descriptorpb.FeatureSet_MessageEncoding_value + +type FeatureSet_JsonFormat = descriptorpb.FeatureSet_JsonFormat + +const FeatureSet_JSON_FORMAT_UNKNOWN = descriptorpb.FeatureSet_JSON_FORMAT_UNKNOWN +const FeatureSet_ALLOW = descriptorpb.FeatureSet_ALLOW +const FeatureSet_LEGACY_BEST_EFFORT = descriptorpb.FeatureSet_LEGACY_BEST_EFFORT + +var FeatureSet_JsonFormat_name = descriptorpb.FeatureSet_JsonFormat_name +var FeatureSet_JsonFormat_value = descriptorpb.FeatureSet_JsonFormat_value + +type GeneratedCodeInfo_Annotation_Semantic = descriptorpb.GeneratedCodeInfo_Annotation_Semantic + +const GeneratedCodeInfo_Annotation_NONE = descriptorpb.GeneratedCodeInfo_Annotation_NONE +const GeneratedCodeInfo_Annotation_SET = descriptorpb.GeneratedCodeInfo_Annotation_SET +const GeneratedCodeInfo_Annotation_ALIAS = descriptorpb.GeneratedCodeInfo_Annotation_ALIAS + +var GeneratedCodeInfo_Annotation_Semantic_name = descriptorpb.GeneratedCodeInfo_Annotation_Semantic_name +var GeneratedCodeInfo_Annotation_Semantic_value = descriptorpb.GeneratedCodeInfo_Annotation_Semantic_value + type FileDescriptorSet = descriptorpb.FileDescriptorSet type FileDescriptorProto = descriptorpb.FileDescriptorProto type DescriptorProto = descriptorpb.DescriptorProto type ExtensionRangeOptions = descriptorpb.ExtensionRangeOptions + +const Default_ExtensionRangeOptions_Verification = descriptorpb.Default_ExtensionRangeOptions_Verification + type FieldDescriptorProto = descriptorpb.FieldDescriptorProto type OneofDescriptorProto = descriptorpb.OneofDescriptorProto type EnumDescriptorProto = descriptorpb.EnumDescriptorProto @@ -103,7 +220,6 @@ const Default_FileOptions_OptimizeFor = descriptorpb.Default_FileOptions_Optimiz const Default_FileOptions_CcGenericServices = descriptorpb.Default_FileOptions_CcGenericServices const Default_FileOptions_JavaGenericServices = descriptorpb.Default_FileOptions_JavaGenericServices const Default_FileOptions_PyGenericServices = descriptorpb.Default_FileOptions_PyGenericServices -const Default_FileOptions_PhpGenericServices = descriptorpb.Default_FileOptions_PhpGenericServices const Default_FileOptions_Deprecated = descriptorpb.Default_FileOptions_Deprecated const Default_FileOptions_CcEnableArenas = descriptorpb.Default_FileOptions_CcEnableArenas @@ -118,8 +234,10 @@ type FieldOptions = descriptorpb.FieldOptions const Default_FieldOptions_Ctype = descriptorpb.Default_FieldOptions_Ctype const Default_FieldOptions_Jstype = descriptorpb.Default_FieldOptions_Jstype const Default_FieldOptions_Lazy = descriptorpb.Default_FieldOptions_Lazy +const Default_FieldOptions_UnverifiedLazy = descriptorpb.Default_FieldOptions_UnverifiedLazy const Default_FieldOptions_Deprecated = descriptorpb.Default_FieldOptions_Deprecated const Default_FieldOptions_Weak = descriptorpb.Default_FieldOptions_Weak +const Default_FieldOptions_DebugRedact = descriptorpb.Default_FieldOptions_DebugRedact type OneofOptions = descriptorpb.OneofOptions 
type EnumOptions = descriptorpb.EnumOptions @@ -129,6 +247,7 @@ const Default_EnumOptions_Deprecated = descriptorpb.Default_EnumOptions_Deprecat type EnumValueOptions = descriptorpb.EnumValueOptions const Default_EnumValueOptions_Deprecated = descriptorpb.Default_EnumValueOptions_Deprecated +const Default_EnumValueOptions_DebugRedact = descriptorpb.Default_EnumValueOptions_DebugRedact type ServiceOptions = descriptorpb.ServiceOptions @@ -140,12 +259,17 @@ const Default_MethodOptions_Deprecated = descriptorpb.Default_MethodOptions_Depr const Default_MethodOptions_IdempotencyLevel = descriptorpb.Default_MethodOptions_IdempotencyLevel type UninterpretedOption = descriptorpb.UninterpretedOption +type FeatureSet = descriptorpb.FeatureSet +type FeatureSetDefaults = descriptorpb.FeatureSetDefaults type SourceCodeInfo = descriptorpb.SourceCodeInfo type GeneratedCodeInfo = descriptorpb.GeneratedCodeInfo type DescriptorProto_ExtensionRange = descriptorpb.DescriptorProto_ExtensionRange type DescriptorProto_ReservedRange = descriptorpb.DescriptorProto_ReservedRange +type ExtensionRangeOptions_Declaration = descriptorpb.ExtensionRangeOptions_Declaration type EnumDescriptorProto_EnumReservedRange = descriptorpb.EnumDescriptorProto_EnumReservedRange +type FieldOptions_EditionDefault = descriptorpb.FieldOptions_EditionDefault type UninterpretedOption_NamePart = descriptorpb.UninterpretedOption_NamePart +type FeatureSetDefaults_FeatureSetEditionDefault = descriptorpb.FeatureSetDefaults_FeatureSetEditionDefault type SourceCodeInfo_Location = descriptorpb.SourceCodeInfo_Location type GeneratedCodeInfo_Annotation = descriptorpb.GeneratedCodeInfo_Annotation diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index 85f9f5736..fdff3fdb4 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -127,9 +127,10 @@ func Is(any *anypb.Any, m proto.Message) bool { // The allocated message is stored in the embedded proto.Message. // // Example: -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) // // Deprecated: Use the any.UnmarshalNew method instead to unmarshal // the any message contents into a new instance of the underlying message. diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index 25329b769..4b177c820 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -322,6 +322,10 @@ func (d decoder) skipJSONValue() error { if open > d.opts.RecursionLimit { return errors.New("exceeded max recursion depth") } + case json.EOF: + // This can only happen if there's a bug in Decoder.Read. + // Avoid an infinite loop if this does happen. + return errors.New("unexpected EOF") } if open == 0 { return nil diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go b/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go new file mode 100644 index 000000000..14656b65a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go @@ -0,0 +1,12 @@ +// Copyright 2024 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package editiondefaults contains the binary representation of the editions +// defaults. +package editiondefaults + +import _ "embed" + +//go:embed editions_defaults.binpb +var Defaults []byte diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb similarity index 69% rename from vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb rename to vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index 1a8610a84..18f075687 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb +++ b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb @@ -1,4 +1,4 @@ -  (0æ +  (0æ   (0ç   (0è æ(è \ No newline at end of file diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go index d043a6ebe..d2b3ac031 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -121,7 +121,7 @@ func (d *Decoder) Read() (Token, error) { case ObjectClose: if len(d.openStack) == 0 || - d.lastToken.kind == comma || + d.lastToken.kind&(Name|comma) != 0 || d.openStack[len(d.openStack)-1] != ObjectOpen { return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 193c68e8f..8826bcf40 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -68,7 +68,7 @@ type ( Extensions Extensions Services Services - EditionFeatures FileEditionFeatures + EditionFeatures EditionFeatures } FileL2 struct { Options func() protoreflect.ProtoMessage @@ -76,10 +76,13 @@ type ( Locations SourceLocations } - FileEditionFeatures struct { + EditionFeatures struct { // IsFieldPresence is true if field_presence is EXPLICIT // https://protobuf.dev/editions/features/#field_presence IsFieldPresence bool + // IsFieldPresence is true if field_presence is LEGACY_REQUIRED + // https://protobuf.dev/editions/features/#field_presence + IsLegacyRequired bool // IsOpenEnum is true if enum_type is OPEN // https://protobuf.dev/editions/features/#enum_type IsOpenEnum bool @@ -95,6 +98,9 @@ type ( // IsJSONCompliant is true if json_format is ALLOW // https://protobuf.dev/editions/features/#json_format IsJSONCompliant bool + // GenerateLegacyUnmarshalJSON determines if the plugin generates the + // UnmarshalJSON([]byte) error method for enums. + GenerateLegacyUnmarshalJSON bool } ) @@ -156,6 +162,8 @@ type ( } EnumL1 struct { eagerValues bool // controls whether EnumL2.Values is already populated + + EditionFeatures EditionFeatures } EnumL2 struct { Options func() protoreflect.ProtoMessage @@ -217,6 +225,8 @@ type ( Extensions Extensions IsMapEntry bool // promoted from google.protobuf.MessageOptions IsMessageSet bool // promoted from google.protobuf.MessageOptions + + EditionFeatures EditionFeatures } MessageL2 struct { Options func() protoreflect.ProtoMessage @@ -250,8 +260,7 @@ type ( Enum protoreflect.EnumDescriptor Message protoreflect.MessageDescriptor - // Edition features. 
- Presence bool + EditionFeatures EditionFeatures } Oneof struct { @@ -261,6 +270,8 @@ type ( OneofL1 struct { Options func() protoreflect.ProtoMessage Fields OneofFields // must be consistent with Message.Fields.ContainingOneof + + EditionFeatures EditionFeatures } ) @@ -310,26 +321,36 @@ func (fd *Field) Options() protoreflect.ProtoMessage { } func (fd *Field) Number() protoreflect.FieldNumber { return fd.L1.Number } func (fd *Field) Cardinality() protoreflect.Cardinality { return fd.L1.Cardinality } -func (fd *Field) Kind() protoreflect.Kind { return fd.L1.Kind } -func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } -func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } -func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } +func (fd *Field) Kind() protoreflect.Kind { + return fd.L1.Kind +} +func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } +func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } +func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } func (fd *Field) HasPresence() bool { - if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { - return fd.L1.Presence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil + if fd.L1.Cardinality == protoreflect.Repeated { + return false } - return fd.L1.Cardinality != protoreflect.Repeated && (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) + explicitFieldPresence := fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsFieldPresence + return fd.Syntax() == protoreflect.Proto2 || explicitFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil } func (fd *Field) HasOptionalKeyword() bool { return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional } func (fd *Field) IsPacked() bool { - if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Repeated { - switch fd.L1.Kind { - case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: - default: - return true - } + if fd.L1.Cardinality != protoreflect.Repeated { + return false + } + switch fd.L1.Kind { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { + return fd.L1.EditionFeatures.IsPacked + } + if fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 { + // proto3 repeated fields are packed by default. + return !fd.L1.HasPacked || fd.L1.IsPacked } return fd.L1.IsPacked } @@ -378,6 +399,9 @@ func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} // WARNING: This method is exempt from the compatibility promise and may be // removed in the future without warning. 
func (fd *Field) EnforceUTF8() bool { + if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { + return fd.L1.EditionFeatures.IsUTF8Validated + } if fd.L1.HasEnforceUTF8 { return fd.L1.EnforceUTF8 } @@ -404,10 +428,11 @@ type ( L2 *ExtensionL2 // protected by fileDesc.once } ExtensionL1 struct { - Number protoreflect.FieldNumber - Extendee protoreflect.MessageDescriptor - Cardinality protoreflect.Cardinality - Kind protoreflect.Kind + Number protoreflect.FieldNumber + Extendee protoreflect.MessageDescriptor + Cardinality protoreflect.Cardinality + Kind protoreflect.Kind + EditionFeatures EditionFeatures } ExtensionL2 struct { Options func() protoreflect.ProtoMessage diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 4a1584c9d..237e64fd2 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -5,6 +5,7 @@ package filedesc import ( + "fmt" "sync" "google.golang.org/protobuf/encoding/protowire" @@ -98,6 +99,7 @@ func (fd *File) unmarshalSeed(b []byte) { var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions, numServices int var posEnums, posMessages, posExtensions, posServices int + var options []byte b0 := b for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -113,6 +115,8 @@ func (fd *File) unmarshalSeed(b []byte) { fd.L1.Syntax = protoreflect.Proto2 case "proto3": fd.L1.Syntax = protoreflect.Proto3 + case "editions": + fd.L1.Syntax = protoreflect.Editions default: panic("invalid syntax") } @@ -120,6 +124,8 @@ func (fd *File) unmarshalSeed(b []byte) { fd.L1.Path = sb.MakeString(v) case genid.FileDescriptorProto_Package_field_number: fd.L1.Package = protoreflect.FullName(sb.MakeString(v)) + case genid.FileDescriptorProto_Options_field_number: + options = v case genid.FileDescriptorProto_EnumType_field_number: if prevField != genid.FileDescriptorProto_EnumType_field_number { if numEnums > 0 { @@ -154,6 +160,13 @@ func (fd *File) unmarshalSeed(b []byte) { numServices++ } prevField = num + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_Edition_field_number: + fd.L1.Edition = Edition(v) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] @@ -166,6 +179,15 @@ func (fd *File) unmarshalSeed(b []byte) { fd.L1.Syntax = protoreflect.Proto2 } + if fd.L1.Syntax == protoreflect.Editions { + fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition) + } + + // Parse editions features from options if any + if options != nil { + fd.unmarshalSeedOptions(options) + } + // Must allocate all declarations before parsing each descriptor type // to ensure we handled all descriptors in "flattened ordering". 
if numEnums > 0 { @@ -219,6 +241,28 @@ func (fd *File) unmarshalSeed(b []byte) { } } +func (fd *File) unmarshalSeedOptions(b []byte) { + for b := b; len(b) > 0; { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FileOptions_Features_field_number: + if fd.Syntax() != protoreflect.Editions { + panic(fmt.Sprintf("invalid descriptor: using edition features in a proto with syntax %s", fd.Syntax())) + } + fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { ed.L0.ParentFile = pf ed.L0.Parent = pd @@ -275,6 +319,7 @@ func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor md.L0.ParentFile = pf md.L0.Parent = pd md.L0.Index = i + md.L1.EditionFeatures = featuresFromParentDesc(md.Parent()) var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions int @@ -380,6 +425,13 @@ func (md *Message) unmarshalSeedOptions(b []byte) { case genid.MessageOptions_MessageSetWireFormat_field_number: md.L1.IsMessageSet = protowire.DecodeBool(v) } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.MessageOptions_Features_field_number: + md.L1.EditionFeatures = unmarshalFeatureSet(v, md.L1.EditionFeatures) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 736a19a75..482a61cc1 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -414,6 +414,7 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref fd.L0.ParentFile = pf fd.L0.Parent = pd fd.L0.Index = i + fd.L1.EditionFeatures = featuresFromParentDesc(fd.Parent()) var rawTypeName []byte var rawOptions []byte @@ -465,6 +466,12 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref b = b[m:] } } + if fd.Syntax() == protoreflect.Editions && fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded { + fd.L1.Kind = protoreflect.GroupKind + } + if fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsLegacyRequired { + fd.L1.Cardinality = protoreflect.Required + } if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch fd.L1.Kind { @@ -497,6 +504,13 @@ func (fd *Field) unmarshalOptions(b []byte) { fd.L1.HasEnforceUTF8 = true fd.L1.EnforceUTF8 = protowire.DecodeBool(v) } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldOptions_Features_field_number: + fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] @@ -534,6 +548,7 @@ func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { var rawTypeName []byte var rawOptions []byte + xd.L1.EditionFeatures = featuresFromParentDesc(xd.L1.Extendee) xd.L2 = new(ExtensionL2) for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -565,6 +580,12 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { b = 
b[m:] } } + if xd.Syntax() == protoreflect.Editions && xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded { + xd.L1.Kind = protoreflect.GroupKind + } + if xd.Syntax() == protoreflect.Editions && xd.L1.EditionFeatures.IsLegacyRequired { + xd.L1.Cardinality = protoreflect.Required + } if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch xd.L1.Kind { @@ -589,6 +610,13 @@ func (xd *Extension) unmarshalOptions(b []byte) { case genid.FieldOptions_Packed_field_number: xd.L2.IsPacked = protowire.DecodeBool(v) } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldOptions_Features_field_number: + xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go new file mode 100644 index 000000000..0375a49d4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -0,0 +1,142 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "fmt" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/editiondefaults" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/reflect/protoreflect" +) + +var defaultsCache = make(map[Edition]EditionFeatures) + +func init() { + unmarshalEditionDefaults(editiondefaults.Defaults) +} + +func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { + for len(b) > 0 { + num, _, n := protowire.ConsumeTag(b) + b = b[n:] + switch num { + case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v) + default: + panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num)) + } + } + return parent +} + +func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FeatureSet_FieldPresence_field_number: + parent.IsFieldPresence = v == genid.FeatureSet_EXPLICIT_enum_value || v == genid.FeatureSet_LEGACY_REQUIRED_enum_value + parent.IsLegacyRequired = v == genid.FeatureSet_LEGACY_REQUIRED_enum_value + case genid.FeatureSet_EnumType_field_number: + parent.IsOpenEnum = v == genid.FeatureSet_OPEN_enum_value + case genid.FeatureSet_RepeatedFieldEncoding_field_number: + parent.IsPacked = v == genid.FeatureSet_PACKED_enum_value + case genid.FeatureSet_Utf8Validation_field_number: + parent.IsUTF8Validated = v == genid.FeatureSet_VERIFY_enum_value + case genid.FeatureSet_MessageEncoding_field_number: + parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value + case genid.FeatureSet_JsonFormat_field_number: + parent.IsJSONCompliant = v == genid.FeatureSet_ALLOW_enum_value + default: + panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num)) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + parent = unmarshalGoFeature(v, parent) + } + } + } + + return parent +} 
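The helpers above walk the raw wire format with the protowire package instead of unmarshalling into generated descriptor types, which keeps this low-level package independent of descriptorpb. For reference, the same consume-tag/consume-value pattern in isolation looks like the minimal sketch below; the field number 1 and its boolean interpretation are illustrative assumptions, not the real FeatureSet schema.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Encode a single varint field: field number 1 (illustrative), value true.
	var b []byte
	b = protowire.AppendTag(b, 1, protowire.VarintType)
	b = protowire.AppendVarint(b, protowire.EncodeBool(true))

	// Decode it with the same tag/value loop style used in editions.go above.
	for len(b) > 0 {
		num, typ, n := protowire.ConsumeTag(b)
		if n < 0 {
			panic(protowire.ParseError(n))
		}
		b = b[n:]
		if num == 1 && typ == protowire.VarintType {
			v, m := protowire.ConsumeVarint(b)
			b = b[m:]
			fmt.Println("field 1 =", protowire.DecodeBool(v))
			continue
		}
		// Skip fields we do not recognize instead of failing.
		m := protowire.ConsumeFieldValue(num, typ, b)
		if m < 0 {
			panic(protowire.ParseError(m))
		}
		b = b[m:]
	}
}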
+ +func featuresFromParentDesc(parentDesc protoreflect.Descriptor) EditionFeatures { + var parentFS EditionFeatures + switch p := parentDesc.(type) { + case *File: + parentFS = p.L1.EditionFeatures + case *Message: + parentFS = p.L1.EditionFeatures + default: + panic(fmt.Sprintf("unknown parent type %T", parentDesc)) + } + return parentFS +} + +func unmarshalEditionDefault(b []byte) { + var ed Edition + var fs EditionFeatures + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number: + ed = Edition(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number: + fs = unmarshalFeatureSet(v, fs) + } + } + } + defaultsCache[ed] = fs +} + +func unmarshalEditionDefaults(b []byte) { + for len(b) > 0 { + num, _, n := protowire.ConsumeTag(b) + b = b[n:] + switch num { + case genid.FeatureSetDefaults_Defaults_field_number: + def, m := protowire.ConsumeBytes(b) + b = b[m:] + unmarshalEditionDefault(def) + case genid.FeatureSetDefaults_MinimumEdition_field_number, + genid.FeatureSetDefaults_MaximumEdition_field_number: + // We don't care about the minimum and maximum editions. If the + // edition we are looking for later on is not in the cache we know + // it is outside of the range between minimum and maximum edition. + _, m := protowire.ConsumeVarint(b) + b = b[m:] + default: + panic(fmt.Sprintf("unkown field number %d while unmarshalling EditionDefault", num)) + } + } +} + +func getFeaturesFor(ed Edition) EditionFeatures { + if def, ok := defaultsCache[ed]; ok { + return def + } + panic(fmt.Sprintf("unsupported edition: %v", ed)) +} diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 8f94230ea..40272c893 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -18,6 +18,21 @@ const ( Edition_enum_name = "Edition" ) +// Enum values for google.protobuf.Edition. +const ( + Edition_EDITION_UNKNOWN_enum_value = 0 + Edition_EDITION_PROTO2_enum_value = 998 + Edition_EDITION_PROTO3_enum_value = 999 + Edition_EDITION_2023_enum_value = 1000 + Edition_EDITION_2024_enum_value = 1001 + Edition_EDITION_1_TEST_ONLY_enum_value = 1 + Edition_EDITION_2_TEST_ONLY_enum_value = 2 + Edition_EDITION_99997_TEST_ONLY_enum_value = 99997 + Edition_EDITION_99998_TEST_ONLY_enum_value = 99998 + Edition_EDITION_99999_TEST_ONLY_enum_value = 99999 + Edition_EDITION_MAX_enum_value = 2147483647 +) + // Names for google.protobuf.FileDescriptorSet. const ( FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" @@ -213,6 +228,12 @@ const ( ExtensionRangeOptions_VerificationState_enum_name = "VerificationState" ) +// Enum values for google.protobuf.ExtensionRangeOptions.VerificationState. +const ( + ExtensionRangeOptions_DECLARATION_enum_value = 0 + ExtensionRangeOptions_UNVERIFIED_enum_value = 1 +) + // Names for google.protobuf.ExtensionRangeOptions.Declaration. const ( ExtensionRangeOptions_Declaration_message_name protoreflect.Name = "Declaration" @@ -297,12 +318,41 @@ const ( FieldDescriptorProto_Type_enum_name = "Type" ) +// Enum values for google.protobuf.FieldDescriptorProto.Type. 
+const ( + FieldDescriptorProto_TYPE_DOUBLE_enum_value = 1 + FieldDescriptorProto_TYPE_FLOAT_enum_value = 2 + FieldDescriptorProto_TYPE_INT64_enum_value = 3 + FieldDescriptorProto_TYPE_UINT64_enum_value = 4 + FieldDescriptorProto_TYPE_INT32_enum_value = 5 + FieldDescriptorProto_TYPE_FIXED64_enum_value = 6 + FieldDescriptorProto_TYPE_FIXED32_enum_value = 7 + FieldDescriptorProto_TYPE_BOOL_enum_value = 8 + FieldDescriptorProto_TYPE_STRING_enum_value = 9 + FieldDescriptorProto_TYPE_GROUP_enum_value = 10 + FieldDescriptorProto_TYPE_MESSAGE_enum_value = 11 + FieldDescriptorProto_TYPE_BYTES_enum_value = 12 + FieldDescriptorProto_TYPE_UINT32_enum_value = 13 + FieldDescriptorProto_TYPE_ENUM_enum_value = 14 + FieldDescriptorProto_TYPE_SFIXED32_enum_value = 15 + FieldDescriptorProto_TYPE_SFIXED64_enum_value = 16 + FieldDescriptorProto_TYPE_SINT32_enum_value = 17 + FieldDescriptorProto_TYPE_SINT64_enum_value = 18 +) + // Full and short names for google.protobuf.FieldDescriptorProto.Label. const ( FieldDescriptorProto_Label_enum_fullname = "google.protobuf.FieldDescriptorProto.Label" FieldDescriptorProto_Label_enum_name = "Label" ) +// Enum values for google.protobuf.FieldDescriptorProto.Label. +const ( + FieldDescriptorProto_LABEL_OPTIONAL_enum_value = 1 + FieldDescriptorProto_LABEL_REPEATED_enum_value = 3 + FieldDescriptorProto_LABEL_REQUIRED_enum_value = 2 +) + // Names for google.protobuf.OneofDescriptorProto. const ( OneofDescriptorProto_message_name protoreflect.Name = "OneofDescriptorProto" @@ -474,7 +524,6 @@ const ( FileOptions_CcGenericServices_field_name protoreflect.Name = "cc_generic_services" FileOptions_JavaGenericServices_field_name protoreflect.Name = "java_generic_services" FileOptions_PyGenericServices_field_name protoreflect.Name = "py_generic_services" - FileOptions_PhpGenericServices_field_name protoreflect.Name = "php_generic_services" FileOptions_Deprecated_field_name protoreflect.Name = "deprecated" FileOptions_CcEnableArenas_field_name protoreflect.Name = "cc_enable_arenas" FileOptions_ObjcClassPrefix_field_name protoreflect.Name = "objc_class_prefix" @@ -497,7 +546,6 @@ const ( FileOptions_CcGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_generic_services" FileOptions_JavaGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generic_services" FileOptions_PyGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.py_generic_services" - FileOptions_PhpGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_generic_services" FileOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.deprecated" FileOptions_CcEnableArenas_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_enable_arenas" FileOptions_ObjcClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.objc_class_prefix" @@ -523,7 +571,6 @@ const ( FileOptions_CcGenericServices_field_number protoreflect.FieldNumber = 16 FileOptions_JavaGenericServices_field_number protoreflect.FieldNumber = 17 FileOptions_PyGenericServices_field_number protoreflect.FieldNumber = 18 - FileOptions_PhpGenericServices_field_number protoreflect.FieldNumber = 42 FileOptions_Deprecated_field_number protoreflect.FieldNumber = 23 FileOptions_CcEnableArenas_field_number protoreflect.FieldNumber = 31 FileOptions_ObjcClassPrefix_field_number protoreflect.FieldNumber = 36 @@ -543,6 +590,13 @@ const ( FileOptions_OptimizeMode_enum_name = 
"OptimizeMode" ) +// Enum values for google.protobuf.FileOptions.OptimizeMode. +const ( + FileOptions_SPEED_enum_value = 1 + FileOptions_CODE_SIZE_enum_value = 2 + FileOptions_LITE_RUNTIME_enum_value = 3 +) + // Names for google.protobuf.MessageOptions. const ( MessageOptions_message_name protoreflect.Name = "MessageOptions" @@ -639,24 +693,59 @@ const ( FieldOptions_CType_enum_name = "CType" ) +// Enum values for google.protobuf.FieldOptions.CType. +const ( + FieldOptions_STRING_enum_value = 0 + FieldOptions_CORD_enum_value = 1 + FieldOptions_STRING_PIECE_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.JSType. const ( FieldOptions_JSType_enum_fullname = "google.protobuf.FieldOptions.JSType" FieldOptions_JSType_enum_name = "JSType" ) +// Enum values for google.protobuf.FieldOptions.JSType. +const ( + FieldOptions_JS_NORMAL_enum_value = 0 + FieldOptions_JS_STRING_enum_value = 1 + FieldOptions_JS_NUMBER_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.OptionRetention. const ( FieldOptions_OptionRetention_enum_fullname = "google.protobuf.FieldOptions.OptionRetention" FieldOptions_OptionRetention_enum_name = "OptionRetention" ) +// Enum values for google.protobuf.FieldOptions.OptionRetention. +const ( + FieldOptions_RETENTION_UNKNOWN_enum_value = 0 + FieldOptions_RETENTION_RUNTIME_enum_value = 1 + FieldOptions_RETENTION_SOURCE_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.OptionTargetType. const ( FieldOptions_OptionTargetType_enum_fullname = "google.protobuf.FieldOptions.OptionTargetType" FieldOptions_OptionTargetType_enum_name = "OptionTargetType" ) +// Enum values for google.protobuf.FieldOptions.OptionTargetType. +const ( + FieldOptions_TARGET_TYPE_UNKNOWN_enum_value = 0 + FieldOptions_TARGET_TYPE_FILE_enum_value = 1 + FieldOptions_TARGET_TYPE_EXTENSION_RANGE_enum_value = 2 + FieldOptions_TARGET_TYPE_MESSAGE_enum_value = 3 + FieldOptions_TARGET_TYPE_FIELD_enum_value = 4 + FieldOptions_TARGET_TYPE_ONEOF_enum_value = 5 + FieldOptions_TARGET_TYPE_ENUM_enum_value = 6 + FieldOptions_TARGET_TYPE_ENUM_ENTRY_enum_value = 7 + FieldOptions_TARGET_TYPE_SERVICE_enum_value = 8 + FieldOptions_TARGET_TYPE_METHOD_enum_value = 9 +) + // Names for google.protobuf.FieldOptions.EditionDefault. const ( FieldOptions_EditionDefault_message_name protoreflect.Name = "EditionDefault" @@ -813,6 +902,13 @@ const ( MethodOptions_IdempotencyLevel_enum_name = "IdempotencyLevel" ) +// Enum values for google.protobuf.MethodOptions.IdempotencyLevel. +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN_enum_value = 0 + MethodOptions_NO_SIDE_EFFECTS_enum_value = 1 + MethodOptions_IDEMPOTENT_enum_value = 2 +) + // Names for google.protobuf.UninterpretedOption. const ( UninterpretedOption_message_name protoreflect.Name = "UninterpretedOption" @@ -909,36 +1005,79 @@ const ( FeatureSet_FieldPresence_enum_name = "FieldPresence" ) +// Enum values for google.protobuf.FeatureSet.FieldPresence. +const ( + FeatureSet_FIELD_PRESENCE_UNKNOWN_enum_value = 0 + FeatureSet_EXPLICIT_enum_value = 1 + FeatureSet_IMPLICIT_enum_value = 2 + FeatureSet_LEGACY_REQUIRED_enum_value = 3 +) + // Full and short names for google.protobuf.FeatureSet.EnumType. const ( FeatureSet_EnumType_enum_fullname = "google.protobuf.FeatureSet.EnumType" FeatureSet_EnumType_enum_name = "EnumType" ) +// Enum values for google.protobuf.FeatureSet.EnumType. 
+const ( + FeatureSet_ENUM_TYPE_UNKNOWN_enum_value = 0 + FeatureSet_OPEN_enum_value = 1 + FeatureSet_CLOSED_enum_value = 2 +) + // Full and short names for google.protobuf.FeatureSet.RepeatedFieldEncoding. const ( FeatureSet_RepeatedFieldEncoding_enum_fullname = "google.protobuf.FeatureSet.RepeatedFieldEncoding" FeatureSet_RepeatedFieldEncoding_enum_name = "RepeatedFieldEncoding" ) +// Enum values for google.protobuf.FeatureSet.RepeatedFieldEncoding. +const ( + FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN_enum_value = 0 + FeatureSet_PACKED_enum_value = 1 + FeatureSet_EXPANDED_enum_value = 2 +) + // Full and short names for google.protobuf.FeatureSet.Utf8Validation. const ( FeatureSet_Utf8Validation_enum_fullname = "google.protobuf.FeatureSet.Utf8Validation" FeatureSet_Utf8Validation_enum_name = "Utf8Validation" ) +// Enum values for google.protobuf.FeatureSet.Utf8Validation. +const ( + FeatureSet_UTF8_VALIDATION_UNKNOWN_enum_value = 0 + FeatureSet_VERIFY_enum_value = 2 + FeatureSet_NONE_enum_value = 3 +) + // Full and short names for google.protobuf.FeatureSet.MessageEncoding. const ( FeatureSet_MessageEncoding_enum_fullname = "google.protobuf.FeatureSet.MessageEncoding" FeatureSet_MessageEncoding_enum_name = "MessageEncoding" ) +// Enum values for google.protobuf.FeatureSet.MessageEncoding. +const ( + FeatureSet_MESSAGE_ENCODING_UNKNOWN_enum_value = 0 + FeatureSet_LENGTH_PREFIXED_enum_value = 1 + FeatureSet_DELIMITED_enum_value = 2 +) + // Full and short names for google.protobuf.FeatureSet.JsonFormat. const ( FeatureSet_JsonFormat_enum_fullname = "google.protobuf.FeatureSet.JsonFormat" FeatureSet_JsonFormat_enum_name = "JsonFormat" ) +// Enum values for google.protobuf.FeatureSet.JsonFormat. +const ( + FeatureSet_JSON_FORMAT_UNKNOWN_enum_value = 0 + FeatureSet_ALLOW_enum_value = 1 + FeatureSet_LEGACY_BEST_EFFORT_enum_value = 2 +) + // Names for google.protobuf.FeatureSetDefaults. const ( FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults" @@ -1085,3 +1224,10 @@ const ( GeneratedCodeInfo_Annotation_Semantic_enum_fullname = "google.protobuf.GeneratedCodeInfo.Annotation.Semantic" GeneratedCodeInfo_Annotation_Semantic_enum_name = "Semantic" ) + +// Enum values for google.protobuf.GeneratedCodeInfo.Annotation.Semantic. +const ( + GeneratedCodeInfo_Annotation_NONE_enum_value = 0 + GeneratedCodeInfo_Annotation_SET_enum_value = 1 + GeneratedCodeInfo_Annotation_ALIAS_enum_value = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go new file mode 100644 index 000000000..fd9015e8e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_reflect_protodesc_proto_go_features_proto = "reflect/protodesc/proto/go_features.proto" + +// Names for google.protobuf.GoFeatures. +const ( + GoFeatures_message_name protoreflect.Name = "GoFeatures" + GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" +) + +// Field names for google.protobuf.GoFeatures. 
+const ( + GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" + + GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" +) + +// Field numbers for google.protobuf.GoFeatures. +const ( + GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go index 1a38944b2..ad6f80c46 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go @@ -18,6 +18,11 @@ const ( NullValue_enum_name = "NullValue" ) +// Enum values for google.protobuf.NullValue. +const ( + NullValue_NULL_VALUE_enum_value = 0 +) + // Names for google.protobuf.Struct. const ( Struct_message_name protoreflect.Name = "Struct" diff --git a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go index e0f75fea0..49bc73e25 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go @@ -18,6 +18,13 @@ const ( Syntax_enum_name = "Syntax" ) +// Enum values for google.protobuf.Syntax. +const ( + Syntax_SYNTAX_PROTO2_enum_value = 0 + Syntax_SYNTAX_PROTO3_enum_value = 1 + Syntax_SYNTAX_EDITIONS_enum_value = 2 +) + // Names for google.protobuf.Type. const ( Type_message_name protoreflect.Name = "Type" @@ -105,12 +112,43 @@ const ( Field_Kind_enum_name = "Kind" ) +// Enum values for google.protobuf.Field.Kind. +const ( + Field_TYPE_UNKNOWN_enum_value = 0 + Field_TYPE_DOUBLE_enum_value = 1 + Field_TYPE_FLOAT_enum_value = 2 + Field_TYPE_INT64_enum_value = 3 + Field_TYPE_UINT64_enum_value = 4 + Field_TYPE_INT32_enum_value = 5 + Field_TYPE_FIXED64_enum_value = 6 + Field_TYPE_FIXED32_enum_value = 7 + Field_TYPE_BOOL_enum_value = 8 + Field_TYPE_STRING_enum_value = 9 + Field_TYPE_GROUP_enum_value = 10 + Field_TYPE_MESSAGE_enum_value = 11 + Field_TYPE_BYTES_enum_value = 12 + Field_TYPE_UINT32_enum_value = 13 + Field_TYPE_ENUM_enum_value = 14 + Field_TYPE_SFIXED32_enum_value = 15 + Field_TYPE_SFIXED64_enum_value = 16 + Field_TYPE_SINT32_enum_value = 17 + Field_TYPE_SINT64_enum_value = 18 +) + // Full and short names for google.protobuf.Field.Cardinality. const ( Field_Cardinality_enum_fullname = "google.protobuf.Field.Cardinality" Field_Cardinality_enum_name = "Cardinality" ) +// Enum values for google.protobuf.Field.Cardinality. +const ( + Field_CARDINALITY_UNKNOWN_enum_value = 0 + Field_CARDINALITY_OPTIONAL_enum_value = 1 + Field_CARDINALITY_REQUIRED_enum_value = 2 + Field_CARDINALITY_REPEATED_enum_value = 3 +) + // Names for google.protobuf.Enum. 
const ( Enum_message_name protoreflect.Name = "Enum" diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index e74cefdc5..2b8f122c2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -21,26 +21,18 @@ type extensionFieldInfo struct { validation validationInfo } -var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo - func getExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { if xi, ok := xt.(*ExtensionInfo); ok { xi.lazyInit() return xi.info } - return legacyLoadExtensionFieldInfo(xt) -} - -// legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt. -func legacyLoadExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { - if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok { - return xi.(*extensionFieldInfo) - } - e := makeExtensionFieldInfo(xt.TypeDescriptor()) - if e, ok := legacyMessageTypeCache.LoadOrStore(xt, e); ok { - return e.(*extensionFieldInfo) - } - return e + // Ideally we'd cache the resulting *extensionFieldInfo so we don't have to + // recompute this metadata repeatedly. But without support for something like + // weak references, such a cache would pin temporary values (like dynamic + // extension types, constructed for the duration of a user request) to the + // heap forever, causing memory usage of the cache to grow unbounded. + // See discussion in https://github.com/golang/protobuf/issues/1521. + return makeExtensionFieldInfo(xt.TypeDescriptor()) } func makeExtensionFieldInfo(xd protoreflect.ExtensionDescriptor) *extensionFieldInfo { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go index 576dcf3aa..13077751e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go @@ -197,7 +197,7 @@ func fieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, return getMessageInfo(ft), makeMessageFieldCoder(fd, ft) case fd.Kind() == protoreflect.GroupKind: return getMessageInfo(ft), makeGroupFieldCoder(fd, ft) - case fd.Syntax() == protoreflect.Proto3 && fd.ContainingOneof() == nil: + case !fd.HasPresence() && fd.ContainingOneof() == nil: // Populated oneof fields always encode even if set to the zero value, // which normally are not encoded in proto3. 
switch fd.Kind() { diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 5e736c60e..986322b19 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -538,6 +538,6 @@ func isZero(v reflect.Value) bool { } return true default: - panic(&reflect.ValueError{"reflect.Value.IsZero", v.Kind()}) + panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()}) } } diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings.go b/vendor/google.golang.org/protobuf/internal/strs/strings.go index 0b74e7658..a6e7df244 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings.go @@ -17,7 +17,7 @@ import ( // EnforceUTF8 reports whether to enforce strict UTF-8 validation. func EnforceUTF8(fd protoreflect.FieldDescriptor) bool { - if flags.ProtoLegacy { + if flags.ProtoLegacy || fd.Syntax() == protoreflect.Editions { if fd, ok := fd.(interface{ EnforceUTF8() bool }); ok { return fd.EnforceUTF8() } diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index d8f48faff..a50fcfb49 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,7 +51,7 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 32 + Minor = 33 Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index aff6fd490..b3278163c 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -28,6 +28,7 @@ func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProt opts = proto.Clone(opts).(*descriptorpb.EnumOptions) e.L2.Options = func() protoreflect.ProtoMessage { return opts } } + e.L1.EditionFeatures = mergeEditionFeatures(parent, ed.GetOptions().GetFeatures()) for _, s := range ed.GetReservedName() { e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s)) } @@ -68,6 +69,9 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { return nil, err } + if m.Base.L0.ParentFile.Syntax() == protoreflect.Editions { + m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures()) + } if opts := md.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.MessageOptions) m.L2.Options = func() protoreflect.ProtoMessage { return opts } @@ -114,6 +118,27 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt return ms, nil } +// canBePacked returns whether the field can use packed encoding: +// https://protobuf.dev/programming-guides/encoding/#packed +func canBePacked(fd *descriptorpb.FieldDescriptorProto) bool { + if fd.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + return false // not a repeated field + } + + switch protoreflect.Kind(fd.GetType()) { + case protoreflect.MessageKind, protoreflect.GroupKind: + return false // not a scalar type field + + case protoreflect.StringKind, protoreflect.BytesKind: + // 
string and bytes can explicitly not be declared as packed, + // see https://protobuf.dev/programming-guides/encoding/#packed + return false + + default: + return true + } +} + func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (fs []filedesc.Field, err error) { fs = make([]filedesc.Field, len(fds)) // allocate up-front to ensure stable pointers for i, fd := range fds { @@ -139,12 +164,16 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc } if f.Base.L0.ParentFile.Syntax() == protoreflect.Editions { - f.L1.Presence = resolveFeatureHasFieldPresence(f.Base.L0.ParentFile, fd) + f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures()) + + if f.L1.EditionFeatures.IsLegacyRequired { + f.L1.Cardinality = protoreflect.Required + } // We reuse the existing field because the old option `[packed = // true]` is mutually exclusive with the editions feature. - if fd.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + if canBePacked(fd) { f.L1.HasPacked = true - f.L1.IsPacked = resolveFeatureRepeatedFieldEncodingPacked(f.Base.L0.ParentFile, fd) + f.L1.IsPacked = f.L1.EditionFeatures.IsPacked } // We pretend this option is always explicitly set because the only @@ -155,9 +184,9 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc // requested from the descriptor). // In proto2/proto3 syntax HasEnforceUTF8 might be false. f.L1.HasEnforceUTF8 = true - f.L1.EnforceUTF8 = resolveFeatureEnforceUTF8(f.Base.L0.ParentFile, fd) + f.L1.EnforceUTF8 = f.L1.EditionFeatures.IsUTF8Validated - if f.L1.Kind == protoreflect.MessageKind && resolveFeatureDelimitedEncoding(f.Base.L0.ParentFile, fd) { + if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded { f.L1.Kind = protoreflect.GroupKind } } @@ -175,6 +204,9 @@ func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDesc if opts := od.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.OneofOptions) o.L1.Options = func() protoreflect.ProtoMessage { return opts } + if parent.Syntax() == protoreflect.Editions { + o.L1.EditionFeatures = mergeEditionFeatures(parent, opts.GetFeatures()) + } } } return os, nil diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index 27d7e3501..254ca5854 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -276,8 +276,8 @@ func unmarshalDefault(s string, fd protoreflect.FieldDescriptor, allowUnresolvab } else if err != nil { return v, ev, err } - if fd.Syntax() == protoreflect.Proto3 { - return v, ev, errors.New("cannot be specified under proto3 semantics") + if !fd.HasPresence() { + return v, ev, errors.New("cannot be specified with implicit field presence") } if fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind || fd.Cardinality() == protoreflect.Repeated { return v, ev, errors.New("cannot be specified on composite types") diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index 9af1d5648..e4dcaf876 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ 
-107,7 +107,7 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if isMessageSet && !flags.ProtoLegacy { return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName()) } - if isMessageSet && (m.Syntax() != protoreflect.Proto2 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { + if isMessageSet && (m.Syntax() == protoreflect.Proto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { return errors.New("message %q is an invalid proto1 MessageSet", m.FullName()) } if m.Syntax() == protoreflect.Proto3 { @@ -314,8 +314,8 @@ func checkValidGroup(fd protoreflect.FieldDescriptor) error { switch { case fd.Kind() != protoreflect.GroupKind: return nil - case fd.Syntax() != protoreflect.Proto2: - return errors.New("invalid under proto2 semantics") + case fd.Syntax() == protoreflect.Proto3: + return errors.New("invalid under proto3 semantics") case md == nil || md.IsPlaceholder(): return errors.New("message must be resolvable") case fd.FullName().Parent() != md.FullName().Parent(): diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index 7352926ca..2a6b29d17 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -5,14 +5,16 @@ package protodesc import ( - _ "embed" "fmt" "os" "sync" + "google.golang.org/protobuf/internal/editiondefaults" "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" + gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" ) const ( @@ -20,14 +22,12 @@ const ( SupportedEditionsMaximum = descriptorpb.Edition_EDITION_2023 ) -//go:embed editions_defaults.binpb -var binaryEditionDefaults []byte var defaults = &descriptorpb.FeatureSetDefaults{} var defaultsCacheMu sync.Mutex var defaultsCache = make(map[filedesc.Edition]*descriptorpb.FeatureSet) func init() { - err := proto.Unmarshal(binaryEditionDefaults, defaults) + err := proto.Unmarshal(editiondefaults.Defaults, defaults) if err != nil { fmt.Fprintf(os.Stderr, "unmarshal editions defaults: %v\n", err) os.Exit(1) @@ -83,95 +83,66 @@ func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet { return fs } -func resolveFeatureHasFieldPresence(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { - fs := fieldDesc.GetOptions().GetFeatures() - if fs == nil || fs.FieldPresence == nil { - return fileDesc.L1.EditionFeatures.IsFieldPresence +// mergeEditionFeatures merges the parent and child feature sets. This function +// should be used when initializing Go descriptors from descriptor protos which +// is why the parent is a filedesc.EditionsFeatures (Go representation) while +// the child is a descriptorproto.FeatureSet (protoc representation). +// Any feature set by the child overwrites what is set by the parent. 
+func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorpb.FeatureSet) filedesc.EditionFeatures { + var parentFS filedesc.EditionFeatures + switch p := parentDesc.(type) { + case *filedesc.File: + parentFS = p.L1.EditionFeatures + case *filedesc.Message: + parentFS = p.L1.EditionFeatures + default: + panic(fmt.Sprintf("unknown parent type %T", parentDesc)) } - return fs.GetFieldPresence() == descriptorpb.FeatureSet_LEGACY_REQUIRED || - fs.GetFieldPresence() == descriptorpb.FeatureSet_EXPLICIT -} - -func resolveFeatureRepeatedFieldEncodingPacked(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { - fs := fieldDesc.GetOptions().GetFeatures() - if fs == nil || fs.RepeatedFieldEncoding == nil { - return fileDesc.L1.EditionFeatures.IsPacked + if child == nil { + return parentFS } - return fs.GetRepeatedFieldEncoding() == descriptorpb.FeatureSet_PACKED -} - -func resolveFeatureEnforceUTF8(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { - fs := fieldDesc.GetOptions().GetFeatures() - if fs == nil || fs.Utf8Validation == nil { - return fileDesc.L1.EditionFeatures.IsUTF8Validated + if fp := child.FieldPresence; fp != nil { + parentFS.IsFieldPresence = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED || + *fp == descriptorpb.FeatureSet_EXPLICIT + parentFS.IsLegacyRequired = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED } - return fs.GetUtf8Validation() == descriptorpb.FeatureSet_VERIFY -} - -func resolveFeatureDelimitedEncoding(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { - fs := fieldDesc.GetOptions().GetFeatures() - if fs == nil || fs.MessageEncoding == nil { - return fileDesc.L1.EditionFeatures.IsDelimitedEncoded + if et := child.EnumType; et != nil { + parentFS.IsOpenEnum = *et == descriptorpb.FeatureSet_OPEN } - return fs.GetMessageEncoding() == descriptorpb.FeatureSet_DELIMITED -} -// initFileDescFromFeatureSet initializes editions related fields in fd based -// on fs. If fs is nil it is assumed to be an empty featureset and all fields -// will be initialized with the appropriate default. fd.L1.Edition must be set -// before calling this function. 
-func initFileDescFromFeatureSet(fd *filedesc.File, fs *descriptorpb.FeatureSet) { - dfs := getFeatureSetFor(fd.L1.Edition) - if fs == nil { - fs = &descriptorpb.FeatureSet{} + if rfe := child.RepeatedFieldEncoding; rfe != nil { + parentFS.IsPacked = *rfe == descriptorpb.FeatureSet_PACKED } - var fieldPresence descriptorpb.FeatureSet_FieldPresence - if fp := fs.FieldPresence; fp != nil { - fieldPresence = *fp - } else { - fieldPresence = *dfs.FieldPresence - } - fd.L1.EditionFeatures.IsFieldPresence = fieldPresence == descriptorpb.FeatureSet_LEGACY_REQUIRED || - fieldPresence == descriptorpb.FeatureSet_EXPLICIT - - var enumType descriptorpb.FeatureSet_EnumType - if et := fs.EnumType; et != nil { - enumType = *et - } else { - enumType = *dfs.EnumType + if utf8val := child.Utf8Validation; utf8val != nil { + parentFS.IsUTF8Validated = *utf8val == descriptorpb.FeatureSet_VERIFY } - fd.L1.EditionFeatures.IsOpenEnum = enumType == descriptorpb.FeatureSet_OPEN - var respeatedFieldEncoding descriptorpb.FeatureSet_RepeatedFieldEncoding - if rfe := fs.RepeatedFieldEncoding; rfe != nil { - respeatedFieldEncoding = *rfe - } else { - respeatedFieldEncoding = *dfs.RepeatedFieldEncoding + if me := child.MessageEncoding; me != nil { + parentFS.IsDelimitedEncoded = *me == descriptorpb.FeatureSet_DELIMITED } - fd.L1.EditionFeatures.IsPacked = respeatedFieldEncoding == descriptorpb.FeatureSet_PACKED - var isUTF8Validated descriptorpb.FeatureSet_Utf8Validation - if utf8val := fs.Utf8Validation; utf8val != nil { - isUTF8Validated = *utf8val - } else { - isUTF8Validated = *dfs.Utf8Validation + if jf := child.JsonFormat; jf != nil { + parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW } - fd.L1.EditionFeatures.IsUTF8Validated = isUTF8Validated == descriptorpb.FeatureSet_VERIFY - var messageEncoding descriptorpb.FeatureSet_MessageEncoding - if me := fs.MessageEncoding; me != nil { - messageEncoding = *me - } else { - messageEncoding = *dfs.MessageEncoding + if goFeatures, ok := proto.GetExtension(child, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil { + if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil { + parentFS.GenerateLegacyUnmarshalJSON = *luje + } } - fd.L1.EditionFeatures.IsDelimitedEncoded = messageEncoding == descriptorpb.FeatureSet_DELIMITED - var jsonFormat descriptorpb.FeatureSet_JsonFormat - if jf := fs.JsonFormat; jf != nil { - jsonFormat = *jf - } else { - jsonFormat = *dfs.JsonFormat - } - fd.L1.EditionFeatures.IsJSONCompliant = jsonFormat == descriptorpb.FeatureSet_ALLOW + return parentFS +} + +// initFileDescFromFeatureSet initializes editions related fields in fd based +// on fs. If fs is nil it is assumed to be an empty featureset and all fields +// will be initialized with the appropriate default. fd.L1.Edition must be set +// before calling this function. 
+func initFileDescFromFeatureSet(fd *filedesc.File, fs *descriptorpb.FeatureSet) { + dfs := getFeatureSetFor(fd.L1.Edition) + // initialize the featureset with the defaults + fd.L1.EditionFeatures = mergeEditionFeatures(fd, dfs) + // overwrite any options explicitly specified + fd.L1.EditionFeatures = mergeEditionFeatures(fd, fs) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index ec6572dfd..00b01fbd8 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -175,6 +175,8 @@ func (s Syntax) String() string { return "proto2" case Proto3: return "proto3" + case Editions: + return "editions" default: return fmt.Sprintf("", s) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 0c045db6a..7dcc2ff09 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -160,8 +160,6 @@ func (p *SourcePath) appendFileOptions(b []byte) []byte { b = p.appendSingularField(b, "java_generic_services", nil) case 18: b = p.appendSingularField(b, "py_generic_services", nil) - case 42: - b = p.appendSingularField(b, "php_generic_services", nil) case 23: b = p.appendSingularField(b, "deprecated", nil) case 31: diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 38daa858d..78624cf60 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -64,6 +64,7 @@ const ( // should not be depended on, but they will always be time-ordered for easy // comparison. Edition_EDITION_2023 Edition = 1000 + Edition_EDITION_2024 Edition = 1001 // Placeholder editions for testing feature resolution. These should not be // used or relyed on outside of tests. Edition_EDITION_1_TEST_ONLY Edition = 1 @@ -71,31 +72,39 @@ const ( Edition_EDITION_99997_TEST_ONLY Edition = 99997 Edition_EDITION_99998_TEST_ONLY Edition = 99998 Edition_EDITION_99999_TEST_ONLY Edition = 99999 + // Placeholder for specifying unbounded edition support. This should only + // ever be used by plugins that can expect to never require any changes to + // support a new edition. + Edition_EDITION_MAX Edition = 2147483647 ) // Enum value maps for Edition. 
var ( Edition_name = map[int32]string{ - 0: "EDITION_UNKNOWN", - 998: "EDITION_PROTO2", - 999: "EDITION_PROTO3", - 1000: "EDITION_2023", - 1: "EDITION_1_TEST_ONLY", - 2: "EDITION_2_TEST_ONLY", - 99997: "EDITION_99997_TEST_ONLY", - 99998: "EDITION_99998_TEST_ONLY", - 99999: "EDITION_99999_TEST_ONLY", + 0: "EDITION_UNKNOWN", + 998: "EDITION_PROTO2", + 999: "EDITION_PROTO3", + 1000: "EDITION_2023", + 1001: "EDITION_2024", + 1: "EDITION_1_TEST_ONLY", + 2: "EDITION_2_TEST_ONLY", + 99997: "EDITION_99997_TEST_ONLY", + 99998: "EDITION_99998_TEST_ONLY", + 99999: "EDITION_99999_TEST_ONLY", + 2147483647: "EDITION_MAX", } Edition_value = map[string]int32{ "EDITION_UNKNOWN": 0, "EDITION_PROTO2": 998, "EDITION_PROTO3": 999, "EDITION_2023": 1000, + "EDITION_2024": 1001, "EDITION_1_TEST_ONLY": 1, "EDITION_2_TEST_ONLY": 2, "EDITION_99997_TEST_ONLY": 99997, "EDITION_99998_TEST_ONLY": 99998, "EDITION_99999_TEST_ONLY": 99999, + "EDITION_MAX": 2147483647, } ) @@ -954,21 +963,21 @@ type FeatureSet_Utf8Validation int32 const ( FeatureSet_UTF8_VALIDATION_UNKNOWN FeatureSet_Utf8Validation = 0 - FeatureSet_NONE FeatureSet_Utf8Validation = 1 FeatureSet_VERIFY FeatureSet_Utf8Validation = 2 + FeatureSet_NONE FeatureSet_Utf8Validation = 3 ) // Enum value maps for FeatureSet_Utf8Validation. var ( FeatureSet_Utf8Validation_name = map[int32]string{ 0: "UTF8_VALIDATION_UNKNOWN", - 1: "NONE", 2: "VERIFY", + 3: "NONE", } FeatureSet_Utf8Validation_value = map[string]int32{ "UTF8_VALIDATION_UNKNOWN": 0, - "NONE": 1, "VERIFY": 2, + "NONE": 3, } ) @@ -1643,12 +1652,12 @@ type FieldDescriptorProto struct { // If true, this is a proto3 "optional". When a proto3 field is optional, it // tracks presence regardless of field type. // - // When proto3_optional is true, this field must be belong to a oneof to - // signal to old proto3 clients that presence is tracked for this field. This - // oneof is known as a "synthetic" oneof, and this field must be its sole - // member (each proto3 optional field gets its own synthetic oneof). Synthetic - // oneofs exist in the descriptor only, and do not generate any API. Synthetic - // oneofs must be ordered after all "real" oneofs. + // When proto3_optional is true, this field must belong to a oneof to signal + // to old proto3 clients that presence is tracked for this field. This oneof + // is known as a "synthetic" oneof, and this field must be its sole member + // (each proto3 optional field gets its own synthetic oneof). Synthetic oneofs + // exist in the descriptor only, and do not generate any API. Synthetic oneofs + // must be ordered after all "real" oneofs. // // For message fields, proto3_optional doesn't create any semantic change, // since non-repeated message fields always track presence. However it still @@ -2195,7 +2204,6 @@ type FileOptions struct { CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` - PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` // Is this file deprecated? 
// Depending on the target platform, this can emit Deprecated annotations // for everything in the file, or it will be completely ignored; in the very @@ -2244,7 +2252,6 @@ const ( Default_FileOptions_CcGenericServices = bool(false) Default_FileOptions_JavaGenericServices = bool(false) Default_FileOptions_PyGenericServices = bool(false) - Default_FileOptions_PhpGenericServices = bool(false) Default_FileOptions_Deprecated = bool(false) Default_FileOptions_CcEnableArenas = bool(true) ) @@ -2352,13 +2359,6 @@ func (x *FileOptions) GetPyGenericServices() bool { return Default_FileOptions_PyGenericServices } -func (x *FileOptions) GetPhpGenericServices() bool { - if x != nil && x.PhpGenericServices != nil { - return *x.PhpGenericServices - } - return Default_FileOptions_PhpGenericServices -} - func (x *FileOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -2472,10 +2472,6 @@ type MessageOptions struct { // for the message, or it will be completely ignored; in the very least, // this is a formalization for deprecating messages. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - // // Whether the message is an automatically generated map entry type for the // maps field. // @@ -2496,6 +2492,10 @@ type MessageOptions struct { // use a native map in the target language to hold the keys and values. // The reflection APIs in such implementations still need to work as // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` // Enable the legacy handling of JSON field name conflicts. This lowercases // and strips underscored from the fields before comparison in proto3 only. @@ -2655,19 +2655,11 @@ type FieldOptions struct { // call from multiple threads concurrently, while non-const methods continue // to require exclusive access. // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - // - // As of May 2022, lazy verifies the contents of the byte stream during - // parsing. An invalid byte stream will cause the overall parsing to fail. + // Note that lazy message fields are still eagerly verified to check + // ill-formed wireformat or missing required fields. Calling IsInitialized() + // on the outer message would fail if the inner message has missing required + // fields. Failed verification would result in parsing failure (except when + // uninitialized messages are acceptable). 
Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` // unverified_lazy does no correctness checks on the byte stream. This should // only be used where lazy with verification is prohibitive for performance @@ -4104,7 +4096,7 @@ type SourceCodeInfo_Location struct { // location. // // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition occurs. + // the root FileDescriptorProto to the place where the definition appears. // For example, this path: // // [ 4, 3, 2, 7, 1 ] @@ -4451,7 +4443,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xc7, 0x04, 0x0a, 0x15, 0x45, 0x78, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, @@ -4468,337 +4460,355 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x12, 0x68, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x73, 0x12, 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76, - 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94, 0x01, 0x0a, 0x0b, - 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a, 0x04, 0x08, 0x04, - 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 
0x74, 0x69, - 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41, - 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, - 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, - 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, - 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, - 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, - 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, - 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, - 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, - 0x36, 0x34, 
0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, - 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, - 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, - 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, - 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, - 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, - 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, - 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, - 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, - 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, - 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, - 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, - 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, + 0x01, 0x02, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x94, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, + 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 
0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, + 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, + 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, + 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, + 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, + 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, + 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 
0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, + 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, + 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, + 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, + 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, + 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, + 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, + 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, + 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, + 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, + 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, + 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, + 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, + 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, - 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, - 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 
0x6f, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, - 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, + 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, - 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 
0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0xca, - 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, - 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, - 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, - 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, - 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, - 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, - 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, - 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 
0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, - 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, - 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, - 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, - 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, - 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, - 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, - 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, - 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, - 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, - 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, - 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, - 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, - 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, - 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, - 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, - 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, - 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, - 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03, 0x0a, 0x0e, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, - 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, - 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, - 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, - 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, - 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, - 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, - 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, - 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 
0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, - 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, - 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, - 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, - 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, + 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, - 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, - 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, - 0x10, 0x0a, 0x22, 0xad, 0x0a, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, - 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, - 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, + 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, + 0x75, 0x6d, 
0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, + 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, + 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 
0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x22, 0x97, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, + 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, + 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, + 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, + 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, + 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, + 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, + 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, + 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, + 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, + 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, + 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, + 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, + 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, + 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, + 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 
0x69, 0x63, 0x65, 0x73, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, + 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, + 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, + 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, + 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, + 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, + 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, + 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, + 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, + 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 
0x53, 0x50, + 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, + 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, + 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, + 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03, + 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, + 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, + 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, + 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, + 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, + 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, + 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, + 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, + 0x08, 0x09, 0x10, 0x0a, 0x22, 0xad, 0x0a, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 
0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, + 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, + 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, + 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, + 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, + 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, + 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, + 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, + 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, + 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, + 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, + 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, - 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, - 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, - 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, - 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, - 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 
0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, - 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, - 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, - 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, - 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, - 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 
0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x2f, 0x0a, - 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, - 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, - 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, - 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, - 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, - 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, - 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, - 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, - 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, - 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, - 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, - 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, - 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, - 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, - 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, - 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, - 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, - 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, - 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, - 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 
0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, + 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, + 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, + 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, + 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, + 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, + 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, + 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, + 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, + 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, + 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, + 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, + 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, + 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, + 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, + 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 
0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, + 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, + 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, + 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, + 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, + 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, + 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, + 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, + 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, + 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, + 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, + 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, + 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, + 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, + 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, + 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, @@ -4807,276 +4817,258 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, - 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, - 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, - 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, - 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, - 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, - 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, - 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 
0x64, 0x65, - 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, - 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, - 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, - 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, - 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, - 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, - 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, - 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, - 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, - 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, - 0x61, 0x74, 
0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, + 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, - 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, - 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, - 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, - 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, - 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, - 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, - 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, - 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, - 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, - 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 
0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, - 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, - 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, - 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, - 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, - 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, - 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfc, 0x09, 0x0a, 0x0a, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x8b, 0x01, 0x0a, 0x0e, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x39, 0x88, - 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, - 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, - 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, - 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x50, - 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x66, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, - 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, 0x12, 0x06, - 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4f, 0x50, - 0x45, 0x4e, 0x18, 0xe7, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x92, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, - 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x42, 0x27, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, - 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, - 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 
0x07, 0x52, 0x15, 0x72, - 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x12, 0x78, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, - 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, 0x07, - 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0x52, 0x0e, - 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78, - 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, - 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x20, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, - 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, - 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7c, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, - 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, + 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, + 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, + 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x33, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, - 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, - 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, - 0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, - 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, - 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, 0x4c, 0x44, - 
0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, - 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x02, 0x12, - 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, - 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10, - 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, 0x56, 0x0a, - 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, - 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, - 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, - 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x50, - 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, - 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, 0x38, 0x5f, - 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, - 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x01, 0x12, 0x0a, - 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, - 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, - 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, - 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, - 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, - 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, - 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, - 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, - 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9, - 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, - 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xfe, 0x02, 0x0a, 0x12, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, - 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x52, 0x08, 0x64, 0x65, 
0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, - 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, - 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, - 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x1a, 0x87, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, - 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, + 0x80, 0x80, 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, + 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, + 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, + 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, + 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, + 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, + 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 
0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, + 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, + 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, + 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, + 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, + 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, + 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, + 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, + 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x0a, 0x0a, + 0x0a, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x8b, 0x01, 0x0a, 0x0e, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 
0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, + 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, + 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, + 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, + 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, + 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x66, 0x0a, 0x09, 0x65, 0x6e, 0x75, + 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, + 0x70, 0x65, 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, + 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, + 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x92, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, - 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, - 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, - 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, - 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, - 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, - 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, - 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, - 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, - 
0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, - 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, - 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, - 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, - 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, - 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, - 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, - 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, - 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xea, 0x01, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, - 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, - 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, - 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, - 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, - 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, - 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, - 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, - 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x39, 0x39, 0x39, 
0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, - 0x10, 0x9f, 0x8d, 0x06, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, - 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, - 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x27, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, + 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, + 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0x52, + 0x15, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x78, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, + 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x23, 0x88, 0x01, 0x01, + 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, + 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, + 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x78, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x20, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, + 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, + 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7c, 0x0a, 0x0b, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, + 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x33, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, + 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, + 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, + 0x01, 0x0a, 0x12, 0x05, 
0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0x52, 0x0a, 0x6a, 0x73, + 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, + 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, + 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, + 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, + 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, + 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, + 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, + 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, + 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, + 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, + 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, + 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, + 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x53, 0x0a, 0x0f, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, + 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, + 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, + 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, + 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, + 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, + 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, + 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, + 0x10, 0xe9, 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, 0x2a, 0x06, 0x08, 0xea, 0x07, + 0x10, 0xeb, 0x07, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, + 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xfe, 0x02, 0x0a, 0x12, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, + 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, + 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x1a, 0x87, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, + 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, + 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, + 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, + 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, + 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, + 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, + 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, + 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, + 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, + 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, + 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3a, 
0x0a, 0x19, 0x6c, 0x65, + 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, + 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, + 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, + 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, + 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, + 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, + 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, + 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, + 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, + 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, + 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, + 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0x92, 0x02, 0x0a, 0x07, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, + 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, + 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, + 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, + 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, + 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, + 
0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, + 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, + 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, + 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, + 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, + 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, } var ( diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go index 54ae1ab87..39b024b46 100644 --- a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go +++ b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go @@ -499,7 +499,7 @@ func isSet(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { return v.List().Len() > 0 case fd.ContainingOneof() != nil: return true - case fd.Syntax() == protoreflect.Proto3 && !fd.IsExtension(): + case !fd.HasPresence() && !fd.IsExtension(): switch fd.Kind() { case protoreflect.BoolKind: return v.Bool() diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go new file mode 100644 index 000000000..25de5ae00 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -0,0 +1,177 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2023 Google Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: reflect/protodesc/proto/go_features.proto + +package proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +type GoFeatures struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether or not to generate the deprecated UnmarshalJSON method for enums. 
+ LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"` +} + +func (x *GoFeatures) Reset() { + *x = GoFeatures{} + if protoimpl.UnsafeEnabled { + mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GoFeatures) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GoFeatures) ProtoMessage() {} + +func (x *GoFeatures) ProtoReflect() protoreflect.Message { + mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GoFeatures.ProtoReflect.Descriptor instead. +func (*GoFeatures) Descriptor() ([]byte, []int) { + return file_reflect_protodesc_proto_go_features_proto_rawDescGZIP(), []int{0} +} + +func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { + if x != nil && x.LegacyUnmarshalJsonEnum != nil { + return *x.LegacyUnmarshalJsonEnum + } + return false +} + +var file_reflect_protodesc_proto_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FeatureSet)(nil), + ExtensionType: (*GoFeatures)(nil), + Field: 1002, + Name: "google.protobuf.go", + Tag: "bytes,1002,opt,name=go", + Filename: "reflect/protodesc/proto/go_features.proto", + }, +} + +// Extension fields to descriptorpb.FeatureSet. +var ( + // optional google.protobuf.GoFeatures go = 1002; + E_Go = &file_reflect_protodesc_proto_go_features_proto_extTypes[0] +) + +var File_reflect_protodesc_proto_go_features_proto protoreflect.FileDescriptor + +var file_reflect_protodesc_proto_go_features_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x64, + 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x1a, 0x20, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, + 0x0a, 0x0a, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5c, 0x0a, 0x1a, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, + 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x42, 0x1f, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, 0x75, + 0x65, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, 0xe7, + 0x07, 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, + 0x61, 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x49, 0x0a, 0x02, 0x67, 0x6f, + 0x12, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 
0x75, 0x72, 0x65, + 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x64, 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, +} + +var ( + file_reflect_protodesc_proto_go_features_proto_rawDescOnce sync.Once + file_reflect_protodesc_proto_go_features_proto_rawDescData = file_reflect_protodesc_proto_go_features_proto_rawDesc +) + +func file_reflect_protodesc_proto_go_features_proto_rawDescGZIP() []byte { + file_reflect_protodesc_proto_go_features_proto_rawDescOnce.Do(func() { + file_reflect_protodesc_proto_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_reflect_protodesc_proto_go_features_proto_rawDescData) + }) + return file_reflect_protodesc_proto_go_features_proto_rawDescData +} + +var file_reflect_protodesc_proto_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_reflect_protodesc_proto_go_features_proto_goTypes = []interface{}{ + (*GoFeatures)(nil), // 0: google.protobuf.GoFeatures + (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet +} +var file_reflect_protodesc_proto_go_features_proto_depIdxs = []int32{ + 1, // 0: google.protobuf.go:extendee -> google.protobuf.FeatureSet + 0, // 1: google.protobuf.go:type_name -> google.protobuf.GoFeatures + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_reflect_protodesc_proto_go_features_proto_init() } +func file_reflect_protodesc_proto_go_features_proto_init() { + if File_reflect_protodesc_proto_go_features_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_reflect_protodesc_proto_go_features_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GoFeatures); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_reflect_protodesc_proto_go_features_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_reflect_protodesc_proto_go_features_proto_goTypes, + DependencyIndexes: file_reflect_protodesc_proto_go_features_proto_depIdxs, + MessageInfos: file_reflect_protodesc_proto_go_features_proto_msgTypes, + ExtensionInfos: file_reflect_protodesc_proto_go_features_proto_extTypes, + }.Build() + File_reflect_protodesc_proto_go_features_proto = out.File + file_reflect_protodesc_proto_go_features_proto_rawDesc = nil + file_reflect_protodesc_proto_go_features_proto_goTypes = nil + file_reflect_protodesc_proto_go_features_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto new file mode 100644 index 000000000..d24657129 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto @@ -0,0 +1,28 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2023 Google Inc. All rights reserved. 
+// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd + +syntax = "proto2"; + +package google.protobuf; + +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/protobuf/types/gofeaturespb"; + +extend google.protobuf.FeatureSet { + optional GoFeatures go = 1002; +} + +message GoFeatures { + // Whether or not to generate the deprecated UnmarshalJSON method for enums. + optional bool legacy_unmarshal_json_enum = 1 [ + retention = RETENTION_RUNTIME, + targets = TARGET_TYPE_ENUM, + edition_defaults = { edition: EDITION_PROTO2, value: "true" }, + edition_defaults = { edition: EDITION_PROTO3, value: "false" } + ]; +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 0cb119805..58a34e313 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -228,8 +228,8 @@ github.com/golang-jwt/jwt/v5 # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da ## explicit github.com/golang/groupcache/lru -# github.com/golang/protobuf v1.5.3 -## explicit; go 1.9 +# github.com/golang/protobuf v1.5.4 +## explicit; go 1.17 github.com/golang/protobuf/descriptor github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto @@ -704,7 +704,7 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.32.0 +# google.golang.org/protobuf v1.33.0 ## explicit; go 1.17 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext @@ -712,6 +712,7 @@ google.golang.org/protobuf/encoding/protowire google.golang.org/protobuf/internal/descfmt google.golang.org/protobuf/internal/descopts google.golang.org/protobuf/internal/detrand +google.golang.org/protobuf/internal/editiondefaults google.golang.org/protobuf/internal/encoding/defval google.golang.org/protobuf/internal/encoding/json google.golang.org/protobuf/internal/encoding/messageset @@ -736,6 +737,7 @@ google.golang.org/protobuf/runtime/protoiface google.golang.org/protobuf/runtime/protoimpl google.golang.org/protobuf/types/descriptorpb google.golang.org/protobuf/types/dynamicpb +google.golang.org/protobuf/types/gofeaturespb google.golang.org/protobuf/types/known/anypb google.golang.org/protobuf/types/known/durationpb google.golang.org/protobuf/types/known/emptypb From 8cf2b4b81df0624dd9bff975cadae8c41cf5ce41 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Sat, 9 Mar 2024 14:59:25 +0000 Subject: [PATCH 089/157] fix: nfs volume created in storage account that has blobfuse volume --- go.mod | 20 +++---- go.sum | 16 ++--- .../azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 17 ++++++ .../Azure/azure-sdk-for-go/sdk/azcore/core.go | 21 ++++++- .../Azure/azure-sdk-for-go/sdk/azcore/etag.go | 9 +++ .../internal/exported/response_error.go | 32 ++++++---- .../sdk/azcore/internal/log/log.go | 20 +++++-- .../sdk/azcore/internal/shared/constants.go | 2 +- .../azure-sdk-for-go/sdk/azcore/log/log.go | 5 ++ .../sdk/azcore/runtime/errors.go | 8 +++ .../sdk/azcore/runtime/request.go | 9 ++- vendor/modules.txt | 20 +++---- .../pkg/provider/azure_controller_standard.go | 5 +- .../pkg/provider/azure_controller_vmss.go | 5 +- .../pkg/provider/azure_controller_vmssflex.go | 5 +- .../pkg/provider/azure_mock_vmsets.go | 2 +- .../pkg/provider/azure_storageaccount.go | 22 ++++--- .../pkg/provider/azure_vmsets.go | 2 +- .../pkg/provider/config/azure_auth.go | 59 +++++++++++-------- 19 files changed, 194 
insertions(+), 85 deletions(-) diff --git a/go.mod b/go.mod index 83fa381a8..60ef76a63 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ toolchain go1.21.4 require ( github.com/Azure/azure-sdk-for-go v68.0.0+incompatible - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 @@ -32,13 +32,13 @@ require ( k8s.io/api v0.29.2 k8s.io/apimachinery v0.29.2 k8s.io/client-go v0.29.2 - k8s.io/component-base v0.29.0 + k8s.io/component-base v0.29.2 k8s.io/klog/v2 v2.120.1 k8s.io/kubernetes v1.29.0 k8s.io/mount-utils v0.29.0 k8s.io/utils v0.0.0-20231127182322-b307cd553661 - sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240220055818-b76605c9c708 - sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240220055818-b76605c9c708 + sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240308123135-28150a3f23d8 + sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240308123135-28150a3f23d8 sigs.k8s.io/yaml v1.4.0 ) @@ -146,14 +146,14 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.0.0 // indirect - k8s.io/apiserver v0.29.0 - k8s.io/cloud-provider v0.29.0 // indirect - k8s.io/component-helpers v0.29.0 // indirect - k8s.io/controller-manager v0.29.0 // indirect - k8s.io/kms v0.29.0 // indirect + k8s.io/apiserver v0.29.2 + k8s.io/cloud-provider v0.29.2 // indirect + k8s.io/component-helpers v0.29.2 // indirect + k8s.io/controller-manager v0.29.2 // indirect + k8s.io/kms v0.29.2 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect k8s.io/kubectl v0.0.0 // indirect - k8s.io/kubelet v0.29.0 // indirect + k8s.io/kubelet v0.29.2 // indirect k8s.io/pod-security-admission v0.29.0 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240218020800-ba9f211f36bf diff --git a/go.sum b/go.sum index 0f33d2c7a..a4b8006d8 100644 --- a/go.sum +++ b/go.sum @@ -5,8 +5,8 @@ cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGB cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 h1:c4k2FIYIh4xtwqrQwV0Ct1v5+ehlNXj5NI/MWVsiTkQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2/go.mod h1:5FDJtLEO/GxwNgUxbwrY3LP0pEoThTQJtk2oysdXHxM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= @@ -444,8 +444,8 @@ k8s.io/csi-translation-lib v0.29.0 h1:we4X1yUlDikvm5Rv0dwMuPHNw6KwjwsQiAuOPWXha8 k8s.io/csi-translation-lib v0.29.0/go.mod h1:Cp6t3CNBSm1dXS17V8IImUjkqfIB6KCj8Fs8wf6uyTA= 
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kms v0.29.0 h1:KJ1zaZt74CgvgV3NR7tnURJ/mJOKC5X3nwon/WdwgxI= -k8s.io/kms v0.29.0/go.mod h1:mB0f9HLxRXeXUfHfn1A7rpwOlzXI1gIWu86z6buNoYA= +k8s.io/kms v0.29.2 h1:MDsbp98gSlEQs7K7dqLKNNTwKFQRYYvO4UOlBOjNy6Y= +k8s.io/kms v0.29.2/go.mod h1:s/9RC4sYRZ/6Tn6yhNjbfJuZdb8LzlXhdlBnKizeFDo= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/kubectl v0.29.0 h1:Oqi48gXjikDhrBF67AYuZRTcJV4lg2l42GmvsP7FmYI= @@ -462,12 +462,12 @@ k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6R k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= -sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240220055818-b76605c9c708 h1:Hs8BBRzo/V4Qe8m1NG997vaavBb0WSPzKsxdXtjfUpg= -sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240220055818-b76605c9c708/go.mod h1:GJ+Crn1rYpKB21TNlnJQKsRWvIHva7oMApx2RhubFao= +sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240308123135-28150a3f23d8 h1:rY8tqbl7oTnsMVdrRshRfWbtrVNAJ6wPjJvt/iecNvI= +sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240308123135-28150a3f23d8/go.mod h1:7YA0Tq3FmmhaMfVY/2VH+vZgsRsYgbtE3P5+a6UOyAk= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240218020800-ba9f211f36bf h1:pyQe3MFQp1Hb8iya4LaZiBEh3M0RgZ/yv0/ws5memP4= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240218020800-ba9f211f36bf/go.mod h1:cyVxBbpj/UR4Vi4KhxO3vhzBB2JcN08TngoPQ0J0aWc= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240220055818-b76605c9c708 h1:dKAXbaoIRkknnvirDOb4BZLocx4UVtshb9zg3dgZxaE= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240220055818-b76605c9c708/go.mod h1:8Y2dnp0FlQcoFllRnUgTZjMykhMAUF7NrPLgw30Ae4I= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240308123135-28150a3f23d8 h1:8vkgKNPp/DScHQCe8xnch5DaNNZAj2Mw93Mkx1CXSB4= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240308123135-28150a3f23d8/go.mod h1:Lgaq/yv2awggDs6zFF85347o7L1ucRKx57b55qjh470= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index 7a0a524e3..bf0c3e1aa 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,22 @@ # Release History +## 1.10.0 (2024-02-29) + +### Features Added + +* Added logging event `log.EventResponseError` that will contain the contents of `ResponseError.Error()` whenever an `azcore.ResponseError` is created. +* Added `runtime.NewResponseErrorWithErrorCode` for creating an `azcore.ResponseError` with a caller-supplied error code. +* Added type `MatchConditions` for use in conditional requests. 
+ +### Bugs Fixed + +* Fixed a potential race condition between `NullValue` and `IsNullValue`. +* `runtime.EncodeQueryParams` will escape semicolons before calling `url.ParseQuery`. + +### Other Changes + +* Update dependencies. + ## 1.9.2 (2024-02-06) ### Bugs Fixed diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go index 8eef8633a..9d1c2f0c0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go @@ -8,6 +8,7 @@ package azcore import ( "reflect" + "sync" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" @@ -41,13 +42,28 @@ func NewSASCredential(sas string) *SASCredential { } // holds sentinel values used to send nulls -var nullables map[reflect.Type]interface{} = map[reflect.Type]interface{}{} +var nullables map[reflect.Type]any = map[reflect.Type]any{} +var nullablesMu sync.RWMutex // NullValue is used to send an explicit 'null' within a request. // This is typically used in JSON-MERGE-PATCH operations to delete a value. func NullValue[T any]() T { t := shared.TypeOfT[T]() + + nullablesMu.RLock() v, found := nullables[t] + nullablesMu.RUnlock() + + if found { + // return the sentinel object + return v.(T) + } + + // promote to exclusive lock and check again (double-checked locking pattern) + nullablesMu.Lock() + defer nullablesMu.Unlock() + v, found = nullables[t] + if !found { var o reflect.Value if k := t.Kind(); k == reflect.Map { @@ -72,6 +88,9 @@ func NullValue[T any]() T { func IsNullValue[T any](v T) bool { // see if our map has a sentinel object for this *T t := reflect.TypeOf(v) + nullablesMu.RLock() + defer nullablesMu.RUnlock() + if o, found := nullables[t]; found { o1 := reflect.ValueOf(o) v1 := reflect.ValueOf(v) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go index 23ea7e7c8..2b19d01f7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go @@ -46,3 +46,12 @@ func (e ETag) WeakEquals(other ETag) bool { func (e ETag) IsWeak() bool { return len(e) >= 4 && strings.HasPrefix(string(e), "W/\"") && strings.HasSuffix(string(e), "\"") } + +// MatchConditions specifies HTTP options for conditional requests. +type MatchConditions struct { + // Optionally limit requests to resources that have a matching ETag. + IfMatch *ETag + + // Optionally limit requests to resources that do not match the ETag. + IfNoneMatch *ETag +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go index f24355288..bd348b868 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go @@ -13,6 +13,7 @@ import ( "net/http" "regexp" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" ) @@ -20,31 +21,40 @@ import ( // NewResponseError creates a new *ResponseError from the provided HTTP response. // Exported as runtime.NewResponseError(). 
func NewResponseError(resp *http.Response) error { - respErr := &ResponseError{ - StatusCode: resp.StatusCode, - RawResponse: resp, - } - // prefer the error code in the response header if ec := resp.Header.Get(shared.HeaderXMSErrorCode); ec != "" { - respErr.ErrorCode = ec - return respErr + return NewResponseErrorWithErrorCode(resp, ec) } // if we didn't get x-ms-error-code, check in the response body body, err := exported.Payload(resp, nil) if err != nil { + // since we're not returning the ResponseError in this + // case we also don't want to write it to the log. return err } + var errorCode string if len(body) > 0 { - if code := extractErrorCodeJSON(body); code != "" { - respErr.ErrorCode = code - } else if code := extractErrorCodeXML(body); code != "" { - respErr.ErrorCode = code + if fromJSON := extractErrorCodeJSON(body); fromJSON != "" { + errorCode = fromJSON + } else if fromXML := extractErrorCodeXML(body); fromXML != "" { + errorCode = fromXML } } + return NewResponseErrorWithErrorCode(resp, errorCode) +} + +// NewResponseErrorWithErrorCode creates an *azcore.ResponseError from the provided HTTP response and errorCode. +// Exported as runtime.NewResponseErrorWithErrorCode(). +func NewResponseErrorWithErrorCode(resp *http.Response, errorCode string) error { + respErr := &ResponseError{ + ErrorCode: errorCode, + StatusCode: resp.StatusCode, + RawResponse: resp, + } + log.Write(log.EventResponseError, respErr.Error()) return respErr } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go index 0684cb317..5cb87de2c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go @@ -15,24 +15,36 @@ import ( type Event = log.Event const ( - EventRequest = azlog.EventRequest - EventResponse = azlog.EventResponse - EventRetryPolicy = azlog.EventRetryPolicy - EventLRO = azlog.EventLRO + EventRequest = azlog.EventRequest + EventResponse = azlog.EventResponse + EventResponseError = azlog.EventResponseError + EventRetryPolicy = azlog.EventRetryPolicy + EventLRO = azlog.EventLRO ) +// Write invokes the underlying listener with the specified event and message. +// If the event shouldn't be logged or there is no listener then Write does nothing. func Write(cls log.Event, msg string) { log.Write(cls, msg) } +// Writef invokes the underlying listener with the specified event and formatted message. +// If the event shouldn't be logged or there is no listener then Writef does nothing. func Writef(cls log.Event, format string, a ...interface{}) { log.Writef(cls, format, a...) } +// SetListener will set the Logger to write to the specified listener. func SetListener(lst func(Event, string)) { log.SetListener(lst) } +// Should returns true if the specified log event should be written to the log. +// By default all log events will be logged. Call SetEvents() to limit +// the log events for logging. +// If no listener has been set this will return false. +// Calling this method is useful when the message to log is computationally expensive +// and you want to avoid the overhead if its log event is not enabled. 
func Should(cls log.Event) bool { return log.Should(cls) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 8f749f48d..330bf9a60 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -40,5 +40,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.9.2" + Version = "v1.10.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go index 7bde29d0a..f260dac36 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go @@ -23,6 +23,11 @@ const ( // This includes information like the HTTP status code, headers, and request URL. EventResponse Event = "Response" + // EventResponseError entries contain information about HTTP responses that returned + // an *azcore.ResponseError (i.e. responses with a non 2xx HTTP status code). + // This includes the contents of ResponseError.Error(). + EventResponseError Event = "ResponseError" + // EventRetryPolicy entries contain information specific to the retry policy in use. EventRetryPolicy Event = "Retry" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go index 6d03b291e..c0d56158e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go @@ -14,6 +14,14 @@ import ( // NewResponseError creates an *azcore.ResponseError from the provided HTTP response. // Call this when a service request returns a non-successful status code. +// The error code will be extracted from the *http.Response, either from the x-ms-error-code +// header (preferred) or attempted to be parsed from the response body. func NewResponseError(resp *http.Response) error { return exported.NewResponseError(resp) } + +// NewResponseErrorWithErrorCode creates an *azcore.ResponseError from the provided HTTP response and errorCode. +// Use this variant when the error code is in a non-standard location. +func NewResponseErrorWithErrorCode(resp *http.Response, errorCode string) error { + return exported.NewResponseErrorWithErrorCode(resp, errorCode) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go index 5d1569c8d..bef05f2a3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go @@ -42,12 +42,19 @@ func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*polic } // EncodeQueryParams will parse and encode any query parameters in the specified URL. +// Any semicolons will automatically be escaped. func EncodeQueryParams(u string) (string, error) { before, after, found := strings.Cut(u, "?") if !found { return u, nil } - qp, err := url.ParseQuery(after) + // starting in Go 1.17, url.ParseQuery will reject semicolons in query params. + // so, we must escape them first. 
note that this assumes that semicolons aren't + // being used as query param separators which is per the current RFC. + // for more info: + // https://github.com/golang/go/issues/25192 + // https://github.com/golang/go/issues/50034 + qp, err := url.ParseQuery(strings.ReplaceAll(after, ";", "%3B")) if err != nil { return "", err } diff --git a/vendor/modules.txt b/vendor/modules.txt index 58a34e313..c77a89c12 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -11,7 +11,7 @@ github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage github.com/Azure/azure-sdk-for-go/storage github.com/Azure/azure-sdk-for-go/version -# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azcore github.com/Azure/azure-sdk-for-go/sdk/azcore/arm @@ -881,7 +881,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.29.0 => k8s.io/apiserver v0.29.0 +# k8s.io/apiserver v0.29.2 => k8s.io/apiserver v0.29.0 ## explicit; go 1.21 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/cel @@ -1352,7 +1352,7 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.29.0 => k8s.io/cloud-provider v0.29.0 +# k8s.io/cloud-provider v0.29.2 => k8s.io/cloud-provider v0.29.0 ## explicit; go 1.21 k8s.io/cloud-provider k8s.io/cloud-provider/api @@ -1368,7 +1368,7 @@ k8s.io/cloud-provider/names k8s.io/cloud-provider/node/helpers k8s.io/cloud-provider/options k8s.io/cloud-provider/service/helpers -# k8s.io/component-base v0.29.0 => k8s.io/component-base v0.29.0 +# k8s.io/component-base v0.29.2 => k8s.io/component-base v0.29.0 ## explicit; go 1.21 k8s.io/component-base/cli/flag k8s.io/component-base/config @@ -1390,14 +1390,14 @@ k8s.io/component-base/metrics/testutil k8s.io/component-base/tracing k8s.io/component-base/tracing/api/v1 k8s.io/component-base/version -# k8s.io/component-helpers v0.29.0 => k8s.io/component-helpers v0.29.0 +# k8s.io/component-helpers v0.29.2 => k8s.io/component-helpers v0.29.0 ## explicit; go 1.21 k8s.io/component-helpers/node/util k8s.io/component-helpers/node/util/sysctl k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity k8s.io/component-helpers/storage/volume -# k8s.io/controller-manager v0.29.0 => k8s.io/controller-manager v0.29.0 +# k8s.io/controller-manager v0.29.2 => k8s.io/controller-manager v0.29.0 ## explicit; go 1.21 k8s.io/controller-manager/config k8s.io/controller-manager/config/v1 @@ -1418,7 +1418,7 @@ k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity k8s.io/klog/v2/internal/sloghandler -# k8s.io/kms v0.29.0 +# k8s.io/kms v0.29.2 ## explicit; go 1.21 k8s.io/kms/apis/v1beta1 k8s.io/kms/apis/v2 @@ -1449,7 +1449,7 @@ k8s.io/kube-openapi/pkg/validation/strfmt/bson ## explicit; go 1.21 k8s.io/kubectl/pkg/scale k8s.io/kubectl/pkg/util/podutils -# k8s.io/kubelet v0.29.0 => k8s.io/kubelet v0.29.0 +# k8s.io/kubelet v0.29.2 => k8s.io/kubelet v0.29.0 ## explicit; go 1.21 k8s.io/kubelet/pkg/apis k8s.io/kubelet/pkg/apis/stats/v1alpha1 @@ -1555,7 +1555,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client 
sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client -# sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240220055818-b76605c9c708 +# sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240308123135-28150a3f23d8 ## explicit; go 1.21 sigs.k8s.io/cloud-provider-azure/pkg/azureclients sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient @@ -1655,7 +1655,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient -# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240220055818-b76605c9c708 +# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240308123135-28150a3f23d8 ## explicit; go 1.21 sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader # sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go index e47b8296d..10f3b2f60 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_standard.go @@ -153,7 +153,7 @@ func (as *availabilitySet) WaitForUpdateResult(ctx context.Context, future *azur } // DetachDisk detaches a disk from VM -func (as *availabilitySet) DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string) error { +func (as *availabilitySet) DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string, forceDetach bool) error { vm, err := as.getVirtualMachine(nodeName, azcache.CacheReadTypeDefault) if err != nil { // if host doesn't exist, no need to detach @@ -179,6 +179,9 @@ func (as *availabilitySet) DetachDisk(ctx context.Context, nodeName types.NodeNa // found the disk klog.V(2).Infof("azureDisk - detach disk: name %s uri %s", diskName, diskURI) disks[i].ToBeDetached = pointer.Bool(true) + if forceDetach { + disks[i].DetachOption = compute.ForceDetach + } bFoundDisk = true } } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go index fada0393f..3a3cc2397 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go @@ -163,7 +163,7 @@ func (ss *ScaleSet) WaitForUpdateResult(ctx context.Context, future *azure.Futur } // DetachDisk detaches a disk from VM -func (ss *ScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string) error { +func (ss *ScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string, forceDetach bool) error { vmName := mapNodeNameToVMName(nodeName) vm, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault) if err != nil { @@ -193,6 +193,9 @@ func (ss *ScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, dis // found the disk klog.V(2).Infof("azureDisk - detach disk: name %s uri %s", diskName, diskURI) disks[i].ToBeDetached = pointer.Bool(true) + if forceDetach { + disks[i].DetachOption = 
compute.ForceDetach + } bFoundDisk = true } } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go index d34ea871e..383812041 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmssflex.go @@ -122,7 +122,7 @@ func (fs *FlexScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, } // DetachDisk detaches a disk from VM -func (fs *FlexScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string) error { +func (fs *FlexScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string, forceDetach bool) error { vmName := mapNodeNameToVMName(nodeName) vm, err := fs.getVmssFlexVM(vmName, azcache.CacheReadTypeDefault) if err != nil { @@ -148,6 +148,9 @@ func (fs *FlexScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, // found the disk klog.V(2).Infof("azureDisk - detach disk: name %s uri %s", diskName, diskURI) disks[i].ToBeDetached = pointer.Bool(true) + if forceDetach { + disks[i].DetachOption = compute.ForceDetach + } bFoundDisk = true } } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go index 78de8bf84..c5b232bb6 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_mock_vmsets.go @@ -92,7 +92,7 @@ func (mr *MockVMSetMockRecorder) DeleteCacheForNode(nodeName any) *gomock.Call { } // DetachDisk mocks base method. -func (m *MockVMSet) DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string) error { +func (m *MockVMSet) DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string, forceDetach bool) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DetachDisk", ctx, nodeName, diskMap) ret0, _ := ret[0].(error) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go index a26719f80..af3574bd4 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go @@ -117,6 +117,7 @@ func (az *Cloud) getStorageAccounts(ctx context.Context, accountOptions *Account isTaggedWithSkip(acct) && isHnsPropertyEqual(acct, accountOptions) && isEnableNfsV3PropertyEqual(acct, accountOptions) && + isEnableHTTPSTrafficOnlyEqual(acct, accountOptions) && isAllowBlobPublicAccessEqual(acct, accountOptions) && isRequireInfrastructureEncryptionEqual(acct, accountOptions) && isAllowSharedKeyAccessEqual(acct, accountOptions) && @@ -717,22 +718,20 @@ func (az *Cloud) AddStorageAccountTags(ctx context.Context, subsID, resourceGrou return rerr } - originalLen := len(result.Tags) - newTags := result.Tags - if newTags == nil { - newTags = make(map[string]*string) - } - - // merge two tag map + // merge two tag map into one + newTags := make(map[string]*string) for k, v := range tags { newTags[k] = v } + for k, v := range result.Tags { + newTags[k] = v + } - if len(newTags) > originalLen { + if len(newTags) > len(result.Tags) { // only update when newTags is different from old tags _ = az.storageAccountCache.Delete(account) // clean 
cache updateParams := storage.AccountUpdateParameters{Tags: newTags} - klog.V(2).Infof("update storage account(%s) with tags(%+v)", account, newTags) + klog.V(2).Infof("add storage account(%s) with tags(%+v)", account, newTags) return az.StorageAccountClient.Update(ctx, subsID, resourceGroup, account, updateParams) } return nil @@ -759,6 +758,7 @@ func (az *Cloud) RemoveStorageAccountTag(ctx context.Context, subsID, resourceGr // only update when newTags is different from old tags _ = az.storageAccountCache.Delete(account) // clean cache updateParams := storage.AccountUpdateParameters{Tags: result.Tags} + klog.V(2).Infof("remove tag(%s) from storage account(%s)", key, account) return az.StorageAccountClient.Update(ctx, subsID, resourceGroup, account, updateParams) } return nil @@ -861,6 +861,10 @@ func isEnableNfsV3PropertyEqual(account storage.Account, accountOptions *Account return pointer.BoolDeref(accountOptions.EnableNfsV3, false) == pointer.BoolDeref(account.EnableNfsV3, false) } +func isEnableHTTPSTrafficOnlyEqual(account storage.Account, accountOptions *AccountOptions) bool { + return accountOptions.EnableHTTPSTrafficOnly == pointer.BoolDeref(account.EnableHTTPSTrafficOnly, true) +} + func isPrivateEndpointAsExpected(account storage.Account, accountOptions *AccountOptions) bool { if accountOptions.CreatePrivateEndpoint == nil { // CreatePrivateEndpoint is not set, match current account diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go index d680f5ced..8dfc95799 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go @@ -78,7 +78,7 @@ type VMSet interface { // AttachDisk attaches a disk to vm AttachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) error // DetachDisk detaches a disk from vm - DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string) error + DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string, forceDetach bool) error // WaitForUpdateResult waits for the response of the update request // GetDataDisks gets a list of data disks attached to the node. diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/azure_auth.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/azure_auth.go index e149bcac2..58741f9e4 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/azure_auth.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/config/azure_auth.go @@ -17,8 +17,6 @@ limitations under the License. 
package config import ( - "crypto/rsa" - "crypto/x509" "errors" "fmt" "io" @@ -30,8 +28,6 @@ import ( "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" - "golang.org/x/crypto/pkcs12" - "k8s.io/klog/v2" "sigs.k8s.io/cloud-provider-azure/pkg/azclient" @@ -147,13 +143,13 @@ func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment, r resource) } - if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 { + if len(config.AADClientCertPath) > 0 { klog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token") certData, err := os.ReadFile(config.AADClientCertPath) if err != nil { return nil, fmt.Errorf("reading the client certificate from file %s: %w", config.AADClientCertPath, err) } - certificate, privateKey, err := decodePkcs12(certData, config.AADClientCertPassword) + certificate, privateKey, err := adal.DecodePfxCertificateData(certData, config.AADClientCertPassword) if err != nil { return nil, fmt.Errorf("decoding the client certificate: %w", err) } @@ -197,8 +193,22 @@ func GetMultiTenantServicePrincipalToken(config *AzureAuthConfig, env *azure.Env env.ServiceManagementEndpoint) } - if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 { - return nil, fmt.Errorf("AAD Application client certificate authentication is not supported in getting multi-tenant service principal token") + if len(config.AADClientCertPath) > 0 { + klog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve multi-tenant access token") + certData, err := os.ReadFile(config.AADClientCertPath) + if err != nil { + return nil, fmt.Errorf("reading the client certificate from file %s: %w", config.AADClientCertPath, err) + } + certificate, privateKey, err := adal.DecodePfxCertificateData(certData, config.AADClientCertPassword) + if err != nil { + return nil, fmt.Errorf("decoding the client certificate: %w", err) + } + return adal.NewMultiTenantServicePrincipalTokenFromCertificate( + multiTenantOAuthConfig, + config.AADClientID, + certificate, + privateKey, + env.ServiceManagementEndpoint) } return nil, ErrorNoAuth @@ -230,8 +240,22 @@ func GetNetworkResourceServicePrincipalToken(config *AzureAuthConfig, env *azure env.ServiceManagementEndpoint) } - if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 { - return nil, fmt.Errorf("AAD Application client certificate authentication is not supported in getting network resources service principal token") + if len(config.AADClientCertPath) > 0 { + klog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token for network resources tenant") + certData, err := os.ReadFile(config.AADClientCertPath) + if err != nil { + return nil, fmt.Errorf("reading the client certificate from file %s: %w", config.AADClientCertPath, err) + } + certificate, privateKey, err := adal.DecodePfxCertificateData(certData, config.AADClientCertPassword) + if err != nil { + return nil, fmt.Errorf("decoding the client certificate: %w", err) + } + return adal.NewServicePrincipalTokenFromCertificate( + *oauthConfig, + config.AADClientID, + certificate, + privateKey, + env.ServiceManagementEndpoint) } return nil, ErrorNoAuth @@ -303,21 +327,6 @@ func (config *AzureAuthConfig) UsesNetworkResourceInDifferentSubscription() bool return len(config.NetworkResourceSubscriptionID) > 0 && !strings.EqualFold(config.NetworkResourceSubscriptionID, config.SubscriptionID) 
} -// decodePkcs12 decodes a PKCS#12 client certificate by extracting the public certificate and -// the private RSA key -func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { - privateKey, certificate, err := pkcs12.Decode(pkcs, password) - if err != nil { - return nil, nil, fmt.Errorf("decoding the PKCS#12 client certificate: %w", err) - } - rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey) - if !isRsaKey { - return nil, nil, fmt.Errorf("PKCS#12 certificate must contain a RSA private key") - } - - return certificate, rsaPrivateKey, nil -} - // azureStackOverrides ensures that the Environment matches what AKSe currently generates for Azure Stack func azureStackOverrides(env *azure.Environment, resourceManagerEndpoint, identitySystem string) { env.ManagementPortalURL = strings.Replace(resourceManagerEndpoint, "https://management.", "https://portal.", -1) From 58c76c8491ceef3b4a6d2cda3ba74cfcd4e99fd4 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Wed, 13 Mar 2024 11:27:37 +0000 Subject: [PATCH 090/157] feat: provide nfsv3 protocol to enforce nfs mount add supported list --- pkg/blob/blob.go | 7 +++--- pkg/blob/blob_test.go | 4 ++-- pkg/blob/controllerserver_test.go | 2 +- pkg/blob/nodeserver.go | 2 +- test/e2e/dynamic_provisioning_test.go | 31 +++++++++++++++++++++++++++ 5 files changed, 39 insertions(+), 7 deletions(-) diff --git a/pkg/blob/blob.go b/pkg/blob/blob.go index 702d63e61..866e9cb4d 100644 --- a/pkg/blob/blob.go +++ b/pkg/blob/blob.go @@ -112,6 +112,7 @@ const ( Fuse2 = "fuse2" NFS = "nfs" AZNFS = "aznfs" + NFSv3 = "nfsv3" vnetResourceGroupField = "vnetresourcegroup" vnetNameField = "vnetname" subnetNameField = "subnetname" @@ -155,7 +156,7 @@ const ( ) var ( - supportedProtocolList = []string{Fuse, Fuse2, NFS} + supportedProtocolList = []string{Fuse, Fuse2, NFS, AZNFS} retriableErrors = []string{accountNotProvisioned, tooManyRequests, statusCodeNotFound, containerBeingDeletedDataplaneAPIError, containerBeingDeletedManagementAPIError, clientThrottled} supportedFSGroupChangePolicyList = []string{FSGroupChangeNone, string(v1.FSGroupChangeAlways), string(v1.FSGroupChangeOnRootMismatch)} ) @@ -762,7 +763,7 @@ func isSupportedProtocol(protocol string) bool { return true } for _, v := range supportedProtocolList { - if protocol == v { + if protocol == v || protocol == NFSv3 { return true } } @@ -804,7 +805,7 @@ func isSupportedContainerNamePrefix(prefix string) bool { // isNFSProtocol checks if the protocol is NFS or AZNFS func isNFSProtocol(protocol string) bool { protocol = strings.ToLower(protocol) - return protocol == NFS || protocol == AZNFS + return protocol == NFS || protocol == AZNFS || protocol == NFSv3 } // get storage account from secrets map diff --git a/pkg/blob/blob_test.go b/pkg/blob/blob_test.go index 5c503e238..b78727760 100644 --- a/pkg/blob/blob_test.go +++ b/pkg/blob/blob_test.go @@ -1745,8 +1745,8 @@ func TestIsNFSProtocol(t *testing.T) { expectedResult: true, }, { - protocol: "NFSv3", - expectedResult: false, + protocol: "nfsv3", + expectedResult: true, }, { protocol: "aznfs", diff --git a/pkg/blob/controllerserver_test.go b/pkg/blob/controllerserver_test.go index 3009ab856..22bc9179e 100644 --- a/pkg/blob/controllerserver_test.go +++ b/pkg/blob/controllerserver_test.go @@ -176,7 +176,7 @@ func TestCreateVolume(t *testing.T) { controllerServiceCapability, } _, err := d.CreateVolume(context.Background(), req) - expectedErr := status.Errorf(codes.InvalidArgument, "protocol(unit-test) is not supported, supported 
protocol list: [fuse fuse2 nfs]") + expectedErr := status.Errorf(codes.InvalidArgument, "protocol(unit-test) is not supported, supported protocol list: [fuse fuse2 nfs aznfs]") if !reflect.DeepEqual(err, expectedErr) { t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr) } diff --git a/pkg/blob/nodeserver.go b/pkg/blob/nodeserver.go index 94a98bdf3..8fe9dadf6 100644 --- a/pkg/blob/nodeserver.go +++ b/pkg/blob/nodeserver.go @@ -346,7 +346,7 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe targetPath, protocol, volumeID, attrib, mountFlags, serverAddress) mountType := AZNFS - if !d.enableAznfsMount { + if !d.enableAznfsMount || protocol == NFSv3 { mountType = NFS } diff --git a/test/e2e/dynamic_provisioning_test.go b/test/e2e/dynamic_provisioning_test.go index 679edabd7..05a09a659 100644 --- a/test/e2e/dynamic_provisioning_test.go +++ b/test/e2e/dynamic_provisioning_test.go @@ -529,6 +529,37 @@ var _ = ginkgo.Describe("[blob-csi-e2e] Dynamic Provisioning", func() { test.Run(ctx, cs, ns) }) + ginkgo.It("enforce with nfs mount [nfs]", func(ctx ginkgo.SpecContext) { + if isAzureStackCloud { + ginkgo.Skip("test case is not available for Azure Stack") + } + pods := []testsuites.PodDetails{ + { + Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data", + Volumes: []testsuites.VolumeDetails{ + { + ClaimSize: "10Gi", + MountOptions: []string{ + "nconnect=8", + }, + VolumeMount: testsuites.VolumeMountDetails{ + NameGenerate: "test-volume-", + MountPathGenerate: "/mnt/test-", + }, + }, + }, + }, + } + test := testsuites.DynamicallyProvisionedCmdVolumeTest{ + CSIDriver: testDriver, + Pods: pods, + StorageClassParameters: map[string]string{ + "protocol": "nfsv3", + }, + } + test.Run(ctx, cs, ns) + }) + ginkgo.It("should create a NFSv3 volume on demand with zero mountPermissions [nfs]", func(ctx ginkgo.SpecContext) { if isAzureStackCloud { ginkgo.Skip("test case is not available for Azure Stack") From 4aad94ab6a27e65cea23fda07ede63f2aad40686 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Thu, 14 Mar 2024 10:38:43 +0000 Subject: [PATCH 091/157] test: ignore azcopy CVE-2024-24786 --- .trivyignore | 1 + 1 file changed, 1 insertion(+) create mode 100644 .trivyignore diff --git a/.trivyignore b/.trivyignore new file mode 100644 index 000000000..b0f56dcff --- /dev/null +++ b/.trivyignore @@ -0,0 +1 @@ +CVE-2024-24786 From 1c44f57d7217ac8eb24b24eadeb82b12b363af94 Mon Sep 17 00:00:00 2001 From: umagnus Date: Thu, 14 Mar 2024 08:13:20 +0000 Subject: [PATCH 092/157] add exec timeout func --- pkg/blob/controllerserver.go | 58 ++++++++++++++----------------- pkg/blob/controllerserver_test.go | 9 ++--- pkg/blob/volume_lock.go | 3 +- pkg/util/util.go | 28 +++++++++++++++ pkg/util/util_test.go | 52 +++++++++++++++++++++++++++ 5 files changed, 112 insertions(+), 38 deletions(-) diff --git a/pkg/blob/controllerserver.go b/pkg/blob/controllerserver.go index 5dbe00477..ae469954e 100644 --- a/pkg/blob/controllerserver.go +++ b/pkg/blob/controllerserver.go @@ -61,8 +61,6 @@ const ( MSI = "MSI" SPN = "SPN" authorizationPermissionMismatch = "AuthorizationPermissionMismatch" - - waitForAzCopyInterval = 2 * time.Second ) // CreateVolume provisions a volume @@ -85,7 +83,7 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) // logging the job status if it's volume cloning if req.GetVolumeContentSource() != nil { jobState, percent, err := d.azcopy.GetAzcopyJob(volName, []string{}) - klog.V(2).Infof("azcopy job status: %s, 
copy percent: %s%%, error: %v", jobState, percent, err) + return nil, status.Errorf(codes.Aborted, volumeOperationAlreadyExistsWithAzcopyFmt, volName, jobState, percent, err) } return nil, status.Errorf(codes.Aborted, volumeOperationAlreadyExistsFmt, volName) } @@ -759,43 +757,41 @@ func (d *Driver) copyBlobContainer(req *csi.CreateVolumeRequest, accountSasToken return fmt.Errorf("srcContainerName(%s) or dstContainerName(%s) is empty", srcContainerName, dstContainerName) } - timeAfter := time.After(time.Duration(d.waitForAzCopyTimeoutMinutes) * time.Minute) - timeTick := time.Tick(waitForAzCopyInterval) srcPath := fmt.Sprintf("https://%s.blob.%s/%s%s", accountName, storageEndpointSuffix, srcContainerName, accountSasToken) dstPath := fmt.Sprintf("https://%s.blob.%s/%s%s", accountName, storageEndpointSuffix, dstContainerName, accountSasToken) jobState, percent, err := d.azcopy.GetAzcopyJob(dstContainerName, authAzcopyEnv) klog.V(2).Infof("azcopy job status: %s, copy percent: %s%%, error: %v", jobState, percent, err) - if jobState == util.AzcopyJobError || jobState == util.AzcopyJobCompleted { + switch jobState { + case util.AzcopyJobError, util.AzcopyJobCompleted: return err - } - klog.V(2).Infof("begin to copy blob container %s to %s", srcContainerName, dstContainerName) - for { - select { - case <-timeTick: - jobState, percent, err := d.azcopy.GetAzcopyJob(dstContainerName, authAzcopyEnv) - klog.V(2).Infof("azcopy job status: %s, copy percent: %s%%, error: %v", jobState, percent, err) - switch jobState { - case util.AzcopyJobError, util.AzcopyJobCompleted: - return err - case util.AzcopyJobNotFound: - klog.V(2).Infof("copy blob container %s to %s", srcContainerName, dstContainerName) - cmd := exec.Command("azcopy", "copy", srcPath, dstPath, "--recursive", "--check-length=false") - if len(authAzcopyEnv) > 0 { - cmd.Env = append(os.Environ(), authAzcopyEnv...) - } - out, copyErr := cmd.CombinedOutput() - if copyErr != nil { - klog.Warningf("CopyBlobContainer(%s, %s, %s) failed with error(%v): %v", resourceGroupName, accountName, dstPath, copyErr, string(out)) - } else { - klog.V(2).Infof("copied blob container %s to %s successfully", srcContainerName, dstContainerName) - } - return copyErr + case util.AzcopyJobRunning: + return fmt.Errorf("wait for the existing AzCopy job to complete, current copy percentage is %s%%", percent) + case util.AzcopyJobNotFound: + klog.V(2).Infof("copy blob container %s to %s", srcContainerName, dstContainerName) + execFunc := func() error { + cmd := exec.Command("azcopy", "copy", srcPath, dstPath, "--recursive", "--check-length=false") + if len(authAzcopyEnv) > 0 { + cmd.Env = append(os.Environ(), authAzcopyEnv...) 
+ } + if out, err := cmd.CombinedOutput(); err != nil { + return fmt.Errorf("exec error: %v, output: %v", err, string(out)) } - case <-timeAfter: - return fmt.Errorf("timeout waiting for copy blob container %s to %s succeed", srcContainerName, dstContainerName) + return nil + } + timeoutFunc := func() error { + _, percent, _ := d.azcopy.GetAzcopyJob(dstContainerName, authAzcopyEnv) + return fmt.Errorf("timeout waiting for copy blob container %s to %s complete, current copy percent: %s%%", srcContainerName, dstContainerName, percent) + } + copyErr := util.WaitForExecCompletion(time.Duration(d.waitForAzCopyTimeoutMinutes)*time.Minute, execFunc, timeoutFunc) + if copyErr != nil { + klog.Warningf("CopyBlobContainer(%s, %s, %s) failed with error: %v", resourceGroupName, accountName, dstPath, copyErr) + } else { + klog.V(2).Infof("copied blob container %s to %s successfully", srcContainerName, dstContainerName) } + return copyErr } + return err } // copyVolume copies a volume form volume or snapshot, snapshot is not supported now diff --git a/pkg/blob/controllerserver_test.go b/pkg/blob/controllerserver_test.go index 22bc9179e..bb3a7a219 100644 --- a/pkg/blob/controllerserver_test.go +++ b/pkg/blob/controllerserver_test.go @@ -1712,7 +1712,7 @@ func TestCopyVolume(t *testing.T) { }, }, { - name: "azcopy job is first in progress and then be completed", + name: "azcopy job is in progress", testFunc: func(t *testing.T) { d := NewFakeDriver() mp := map[string]string{} @@ -1739,15 +1739,12 @@ func TestCopyVolume(t *testing.T) { m := util.NewMockEXEC(ctrl) listStr1 := "JobId: ed1c3833-eaff-fe42-71d7-513fb065a9d9\nStart Time: Monday, 07-Aug-23 03:29:54 UTC\nStatus: InProgress\nCommand: copy https://{accountName}.file.core.windows.net/{srcFileshare}{SAStoken} https://{accountName}.file.core.windows.net/{dstFileshare}{SAStoken} --recursive --check-length=false" - listStr2 := "JobId: ed1c3833-eaff-fe42-71d7-513fb065a9d9\nStart Time: Monday, 07-Aug-23 03:29:54 UTC\nStatus: Completed\nCommand: copy https://{accountName}.file.core.windows.net/{srcFileshare}{SAStoken} https://{accountName}.file.core.windows.net/{dstFileshare}{SAStoken} --recursive --check-length=false" - o1 := m.EXPECT().RunCommand(gomock.Eq("azcopy jobs list | grep dstContainer -B 3"), gomock.Any()).Return(listStr1, nil).Times(1) + m.EXPECT().RunCommand(gomock.Eq("azcopy jobs list | grep dstContainer -B 3"), gomock.Any()).Return(listStr1, nil).Times(1) m.EXPECT().RunCommand(gomock.Not("azcopy jobs list | grep dstBlobContainer -B 3"), gomock.Any()).Return("Percent Complete (approx): 50.0", nil) - o2 := m.EXPECT().RunCommand(gomock.Eq("azcopy jobs list | grep dstContainer -B 3"), gomock.Any()).Return(listStr2, nil) - gomock.InOrder(o1, o2) d.azcopy.ExecCmd = m - var expectedErr error + expectedErr := fmt.Errorf("wait for the existing AzCopy job to complete, current copy percentage is 50.0%%") err := d.copyVolume(req, "sastoken", nil, "dstContainer", "core.windows.net") if !reflect.DeepEqual(err, expectedErr) { t.Errorf("Unexpected error: %v", err) diff --git a/pkg/blob/volume_lock.go b/pkg/blob/volume_lock.go index 2996d230d..95f779a9a 100644 --- a/pkg/blob/volume_lock.go +++ b/pkg/blob/volume_lock.go @@ -23,7 +23,8 @@ import ( ) const ( - volumeOperationAlreadyExistsFmt = "An operation with the given Volume ID %s already exists" + volumeOperationAlreadyExistsFmt = "An operation with the given Volume ID %s already exists" + volumeOperationAlreadyExistsWithAzcopyFmt = "An operation using azcopy with the given Volume ID %s already exists. 
Azcopy job status: %s, copy percent: %s%%, error: %v" ) // VolumeLocks implements a map with atomic operations. It stores a set of all volume IDs diff --git a/pkg/util/util.go b/pkg/util/util.go index 2cf64a99b..bb5e60052 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -24,6 +24,7 @@ import ( "strconv" "strings" "sync" + "time" "github.com/go-ini/ini" "github.com/pkg/errors" @@ -387,3 +388,30 @@ func SetVolumeOwnership(path, gid, policy string) error { } return volume.SetVolumeOwnership(&VolumeMounter{path: path}, path, &gidInt64, &fsGroupChangePolicy, nil) } + +// ExecFunc returns a exec function's output and error +type ExecFunc func() (err error) + +// TimeoutFunc returns output and error if an ExecFunc timeout +type TimeoutFunc func() (err error) + +// WaitForExecCompletion waits for the exec function to complete or return timeout error +func WaitForExecCompletion(timeout time.Duration, execFunc ExecFunc, timeoutFunc TimeoutFunc) error { + // Create a channel to receive the result of the azcopy exec function + done := make(chan bool) + var err error + + // Start the azcopy exec function in a goroutine + go func() { + err = execFunc() + done <- true + }() + + // Wait for the function to complete or time out + select { + case <-done: + return err + case <-time.After(timeout): + return timeoutFunc() + } +} diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go index 0e97a7b5f..3cec59e5a 100644 --- a/pkg/util/util_test.go +++ b/pkg/util/util_test.go @@ -656,3 +656,55 @@ func TestSetVolumeOwnership(t *testing.T) { } } } + +func TestWaitForExecCompletion(t *testing.T) { + tests := []struct { + desc string + timeout time.Duration + execFunc ExecFunc + timeoutFunc TimeoutFunc + expectedErr error + }{ + { + desc: "execFunc returns error", + timeout: 1 * time.Second, + execFunc: func() error { + return fmt.Errorf("execFunc error") + }, + timeoutFunc: func() error { + return fmt.Errorf("timeout error") + }, + expectedErr: fmt.Errorf("execFunc error"), + }, + { + desc: "execFunc timeout", + timeout: 1 * time.Second, + execFunc: func() error { + time.Sleep(2 * time.Second) + return nil + }, + timeoutFunc: func() error { + return fmt.Errorf("timeout error") + }, + expectedErr: fmt.Errorf("timeout error"), + }, + { + desc: "execFunc completed successfully", + timeout: 1 * time.Second, + execFunc: func() error { + return nil + }, + timeoutFunc: func() error { + return fmt.Errorf("timeout error") + }, + expectedErr: nil, + }, + } + + for _, test := range tests { + err := WaitForExecCompletion(test.timeout, test.execFunc, test.timeoutFunc) + if err != nil && (err.Error() != test.expectedErr.Error()) { + t.Errorf("unexpected error: %v, expected error: %v", err, test.expectedErr) + } + } +} From ccb20173548bbb4a32ac189371e87412150b5286 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Fri, 15 Mar 2024 06:27:50 +0000 Subject: [PATCH 093/157] feat: upgrade azcopy version to 10.23.0 for volume clone feature --- pkg/blobplugin/Dockerfile | 4 ++-- test/sanity/run-test.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/blobplugin/Dockerfile b/pkg/blobplugin/Dockerfile index 5699eb4bb..e775e8e1f 100644 --- a/pkg/blobplugin/Dockerfile +++ b/pkg/blobplugin/Dockerfile @@ -44,9 +44,9 @@ else \ RUN tar xvzf aznfs.tar.gz -C / --keep-directory-symlink && rm aznfs.tar.gz # install azcopy -ARG azcopyURL=https://azcopyvnext.azureedge.net/releases/release-10.22.2-20240110/azcopy_linux_amd64_10.22.2.tar.gz +ARG 
azcopyURL=https://azcopyvnext.azureedge.net/releases/release-10.23.0-20240129/azcopy_linux_amd64_10.23.0.tar.gz RUN if [ "$ARCH" == "arm64" ] ; then \ - azcopyURL=https://azcopyvnext.azureedge.net/releases/release-10.22.2-20240110/azcopy_linux_arm64_10.22.2.tar.gz; fi + azcopyURL=https://azcopyvnext.azureedge.net/releases/release-10.23.0-20240129/azcopy_linux_arm64_10.23.0.tar.gz; fi RUN wget -O azcopy.tar.gz ${azcopyURL} && \ tar xvzf azcopy.tar.gz -C . && rm azcopy.tar.gz && \ mv ./azcopy_linux_$ARCH_*/azcopy /usr/local/bin/azcopy && \ diff --git a/test/sanity/run-test.sh b/test/sanity/run-test.sh index 7462ee4d4..17b37e1d2 100755 --- a/test/sanity/run-test.sh +++ b/test/sanity/run-test.sh @@ -36,7 +36,7 @@ azcopyPath="/usr/local/bin/azcopy" if [ ! -f "$azcopyPath" ]; then azcopyTarFile="azcopy.tar.gz" echo 'Downloading azcopy...' - wget -O $azcopyTarFile azcopyvnext.azureedge.net/releases/release-10.22.2-20240110/azcopy_linux_amd64_10.22.2.tar.gz + wget -O $azcopyTarFile azcopyvnext.azureedge.net/releases/release-10.23.0-20240129/azcopy_linux_amd64_10.23.0.tar.gz tar -zxvf $azcopyTarFile mv ./azcopy*/azcopy /usr/local/bin/azcopy rm -rf ./$azcopyTarFile From a1251ed8da8c54653afaf111cfd8fd7018cb8e11 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Mon, 18 Mar 2024 02:45:24 +0000 Subject: [PATCH 094/157] chore: only use sas token for azcopy when useDataPlaneAPI is used --- pkg/blob/blob.go | 2 +- pkg/blob/controllerserver.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/blob/blob.go b/pkg/blob/blob.go index 866e9cb4d..286aa8c61 100644 --- a/pkg/blob/blob.go +++ b/pkg/blob/blob.go @@ -285,7 +285,7 @@ func NewDriver(options *DriverOptions, kubeClient kubernetes.Interface, cloud *p if d.accountSearchCache, err = azcache.NewTimedCache(time.Minute, getter, false); err != nil { klog.Fatalf("%v", err) } - if d.dataPlaneAPIVolCache, err = azcache.NewTimedCache(10*time.Minute, getter, false); err != nil { + if d.dataPlaneAPIVolCache, err = azcache.NewTimedCache(24*30*time.Hour, getter, false); err != nil { klog.Fatalf("%v", err) } if d.azcopySasTokenCache, err = azcache.NewTimedCache(15*time.Minute, getter, false); err != nil { diff --git a/pkg/blob/controllerserver.go b/pkg/blob/controllerserver.go index ae469954e..59fef1356 100644 --- a/pkg/blob/controllerserver.go +++ b/pkg/blob/controllerserver.go @@ -844,7 +844,7 @@ func (d *Driver) authorizeAzcopyWithIdentity() ([]string, error) { func (d *Driver) getAzcopyAuth(ctx context.Context, accountName, accountKey, storageEndpointSuffix string, accountOptions *azure.AccountOptions, secrets map[string]string, secretName, secretNamespace string) (string, []string, error) { var authAzcopyEnv []string useSasToken := false - if len(secrets) == 0 && len(secretName) == 0 { + if !d.useDataPlaneAPI("", accountName) && len(secrets) == 0 && len(secretName) == 0 { var err error authAzcopyEnv, err = d.authorizeAzcopyWithIdentity() if err != nil { From 122c943646b2012d5ca0f428704abb5c144c9a98 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Mon, 18 Mar 2024 02:58:16 +0000 Subject: [PATCH 095/157] fix: fix nfs mount stuck issue --- pkg/blob/nodeserver.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pkg/blob/nodeserver.go b/pkg/blob/nodeserver.go index 8fe9dadf6..2992dc88c 100644 --- a/pkg/blob/nodeserver.go +++ b/pkg/blob/nodeserver.go @@ -32,7 +32,6 @@ import ( "github.com/container-storage-interface/spec/lib/go/csi" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/volume" 
"k8s.io/kubernetes/pkg/volume/util" @@ -352,9 +351,9 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe source := fmt.Sprintf("%s:/%s/%s", serverAddress, accountName, containerName) mountOptions := util.JoinMountOptions(mountFlags, []string{"sec=sys,vers=3,nolock"}) - if err := wait.PollImmediate(1*time.Second, 2*time.Minute, func() (bool, error) { - return true, d.mounter.MountSensitive(source, targetPath, mountType, mountOptions, []string{}) - }); err != nil { + execFunc := func() error { return d.mounter.MountSensitive(source, targetPath, mountType, mountOptions, []string{}) } + timeoutFunc := func() error { return fmt.Errorf("time out") } + if err := volumehelper.WaitForExecCompletion(2*time.Minute, execFunc, timeoutFunc); err != nil { var helpLinkMsg string if d.appendMountErrorHelpLink { helpLinkMsg = "\nPlease refer to http://aka.ms/blobmounterror for possible causes and solutions for mount errors." From 5febe4da132745366a08ff8d9519c6ef4cce255f Mon Sep 17 00:00:00 2001 From: Fan Shang Xiang Date: Mon, 18 Mar 2024 15:41:51 +0800 Subject: [PATCH 096/157] Add group update policy for k8s.io dependencies --- .github/dependabot.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml index b6331fddb..75fe5d98e 100644 --- a/.github/dependabot.yaml +++ b/.github/dependabot.yaml @@ -9,6 +9,14 @@ updates: - "release-note-none" - "ok-to-test" open-pull-requests-limit: 1 + groups: + k8s: + applies-to: version-updates + patterns: + - "k8s.io*" + update-types: + - "patch" + - "minor" - package-ecosystem: "github-actions" directory: "/" schedule: From b3c9100b1d499674ef50b85a82d032f2ab23dd99 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Mon, 18 Mar 2024 22:21:32 +0800 Subject: [PATCH 097/157] Update e2e_usage.md --- deploy/example/e2e_usage.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/example/e2e_usage.md b/deploy/example/e2e_usage.md index 2f60f4fe9..655572706 100644 --- a/deploy/example/e2e_usage.md +++ b/deploy/example/e2e_usage.md @@ -9,7 +9,6 @@ kubectl create -f https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-dri ``` #### Option#2: bring your own storage account - > only available from `v0.9.0` > This option does not depend on cloud provider config file, supports cross subscription and on-premise cluster scenario. - Use `kubectl create secret` to create `azure-secret` with existing storage account name and key ```console @@ -133,7 +132,8 @@ blobfuse 14G 41M 13G 1% /mnt/blob In the above example, there is a `/mnt/blob` directory mounted as `blobfuse` filesystem. #### Option#3: Inline volume - > to avoid performance issue, use persistent volume instead of inline volume when numerous pods are accessing the same volume. + > - inline volume does not support nfs protocol + > - to avoid performance issue, use persistent volume instead of inline volume when numerous pods are accessing the same volume. 
- in below blobfuse mount example, create `azure-secret` with existing storage account name and key in the same namespace as pod, both secret and pod are in `default` namespace ```console kubectl create secret generic azure-secret --from-literal azurestorageaccountname=NAME --from-literal azurestorageaccountkey="KEY" --type=Opaque From 1d127708503dc7b2b8d2549a15be020eea807434 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Mar 2024 16:12:16 +0000 Subject: [PATCH 098/157] chore(deps): bump github.com/onsi/ginkgo/v2 from 2.16.0 to 2.17.0 Bumps [github.com/onsi/ginkgo/v2](https://github.com/onsi/ginkgo) from 2.16.0 to 2.17.0. - [Release notes](https://github.com/onsi/ginkgo/releases) - [Changelog](https://github.com/onsi/ginkgo/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/ginkgo/compare/v2.16.0...v2.17.0) --- updated-dependencies: - dependency-name: github.com/onsi/ginkgo/v2 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 +- vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 11 +++++ vendor/github.com/onsi/ginkgo/v2/core_dsl.go | 4 +- .../ginkgo/v2/reporters/default_reporter.go | 47 +++++++++++++------ .../github.com/onsi/ginkgo/v2/types/config.go | 3 ++ .../onsi/ginkgo/v2/types/version.go | 2 +- vendor/modules.txt | 2 +- 8 files changed, 53 insertions(+), 22 deletions(-) diff --git a/go.mod b/go.mod index 60ef76a63..7bb912277 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/golang/protobuf v1.5.4 github.com/kubernetes-csi/csi-lib-utils v0.16.0 - github.com/onsi/ginkgo/v2 v2.16.0 + github.com/onsi/ginkgo/v2 v2.17.0 github.com/onsi/gomega v1.31.1 github.com/pborman/uuid v1.2.1 github.com/pelletier/go-toml v1.9.5 diff --git a/go.sum b/go.sum index a4b8006d8..0ea7b62ce 100644 --- a/go.sum +++ b/go.sum @@ -207,8 +207,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.16.0 h1:7q1w9frJDzninhXxjZd+Y/x54XNjG/UlRLIYPZafsPM= -github.com/onsi/ginkgo/v2 v2.16.0/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/ginkgo/v2 v2.17.0 h1:kdnunFXpBjbzN56hcJHrXZ8M+LOkenKA7NnBzTNigTI= +github.com/onsi/ginkgo/v2 v2.17.0/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo= github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index b79ee9c5a..57992854b 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,14 @@ +## 2.17.0 + +### Features + +- add `--github-output` for nicer output in github actions [e8a2056] + +### Maintenance + +- fix typo in core_dsl.go [977bc6f] +- Fix typo in docs [e297e7b] + ## 2.16.0 ### Features diff --git 
a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go index 0e633f309..a3e8237e9 100644 --- a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go @@ -792,8 +792,8 @@ DeferCleanup can be passed: For example: BeforeEach(func() { - DeferCleanup(os.SetEnv, "FOO", os.GetEnv("FOO")) - os.SetEnv("FOO", "BAR") + DeferCleanup(os.Setenv, "FOO", os.GetEnv("FOO")) + os.Setenv("FOO", "BAR") }) will register a cleanup handler that will set the environment variable "FOO" to its current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec. diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 56b7be758..4026859ec 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -182,6 +182,22 @@ func (r *DefaultReporter) WillRun(report types.SpecReport) { r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false))) } +func (r *DefaultReporter) wrapTextBlock(sectionName string, fn func()) { + r.emitBlock("\n") + if r.conf.GithubOutput { + r.emitBlock(r.fi(1, "::group::%s", sectionName)) + } else { + r.emitBlock(r.fi(1, "{{gray}}%s >>{{/}}", sectionName)) + } + fn() + if r.conf.GithubOutput { + r.emitBlock(r.fi(1, "::endgroup::")) + } else { + r.emitBlock(r.fi(1, "{{gray}}<< %s{{/}}", sectionName)) + } + +} + func (r *DefaultReporter) DidRun(report types.SpecReport) { v := r.conf.Verbosity() inParallel := report.RunningInParallel @@ -283,26 +299,23 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { //Emit Stdout/Stderr Output if showSeparateStdSection { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}")) - r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) - r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}")) + r.wrapTextBlock("Captured StdOut/StdErr Output", func() { + r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) + }) } if showSeparateVisibilityAlwaysReportsSection { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}")) - for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { - r.emitReportEntry(1, entry) - } - r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}")) + r.wrapTextBlock("Report Entries", func() { + for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { + r.emitReportEntry(1, entry) + } + }) } if showTimeline { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}")) - r.emitTimeline(1, report, timeline) - r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}")) + r.wrapTextBlock("Timeline", func() { + r.emitTimeline(1, report, timeline) + }) } // Emit Failure Message @@ -405,7 +418,11 @@ func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, f func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) { highlightColor := r.highlightColorForState(state) r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message)) - r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, 
failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + if r.conf.GithubOutput { + r.emitBlock(r.fi(indent, "::error file=%s,line=%d::%s %s", failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } else { + r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } if failure.ForwardedPanic != "" { r.emitBlock("\n") r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic)) diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index c88fc85a7..7c82065df 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -89,6 +89,7 @@ type ReporterConfig struct { VeryVerbose bool FullTrace bool ShowNodeEvents bool + GithubOutput bool JSONReport string JUnitReport string @@ -331,6 +332,8 @@ var ReporterConfigFlags = GinkgoFlags{ Usage: "If set, default reporter prints out the full stack trace when a failure occurs"}, {KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output", Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"}, + {KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output", + Usage: "If set, default reporter prints easier to manage output in Github Actions."}, {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 675f8db2f..ccd6516fa 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.16.0" +const VERSION = "2.17.0" diff --git a/vendor/modules.txt b/vendor/modules.txt index c77a89c12..12536e2a4 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -344,7 +344,7 @@ github.com/munnerz/goautoneg # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f ## explicit github.com/mxk/go-flowrate/flowrate -# github.com/onsi/ginkgo/v2 v2.16.0 +# github.com/onsi/ginkgo/v2 v2.17.0 ## explicit; go 1.20 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config From 7c34ac2ebd33851fc5ea46e32540c2a261173820 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Mar 2024 01:20:42 +0000 Subject: [PATCH 099/157] chore(deps): bump github.com/onsi/gomega from 1.31.1 to 1.32.0 Bumps [github.com/onsi/gomega](https://github.com/onsi/gomega) from 1.31.1 to 1.32.0. - [Release notes](https://github.com/onsi/gomega/releases) - [Changelog](https://github.com/onsi/gomega/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/gomega/compare/v1.31.1...v1.32.0) --- updated-dependencies: - dependency-name: github.com/onsi/gomega dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- vendor/github.com/onsi/gomega/CHANGELOG.md | 13 +++++++++++++ vendor/github.com/onsi/gomega/gomega_dsl.go | 2 +- vendor/modules.txt | 2 +- 5 files changed, 18 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 7bb912277..384b12150 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/golang/protobuf v1.5.4 github.com/kubernetes-csi/csi-lib-utils v0.16.0 github.com/onsi/ginkgo/v2 v2.17.0 - github.com/onsi/gomega v1.31.1 + github.com/onsi/gomega v1.32.0 github.com/pborman/uuid v1.2.1 github.com/pelletier/go-toml v1.9.5 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index 0ea7b62ce..d92ba9033 100644 --- a/go.sum +++ b/go.sum @@ -209,8 +209,8 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo/v2 v2.17.0 h1:kdnunFXpBjbzN56hcJHrXZ8M+LOkenKA7NnBzTNigTI= github.com/onsi/ginkgo/v2 v2.17.0/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= -github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo= -github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0= +github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= +github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 9a14b8151..01ec5245c 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,16 @@ +## 1.32.0 + +### Maintenance +- Migrate github.com/golang/protobuf to google.golang.org/protobuf [436a197] + + This release drops the deprecated github.com/golang/protobuf and adopts google.golang.org/protobuf. Care was taken to ensure the release is backwards compatible (thanks @jbduncan !). Please open an issue if you run into one. + +- chore: test with Go 1.22 (#733) [32ef35e] +- Bump golang.org/x/net from 0.19.0 to 0.20.0 (#717) [a0d0387] +- Bump github-pages and jekyll-feed in /docs (#732) [b71e477] +- docs: fix typo and broken anchor link to gstruct [f460154] +- docs: fix HaveEach matcher signature [a2862e4] + ## 1.31.1 ### Fixes diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 5b46a1658..ffb81b1fe 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.31.1" +const GOMEGA_VERSION = "1.32.0" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). 
diff --git a/vendor/modules.txt b/vendor/modules.txt index 12536e2a4..9d531166e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -366,7 +366,7 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.31.1 +# github.com/onsi/gomega v1.32.0 ## explicit; go 1.20 github.com/onsi/gomega github.com/onsi/gomega/format From 00591e092445607eeba1c61cda81bffcf71c0502 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Tue, 19 Mar 2024 07:31:51 +0000 Subject: [PATCH 100/157] cleanup: rename WaitUntilTimeout func --- pkg/blob/controllerserver.go | 2 +- pkg/blob/nodeserver.go | 2 +- pkg/util/util.go | 4 ++-- pkg/util/util_test.go | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/blob/controllerserver.go b/pkg/blob/controllerserver.go index 59fef1356..cf5213865 100644 --- a/pkg/blob/controllerserver.go +++ b/pkg/blob/controllerserver.go @@ -783,7 +783,7 @@ func (d *Driver) copyBlobContainer(req *csi.CreateVolumeRequest, accountSasToken _, percent, _ := d.azcopy.GetAzcopyJob(dstContainerName, authAzcopyEnv) return fmt.Errorf("timeout waiting for copy blob container %s to %s complete, current copy percent: %s%%", srcContainerName, dstContainerName, percent) } - copyErr := util.WaitForExecCompletion(time.Duration(d.waitForAzCopyTimeoutMinutes)*time.Minute, execFunc, timeoutFunc) + copyErr := util.WaitUntilTimeout(time.Duration(d.waitForAzCopyTimeoutMinutes)*time.Minute, execFunc, timeoutFunc) if copyErr != nil { klog.Warningf("CopyBlobContainer(%s, %s, %s) failed with error: %v", resourceGroupName, accountName, dstPath, copyErr) } else { diff --git a/pkg/blob/nodeserver.go b/pkg/blob/nodeserver.go index 2992dc88c..61f78bd7d 100644 --- a/pkg/blob/nodeserver.go +++ b/pkg/blob/nodeserver.go @@ -353,7 +353,7 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe mountOptions := util.JoinMountOptions(mountFlags, []string{"sec=sys,vers=3,nolock"}) execFunc := func() error { return d.mounter.MountSensitive(source, targetPath, mountType, mountOptions, []string{}) } timeoutFunc := func() error { return fmt.Errorf("time out") } - if err := volumehelper.WaitForExecCompletion(2*time.Minute, execFunc, timeoutFunc); err != nil { + if err := volumehelper.WaitUntilTimeout(2*time.Minute, execFunc, timeoutFunc); err != nil { var helpLinkMsg string if d.appendMountErrorHelpLink { helpLinkMsg = "\nPlease refer to http://aka.ms/blobmounterror for possible causes and solutions for mount errors." 
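For reference, the renamed WaitUntilTimeout helper is what now bounds both the azcopy container copy and the NFS mount call shown above. A condensed, self-contained sketch of the same run-with-deadline pattern follows; the inlined function, the placeholder slow operation, and the one-second deadline are assumptions for illustration rather than code lifted from pkg/util:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitUntilTimeout mirrors the shape of the exported helper: run execFunc in a
// goroutine, return its error if it finishes in time, otherwise return whatever
// timeoutFunc reports. The buffered channel lets the goroutine finish in the
// background after a timeout instead of blocking forever.
func waitUntilTimeout(timeout time.Duration, execFunc, timeoutFunc func() error) error {
	done := make(chan error, 1)
	go func() { done <- execFunc() }()
	select {
	case err := <-done:
		return err
	case <-time.After(timeout):
		return timeoutFunc()
	}
}

func main() {
	// Hypothetical slow operation standing in for the mount / azcopy calls the
	// driver wraps with this helper.
	slowOp := func() error {
		time.Sleep(3 * time.Second)
		return nil
	}
	onTimeout := func() error { return errors.New("operation timed out") }

	// Prints "operation timed out" because the deadline is shorter than slowOp.
	fmt.Println(waitUntilTimeout(1*time.Second, slowOp, onTimeout))
}
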
diff --git a/pkg/util/util.go b/pkg/util/util.go index bb5e60052..75bd3d583 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -395,8 +395,8 @@ type ExecFunc func() (err error) // TimeoutFunc returns output and error if an ExecFunc timeout type TimeoutFunc func() (err error) -// WaitForExecCompletion waits for the exec function to complete or return timeout error -func WaitForExecCompletion(timeout time.Duration, execFunc ExecFunc, timeoutFunc TimeoutFunc) error { +// WaitUntilTimeout waits for the exec function to complete or return timeout error +func WaitUntilTimeout(timeout time.Duration, execFunc ExecFunc, timeoutFunc TimeoutFunc) error { // Create a channel to receive the result of the azcopy exec function done := make(chan bool) var err error diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go index 3cec59e5a..d2d0b6083 100644 --- a/pkg/util/util_test.go +++ b/pkg/util/util_test.go @@ -657,7 +657,7 @@ func TestSetVolumeOwnership(t *testing.T) { } } -func TestWaitForExecCompletion(t *testing.T) { +func TestWaitUntilTimeout(t *testing.T) { tests := []struct { desc string timeout time.Duration @@ -702,7 +702,7 @@ func TestWaitForExecCompletion(t *testing.T) { } for _, test := range tests { - err := WaitForExecCompletion(test.timeout, test.execFunc, test.timeoutFunc) + err := WaitUntilTimeout(test.timeout, test.execFunc, test.timeoutFunc) if err != nil && (err.Error() != test.expectedErr.Error()) { t.Errorf("unexpected error: %v, expected error: %v", err, test.expectedErr) } From ffac3872026f137abe604085886d70fb8aef9f43 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Tue, 19 Mar 2024 13:09:21 +0000 Subject: [PATCH 101/157] chore: refine refine private endpoint to reduce error messages --- go.mod | 12 +++---- go.sum | 12 +++---- vendor/modules.txt | 12 +++---- .../cloud-provider-azure/pkg/azclient/auth.go | 14 ++++++++ .../pkg/azclient/cloud.go | 8 +++++ .../virtualmachineclient/interface.go | 2 +- .../cloud-provider-azure/pkg/consts/consts.go | 2 ++ .../pkg/provider/azure_storageaccount.go | 36 ++++++++++--------- 8 files changed, 63 insertions(+), 35 deletions(-) diff --git a/go.mod b/go.mod index 384b12150..505b99252 100644 --- a/go.mod +++ b/go.mod @@ -29,16 +29,16 @@ require ( golang.org/x/net v0.22.0 google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.33.0 - k8s.io/api v0.29.2 - k8s.io/apimachinery v0.29.2 - k8s.io/client-go v0.29.2 + k8s.io/api v0.29.3 + k8s.io/apimachinery v0.29.3 + k8s.io/client-go v0.29.3 k8s.io/component-base v0.29.2 k8s.io/klog/v2 v2.120.1 k8s.io/kubernetes v1.29.0 k8s.io/mount-utils v0.29.0 k8s.io/utils v0.0.0-20231127182322-b307cd553661 - sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240308123135-28150a3f23d8 - sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240308123135-28150a3f23d8 + sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc + sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240319093822-58cad130d9dc sigs.k8s.io/yaml v1.4.0 ) @@ -156,7 +156,7 @@ require ( k8s.io/kubelet v0.29.2 // indirect k8s.io/pod-security-admission v0.29.0 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect - sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240218020800-ba9f211f36bf + sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240312050048-978ce44d3559 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index d92ba9033..bb0dd2cdb 100644 --- a/go.sum +++ 
b/go.sum @@ -462,12 +462,12 @@ k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6R k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= -sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240308123135-28150a3f23d8 h1:rY8tqbl7oTnsMVdrRshRfWbtrVNAJ6wPjJvt/iecNvI= -sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240308123135-28150a3f23d8/go.mod h1:7YA0Tq3FmmhaMfVY/2VH+vZgsRsYgbtE3P5+a6UOyAk= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240218020800-ba9f211f36bf h1:pyQe3MFQp1Hb8iya4LaZiBEh3M0RgZ/yv0/ws5memP4= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240218020800-ba9f211f36bf/go.mod h1:cyVxBbpj/UR4Vi4KhxO3vhzBB2JcN08TngoPQ0J0aWc= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240308123135-28150a3f23d8 h1:8vkgKNPp/DScHQCe8xnch5DaNNZAj2Mw93Mkx1CXSB4= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240308123135-28150a3f23d8/go.mod h1:Lgaq/yv2awggDs6zFF85347o7L1ucRKx57b55qjh470= +sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc h1:y/jDBU+4EcPiHGPMQeK6B4IKEhOp3CCts/ZaRH1sa3w= +sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc/go.mod h1:nS6tUQgopFVyqFQW1P/luDXNNQfsQOg3WFs9GhzNjm8= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240312050048-978ce44d3559 h1:F+99oLx87DQfrS4lstZ1Sp+88ADqzsmb9H383BhLFN0= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240312050048-978ce44d3559/go.mod h1:JqLgOmF8R3ymzVYrSMXMssDVmbzSVrj7olvrQUffMkI= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240319093822-58cad130d9dc h1:YHsVYAF6cioowCy+5vp4d3/GVHyypMhFkc6FgBF5E+g= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240319093822-58cad130d9dc/go.mod h1:pPkJPx/eMVWP3R+LhPoOYGoY7lywcMJev5L2uSfH+Jo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/vendor/modules.txt b/vendor/modules.txt index 9d531166e..c579cdc32 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -757,7 +757,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.29.2 => k8s.io/api v0.29.0 +# k8s.io/api v0.29.3 => k8s.io/api v0.29.0 ## explicit; go 1.21 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -818,7 +818,7 @@ k8s.io/api/storage/v1beta1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/features -# k8s.io/apimachinery v0.29.2 => k8s.io/apimachinery v0.29.0 +# k8s.io/apimachinery v0.29.3 => k8s.io/apimachinery v0.29.0 ## explicit; go 1.21 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -1029,7 +1029,7 @@ k8s.io/apiserver/plugin/pkg/audit/truncate k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/client-go v0.29.2 => k8s.io/client-go v0.29.0 +# k8s.io/client-go v0.29.3 => k8s.io/client-go v0.29.0 ## explicit; go 1.21 
k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -1555,7 +1555,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client -# sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240308123135-28150a3f23d8 +# sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc ## explicit; go 1.21 sigs.k8s.io/cloud-provider-azure/pkg/azureclients sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient @@ -1613,7 +1613,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/util/string sigs.k8s.io/cloud-provider-azure/pkg/util/taints sigs.k8s.io/cloud-provider-azure/pkg/util/vm sigs.k8s.io/cloud-provider-azure/pkg/version -# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240218020800-ba9f211f36bf +# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240312050048-978ce44d3559 ## explicit; go 1.20 sigs.k8s.io/cloud-provider-azure/pkg/azclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient @@ -1655,7 +1655,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient -# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240308123135-28150a3f23d8 +# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240319093822-58cad130d9dc ## explicit; go 1.21 sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader # sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth.go index 5c519c141..503efd3db 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth.go @@ -128,6 +128,20 @@ func NewAuthProvider(armConfig *ARMClientConfig, config *AzureAuthConfig, client if err != nil { return nil, err } + if len(armConfig.NetworkResourceTenantID) > 0 && !strings.EqualFold(armConfig.NetworkResourceTenantID, armConfig.GetTenantID()) { + networkClientSecretCredential, err = azidentity.NewClientCertificateCredential(armConfig.NetworkResourceTenantID, config.GetAADClientID(), certificate, privateKey, credOptions) + if err != nil { + return nil, err + } + credOptions = &azidentity.ClientCertificateCredentialOptions{ + ClientOptions: *clientOption, + AdditionallyAllowedTenants: []string{armConfig.NetworkResourceTenantID}, + } + multiTenantCredential, err = azidentity.NewClientCertificateCredential(armConfig.GetTenantID(), config.GetAADClientID(), certificate, privateKey, credOptions) + if err != nil { + return nil, err + } + } } return &AuthProvider{ diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/cloud.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/cloud.go index b88e88049..2e7d88324 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/cloud.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/cloud.go @@ -77,6 +77,14 @@ func AzureCloudConfigFromURL(endpoint string) (*cloud.Configuration, error) { } if len(metadata) > 0 { + // We use the endpoint to build our config, but on ASH the 
config returned + // does not contain the endpoint, and this is not accounted for. This + // ultimately unsets it for the returned config, causing the bootstrap of + // the provider to fail. Instead, check if the endpoint is returned, and if + // it is not then set it. + if len(metadata[0].ResourceManager) == 0 { + metadata[0].ResourceManager = endpoint + } return &cloud.Configuration{ ActiveDirectoryAuthorityHost: metadata[0].Authentication.LoginEndpoint, Services: map[cloud.ServiceName]cloud.ServiceConfiguration{ diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/interface.go index ade11e4e3..b8ae4eebe 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/interface.go @@ -34,6 +34,6 @@ type Interface interface { utils.ListFunc[armcompute.VirtualMachine] InstanceView(ctx context.Context, resourceGroupName string, vmName string) (*armcompute.VirtualMachineInstanceView, error) ListVMInstanceView(ctx context.Context, resourceGroupName string) (result []*armcompute.VirtualMachine, rerr error) - BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, vmName string, parameters armcompute.VirtualMachine, options *armcompute.VirtualMachinesClientBeginCreateOrUpdateOptions) (*runtime.Poller[armcompute.VirtualMachinesClientCreateOrUpdateResponse], error) BeginAttachDetachDataDisks(ctx context.Context, resourceGroupName string, vmName string, parameters armcompute.AttachDetachDataDisksRequest, options *armcompute.VirtualMachinesClientBeginAttachDetachDataDisksOptions) (*runtime.Poller[armcompute.VirtualMachinesClientAttachDetachDataDisksResponse], error) + BeginUpdate(ctx context.Context, resourceGroupName string, vmName string, parameters armcompute.VirtualMachineUpdate, options *armcompute.VirtualMachinesClientBeginUpdateOptions) (*runtime.Poller[armcompute.VirtualMachinesClientUpdateResponse], error) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go index 75bd1708b..f9692c201 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go @@ -405,6 +405,8 @@ const ( ReferencedResourceNotProvisionedMessageCode = "ReferencedResourceNotProvisioned" // ParentResourceNotFoundMessageCode is the error code that the parent VMSS of the VM is not found. ParentResourceNotFoundMessageCode = "ParentResourceNotFound" + // ResourceNotFoundMessageCode is the error code that the resource is not found. + ResourceNotFoundMessageCode = "ResourceNotFound" // ConcurrentRequestConflictMessage is the error message that the request failed due to the conflict with another concurrent operation. ConcurrentRequestConflictMessage = "The request failed due to conflict with a concurrent request." 
// CannotUpdateVMBeingDeletedMessagePrefix is the prefix of the error message that the request failed due to delete a VM that is being deleted diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go index af3574bd4..372353cfa 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go @@ -33,7 +33,6 @@ import ( "k8s.io/klog/v2" "k8s.io/utils/pointer" - "sigs.k8s.io/cloud-provider-azure/pkg/azclient" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient" "sigs.k8s.io/cloud-provider-azure/pkg/cache" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" @@ -348,22 +347,29 @@ func (az *Cloud) EnsureStorageAccount(ctx context.Context, accountOptions *Accou if pointer.BoolDeref(accountOptions.CreatePrivateEndpoint, false) { clientFactory := az.NetworkClientFactory if clientFactory == nil { + // multi-tenant support clientFactory = az.ComputeClientFactory } if _, err := clientFactory.GetPrivateZoneClient().Get(ctx, vnetResourceGroup, privateDNSZoneName); err != nil { - klog.V(2).Infof("get private dns zone %s returned with %v", privateDNSZoneName, err.Error()) - // Create DNS zone first, this could make sure driver has write permission on vnetResourceGroup - if err := az.createPrivateDNSZone(ctx, vnetResourceGroup, privateDNSZoneName); err != nil { - return "", "", fmt.Errorf("create private DNS zone(%s) in resourceGroup(%s): %w", privateDNSZoneName, vnetResourceGroup, err) + if strings.Contains(err.Error(), consts.ResourceNotFoundMessageCode) { + // Create DNS zone first, this could make sure driver has write permission on vnetResourceGroup + if err := az.createPrivateDNSZone(ctx, vnetResourceGroup, privateDNSZoneName); err != nil { + return "", "", fmt.Errorf("create private DNS zone(%s) in resourceGroup(%s): %w", privateDNSZoneName, vnetResourceGroup, err) + } + } else { + return "", "", fmt.Errorf("get private dns zone %s returned with %v", privateDNSZoneName, err.Error()) } } // Create virtual link to the private DNS zone vNetLinkName := vnetName + "-vnetlink" if _, err := clientFactory.GetVirtualNetworkLinkClient().Get(ctx, vnetResourceGroup, privateDNSZoneName, vNetLinkName); err != nil { - klog.V(2).Infof("get virtual link for vnet(%s) and DNS Zone(%s) returned with %v", vnetName, privateDNSZoneName, err.Error()) - if err := az.createVNetLink(ctx, vNetLinkName, vnetResourceGroup, vnetName, privateDNSZoneName); err != nil { - return "", "", fmt.Errorf("create virtual link for vnet(%s) and DNS Zone(%s) in resourceGroup(%s): %w", vnetName, privateDNSZoneName, vnetResourceGroup, err) + if strings.Contains(err.Error(), consts.ResourceNotFoundMessageCode) { + if err := az.createVNetLink(ctx, vNetLinkName, vnetResourceGroup, vnetName, privateDNSZoneName); err != nil { + return "", "", fmt.Errorf("create virtual link for vnet(%s) and DNS Zone(%s) in resourceGroup(%s): %w", vnetName, privateDNSZoneName, vnetResourceGroup, err) + } + } else { + return "", "", fmt.Errorf("get virtual link for vnet(%s) and DNS Zone(%s) in resourceGroup(%s) returned with %w", vnetName, privateDNSZoneName, vnetResourceGroup, err) } } } @@ -615,10 +621,9 @@ func (az *Cloud) createPrivateDNSZone(ctx context.Context, vnetResourceGroup, pr klog.V(2).Infof("Creating private dns zone(%s) in resourceGroup (%s)", privateDNSZoneName, vnetResourceGroup) location := LocationGlobal privateDNSZone := 
privatedns.PrivateZone{Location: &location} - var clientFactory azclient.ClientFactory - if az.NetworkClientFactory != nil { - clientFactory = az.NetworkClientFactory - } else { + clientFactory := az.NetworkClientFactory + if clientFactory == nil { + // multi-tenant support clientFactory = az.ComputeClientFactory } privatednsclient := clientFactory.GetPrivateZoneClient() @@ -635,10 +640,9 @@ func (az *Cloud) createPrivateDNSZone(ctx context.Context, vnetResourceGroup, pr func (az *Cloud) createVNetLink(ctx context.Context, vNetLinkName, vnetResourceGroup, vnetName, privateDNSZoneName string) error { klog.V(2).Infof("Creating virtual link for vnet(%s) and DNS Zone(%s) in resourceGroup(%s)", vNetLinkName, privateDNSZoneName, vnetResourceGroup) - var clientFactory azclient.ClientFactory - if az.NetworkClientFactory != nil { - clientFactory = az.NetworkClientFactory - } else { + clientFactory := az.NetworkClientFactory + if clientFactory == nil { + // multi-tenant support clientFactory = az.ComputeClientFactory } vnetLinkClient := clientFactory.GetVirtualNetworkLinkClient() From 3640bafc88aebf150b1a65cc72893f607744430d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Mar 2024 16:30:27 +0000 Subject: [PATCH 102/157] chore(deps): bump sigs.k8s.io/cloud-provider-azure/pkg/azclient Bumps [sigs.k8s.io/cloud-provider-azure/pkg/azclient](https://github.com/kubernetes-sigs/cloud-provider-azure) from 0.0.0-20240312050048-978ce44d3559 to 0.0.2. - [Release notes](https://github.com/kubernetes-sigs/cloud-provider-azure/releases) - [Changelog](https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/docs/release-versioning.md) - [Commits](https://github.com/kubernetes-sigs/cloud-provider-azure/commits/pkg/azclient/v0.0.2) --- updated-dependencies: - dependency-name: sigs.k8s.io/cloud-provider-azure/pkg/azclient dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 8 ++++---- vendor/modules.txt | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 505b99252..2b00cc1fb 100644 --- a/go.mod +++ b/go.mod @@ -156,7 +156,7 @@ require ( k8s.io/kubelet v0.29.2 // indirect k8s.io/pod-security-admission v0.29.0 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect - sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240312050048-978ce44d3559 + sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.2 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index bb0dd2cdb..b51a78a4e 100644 --- a/go.sum +++ b/go.sum @@ -408,8 +408,8 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/dnaeon/go-vcr.v3 v3.1.2 h1:F1smfXBqQqwpVifDfUBQG6zzaGjzT+EnVZakrOdr5wA= -gopkg.in/dnaeon/go-vcr.v3 v3.1.2/go.mod h1:2IMOnnlx9I6u9x+YBsM3tAMx6AlOxnJ0pWxQAzZ79Ag= +gopkg.in/dnaeon/go-vcr.v3 v3.2.0 h1:Rltp0Vf+Aq0u4rQXgmXgtgoRDStTnFN83cWgSGSoRzM= +gopkg.in/dnaeon/go-vcr.v3 v3.2.0/go.mod h1:2IMOnnlx9I6u9x+YBsM3tAMx6AlOxnJ0pWxQAzZ79Ag= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= @@ -464,8 +464,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2S sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc h1:y/jDBU+4EcPiHGPMQeK6B4IKEhOp3CCts/ZaRH1sa3w= sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc/go.mod h1:nS6tUQgopFVyqFQW1P/luDXNNQfsQOg3WFs9GhzNjm8= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240312050048-978ce44d3559 h1:F+99oLx87DQfrS4lstZ1Sp+88ADqzsmb9H383BhLFN0= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240312050048-978ce44d3559/go.mod h1:JqLgOmF8R3ymzVYrSMXMssDVmbzSVrj7olvrQUffMkI= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.2 h1:9Zp+uWnxdUOoy/FaQK1DjPfLrzw8TyMEf0aaQ0dtq6c= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.2/go.mod h1:JKWYkoOyET3wsN0Kk9WxA+zpopkuCy+v4+mrnJ60Yyk= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240319093822-58cad130d9dc h1:YHsVYAF6cioowCy+5vp4d3/GVHyypMhFkc6FgBF5E+g= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240319093822-58cad130d9dc/go.mod h1:pPkJPx/eMVWP3R+LhPoOYGoY7lywcMJev5L2uSfH+Jo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/vendor/modules.txt b/vendor/modules.txt index c579cdc32..e9a99acf3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1613,7 +1613,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/util/string sigs.k8s.io/cloud-provider-azure/pkg/util/taints sigs.k8s.io/cloud-provider-azure/pkg/util/vm sigs.k8s.io/cloud-provider-azure/pkg/version -# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20240312050048-978ce44d3559 +# 
sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.2 ## explicit; go 1.20 sigs.k8s.io/cloud-provider-azure/pkg/azclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient From 944e465c45d087e211e4f2cfc6293946ca1156b3 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Wed, 20 Mar 2024 06:16:26 +0000 Subject: [PATCH 103/157] chore: refine sastoken cache refine --- pkg/blob/controllerserver.go | 48 +++++++++++++++++-------------- pkg/blob/controllerserver_test.go | 3 +- 2 files changed, 29 insertions(+), 22 deletions(-) diff --git a/pkg/blob/controllerserver.go b/pkg/blob/controllerserver.go index cf5213865..5d72f091e 100644 --- a/pkg/blob/controllerserver.go +++ b/pkg/blob/controllerserver.go @@ -843,46 +843,40 @@ func (d *Driver) authorizeAzcopyWithIdentity() ([]string, error) { // 3. azcopy returns AuthorizationPermissionMismatch error when using service principal or managed identity func (d *Driver) getAzcopyAuth(ctx context.Context, accountName, accountKey, storageEndpointSuffix string, accountOptions *azure.AccountOptions, secrets map[string]string, secretName, secretNamespace string) (string, []string, error) { var authAzcopyEnv []string + var err error useSasToken := false if !d.useDataPlaneAPI("", accountName) && len(secrets) == 0 && len(secretName) == 0 { - var err error + // search in cache first + if cache, err := d.azcopySasTokenCache.Get(accountName, azcache.CacheReadTypeDefault); err == nil && cache != nil { + klog.V(2).Infof("use sas token for account(%s) since this account is found in azcopySasTokenCache", accountName) + return cache.(string), nil, nil + } + authAzcopyEnv, err = d.authorizeAzcopyWithIdentity() if err != nil { klog.Warningf("failed to authorize azcopy with identity, error: %v", err) } else { if len(authAzcopyEnv) > 0 { - // search in cache first - cache, err := d.azcopySasTokenCache.Get(accountName, azcache.CacheReadTypeDefault) - if err != nil { - return "", nil, fmt.Errorf("get(%s) from azcopySasTokenCache failed with error: %v", accountName, err) + out, testErr := d.azcopy.TestListJobs(accountName, storageEndpointSuffix, authAzcopyEnv) + if testErr != nil { + return "", nil, fmt.Errorf("azcopy list command failed with error(%v): %v", testErr, out) } - if cache != nil { - klog.V(2).Infof("use sas token for account(%s) since this account is found in azcopySasTokenCache", accountName) + if strings.Contains(out, authorizationPermissionMismatch) { + klog.Warningf("azcopy list failed with AuthorizationPermissionMismatch error, should assign \"Storage Blob Data Contributor\" role to controller identity, fall back to use sas token, original output: %v", out) useSasToken = true - } else { - out, testErr := d.azcopy.TestListJobs(accountName, storageEndpointSuffix, authAzcopyEnv) - if testErr != nil { - return "", nil, fmt.Errorf("azcopy list command failed with error(%v): %v", testErr, out) - } - if strings.Contains(out, authorizationPermissionMismatch) { - klog.Warningf("azcopy list failed with AuthorizationPermissionMismatch error, should assign \"Storage Blob Data Contributor\" role to controller identity, fall back to use sas token, original output: %v", out) - d.azcopySasTokenCache.Set(accountName, "") - useSasToken = true - } } } } } if len(secrets) > 0 || len(secretName) > 0 || len(authAzcopyEnv) == 0 || useSasToken { - var err error if accountKey == "" { if _, accountKey, err = d.GetStorageAccesskey(ctx, accountOptions, secrets, secretName, secretNamespace); err != nil { return "", nil, err } } klog.V(2).Infof("generate sas token for account(%s)", 
accountName) - sasToken, err := generateSASToken(accountName, accountKey, storageEndpointSuffix, d.sasTokenExpirationMinutes) + sasToken, err := d.generateSASToken(accountName, accountKey, storageEndpointSuffix, d.sasTokenExpirationMinutes) return sasToken, nil, err } return "", authAzcopyEnv, nil @@ -914,7 +908,17 @@ func parseDays(dayStr string) (int32, error) { } // generateSASToken generate a sas token for storage account -func generateSASToken(accountName, accountKey, storageEndpointSuffix string, expiryTime int) (string, error) { +func (d *Driver) generateSASToken(accountName, accountKey, storageEndpointSuffix string, expiryTime int) (string, error) { + // search in cache first + cache, err := d.azcopySasTokenCache.Get(accountName, azcache.CacheReadTypeDefault) + if err != nil { + return "", fmt.Errorf("get(%s) from azcopySasTokenCache failed with error: %v", accountName, err) + } + if cache != nil { + klog.V(2).Infof("use sas token for account(%s) since this account is found in azcopySasTokenCache", accountName) + return cache.(string), nil + } + credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) if err != nil { return "", status.Errorf(codes.Internal, fmt.Sprintf("failed to generate sas token in creating new shared key credential, accountName: %s, err: %s", accountName, err.Error())) @@ -936,5 +940,7 @@ func generateSASToken(accountName, accountKey, storageEndpointSuffix string, exp if err != nil { return "", err } - return "?" + u.RawQuery, nil + sasToken := "?" + u.RawQuery + d.azcopySasTokenCache.Set(accountName, sasToken) + return sasToken, nil } diff --git a/pkg/blob/controllerserver_test.go b/pkg/blob/controllerserver_test.go index bb3a7a219..13cdb1dd2 100644 --- a/pkg/blob/controllerserver_test.go +++ b/pkg/blob/controllerserver_test.go @@ -1808,6 +1808,7 @@ func TestParseDays(t *testing.T) { } func TestGenerateSASToken(t *testing.T) { + d := NewFakeDriver() storageEndpointSuffix := "core.windows.net" tests := []struct { name string @@ -1833,7 +1834,7 @@ func TestGenerateSASToken(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - sas, err := generateSASToken(tt.accountName, tt.accountKey, storageEndpointSuffix, 30) + sas, err := d.generateSASToken(tt.accountName, tt.accountKey, storageEndpointSuffix, 30) if !reflect.DeepEqual(err, tt.expectedErr) { t.Errorf("generateSASToken error = %v, expectedErr %v, sas token = %v, want %v", err, tt.expectedErr, sas, tt.want) return From 89f6edb71618cc1de600f4e64c4f73afb97d6f8c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Mar 2024 16:08:05 +0000 Subject: [PATCH 104/157] chore(deps): bump sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader Bumps [sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader](https://github.com/kubernetes-sigs/cloud-provider-azure) from 0.0.0-20240319093822-58cad130d9dc to 0.0.1. - [Release notes](https://github.com/kubernetes-sigs/cloud-provider-azure/releases) - [Changelog](https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/docs/release-versioning.md) - [Commits](https://github.com/kubernetes-sigs/cloud-provider-azure/commits/pkg/azclient/v0.0.1) --- updated-dependencies: - dependency-name: sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- vendor/modules.txt | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 2b00cc1fb..87dbf5c15 100644 --- a/go.mod +++ b/go.mod @@ -38,7 +38,7 @@ require ( k8s.io/mount-utils v0.29.0 k8s.io/utils v0.0.0-20231127182322-b307cd553661 sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc - sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240319093822-58cad130d9dc + sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.1 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index b51a78a4e..148eb8ee8 100644 --- a/go.sum +++ b/go.sum @@ -466,8 +466,8 @@ sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc h1:y/jDBU sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc/go.mod h1:nS6tUQgopFVyqFQW1P/luDXNNQfsQOg3WFs9GhzNjm8= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.2 h1:9Zp+uWnxdUOoy/FaQK1DjPfLrzw8TyMEf0aaQ0dtq6c= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.2/go.mod h1:JKWYkoOyET3wsN0Kk9WxA+zpopkuCy+v4+mrnJ60Yyk= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240319093822-58cad130d9dc h1:YHsVYAF6cioowCy+5vp4d3/GVHyypMhFkc6FgBF5E+g= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240319093822-58cad130d9dc/go.mod h1:pPkJPx/eMVWP3R+LhPoOYGoY7lywcMJev5L2uSfH+Jo= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.1 h1:Lp0nALZmvMJoiVeVV6XjnZv1uClfArnThhuDAjaqE5A= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.1/go.mod h1:pPkJPx/eMVWP3R+LhPoOYGoY7lywcMJev5L2uSfH+Jo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/vendor/modules.txt b/vendor/modules.txt index e9a99acf3..304364a86 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1655,7 +1655,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient -# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.0-20240319093822-58cad130d9dc +# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.1 ## explicit; go 1.21 sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader # sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd From f9d7befdad86abd01aa62b2b92c3fc1298751aa3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 21 Mar 2024 16:27:43 +0000 Subject: [PATCH 105/157] chore(deps): bump sigs.k8s.io/cloud-provider-azure/pkg/azclient Bumps [sigs.k8s.io/cloud-provider-azure/pkg/azclient](https://github.com/kubernetes-sigs/cloud-provider-azure) from 0.0.2 to 0.0.4. 
- [Release notes](https://github.com/kubernetes-sigs/cloud-provider-azure/releases) - [Changelog](https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/docs/release-versioning.md) - [Commits](https://github.com/kubernetes-sigs/cloud-provider-azure/compare/pkg/azclient/v0.0.2...pkg/azclient/v0.0.4) --- updated-dependencies: - dependency-name: sigs.k8s.io/cloud-provider-azure/pkg/azclient dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 6 +- go.sum | 12 +- .../compute/armcompute/v5/CHANGELOG.md | 6 + .../compute/armcompute/v5/assets.json | 2 +- .../compute/armcompute/v5/autorest.md | 8 +- .../armcompute/v5/availabilitysets_client.go | 2 +- .../v5/capacityreservationgroups_client.go | 4 +- .../compute/armcompute/v5/ci.yml | 4 +- .../compute/armcompute/v5/client_factory.go | 302 ++++++++++++------ .../v5/cloudserviceroleinstances_client.go | 4 +- .../v5/communitygalleries_client.go | 4 +- .../v5/communitygalleryimages_client.go | 8 +- .../communitygalleryimageversions_client.go | 8 +- .../compute/armcompute/v5/constants.go | 2 +- .../compute/armcompute/v5/galleries_client.go | 36 +-- .../v5/galleryapplications_client.go | 26 +- .../v5/galleryapplicationversions_client.go | 26 +- .../armcompute/v5/galleryimages_client.go | 26 +- .../v5/galleryimageversions_client.go | 26 +- .../v5/gallerysharingprofile_client.go | 6 +- .../compute/armcompute/v5/models.go | 10 +- .../compute/armcompute/v5/models_serde.go | 6 +- .../v5/proximityplacementgroups_client.go | 2 +- .../armcompute/v5/resourceskus_client.go | 2 +- .../v5/{response_types.go => responses.go} | 0 .../armcompute/v5/sharedgalleries_client.go | 8 +- .../v5/sharedgalleryimages_client.go | 8 +- .../v5/sharedgalleryimageversions_client.go | 8 +- .../compute/armcompute/v5/time_rfc3339.go | 42 ++- .../virtualmachineextensionimages_client.go | 6 +- .../v5/virtualmachineimages_client.go | 6 +- .../v5/virtualmachineimagesedgezone_client.go | 6 +- .../armcompute/v5/virtualmachines_client.go | 28 +- .../v5/virtualmachinescalesets_client.go | 20 +- .../v5/virtualmachinescalesetvms_client.go | 14 +- .../armcontainerservice/v4/CHANGELOG.md | 112 +++++++ .../v4/agentpools_client.go | 40 +-- .../armcontainerservice/v4/assets.json | 2 +- .../armcontainerservice/v4/autorest.md | 8 +- .../armcontainerservice/v4/ci.yml | 4 +- .../armcontainerservice/v4/client_factory.go | 68 ++-- .../armcontainerservice/v4/constants.go | 2 +- .../armcontainerservice/v4/date_type.go | 3 +- .../v4/maintenanceconfigurations_client.go | 16 +- .../v4/managedclusters_client.go | 136 ++++---- .../armcontainerservice/v4/models.go | 28 ++ .../armcontainerservice/v4/models_serde.go | 88 ++++- .../v4/operations_client.go | 4 +- .../v4/privateendpointconnections_client.go | 18 +- .../v4/privatelinkresources_client.go | 4 +- .../v4/resolveprivatelinkserviceid_client.go | 4 +- .../v4/{response_types.go => responses.go} | 0 .../v4/snapshots_client.go | 24 +- .../armcontainerservice/v4/time_rfc3339.go | 42 ++- .../v4/trustedaccessrolebindings_client.go | 20 +- .../v4/trustedaccessroles_client.go | 4 +- vendor/modules.txt | 6 +- 57 files changed, 853 insertions(+), 464 deletions(-) rename vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/{response_types.go => responses.go} (100%) rename vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/{response_types.go => responses.go} (100%) diff --git a/go.mod b/go.mod index 
87dbf5c15..46f0fae31 100644 --- a/go.mod +++ b/go.mod @@ -44,9 +44,9 @@ require ( require ( github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.7.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.8.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.2.0 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect @@ -156,7 +156,7 @@ require ( k8s.io/kubelet v0.29.2 // indirect k8s.io/pod-security-admission v0.29.0 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect - sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.2 + sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.4 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index 148eb8ee8..b60e27752 100644 --- a/go.sum +++ b/go.sum @@ -11,12 +11,12 @@ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0 h1:MxA59PGoCFb+vCwRQi3PhQEwHj4+r2dhuv9HG+vM7iM= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0/go.mod h1:uYt4CfhkJA9o0FN7jfE5minm/i4nUE4MjGUJkzB6Zs8= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 h1:ui3YNbxfW7J3tTFIZMH6LIGRjCngp+J+nIFlnizfNTE= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0/go.mod h1:gZmgV+qBqygoznvqo2J9oKZAFziqhLZ2xE/WVUmzkHA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0 h1:DWlwvVV5r/Wy1561nZ3wrpI1/vDIBRY/Wd1HWaRBZWA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0/go.mod h1:E7ltexgRDmeJ0fJWv0D/HLwY2xbDdN+uv+X2uZtOx3w= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.7.0 h1:g65N4m1sAjm0BkjIJYtp5qnJlkoFtd6oqfa27KO9fI4= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.7.0/go.mod h1:noQIdW75SiQFB3mSFJBr4iRRH83S9skaFiBv4C0uEs0= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.8.0 h1:0nGmzwBv5ougvzfGPCO2ljFRHvun57KpNrVCMrlk0ns= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.8.0/go.mod h1:gYq8wyDgv6JLhGbAU6gg8amCPgQWRE+aCvrV2gyzdfs= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod 
h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0 h1:HlZMUZW8S4P9oob1nCHxCCKrytxyLc+24nUJGssoEto= @@ -464,8 +464,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2S sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc h1:y/jDBU+4EcPiHGPMQeK6B4IKEhOp3CCts/ZaRH1sa3w= sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc/go.mod h1:nS6tUQgopFVyqFQW1P/luDXNNQfsQOg3WFs9GhzNjm8= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.2 h1:9Zp+uWnxdUOoy/FaQK1DjPfLrzw8TyMEf0aaQ0dtq6c= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.2/go.mod h1:JKWYkoOyET3wsN0Kk9WxA+zpopkuCy+v4+mrnJ60Yyk= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.4 h1:PGATiBUdiGTx2cYWwI5kzCJXeIpj51jfR/noXgo3ihY= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.4/go.mod h1:gD4GA5AY4cDHirz9VrjKVTxcVWFgjRGPaMRjzOOYYhI= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.1 h1:Lp0nALZmvMJoiVeVV6XjnZv1uClfArnThhuDAjaqE5A= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.1/go.mod h1:pPkJPx/eMVWP3R+LhPoOYGoY7lywcMJev5L2uSfH+Jo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md index 836f84817..9aeb8a37e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md @@ -1,5 +1,11 @@ # Release History +## 5.6.0 (2024-03-22) +### Features Added + +- New field `VirtualMachineID` in struct `GalleryArtifactVersionFullSource` + + ## 5.5.0 (2024-01-26) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json index f8177c784..9a0140774 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/resourcemanager/compute/armcompute", - "Tag": "go/resourcemanager/compute/armcompute_444bc7d3bc" + "Tag": "go/resourcemanager/compute/armcompute_79d4095593" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md index 3e856c7d2..71b871d52 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md @@ -5,9 +5,9 @@ ``` yaml azure-arm: true require: -- https://github.com/Azure/azure-rest-api-specs/blob/41e4538ed7bb3ceac3c1322c9455a0812ed110ac/specification/compute/resource-manager/readme.md -- https://github.com/Azure/azure-rest-api-specs/blob/41e4538ed7bb3ceac3c1322c9455a0812ed110ac/specification/compute/resource-manager/readme.go.md +- 
https://github.com/Azure/azure-rest-api-specs/blob/e4009d2f8d3bf0271757e522c7d1c1997e193d44/specification/compute/resource-manager/readme.md +- https://github.com/Azure/azure-rest-api-specs/blob/e4009d2f8d3bf0271757e522c7d1c1997e193d44/specification/compute/resource-manager/readme.go.md license-header: MICROSOFT_MIT_NO_VERSION -module-version: 5.5.0 -tag: package-2023-10-02 +module-version: 5.6.0 +tag: package-2023-07-03 ``` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/availabilitysets_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/availabilitysets_client.go index 3b568f54a..55572022a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/availabilitysets_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/availabilitysets_client.go @@ -398,10 +398,10 @@ func (client *AvailabilitySetsClient) listBySubscriptionCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservationgroups_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservationgroups_client.go index 96e32e966..cc26cd0d4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservationgroups_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservationgroups_client.go @@ -286,10 +286,10 @@ func (client *CapacityReservationGroupsClient) listByResourceGroupCreateRequest( return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -345,10 +345,10 @@ func (client *CapacityReservationGroupsClient) listBySubscriptionCreateRequest(c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/ci.yml index 084ed1b49..c93d36fd4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/ci.yml @@ -21,8 +21,8 @@ pr: include: - sdk/resourcemanager/compute/armcompute/ -stages: -- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml +extends: + template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml parameters: IncludeRelease: true ServiceDirectory: 'resourcemanager/compute/armcompute' diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/client_factory.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/client_factory.go index b8600b614..dab27c87c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/client_factory.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/client_factory.go @@ -17,8 +17,7 @@ import ( // Don't use this type directly, use NewClientFactory instead. type ClientFactory struct { subscriptionID string - credential azcore.TokenCredential - options *arm.ClientOptions + internal *arm.Client } // NewClientFactory creates a new instance of ClientFactory with the specified values. @@ -28,306 +27,403 @@ type ClientFactory struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewClientFactory(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ClientFactory, error) { - _, err := arm.NewClient(moduleName, moduleVersion, credential, options) + internal, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } return &ClientFactory{ - subscriptionID: subscriptionID, credential: credential, - options: options.Clone(), + subscriptionID: subscriptionID, + internal: internal, }, nil } // NewAvailabilitySetsClient creates a new instance of AvailabilitySetsClient. func (c *ClientFactory) NewAvailabilitySetsClient() *AvailabilitySetsClient { - subClient, _ := NewAvailabilitySetsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &AvailabilitySetsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCapacityReservationGroupsClient creates a new instance of CapacityReservationGroupsClient. func (c *ClientFactory) NewCapacityReservationGroupsClient() *CapacityReservationGroupsClient { - subClient, _ := NewCapacityReservationGroupsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CapacityReservationGroupsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCapacityReservationsClient creates a new instance of CapacityReservationsClient. func (c *ClientFactory) NewCapacityReservationsClient() *CapacityReservationsClient { - subClient, _ := NewCapacityReservationsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CapacityReservationsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCloudServiceOperatingSystemsClient creates a new instance of CloudServiceOperatingSystemsClient. func (c *ClientFactory) NewCloudServiceOperatingSystemsClient() *CloudServiceOperatingSystemsClient { - subClient, _ := NewCloudServiceOperatingSystemsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CloudServiceOperatingSystemsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCloudServiceRoleInstancesClient creates a new instance of CloudServiceRoleInstancesClient. 
func (c *ClientFactory) NewCloudServiceRoleInstancesClient() *CloudServiceRoleInstancesClient { - subClient, _ := NewCloudServiceRoleInstancesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CloudServiceRoleInstancesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCloudServiceRolesClient creates a new instance of CloudServiceRolesClient. func (c *ClientFactory) NewCloudServiceRolesClient() *CloudServiceRolesClient { - subClient, _ := NewCloudServiceRolesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CloudServiceRolesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCloudServicesClient creates a new instance of CloudServicesClient. func (c *ClientFactory) NewCloudServicesClient() *CloudServicesClient { - subClient, _ := NewCloudServicesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CloudServicesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCloudServicesUpdateDomainClient creates a new instance of CloudServicesUpdateDomainClient. func (c *ClientFactory) NewCloudServicesUpdateDomainClient() *CloudServicesUpdateDomainClient { - subClient, _ := NewCloudServicesUpdateDomainClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CloudServicesUpdateDomainClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCommunityGalleriesClient creates a new instance of CommunityGalleriesClient. func (c *ClientFactory) NewCommunityGalleriesClient() *CommunityGalleriesClient { - subClient, _ := NewCommunityGalleriesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CommunityGalleriesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCommunityGalleryImageVersionsClient creates a new instance of CommunityGalleryImageVersionsClient. func (c *ClientFactory) NewCommunityGalleryImageVersionsClient() *CommunityGalleryImageVersionsClient { - subClient, _ := NewCommunityGalleryImageVersionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CommunityGalleryImageVersionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewCommunityGalleryImagesClient creates a new instance of CommunityGalleryImagesClient. func (c *ClientFactory) NewCommunityGalleryImagesClient() *CommunityGalleryImagesClient { - subClient, _ := NewCommunityGalleryImagesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &CommunityGalleryImagesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewDedicatedHostGroupsClient creates a new instance of DedicatedHostGroupsClient. func (c *ClientFactory) NewDedicatedHostGroupsClient() *DedicatedHostGroupsClient { - subClient, _ := NewDedicatedHostGroupsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &DedicatedHostGroupsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewDedicatedHostsClient creates a new instance of DedicatedHostsClient. func (c *ClientFactory) NewDedicatedHostsClient() *DedicatedHostsClient { - subClient, _ := NewDedicatedHostsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &DedicatedHostsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewDiskAccessesClient creates a new instance of DiskAccessesClient. 
func (c *ClientFactory) NewDiskAccessesClient() *DiskAccessesClient { - subClient, _ := NewDiskAccessesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &DiskAccessesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewDiskEncryptionSetsClient creates a new instance of DiskEncryptionSetsClient. func (c *ClientFactory) NewDiskEncryptionSetsClient() *DiskEncryptionSetsClient { - subClient, _ := NewDiskEncryptionSetsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &DiskEncryptionSetsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewDiskRestorePointClient creates a new instance of DiskRestorePointClient. func (c *ClientFactory) NewDiskRestorePointClient() *DiskRestorePointClient { - subClient, _ := NewDiskRestorePointClient(c.subscriptionID, c.credential, c.options) - return subClient + return &DiskRestorePointClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewDisksClient creates a new instance of DisksClient. func (c *ClientFactory) NewDisksClient() *DisksClient { - subClient, _ := NewDisksClient(c.subscriptionID, c.credential, c.options) - return subClient + return &DisksClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewGalleriesClient creates a new instance of GalleriesClient. func (c *ClientFactory) NewGalleriesClient() *GalleriesClient { - subClient, _ := NewGalleriesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &GalleriesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewGalleryApplicationVersionsClient creates a new instance of GalleryApplicationVersionsClient. func (c *ClientFactory) NewGalleryApplicationVersionsClient() *GalleryApplicationVersionsClient { - subClient, _ := NewGalleryApplicationVersionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &GalleryApplicationVersionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewGalleryApplicationsClient creates a new instance of GalleryApplicationsClient. func (c *ClientFactory) NewGalleryApplicationsClient() *GalleryApplicationsClient { - subClient, _ := NewGalleryApplicationsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &GalleryApplicationsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewGalleryImageVersionsClient creates a new instance of GalleryImageVersionsClient. func (c *ClientFactory) NewGalleryImageVersionsClient() *GalleryImageVersionsClient { - subClient, _ := NewGalleryImageVersionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &GalleryImageVersionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewGalleryImagesClient creates a new instance of GalleryImagesClient. func (c *ClientFactory) NewGalleryImagesClient() *GalleryImagesClient { - subClient, _ := NewGalleryImagesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &GalleryImagesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewGallerySharingProfileClient creates a new instance of GallerySharingProfileClient. 
func (c *ClientFactory) NewGallerySharingProfileClient() *GallerySharingProfileClient { - subClient, _ := NewGallerySharingProfileClient(c.subscriptionID, c.credential, c.options) - return subClient + return &GallerySharingProfileClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewImagesClient creates a new instance of ImagesClient. func (c *ClientFactory) NewImagesClient() *ImagesClient { - subClient, _ := NewImagesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &ImagesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewLogAnalyticsClient creates a new instance of LogAnalyticsClient. func (c *ClientFactory) NewLogAnalyticsClient() *LogAnalyticsClient { - subClient, _ := NewLogAnalyticsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &LogAnalyticsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewOperationsClient creates a new instance of OperationsClient. func (c *ClientFactory) NewOperationsClient() *OperationsClient { - subClient, _ := NewOperationsClient(c.credential, c.options) - return subClient + return &OperationsClient{ + internal: c.internal, + } } // NewProximityPlacementGroupsClient creates a new instance of ProximityPlacementGroupsClient. func (c *ClientFactory) NewProximityPlacementGroupsClient() *ProximityPlacementGroupsClient { - subClient, _ := NewProximityPlacementGroupsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &ProximityPlacementGroupsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewResourceSKUsClient creates a new instance of ResourceSKUsClient. func (c *ClientFactory) NewResourceSKUsClient() *ResourceSKUsClient { - subClient, _ := NewResourceSKUsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &ResourceSKUsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewRestorePointCollectionsClient creates a new instance of RestorePointCollectionsClient. func (c *ClientFactory) NewRestorePointCollectionsClient() *RestorePointCollectionsClient { - subClient, _ := NewRestorePointCollectionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &RestorePointCollectionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewRestorePointsClient creates a new instance of RestorePointsClient. func (c *ClientFactory) NewRestorePointsClient() *RestorePointsClient { - subClient, _ := NewRestorePointsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &RestorePointsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewSSHPublicKeysClient creates a new instance of SSHPublicKeysClient. func (c *ClientFactory) NewSSHPublicKeysClient() *SSHPublicKeysClient { - subClient, _ := NewSSHPublicKeysClient(c.subscriptionID, c.credential, c.options) - return subClient + return &SSHPublicKeysClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewSharedGalleriesClient creates a new instance of SharedGalleriesClient. func (c *ClientFactory) NewSharedGalleriesClient() *SharedGalleriesClient { - subClient, _ := NewSharedGalleriesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &SharedGalleriesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewSharedGalleryImageVersionsClient creates a new instance of SharedGalleryImageVersionsClient. 
func (c *ClientFactory) NewSharedGalleryImageVersionsClient() *SharedGalleryImageVersionsClient { - subClient, _ := NewSharedGalleryImageVersionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &SharedGalleryImageVersionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewSharedGalleryImagesClient creates a new instance of SharedGalleryImagesClient. func (c *ClientFactory) NewSharedGalleryImagesClient() *SharedGalleryImagesClient { - subClient, _ := NewSharedGalleryImagesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &SharedGalleryImagesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewSnapshotsClient creates a new instance of SnapshotsClient. func (c *ClientFactory) NewSnapshotsClient() *SnapshotsClient { - subClient, _ := NewSnapshotsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &SnapshotsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewUsageClient creates a new instance of UsageClient. func (c *ClientFactory) NewUsageClient() *UsageClient { - subClient, _ := NewUsageClient(c.subscriptionID, c.credential, c.options) - return subClient + return &UsageClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineExtensionImagesClient creates a new instance of VirtualMachineExtensionImagesClient. func (c *ClientFactory) NewVirtualMachineExtensionImagesClient() *VirtualMachineExtensionImagesClient { - subClient, _ := NewVirtualMachineExtensionImagesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineExtensionImagesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineExtensionsClient creates a new instance of VirtualMachineExtensionsClient. func (c *ClientFactory) NewVirtualMachineExtensionsClient() *VirtualMachineExtensionsClient { - subClient, _ := NewVirtualMachineExtensionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineExtensionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineImagesClient creates a new instance of VirtualMachineImagesClient. func (c *ClientFactory) NewVirtualMachineImagesClient() *VirtualMachineImagesClient { - subClient, _ := NewVirtualMachineImagesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineImagesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineImagesEdgeZoneClient creates a new instance of VirtualMachineImagesEdgeZoneClient. func (c *ClientFactory) NewVirtualMachineImagesEdgeZoneClient() *VirtualMachineImagesEdgeZoneClient { - subClient, _ := NewVirtualMachineImagesEdgeZoneClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineImagesEdgeZoneClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineRunCommandsClient creates a new instance of VirtualMachineRunCommandsClient. func (c *ClientFactory) NewVirtualMachineRunCommandsClient() *VirtualMachineRunCommandsClient { - subClient, _ := NewVirtualMachineRunCommandsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineRunCommandsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineScaleSetExtensionsClient creates a new instance of VirtualMachineScaleSetExtensionsClient. 
func (c *ClientFactory) NewVirtualMachineScaleSetExtensionsClient() *VirtualMachineScaleSetExtensionsClient { - subClient, _ := NewVirtualMachineScaleSetExtensionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineScaleSetExtensionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineScaleSetRollingUpgradesClient creates a new instance of VirtualMachineScaleSetRollingUpgradesClient. func (c *ClientFactory) NewVirtualMachineScaleSetRollingUpgradesClient() *VirtualMachineScaleSetRollingUpgradesClient { - subClient, _ := NewVirtualMachineScaleSetRollingUpgradesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineScaleSetRollingUpgradesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineScaleSetVMExtensionsClient creates a new instance of VirtualMachineScaleSetVMExtensionsClient. func (c *ClientFactory) NewVirtualMachineScaleSetVMExtensionsClient() *VirtualMachineScaleSetVMExtensionsClient { - subClient, _ := NewVirtualMachineScaleSetVMExtensionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineScaleSetVMExtensionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineScaleSetVMRunCommandsClient creates a new instance of VirtualMachineScaleSetVMRunCommandsClient. func (c *ClientFactory) NewVirtualMachineScaleSetVMRunCommandsClient() *VirtualMachineScaleSetVMRunCommandsClient { - subClient, _ := NewVirtualMachineScaleSetVMRunCommandsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineScaleSetVMRunCommandsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineScaleSetVMsClient creates a new instance of VirtualMachineScaleSetVMsClient. func (c *ClientFactory) NewVirtualMachineScaleSetVMsClient() *VirtualMachineScaleSetVMsClient { - subClient, _ := NewVirtualMachineScaleSetVMsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineScaleSetVMsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineScaleSetsClient creates a new instance of VirtualMachineScaleSetsClient. func (c *ClientFactory) NewVirtualMachineScaleSetsClient() *VirtualMachineScaleSetsClient { - subClient, _ := NewVirtualMachineScaleSetsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineScaleSetsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachineSizesClient creates a new instance of VirtualMachineSizesClient. func (c *ClientFactory) NewVirtualMachineSizesClient() *VirtualMachineSizesClient { - subClient, _ := NewVirtualMachineSizesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachineSizesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewVirtualMachinesClient creates a new instance of VirtualMachinesClient. 
func (c *ClientFactory) NewVirtualMachinesClient() *VirtualMachinesClient { - subClient, _ := NewVirtualMachinesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &VirtualMachinesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/cloudserviceroleinstances_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/cloudserviceroleinstances_client.go index b84530166..1f7cdb7c3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/cloudserviceroleinstances_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/cloudserviceroleinstances_client.go @@ -180,10 +180,10 @@ func (client *CloudServiceRoleInstancesClient) getCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-09-04") if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } + reqQP.Set("api-version", "2022-09-04") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -380,10 +380,10 @@ func (client *CloudServiceRoleInstancesClient) listCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-09-04") if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } + reqQP.Set("api-version", "2022-09-04") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleries_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleries_client.go index 2bfc798f3..431a26cde 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleries_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleries_client.go @@ -47,7 +47,7 @@ func NewCommunityGalleriesClient(subscriptionID string, credential azcore.TokenC // Get - Get a community gallery by gallery public name. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - publicGalleryName - The public name of the community gallery. // - options - CommunityGalleriesClientGetOptions contains the optional parameters for the CommunityGalleriesClient.Get method. 
@@ -93,7 +93,7 @@ func (client *CommunityGalleriesClient) getCreateRequest(ctx context.Context, lo return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimages_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimages_client.go index 25b118f07..4ecc005ca 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimages_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimages_client.go @@ -47,7 +47,7 @@ func NewCommunityGalleryImagesClient(subscriptionID string, credential azcore.To // Get - Get a community gallery image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - publicGalleryName - The public name of the community gallery. // - galleryImageName - The name of the community gallery image definition. @@ -99,7 +99,7 @@ func (client *CommunityGalleryImagesClient) getCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -116,7 +116,7 @@ func (client *CommunityGalleryImagesClient) getHandleResponse(resp *http.Respons // NewListPager - List community gallery images inside a gallery. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - publicGalleryName - The public name of the community gallery. // - options - CommunityGalleryImagesClientListOptions contains the optional parameters for the CommunityGalleryImagesClient.NewListPager @@ -164,7 +164,7 @@ func (client *CommunityGalleryImagesClient) listCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimageversions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimageversions_client.go index 5e382b36d..e98cfa506 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimageversions_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimageversions_client.go @@ -47,7 +47,7 @@ func NewCommunityGalleryImageVersionsClient(subscriptionID string, credential az // Get - Get a community gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - publicGalleryName - The public name of the community gallery. 
// - galleryImageName - The name of the community gallery image definition. @@ -106,7 +106,7 @@ func (client *CommunityGalleryImageVersionsClient) getCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -123,7 +123,7 @@ func (client *CommunityGalleryImageVersionsClient) getHandleResponse(resp *http. // NewListPager - List community gallery image versions inside an image. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - publicGalleryName - The public name of the community gallery. // - galleryImageName - The name of the community gallery image definition. @@ -176,7 +176,7 @@ func (client *CommunityGalleryImageVersionsClient) listCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go index e4858a588..73f3548e4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go @@ -10,7 +10,7 @@ package armcompute const ( moduleName = "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute" - moduleVersion = "v5.5.0" + moduleVersion = "v5.6.0" ) type AccessLevel string diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleries_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleries_client.go index 8fac494c7..f24e730a6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleries_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleries_client.go @@ -47,7 +47,7 @@ func NewGalleriesClient(subscriptionID string, credential azcore.TokenCredential // BeginCreateOrUpdate - Create or update a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery. The allowed characters are alphabets and numbers with dots and periods // allowed in the middle. The maximum length is 80 characters. @@ -74,7 +74,7 @@ func (client *GalleriesClient) BeginCreateOrUpdate(ctx context.Context, resource // CreateOrUpdate - Create or update a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleriesClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, gallery Gallery, options *GalleriesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleriesClient.BeginCreateOrUpdate" @@ -116,7 +116,7 @@ func (client *GalleriesClient) createOrUpdateCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, gallery); err != nil { @@ -128,7 +128,7 @@ func (client *GalleriesClient) createOrUpdateCreateRequest(ctx context.Context, // BeginDelete - Delete a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery to be deleted. // - options - GalleriesClientBeginDeleteOptions contains the optional parameters for the GalleriesClient.BeginDelete method. @@ -152,7 +152,7 @@ func (client *GalleriesClient) BeginDelete(ctx context.Context, resourceGroupNam // Delete - Delete a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleriesClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, options *GalleriesClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "GalleriesClient.BeginDelete" @@ -194,7 +194,7 @@ func (client *GalleriesClient) deleteCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -203,7 +203,7 @@ func (client *GalleriesClient) deleteCreateRequest(ctx context.Context, resource // Get - Retrieves information about a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery. // - options - GalleriesClientGetOptions contains the optional parameters for the GalleriesClient.Get method. @@ -249,13 +249,13 @@ func (client *GalleriesClient) getCreateRequest(ctx context.Context, resourceGro return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") - if options != nil && options.Select != nil { - reqQP.Set("$select", string(*options.Select)) - } if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } + if options != nil && options.Select != nil { + reqQP.Set("$select", string(*options.Select)) + } + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -272,7 +272,7 @@ func (client *GalleriesClient) getHandleResponse(resp *http.Response) (Galleries // NewListPager - List galleries under a subscription. 
// -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - options - GalleriesClientListOptions contains the optional parameters for the GalleriesClient.NewListPager method. func (client *GalleriesClient) NewListPager(options *GalleriesClientListOptions) *runtime.Pager[GalleriesClientListResponse] { return runtime.NewPager(runtime.PagingHandler[GalleriesClientListResponse]{ @@ -309,7 +309,7 @@ func (client *GalleriesClient) listCreateRequest(ctx context.Context, options *G return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -326,7 +326,7 @@ func (client *GalleriesClient) listHandleResponse(resp *http.Response) (Gallerie // NewListByResourceGroupPager - List galleries under a resource group. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - options - GalleriesClientListByResourceGroupOptions contains the optional parameters for the GalleriesClient.NewListByResourceGroupPager // method. @@ -369,7 +369,7 @@ func (client *GalleriesClient) listByResourceGroupCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -387,7 +387,7 @@ func (client *GalleriesClient) listByResourceGroupHandleResponse(resp *http.Resp // BeginUpdate - Update a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery. The allowed characters are alphabets and numbers with dots and periods // allowed in the middle. The maximum length is 80 characters. @@ -413,7 +413,7 @@ func (client *GalleriesClient) BeginUpdate(ctx context.Context, resourceGroupNam // Update - Update a Shared Image Gallery. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleriesClient) update(ctx context.Context, resourceGroupName string, galleryName string, gallery GalleryUpdate, options *GalleriesClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleriesClient.BeginUpdate" @@ -455,7 +455,7 @@ func (client *GalleriesClient) updateCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, gallery); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplications_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplications_client.go index 1ebec623b..cd5d6f3ec 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplications_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplications_client.go @@ -47,7 +47,7 @@ func NewGalleryApplicationsClient(subscriptionID string, credential azcore.Token // BeginCreateOrUpdate - Create or update a gallery Application Definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition is to be created. // - galleryApplicationName - The name of the gallery Application Definition to be created or updated. The allowed characters @@ -76,7 +76,7 @@ func (client *GalleryApplicationsClient) BeginCreateOrUpdate(ctx context.Context // CreateOrUpdate - Create or update a gallery Application Definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryApplicationsClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication GalleryApplication, options *GalleryApplicationsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryApplicationsClient.BeginCreateOrUpdate" @@ -122,7 +122,7 @@ func (client *GalleryApplicationsClient) createOrUpdateCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryApplication); err != nil { @@ -134,7 +134,7 @@ func (client *GalleryApplicationsClient) createOrUpdateCreateRequest(ctx context // BeginDelete - Delete a gallery Application. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition is to be deleted. // - galleryApplicationName - The name of the gallery Application Definition to be deleted. 
@@ -160,7 +160,7 @@ func (client *GalleryApplicationsClient) BeginDelete(ctx context.Context, resour // Delete - Delete a gallery Application. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryApplicationsClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, options *GalleryApplicationsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "GalleryApplicationsClient.BeginDelete" @@ -206,7 +206,7 @@ func (client *GalleryApplicationsClient) deleteCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -215,7 +215,7 @@ func (client *GalleryApplicationsClient) deleteCreateRequest(ctx context.Context // Get - Retrieves information about a gallery Application Definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery from which the Application Definitions are to be retrieved. // - galleryApplicationName - The name of the gallery Application Definition to be retrieved. @@ -266,7 +266,7 @@ func (client *GalleryApplicationsClient) getCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -283,7 +283,7 @@ func (client *GalleryApplicationsClient) getHandleResponse(resp *http.Response) // NewListByGalleryPager - List gallery Application Definitions in a gallery. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery from which Application Definitions are to be listed. // - options - GalleryApplicationsClientListByGalleryOptions contains the optional parameters for the GalleryApplicationsClient.NewListByGalleryPager @@ -331,7 +331,7 @@ func (client *GalleryApplicationsClient) listByGalleryCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -349,7 +349,7 @@ func (client *GalleryApplicationsClient) listByGalleryHandleResponse(resp *http. // BeginUpdate - Update a gallery Application Definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition is to be updated. // - galleryApplicationName - The name of the gallery Application Definition to be updated. 
The allowed characters are alphabets @@ -378,7 +378,7 @@ func (client *GalleryApplicationsClient) BeginUpdate(ctx context.Context, resour // Update - Update a gallery Application Definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryApplicationsClient) update(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication GalleryApplicationUpdate, options *GalleryApplicationsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryApplicationsClient.BeginUpdate" @@ -424,7 +424,7 @@ func (client *GalleryApplicationsClient) updateCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryApplication); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplicationversions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplicationversions_client.go index 3d885cb93..06571af3d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplicationversions_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplicationversions_client.go @@ -47,7 +47,7 @@ func NewGalleryApplicationVersionsClient(subscriptionID string, credential azcor // BeginCreateOrUpdate - Create or update a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition resides. // - galleryApplicationName - The name of the gallery Application Definition in which the Application Version is to be created. @@ -77,7 +77,7 @@ func (client *GalleryApplicationVersionsClient) BeginCreateOrUpdate(ctx context. // CreateOrUpdate - Create or update a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryApplicationVersionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion GalleryApplicationVersion, options *GalleryApplicationVersionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryApplicationVersionsClient.BeginCreateOrUpdate" @@ -127,7 +127,7 @@ func (client *GalleryApplicationVersionsClient) createOrUpdateCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryApplicationVersion); err != nil { @@ -139,7 +139,7 @@ func (client *GalleryApplicationVersionsClient) createOrUpdateCreateRequest(ctx // BeginDelete - Delete a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition resides. // - galleryApplicationName - The name of the gallery Application Definition in which the Application Version resides. @@ -166,7 +166,7 @@ func (client *GalleryApplicationVersionsClient) BeginDelete(ctx context.Context, // Delete - Delete a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryApplicationVersionsClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, options *GalleryApplicationVersionsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "GalleryApplicationVersionsClient.BeginDelete" @@ -216,7 +216,7 @@ func (client *GalleryApplicationVersionsClient) deleteCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -225,7 +225,7 @@ func (client *GalleryApplicationVersionsClient) deleteCreateRequest(ctx context. // Get - Retrieves information about a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition resides. // - galleryApplicationName - The name of the gallery Application Definition in which the Application Version resides. 
@@ -285,7 +285,7 @@ func (client *GalleryApplicationVersionsClient) getCreateRequest(ctx context.Con if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -302,7 +302,7 @@ func (client *GalleryApplicationVersionsClient) getHandleResponse(resp *http.Res // NewListByGalleryApplicationPager - List gallery Application Versions in a gallery Application Definition. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition resides. // - galleryApplicationName - The name of the Shared Application Gallery Application Definition from which the Application Versions @@ -356,7 +356,7 @@ func (client *GalleryApplicationVersionsClient) listByGalleryApplicationCreateRe return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -374,7 +374,7 @@ func (client *GalleryApplicationVersionsClient) listByGalleryApplicationHandleRe // BeginUpdate - Update a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Application Gallery in which the Application Definition resides. // - galleryApplicationName - The name of the gallery Application Definition in which the Application Version is to be updated. @@ -404,7 +404,7 @@ func (client *GalleryApplicationVersionsClient) BeginUpdate(ctx context.Context, // Update - Update a gallery Application Version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryApplicationVersionsClient) update(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion GalleryApplicationVersionUpdate, options *GalleryApplicationVersionsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryApplicationVersionsClient.BeginUpdate" @@ -454,7 +454,7 @@ func (client *GalleryApplicationVersionsClient) updateCreateRequest(ctx context. 
return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryApplicationVersion); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimages_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimages_client.go index 6a684b3ed..8be1a5784 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimages_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimages_client.go @@ -47,7 +47,7 @@ func NewGalleryImagesClient(subscriptionID string, credential azcore.TokenCreden // BeginCreateOrUpdate - Create or update a gallery image definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition is to be created. // - galleryImageName - The name of the gallery image definition to be created or updated. The allowed characters are alphabets @@ -76,7 +76,7 @@ func (client *GalleryImagesClient) BeginCreateOrUpdate(ctx context.Context, reso // CreateOrUpdate - Create or update a gallery image definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryImagesClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImage, options *GalleryImagesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryImagesClient.BeginCreateOrUpdate" @@ -122,7 +122,7 @@ func (client *GalleryImagesClient) createOrUpdateCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryImage); err != nil { @@ -134,7 +134,7 @@ func (client *GalleryImagesClient) createOrUpdateCreateRequest(ctx context.Conte // BeginDelete - Delete a gallery image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition is to be deleted. // - galleryImageName - The name of the gallery image definition to be deleted. @@ -160,7 +160,7 @@ func (client *GalleryImagesClient) BeginDelete(ctx context.Context, resourceGrou // Delete - Delete a gallery image. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryImagesClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, options *GalleryImagesClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "GalleryImagesClient.BeginDelete" @@ -206,7 +206,7 @@ func (client *GalleryImagesClient) deleteCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -215,7 +215,7 @@ func (client *GalleryImagesClient) deleteCreateRequest(ctx context.Context, reso // Get - Retrieves information about a gallery image definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery from which the Image Definitions are to be retrieved. // - galleryImageName - The name of the gallery image definition to be retrieved. @@ -266,7 +266,7 @@ func (client *GalleryImagesClient) getCreateRequest(ctx context.Context, resourc return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -283,7 +283,7 @@ func (client *GalleryImagesClient) getHandleResponse(resp *http.Response) (Galle // NewListByGalleryPager - List gallery image definitions in a gallery. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery from which Image Definitions are to be listed. // - options - GalleryImagesClientListByGalleryOptions contains the optional parameters for the GalleryImagesClient.NewListByGalleryPager @@ -331,7 +331,7 @@ func (client *GalleryImagesClient) listByGalleryCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -349,7 +349,7 @@ func (client *GalleryImagesClient) listByGalleryHandleResponse(resp *http.Respon // BeginUpdate - Update a gallery image definition. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition is to be updated. // - galleryImageName - The name of the gallery image definition to be updated. The allowed characters are alphabets and numbers @@ -377,7 +377,7 @@ func (client *GalleryImagesClient) BeginUpdate(ctx context.Context, resourceGrou // Update - Update a gallery image definition. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryImagesClient) update(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImageUpdate, options *GalleryImagesClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryImagesClient.BeginUpdate" @@ -423,7 +423,7 @@ func (client *GalleryImagesClient) updateCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryImage); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimageversions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimageversions_client.go index 05f3cf352..b16e42ed7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimageversions_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimageversions_client.go @@ -47,7 +47,7 @@ func NewGalleryImageVersionsClient(subscriptionID string, credential azcore.Toke // BeginCreateOrUpdate - Create or update a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition resides. // - galleryImageName - The name of the gallery image definition in which the Image Version is to be created. @@ -77,7 +77,7 @@ func (client *GalleryImageVersionsClient) BeginCreateOrUpdate(ctx context.Contex // CreateOrUpdate - Create or update a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryImageVersionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersion, options *GalleryImageVersionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryImageVersionsClient.BeginCreateOrUpdate" @@ -127,7 +127,7 @@ func (client *GalleryImageVersionsClient) createOrUpdateCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryImageVersion); err != nil { @@ -139,7 +139,7 @@ func (client *GalleryImageVersionsClient) createOrUpdateCreateRequest(ctx contex // BeginDelete - Delete a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition resides. 
// - galleryImageName - The name of the gallery image definition in which the Image Version resides. @@ -166,7 +166,7 @@ func (client *GalleryImageVersionsClient) BeginDelete(ctx context.Context, resou // Delete - Delete a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryImageVersionsClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, options *GalleryImageVersionsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "GalleryImageVersionsClient.BeginDelete" @@ -216,7 +216,7 @@ func (client *GalleryImageVersionsClient) deleteCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -225,7 +225,7 @@ func (client *GalleryImageVersionsClient) deleteCreateRequest(ctx context.Contex // Get - Retrieves information about a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition resides. // - galleryImageName - The name of the gallery image definition in which the Image Version resides. @@ -285,7 +285,7 @@ func (client *GalleryImageVersionsClient) getCreateRequest(ctx context.Context, if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -302,7 +302,7 @@ func (client *GalleryImageVersionsClient) getHandleResponse(resp *http.Response) // NewListByGalleryImagePager - List gallery image versions in a gallery image definition. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition resides. // - galleryImageName - The name of the Shared Image Gallery Image Definition from which the Image Versions are to be listed. @@ -355,7 +355,7 @@ func (client *GalleryImageVersionsClient) listByGalleryImageCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -373,7 +373,7 @@ func (client *GalleryImageVersionsClient) listByGalleryImageHandleResponse(resp // BeginUpdate - Update a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery in which the Image Definition resides. // - galleryImageName - The name of the gallery image definition in which the Image Version is to be updated. 
@@ -403,7 +403,7 @@ func (client *GalleryImageVersionsClient) BeginUpdate(ctx context.Context, resou // Update - Update a gallery image version. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GalleryImageVersionsClient) update(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersionUpdate, options *GalleryImageVersionsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "GalleryImageVersionsClient.BeginUpdate" @@ -453,7 +453,7 @@ func (client *GalleryImageVersionsClient) updateCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, galleryImageVersion); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/gallerysharingprofile_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/gallerysharingprofile_client.go index 28d024e97..0ccc5ce9f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/gallerysharingprofile_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/gallerysharingprofile_client.go @@ -47,7 +47,7 @@ func NewGallerySharingProfileClient(subscriptionID string, credential azcore.Tok // BeginUpdate - Update sharing profile of a gallery. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - resourceGroupName - The name of the resource group. // - galleryName - The name of the Shared Image Gallery. // - sharingUpdate - Parameters supplied to the update gallery sharing profile. @@ -73,7 +73,7 @@ func (client *GallerySharingProfileClient) BeginUpdate(ctx context.Context, reso // Update - Update sharing profile of a gallery. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 func (client *GallerySharingProfileClient) update(ctx context.Context, resourceGroupName string, galleryName string, sharingUpdate SharingUpdate, options *GallerySharingProfileClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "GallerySharingProfileClient.BeginUpdate" @@ -115,7 +115,7 @@ func (client *GallerySharingProfileClient) updateCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, sharingUpdate); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go index ffba7145d..6f755a27e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go @@ -2385,13 +2385,17 @@ type GalleryArtifactVersionFullSource struct { // The resource Id of the source Community Gallery Image. Only required when using Community Gallery Image as a source. CommunityGalleryImageID *string - // The id of the gallery artifact version source. Can specify a disk uri, snapshot uri, user image or storage account resource. + // The id of the gallery artifact version source. ID *string + + // The resource Id of the source virtual machine. Only required when capturing a virtual machine to source this Gallery Image + // Version. + VirtualMachineID *string } // GalleryArtifactVersionSource - The gallery artifact version source. type GalleryArtifactVersionSource struct { - // The id of the gallery artifact version source. Can specify a disk uri, snapshot uri, user image or storage account resource. + // The id of the gallery artifact version source. ID *string } @@ -2426,7 +2430,7 @@ type GalleryDiskImage struct { // GalleryDiskImageSource - The source for the disk image. type GalleryDiskImageSource struct { - // The id of the gallery artifact version source. Can specify a disk uri, snapshot uri, user image or storage account resource. + // The id of the gallery artifact version source. ID *string // The Storage Account Id that contains the vhd blob being used as a source for this artifact version. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go index 077f40dd3..16bc25e59 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go @@ -5492,6 +5492,7 @@ func (g GalleryArtifactVersionFullSource) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "communityGalleryImageId", g.CommunityGalleryImageID) populate(objectMap, "id", g.ID) + populate(objectMap, "virtualMachineId", g.VirtualMachineID) return json.Marshal(objectMap) } @@ -5510,6 +5511,9 @@ func (g *GalleryArtifactVersionFullSource) UnmarshalJSON(data []byte) error { case "id": err = unpopulate(val, "ID", &g.ID) delete(rawMsg, key) + case "virtualMachineId": + err = unpopulate(val, "VirtualMachineID", &g.VirtualMachineID) + delete(rawMsg, key) } if err != nil { return fmt.Errorf("unmarshalling type %T: %v", g, err) @@ -19769,7 +19773,7 @@ func populateAny(m map[string]any, k string, v any) { } func unpopulate(data json.RawMessage, fn string, v any) error { - if data == nil { + if data == nil || string(data) == "null" { return nil } if err := json.Unmarshal(data, v); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/proximityplacementgroups_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/proximityplacementgroups_client.go index 947d83736..c35ed6b0c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/proximityplacementgroups_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/proximityplacementgroups_client.go @@ -218,10 +218,10 @@ func (client *ProximityPlacementGroupsClient) getCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") if options != nil && options.IncludeColocationStatus != nil { reqQP.Set("includeColocationStatus", *options.IncludeColocationStatus) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/resourceskus_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/resourceskus_client.go index e0dd1afa4..f716c1ccf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/resourceskus_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/resourceskus_client.go @@ -83,10 +83,10 @@ func (client *ResourceSKUsClient) listCreateRequest(ctx context.Context, options return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2021-07-01") if options != nil && options.Filter != nil { reqQP.Set("$filter", *options.Filter) } + reqQP.Set("api-version", "2021-07-01") if options != nil && options.IncludeExtendedLocations != nil { reqQP.Set("includeExtendedLocations", *options.IncludeExtendedLocations) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/response_types.go 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/responses.go similarity index 100% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/response_types.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/responses.go diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleries_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleries_client.go index 4bf2d0960..ee8172bae 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleries_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleries_client.go @@ -47,7 +47,7 @@ func NewSharedGalleriesClient(subscriptionID string, credential azcore.TokenCred // Get - Get a shared gallery by subscription id or tenant id. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - galleryUniqueName - The unique name of the Shared Gallery. // - options - SharedGalleriesClientGetOptions contains the optional parameters for the SharedGalleriesClient.Get method. @@ -93,7 +93,7 @@ func (client *SharedGalleriesClient) getCreateRequest(ctx context.Context, locat return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -110,7 +110,7 @@ func (client *SharedGalleriesClient) getHandleResponse(resp *http.Response) (Sha // NewListPager - List shared galleries by subscription id or tenant id. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - options - SharedGalleriesClientListOptions contains the optional parameters for the SharedGalleriesClient.NewListPager // method. @@ -153,7 +153,7 @@ func (client *SharedGalleriesClient) listCreateRequest(ctx context.Context, loca return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") if options != nil && options.SharedTo != nil { reqQP.Set("sharedTo", string(*options.SharedTo)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimages_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimages_client.go index ad549b140..8f0ef79cf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimages_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimages_client.go @@ -47,7 +47,7 @@ func NewSharedGalleryImagesClient(subscriptionID string, credential azcore.Token // Get - Get a shared gallery image by subscription id or tenant id. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - galleryUniqueName - The unique name of the Shared Gallery. // - galleryImageName - The name of the Shared Gallery Image Definition from which the Image Versions are to be listed. 
@@ -98,7 +98,7 @@ func (client *SharedGalleryImagesClient) getCreateRequest(ctx context.Context, l return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -115,7 +115,7 @@ func (client *SharedGalleryImagesClient) getHandleResponse(resp *http.Response) // NewListPager - List shared gallery images by subscription id or tenant id. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - galleryUniqueName - The unique name of the Shared Gallery. // - options - SharedGalleryImagesClientListOptions contains the optional parameters for the SharedGalleryImagesClient.NewListPager @@ -163,7 +163,7 @@ func (client *SharedGalleryImagesClient) listCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") if options != nil && options.SharedTo != nil { reqQP.Set("sharedTo", string(*options.SharedTo)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimageversions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimageversions_client.go index 5c64dc844..729b0e3c5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimageversions_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimageversions_client.go @@ -47,7 +47,7 @@ func NewSharedGalleryImageVersionsClient(subscriptionID string, credential azcor // Get - Get a shared gallery image version by subscription id or tenant id. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - galleryUniqueName - The unique name of the Shared Gallery. // - galleryImageName - The name of the Shared Gallery Image Definition from which the Image Versions are to be listed. @@ -106,7 +106,7 @@ func (client *SharedGalleryImageVersionsClient) getCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -123,7 +123,7 @@ func (client *SharedGalleryImageVersionsClient) getHandleResponse(resp *http.Res // NewListPager - List shared gallery image versions by subscription id or tenant id. // -// Generated from API version 2022-08-03 +// Generated from API version 2023-07-03 // - location - Resource location. // - galleryUniqueName - The unique name of the Shared Gallery. // - galleryImageName - The name of the Shared Gallery Image Definition from which the Image Versions are to be listed. 
@@ -176,7 +176,7 @@ func (client *SharedGalleryImageVersionsClient) listCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2022-08-03") + reqQP.Set("api-version", "2023-07-03") if options != nil && options.SharedTo != nil { reqQP.Set("sharedTo", string(*options.SharedTo)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/time_rfc3339.go index 4f75ccd6f..ae4e62dd4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/time_rfc3339.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/time_rfc3339.go @@ -19,12 +19,16 @@ import ( ) // Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. -var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) +var tzOffsetRegex = regexp.MustCompile(`(?:Z|z|\+|-)(?:\d+:\d+)*"*$`) const ( - utcDateTimeJSON = `"2006-01-02T15:04:05.999999999"` - utcDateTime = "2006-01-02T15:04:05.999999999" - dateTimeJSON = `"` + time.RFC3339Nano + `"` + utcDateTime = "2006-01-02T15:04:05.999999999" + utcDateTimeJSON = `"` + utcDateTime + `"` + utcDateTimeNoT = "2006-01-02 15:04:05.999999999" + utcDateTimeJSONNoT = `"` + utcDateTimeNoT + `"` + dateTimeNoT = `2006-01-02 15:04:05.999999999Z07:00` + dateTimeJSON = `"` + time.RFC3339Nano + `"` + dateTimeJSONNoT = `"` + dateTimeNoT + `"` ) type dateTimeRFC3339 time.Time @@ -40,17 +44,33 @@ func (t dateTimeRFC3339) MarshalText() ([]byte, error) { } func (t *dateTimeRFC3339) UnmarshalJSON(data []byte) error { - layout := utcDateTimeJSON - if tzOffsetRegex.Match(data) { + tzOffset := tzOffsetRegex.Match(data) + hasT := strings.Contains(string(data), "T") || strings.Contains(string(data), "t") + var layout string + if tzOffset && hasT { layout = dateTimeJSON + } else if tzOffset { + layout = dateTimeJSONNoT + } else if hasT { + layout = utcDateTimeJSON + } else { + layout = utcDateTimeJSONNoT } return t.Parse(layout, string(data)) } func (t *dateTimeRFC3339) UnmarshalText(data []byte) error { - layout := utcDateTime - if tzOffsetRegex.Match(data) { + tzOffset := tzOffsetRegex.Match(data) + hasT := strings.Contains(string(data), "T") || strings.Contains(string(data), "t") + var layout string + if tzOffset && hasT { layout = time.RFC3339Nano + } else if tzOffset { + layout = dateTimeNoT + } else if hasT { + layout = utcDateTime + } else { + layout = utcDateTimeNoT } return t.Parse(layout, string(data)) } @@ -61,6 +81,10 @@ func (t *dateTimeRFC3339) Parse(layout, value string) error { return err } +func (t dateTimeRFC3339) String() string { + return time.Time(t).Format(time.RFC3339Nano) +} + func populateDateTimeRFC3339(m map[string]any, k string, t *time.Time) { if t == nil { return @@ -74,7 +98,7 @@ func populateDateTimeRFC3339(m map[string]any, k string, t *time.Time) { } func unpopulateDateTimeRFC3339(data json.RawMessage, fn string, t **time.Time) error { - if data == nil || strings.EqualFold(string(data), "null") { + if data == nil || string(data) == "null" { return nil } var aux dateTimeRFC3339 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensionimages_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensionimages_client.go index a8e90c4b1..e8ff65ad0 100644 --- 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensionimages_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensionimages_client.go @@ -237,12 +237,12 @@ func (client *VirtualMachineExtensionImagesClient) listVersionsCreateRequest(ctx if options != nil && options.Filter != nil { reqQP.Set("$filter", *options.Filter) } - if options != nil && options.Top != nil { - reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) - } if options != nil && options.Orderby != nil { reqQP.Set("$orderby", *options.Orderby) } + if options != nil && options.Top != nil { + reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) + } reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimages_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimages_client.go index 1c5ab27e5..2e0e80d8b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimages_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimages_client.go @@ -188,12 +188,12 @@ func (client *VirtualMachineImagesClient) listCreateRequest(ctx context.Context, if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - if options != nil && options.Top != nil { - reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) - } if options != nil && options.Orderby != nil { reqQP.Set("$orderby", *options.Orderby) } + if options != nil && options.Top != nil { + reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) + } reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimagesedgezone_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimagesedgezone_client.go index 4151b10d3..756fe975e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimagesedgezone_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimagesedgezone_client.go @@ -198,12 +198,12 @@ func (client *VirtualMachineImagesEdgeZoneClient) listCreateRequest(ctx context. 
if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - if options != nil && options.Top != nil { - reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) - } if options != nil && options.Orderby != nil { reqQP.Set("$orderby", *options.Orderby) } + if options != nil && options.Top != nil { + reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) + } reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachines_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachines_client.go index b665b49f5..a816e9a9d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachines_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachines_client.go @@ -439,13 +439,13 @@ func (client *VirtualMachinesClient) createOrUpdateCreateRequest(ctx context.Con reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.IfMatch != nil { req.Raw().Header["If-Match"] = []string{*options.IfMatch} } if options != nil && options.IfNoneMatch != nil { req.Raw().Header["If-None-Match"] = []string{*options.IfNoneMatch} } - req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { return nil, err } @@ -524,10 +524,10 @@ func (client *VirtualMachinesClient) deallocateCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") if options != nil && options.Hibernate != nil { reqQP.Set("hibernate", strconv.FormatBool(*options.Hibernate)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -603,10 +603,10 @@ func (client *VirtualMachinesClient) deleteCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") if options != nil && options.ForceDeletion != nil { reqQP.Set("forceDeletion", strconv.FormatBool(*options.ForceDeletion)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -930,12 +930,12 @@ func (client *VirtualMachinesClient) listCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - if options != nil && options.Filter != nil { - reqQP.Set("$filter", *options.Filter) - } if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } + if options != nil && options.Filter != nil { + reqQP.Set("$filter", *options.Filter) + } reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} @@ -992,15 +992,15 @@ func (client *VirtualMachinesClient) listAllCreateRequest(ctx context.Context, o return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") - if options != nil && options.StatusOnly != nil { - reqQP.Set("statusOnly", *options.StatusOnly) + if options != nil && options.Expand != nil { + reqQP.Set("$expand", string(*options.Expand)) 
} if options != nil && options.Filter != nil { reqQP.Set("$filter", *options.Filter) } - if options != nil && options.Expand != nil { - reqQP.Set("$expand", string(*options.Expand)) + reqQP.Set("api-version", "2023-09-01") + if options != nil && options.StatusOnly != nil { + reqQP.Set("statusOnly", *options.StatusOnly) } req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} @@ -1290,10 +1290,10 @@ func (client *VirtualMachinesClient) powerOffCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") if options != nil && options.SkipShutdown != nil { reqQP.Set("skipShutdown", strconv.FormatBool(*options.SkipShutdown)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1667,10 +1667,10 @@ func (client *VirtualMachinesClient) retrieveBootDiagnosticsDataCreateRequest(ct return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") if options != nil && options.SasURIExpirationTimeInMinutes != nil { reqQP.Set("sasUriExpirationTimeInMinutes", strconv.FormatInt(int64(*options.SasURIExpirationTimeInMinutes), 10)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1970,13 +1970,13 @@ func (client *VirtualMachinesClient) updateCreateRequest(ctx context.Context, re reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.IfMatch != nil { req.Raw().Header["If-Match"] = []string{*options.IfMatch} } if options != nil && options.IfNoneMatch != nil { req.Raw().Header["If-None-Match"] = []string{*options.IfNoneMatch} } - req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesets_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesets_client.go index eb6506c15..81f13d504 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesets_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesets_client.go @@ -261,13 +261,13 @@ func (client *VirtualMachineScaleSetsClient) createOrUpdateCreateRequest(ctx con reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.IfMatch != nil { req.Raw().Header["If-Match"] = []string{*options.IfMatch} } if options != nil && options.IfNoneMatch != nil { req.Raw().Header["If-None-Match"] = []string{*options.IfNoneMatch} } - req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { return nil, err } @@ -348,10 +348,10 @@ func (client *VirtualMachineScaleSetsClient) deallocateCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") if options != nil && options.Hibernate != nil { reqQP.Set("hibernate", strconv.FormatBool(*options.Hibernate)) } - 
reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMInstanceIDs != nil { @@ -433,10 +433,10 @@ func (client *VirtualMachineScaleSetsClient) deleteCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") if options != nil && options.ForceDeletion != nil { reqQP.Set("forceDeletion", strconv.FormatBool(*options.ForceDeletion)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -513,10 +513,10 @@ func (client *VirtualMachineScaleSetsClient) deleteInstancesCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") if options != nil && options.ForceDeletion != nil { reqQP.Set("forceDeletion", strconv.FormatBool(*options.ForceDeletion)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, vmInstanceIDs); err != nil { @@ -578,13 +578,13 @@ func (client *VirtualMachineScaleSetsClient) forceRecoveryServiceFabricPlatformU } reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2023-09-01") + if options != nil && options.PlacementGroupID != nil { + reqQP.Set("placementGroupId", *options.PlacementGroupID) + } reqQP.Set("platformUpdateDomain", strconv.FormatInt(int64(platformUpdateDomain), 10)) if options != nil && options.Zone != nil { reqQP.Set("zone", *options.Zone) } - if options != nil && options.PlacementGroupID != nil { - reqQP.Set("placementGroupId", *options.PlacementGroupID) - } req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -649,10 +649,10 @@ func (client *VirtualMachineScaleSetsClient) getCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } + reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1200,10 +1200,10 @@ func (client *VirtualMachineScaleSetsClient) powerOffCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") if options != nil && options.SkipShutdown != nil { reqQP.Set("skipShutdown", strconv.FormatBool(*options.SkipShutdown)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMInstanceIDs != nil { @@ -1863,13 +1863,13 @@ func (client *VirtualMachineScaleSetsClient) updateCreateRequest(ctx context.Con reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.IfMatch != nil { req.Raw().Header["If-Match"] = []string{*options.IfMatch} } if options != nil && options.IfNoneMatch != nil { req.Raw().Header["If-None-Match"] = []string{*options.IfNoneMatch} } - req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { return nil, err } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvms_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvms_client.go index e1da5fc11..0bd6fc83a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvms_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvms_client.go @@ -372,10 +372,10 @@ func (client *VirtualMachineScaleSetVMsClient) deleteCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") if options != nil && options.ForceDeletion != nil { reqQP.Set("forceDeletion", strconv.FormatBool(*options.ForceDeletion)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -574,15 +574,15 @@ func (client *VirtualMachineScaleSetVMsClient) listCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() + if options != nil && options.Expand != nil { + reqQP.Set("$expand", *options.Expand) + } if options != nil && options.Filter != nil { reqQP.Set("$filter", *options.Filter) } if options != nil && options.Select != nil { reqQP.Set("$select", *options.Select) } - if options != nil && options.Expand != nil { - reqQP.Set("$expand", *options.Expand) - } reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} @@ -758,10 +758,10 @@ func (client *VirtualMachineScaleSetVMsClient) powerOffCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") if options != nil && options.SkipShutdown != nil { reqQP.Set("skipShutdown", strconv.FormatBool(*options.SkipShutdown)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1157,10 +1157,10 @@ func (client *VirtualMachineScaleSetVMsClient) retrieveBootDiagnosticsDataCreate return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-09-01") if options != nil && options.SasURIExpirationTimeInMinutes != nil { reqQP.Set("sasUriExpirationTimeInMinutes", strconv.FormatInt(int64(*options.SasURIExpirationTimeInMinutes), 10)) } - reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1480,13 +1480,13 @@ func (client *VirtualMachineScaleSetVMsClient) updateCreateRequest(ctx context.C reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2023-09-01") req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.IfMatch != nil { req.Raw().Header["If-Match"] = []string{*options.IfMatch} } if options != nil && options.IfNoneMatch != nil { req.Raw().Header["If-None-Match"] = []string{*options.IfNoneMatch} } - req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/CHANGELOG.md index acc2e104c..1abd1ebfc 
100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/CHANGELOG.md @@ -1,5 +1,117 @@ # Release History +## 4.8.0 (2024-03-22) +### Features Added + +- New struct `ManagedClusterIngressProfile` +- New struct `ManagedClusterIngressProfileWebAppRouting` +- New field `IngressProfile` in struct `ManagedClusterProperties` + + +## 4.8.0-beta.1 (2024-02-23) +### Features Added + +- New value `AgentPoolTypeVirtualMachines` added to enum type `AgentPoolType` +- New value `NetworkPolicyNone` added to enum type `NetworkPolicy` +- New value `NodeOSUpgradeChannelSecurityPatch` added to enum type `NodeOSUpgradeChannel` +- New value `OSSKUMariner`, `OSSKUWindowsAnnual` added to enum type `OSSKU` +- New value `PublicNetworkAccessSecuredByPerimeter` added to enum type `PublicNetworkAccess` +- New value `SnapshotTypeManagedCluster` added to enum type `SnapshotType` +- New value `WorkloadRuntimeKataMshvVMIsolation` added to enum type `WorkloadRuntime` +- New enum type `AddonAutoscaling` with values `AddonAutoscalingDisabled`, `AddonAutoscalingEnabled` +- New enum type `AgentPoolSSHAccess` with values `AgentPoolSSHAccessDisabled`, `AgentPoolSSHAccessLocalUser` +- New enum type `GuardrailsSupport` with values `GuardrailsSupportPreview`, `GuardrailsSupportStable` +- New enum type `IpvsScheduler` with values `IpvsSchedulerLeastConnection`, `IpvsSchedulerRoundRobin` +- New enum type `Level` with values `LevelEnforcement`, `LevelOff`, `LevelWarning` +- New enum type `Mode` with values `ModeIPTABLES`, `ModeIPVS` +- New enum type `NodeProvisioningMode` with values `NodeProvisioningModeAuto`, `NodeProvisioningModeManual` +- New enum type `RestrictionLevel` with values `RestrictionLevelReadOnly`, `RestrictionLevelUnrestricted` +- New enum type `SafeguardsSupport` with values `SafeguardsSupportPreview`, `SafeguardsSupportStable` +- New function `*AgentPoolsClient.BeginDeleteMachines(context.Context, string, string, string, AgentPoolDeleteMachinesParameter, *AgentPoolsClientBeginDeleteMachinesOptions) (*runtime.Poller[AgentPoolsClientDeleteMachinesResponse], error)` +- New function `*ClientFactory.NewMachinesClient() *MachinesClient` +- New function `*ClientFactory.NewManagedClusterSnapshotsClient() *ManagedClusterSnapshotsClient` +- New function `*ClientFactory.NewOperationStatusResultClient() *OperationStatusResultClient` +- New function `NewMachinesClient(string, azcore.TokenCredential, *arm.ClientOptions) (*MachinesClient, error)` +- New function `*MachinesClient.Get(context.Context, string, string, string, string, *MachinesClientGetOptions) (MachinesClientGetResponse, error)` +- New function `*MachinesClient.NewListPager(string, string, string, *MachinesClientListOptions) *runtime.Pager[MachinesClientListResponse]` +- New function `NewManagedClusterSnapshotsClient(string, azcore.TokenCredential, *arm.ClientOptions) (*ManagedClusterSnapshotsClient, error)` +- New function `*ManagedClusterSnapshotsClient.CreateOrUpdate(context.Context, string, string, ManagedClusterSnapshot, *ManagedClusterSnapshotsClientCreateOrUpdateOptions) (ManagedClusterSnapshotsClientCreateOrUpdateResponse, error)` +- New function `*ManagedClusterSnapshotsClient.Delete(context.Context, string, string, *ManagedClusterSnapshotsClientDeleteOptions) (ManagedClusterSnapshotsClientDeleteResponse, error)` +- New function `*ManagedClusterSnapshotsClient.Get(context.Context, 
string, string, *ManagedClusterSnapshotsClientGetOptions) (ManagedClusterSnapshotsClientGetResponse, error)` +- New function `*ManagedClusterSnapshotsClient.NewListByResourceGroupPager(string, *ManagedClusterSnapshotsClientListByResourceGroupOptions) *runtime.Pager[ManagedClusterSnapshotsClientListByResourceGroupResponse]` +- New function `*ManagedClusterSnapshotsClient.NewListPager(*ManagedClusterSnapshotsClientListOptions) *runtime.Pager[ManagedClusterSnapshotsClientListResponse]` +- New function `*ManagedClusterSnapshotsClient.UpdateTags(context.Context, string, string, TagsObject, *ManagedClusterSnapshotsClientUpdateTagsOptions) (ManagedClusterSnapshotsClientUpdateTagsResponse, error)` +- New function `*ManagedClustersClient.GetGuardrailsVersions(context.Context, string, string, *ManagedClustersClientGetGuardrailsVersionsOptions) (ManagedClustersClientGetGuardrailsVersionsResponse, error)` +- New function `*ManagedClustersClient.GetSafeguardsVersions(context.Context, string, string, *ManagedClustersClientGetSafeguardsVersionsOptions) (ManagedClustersClientGetSafeguardsVersionsResponse, error)` +- New function `*ManagedClustersClient.NewListGuardrailsVersionsPager(string, *ManagedClustersClientListGuardrailsVersionsOptions) *runtime.Pager[ManagedClustersClientListGuardrailsVersionsResponse]` +- New function `*ManagedClustersClient.NewListSafeguardsVersionsPager(string, *ManagedClustersClientListSafeguardsVersionsOptions) *runtime.Pager[ManagedClustersClientListSafeguardsVersionsResponse]` +- New function `NewOperationStatusResultClient(string, azcore.TokenCredential, *arm.ClientOptions) (*OperationStatusResultClient, error)` +- New function `*OperationStatusResultClient.Get(context.Context, string, string, string, *OperationStatusResultClientGetOptions) (OperationStatusResultClientGetResponse, error)` +- New function `*OperationStatusResultClient.GetByAgentPool(context.Context, string, string, string, string, *OperationStatusResultClientGetByAgentPoolOptions) (OperationStatusResultClientGetByAgentPoolResponse, error)` +- New function `*OperationStatusResultClient.NewListPager(string, string, *OperationStatusResultClientListOptions) *runtime.Pager[OperationStatusResultClientListResponse]` +- New struct `AgentPoolArtifactStreamingProfile` +- New struct `AgentPoolDeleteMachinesParameter` +- New struct `AgentPoolGPUProfile` +- New struct `AgentPoolSecurityProfile` +- New struct `AgentPoolWindowsProfile` +- New struct `ErrorAdditionalInfo` +- New struct `ErrorDetail` +- New struct `GuardrailsAvailableVersion` +- New struct `GuardrailsAvailableVersionsList` +- New struct `GuardrailsAvailableVersionsProperties` +- New struct `Machine` +- New struct `MachineIPAddress` +- New struct `MachineListResult` +- New struct `MachineNetworkProperties` +- New struct `MachineProperties` +- New struct `ManagedClusterAIToolchainOperatorProfile` +- New struct `ManagedClusterAzureMonitorProfileAppMonitoring` +- New struct `ManagedClusterAzureMonitorProfileAppMonitoringOpenTelemetryMetrics` +- New struct `ManagedClusterAzureMonitorProfileContainerInsights` +- New struct `ManagedClusterAzureMonitorProfileLogs` +- New struct `ManagedClusterAzureMonitorProfileWindowsHostLogs` +- New struct `ManagedClusterCostAnalysis` +- New struct `ManagedClusterIngressProfile` +- New struct `ManagedClusterIngressProfileWebAppRouting` +- New struct `ManagedClusterMetricsProfile` +- New struct `ManagedClusterNodeProvisioningProfile` +- New struct `ManagedClusterNodeResourceGroupProfile` +- New struct 
`ManagedClusterPropertiesForSnapshot` +- New struct `ManagedClusterSecurityProfileImageIntegrity` +- New struct `ManagedClusterSecurityProfileNodeRestriction` +- New struct `ManagedClusterSnapshot` +- New struct `ManagedClusterSnapshotListResult` +- New struct `ManagedClusterSnapshotProperties` +- New struct `ManualScaleProfile` +- New struct `NetworkMonitoring` +- New struct `NetworkProfileForSnapshot` +- New struct `NetworkProfileKubeProxyConfig` +- New struct `NetworkProfileKubeProxyConfigIpvsConfig` +- New struct `OperationStatusResult` +- New struct `OperationStatusResultList` +- New struct `SafeguardsAvailableVersion` +- New struct `SafeguardsAvailableVersionsList` +- New struct `SafeguardsAvailableVersionsProperties` +- New struct `SafeguardsProfile` +- New struct `ScaleProfile` +- New struct `VirtualMachineNodes` +- New struct `VirtualMachinesProfile` +- New field `IgnorePodDisruptionBudget` in struct `AgentPoolsClientBeginDeleteOptions` +- New field `EnableVnetIntegration`, `SubnetID` in struct `ManagedClusterAPIServerAccessProfile` +- New field `ArtifactStreamingProfile`, `EnableCustomCATrust`, `GpuProfile`, `MessageOfTheDay`, `NodeInitializationTaints`, `SecurityProfile`, `VirtualMachineNodesStatus`, `VirtualMachinesProfile`, `WindowsProfile` in struct `ManagedClusterAgentPoolProfile` +- New field `ArtifactStreamingProfile`, `EnableCustomCATrust`, `GpuProfile`, `MessageOfTheDay`, `NodeInitializationTaints`, `SecurityProfile`, `VirtualMachineNodesStatus`, `VirtualMachinesProfile`, `WindowsProfile` in struct `ManagedClusterAgentPoolProfileProperties` +- New field `Logs` in struct `ManagedClusterAzureMonitorProfile` +- New field `AppMonitoringOpenTelemetryMetrics` in struct `ManagedClusterAzureMonitorProfileMetrics` +- New field `EffectiveNoProxy` in struct `ManagedClusterHTTPProxyConfig` +- New field `AiToolchainOperatorProfile`, `CreationData`, `EnableNamespaceResources`, `IngressProfile`, `MetricsProfile`, `NodeProvisioningProfile`, `NodeResourceGroupProfile`, `SafeguardsProfile` in struct `ManagedClusterProperties` +- New field `DaemonsetEvictionForEmptyNodes`, `DaemonsetEvictionForOccupiedNodes`, `IgnoreDaemonsetsUtilization` in struct `ManagedClusterPropertiesAutoScalerProfile` +- New field `CustomCATrustCertificates`, `ImageIntegrity`, `NodeRestriction` in struct `ManagedClusterSecurityProfile` +- New field `Version` in struct `ManagedClusterStorageProfileDiskCSIDriver` +- New field `AddonAutoscaling` in struct `ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler` +- New field `IgnorePodDisruptionBudget` in struct `ManagedClustersClientBeginDeleteOptions` +- New field `KubeProxyConfig`, `Monitoring` in struct `NetworkProfile` + + ## 4.7.0 (2024-01-26) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/agentpools_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/agentpools_client.go index ba8dcb84c..37ed7db8e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/agentpools_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/agentpools_client.go @@ -48,7 +48,7 @@ func NewAgentPoolsClient(subscriptionID string, credential azcore.TokenCredentia // before cancellation can take place, a 409 error code is returned. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - agentPoolName - The name of the agent pool. @@ -77,7 +77,7 @@ func (client *AgentPoolsClient) BeginAbortLatestOperation(ctx context.Context, r // before cancellation can take place, a 409 error code is returned. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 func (client *AgentPoolsClient) abortLatestOperation(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string, options *AgentPoolsClientBeginAbortLatestOperationOptions) (*http.Response, error) { var err error const operationName = "AgentPoolsClient.BeginAbortLatestOperation" @@ -123,7 +123,7 @@ func (client *AgentPoolsClient) abortLatestOperationCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -132,7 +132,7 @@ func (client *AgentPoolsClient) abortLatestOperationCreateRequest(ctx context.Co // BeginCreateOrUpdate - Creates or updates an agent pool in the specified managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - agentPoolName - The name of the agent pool. @@ -159,7 +159,7 @@ func (client *AgentPoolsClient) BeginCreateOrUpdate(ctx context.Context, resourc // CreateOrUpdate - Creates or updates an agent pool in the specified managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 func (client *AgentPoolsClient) createOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string, parameters AgentPool, options *AgentPoolsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "AgentPoolsClient.BeginCreateOrUpdate" @@ -205,7 +205,7 @@ func (client *AgentPoolsClient) createOrUpdateCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -217,7 +217,7 @@ func (client *AgentPoolsClient) createOrUpdateCreateRequest(ctx context.Context, // BeginDelete - Deletes an agent pool in the specified managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - agentPoolName - The name of the agent pool. @@ -242,7 +242,7 @@ func (client *AgentPoolsClient) BeginDelete(ctx context.Context, resourceGroupNa // Delete - Deletes an agent pool in the specified managed cluster. 
// If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 func (client *AgentPoolsClient) deleteOperation(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string, options *AgentPoolsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "AgentPoolsClient.BeginDelete" @@ -288,7 +288,7 @@ func (client *AgentPoolsClient) deleteCreateRequest(ctx context.Context, resourc return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -297,7 +297,7 @@ func (client *AgentPoolsClient) deleteCreateRequest(ctx context.Context, resourc // Get - Gets the specified managed cluster agent pool. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - agentPoolName - The name of the agent pool. @@ -348,7 +348,7 @@ func (client *AgentPoolsClient) getCreateRequest(ctx context.Context, resourceGr return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -367,7 +367,7 @@ func (client *AgentPoolsClient) getHandleResponse(resp *http.Response) (AgentPoo // for more details about the version lifecycle. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - AgentPoolsClientGetAvailableAgentPoolVersionsOptions contains the optional parameters for the AgentPoolsClient.GetAvailableAgentPoolVersions @@ -414,7 +414,7 @@ func (client *AgentPoolsClient) getAvailableAgentPoolVersionsCreateRequest(ctx c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -432,7 +432,7 @@ func (client *AgentPoolsClient) getAvailableAgentPoolVersionsHandleResponse(resp // GetUpgradeProfile - Gets the upgrade profile for an agent pool. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - agentPoolName - The name of the agent pool. 
@@ -484,7 +484,7 @@ func (client *AgentPoolsClient) getUpgradeProfileCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -501,7 +501,7 @@ func (client *AgentPoolsClient) getUpgradeProfileHandleResponse(resp *http.Respo // NewListPager - Gets a list of agent pools in the specified managed cluster. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - AgentPoolsClientListOptions contains the optional parameters for the AgentPoolsClient.NewListPager method. @@ -548,7 +548,7 @@ func (client *AgentPoolsClient) listCreateRequest(ctx context.Context, resourceG return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -568,7 +568,7 @@ func (client *AgentPoolsClient) listHandleResponse(resp *http.Response) (AgentPo // versions, see: https://docs.microsoft.com/azure/aks/node-image-upgrade // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - agentPoolName - The name of the agent pool. @@ -597,7 +597,7 @@ func (client *AgentPoolsClient) BeginUpgradeNodeImageVersion(ctx context.Context // versions, see: https://docs.microsoft.com/azure/aks/node-image-upgrade // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 func (client *AgentPoolsClient) upgradeNodeImageVersion(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string, options *AgentPoolsClientBeginUpgradeNodeImageVersionOptions) (*http.Response, error) { var err error const operationName = "AgentPoolsClient.BeginUpgradeNodeImageVersion" @@ -643,7 +643,7 @@ func (client *AgentPoolsClient) upgradeNodeImageVersionCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/assets.json index d8f19405a..438b64f4e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/resourcemanager/containerservice/armcontainerservice", - "Tag": "go/resourcemanager/containerservice/armcontainerservice_d09553bfda" + "Tag": "go/resourcemanager/containerservice/armcontainerservice_ca5abdb7bb" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/autorest.md index 65197fe3f..0fcac0bfa 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/autorest.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/autorest.md @@ -5,9 +5,9 @@ ``` yaml azure-arm: true require: -- https://github.com/Azure/azure-rest-api-specs/blob/8b5618d760532b69d6f7434f57172ea52e109c79/specification/containerservice/resource-manager/Microsoft.ContainerService/aks/readme.md -- https://github.com/Azure/azure-rest-api-specs/blob/8b5618d760532b69d6f7434f57172ea52e109c79/specification/containerservice/resource-manager/Microsoft.ContainerService/aks/readme.go.md +- https://github.com/Azure/azure-rest-api-specs/blob/c16ce913afbdaa073e8ca5e480f3b465db2de542/specification/containerservice/resource-manager/Microsoft.ContainerService/aks/readme.md +- https://github.com/Azure/azure-rest-api-specs/blob/c16ce913afbdaa073e8ca5e480f3b465db2de542/specification/containerservice/resource-manager/Microsoft.ContainerService/aks/readme.go.md license-header: MICROSOFT_MIT_NO_VERSION -module-version: 4.7.0 -tag: package-2023-11 +module-version: 4.8.0 +tag: package-2024-01 ``` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/ci.yml index 1c3615696..176c023b2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/ci.yml @@ -21,8 +21,8 @@ pr: include: - 
sdk/resourcemanager/containerservice/armcontainerservice/ -stages: -- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml +extends: + template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml parameters: IncludeRelease: true ServiceDirectory: 'resourcemanager/containerservice/armcontainerservice' diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/client_factory.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/client_factory.go index 996d5cb69..629ced52b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/client_factory.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/client_factory.go @@ -17,8 +17,7 @@ import ( // Don't use this type directly, use NewClientFactory instead. type ClientFactory struct { subscriptionID string - credential azcore.TokenCredential - options *arm.ClientOptions + internal *arm.Client } // NewClientFactory creates a new instance of ClientFactory with the specified values. @@ -27,72 +26,91 @@ type ClientFactory struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewClientFactory(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ClientFactory, error) { - _, err := arm.NewClient(moduleName, moduleVersion, credential, options) + internal, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } return &ClientFactory{ - subscriptionID: subscriptionID, credential: credential, - options: options.Clone(), + subscriptionID: subscriptionID, + internal: internal, }, nil } // NewAgentPoolsClient creates a new instance of AgentPoolsClient. func (c *ClientFactory) NewAgentPoolsClient() *AgentPoolsClient { - subClient, _ := NewAgentPoolsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &AgentPoolsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewMaintenanceConfigurationsClient creates a new instance of MaintenanceConfigurationsClient. func (c *ClientFactory) NewMaintenanceConfigurationsClient() *MaintenanceConfigurationsClient { - subClient, _ := NewMaintenanceConfigurationsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &MaintenanceConfigurationsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewManagedClustersClient creates a new instance of ManagedClustersClient. func (c *ClientFactory) NewManagedClustersClient() *ManagedClustersClient { - subClient, _ := NewManagedClustersClient(c.subscriptionID, c.credential, c.options) - return subClient + return &ManagedClustersClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewOperationsClient creates a new instance of OperationsClient. func (c *ClientFactory) NewOperationsClient() *OperationsClient { - subClient, _ := NewOperationsClient(c.credential, c.options) - return subClient + return &OperationsClient{ + internal: c.internal, + } } // NewPrivateEndpointConnectionsClient creates a new instance of PrivateEndpointConnectionsClient. 
func (c *ClientFactory) NewPrivateEndpointConnectionsClient() *PrivateEndpointConnectionsClient { - subClient, _ := NewPrivateEndpointConnectionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &PrivateEndpointConnectionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewPrivateLinkResourcesClient creates a new instance of PrivateLinkResourcesClient. func (c *ClientFactory) NewPrivateLinkResourcesClient() *PrivateLinkResourcesClient { - subClient, _ := NewPrivateLinkResourcesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &PrivateLinkResourcesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewResolvePrivateLinkServiceIDClient creates a new instance of ResolvePrivateLinkServiceIDClient. func (c *ClientFactory) NewResolvePrivateLinkServiceIDClient() *ResolvePrivateLinkServiceIDClient { - subClient, _ := NewResolvePrivateLinkServiceIDClient(c.subscriptionID, c.credential, c.options) - return subClient + return &ResolvePrivateLinkServiceIDClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewSnapshotsClient creates a new instance of SnapshotsClient. func (c *ClientFactory) NewSnapshotsClient() *SnapshotsClient { - subClient, _ := NewSnapshotsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &SnapshotsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewTrustedAccessRoleBindingsClient creates a new instance of TrustedAccessRoleBindingsClient. func (c *ClientFactory) NewTrustedAccessRoleBindingsClient() *TrustedAccessRoleBindingsClient { - subClient, _ := NewTrustedAccessRoleBindingsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &TrustedAccessRoleBindingsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewTrustedAccessRolesClient creates a new instance of TrustedAccessRolesClient. func (c *ClientFactory) NewTrustedAccessRolesClient() *TrustedAccessRolesClient { - subClient, _ := NewTrustedAccessRolesClient(c.subscriptionID, c.credential, c.options) - return subClient + return &TrustedAccessRolesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/constants.go index a8f2d989b..bc010893b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/constants.go @@ -10,7 +10,7 @@ package armcontainerservice const ( moduleName = "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice" - moduleVersion = "v4.7.0" + moduleVersion = "v4.8.0" ) // AgentPoolMode - A cluster must have at least one 'System' Agent Pool at all times. 
For additional information on agent diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/date_type.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/date_type.go index 64a96c1d0..1345db0d1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/date_type.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/date_type.go @@ -13,7 +13,6 @@ import ( "fmt" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "reflect" - "strings" "time" ) @@ -47,7 +46,7 @@ func populateDateType(m map[string]any, k string, t *time.Time) { } func unpopulateDateType(data json.RawMessage, fn string, t **time.Time) error { - if data == nil || strings.EqualFold(string(data), "null") { + if data == nil || string(data) == "null" { return nil } var aux dateType diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/maintenanceconfigurations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/maintenanceconfigurations_client.go index 9ddd6bdbf..8dd64e67d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/maintenanceconfigurations_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/maintenanceconfigurations_client.go @@ -46,7 +46,7 @@ func NewMaintenanceConfigurationsClient(subscriptionID string, credential azcore // CreateOrUpdate - Creates or updates a maintenance configuration in the specified managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - configName - The name of the maintenance configuration. @@ -99,7 +99,7 @@ func (client *MaintenanceConfigurationsClient) createOrUpdateCreateRequest(ctx c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -120,7 +120,7 @@ func (client *MaintenanceConfigurationsClient) createOrUpdateHandleResponse(resp // Delete - Deletes a maintenance configuration. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - configName - The name of the maintenance configuration. 
@@ -171,7 +171,7 @@ func (client *MaintenanceConfigurationsClient) deleteCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -180,7 +180,7 @@ func (client *MaintenanceConfigurationsClient) deleteCreateRequest(ctx context.C // Get - Gets the specified maintenance configuration of a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - configName - The name of the maintenance configuration. @@ -232,7 +232,7 @@ func (client *MaintenanceConfigurationsClient) getCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -249,7 +249,7 @@ func (client *MaintenanceConfigurationsClient) getHandleResponse(resp *http.Resp // NewListByManagedClusterPager - Gets a list of maintenance configurations in the specified managed cluster. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - MaintenanceConfigurationsClientListByManagedClusterOptions contains the optional parameters for the MaintenanceConfigurationsClient.NewListByManagedClusterPager @@ -297,7 +297,7 @@ func (client *MaintenanceConfigurationsClient) listByManagedClusterCreateRequest return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/managedclusters_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/managedclusters_client.go index 4bcbed44a..784c9765c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/managedclusters_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/managedclusters_client.go @@ -48,7 +48,7 @@ func NewManagedClustersClient(subscriptionID string, credential azcore.TokenCred // completes before cancellation can take place, a 409 error code is returned. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. 
// - options - ManagedClustersClientBeginAbortLatestOperationOptions contains the optional parameters for the ManagedClustersClient.BeginAbortLatestOperation @@ -76,7 +76,7 @@ func (client *ManagedClustersClient) BeginAbortLatestOperation(ctx context.Conte // completes before cancellation can take place, a 409 error code is returned. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 func (client *ManagedClustersClient) abortLatestOperation(ctx context.Context, resourceGroupName string, resourceName string, options *ManagedClustersClientBeginAbortLatestOperationOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginAbortLatestOperation" @@ -118,7 +118,7 @@ func (client *ManagedClustersClient) abortLatestOperationCreateRequest(ctx conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -127,7 +127,7 @@ func (client *ManagedClustersClient) abortLatestOperationCreateRequest(ctx conte // BeginCreateOrUpdate - Creates or updates a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - parameters - The managed cluster to create or update. @@ -153,7 +153,7 @@ func (client *ManagedClustersClient) BeginCreateOrUpdate(ctx context.Context, re // CreateOrUpdate - Creates or updates a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 func (client *ManagedClustersClient) createOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedCluster, options *ManagedClustersClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginCreateOrUpdate" @@ -195,7 +195,7 @@ func (client *ManagedClustersClient) createOrUpdateCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -207,7 +207,7 @@ func (client *ManagedClustersClient) createOrUpdateCreateRequest(ctx context.Con // BeginDelete - Deletes a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientBeginDeleteOptions contains the optional parameters for the ManagedClustersClient.BeginDelete @@ -232,7 +232,7 @@ func (client *ManagedClustersClient) BeginDelete(ctx context.Context, resourceGr // Delete - Deletes a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 func (client *ManagedClustersClient) deleteOperation(ctx context.Context, resourceGroupName string, resourceName string, options *ManagedClustersClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginDelete" @@ -274,7 +274,7 @@ func (client *ManagedClustersClient) deleteCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -283,7 +283,7 @@ func (client *ManagedClustersClient) deleteCreateRequest(ctx context.Context, re // Get - Gets a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientGetOptions contains the optional parameters for the ManagedClustersClient.Get method. @@ -329,7 +329,7 @@ func (client *ManagedClustersClient) getCreateRequest(ctx context.Context, resou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -349,7 +349,7 @@ func (client *ManagedClustersClient) getHandleResponse(resp *http.Response) (Man // [https://docs.microsoft.com/rest/api/aks/managedclusters/listclusteradmincredentials] . // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - roleName - The name of the role for managed cluster accessProfile resource. @@ -401,7 +401,7 @@ func (client *ManagedClustersClient) getAccessProfileCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -419,7 +419,7 @@ func (client *ManagedClustersClient) getAccessProfileHandleResponse(resp *http.R // GetCommandResult - Gets the results of a command which has been run on the Managed Cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - commandID - Id of the command. 
@@ -471,7 +471,7 @@ func (client *ManagedClustersClient) getCommandResultCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -493,7 +493,7 @@ func (client *ManagedClustersClient) getCommandResultHandleResponse(resp *http.R // and available upgrades // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - location - The name of the Azure region. // - mode - The mode of the mesh. // - options - ManagedClustersClientGetMeshRevisionProfileOptions contains the optional parameters for the ManagedClustersClient.GetMeshRevisionProfile @@ -540,7 +540,7 @@ func (client *ManagedClustersClient) getMeshRevisionProfileCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -558,7 +558,7 @@ func (client *ManagedClustersClient) getMeshRevisionProfileHandleResponse(resp * // GetMeshUpgradeProfile - Gets available upgrades for a service mesh in a cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - mode - The mode of the mesh. @@ -610,7 +610,7 @@ func (client *ManagedClustersClient) getMeshUpgradeProfileCreateRequest(ctx cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -628,7 +628,7 @@ func (client *ManagedClustersClient) getMeshUpgradeProfileHandleResponse(resp *h // GetOSOptions - Gets supported OS options in the specified subscription. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - location - The name of the Azure region. // - options - ManagedClustersClientGetOSOptionsOptions contains the optional parameters for the ManagedClustersClient.GetOSOptions // method. @@ -670,7 +670,7 @@ func (client *ManagedClustersClient) getOSOptionsCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") if options != nil && options.ResourceType != nil { reqQP.Set("resource-type", *options.ResourceType) } @@ -691,7 +691,7 @@ func (client *ManagedClustersClient) getOSOptionsHandleResponse(resp *http.Respo // GetUpgradeProfile - Gets the upgrade profile of a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. 
// - options - ManagedClustersClientGetUpgradeProfileOptions contains the optional parameters for the ManagedClustersClient.GetUpgradeProfile @@ -738,7 +738,7 @@ func (client *ManagedClustersClient) getUpgradeProfileCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -755,7 +755,7 @@ func (client *ManagedClustersClient) getUpgradeProfileHandleResponse(resp *http. // NewListPager - Gets a list of managed clusters in the specified subscription. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - options - ManagedClustersClientListOptions contains the optional parameters for the ManagedClustersClient.NewListPager // method. func (client *ManagedClustersClient) NewListPager(options *ManagedClustersClientListOptions) *runtime.Pager[ManagedClustersClientListResponse] { @@ -793,7 +793,7 @@ func (client *ManagedClustersClient) listCreateRequest(ctx context.Context, opti return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -810,7 +810,7 @@ func (client *ManagedClustersClient) listHandleResponse(resp *http.Response) (Ma // NewListByResourceGroupPager - Lists managed clusters in the specified subscription and resource group. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - options - ManagedClustersClientListByResourceGroupOptions contains the optional parameters for the ManagedClustersClient.NewListByResourceGroupPager // method. @@ -853,7 +853,7 @@ func (client *ManagedClustersClient) listByResourceGroupCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -871,7 +871,7 @@ func (client *ManagedClustersClient) listByResourceGroupHandleResponse(resp *htt // ListClusterAdminCredentials - Lists the admin credentials of a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientListClusterAdminCredentialsOptions contains the optional parameters for the ManagedClustersClient.ListClusterAdminCredentials @@ -918,7 +918,7 @@ func (client *ManagedClustersClient) listClusterAdminCredentialsCreateRequest(ct return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") if options != nil && options.ServerFqdn != nil { reqQP.Set("server-fqdn", *options.ServerFqdn) } @@ -939,7 +939,7 @@ func (client *ManagedClustersClient) listClusterAdminCredentialsHandleResponse(r // ListClusterMonitoringUserCredentials - Lists the cluster monitoring user credentials of a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientListClusterMonitoringUserCredentialsOptions contains the optional parameters for the ManagedClustersClient.ListClusterMonitoringUserCredentials @@ -986,7 +986,7 @@ func (client *ManagedClustersClient) listClusterMonitoringUserCredentialsCreateR return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") if options != nil && options.ServerFqdn != nil { reqQP.Set("server-fqdn", *options.ServerFqdn) } @@ -1007,7 +1007,7 @@ func (client *ManagedClustersClient) listClusterMonitoringUserCredentialsHandleR // ListClusterUserCredentials - Lists the user credentials of a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientListClusterUserCredentialsOptions contains the optional parameters for the ManagedClustersClient.ListClusterUserCredentials @@ -1054,13 +1054,13 @@ func (client *ManagedClustersClient) listClusterUserCredentialsCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") - if options != nil && options.ServerFqdn != nil { - reqQP.Set("server-fqdn", *options.ServerFqdn) - } + reqQP.Set("api-version", "2024-01-01") if options != nil && options.Format != nil { reqQP.Set("format", string(*options.Format)) } + if options != nil && options.ServerFqdn != nil { + reqQP.Set("server-fqdn", *options.ServerFqdn) + } req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1079,7 +1079,7 @@ func (client *ManagedClustersClient) listClusterUserCredentialsHandleResponse(re // upgrades, and details on preview status of the version // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - location - The name of the Azure region. // - options - ManagedClustersClientListKubernetesVersionsOptions contains the optional parameters for the ManagedClustersClient.ListKubernetesVersions // method. @@ -1121,7 +1121,7 @@ func (client *ManagedClustersClient) listKubernetesVersionsCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1139,7 +1139,7 @@ func (client *ManagedClustersClient) listKubernetesVersionsHandleResponse(resp * // NewListMeshRevisionProfilesPager - Contains extra metadata on each revision, including supported revisions, cluster compatibility // and available upgrades // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - location - The name of the Azure region. // - options - ManagedClustersClientListMeshRevisionProfilesOptions contains the optional parameters for the ManagedClustersClient.NewListMeshRevisionProfilesPager // method. 
@@ -1182,7 +1182,7 @@ func (client *ManagedClustersClient) listMeshRevisionProfilesCreateRequest(ctx c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1199,7 +1199,7 @@ func (client *ManagedClustersClient) listMeshRevisionProfilesHandleResponse(resp // NewListMeshUpgradeProfilesPager - Lists available upgrades for all service meshes in a specific cluster. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientListMeshUpgradeProfilesOptions contains the optional parameters for the ManagedClustersClient.NewListMeshUpgradeProfilesPager @@ -1247,7 +1247,7 @@ func (client *ManagedClustersClient) listMeshUpgradeProfilesCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1265,7 +1265,7 @@ func (client *ManagedClustersClient) listMeshUpgradeProfilesHandleResponse(resp // NewListOutboundNetworkDependenciesEndpointsPager - Gets a list of egress endpoints (network endpoints of all outbound dependencies) // in the specified managed cluster. The operation returns properties of each egress endpoint. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientListOutboundNetworkDependenciesEndpointsOptions contains the optional parameters for the @@ -1313,7 +1313,7 @@ func (client *ManagedClustersClient) listOutboundNetworkDependenciesEndpointsCre return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1332,7 +1332,7 @@ func (client *ManagedClustersClient) listOutboundNetworkDependenciesEndpointsHan // [https://aka.ms/aks-managed-aad] to update your cluster with AKS-managed Azure AD. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - parameters - The AAD profile to set on the Managed Cluster @@ -1360,7 +1360,7 @@ func (client *ManagedClustersClient) BeginResetAADProfile(ctx context.Context, r // to update your cluster with AKS-managed Azure AD. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 func (client *ManagedClustersClient) resetAADProfile(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedClusterAADProfile, options *ManagedClustersClientBeginResetAADProfileOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginResetAADProfile" @@ -1402,7 +1402,7 @@ func (client *ManagedClustersClient) resetAADProfileCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -1414,7 +1414,7 @@ func (client *ManagedClustersClient) resetAADProfileCreateRequest(ctx context.Co // BeginResetServicePrincipalProfile - This action cannot be performed on a cluster that is not using a service principal // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - parameters - The service principal profile to set on the managed cluster. @@ -1441,7 +1441,7 @@ func (client *ManagedClustersClient) BeginResetServicePrincipalProfile(ctx conte // ResetServicePrincipalProfile - This action cannot be performed on a cluster that is not using a service principal // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 func (client *ManagedClustersClient) resetServicePrincipalProfile(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedClusterServicePrincipalProfile, options *ManagedClustersClientBeginResetServicePrincipalProfileOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginResetServicePrincipalProfile" @@ -1483,7 +1483,7 @@ func (client *ManagedClustersClient) resetServicePrincipalProfileCreateRequest(c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -1496,7 +1496,7 @@ func (client *ManagedClustersClient) resetServicePrincipalProfileCreateRequest(c // more details about rotating managed cluster certificates. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientBeginRotateClusterCertificatesOptions contains the optional parameters for the ManagedClustersClient.BeginRotateClusterCertificates @@ -1523,7 +1523,7 @@ func (client *ManagedClustersClient) BeginRotateClusterCertificates(ctx context. // details about rotating managed cluster certificates. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 func (client *ManagedClustersClient) rotateClusterCertificates(ctx context.Context, resourceGroupName string, resourceName string, options *ManagedClustersClientBeginRotateClusterCertificatesOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginRotateClusterCertificates" @@ -1565,7 +1565,7 @@ func (client *ManagedClustersClient) rotateClusterCertificatesCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1574,7 +1574,7 @@ func (client *ManagedClustersClient) rotateClusterCertificatesCreateRequest(ctx // BeginRotateServiceAccountSigningKeys - Rotates the service account signing keys of a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientBeginRotateServiceAccountSigningKeysOptions contains the optional parameters for the ManagedClustersClient.BeginRotateServiceAccountSigningKeys @@ -1600,7 +1600,7 @@ func (client *ManagedClustersClient) BeginRotateServiceAccountSigningKeys(ctx co // RotateServiceAccountSigningKeys - Rotates the service account signing keys of a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 func (client *ManagedClustersClient) rotateServiceAccountSigningKeys(ctx context.Context, resourceGroupName string, resourceName string, options *ManagedClustersClientBeginRotateServiceAccountSigningKeysOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginRotateServiceAccountSigningKeys" @@ -1642,7 +1642,7 @@ func (client *ManagedClustersClient) rotateServiceAccountSigningKeysCreateReques return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1653,7 +1653,7 @@ func (client *ManagedClustersClient) rotateServiceAccountSigningKeysCreateReques // [https://docs.microsoft.com/azure/aks/private-clusters#aks-run-command-preview]. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - requestPayload - The run command request @@ -1682,7 +1682,7 @@ func (client *ManagedClustersClient) BeginRunCommand(ctx context.Context, resour // [https://docs.microsoft.com/azure/aks/private-clusters#aks-run-command-preview]. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 func (client *ManagedClustersClient) runCommand(ctx context.Context, resourceGroupName string, resourceName string, requestPayload RunCommandRequest, options *ManagedClustersClientBeginRunCommandOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginRunCommand" @@ -1724,7 +1724,7 @@ func (client *ManagedClustersClient) runCommandCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, requestPayload); err != nil { @@ -1737,7 +1737,7 @@ func (client *ManagedClustersClient) runCommandCreateRequest(ctx context.Context // a cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientBeginStartOptions contains the optional parameters for the ManagedClustersClient.BeginStart @@ -1764,7 +1764,7 @@ func (client *ManagedClustersClient) BeginStart(ctx context.Context, resourceGro // a cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 func (client *ManagedClustersClient) start(ctx context.Context, resourceGroupName string, resourceName string, options *ManagedClustersClientBeginStartOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginStart" @@ -1806,7 +1806,7 @@ func (client *ManagedClustersClient) startCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1818,7 +1818,7 @@ func (client *ManagedClustersClient) startCreateRequest(ctx context.Context, res // for more details about stopping a cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - ManagedClustersClientBeginStopOptions contains the optional parameters for the ManagedClustersClient.BeginStop @@ -1847,7 +1847,7 @@ func (client *ManagedClustersClient) BeginStop(ctx context.Context, resourceGrou // for more details about stopping a cluster. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 func (client *ManagedClustersClient) stop(ctx context.Context, resourceGroupName string, resourceName string, options *ManagedClustersClientBeginStopOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginStop" @@ -1889,7 +1889,7 @@ func (client *ManagedClustersClient) stopCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1898,7 +1898,7 @@ func (client *ManagedClustersClient) stopCreateRequest(ctx context.Context, reso // BeginUpdateTags - Updates tags on a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - parameters - Parameters supplied to the Update Managed Cluster Tags operation. @@ -1924,7 +1924,7 @@ func (client *ManagedClustersClient) BeginUpdateTags(ctx context.Context, resour // UpdateTags - Updates tags on a managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 func (client *ManagedClustersClient) updateTags(ctx context.Context, resourceGroupName string, resourceName string, parameters TagsObject, options *ManagedClustersClientBeginUpdateTagsOptions) (*http.Response, error) { var err error const operationName = "ManagedClustersClient.BeginUpdateTags" @@ -1966,7 +1966,7 @@ func (client *ManagedClustersClient) updateTagsCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models.go index e4480e0ba..6b146803f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models.go @@ -1073,6 +1073,31 @@ type ManagedClusterIdentity struct { TenantID *string } +// ManagedClusterIngressProfile - Ingress profile for the container service cluster. +type ManagedClusterIngressProfile struct { + // App Routing settings for the ingress profile. You can find an overview and onboarding guide for this feature at + // https://learn.microsoft.com/en-us/azure/aks/app-routing?tabs=default%2Cdeploy-app-default. + WebAppRouting *ManagedClusterIngressProfileWebAppRouting +} + +// ManagedClusterIngressProfileWebAppRouting - Application Routing add-on settings for the ingress profile. +type ManagedClusterIngressProfileWebAppRouting struct { + // Resource IDs of the DNS zones to be associated with the Application Routing add-on. Used only when Application Routing + // add-on is enabled. 
Public and private DNS zones can be in different resource + // groups, but all public DNS zones must be in the same resource group and all private DNS zones must be in the same resource + // group. + DNSZoneResourceIDs []*string + + // Whether to enable the Application Routing add-on. + Enabled *bool + + // READ-ONLY; Managed identity of the Application Routing add-on. This is the identity that should be granted permissions, + // for example, to manage the associated Azure DNS resource and get certificates from Azure + // Key Vault. See this overview of the add-on [https://learn.microsoft.com/en-us/azure/aks/web-app-routing?tabs=with-osm] + // for more instructions. + Identity *UserAssignedIdentity +} + // ManagedClusterListResult - The response from the List Managed Clusters operation. type ManagedClusterListResult struct { // The list of managed clusters. @@ -1317,6 +1342,9 @@ type ManagedClusterProperties struct { // Identities associated with the cluster. IdentityProfile map[string]*UserAssignedIdentity + // Ingress profile for the managed cluster. + IngressProfile *ManagedClusterIngressProfile + // Both patch version (e.g. 1.20.13) and (e.g. 1.20) are supported. When is specified, the latest supported GA patch version // is chosen automatically. Updating the cluster with the same once it has been // created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch version is available. When you upgrade diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models_serde.go index da9690495..1c83c2acc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models_serde.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models_serde.go @@ -50,7 +50,9 @@ func (a *AbsoluteMonthlySchedule) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type AccessProfile. 
func (a AccessProfile) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populateByteArray(objectMap, "kubeConfig", a.KubeConfig, runtime.Base64StdFormat) + populateByteArray(objectMap, "kubeConfig", a.KubeConfig, func() any { + return runtime.EncodeByteArray(a.KubeConfig, runtime.Base64StdFormat) + }) return json.Marshal(objectMap) } @@ -64,7 +66,9 @@ func (a *AccessProfile) UnmarshalJSON(data []byte) error { var err error switch key { case "kubeConfig": - err = runtime.DecodeByteArray(string(val), &a.KubeConfig, runtime.Base64StdFormat) + if val != nil && string(val) != "null" { + err = runtime.DecodeByteArray(string(val), &a.KubeConfig, runtime.Base64StdFormat) + } delete(rawMsg, key) } if err != nil { @@ -599,7 +603,9 @@ func (c *CreationData) UnmarshalJSON(data []byte) error { func (c CredentialResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "name", c.Name) - populateByteArray(objectMap, "value", c.Value, runtime.Base64StdFormat) + populateByteArray(objectMap, "value", c.Value, func() any { + return runtime.EncodeByteArray(c.Value, runtime.Base64StdFormat) + }) return json.Marshal(objectMap) } @@ -616,7 +622,9 @@ func (c *CredentialResult) UnmarshalJSON(data []byte) error { err = unpopulate(val, "Name", &c.Name) delete(rawMsg, key) case "value": - err = runtime.DecodeByteArray(string(val), &c.Value, runtime.Base64StdFormat) + if val != nil && string(val) != "null" { + err = runtime.DecodeByteArray(string(val), &c.Value, runtime.Base64StdFormat) + } delete(rawMsg, key) } if err != nil { @@ -2367,6 +2375,68 @@ func (m *ManagedClusterIdentity) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type ManagedClusterIngressProfile. +func (m ManagedClusterIngressProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "webAppRouting", m.WebAppRouting) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ManagedClusterIngressProfile. +func (m *ManagedClusterIngressProfile) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "webAppRouting": + err = unpopulate(val, "WebAppRouting", &m.WebAppRouting) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ManagedClusterIngressProfileWebAppRouting. +func (m ManagedClusterIngressProfileWebAppRouting) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "dnsZoneResourceIds", m.DNSZoneResourceIDs) + populate(objectMap, "enabled", m.Enabled) + populate(objectMap, "identity", m.Identity) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ManagedClusterIngressProfileWebAppRouting. 
+func (m *ManagedClusterIngressProfileWebAppRouting) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "dnsZoneResourceIds": + err = unpopulate(val, "DNSZoneResourceIDs", &m.DNSZoneResourceIDs) + delete(rawMsg, key) + case "enabled": + err = unpopulate(val, "Enabled", &m.Enabled) + delete(rawMsg, key) + case "identity": + err = unpopulate(val, "Identity", &m.Identity) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type ManagedClusterListResult. func (m ManagedClusterListResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -2936,6 +3006,7 @@ func (m ManagedClusterProperties) MarshalJSON() ([]byte, error) { populate(objectMap, "fqdnSubdomain", m.FqdnSubdomain) populate(objectMap, "httpProxyConfig", m.HTTPProxyConfig) populate(objectMap, "identityProfile", m.IdentityProfile) + populate(objectMap, "ingressProfile", m.IngressProfile) populate(objectMap, "kubernetesVersion", m.KubernetesVersion) populate(objectMap, "linuxProfile", m.LinuxProfile) populate(objectMap, "maxAgentPools", m.MaxAgentPools) @@ -3023,6 +3094,9 @@ func (m *ManagedClusterProperties) UnmarshalJSON(data []byte) error { case "identityProfile": err = unpopulate(val, "IdentityProfile", &m.IdentityProfile) delete(rawMsg, key) + case "ingressProfile": + err = unpopulate(val, "IngressProfile", &m.IngressProfile) + delete(rawMsg, key) case "kubernetesVersion": err = unpopulate(val, "KubernetesVersion", &m.KubernetesVersion) delete(rawMsg, key) @@ -5681,18 +5755,18 @@ func populate(m map[string]any, k string, v any) { } } -func populateByteArray(m map[string]any, k string, b []byte, f runtime.Base64Encoding) { +func populateByteArray[T any](m map[string]any, k string, b []T, convert func() any) { if azcore.IsNullValue(b) { m[k] = nil } else if len(b) == 0 { return } else { - m[k] = runtime.EncodeByteArray(b, f) + m[k] = convert() } } func unpopulate(data json.RawMessage, fn string, v any) error { - if data == nil { + if data == nil || string(data) == "null" { return nil } if err := json.Unmarshal(data, v); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/operations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/operations_client.go index 4f3e48455..150abb35a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/operations_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/operations_client.go @@ -39,7 +39,7 @@ func NewOperationsClient(credential azcore.TokenCredential, options *arm.ClientO // NewListPager - Gets a list of operations. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - options - OperationsClientListOptions contains the optional parameters for the OperationsClient.NewListPager method. 
func (client *OperationsClient) NewListPager(options *OperationsClientListOptions) *runtime.Pager[OperationsClientListResponse] { return runtime.NewPager(runtime.PagingHandler[OperationsClientListResponse]{ @@ -73,7 +73,7 @@ func (client *OperationsClient) listCreateRequest(ctx context.Context, options * return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privateendpointconnections_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privateendpointconnections_client.go index 22bd5990e..c232eaf41 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privateendpointconnections_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privateendpointconnections_client.go @@ -46,7 +46,7 @@ func NewPrivateEndpointConnectionsClient(subscriptionID string, credential azcor // BeginDelete - Deletes a private endpoint connection. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - privateEndpointConnectionName - The name of the private endpoint connection. @@ -72,7 +72,7 @@ func (client *PrivateEndpointConnectionsClient) BeginDelete(ctx context.Context, // Delete - Deletes a private endpoint connection. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 func (client *PrivateEndpointConnectionsClient) deleteOperation(ctx context.Context, resourceGroupName string, resourceName string, privateEndpointConnectionName string, options *PrivateEndpointConnectionsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "PrivateEndpointConnectionsClient.BeginDelete" @@ -118,7 +118,7 @@ func (client *PrivateEndpointConnectionsClient) deleteCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -127,7 +127,7 @@ func (client *PrivateEndpointConnectionsClient) deleteCreateRequest(ctx context. // Get - To learn more about private clusters, see: https://docs.microsoft.com/azure/aks/private-clusters // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - privateEndpointConnectionName - The name of the private endpoint connection. 
@@ -179,7 +179,7 @@ func (client *PrivateEndpointConnectionsClient) getCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -197,7 +197,7 @@ func (client *PrivateEndpointConnectionsClient) getHandleResponse(resp *http.Res // List - To learn more about private clusters, see: https://docs.microsoft.com/azure/aks/private-clusters // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - PrivateEndpointConnectionsClientListOptions contains the optional parameters for the PrivateEndpointConnectionsClient.List @@ -244,7 +244,7 @@ func (client *PrivateEndpointConnectionsClient) listCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -262,7 +262,7 @@ func (client *PrivateEndpointConnectionsClient) listHandleResponse(resp *http.Re // Update - Updates a private endpoint connection. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - privateEndpointConnectionName - The name of the private endpoint connection. @@ -315,7 +315,7 @@ func (client *PrivateEndpointConnectionsClient) updateCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privatelinkresources_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privatelinkresources_client.go index 9a9f09811..c8b35480c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privatelinkresources_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privatelinkresources_client.go @@ -46,7 +46,7 @@ func NewPrivateLinkResourcesClient(subscriptionID string, credential azcore.Toke // List - To learn more about private clusters, see: https://docs.microsoft.com/azure/aks/private-clusters // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. 
// - options - PrivateLinkResourcesClientListOptions contains the optional parameters for the PrivateLinkResourcesClient.List @@ -93,7 +93,7 @@ func (client *PrivateLinkResourcesClient) listCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/resolveprivatelinkserviceid_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/resolveprivatelinkserviceid_client.go index f95952e40..8d8ba014d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/resolveprivatelinkserviceid_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/resolveprivatelinkserviceid_client.go @@ -46,7 +46,7 @@ func NewResolvePrivateLinkServiceIDClient(subscriptionID string, credential azco // POST - Gets the private link service ID for the specified managed cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - parameters - Parameters required in order to resolve a private link service ID. @@ -94,7 +94,7 @@ func (client *ResolvePrivateLinkServiceIDClient) postCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/responses.go similarity index 100% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/response_types.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/responses.go diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/snapshots_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/snapshots_client.go index 6043d81e1..dd160f7d9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/snapshots_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/snapshots_client.go @@ -46,7 +46,7 @@ func NewSnapshotsClient(subscriptionID string, credential azcore.TokenCredential // CreateOrUpdate - Creates or updates a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. 
// - resourceName - The name of the managed cluster resource. // - parameters - The snapshot to create or update. @@ -94,7 +94,7 @@ func (client *SnapshotsClient) createOrUpdateCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -115,7 +115,7 @@ func (client *SnapshotsClient) createOrUpdateHandleResponse(resp *http.Response) // Delete - Deletes a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - SnapshotsClientDeleteOptions contains the optional parameters for the SnapshotsClient.Delete method. @@ -160,7 +160,7 @@ func (client *SnapshotsClient) deleteCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -169,7 +169,7 @@ func (client *SnapshotsClient) deleteCreateRequest(ctx context.Context, resource // Get - Gets a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - SnapshotsClientGetOptions contains the optional parameters for the SnapshotsClient.Get method. @@ -215,7 +215,7 @@ func (client *SnapshotsClient) getCreateRequest(ctx context.Context, resourceGro return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -232,7 +232,7 @@ func (client *SnapshotsClient) getHandleResponse(resp *http.Response) (Snapshots // NewListPager - Gets a list of snapshots in the specified subscription. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - options - SnapshotsClientListOptions contains the optional parameters for the SnapshotsClient.NewListPager method. func (client *SnapshotsClient) NewListPager(options *SnapshotsClientListOptions) *runtime.Pager[SnapshotsClientListResponse] { return runtime.NewPager(runtime.PagingHandler[SnapshotsClientListResponse]{ @@ -269,7 +269,7 @@ func (client *SnapshotsClient) listCreateRequest(ctx context.Context, options *S return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -286,7 +286,7 @@ func (client *SnapshotsClient) listHandleResponse(resp *http.Response) (Snapshot // NewListByResourceGroupPager - Lists snapshots in the specified subscription and resource group. 
// -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - options - SnapshotsClientListByResourceGroupOptions contains the optional parameters for the SnapshotsClient.NewListByResourceGroupPager // method. @@ -329,7 +329,7 @@ func (client *SnapshotsClient) listByResourceGroupCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -347,7 +347,7 @@ func (client *SnapshotsClient) listByResourceGroupHandleResponse(resp *http.Resp // UpdateTags - Updates tags on a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - parameters - Parameters supplied to the Update snapshot Tags operation. @@ -394,7 +394,7 @@ func (client *SnapshotsClient) updateTagsCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/time_rfc3339.go index 296cd68af..c671b41ed 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/time_rfc3339.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/time_rfc3339.go @@ -19,12 +19,16 @@ import ( ) // Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. 
-var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) +var tzOffsetRegex = regexp.MustCompile(`(?:Z|z|\+|-)(?:\d+:\d+)*"*$`) const ( - utcDateTimeJSON = `"2006-01-02T15:04:05.999999999"` - utcDateTime = "2006-01-02T15:04:05.999999999" - dateTimeJSON = `"` + time.RFC3339Nano + `"` + utcDateTime = "2006-01-02T15:04:05.999999999" + utcDateTimeJSON = `"` + utcDateTime + `"` + utcDateTimeNoT = "2006-01-02 15:04:05.999999999" + utcDateTimeJSONNoT = `"` + utcDateTimeNoT + `"` + dateTimeNoT = `2006-01-02 15:04:05.999999999Z07:00` + dateTimeJSON = `"` + time.RFC3339Nano + `"` + dateTimeJSONNoT = `"` + dateTimeNoT + `"` ) type dateTimeRFC3339 time.Time @@ -40,17 +44,33 @@ func (t dateTimeRFC3339) MarshalText() ([]byte, error) { } func (t *dateTimeRFC3339) UnmarshalJSON(data []byte) error { - layout := utcDateTimeJSON - if tzOffsetRegex.Match(data) { + tzOffset := tzOffsetRegex.Match(data) + hasT := strings.Contains(string(data), "T") || strings.Contains(string(data), "t") + var layout string + if tzOffset && hasT { layout = dateTimeJSON + } else if tzOffset { + layout = dateTimeJSONNoT + } else if hasT { + layout = utcDateTimeJSON + } else { + layout = utcDateTimeJSONNoT } return t.Parse(layout, string(data)) } func (t *dateTimeRFC3339) UnmarshalText(data []byte) error { - layout := utcDateTime - if tzOffsetRegex.Match(data) { + tzOffset := tzOffsetRegex.Match(data) + hasT := strings.Contains(string(data), "T") || strings.Contains(string(data), "t") + var layout string + if tzOffset && hasT { layout = time.RFC3339Nano + } else if tzOffset { + layout = dateTimeNoT + } else if hasT { + layout = utcDateTime + } else { + layout = utcDateTimeNoT } return t.Parse(layout, string(data)) } @@ -61,6 +81,10 @@ func (t *dateTimeRFC3339) Parse(layout, value string) error { return err } +func (t dateTimeRFC3339) String() string { + return time.Time(t).Format(time.RFC3339Nano) +} + func populateDateTimeRFC3339(m map[string]any, k string, t *time.Time) { if t == nil { return @@ -74,7 +98,7 @@ func populateDateTimeRFC3339(m map[string]any, k string, t *time.Time) { } func unpopulateDateTimeRFC3339(data json.RawMessage, fn string, t **time.Time) error { - if data == nil || strings.EqualFold(string(data), "null") { + if data == nil || string(data) == "null" { return nil } var aux dateTimeRFC3339 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/trustedaccessrolebindings_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/trustedaccessrolebindings_client.go index 8509aed09..2cb313e07 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/trustedaccessrolebindings_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/trustedaccessrolebindings_client.go @@ -46,7 +46,7 @@ func NewTrustedAccessRoleBindingsClient(subscriptionID string, credential azcore // BeginCreateOrUpdate - Create or update a trusted access role binding // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - trustedAccessRoleBindingName - The name of trusted access role binding. 
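
The regenerated time_rfc3339.go above broadens timestamp parsing: besides rewriting tzOffsetRegex with non-capturing groups, UnmarshalJSON/UnmarshalText now choose one of four layouts depending on whether the value carries a timezone offset and whether it uses a 'T' separator, so Azure timestamps without the 'T' and/or the offset are accepted. Below is a minimal standalone sketch of that selection logic (the UnmarshalText path); the helper name `pickLayout` and the sample values are illustrative, not SDK code:

```go
// Sketch only: mirrors the layout selection added to time_rfc3339.go above.
// pickLayout and the sample timestamps are illustrative, not SDK identifiers.
package main

import (
	"fmt"
	"regexp"
	"strings"
	"time"
)

var tzOffsetRegex = regexp.MustCompile(`(?:Z|z|\+|-)(?:\d+:\d+)*"*$`)

func pickLayout(value string) string {
	hasOffset := tzOffsetRegex.MatchString(value)
	hasT := strings.ContainsAny(value, "Tt")
	switch {
	case hasOffset && hasT:
		return time.RFC3339Nano // 2006-01-02T15:04:05.999999999Z07:00
	case hasOffset:
		return "2006-01-02 15:04:05.999999999Z07:00" // offset, no 'T'
	case hasT:
		return "2006-01-02T15:04:05.999999999" // 'T', no offset
	default:
		return "2006-01-02 15:04:05.999999999" // neither
	}
}

func main() {
	for _, v := range []string{
		"2024-01-01T10:30:00.5Z",
		"2024-01-01 10:30:00.5+02:00",
		"2024-01-01T10:30:00.5",
		"2024-01-01 10:30:00.5",
	} {
		t, err := time.Parse(pickLayout(v), v)
		fmt.Println(v, "->", t.UTC(), err)
	}
}
```

Each sample parses because the chosen layout matches the same offset/'T' checks used in the generated UnmarshalText above.
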
@@ -73,7 +73,7 @@ func (client *TrustedAccessRoleBindingsClient) BeginCreateOrUpdate(ctx context.C // CreateOrUpdate - Create or update a trusted access role binding // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 func (client *TrustedAccessRoleBindingsClient) createOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, trustedAccessRoleBindingName string, trustedAccessRoleBinding TrustedAccessRoleBinding, options *TrustedAccessRoleBindingsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "TrustedAccessRoleBindingsClient.BeginCreateOrUpdate" @@ -119,7 +119,7 @@ func (client *TrustedAccessRoleBindingsClient) createOrUpdateCreateRequest(ctx c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, trustedAccessRoleBinding); err != nil { @@ -131,7 +131,7 @@ func (client *TrustedAccessRoleBindingsClient) createOrUpdateCreateRequest(ctx c // BeginDelete - Delete a trusted access role binding. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - trustedAccessRoleBindingName - The name of trusted access role binding. @@ -157,7 +157,7 @@ func (client *TrustedAccessRoleBindingsClient) BeginDelete(ctx context.Context, // Delete - Delete a trusted access role binding. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 func (client *TrustedAccessRoleBindingsClient) deleteOperation(ctx context.Context, resourceGroupName string, resourceName string, trustedAccessRoleBindingName string, options *TrustedAccessRoleBindingsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "TrustedAccessRoleBindingsClient.BeginDelete" @@ -203,7 +203,7 @@ func (client *TrustedAccessRoleBindingsClient) deleteCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -212,7 +212,7 @@ func (client *TrustedAccessRoleBindingsClient) deleteCreateRequest(ctx context.C // Get - Get a trusted access role binding. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - trustedAccessRoleBindingName - The name of trusted access role binding. 
@@ -264,7 +264,7 @@ func (client *TrustedAccessRoleBindingsClient) getCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -281,7 +281,7 @@ func (client *TrustedAccessRoleBindingsClient) getHandleResponse(resp *http.Resp // NewListPager - List trusted access role bindings. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - resourceName - The name of the managed cluster resource. // - options - TrustedAccessRoleBindingsClientListOptions contains the optional parameters for the TrustedAccessRoleBindingsClient.NewListPager @@ -329,7 +329,7 @@ func (client *TrustedAccessRoleBindingsClient) listCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/trustedaccessroles_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/trustedaccessroles_client.go index bfc856a4b..f5a658211 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/trustedaccessroles_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/trustedaccessroles_client.go @@ -45,7 +45,7 @@ func NewTrustedAccessRolesClient(subscriptionID string, credential azcore.TokenC // NewListPager - List supported trusted access roles. // -// Generated from API version 2023-11-01 +// Generated from API version 2024-01-01 // - location - The name of the Azure region. // - options - TrustedAccessRolesClientListOptions contains the optional parameters for the TrustedAccessRolesClient.NewListPager // method. 
@@ -88,7 +88,7 @@ func (client *TrustedAccessRolesClient) listCreateRequest(ctx context.Context, l return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-11-01") + reqQP.Set("api-version", "2024-01-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/modules.txt b/vendor/modules.txt index 304364a86..0a4e9a413 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -47,13 +47,13 @@ github.com/Azure/azure-sdk-for-go/sdk/internal/log github.com/Azure/azure-sdk-for-go/sdk/internal/poller github.com/Azure/azure-sdk-for-go/sdk/internal/temporal github.com/Azure/azure-sdk-for-go/sdk/internal/uuid -# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0 +# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 # github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry -# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.7.0 +# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.8.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 # github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0 @@ -1613,7 +1613,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/util/string sigs.k8s.io/cloud-provider-azure/pkg/util/taints sigs.k8s.io/cloud-provider-azure/pkg/util/vm sigs.k8s.io/cloud-provider-azure/pkg/version -# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.2 +# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.4 ## explicit; go 1.20 sigs.k8s.io/cloud-provider-azure/pkg/azclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient From 69477ced54e39f4e860ae9c18de322c40edd9886 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 22 Mar 2024 22:49:15 +0800 Subject: [PATCH 106/157] Update driver-parameters.md --- docs/driver-parameters.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/driver-parameters.md b/docs/driver-parameters.md index c3ed1a995..5525f2263 100644 --- a/docs/driver-parameters.md +++ b/docs/driver-parameters.md @@ -102,6 +102,8 @@ volumeAttributes.resourceGroup | Azure resource group name | existing resource g volumeAttributes.storageAccount | existing storage account name | existing storage account name | Yes | volumeAttributes.containerName | existing container name | existing container name | Yes | volumeAttributes.protocol | specify blobfuse, blobfuse2 or NFSv3 mount (blobfuse2 is still in Preview) | `fuse`, `fuse2`, `nfs` | No | `fuse` +volumeAttributes.server | specify Azure storage account server address | existing server address, e.g. 
`accountname.privatelink.blob.core.windows.net` | No | if empty, driver will use default `accountname.blob.core.windows.net` or other sovereign cloud account address +volumeAttributes.storageEndpointSuffix | specify Azure storage endpoint suffix | `core.windows.net`, `core.chinacloudapi.cn`, etc | No | if empty, driver will use default storage endpoint suffix according to cloud environment --- | **Following parameters are only for blobfuse** | --- | --- | volumeAttributes.secretName | secret name that stores storage account name and key(only applies for SMB) | | No | volumeAttributes.secretNamespace | secret namespace | `default`,`kube-system`, etc | No | pvc namespace From 69299225eba42fa9b44a1553c48bd200eeeb6d0a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Mar 2024 17:05:27 +0000 Subject: [PATCH 107/157] chore(deps): bump github.com/onsi/ginkgo/v2 from 2.17.0 to 2.17.1 Bumps [github.com/onsi/ginkgo/v2](https://github.com/onsi/ginkgo) from 2.17.0 to 2.17.1. - [Release notes](https://github.com/onsi/ginkgo/releases) - [Changelog](https://github.com/onsi/ginkgo/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/ginkgo/compare/v2.17.0...v2.17.1) --- updated-dependencies: - dependency-name: github.com/onsi/ginkgo/v2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 5 +++++ vendor/github.com/onsi/ginkgo/v2/types/config.go | 2 +- vendor/github.com/onsi/ginkgo/v2/types/flags.go | 15 ++++++++------- vendor/github.com/onsi/ginkgo/v2/types/version.go | 2 +- vendor/modules.txt | 2 +- 7 files changed, 19 insertions(+), 13 deletions(-) diff --git a/go.mod b/go.mod index 46f0fae31..9b1d22f03 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/golang/protobuf v1.5.4 github.com/kubernetes-csi/csi-lib-utils v0.16.0 - github.com/onsi/ginkgo/v2 v2.17.0 + github.com/onsi/ginkgo/v2 v2.17.1 github.com/onsi/gomega v1.32.0 github.com/pborman/uuid v1.2.1 github.com/pelletier/go-toml v1.9.5 diff --git a/go.sum b/go.sum index b60e27752..cdfa194a9 100644 --- a/go.sum +++ b/go.sum @@ -207,8 +207,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.17.0 h1:kdnunFXpBjbzN56hcJHrXZ8M+LOkenKA7NnBzTNigTI= -github.com/onsi/ginkgo/v2 v2.17.0/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= +github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index 57992854b..44222220a 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ 
b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,8 @@ +## 2.17.1 + +### Fixes +- If the user sets --seed=0, make sure all parallel nodes get the same seed [af0330d] + ## 2.17.0 ### Features diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index 7c82065df..cef273ee1 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -265,7 +265,7 @@ var FlagSections = GinkgoFlagSections{ // SuiteConfigFlags provides flags for the Ginkgo test process, and CLI var SuiteConfigFlags = GinkgoFlags{ {KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo", - Usage: "The seed used to randomize the spec suite."}, + Usage: "The seed used to randomize the spec suite.", AlwaysExport: true}, {KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."}, diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go index 9186ae873..de69f3022 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/flags.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/flags.go @@ -24,7 +24,8 @@ type GinkgoFlag struct { DeprecatedDocLink string DeprecatedVersion string - ExportAs string + ExportAs string + AlwaysExport bool } type GinkgoFlags []GinkgoFlag @@ -431,7 +432,7 @@ func (ssv stringSliceVar) Set(s string) error { return nil } -//given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. +// given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. 
func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) { result := []string{} for _, flag := range flags { @@ -451,19 +452,19 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) iface := value.Interface() switch value.Type() { case reflect.TypeOf(string("")): - if iface.(string) != "" { + if iface.(string) != "" || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%s", name, iface)) } case reflect.TypeOf(int64(0)): - if iface.(int64) != 0 { + if iface.(int64) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%d", name, iface)) } case reflect.TypeOf(float64(0)): - if iface.(float64) != 0 { + if iface.(float64) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%f", name, iface)) } case reflect.TypeOf(int(0)): - if iface.(int) != 0 { + if iface.(int) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%d", name, iface)) } case reflect.TypeOf(bool(true)): @@ -471,7 +472,7 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) result = append(result, fmt.Sprintf("--%s", name)) } case reflect.TypeOf(time.Duration(0)): - if iface.(time.Duration) != time.Duration(0) { + if iface.(time.Duration) != time.Duration(0) || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%s", name, iface)) } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index ccd6516fa..851d42b45 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.17.0" +const VERSION = "2.17.1" diff --git a/vendor/modules.txt b/vendor/modules.txt index 0a4e9a413..de501f235 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -344,7 +344,7 @@ github.com/munnerz/goautoneg # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f ## explicit github.com/mxk/go-flowrate/flowrate -# github.com/onsi/ginkgo/v2 v2.17.0 +# github.com/onsi/ginkgo/v2 v2.17.1 ## explicit; go 1.20 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config From 6ad31c4dfeaae72a3d15ac254bc37244d1beae79 Mon Sep 17 00:00:00 2001 From: weizhichen Date: Tue, 26 Mar 2024 07:37:46 +0000 Subject: [PATCH 108/157] fix: strip service account token --- pkg/csi-common/utils.go | 30 +++++++++++++++++++++++++++- pkg/csi-common/utils_test.go | 38 ++++++++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+), 1 deletion(-) diff --git a/pkg/csi-common/utils.go b/pkg/csi-common/utils.go index d9536010e..434593a52 100644 --- a/pkg/csi-common/utils.go +++ b/pkg/csi-common/utils.go @@ -17,6 +17,7 @@ limitations under the License. 
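
One note on the ginkgo bump above (PATCH 107/157): the new `AlwaysExport` field changes how `GenerateFlagArgs` regenerates CLI arguments for parallel suite nodes — a flag whose value equals the Go zero value is normally omitted, but an always-exported flag such as `--seed` is now emitted even when it is 0, which is the fix called out in the 2.17.1 changelog. A toy sketch of that behaviour follows; the types and flag names are illustrative stand-ins, not ginkgo's:

```go
// Toy sketch of the AlwaysExport behaviour introduced in ginkgo 2.17.1.
// flagSpec/generateArgs are illustrative, not ginkgo types.
package main

import "fmt"

type flagSpec struct {
	name         string
	value        int64
	alwaysExport bool
}

func generateArgs(flags []flagSpec) []string {
	var args []string
	for _, f := range flags {
		// Zero values are skipped unless the flag is marked alwaysExport.
		if f.value != 0 || f.alwaysExport {
			args = append(args, fmt.Sprintf("--%s=%d", f.name, f.value))
		}
	}
	return args
}

func main() {
	fmt.Println(generateArgs([]flagSpec{
		{name: "seed", value: 0, alwaysExport: true}, // kept: --seed=0
		{name: "flake-attempts", value: 0},           // dropped: zero value
	}))
	// Output: [--seed=0]
}
```
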
package csicommon import ( + "encoding/json" "fmt" "net" "os" @@ -101,7 +102,7 @@ func getLogLevel(method string) int32 { func LogGRPC(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { level := klog.Level(getLogLevel(info.FullMethod)) klog.V(level).Infof("GRPC call: %s", info.FullMethod) - klog.V(level).Infof("GRPC request: %s", protosanitizer.StripSecrets(req)) + klog.V(level).Infof("GRPC request: %s", stripServiceAccountToken(protosanitizer.StripSecrets(req))) resp, err := handler(ctx, req) if err != nil { @@ -111,3 +112,30 @@ func LogGRPC(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, h } return resp, err } + +func stripServiceAccountToken(req fmt.Stringer) string { + var parsed map[string]interface{} + + err := json.Unmarshal([]byte(req.String()), &parsed) + if err != nil || parsed == nil { + return req.String() + } + + volumeContext, ok := parsed["volume_context"].(map[string]interface{}) + if !ok { + return req.String() + } + + if _, ok := volumeContext["csi.storage.k8s.io/serviceAccount.tokens"]; !ok { + return req.String() + } + + volumeContext["csi.storage.k8s.io/serviceAccount.tokens"] = "***stripped***" + + b, err := json.Marshal(parsed) + if err != nil { + return req.String() + } + + return string(b) +} diff --git a/pkg/csi-common/utils_test.go b/pkg/csi-common/utils_test.go index 8a75286c5..04a8f18ce 100644 --- a/pkg/csi-common/utils_test.go +++ b/pkg/csi-common/utils_test.go @@ -119,6 +119,44 @@ func TestLogGRPC(t *testing.T) { }, `GRPC request: {"starting_token":"testtoken"}`, }, + { + "NodeStageVolumeRequest with service account token", + &csi.NodeStageVolumeRequest{ + VolumeContext: map[string]string{ + "csi.storage.k8s.io/serviceAccount.tokens": "testtoken", + "csi.storage.k8s.io/testfield": "testvalue", + }, + XXX_sizecache: 100, + }, + `GRPC request: {"volume_context":{"csi.storage.k8s.io/serviceAccount.tokens":"***stripped***","csi.storage.k8s.io/testfield":"testvalue"}}`, + }, + { + "NodePublishVolumeRequest with service account token", + &csi.NodePublishVolumeRequest{ + VolumeContext: map[string]string{ + "csi.storage.k8s.io/serviceAccount.tokens": "testtoken", + "csi.storage.k8s.io/testfield": "testvalue", + }, + XXX_sizecache: 100, + }, + `GRPC request: {"volume_context":{"csi.storage.k8s.io/serviceAccount.tokens":"***stripped***","csi.storage.k8s.io/testfield":"testvalue"}}`, + }, + { + "with secrets and service account token", + &csi.NodeStageVolumeRequest{ + VolumeId: "vol_1", + Secrets: map[string]string{ + "account_name": "k8s", + "account_key": "testkey", + }, + VolumeContext: map[string]string{ + "csi.storage.k8s.io/serviceAccount.tokens": "testtoken", + "csi.storage.k8s.io/testfield": "testvalue", + }, + XXX_sizecache: 100, + }, + `GRPC request: {"secrets":"***stripped***","volume_context":{"csi.storage.k8s.io/serviceAccount.tokens":"***stripped***","csi.storage.k8s.io/testfield":"testvalue"},"volume_id":"vol_1"}`, + }, } for _, test := range tests { From 6c657d4fe8183e72c8fa65758a33c2277ab94517 Mon Sep 17 00:00:00 2001 From: weizhichen Date: Tue, 26 Mar 2024 08:47:00 +0000 Subject: [PATCH 109/157] make function more general --- pkg/csi-common/utils.go | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/pkg/csi-common/utils.go b/pkg/csi-common/utils.go index 434593a52..957a26fc7 100644 --- a/pkg/csi-common/utils.go +++ b/pkg/csi-common/utils.go @@ -102,7 +102,7 @@ func getLogLevel(method string) int32 { func LogGRPC(ctx 
context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { level := klog.Level(getLogLevel(info.FullMethod)) klog.V(level).Infof("GRPC call: %s", info.FullMethod) - klog.V(level).Infof("GRPC request: %s", stripServiceAccountToken(protosanitizer.StripSecrets(req))) + klog.V(level).Infof("GRPC request: %s", StripSensitiveValue(protosanitizer.StripSecrets(req), "csi.storage.k8s.io/serviceAccount.tokens")) resp, err := handler(ctx, req) if err != nil { @@ -113,7 +113,25 @@ func LogGRPC(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, h return resp, err } -func stripServiceAccountToken(req fmt.Stringer) string { +type stripSensitiveValue struct { + // volume_context[key] is the value to be stripped. + key string + // req is the csi grpc request stripped by `protosanitizer.StripSecrets` + req fmt.Stringer +} + +func StripSensitiveValue(req fmt.Stringer, key string) fmt.Stringer { + return &stripSensitiveValue{ + key: key, + req: req, + } +} + +func (s *stripSensitiveValue) String() string { + return stripSensitiveValueByKey(s.req, s.key) +} + +func stripSensitiveValueByKey(req fmt.Stringer, key string) string { var parsed map[string]interface{} err := json.Unmarshal([]byte(req.String()), &parsed) @@ -126,11 +144,11 @@ func stripServiceAccountToken(req fmt.Stringer) string { return req.String() } - if _, ok := volumeContext["csi.storage.k8s.io/serviceAccount.tokens"]; !ok { + if _, ok := volumeContext[key]; !ok { return req.String() } - volumeContext["csi.storage.k8s.io/serviceAccount.tokens"] = "***stripped***" + volumeContext[key] = "***stripped***" b, err := json.Marshal(parsed) if err != nil { From 55fcac350e2edf411218a08ad0f28fc65c570d10 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Mar 2024 16:11:11 +0000 Subject: [PATCH 110/157] chore(deps): bump sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader Bumps [sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader](https://github.com/kubernetes-sigs/cloud-provider-azure) from 0.0.1 to 0.0.2. - [Release notes](https://github.com/kubernetes-sigs/cloud-provider-azure/releases) - [Changelog](https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/docs/release-versioning.md) - [Commits](https://github.com/kubernetes-sigs/cloud-provider-azure/compare/pkg/azclient/v0.0.1...pkg/azclient/v0.0.2) --- updated-dependencies: - dependency-name: sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader dependency-type: direct:production update-type: version-update:semver-patch ... 
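
To recap the csi-common change in PATCH 108/157 and 109/157 above: LogGRPC now wraps the already secret-stripped request in the lazy `fmt.Stringer` returned by `StripSensitiveValue`, so `volume_context["csi.storage.k8s.io/serviceAccount.tokens"]` is replaced with `***stripped***` only when the log message is actually formatted. A minimal, self-contained sketch of that redaction path is below; the fake request type and sample values are illustrative, not driver code:

```go
// Minimal sketch of the redaction performed by stripSensitiveValueByKey above.
// fakeRequest stands in for a CSI request already wrapped by
// protosanitizer.StripSecrets; only its String() output matters here.
package main

import (
	"encoding/json"
	"fmt"
)

type fakeRequest struct{ volumeContext map[string]string }

func (r fakeRequest) String() string {
	b, _ := json.Marshal(map[string]any{"volume_context": r.volumeContext})
	return string(b)
}

// stripByKey mirrors stripSensitiveValueByKey: replace volume_context[key]
// with a placeholder, falling back to the original string on any parse error.
func stripByKey(req fmt.Stringer, key string) string {
	var parsed map[string]any
	if err := json.Unmarshal([]byte(req.String()), &parsed); err != nil || parsed == nil {
		return req.String()
	}
	vc, ok := parsed["volume_context"].(map[string]any)
	if !ok {
		return req.String()
	}
	if _, ok := vc[key]; !ok {
		return req.String()
	}
	vc[key] = "***stripped***"
	b, err := json.Marshal(parsed)
	if err != nil {
		return req.String()
	}
	return string(b)
}

func main() {
	req := fakeRequest{volumeContext: map[string]string{
		"csi.storage.k8s.io/serviceAccount.tokens": "real-token",
		"csi.storage.k8s.io/testfield":             "testvalue",
	}}
	fmt.Println(stripByKey(req, "csi.storage.k8s.io/serviceAccount.tokens"))
	// Prints the volume_context with the token replaced by ***stripped***.
}
```

As in the real helper, any malformed or non-JSON request string falls back to being logged unchanged rather than failing the gRPC call.
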
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- vendor/modules.txt | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 9b1d22f03..f220e6c78 100644 --- a/go.mod +++ b/go.mod @@ -38,7 +38,7 @@ require ( k8s.io/mount-utils v0.29.0 k8s.io/utils v0.0.0-20231127182322-b307cd553661 sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc - sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.1 + sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.2 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index cdfa194a9..c5252f11b 100644 --- a/go.sum +++ b/go.sum @@ -466,8 +466,8 @@ sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc h1:y/jDBU sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc/go.mod h1:nS6tUQgopFVyqFQW1P/luDXNNQfsQOg3WFs9GhzNjm8= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.4 h1:PGATiBUdiGTx2cYWwI5kzCJXeIpj51jfR/noXgo3ihY= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.4/go.mod h1:gD4GA5AY4cDHirz9VrjKVTxcVWFgjRGPaMRjzOOYYhI= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.1 h1:Lp0nALZmvMJoiVeVV6XjnZv1uClfArnThhuDAjaqE5A= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.1/go.mod h1:pPkJPx/eMVWP3R+LhPoOYGoY7lywcMJev5L2uSfH+Jo= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.2 h1:quGUBBYzHpEXd6PSNUS9UpheEcwW+AbDtZBIAuzS0jM= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.2/go.mod h1:XG0A5DEGP8oKo97GeDAcd2rXOCVX22JeHNLpiDVgfZg= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/vendor/modules.txt b/vendor/modules.txt index de501f235..29f734115 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1655,7 +1655,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient -# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.1 +# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.2 ## explicit; go 1.21 sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader # sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd From e13ee98537660025513f05b004883063fe7d4b3b Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Wed, 27 Mar 2024 10:01:32 +0800 Subject: [PATCH 111/157] Update workload-identity-static-pv-mount.md --- docs/workload-identity-static-pv-mount.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/workload-identity-static-pv-mount.md b/docs/workload-identity-static-pv-mount.md index c494cd643..9972e95df 100644 --- a/docs/workload-identity-static-pv-mount.md +++ b/docs/workload-identity-static-pv-mount.md @@ -1,7 +1,7 @@ # workload identity support on static provisioning - supported from v1.24.0 -This feature is specifically designed for blobfuse mount and is not available for NFS mount as NFS mount does not require credentials. There is a standalone blobfuse mount for every pod, it may cause performance issues when multiple pods are present on a single node. 
+The feature is intended for blobfuse mount and is not supported for NFS mount since NFS mount does not need credentials. Each pod has its own standalone blobfuse mount, but if multiple pods are present on a single node, it may lead to performance problems. ## Prerequisites ### 1. Create a cluster with oidc-issuer enabled and get the credential From 623498ea4c85cf3edaab030b017868a873442567 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 27 Mar 2024 16:19:51 +0000 Subject: [PATCH 112/157] chore(deps): bump sigs.k8s.io/cloud-provider-azure/pkg/azclient Bumps [sigs.k8s.io/cloud-provider-azure/pkg/azclient](https://github.com/kubernetes-sigs/cloud-provider-azure) from 0.0.4 to 0.0.5. - [Release notes](https://github.com/kubernetes-sigs/cloud-provider-azure/releases) - [Changelog](https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/docs/release-versioning.md) - [Commits](https://github.com/kubernetes-sigs/cloud-provider-azure/compare/pkg/azclient/v0.0.4...pkg/azclient/v0.0.5) --- updated-dependencies: - dependency-name: sigs.k8s.io/cloud-provider-azure/pkg/azclient dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- vendor/modules.txt | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index f220e6c78..9ba6358ca 100644 --- a/go.mod +++ b/go.mod @@ -156,7 +156,7 @@ require ( k8s.io/kubelet v0.29.2 // indirect k8s.io/pod-security-admission v0.29.0 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect - sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.4 + sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.5 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index c5252f11b..b31f73702 100644 --- a/go.sum +++ b/go.sum @@ -464,8 +464,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2S sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc h1:y/jDBU+4EcPiHGPMQeK6B4IKEhOp3CCts/ZaRH1sa3w= sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc/go.mod h1:nS6tUQgopFVyqFQW1P/luDXNNQfsQOg3WFs9GhzNjm8= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.4 h1:PGATiBUdiGTx2cYWwI5kzCJXeIpj51jfR/noXgo3ihY= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.4/go.mod h1:gD4GA5AY4cDHirz9VrjKVTxcVWFgjRGPaMRjzOOYYhI= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.5 h1:ItSTRGcTTVArwQwNOyRVqyIZwaa8QHrYPOGb8Dg5CZM= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.5/go.mod h1:PAr/DSzJqB6Pwf609TVAtAMr8uErgASj5tY63w/SNEQ= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.2 h1:quGUBBYzHpEXd6PSNUS9UpheEcwW+AbDtZBIAuzS0jM= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.2/go.mod h1:XG0A5DEGP8oKo97GeDAcd2rXOCVX22JeHNLpiDVgfZg= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/vendor/modules.txt b/vendor/modules.txt index 29f734115..49f8b7d85 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1613,7 +1613,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/util/string sigs.k8s.io/cloud-provider-azure/pkg/util/taints sigs.k8s.io/cloud-provider-azure/pkg/util/vm sigs.k8s.io/cloud-provider-azure/pkg/version -# 
sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.4 +# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.5 ## explicit; go 1.20 sigs.k8s.io/cloud-provider-azure/pkg/azclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient From bc57a9792f3cd70e4caa42e9393192e19ac040bf Mon Sep 17 00:00:00 2001 From: weizhichen Date: Thu, 28 Mar 2024 06:23:31 +0000 Subject: [PATCH 113/157] fix: workload identity did not work --- pkg/blob/azure.go | 13 ++++- pkg/blob/azure_test.go | 116 +++++++++++++++++++++++++++++------------ 2 files changed, 94 insertions(+), 35 deletions(-) diff --git a/pkg/blob/azure.go b/pkg/blob/azure.go index 4de7c370b..97694ae8e 100644 --- a/pkg/blob/azure.go +++ b/pkg/blob/azure.go @@ -103,7 +103,18 @@ func GetCloudProvider(ctx context.Context, kubeClient kubernetes.Interface, node } else { config.UserAgent = userAgent config.CloudProviderBackoff = true - if err = az.InitializeCloudFromConfig(context.TODO(), config, fromSecret, false); err != nil { + // these environment variables are injected by workload identity webhook + if tenantID := os.Getenv("AZURE_TENANT_ID"); tenantID != "" { + config.TenantID = tenantID + } + if clientID := os.Getenv("AZURE_CLIENT_ID"); clientID != "" { + config.AADClientID = clientID + } + if federatedTokenFile := os.Getenv("AZURE_FEDERATED_TOKEN_FILE"); federatedTokenFile != "" { + config.AADFederatedTokenFile = federatedTokenFile + config.UseFederatedWorkloadIdentityExtension = true + } + if err = az.InitializeCloudFromConfig(ctx, config, fromSecret, false); err != nil { klog.Warningf("InitializeCloudFromConfig failed with error: %v", err) } } diff --git a/pkg/blob/azure_test.go b/pkg/blob/azure_test.go index 9643c3a58..6c2f1c766 100644 --- a/pkg/blob/azure_test.go +++ b/pkg/blob/azure_test.go @@ -77,48 +77,87 @@ users: }() tests := []struct { - desc string - createFakeCredFile bool - createFakeKubeConfig bool - kubeconfig string - nodeID string - userAgent string - allowEmptyCloudConfig bool - expectedErr error + desc string + createFakeCredFile bool + createFakeKubeConfig bool + setFederatedWorkloadIdentityEnv bool + kubeconfig string + nodeID string + userAgent string + allowEmptyCloudConfig bool + expectedErr error + aadFederatedTokenFile string + useFederatedWorkloadIdentityExtension bool + aadClientID string + tenantID string }{ { - desc: "[success] out of cluster, no kubeconfig, no credential file", - nodeID: "", - allowEmptyCloudConfig: true, - expectedErr: nil, + desc: "[success] out of cluster, no kubeconfig, no credential file", + nodeID: "", + allowEmptyCloudConfig: true, + aadFederatedTokenFile: "", + useFederatedWorkloadIdentityExtension: false, + aadClientID: "", + tenantID: "", + expectedErr: nil, }, { - desc: "[linux][failure][disallowEmptyCloudConfig] out of cluster, no kubeconfig, no credential file", - nodeID: "", - allowEmptyCloudConfig: false, - expectedErr: syscall.ENOENT, + desc: "[linux][failure][disallowEmptyCloudConfig] out of cluster, no kubeconfig, no credential file", + nodeID: "", + allowEmptyCloudConfig: false, + aadFederatedTokenFile: "", + useFederatedWorkloadIdentityExtension: false, + aadClientID: "", + tenantID: "", + expectedErr: syscall.ENOENT, }, { - desc: "[windows][failure][disallowEmptyCloudConfig] out of cluster, no kubeconfig, no credential file", - nodeID: "", - allowEmptyCloudConfig: false, - expectedErr: syscall.ENOTDIR, + desc: "[windows][failure][disallowEmptyCloudConfig] out of cluster, no kubeconfig, no credential file", + nodeID: "", + allowEmptyCloudConfig: false, + 
aadFederatedTokenFile: "", + useFederatedWorkloadIdentityExtension: false, + aadClientID: "", + tenantID: "", + expectedErr: syscall.ENOTDIR, }, { - desc: "[success] out of cluster & in cluster, specify a fake kubeconfig, no credential file", - createFakeKubeConfig: true, - kubeconfig: fakeKubeConfig, - nodeID: "", - allowEmptyCloudConfig: true, - expectedErr: nil, + desc: "[success] out of cluster & in cluster, specify a fake kubeconfig, no credential file", + createFakeKubeConfig: true, + kubeconfig: fakeKubeConfig, + nodeID: "", + allowEmptyCloudConfig: true, + aadFederatedTokenFile: "", + useFederatedWorkloadIdentityExtension: false, + aadClientID: "", + tenantID: "", + expectedErr: nil, }, { - desc: "[success] out of cluster & in cluster, no kubeconfig, a fake credential file", - createFakeCredFile: true, - nodeID: "", - userAgent: "useragent", - allowEmptyCloudConfig: true, - expectedErr: nil, + desc: "[success] out of cluster & in cluster, no kubeconfig, a fake credential file", + createFakeCredFile: true, + nodeID: "", + userAgent: "useragent", + allowEmptyCloudConfig: true, + aadFederatedTokenFile: "", + useFederatedWorkloadIdentityExtension: false, + aadClientID: "", + tenantID: "", + expectedErr: nil, + }, + { + desc: "", + createFakeKubeConfig: true, + createFakeCredFile: true, + setFederatedWorkloadIdentityEnv: true, + kubeconfig: fakeKubeConfig, + nodeID: "", + userAgent: "useragent", + useFederatedWorkloadIdentityExtension: true, + aadFederatedTokenFile: "fake-token-file", + aadClientID: "fake-client-id", + tenantID: "fake-tenant-id", + expectedErr: nil, }, } @@ -135,7 +174,7 @@ users: t.Error(err) } defer func() { - if err := os.Remove(fakeKubeConfig); err != nil { + if err := os.Remove(fakeKubeConfig); err != nil && !os.IsNotExist(err) { t.Error(err) } }() @@ -156,7 +195,7 @@ users: t.Error(err) } defer func() { - if err := os.Remove(fakeCredFile); err != nil { + if err := os.Remove(fakeCredFile); err != nil && !os.IsNotExist(err) { t.Error(err) } }() @@ -169,6 +208,11 @@ users: } os.Setenv(DefaultAzureCredentialFileEnv, fakeCredFile) } + if test.setFederatedWorkloadIdentityEnv { + t.Setenv("AZURE_TENANT_ID", test.tenantID) + t.Setenv("AZURE_CLIENT_ID", test.aadClientID) + t.Setenv("AZURE_FEDERATED_TOKEN_FILE", test.aadFederatedTokenFile) + } cloud, err := GetCloudProvider(context.Background(), kubeClient, test.nodeID, "", "", test.userAgent, test.allowEmptyCloudConfig) assert.ErrorIs(t, err, test.expectedErr) @@ -178,6 +222,10 @@ users: } else { assert.Equal(t, cloud.Environment.StorageEndpointSuffix, storage.DefaultBaseURL) assert.Equal(t, cloud.UserAgent, test.userAgent) + assert.Equal(t, cloud.AADFederatedTokenFile, test.aadFederatedTokenFile) + assert.Equal(t, cloud.UseFederatedWorkloadIdentityExtension, test.useFederatedWorkloadIdentityExtension) + assert.Equal(t, cloud.AADClientID, test.aadClientID) + assert.Equal(t, cloud.TenantID, test.tenantID) } } } From c95933bb1f014c0144116d89e64c21a34d1518ca Mon Sep 17 00:00:00 2001 From: weizhichen Date: Thu, 28 Mar 2024 07:04:48 +0000 Subject: [PATCH 114/157] fix --- pkg/blob/azure_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/blob/azure_test.go b/pkg/blob/azure_test.go index 6c2f1c766..e1c67979c 100644 --- a/pkg/blob/azure_test.go +++ b/pkg/blob/azure_test.go @@ -146,7 +146,7 @@ users: expectedErr: nil, }, { - desc: "", + desc: "[success] get azure client with workload identity", createFakeKubeConfig: true, createFakeCredFile: true, setFederatedWorkloadIdentityEnv: true, From 
eed96022d814a7f3eadb825345b7460a5200b906 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 28 Mar 2024 17:12:24 +0000 Subject: [PATCH 115/157] chore(deps): bump build-image/debian-base in /pkg/blobplugin Bumps build-image/debian-base from bookworm-v1.0.1 to bookworm-v1.0.2. --- updated-dependencies: - dependency-name: build-image/debian-base dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- pkg/blobplugin/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/blobplugin/Dockerfile b/pkg/blobplugin/Dockerfile index e775e8e1f..5df86797d 100644 --- a/pkg/blobplugin/Dockerfile +++ b/pkg/blobplugin/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM registry.k8s.io/build-image/debian-base:bookworm-v1.0.1 +FROM registry.k8s.io/build-image/debian-base:bookworm-v1.0.2 ARG ARCH=amd64 ARG binary=./_output/${ARCH}/blobplugin From e4287ed0f6fbaf834b45de31abf74916a5f1895c Mon Sep 17 00:00:00 2001 From: weizhichen Date: Mon, 1 Apr 2024 07:28:35 +0000 Subject: [PATCH 116/157] fix: support setting kubelet path for blobfuse-proxy endpoint --- charts/latest/blob-csi-driver/templates/csi-blob-node.yaml | 2 ++ pkg/blobfuse-proxy/init.sh | 6 ++++++ pkg/blobfuse-proxy/install-proxy-rhcos.sh | 2 +- pkg/blobfuse-proxy/install-proxy.sh | 2 +- 4 files changed, 10 insertions(+), 2 deletions(-) diff --git a/charts/latest/blob-csi-driver/templates/csi-blob-node.yaml b/charts/latest/blob-csi-driver/templates/csi-blob-node.yaml index 328a911e9..f08ee6c3a 100644 --- a/charts/latest/blob-csi-driver/templates/csi-blob-node.yaml +++ b/charts/latest/blob-csi-driver/templates/csi-blob-node.yaml @@ -97,6 +97,8 @@ spec: value: "{{ .Values.node.blobfuseProxy.maxOpenFileNum }}" - name: DISABLE_UPDATEDB value: "{{ .Values.node.blobfuseProxy.disableUpdateDB }}" + - name: KUBELET_PATH + value: "{{ .Values.linux.kubelet }}" volumeMounts: - name: host-usr mountPath: /host/usr diff --git a/pkg/blobfuse-proxy/init.sh b/pkg/blobfuse-proxy/init.sh index 31fa8d7e5..c0cefe953 100755 --- a/pkg/blobfuse-proxy/init.sh +++ b/pkg/blobfuse-proxy/init.sh @@ -21,6 +21,12 @@ DISABLE_UPDATEDB=${DISABLE_UPDATEDB:-true} SET_MAX_OPEN_FILE_NUM=${SET_MAX_OPEN_FILE_NUM:-true} SET_READ_AHEAD_SIZE=${SET_READ_AHEAD_SIZE:-true} READ_AHEAD_KB=${READ_AHEAD_KB:-15380} +KUBELET_PATH=${KUBELET_PATH:-/var/lib/kubelet} +if [ "$KUBELET_PATH" != "/var/lib/kubelet" ];then + echo "kubelet path is $KUBELET_PATH, update blobfuse-proxy.service...." + sed -i "s#--blobfuse-proxy-endpoint[^ ]*#--blobfuse-proxy-endpoint=unix:/${KUBELET_PATH}/plugins/blob.csi.azure.com/blobfuse-proxy.sock#" /blobfuse-proxy/blobfuse-proxy.service + echo "blobfuse-proxy endpoint is updated to unix:/$KUBELET_PATH/plugins/blob.csi.azure.com/blobfuse-proxy.sock" +fi HOST_CMD="nsenter --mount=/proc/1/ns/mnt" diff --git a/pkg/blobfuse-proxy/install-proxy-rhcos.sh b/pkg/blobfuse-proxy/install-proxy-rhcos.sh index 91f51f8bc..2b445de06 100644 --- a/pkg/blobfuse-proxy/install-proxy-rhcos.sh +++ b/pkg/blobfuse-proxy/install-proxy-rhcos.sh @@ -35,7 +35,7 @@ if [ -f "/host/usr/local/bin/blobfuse-proxy" ];then fi if [ "$updateBlobfuseProxy" = "true" ];then echo "copy blobfuse-proxy...." 
- rm -rf /host/var/lib/kubelet/plugins/blob.csi.azure.com/blobfuse-proxy.sock + rm -rf /host/"$KUBELET_PATH"/plugins/blob.csi.azure.com/blobfuse-proxy.sock rm -rf /host/usr/local/bin/blobfuse-proxy cp /blobfuse-proxy/blobfuse-proxy /host/usr/local/bin/blobfuse-proxy chmod 755 /host/usr/local/bin/blobfuse-proxy diff --git a/pkg/blobfuse-proxy/install-proxy.sh b/pkg/blobfuse-proxy/install-proxy.sh index a642bb3aa..422109179 100644 --- a/pkg/blobfuse-proxy/install-proxy.sh +++ b/pkg/blobfuse-proxy/install-proxy.sh @@ -89,7 +89,7 @@ if [ -f "/host/usr/bin/blobfuse-proxy" ];then fi if [ "$updateBlobfuseProxy" = "true" ];then echo "copy blobfuse-proxy...." - rm -rf /host/var/lib/kubelet/plugins/blob.csi.azure.com/blobfuse-proxy.sock + rm -rf /host/"$KUBELET_PATH"/plugins/blob.csi.azure.com/blobfuse-proxy.sock rm -rf /host/usr/bin/blobfuse-proxy cp /blobfuse-proxy/blobfuse-proxy /host/usr/bin/blobfuse-proxy chmod 755 /host/usr/bin/blobfuse-proxy From 5f1b421b83b7cd3ed68304d958956f2eeb939cba Mon Sep 17 00:00:00 2001 From: weizhichen Date: Mon, 1 Apr 2024 07:39:47 +0000 Subject: [PATCH 117/157] fix --- charts/latest/blob-csi-driver-v0.0.0.tgz | Bin 5967 -> 5981 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/charts/latest/blob-csi-driver-v0.0.0.tgz b/charts/latest/blob-csi-driver-v0.0.0.tgz index 46130d98b0e1f304fd7d1b7ed972c46bdb5e2ed2..f76109367e956916a38dc69921b82be5e6e70f55 100644 GIT binary patch delta 5660 zcmV+%7USv9F5NDWu79-EIX>-l-W+|`Y9F^xJKv$!{#Yn)fsmMg*LpIo?BM<+4g2IX za6uUDqD8xgeSh_*izNtIb3oXf_)tPAVDY+ik{`ionn%UW365`kaWoc zCCx8M04*D_&s>u*KLs~|KLcw9$MRO^A(QPJ}ZhsKgbeJug5{o<0v>Ba}S-`>a znPb_EN=zv5rov}wWR}m#0!xVIblI%w=huPjjtw_m^vkccSI9zUQ}dSL*IRyor2!dY z4{9|5d_inDw`~?sspi+_fF*R1ghki@3-pD!t{SuH;-D$xXvu;G z8iQ2RF@MFb07RzhA^L);ei#T)e{;mX(1xPNsN~Fbfp^uHw#;H&J+uTyuaIIEu_+pd zEQI`qa~{!WTu*-}wFz`u4tGFQ;}dLWCqGBFftOevFC{iS6BJC6Rx zb&;F{@|h!QK6B1vs4jYV%%gGf1l)BPE;WGZjDIW(T?9fhFVl}u$&d#UJ6PhHtw9nU z5qgF0eiRn={k-L)c)ElIXgdj`Yfcy^a(Urmq45J1%AC?4Q!#}^z!zizh=X7Ow*iSc zm}}PjBtJv_MZ{7_@GRR6gamHcoJeA0S5Y5> z9e*fjEpKoy(M)lw=9Bq8<1(P?rDTW)6s;B=!U9o7Ib#w{mPmkfW*b0`0l1rtYF7$lGtGbJ@ zQec*TX*L_D99sC;hAvVc#4__&LrT`zmw&$JtJz)kS&9Fg2{{D$h4H(Uo{)1QR`X-C zz;)Dua)`ha7aXMoV#blfK+UT}4){PF#3=eW9z@-1NJ%Uwwzy_oc2VbFr$@)dd*2+e zE9Ws-r>3_Dm)KZzk^U>7SM(!R^3XL3DSCKe__=m5h4_`rW=iJWE;`Y;j3f&~u77C5 zSA2Oyis&!Jyr=i3lUC~^37>NKEdY^(vi(3wftTyniFy@`x13n}iNs;vh3;V4MRmPXaOOfIt)Vsyyi zrh$NK&hD2hPA93S8X=)VV%Lq}SD-QQ%BN9pH;x-8^`dT_f_|O2W2ez+w0})U0rDfh zzw&{;BQ6XBFVWM>yr}=noCi(MP@8}99W0^uHaDmXY|r5u%Xt?yJ#D&UU+MQ7;=x#A z&mT|?(jy67>?!-}8n{PmT(g#mHT43V&DzT+6~#yyqa1jBv1aVvZpy%K*n%{)`U-ss#)ei#-1!U?Gj+vd5dA$4rc-j?MfI!k*qVdu zskgfSdp7QUxLj$9dL6w&D;P0i59>jmwTtb6;H5#Q#U65VWTwvaG$)p)Rsp&N>IqM<^CIr)3 zO&N7A!_G=+YBf!O%0VwA^9udKv<;5kU(sNyjp+09o?7h6@G=J&eS+c^7J|$uILN~k z&y*n;;zh(*fQ0J)LVumhBv5ro#6mv`t$0mPs#B;?r%DYn=%Ft}&NbL}R6`Pjo|3;1 zmmbzjd$JB~mzMSaO7MK`yK4@wZu;Yfyq8a=u_^v@+-hh1zbB_BZw~(7=V&)d^{fl( zC^a(+Po)m{E?a_jrf8={s9_ql+6_bY9PGOd$c&dmVi}0n$bWYM3s7gtZY@-@wsetY z(a4BBdw4*N5oox^%Ctp~kBTLO%(6tdDW27_?`Q0Fg(-&wJwDcvW$9h~e_d^rum9UQ zxIS>vkp2@xK^xY8=jik}v;I#{4)NdTX%7$0f1(BPy4pCH5*Osshci!2i#>-f`e*a; zu~ySy{d*r%M}ON4H1q;%Gef5gXt;+53pp?)6i{8;ehRLRtoRiUT!|Vc%RsNi#%B|8 zRFGE-;G8&s{!j)gwbAL3{x9*2QHbhRj0~uw|F3zgU1AJ z*!X!HwPMCKNKDMToaBKkwS&wrGbs{g+_s^8Xk84UA0rpz^? 
z*hCqV5rFYW8*Ifou!|lZP_(vF-=={IJL$tQwN1yY_AF=F_|D7tbrXE5mxS7G;FOJ~ zgfp9YKBM~JkZ`r<;e5=c#im=2JhXVN$ud3mDBuzCK8?Qo`6_3k7~v}aL1D{v`2+nH zFqxXw$A8C~I`=fhys5>oXb7QvHj?awD=8VHFt3qh|G}QS24Y1x^<~Yn_XF2m#ptGt z$Ig9$iodfThtU!rJLWJOYg2@C@i*OxV>*Mi{m?2zr>3`LH0go8w9fsOTKWE85HHy= zEoA-v-)VJDPtyB;r}gH|!Tx`ic1NhwMLliUdw)P>EtwF8eP1*e?OGK6j2$~_twqWt z77rS34+bi9LzkKGH5Bd^#&x9wsGHCtl;1Yo)pqd*qJ%m?B|28%!Ur9Cj>B2S=%e?B zM6?TaLwByU=a&gKD>GWeYJP`{Sw1x#qxBb(PfWaaG^RM&f|sfj_hYV0thKX_tw!Y4 zN`Lk-j=l)A5xI?;+Am+1JXl3d4E|6%mylcq=_3uE*~sGeYle{60Me)v83b{kMDjtm zVC&3u`vWDZW(H&gP(9SXwt|loxp))gBd2eY_Ifh8Su5TqRV&(Lvm?y)P7^Qrbg77T zu|y_7;(qD$nPFDBq+chn#TZh4@zsYam_)W@S64XvVUr% z^B)>WRrU{i7^R^!S*2w+?rA6STxL^!Qje6W|J{$j|<0ruPAaFrSH} z7^rElTZ^+!J@-)Cuh?-o2yyoP59ht!s6QV6qX5ta>;ShQ%o5uSQNa~ifJ8|u3*=e5 z(`t!=_i!&cwoFQ#747k7>CJHktbfD!*eBL+zBoH+t%H~ZT*$L_s|YBJ^;t7z+@Jl# zDC5_kM_{{{cqZzXV*uzP`vr{PhclddhfDyji<)5uX+tpkE-ORViOkVW;t^+=rX^#c z1CKh8nUfF`3pWAuC?7d)hI_>qNXsES@j8x`qXIi4Wi5t)8FmvENV}6{IDeY7DEw}j zG~x=%A{DUTudY~wI=^hwz&mP+eNn8UC(;_BOdt)U{u-+ldn9rICSAqy8JB0phE1B= zIK8D{X4cAo8qY4`zed1SD_Y+ZKN1cjKLDx%U*yelW9`IkA#$WuB>B|&42e4^@vvcvw`?M z^Umi?GMl+wbaQdN3ma+KEu7-pXdj;>J zOshnaW_BP+C0T6L29(@)o6T}lBHY?2WVKD4V}|PWtl&iq{B(13J-)pjUH$t%#q{%9 zEP*$Jv~Yjfz+n{zx94vg_R>Q2{_1wz`*3@C_5Sv<|G9rzm49kJPq1Vf;I~*uD4mDu zI&ozzv3;kNVChsC2K&R$gVELSV}E#~#s2#I=BJYL5J)>wt<%&j3Pnc~Zv0m;x_N>& zTaT;A+q0dSvYj&nEkqQr<^6sYA>1{_uHsAxw`l{HS@q5VJ6F_Q7U+ldD=ERO_ANF{ zCMc>;!C@RfFMq2EqXkaxLjLs0LKmSttNkXE|NrLX==8AupQDBOe;7PQ z$FM{CpO%h&CjMm*?xZ#XU0^+y97~vmRpQDfCUjtzK{jR(}GfPHA4|VTx*_A5xgGsQtqVnd4vf-hl*%d zk8*#&qVvTz351HpT~h~hIcH|&?4C^*=Xyq@6|(c(Q*(KMGJtvkCv+CxB|_+IFr#es zxPK3Jps(pno$-1t8zziwA~G4rn1vSfXF} zPElA8E(7dZadCYtC5h#r=RKJ^Td>yCFq?nZyLs^K<~Qo`>@~Ad&CJD`?8fkV6f3%$ z#PZc*Kjd(f`&e@x!^|~|QL~xnVd~`EvCZ^UH`Nzvh<^ zqrLvy!TIp^-RNp~(;xPVU8Hr&D4`NKwk28Qk`xW5!Ek(YetCKO_VViOyHDf7KyZJ0 z?zK#(?(=D6?&K-(2peJ%k=xJx(Rgq*+=*D`cA!#X*^zbjF|^Lcv32$`xX!ktTNmcd z(JV3t$_S17H@6?p|9yLP-5=_h>Fw~-$DPKOyMU>brlR*GQeHJWy}|hW?PdS=({=Cs zrr&$J6RphcP^H8^e0tlz?BCp$$DDtYgpBt&+)^_*H-{&bJ!S=BU2sByG_xSt{+7#Z zye{q;VQJ<8Q@B9do3O+n>$3MV`M5E@=SbAtGUk3r7XEW53+u&RH4jeVS)6q$2mCbh zUGs8_FPN?%Zv_{;?N{nH639zJQI{Ph@M}=tUK-<=d=4yB=&O&W1MEJQ^Rs{Nf9Q<{ zpZlZRQUCqz`0C<=-YD`I(lWLtn-T6~&qUM;ZCHbO!^P$M9Y}=5V-B1ZqsplF9rGnm zs#w!IB^o5Gh=So3<65)7H+?sl5h|LcVVNQ(zDlA(+&T3{)}u&*?y3U|mg1p8;Tb9v z%s#iT+Jy9RIN39OR#}V0oKSxEN^N;-^%DdJ= zRH!`AJb#UvgL1@cstvxq>WwP(55_}Sq4FUuNHdU8GMbQBinD?sbI~2CC5-CpeYHg? 
zB=&sE#`YYPhRm~Cb`x+hX01{!KfV??hcJ6@@Vcoa`fBKC;CHaJu)cpC+WJD|+#(LB zEmh{%RkeGi*G{;;an}r|WZ9;I4ZGDinF%SaZrZZ=Wh--kx3ZT*@yVh1B!{YGTilE_ zy*nPNTPpFlOLa@yYN#%*Qv$A2BBz<3@XG_}&c?bHKG<44=^rmD;_y&^^EK*kc8%LS zN8Qf4vt;2pjWZw5u=zki$hXANso7e~6|$iFXv6*Ml@Fy6d8uey+}Y&1DXVF}5_zaS zdhyz$;v9kUT$G6MBy-G{&DHN7HnrK=)Ia)@ITRfk_A28YDXhBMcK)p7L_|M`aqyp% z`%lWE{*!MM8-KE@g1GA15}hRI#@Om6Oqm$#n)vFbvY`Zyb)ZjjSErAX8_L1f7(TnR zxDy8@cF&V1>Nft((p}gp^~A4d>`66xYpl1*(~-)TSp7~`T0Bb4yhD-T%M=N2Z&peHo3#kGz4UGdiXPo@5Es8|l=-R3=-7~cBEV!4z zDRz^W>FSG*cdsAiN)31Qk3(Re=#o#-MZeVRziLd0GtMt{e^uidTI(Uu0{BD&%s|#( zqq<9k{(n~=chrB(JgNUV%0>g-ZwG?<4%W(4O>J8dt#s{f5#L8@ZQBt67#+%dghAggS)IzQ&@> zTz+j$LQ=bzdNf-S>UYlbpTRd3Z|(`}n8;(x1@q$+I_hF9_`?+s%m2V0yOW?)k?DtrgO8)2RXzVd z<$n^G4d?&%(NQOR|I_ht>u~;mj<(_a|EwE{K`oER?{mXcm9Q6Nlv=`I=u|avz{0Gf{Xoq%ahjwVs-2N{B0RR8&NlU{3ssI2t C22PFu delta 5669 zcmV+=7TW3EF3&EIu7Bk8&1viCxYhZt)jn2_P-}lIl(#@g%)e_rnO1gif0BlM@)@`w zjCRqYUBkY=`qRY{gseFrY)*WspPm06a6oTeHbG;_IGzEz7!QyUo*Km-bWuj6YKs;A zR-@Hu)n0nYhn8sB^}oPw0AjxiVEy`Ux7+PbcKsi>PY&z>&geF+P2z=H> zp3NH`u{jfLDjPQQ)LWNL6cj=nd?tuweAz|KyB|f<7Ni+Tk%o|Xwo)GIZ$EI|YvvMr z*+qltkjZNf0;qav3v$ibf++m}ynp}`qInlgfK!kl6bVJo^5af1$;Gi&FB?d@WC0X} zxaMpE)hI??e1Fv?L9f$jcQzBG%z>U>TqBqfAvxY+psI7YG%X%=PqR>A%(fsS5ajo5 zrkI-!30m#OzZysDN!4tZ1x~n4Tn7`E*ca+at@i2_8o?AeP#chBh*3a{Bad-F0&EUa zsK*mPfdB`6nFG~i2%RA&@1$_8jAY=YZSsFY=8y&LR?pk*>rKxlyS6VK?99J zs_B?wSAPH^Q}qyi!BjsC1gO6`Vqa)O(PLC{=DNVU>PuT@F|Hn30;5+*F^kv~jYAef ze#1GB=rgXTKa|=8IxUAgAgb{RwzHF>(61~7EgBb!Q(KuB3N@ybNv!_TGJzdO|Kqwy z&H?$%5jCGV=P^_lJv`>oxOf8YIt-T@z;s5Ig?}yrA(@xyN2p}T1Bo3han05siH-=p zLU%t33;TZF@=-ip!UD9NgwZu8j1#%MaIw(%feK|#>5r+HLL%S`vH-+Euz=fu#GG+i zRr+m4ZSZBh0QBrM)gtgi(w5HwJZ;Q1YkrcSq5dLbDI|E7?FK>uw`@)%v9YVDkHHQU zw11X2xR+?AI92n>e4lX{Q1wzW!~=>}iw?lq(&mJ?fCGcLQR^RLsR zJR(K(mtx-2d(%m)^^t^6Is6uYNJ80uprpXdb?ZdE3dUPbto=k{v58bpqw_=>*^T^b zO&y+2iCWH~ic{Ku3`(^!9zK%H1s-Z&jm1KDu@Q?q#nPRLeg&s)Fl)? zGdJ+y11!&N8-)0o$oaqlDz!L7i`>KapD12n;wtUbMeQP$b5c#^9OaY-P=)oa%O=x6 zz%^(0%N3`S)KiU+P$9AFM(``p71AHj|7FgDre~i<+J`-LbFq`wj76EV1Vg zs0Qhggf8}!{dEo8BSIK!sP*Q}o4oFQR&7{%>Um)W{8S?NcRCecu(an6n|<98?WBYY zb&*+5=Zf`@`jleit~LcM1Ane%VJhBp7WmPrZUor*nG*?D)CQ#j-i>dTKCE!P2Wj-g zSYmZnGO}1Rc5gRj;5TeR8d`mYz64`KD5WnwFe(kpmNgX|1M= zI+tN*r8KpgCP3w&7m|5}eqq`M$L_CaFx5u%`FT$*c4c^(1B^aFaSIDUW)vLcVTxzU z5Df7mVk|&Hb$_AGWq%T=IwWGDAB9%DCMeY@RH##>1{w6w7b52xY&)tUi9t`vUx-T& z>!m$ehqg=0`hO*OzV_WUhgUcKaYNqAC)3yz|2b~8GydO`)6>?$|N9*6MyZ~4K^>)L zX5p#S0pDdy(9RU?vB%Af`#kO8q4`g=AYNA+2UFsLT>5b4scEt2&_(}jK0elJ z8mxcsW9n#|fq#ZxU~OjTlmQL*@L(Yari22jYuiu3)sYpy!htJM!(u;i-9)S6Ocv3`vX!n`YQh)XTS4Z{R`YwZEp2w8AW)zz! 
zV=@9T{%C`(SO<2|!vl)ecIw+SP+=#1IHtDgnAM);EF0f>8NY6VPxX>e+YOwu(Ufpz zGtXyK9~=^{_B@=AxwP1H>yd{RuQgew#~uYdBHpLbmp@3V-)5!lI%a&bJswu2&cZRS@wS5x~mx7l=0ZP zFHrG!_Tw;G;$z1gW@BxNa4!C)J8?{Bu(lssh3M4umW(Dnu$R`k-%=~z{|n+JJEn!K z-~T(U&gn^d|L?Tb>%;#44DF6kr;B>pu=jw6=!Pyc;cF<|EsX0*2T(VmMJT^*xU22r4@3!dfJ$_%z=aPw^c;t?iqS{!4T)$M z>W1!IY0obcY*uEph}HZK7qfh7I!5a+B%hdg?PyGKvIQ?yC+^2wmso3O9b1jatCj3y z9DjWgXd`kPHML*9E_twuni%|{b}k{g3era!JhPF-@7D|=u>qt}Co%})K8fUmZo$@> z>GlUoQq2s=2%vhXeQgCFDRS{9$VX1!B<=NNazyWE^663$?P7^c zfW-aM=`+Kua!J2VV2d%N{Nk$*pEkjm=zk*Z%q-3dWjAJz>0x~aJ-m+CCB zWE{N$$y5xogz`F}OHNqEKHMZ|1sNlWkCl6yG;X8MozB)+u`a+?hW#~X>I@n`M@XGV zyJ!?pNj&J86Xe?QC88oR;Z&$(ixJ1RZp@RF2;C+#5yQ_s$KslUKH)X%>1EYO=YKym z;zYcp(hApU7u6G!Yx+l7n&|N{_ii2T?j~r7XXx>G3Am7F?N$*`80)iU%D6xKiBZO{ zKaaq6G4V{)FUJ7TMfM9A!4GFR^$wW;To*ON4AO>R_FYznt`nJ~o5Ul|GEGayLI)mo zATuW+CKhf2=utj$+zj`MFOZf)c;a;&D@O%(M#@?Y0W<6-ERc35$#67jQGfW|GHJvW zltn6Fzh7Ok26cYfrh#|V6#JrBMNgzPLYY7sNc}ZdEA~j_08F}y?qi~;jgycS&K4} z3$q}K_aC5+&j^)g-~Vti9)H~ShrR2o!SJSNM+}p;R*OZKXf;~hVzP-8uVTu^i?oWm zg&54!=tx?$foq4PWl|4jKxIMiOxj2>q8NiEw3odAQ6|%*gd?44XA-F`VFZf_Gbg}d*+?b znPfI|yXfZPdKWg*vRg}Dw$)m$j=^Ts5nXiDO2LF1-0F2Z{n`gkm{Z)j+zkBxpGN)Li&4MVAKnbk zFK^!sF8f7B;XBT}?0;DG(P~O&AF=-dmLr(v0iw?m=Du=pEOj_6fYV94YW51=MVVHK zB+cwVl1j4Js0}E&?>3v|rbM{4QOIhWIL8dt>si5z82IVt=6Za4J-Yh$e~RhnwO9gg z25I5`vVp@Y3~tZgHteN^>iyO2xcA}q^6LHVW&d;kvMSYlo_}D;G{A4Mj!-%e)pg>^ zSYrE5E5XvKFbwvGp9iC>;m7{)MvMLR`OQxy=OK`GqFSe^Srm$nCfxY1V07~YZMGg) zk+)|%Gi5tx23m+HUd#LaDnhtxj9tZ<5N^{3F0<;L19q;cyDZQT>sL~OS?ybFm`qSq zpMt|UeqL4+$bT*WF8dL)1pGVFw6P_5AY@Z}BW#)ZBHK^Q4rd4@vm1{N$!gSUF1D~n zX7BE`LcE1;t_X5>x7rhFRXbl)b(|Ht-JZjl_IGQJspEoW6qrS5Aix6789&oG=YM}$ ze^oU1-}+%aX0>Fw8Ac17+=cwTR#bDP~^Z|s!s0vnNS*b+9y95aEP@`z87$Ywhi26Gb}ymZpU zU#L~c{|Qa&-LHh%l>dKndYZ}qX|>y(lW`B5f7a=n?EP;E}gnE!{tV{{BV zr2lE@*k|Hj2H{R>BhUrbW680ESy&~mY+^!ZruI5AgR*t(Sw7P5BaXt9lqAAkG6!=b zeUY5O0xk~Of-jaWSU9kkNazb$U_31-1y?f^0mQZD=@h~1K`-S_TAxRl0CcE`cJ(L+ ze=Is*Y?DB!Sll&rFqd;?R?hC(baAd{L|P#`&pkDl2Pgxm7jQyn;awtx&IU8eR*(Dl zU0rePhrKlt`!&8$5N754tm~`sj~%ZJq@#Yf4!Rr-)?@R9?xDg8`aEQtjTT+uSc<>yGblx zE%rkWN4bwR=P}G&!x%N2c^;-t&K=uKPjyp$p@x_zeWIoE6-w`dxG+-(rHsqI)HT}c zza5+pZ{LlshBy6Tuh>Oer;HLRfn!^eMJ`FvU>Xd^H|Lj^w{I`6-oE=ZE(`>xf9GDy zbm~5zM&?eQ0*|mE77@Aq+#ihxSHqo%Wo`#5C6*moXCFiBY#du>FN5oBJGymY-W<&$ zbD)gSxPNo|@%-PnSJ(Zaj+x#LKYiS3Y`F`VN@*&3Pa@@2qthFV&);75Z$Dl4&Tsm? 
zw>#0w+zyo?wmFobY$q!a>jDW1f25fO$@aHgX5)1+$_Ps{517IQ(%yt623ePBpUIt# z@jXXU<(BdBLt^irJF!AOcQ&K=;$1fOeF|LI0nMiOr zboBVx^xa@asA!gkP>PuNDoF)#=hPQjk0J@Ws}3w!iic`~XQ(DH``o_j2+~K_WY6@u zVl5JLn#kbF@m%f9QMa^Se_dk62^tOLHyz2FDhvy&rMU;1~Tc}Vapn3ip^#SFG z*Hi|4dzBVdY88w(uR@hVT99TSqr@>Gu@q+o0pX(iOiLKmSMh3#Qb_FimW}N>C=Hos zwd~g3V$52lT7Gbnv-O3@xkVgMe_N`|ud8ZzK(C!} zeLJohPRX)O1qODjM=}#qT8Xq}p~_a~{%$2IheDA1N0BJ`;VtiSx0vy~{Nm@jp)Ro}<HLqXrm6!dMA?-Un*V$;7?6=Zq+51(M$+RG*28|wc~ zk4}%W_kX`>AMXEtmX;A<_0b#VVoH5+e^~rye&%PyTu2oNX=ohCIpgGay-OwP2G>>% z>7Jp@V8OivPO+Q3OjTceyL)e~s!c5&Be8-wp)z9h{Y^n%br!TIt%&BFZ6z?P4J+ zj3R>0k|5Gjj*>L`EA02hF_x~Ipw-4w4=V%+w|IfM+=sJXNX$yEE#h;vqZ$;%N zFHU$CWnUl+2Q|O{R<8d;0l-E8p5h7Kr2kvF-+w%A9sIw~({cffw4oR81ewgmLPfIn zgtozk-2URIEtKH4cN0~+mne1l&$@0{2E9d`)P$_%ViU&_D6Yb>f!jzriSA+3@8=#= z>tCmBum86{32fB=CvP(PzjJbO`2GK9X)mV#V}tUb`45`^&<^d;4(-sMx&2=N00960 L9Pgok0H^=}Y%Cn< From 8a7e4f4f15981098878204507dca019e71f5108e Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Mon, 1 Apr 2024 14:27:07 +0000 Subject: [PATCH 118/157] feat: use new restricted liveness probe endpoint --- charts/latest/blob-csi-driver-v0.0.0.tgz | Bin 5967 -> 5976 bytes .../templates/csi-blob-controller.yaml | 8 +++----- .../templates/csi-blob-node.yaml | 9 +++------ charts/v1.23.3/blob-csi-driver-v1.23.3.tgz | Bin 5944 -> 5949 bytes .../templates/csi-blob-controller.yaml | 8 +++----- .../templates/csi-blob-node.yaml | 9 +++------ charts/v1.24.0/blob-csi-driver-v1.24.0.tgz | Bin 5959 -> 5965 bytes .../templates/csi-blob-controller.yaml | 8 +++----- .../templates/csi-blob-node.yaml | 9 +++------ deploy/csi-blob-controller.yaml | 8 +++----- deploy/csi-blob-node.yaml | 9 +++------ deploy/v1.23.3/csi-blob-controller.yaml | 8 +++----- deploy/v1.23.3/csi-blob-node.yaml | 9 +++------ deploy/v1.24.0/csi-blob-controller.yaml | 2 +- deploy/v1.24.0/csi-blob-node.yaml | 9 +++------ 15 files changed, 34 insertions(+), 62 deletions(-) diff --git a/charts/latest/blob-csi-driver-v0.0.0.tgz b/charts/latest/blob-csi-driver-v0.0.0.tgz index 46130d98b0e1f304fd7d1b7ed972c46bdb5e2ed2..879affe2a2c21c1804debb348138bbae3bbf6d48 100644 GIT binary patch delta 5793 zcmV;S7GCMkF4!)Rc7JPyzGzG?b{F7cvDgP_;0${&)*KKvC%)9r&VLU$ptmlYpntJs9M1qMuzvlw+wFEIyZ(>cox}Qnjz&B@gKiC> z2^KH{pLLOE^M*%k&IFsvhRr0N71weX+~0{Atauyl!yA; z4_x<}xx`*}(SKk%Wb&GW0IFWvf?RX9AWA;~FCf5#Xx;@A;1nbXMMBZD{J2w0a&fHH z%LbAzSpWqgt~r}PHHuLeUv){)>onS(%>*fPpr;qt2xde`j<*=7>Kraji$~qlEL0e? 
zEyxH2`F)!y=B7h}R=e@9#?g9GHQQx@6K)gN!Neu@g@1ZdtG#-KMlb~q)CMFOViXYL z$YUIk0Gopp>hT0nAizOi=0G(WLT5HeDPv zWgIP8&_H95YC5La6@bW8Jw#tH)ei#!>TizN7urzt7?qs4F7U4U(w13_tB01r=oM1T zA~r?ikcE)naLyz8jO*zSr8a?1%i#`)YJ7t2?0=*v^ean2i^hfG)K(^jLX9b9605(o zOkl^+|8rd==YV|Xh?>uw^BAg&9v<^(Ts#4H9fnH{U^*kqLKlIM%**s6R5IjŸY zW^0f{M}%IXyB~#xeLrvcD4s510oqQ&=$aG8iCkW|SZMq}g)*n~$5c!q5%2|B0OBB6 zz<+H(V$Qg%D*ZO2Huy4L0D5+sY7zJ$Y0KvTo;K#1H9yJEP=67z6cRkkb^{@STQ(<> z*w|In$6yBvTFV>UOEgoQs`+HT&$tYzdMO#=0Y$4thp<4DQO=k|lO+-$rCQQhEqaaa z!eCPrCQAhO*z;XLUG;Ycq>COD?sM@;D1T9Jw_0uVSjz)F)@q3*u@bg|rJ>|fsizA) zU-7E$BCHgcrC*xO#wmvuKDMEY)CaN5{MC?>HTI?N`D%7oeOBT>XF?7^eqsD>r6=T^ zh}HbqEN~sQpd2Fb#05twftYdRFi`U7>>ANW!NaehWY(p=>`;QsCvfb)sGc<1HuFej>5hL@KAz zc_NMMM*g*?4o{~9+&w5(=N08+h;mmglw&Li|kReBc0;S{$N9?&14S6fZDwm3Hc)c9F_Cseh(&j&e!^ zsKWZzWs_+j;F`1h<%-iu>ZwLZsF2uoBls0)47~Db)Z2~Y#!0=XTc@C3C+^s3bQ*2b zQGoo2@2`BI?}!V-z)SS>GB4`?GUq|lGt}mvd^}MhGeku|CJDrLzSlV-k z&A#r4c2dHHy2z}jbH(~ceM&KMSDONs0oSrH74JC<{OD9S0_^s^ zYCN{);CkwRdmk=WnxbAuuh2>dA>+AHkAI*{DHQRK*QnQj`|15_G#I|SdX0WQ z9}Ne?_pecZG`booGfdgip?c9ws0Z=iHJ8xfUX4bnQWsX{Jf_YyE44Y0Y;yoxP^rv zGYSs!FvT-v2!?nOF%}@9y1!88G6_^260y*aLMvVql5xB90|AD8UOFe>B-5HZU<_A z)zwz{`oEon>jM`J=|3?Pv|;^sj!us=>;LrR5dVFi_VCdBCt47%tBr#xaX~J9IP=uB z*mLNje>NW)dat zmGA!r@sb_WLe}s99ra&&|L?TkoE{(c|L16TggRZ+(}ukVRMwISVc7RYbJ4Cv;m_Ey zqt;raOk(k%;r3vlLN|1o3136uZed(kI)J(fEkgNi!(D9`e;`Vz15~1aV+AgJ(4prz zoK=iIdT&TXyHGcD=Sq8inP9UrqeZOdcet44Q`0e8e|ieefYEq#zYrsXKtCGCDlP3?@kEP`mko` z*Gh5#SA2oA9KsW?<5)Q=urpHDVhEUFH(`OaJ4uG4 zNsGeomPsS7pe#}W`~B*QHK_B;HVwR^rq~z7DtaQV5y}M8Kr={J z#XgTQFMAqhMa8TAY=LT$mA2ycew`_xm3%#)I4buy=hm7~T}^j$s1VYT@W| ztwyUv(i7=k#gvUlX%&GBF_@>(@w8|&*Um}Hq#?|IfXag2nG}*@L@@?SXpegVqDVInIB z$5JP~0yv$4tLC}jU6eMJ$hgc7B&p?!E!2R2lKXD6S#B4EdlQAMx`-3TP`#f0auEYR z-P~M{Z?8vJ|Nc)g{k$4V;LRW{+#oh^ScSpu`P+uQv{1djx*hjE++JS2zrE~#?q61= znos&GnFjbRUU5okp}NjO8B1*6X)RVd6^6n7@bh4FHT>8g-e|GEKEL^?)dX_Ozsr8aECK(HG;M529thdgt_53WzR30y zv%?`k$?QhkKUrE@&0iK4yzJdqR*1KM&}S7v?(QRdBCTo%gsNV!LLb_5c*g#2%`tUc zu#5t;2n_^Sz&Yb*I)VJ}FYA1Y=KfnhZO5#ZEH}ewfycU#KYcXN6(tY(^;)Rnm0hd{ zlQugyZyd&#-y>WHAC_0#q;0KWcG@|LRq;k1^Ln{F9=Xl#us3$fcY%$_Hf#xhn*xZL zC{20zrbuM7oeLwg2@YO51LCjUD&+r!*7)vM!feX_KRG?k=Kr)?Zw~oC&(SQ)N~Qb= zle6Er`_beswreIs=wduD<$UXtfzsZ+Op4DAxjR@&Y|lsVTR=ECam}O8oB^G(E1Ill z%T=)@7z@K+jTD4o(d?F&WD53w(stn^b6{0Ycdh0{NOO&fdeHah-~q?3F2B12I!e{` zSqKLX0i8IDebQ|<_0DzPyHT+HdwY)Q489?qS7H5A=D?FKgj&D;k6I_EZ!-D+Z%&R6 z^*_(j!u&rB9;0K}A^lHF$37GPG6;848-Xsc9!riT%)%;hWkwS^Gqu-$k>!-FW6$!D zejjlZrlceh?vgo}8|jPW3>I*4$QFFDY{9~Ty+lG^$O7YOK`FSJp$H(ZHBYAqUJrUH z57hcR!UUj0MYO9&IbhL|WSay+#p151gSnhDvvPLNri*huBhm`ldG4vXJU|&hy?_%s z3-1ykbT*h#wtC#Z2RqPz*L0?ig-Hf)fO{evo3V_p>fieu!W$$CA}`_1DAL>lkZkHp<I*f*Jn2R)m9J2G z7sQ2`Iw)nl{H5O1UjOaje0ckAbTz!`4|~O4)jDOAPzfB{k}PsbiU!kQIKDZ*yu5vT zdG+?)r*UEII6e1Trc?L%G%|Pc6gZI$v53g+=l*CsxEk(6EOR?hDY5LxI{O$}XXDs9 zdl_73+tICy4CiQn7MTNOgvR}w+mGk}zP-Bc4|SOJcKGSzPGiemz*I_8(R&gpuNs}+ zV0`}evVZ&Ox_5rl@4elLR_1o746)782W2~1fmj!NP$11LNVdPFlV2)eY32b_xIo&Q zu*4wiGVL?DvoXHsNTl2{q<%>3{qrRD>P1>Lb4%e|oOLRH$M!VxU2|xQFNUrldj%J> z?N@5Iz%sNDhaN9v_>&8_Wn5&C+m65ffh}Cn4^f`XcL5Btdu8fdxzPP)+a*)dXgr z+gBYy`WTz+nLcBzMPg1C8C*G@tDQgUmey;;Is5*H51-!lZ--aC{_XJmW50;*iN9S6Ptv=5b#a{%aGer4&HPke9zb_C*0Auw*6UmUNKg@nhtim@Q5v&reB?Pw zYSx|o3eQ!X`FMuS4==tYj?S9aTCR`<-A5bRU$3$!jmV?f{V(|gkjl#0uS6cof?m8V zs5lm1p7Rkgo@9>svbp+w!KOA7oBGGK(yT>)f~c&>-0L((mrR--a{?E#vl)dCCRXwL z{gy>#ycvZBQQOX+m7Iv^hbIpHj&grTS#&)$f%TW)a<&qs6!WDnvFiKS-q+ysZf>~` zUs~~05EETnJ(C387#H1yDH9)E6C2%B(UZWj4)jUx<@8Zi^nrT8H|dXK9&5pgwxT zTuiAi?hlLq%+LIcmq5)#tGWB|`sSo8jso zW}ei)9A!Iz?zaO$eFtY{s;0K7h*r9GvxssCVY^s}isP&}CRhNKJIq`-n%Ag*?w66< 
zJSsvw{LT6uN(Pl0`wk9E|-AjIh z$$!C?;i@%58K0=6^X@~c`y5TUsctHckui#sM!iC zV$WLkNfesq=k2nDb{s{qUVWN>LVb4aq~!a#b<=(X{7;~iG80i*A?;NksV`FcwN{f8 zr8qsGQVM_8szk^`nf|q8LiQlR+s(^gV2|BNP^!rE!^1as_Wl-H)${*TE&0 z<$nL`_~`U-{(p|P;r#!s8-cDp_?EVS7gGGmdH7aTe)8gkXHoVA!f;T3^ZRe*`acu^ zYy{vbp5RUTzjb<&(f`M-ga7w=S}q8ZHuU11Ad~%As7ThH&^FkR+g}{Dg%aHMZlY@U z5~VKxS=Y^=w}_LPkhNTF;#dO3RX8?q8%Zb8J#6~@+=FWU^R(^t|Mn+=jr#v2bN^qb fb8>R{{r_iaFQ)%vgYux0GZz#H!Yq&?0H^=}Ee&&> delta 5769 zcmV;47Ix{_F3&EIc7OJSH;Ku`?gCsa7W)8ATsE<6K`e)p1@Pv@9CO)NV$c0`m8 zwN8(Z^}nrFEBkM&eR}j=`}pMa&1viCxYhZt)jn2_P-}lIl(#@g%)e_rnO1gif0BlM z@)@`wjCRqYUBkY=`qRY{gseFrY)*WspPm06a6oTeHbG;_IDei2x)=|T5uO^w9&}Mg zq-u*5{#K*aXw_bN$cL6_+4aA`ZUADx3Sj;EZ@1g+PImntw@(i1|2Z1*@C>>&geF+P z2z=H>p3NH`u{jfLDjPQQ)LWNL6cj=nd?tuweAz|KyB|f<7Ni+Tk%o|Xwo)GIZ$EI| zYvvMr*+qltkblW*4g#opX$x}A*@7tj0K9+z6QX$+On_66AQTBj&+_9=G0DZTRxcY! zx?}+qgt+Ex0@Wx+U3}FgL9f$jcQzBG%z>U>TqBqfAvxY+psI7YG%X%=PqR>A%(fsS z5ajo5rkI-!30m#OzZysDN!4tZ1x~n4Tn7`E*ca+at$+6F6&k@5I8YmqWQb8fj3bY6 zKmu$IQmDrhK!E@UeVGH*WC)!hEu%uA;4tl?xs<->HWN%Y2x~gb7EOu8ooL#OPRT6b zVEN3kY(^y}6nInNvotcx=VXB;M02`q*7Wo1z;(xln=bn0*V-#&A+xD@%kb+hKfuy} z46z5bntuSkAU2%aHVdd!^J{a!61qt80MIM+vl@!}|7#S&B5Z&K`a)b+joEZ@(3Eks zWI+RsL8|GPVpjknQ}qyi!BjsC1gO6`Vqa)O(PLC{=DNVU>PuT@F|Hn30;5+*F^kv~ zjYAefe#1GB=rgXTKa|=8IxUAgAgb{RwzHF>(0{Ki1uYsEic?#e7z#C}lu4}q(lUV^ zNB`rxNX`NI%n>!8Ip;A{7d<@Y(YSa5?m7&Y8o+c$mW3_?A(@xyN2p}T1Bo3han05s ziH-=pLU%t33;TZF@=-ip!UD9NgwZu8j1#%MaIw(%feK|#>5r+HLL%S`vH-+Euz=fu z#DAP|SylRNMs4tAya4pY0=H~VB(brp zsE@%86ttE%xR+?AI92n>e4lX{Q1wzW!~=>}iw?lq(&mJ?fCGcLQR^RLsR z*^T^bO&y+2iCWH~ic{Ku3`(^!9)CWP%mp55Uya2=cd+cDy51=`bD@#eP@a~cdPtG^ zs<*;c@81(4Mf3uuQ?O+h)rV|s&%p^?s8_-tE)HJ_<5#C*J%LP3Z>@Q?q#nPRLeg&s z)Fl)?GdJ+y11!&N8-)0o$oaqlDz!L7i`>KapD12n;wtUbMeQP$b5c#^9Dn7M22h3d zt;;6UK)^L;_sbQhlhjjqhV^&=`2-)2O!_$BmPEQMXP(zfRn-)95tXrlSD) z5#L|=K;ID;hJlyp>1AHj|7FgDre~i<+J`-LbFq`wj76 zEV1Vgs0Qhggf8}!{dEo8BY#2|YpC_+&6~XLeO7H)dg^&$1^iSZ_;)%LU$C_24x4@5 z5$&Xe3w4oMPv?sDkNT8idTKCE!P z2Wj-gSYmZnGO}1Rc5gRj;5TeR8d`mYz64`KD5WnwFe(kpmNg zX|1M=I+tN*r8KpgCVxQXpcj&Pg??e$2FLENXfV}A^!a&DEp}yinFEYIL2(NUL1q*j z1{w6w7b52xY&)tUi9t`v zUx-T&>!m$ehqg=0`hO*OzV_WUhgUcKaYNqAC)3yz|2b~8GY0TXV(Ae$szvxJniA3`A@VUURN6jQ{sYL`f%o{X|dS&vRhF)N8X6Tdw4fpV1AqS>}0;+4 zl(}XUn}O+cZ#NCw(}kw&|GFp5-hX-+39oe{OaKPYUu zE`Olk0wzm)5!8QY+v8 z3*sd^riHBE|2wVD=}CJ3@3htH!~Xva?T%2Vi+b9y_khY;G9e86zGyDmwJ7`cbE;He4DBLZK>q-YuH=#u+ziqgy?cxta33Y%rU2NS#N!XcSOMJm{Gdw*dhVgNU$NtG5aR6nAI^KdQGY!CM**M<*a2=qm?gFuqJk^30Ev=R z7Ra-9r_~Y#@8Mo@Y?+ieE864F(wpN7ScmbkPpsd3ady&L2Qdk_kZ0{y5l|TGvu4V; zKl_PM#;-q*z;-e5Ow=#O0MJGD3mCxKAl(uQF6T~>y!6Pcr%#3Rl! 
zO-sf?2Of1GGbbS?7H$IQQ9g3q4EKsJkd{Mu;&mJ=M+J69%32HoGwdcTkaj1@a5QOA z_}wyT#1)i9Dqz1~U9kpre%YphchnU7qF6;wq%}gBKpIH>HC8M3NaO%ax{Bp9F3*Y$ zn>4p^e|k&7%&e9FG@f0=e~o~vR=zrR5dRdn8>L^T`c{y6eKOgrsOK@}E< zueRM;i!zZ5vmlE1AE1uU2$g5w|8OxL-1djP>#M==rf5eDleSiiMVDwbTHRu@i4?D5 z%EpVdin@gu%+u&dTC{;{hoogv4`x7RLGMi3e@HQ+7=tCWm%RW{Cex&ZBb{ky5~(fY z{Is&eN=!^)9^Q7CV=JQ~`ZWfKPUo}0^W`!{uLONzZeRwp(tkM#U0KH_o9`*=5v}%T z1Mz$2ozIzMHgmh^=Hhx6Hqx?NNI3+lNVzbd&a^2eZouJY&OywX>zK60xXq>X--GOC ze<>N&TrvTAY(?(~C6Zvb2QFTY!DiGEU3An+!Gs&!>UBH)+6PXUQ{1`S4E+C}M*Z81 zQNPz8-VDw!Z{H0r`$b0KJI=i9SoP6rN@gFi{{faGnC1bZ&l2Xoa&RnlI4pqENxN$H z3f@JTR*59d>_C!Ave>8%D7o)8o8_iNf4H?#$ZDH7#|+i$S;31K`03{6dVG65y88Ej zis|RISORYbY2p5|fx{{cZqMH~?4^b3{nhQb_u=;P>iz9y|8xJcD%E_RV97MVZ?TS0 zIuF%#;>uWJ`%Wvt(y1^E_J^MbqpRV^{_sYN{q_0HPbKFekanV4r>R*KijF4SfB3Ip zbn^slwjNiJw`V&uWjkjET8JoK%lrK*Lbz*;UB#IYZqo)Xv+A7#cCM(qEYJ_@S5ksm z?OSY^Oi)yxg2Om|URD#xE&ne25wisRJJPhVC3zrZQ+p$9nfW5yPs|Qy2qm){j}FOd z)M_rautsL@?zKX^g>J40a(B1de-mj{J6}|FoE5s=p2L~;cWaKRft~V*e@~IfW;+)K za}ylAbkf9Ms8z`S2~F$WuY}o@|9^6Nn#uoZwcDLT{?BtX%d%1_Kf>hfH|~Bkxr^324@^1V`edNAr!SM@vqSC)CQuYzfA~@K+-R zVOTU<-zAxXy|i8U$Q)Ref74y7c@ffFqoN-4{W*BRv8&7P?tqR`b$u4XfkQwi&SIZ* zn@zn9o%e1OZ2#V#V>*LxNP$&Y|CBlKqzj?eum7Xg>6`5RZ*NYHPY>(=Ia-+ihrwfX z3_GO%Y3bN!;$H^gPHH321=eH9v4mMzC9Z5@LT9G-Ix>T@b?jL_f70(Gj>43bB*I-X z2XiBRk(|K-E)Lm(FP1G>IIx#U=nGk3JS`{%S2Gj=#I@$>6v693FXc{JpGTMgbf}1S z^(Y4{I$vy)K&V*UHFYqTb7oe~?%8y4u4hDAAv@1KHJ1k{1E?2pLTBM!B81KcGs;$v z`}bf6`kKzv886A;e+_U?WCJah(N+C>pF?dft<%vjuBC4YPT@n+M-+exn}GUNalj%v`L=ZVazSv7)<4 zEMG15Lk>r|k2U8p%v{45HJf=JrcTZs+e}Y&Q+=U^m?wRrf2HyjO7DWWFjEJmjLW{% zHQMXH9h?tu-;J(@H~nF+*hN~Wj1nq=V_T9%E=kc~8Vtub=a-kaZ!fRjzWX#T3ufu^bz$Bd z%_4K4jL^7$e{=is{NJ}%*ZrZ6ncfaRecWkmxeJ&|X)1b8BIQ-1(;JM>-(L1_KVA3E zZ~DErJJHJA4wWIcIh3GmCo2%^0tpJFnFY!Aw_Il9bur2aOEV9c!UfXage3-9mua8L zosIE5M^fdM@$o}q@1HxdS1-(}Ico~f;;d6SIH!^Cf0{#Ed>M2Fc`LY#ZNE~tkw9J& zin{D5fnS6A_R<)~&vRhmd1m#|bTHhc77oUtN5-y*|JBDUTs7 zV{0-G;Xd{vM6J-?G?+JBGQQt|L|8oLz*#Y>jC$WOxACNk<-1c-Kw`%)7;Z7Hgz%Y2 za5!}IfB4w+-C#zjXqJXhikSE+Nd=SD^>2sgANxg=cdc8fP$i&w{u=cG z<%ri*27G&!7FB8$j5n`Bl|ovOW+0=)F(I)Oe`f^&;iCIYOBmHx@oI}wNbLERjqN!o z4Vh=P?AG35%vz;detd;*4q^6=-*r<-^fk`W!0%vbVSSUc^@Yf}MI2CDs?4veYIi`d zop60St{G0rvP}gBcB@A+6H;1>6WvjvAYFXUW2I z8fQM9Ve{jSZ;7MRsI`_WWI^}QhWpnm^+_Z0QjxK^v&l z>e_OeBLyH?80(t&>ZTH(1der}PjXkMkCGcox7HXwyR*0x2PJmTlPBsnfBt>Z zUDzu1#II-UN%eGVthdV3k;-jY{ian~JW9>HLqXrm6!dLxmTi#2O8Y}~am?)>)>_MK zEo)`QUX^0gzg87wdHoNcVB6ZuCEy$C|4xrikFxiFziA)t|9+O15n%Pv8|Gq4eQ|$S z{AYgVXT)4c6$oi)9LPE2MjxbUu{CG|Co7F|8vy7 z54ztD1oa)9m8qKArXpJD+RY-$A%yK>Au5iu;+S9oRPHcy;b>l?x?e_afAg?xLDXio z*8x2Vq0Ja~2u&`DA{x3jZZDHovn;EYU#)KmbqJk(jYXTe{Med=q;@a)4JQ8uTZXIF z3}u|E-l@grz&2NeS1CzU+9q__=T+WQ^j9PnYv#S|<9$_C8(ygMXHc^hRK%XO?2{-o z&ClCq3GFzFV!iq_h5GE;e@V&rbL*!42>4$>DP<<2vO?OcK2l$#^lPmqCrWpEKBW}? 
ztW}AShcf+Z$%O1dg14KO|A9SrCqbzq(+>}a+k*;P)${*TE&0b+Y&W93Qt2 z=l|zu8_xgFx)JC)gl}mJcp=4~oQH2k`p-{J+oBasiCAp%?E2nassPMY8sUw!wzn{^F=Dl;E~^6IHvHD0TVIx^4!& zMV!=xtmR@8#}X*6!m)wdNIHq`VbkyD9#rdJr){tQw?7GN)c+@MGWx%hEf*sQ9Pgok H0H^=}CJj*% diff --git a/charts/latest/blob-csi-driver/templates/csi-blob-controller.yaml b/charts/latest/blob-csi-driver/templates/csi-blob-controller.yaml index 9ece72de3..38ef78cfe 100644 --- a/charts/latest/blob-csi-driver/templates/csi-blob-controller.yaml +++ b/charts/latest/blob-csi-driver/templates/csi-blob-controller.yaml @@ -91,7 +91,7 @@ spec: args: - --csi-address=/csi/csi.sock - --probe-timeout=3s - - --health-port={{ .Values.controller.livenessProbe.healthPort }} + - --http-endpoint=localhost:{{ .Values.controller.livenessProbe.healthPort }} imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} volumeMounts: - name: socket-dir @@ -114,17 +114,15 @@ spec: - "--cloud-config-secret-namespace={{ .Values.controller.cloudConfigSecretNamespace }}" - "--allow-empty-cloud-config={{ .Values.controller.allowEmptyCloudConfig }}" ports: - - containerPort: {{ .Values.controller.livenessProbe.healthPort }} - name: healthz - protocol: TCP - containerPort: {{ .Values.controller.metricsPort }} name: metrics protocol: TCP livenessProbe: failureThreshold: 5 httpGet: + host: localhost path: /healthz - port: healthz + port: {{ .Values.controller.livenessProbe.healthPort }} initialDelaySeconds: 30 timeoutSeconds: 10 periodSeconds: 30 diff --git a/charts/latest/blob-csi-driver/templates/csi-blob-node.yaml b/charts/latest/blob-csi-driver/templates/csi-blob-node.yaml index 328a911e9..e5ac93ee1 100644 --- a/charts/latest/blob-csi-driver/templates/csi-blob-node.yaml +++ b/charts/latest/blob-csi-driver/templates/csi-blob-node.yaml @@ -118,7 +118,7 @@ spec: args: - --csi-address=/csi/csi.sock - --probe-timeout=3s - - --health-port={{ .Values.node.livenessProbe.healthPort }} + - --http-endpoint=localhost:{{ .Values.node.livenessProbe.healthPort }} - --v=2 resources: {{- toYaml .Values.node.resources.livenessProbe | nindent 12 }} - name: node-driver-registrar @@ -174,15 +174,12 @@ spec: - "--allow-inline-volume-key-access-with-idenitity={{ .Values.node.allowInlineVolumeKeyAccessWithIdentity }}" - "--enable-aznfs-mount={{ .Values.node.enableAznfsMount }}" - "--metrics-address=0.0.0.0:{{ .Values.node.metricsPort }}" - ports: - - containerPort: {{ .Values.node.livenessProbe.healthPort }} - name: healthz - protocol: TCP livenessProbe: failureThreshold: 5 httpGet: + host: localhost path: /healthz - port: healthz + port: {{ .Values.node.livenessProbe.healthPort }} initialDelaySeconds: 30 timeoutSeconds: 10 periodSeconds: 30 diff --git a/charts/v1.23.3/blob-csi-driver-v1.23.3.tgz b/charts/v1.23.3/blob-csi-driver-v1.23.3.tgz index 2b88e25b6df068249a25bd7b1ee7feda5e765ba5..6a787f455131eb7883fe9125db7426878e409260 100644 GIT binary patch delta 5943 zcmV-77s%+iF1;?0JAZv`a~e7F_`c>-sK{R3-u*Lx@k92g@(ah1EENY+;AHRqTU#zM z(}2!1(yXK*X1)IIzmhb=Ji~yAo!dou5i`^3Zb_|HtGlI{xNKtCf>;hG3*gPmIp(sl z#Gd=z-b$<0YMq^&=>J=-R`&na(b?&DM<-{k_UTExb$a$)>woCv=&b!6YVD7O@)8J% z`FE`+)5_@gAU5bRBe&P_|lC&lvT;j{{?mf5c^dC>*xQ`(NU|Fo&P7TH;4KE9EEsz z2Avv06D(i^K7Z>V&*lw}*qjMAl?|JD>aEKr3JM_(J`+STzU-hT6QX$+On_66Ae0<6 zBZ1Pu63^mUuO9e;>kgSq>}3b_rvoO390X8VL&z3n$k~D@RRO#jF(tTuTq!~GxN+KO zZ6fKC1yB%T$k_y{QH-kis!D=hyK&UoOpr1Mx_Y9GV1Gu0_j(5a6IMbD)|Gp);h#RVWl3rX4hw(ifd(g6RffO^4Z{DY3W{O`FjvnFSmy zpE;JzsDH$S0&gmOmPTgzoGh?}s7sg4ntnbETz71^>7ZYJt-V4PGRvE{3p1;lj3AK&9$mn*)~6L6Qf6UZI~=Q`FzDQ3#8$0T$>Bab4ABQ^i43#?g`m z4KxO+x?_r60fkz>cH; 
zb6q6ofPChNn$Mhz7^;IF9`k5iJOOtdhARzVDkIB6mw}MX%hV%OGUS284wkrPYmh`m zgnwS4yB~#xeLrslDW0xi0or21XvhiUL@qB~EHr+gLK#+iFcnis1bjgjfH(*ia2t@A zGcK!2zs;x(zKkb;?wzKZ1b#@`@;QK~&AMjIPx3R+BO;bUf@j%oAS7_h=0p-3yNdc4 z>_9(qkd;VI_N>+z7U^;67{22>j*v8@<5NZT4GA9gsos|D7jSX=~DMsJgYkh zD+Olhmu9VT%AtjiZRj91AeQOB8d9>xzVtj__3o<9O8nVF@b z1+JqeltTobxZo%y5HpS(2C82ra=-`bAV$&0aX+eFLrP*fvBi*a*+K1pogJSPFMV^s zuAIkU?V4U5Tw-I;vh3;V4L3O=SaOOfIt)V*q(zExKOWzKb#!C62`Ai#d-pnnqFG-YDqnQF@J@m-}b3X zD12sa;K2u2Uf4DW@iUS0z5`Thafl|lhwnd8yuid&+Npz%s%e+woPGeR5WIESWEuz< za(2I5F*Qj&)d&d{61#2$zXFYcSDuXeQRAd>S}&^BE~wXzE4CZ$#*wKgKz_vc*FMm9 z#DziNC2D$^7xjOcU7)EMDu3=zzJn!n-{u;1iS0QIv7C2M)6-Tu_LVBXAs&n+_WVB8 zAYGDByPh(&hQK`{gfVUH!At<()N{wnqO2g;N}5&w9Ny1lob-oHlu!Mp3%=;w>kpg(y38udn_ z>yfg!l-V4r7tMrv5P$!!xr7Gysx?Y$x*$8Kxxl>Xl1VdsQqyx%L9zoPzB+shXhT{YR2jb-*Kdb7kWECiWRaFB;7o++Cz z#EXbA_6XJWh1!BiplUyeg?o^T_t}ZF5Rq` z_Fo;!(@NI=E5Y+!EI~8}*EhX!L*C0L)7TXMIX-J={J*DXXU7Ns?{kzJrG(bSbdVrxvPPTe`@yXk@vb zKRlqu2sB({Wq;42$4AAIL1tMZTprKs+(71Pp~uHMvMjxTA2e2~l+XX$Ik-M>(UATV zLqQwnfBX3CBs2fdPS0A0`Trc{;i36Yv>;wbn+a3mf?WD=?x}9E=g>j_Y(75LY8tF} z?_=s{>w$)@Uu|&clmQL*@L(Yari22jYb#K})sYpy!heA)QNv^z=&9NGY$A>d@_GTB z69>>A%2cH`Iy=^X63>{2sBXo`fI9kb&0FnKJCzMDhC9V-7ESPEq}D!up%2ryF4Y4)=`c5A8EkTB`p4>ZpEO-=#6k^O!Q+jA9dIOhy34 z32m@p-+#|IDvVM7@PMMZof?}4Dy*cp$<#I#v&yubW#cd}@>mlm6D-SW`(wI<7S+oLE)#QQWF`HNM~L@~lu`h&ujtMUi>EnqU$ ztB;R0wF7F1c~gsFQ4>P>Y$VwUXHqgSVO}H2{(pl#cMZgfaB5`DwD$woT?OW*jK>~* zfr`i3k7H+v!Hzl12G|tgT+mHd;+W20Z9TM#%c<@yX-&FeFRgRGrBc5B7X(RmObc1R z{x3@ZVHx{ylb{u(i6jOq_cm$VM#G&>(O9uAz*dI+kTbOhjh`c=_M;s% z3aBI=bj=QOZTk}Ok(h8QXtG6%V_P@o$x6g)li7#iFwe0Va?m@xW<5Qv8tJ5mMx1?@ zR9fLY?Vx(1b4~pyOA|dl=H9Kt-GA8xE%6LJKGsbKxB@Ejvp<^YbpRpEXCf&Es@v<< z;=EnYJ=FFqb{q~uoPYnrMYlWZjmQ5e0CWL6z%2+o;GVpSIRPOadAgW|%hG5X`>I%Fr;8F1krP;?&Z#WGr+DQin8i z5@Mp_CX^l}A;-mVulNFKIfN%($5C@sU}vPP#Sk#VZo&dNY9|?vIxPysTPAh5g0e`3 z?)R%J)}YQ$+cfZwnqprRtAFT;v_>csNCT-yV>M%sMGjytrEdXsd`74|cbSb{Wyy4k zjhr;+anegcFRh&mX*|13g*C#iO8HFLVJf7!-8d6;u5ShT*SnOxihUkqUiLK1jzYd0 z{%T8~wKy*kxiB}Pcr98V*nWPv|jKLDx<6eL$lYdgekxsQUnbekXgj(5V zB|4@s5pO%pvBgml{TgFL=kr{ zH@N9vT;0CwU-gPCymy>=**??zq$!zw#Qq0Zj$oPxh=w=JUVr7_SZb$N0H@P#RX-QJ zixQ|3xtH07B(+?zg&I(D-)T0>?SgP^qL5V=aq1YV*RvxRG4RvP&2W4>99{qWKgIO( zYAAs>gS2pg*uY^G2Dj&L8}`yd_5S*H-2HHSb^ZSKs`t5fRh4Q!{ja1O;J0|jDW!$# zItyhiv3;ktSbu3(7zTTT&;8N$;A3xaqs4xBar0Bj<_4slIB(O`EDA+O6K?!hFuJ*e zHk*H|FxsPr+dvKQF5ZbPJT1!fT%2(W;2#?N&s z`QKmG`4sj2x4zqsSuL4vhS36#bs>LxYoO1TJmlwVp^8^_v2IM-?A*L?7+-#ua2*exzAZDU8<>8wmkjDgXcU>@1uAb9A`><#~!_S*esCVRH5xcR!lk#iN?Z5V{=q z&B4C)$v|oEUM9t7huj@3CAQ}y_$?qDocQ#k&wrdgow941JlU2z&z4{;41YCJ5Qasw zT3(SU*h|}mkIaG9!MdU97a`5vDe6JbpMwV+yZRvB9newggr9|Q;1JM>v)Ct{W>c?R z7u_2L+q<{tn9krE(s>o;KV=R)=|ZUW^Z&SYlFR>pb9!=gnE%gF!u&rB9;0K}A^lHF z$A3N(|1t=7QX7FjYCV=5OPGb{#g!RN=**nLjx49_;q@#Z>Gu&wVMylPYYqf!}^8q6om!h zGQh4CAG438BrzRyzb8{?3+8$nX7hYE55C>}M%|vhW;Uvxxm=Uo7+#cOMYoh#zJHqR zhrEq)w`|VIn7Ml~DmL>xOr4wux0&whrW&D!m?zz+rScU@?}E56QyZm>m%r4T+U>pV zUkq;Fjjjhby+OCwt6Haw5-NdXTaraCNzq{H55_kaS68=hudd&|`!p_$9jE(V%XI2y zpGM|(o&qPbAr=w2{oEUk``3e=h<|172`VL)ZCQICO>1v#TYE2!Yj4}Sb&=s5%_4K4 zjL^7ubNlh)-?!Jp-av<0ZwH?~?$oy2RZOKc6}=~s@~YA4_Qw}*uX?wihTV&sUia-z zv@-WZWr%H#J}BGC3dFkDg92%0L9+ENo%~V(OEV9c!UfXage3-9mua8Loqvt-Jx3zt zmLc^+V((u#vDaW;oVP1S__Q9_HIugZO6Up_Sa4C>e&ui@fxK)KRoPJ{zXtW~q>HEED=ANv)eQfPr1 z%p0y6-)}=AEFN>LdaOHTe zwhO6STCWl3{QDn1e0tlv9b9*Nw}Xq1y&}rHo?fVMRzUOoHBJeXBY$3VKH%FsZ&BsM zg7NQFIIEBrq#4L4`%FkI#d$%9xah9a5=Qkkz1qeU5_`U7V|xxtL*`j6yV188vsS5= zA7AgALzun)cU@N!eVcRC@H<#qSYPODeIasg5eL+gD)aNI+EvhNJ6zwKYnoFsZBwCw zdNrHo8#vlwl;z=2$AACKp^lvEFunS0#+qJ)4yQ&c@wdyVk@VtRU0kOGT&F}%GvB$( 
z1L(@eh!sBAdVcHA`V?_^IM?zu&b90s+<1=jFzdE#h1(&{d_2SEqls^cqf?%>mMdgI z*U|Rz*E^PzM&!}#`j`B8qcTqRE0KpYJuiNyr#O&ap0E)yo_}PH`LenCIABv-cTN2{ ztTbzpAS&Z8_d1QyC6lJdoWO-_Z$`fViBMY)@z zEHs|#!1~KCIa`TRiuqDcR`qpk?{n~Z7q{HNmmXIt2xzW7`jQ0Q7}VT^DHGgW6WH8z z94CQe9q5z1!hh+l8#Vwt{2P1xxuxkhIGx)bg|%G0;kwbUZ$!qzTN%&AXgfuqdyLSeWH*2i4OXuUjJ2N zN|^7z)cw^Nzt9j3ffm3g8ej&p{uv)9X$UFYJawZirBN3eG-MH`Mh10(2lJr)~iob zsL!sQlzcz8ZrYE4{|S^*rXwmVq`m4RH6o>>wd$NGx#{_oQutY`5+M&|`qz>U*@Fac z*Drs8HFhUKsUp)44}Zq(`;$x6{r^)g0okzsKRQ0negEs^`0TL%e~z+Y|NpEDfj)Kc zEq^TmFQoXB{qU`*{N%+6&!X%LgyEp(_g~8Oe<%Rh2*6W3!JG7d>+Ce6|4&*6|L^mZ zTpT5B=*25RCV{e0k*wXJZLlG?zc^|OCAjU~MAhykN?rc5u9`t_5hv9lYq{9Ou>^{% zaBScPfriu|9_V9V){QeC=Z(dp!pBw ZP!8o#4&|B4{{;X5|Nlpv$LRp5001EJ&8`3d delta 5938 zcmV-27tQFsF1Rj`JAXZGbK5r3{j6VsQF?Q|dqTZAc6yre2gg!VO&nPxIqBWg>3AS= zCE-j0EC4E2UH|uI0FdIlM9GSsyGQ7U#^hpm0WKDceSxMfn_9LYmcz*sc=K|BxooVk z=YDsv)@rp{XK&u<|F>GL?EhQsv(xX|Z%)tNp0!TiwBCN#YJb0JzwLa7T8CqyyahsH z{$1VY4)?ufa>UUku6He_(FCgls`ZoGpn`6~Ll^8Rn)#>WgkARkK|dIN^pd3Z^cxFV&M;?bRzZh8b|6HXz9mqktGk9^-%n z*c_x#kEehF0S@}I0IJClI!9Vug+jq$)Ba|dBDN) znPb_EN`Fi!@TS6NX=Ikq$r4M5rgYV;>F1-sbti_KF8cMi+ACxsv%Pt%@Z+sMz{-FO zu?Mx90KOzPT-Y`Xs8sW7bHEb1Nb&&CEA)#Riu(6!6v85GfCc(OTvv_RbaBv(akOGV z15H4x>6l?x03tK>5PiW^KMVw@zd2%GXhYFsRDW{jy1={YOIv0!t{++gqgO~Vi`WcJ zLKZ@P!#R)WGp?sU6xswjErxp_s_`kdvy-CGuWbe`8kdSwTNxV)HKvqFtp3t6fgMNx z&vlVp0P>k5YCdx=VyG^9c+8`5@f6%m7_Kyc>5ME3T?RrjFVl}u$&d#UJ6PhHtw9nU z5r2Ax?tT;&_WisKqJ6S=x{vC#N|3T0U7kExhKBH&B11jIqGfZKq? zf^k_@`fWyS@MXLJ^z1a%BJe}fmd^n^ZPqnwev+S|{vu)}BzTtX20{Y2Y)&Mxv8$+0 zzz!6&mN&SUXs$R_^T~XlaT!qcQZmE?ihtIN4q<^Pqnt5`rYj^sO0}f1TJ##-rNO2s zOjii*vFE#hy6W!?NEbaQ+!x}LP@>*$wc6;hmIr#Q)e=i$Eo=o#L&>F5PnUYW;#J*6 zSSc_|zchP|Qw}YBY(p2R4`P}5t05(8>`UMC)$FeOti^x9gdBqW!uZ`wo#N zS>QTqK{-U=sSA!$0x{#rVW8$!A_sh+4q_C2oD8DwHKZh#6I+ZJmtEBP$Jxo7;;nB1 z*p-VItW(q5gG+2Ix=8;O&};e;D|zS|g%mx!H2jQQOd)>dvbmCZw~J0SE+fgpkSp5n z6<;2aBKk`)@9DkiwAK1Z!lxX54}U-;p=>`;QsCvfb)sGc<1HuFej>5hS}Lc}c_NMM zM*g*?4$o#pE$2|hDeXT2rCJ#eA4%o{54EqxVxc=&bx~dK6r8!xNNXrhOHe(e$b8jX zVXOD=iI5_C0kav{vWx0NHnA7r1TNGo;SU#wuY~cdQ?Z^vrlz;nyjoI^Uw=#?>9+&w z5(=N08+h;mRu{GnLi|GHV&DLkS{$N9?&14S6fZGxm3Hc)b~WvClG6`B6@qszo6Z6O zBhKzuYo?~Dry3!lLSomA;Mbrr@Y<76Z#UjFPU}V8ItBeYamP-h(`cKH0^~=0f9(VP ziMTKfyhKkg^P>I_a|$#)Lx08n$#<}X-n-nOF0s9U5tfTCYI@pA$G%eKH^hU9#GXH( z8l*=OYS&Yy)(E&qgfNCr>+RdOc{Tg2im&v1^TN9JsYLJ}bSA!FX)hc$|GFdEOWzi% z9kZS;6zd=LfyBsNZ3b8dT+6~tyyql$YRZyxZRX3->?N~XiXJ*5RAF3?6r$IC}!%4#~}K963k}gzKiPDlZmwe*HdqG z|MzUt`*5|^6!kiKh1Ti@8Gn^}`~zi5p@_e|M!o*KPw!u&!SJW+*XWmv@o+GF{~Gnj zInYNcNE_!OQD;vw4RrFzrTUZD(r{EwDQ#@BT zUx*hGW9$*C`wMjhlR(vZ5DWb%G}tvksScb%9UwKxpohK?xqr}LyHO2E40=lbLR@-S zFYUiNwx_kM|5t+NyI6u~4zF+elZL#PPo}Xg{&RBH$@qUy&)%LK{lCxAZj=(*6w^@( zXC59!9q?VY0_{Q3o{CV{G-|aQhU^8{cUzDd*M`J05U-K%0v4bSklk9SZf)rz%c7Cx zdj9Z$8e`CKjeoU0iyj{pO9q)`iEw*7uX6*L>xCX4>&UY727c67saC%JZx`VDz(qs) zPYeZZS^u4rvp1Rbe|Gw|bzJ|?(HCekeHay36(Qc|BJBxBI@Y@n16?7M)arlnJFz*|DQUl-`4jT4D&pu3^$|LL>ZG2 zfN??_Y=7AIGmZ*lls`P6XlBn(#K?Kn~qs!TF$a@n3wVECiqk@3ANq8DH}}* zXEyVEM)eUQ;a|_g`It+K&2}DnX!}~zReJ1Elq2GO7Jd1PbVPZn_i4bPgN)p>fC)(gS;Go%=1d^8LRcNU~>I z$madO)9PgNzdG$!>-4z)KS#SG)ajz0Htap1vX)E;!@e(?%XTe_dB!dtwbmkK5{m~7 zw|@r%75bjbO!yiKcMIdh(oxfGXc5Y98}53$_!Ch=9iS4uQQ*P{9dC|fSjFh0_l88Y z3w1-suCy0d2{tP;TEuF8hl^Q0GaaM#7m`m*ymmRJIN5=hsuTBPu1l=7vo5Yi)LyrOj}$q36XYZJZ<6+AGPzkR-X>Km+GMjM%=JzaFZpz-Fm|y- zCP3nT>71EiR=K2KC$PmBQXca4hfkYeOmvZU>y`;xQXRxW?t~z1j%9`(-qhTbOMi8i zSTcBCfn+N1SVDP|&?P47<87 zoPC#6TH!kFqIzO-P5&rM6FolW-hXYv-Q5N)@fbCVwjcbOAfSEeKP?wnJ2KMHV1YlF9;k-tM$o zqToH;OO7p*2WLfl{CRqFTm$RaJ@$$9yD!d9Tbm%J0T=ST-6{eKqkh(08GrZZKQqes 
z=<^tC7ZcCK|8fifU1q<45&Uq5Q}2)o#dT3LOdD+oX5VFHXp~47-6kG!YH33N14}Z1o&RUj< zT$m40y#D}od`74||Ne)|$>6p>?2WDm!<(WVF-#6!FP2tA|JN-hn}0|pEv9U|Na~X( z88=U(BWckFt{sw=NkW(dl?A;s2_(gcVhonhUiJb+nVgdnj&ypR$)2{1^V7->D={&J z$#=V9j;)M}=rOx%FO&4PniFxN2&j&WN^>Awfr%~CR|xnu(L z*oxi}N+iK<4_v&OfX%2Qy6B{pf(bXc_3L)}wGW&yr?_*u8Th}S#{JvNalh9e-V83T zZhsnF^^1(cpE&cfW7S8i8JT~?{s&l%VU`DoK1-PJ%E7VJ;eW6IPG{<>*(-P#D_HlXCb+iaGb65-ZHA**fTY%^4^X9X`};OCp0(d2eCzW(RGis|RISORYb zY2p5|g~KWg?#|yX?4^b3{q^mn_u=;H`u*)y|8xJUD%E_(V97MV@3D?hIuF%#;>tu~ z`%Wvt(y1^E_J4<;2jlDE$Num}i~Z>0=I4_05J)>wqSMqY3Pnc~Zv0y?x_yGSTaW9= z+q0dSvYj&nEkqQr<->jzA>0kduHsAxw`l{HS@q5VJJ-}*73hb}D=ERO_ANF{PAIC+ zz+oIeFRKaUmVcN1h*<*u9ckLwk~|Qysl5@l%zTmECx2#-GlY`atw)Ds32HSLTUZ!# zaQ9jv-aqXkaxLjLs0LYJdF?*ZZ9#MO^Ja|U$Au77E=vMpE7mS8Lle>GANhDEdWU6C2s zE8B&S%z<@b-AK)gkml|b^`P%Bzypq5U5IxFbeyX2vk(p(0y=RP`=r}!>TT$vccWnY z_x1wQIebG3tit-I%z-Ce2(@|rpR`VM^*?V2P|o}f}<*?*CB4l%UO*0FUCGPusJquUhb&Cx6}2g(Rd`Zu>9 zFaCLZJ?am2%=C8n>Em8w%U#7(N>kB$5-G15o!($_@$RaB`)Sm>xas%a?L{kdPgI83 z_E3Vdovc7?3M43yW)>vd-*TCaH^nF;EX_P%3KvLw8J|;cQ(fN9DhlbJI2S4 ziM{{XiM@JZR?S&cc$Vj#%E38}eBT_};tQZF$Xmg+Y=@P)tpxItP}F5l3H%z=cbCRE zex3sh&ok?fri0-=mW%W6f9Q<|pZnw6asU18=RAhAjBUs~g!|Zw5Vb;k z(_r3mvG`#J5@GR}1Lwu4GJooQ&)mk-DwgkFNdbu+zhJn94- z`SE4GIfU8Uem6}e(RVjT1HXfnh4uB#))yid7I8prsWQK=s$KlNal-XIxn?*e%Qh7l zctU;2PFDJUl_kgej(=l)M-ElVZnhn3dQ&?VkW}Jtp8}HfW?x-gX9QfQL{2lml9vb2 zosCf}e6Y28(mx7R#Nn~T>C<+j?$P-N5I146=y!4WAl@X?}($bpS6}NWI^}Q z_V(AS&`BfmQUR^Fvq>jY#?E0S@>t&U;^jTXVfgZNkBIRkbAQa2&GkyFJPR z>!}H>zxpl@dENo8;wthdV3kxEHe z|0q>jJW9>HV;$bh)Zy)JmTi&3O8Y}~am?)>)<(;0BWq>GUX>crWAS2PE3g0IQ*2uY zxdePm{omQi*-7^P@3-ya{ol{hGAluS^oF^ZQeWI37JvVlpZOUx7g7a68X5<3!8rLt zZ+D5h!Hrczx@Ty7Sa2_aQ|u-$Q`HyW?q5I1m5%7@9|yob(ItPPi+-)wf76%}2KcXa ze_i7jnvx;V68KaD%t6*)qq<9k{=c@x)j!NUsed`jfB@ZZ4}$s*&dO9xZBr4gbnRvl zL8+VXN zt67#+%dggVggS)Izs91?Tz+g#LsI*f{07s1gDt~VYlbpORqxbdyJD9s!mE@dDs2<` z{_`sDDf%lCiw*N$_VK=|stqsH`A<-@6;#B5wSVlBC^XH_+hqyuIf`Pv`ZR_5?Al4m z_jBu}!wC3)fl|s$L}i6^P<^DnNa@#FPfnEX^n6Mw{8_6KArEExw~`4tfCTS0FaHnr z*u4a$icCK|e47XG7j9M0|4+FDWXt)#eR9&t-v9IF&Drt%{~T@0`Ttor0$qpjEo}iW zq<{F6^YE>x{N%+6&!X%LgyE>>58ul5e<%Rh2*6W3!Q1qIEBE`4Z(2wH@AI@=BqVL< z#XCVJ1+q|)Y&@ZDu_1TAIBEwaxa-|S)$S!qUH-GFn?Y|8Cp94(x!A_B1d6M0Y~Z$% zPNI9*_WQX<)%xGlcGv&Ap9Hq*|I@b_{VU%&Jv-k2_blzj^nYwn9yR|_^B>!>9ow-T U+cUTS3jhHB|FUG0-TLKD1TKuKxvg0}%UF0PENP(a}-+EW7?sj!qBj|2Z1*@C-UN zgeF+P2z=H-o|8KP8Gk`dCPectm;k3BK`1$DMgpaQC7#8rUOn&w*BvsK*vk&;PX|m6 zIS8P#hLA1Dkh29*ssea5VoGrRxKo1WNu#xyq)Qe+L5Lw|6R1Wp>f);|33~0uQF}8% z${gtGg*Jj25t8FA2C6!T%hcjg_cRL?#%v2R0zrP?W{SD#kbnB5KQ@lnld9P+3!HGr z7zPuU*q7=_t@i2_8o?AeP#chBh*3a{Bad-F0&EUasK*mPfdB`6nFG~i2%RAVt`OL9wMkOW`cvIoCG&0NQWPv3_bGmHS z^z&ihx?{sl2Y>zYYwZ=XklEk7W%%`$A7E)f2H1mIO#ojI8!l{{1yri}wK-r39VB@G z=oR`|4MqL?H40%7HoyXXA+D>&Y`QpT$~aoGpn=99)pSg;D*%zHdWgPYsviad)ZZMj zFSMcPF)BH8UEm${r7g1>R}U?L(JQ2wMQn=3AqyeD;eVV*^cmOFA4+WkotDEL5Y_ku z+u2D`=vPLA7L7~AsjaLHg&I@JBvyZEnZS;t|L3|$&H?$%5jCGV7co=^Jv`>oxOf8Y zIt*7Dz;s5Ig)RdjnV0EDsAR|ki5)C)&DJ1^jtIR%cmEO=_WisOq5r+HLL%S`vH-+Euz=fu#GG+iRr+m4ZSZBh0QBrM)gtgi(w5Hw zJZ;xCYkrcSf&L<5DI|E7?FK>uw`@)%v9YVDkHHQUw3auxmuRLqRrASwpK%#b^-?m# z1BzCQ4q<^Pqnt5`CQBqhO0}f1TJ##-rNO2sOn;UL?y={)fI8~$3`hq(DBKs~lTf05 z)M_1}$66lfu~thgiIuPwEDa@>Na!C6ITLaK@(bg4D?K6SM6Bk=W`XOd1?3QdCoVWj3B-&ehk=?`i5&2O zI)8{!^l{vey4R4BSWav)WL$Po`^U56lj6N^4%n6R7_42>+k;DNEILU470_$?5i5D< z7=;u)yfpj_T}&Z<<+7QQdAEa3H7+B`!jLQ4@D*Pkks|s_G4JWU>9p1QNW!NaehWY( zp=>`;QsCvfwWD4I<1Ht4^h9E@xl~T0{eMIn*^T^bO&y+2iCWH~ic{Ku3`(^!9zK%H z1s-Z&jm1KDu17E(r^3JB@{k0H}K#CEH7*ug!q}rdEWslwSPE7 zi`>KapD12n;wtUbK}SU@=d_y2InF5!pbG0-mrbUDfFWo1%N3`S)KiU+P$9AFM(``p 
z7>8mIN5Zta48?YLvR(QX`>jsoOIe1Gi&eMejv1YV-2mw8eD(VPuU&rq9x z@*OOp`!+YIOKi_!h~>P4nw~b@v45}h`wj76EV1YJsRrqigf8}!{WS#c5h08<)Oz#g zO^}MhGeku|CJDrLzSlV-k&A#r4c2dHHy2z}j3&r|JeM&KMSDONs0oSrH z74JC<{OD9S0_^p@dOWt~;CkwRyC1GrnxbAuuh2>dA>+kT zkAI*{DHQRK*Qncj`|15_)E~UNevN*<7!CS^_pecJG`b!sGfdgip?c9ws0Z=iHJ8xf zUX4bnQWsuUw;Xn?_vp}Ik>** zjT`b_KAFa*_|Hk}DC7Sr z4B2zA?=~Pao(+j*AYLQi1uQ_FA3L>BvD(r_mPI4O_59%hHAbM}8Y^=aJw7Uy3^L0S z;YN5~$G)Gj(G{i~5`XmgSVxwnckaKssw!Xqw{viP;G!Y@Cx(JHtpE1$*-2*opPinb z9oGMIw1Uyi3@V+!?~xX#hyb4{j>S_SgUEU-o1~hqwNG5dVaNOp;HDl z+{1%~9GDUcsIKij1y@H_{0axIL=BT=px0vKvxzt=$m<1gPJbLge<-7r+UV?9|C4ye z5JYt=Mh4W;|JJ`4=7P5Z-Z>#^(`+vLj=B$0#|DU7X z5$beMR~z;oP+3bRgkj$o&BakI3VFuP9JSUWWfF@A4Yvma6}q0wO!yiKcMIdL(m~Tr zXc5Y98-MO6ns|1^s87*Qpzr)2W zpP7!)`U}Y?CSE%iQ=DwUOVx?{SFTH}wX@EwM&!v#_A$=A2(%HoikjLNUza>sMM@0* zP&=29Tm|MM4W8M^;`eKYkk|mys1sQOai2uOL4T)U>&$ff10|_u24nUnj7|7*f9Q z)rU`;U`%w7cHou?T2dXvG46yQtq)~}zTDK@luLD%STc5Afn+M~SVDQ7&?P4o;GVpSIRPOadB}(ybdmi6Mt|_b z8BV=JCg|2d%`j`UA((xam7!rGTXd6n#F?dO$yn%MqYhr?B*etOP0&0_LXMl^UhxIe zatKenj^pE~z|Kfniy>f!-Gl{l)J`%SOcH>IWslFBDU!PL;D)xDddD+u2I|})3_^WMw*5bTGJM2J(1#7OxbvpRuQ-mgLxVqPk)OxbM2h8 zOzOc5s4VE6NgF9f6l1W2_P7@y%4C|9aHKQsOd_>q9HCZrSc!=#%){Fbb8K-`M8C!u z(dm2^c)nbw=#`)^%ni(7_8HIXo0X+(viYhAP2u(&f@I$LoJnRgw}WmjhlzKEiEO4# zF>wP9H**eR&Roaj6UI#`_J8*vI~ht^xucby&+=h3rcvy_XJ4)vd9{rJA$9sE*zJOg zmt(LQbwmdpw^A_SZnb)Slz#04C(J4CTyE3-|4*ad?d7P~?G0}F7gx9M`d7Uo3-29g zUUts(IcZ8}AF=-dmLr(v0iw?vCa-dEEOpW=fYVvGYMu+;MX6GWEPu=FK$2Rn*g_2` zx$iWa<#s{1H&Mu{i#SOP)$7?W7cubD&CPIpI~-mA`#;6>^J*x8H-ofrgV?}f6$ZEG zZyWZ~LiPUocHI4Ndv*Q(_Nw=}cU6^YKGCma8sN8h#VMtQ>N*Q$EU|s3wOHv?7zTTT z&;8N$;A3xaqs4xBaewnu$>9d1ohY|yY8Hi}qX{?uD;V88L7UCLRT%Br4nWz?nSmA} zir4afzlsp<8f#T?CWPCxfy?YR!2vth)Ljkg()izB*7+38{kMMFj#({P zZidkUk98q``e>l5NFMU*wNS+?yI2nTSUa+}>@Z-4BR?*bcOu=5-E_`GTtV-yH zYF>miSEr~4J%0`!aO~=myE~wxR4t!{aNrQoiL=-zon}+-To>IN1>3u~=a|mm8`60d z)<0zqJn2HH_3Qt*b$a$DlmGwb^yEG@&z7Zyi}qlR6MKf3V+8d7#$k z5hef~DxzII$^naxB-IR(9S$Ka4 zp|k#sveo0>J=lT1r89LbOfq-_+!NW@jAe9H@80JS-WpL5c?oYrk*4xL-{Q*#22A#8 zAxv17U-(W@SP(7)>{@X_e|;<^iRGZ@J()UNu-4Nso7cN}@a^U|>hbI~vr)~=<(llq z@JbXbx{t*2)nY&7ZIruZb56$0)r(QHndf2ZBA5;(RcS>%!w4W|BJe|&Rsb#?pp>iX@w zPvgSaaeD5xOsDSfX=Lu!e{&;LhS=ulgR-5hK&*>BD3E3rB-`K8$uAYKH1mKdTp;aDSYnWM znf967*%;q*BvNh}Qa>d2{&^C6^&+jBxutL}&fAq^dm8z!Ikd%>L06Eyg3H+UD}fsc z`61#uFaEoyz zgwF($!=a&|P(4!BRX_6FfsTf!XKwRY#CM z#wL5F&lqcwf0&a+23L;fYUhu-rS%$d&cFZR!>6~s+rf3WcRRTF*ejyEYu!SHDgn*& z*QgIDN4%yo;M=RTs8Xw7JbM+Y6w-n;0~zI=35lgRF9;47-Dg_DsJ@CTin@XauagGLle+Nqo>zkadFGS8Q;(*#xWqw^% zy90XdgzMXJ&2UPVZ7N8xTRoDQkkU$|Z@W5VtJB4yF68CwLUP{6^ftB`YkD_36rNP# zZ#G;5I{Dl0PgI*rjKlcvXiG#nR+}}|aT~AG5{pGiuf2~9*#eAtttolB-_ci#un_KR~msUI# z#6;It&m=)N#zi+_%EU+4#6~w&^dxYs1AUTvIenDeP^q=X@Y$Wkoj545d!9T|xAE_R z?!s27=X^b5Pb!~VW4%?Lj#OU4>Nlv;;!$em9qRX9rhadGvuuMDR@xt`i(_v8u+~~; ze`{GQGxn;~mL7^8v#q@ThflC=?d1~i4fTI#$7jcx`oE($twa6Kv$V`2P#?WvE~eBM z_lL!Q=4XCJ%!O2ekcP&AoHI^-*E?OJZg6eYknS1U9Twb6;1s*b%T)Enx4YL5a-|+R z`o{sVPjtzj=%8Qf^zJs$eRa4tkL@QmpSwuO6uw5)f#c@^~6D)wr9cC^Z&1+Qm%gAjWmMw_d z@b)^OCn2;M;|`(8B~e5}*T(H-(rT7v)$*(LEujvfv#+seGnXG*laSQzCBMPsFTY^R zaMhZjj8oM+wb-QC=8EttC5cMggl_-5%6p3bio{~gyqA5vuc~UpJ9z#VlNc5x6(>+i znTe>ZkoKyN)E6oJTC2&4Qk;`f78QRlu*dEsC{<+o;o+M*dw&b9>iPdEmw;?I{~sM6 z=YId|e{;ZP-+vr^hg$n%p}Yk`V*Xw0 z$+WVA`;#>6lh42fVYG`D?Hcy|#Gj4MaidjpK-ir4Qa?QZJ>Y=ex@>~Rl5so(bTJ+v zBRw^WJ?Ns0Ow|@yj4wUtLtC}%`d?r-0I^>Muzvlw+wIP2cKsh8wGQk5IU4cs47xRh zCRo4-eAY#t&67O=7=J}gCPectm;k3BK`1$DMgpaQC7#8rUOn&w*S%&gv6o#mm=2k| z<{*H|8bY=p*PJbgQWe0f5mSQe$DI;1j~lJcBwexq3PN0SHi2psqb|PclAzaVv^$## zQszKUFSHTNh>#p_F;LYxT&5O}x~EyFFlJkj5eV}8HdD+^hkw*3{jqVho>a|tS>S{_ 
z#&s}piG87-)M~F@p%F}h1GNE3h8P9JIPw?=B*5k%g?c;z6bNw8mpM>PhR_+(@+uSx z4%050OX-VlGr@F&u%^Ro(Ue%+iKflyl*|GSmd_l^W>jKAfj1RCOCz&3^bMeyzPi7Bc&rw+z4D@&hal$Pjx_s|nx>V#B#@vw%u9zcvRfp^GFB z0KGy#tD&fWzeXV}!UkBNFT{1#m`xW4O&Lc^7BtWpq?(Q?b_F0ZRS(e@O!dP+fcl#w z_JuYSJw_#Gt_!@YzO-dl=RAh$qKC&k8W&H% zU5DXP1DMXpvd~2!B=a);2$c+ZAhClbuGtzS(Gj6n=#}ATeiLR+WC6Q5$?2F91C|O|=O8khJA< z08iU>&6=O&XQ;o3SPBWAWxIipz%82-No?#Y>SM421+C=`?j@QjPSt!e-)CF~RK1i8 z@qnV$qC;39$|z?{qRA2okWwvatQNgScVVz83V)L&f_v=wE}*XZI|I^14+{6W_#~95 zw_B|?daUJv9&5G4l2{2_!O~E2snpYjp09XScM(il?mbX>gm%>lb|9)opidV6q*jYSvfzXEziKVl^h zU89hqhZlyQYZp_9U%70iWZvzf6OGGAvM}U|HhjgGN2G}UQp|gLZ#rqUK9cY$hu;Da zNhsS7loWWmZk?!C!FbDwwVy~VHkZn2bbp>mBfF7*t*OJ)DN)NgRB=lCk3p$c#=}RF zxxhp1tFc(<4whY1*ES3byQ``jCz7IXHm} z^-B1|#o;Sq{OVM!Cy=S>tu?Qf)Z-UZNc!!7x`e`K<^~>ofaSSugAhLxIUhJcrGFNO zXpwvP{u9LuOkAa%x~N^Ga!#tLoTHr50IIOQb=hPZ2)O3#e!1dwl6tBU5-KEi-3WdK z8UwHV8})YMxN%Z1>eeaf*NHoJ8l6VlbQB;z;`=Kf=sV)VFz^yRz08aHkLGM>dWPEk zlkZ>&y|=kRU0{0-*I3TGsOf3b9e?{uzuyoK#u9t}fNGE)N$6rv*Sk zrs6$kfghdfMu454IgxNhZBQ!U-S}qd!wT1XkVa38C01u8Ba1a-_jXeTet*Ljq@mSU z=vOc{v@+t(XP}s=D;|UB?{P4llKU>IUyaAs99&Pm)&1YIaqq+BN>kM9=oMP&AY{B) z>hTYhDTN~b@f!8|Z$G_%jRwPaSFh2}=cD0Z`2IEOk49G`WritRI#e&33H2cUyXF!a z+^f+jRqBH5qUHkgrb{Nx@PA27uW89y7dbE?nAU2_sB;;1R!UQ=X#!LZdLfxt=ohAK zaP0ny22*WBpP%>CVpoQjIl$->6t}PtWJbY39;SGv48agDBE|wFRQDI^Tqc34Ln0RX zQE0_$f>NDAg*sJgkUmN^Ia@KG>2C= z{c%Iy%O}&=6#qGHwKM+Tlhf1Fga7wA+Ko~->tZ@ewamhkr~|&smY^Ld+EEc|l}4?0 z!;n1(`)&g=WLY#aT+bdJP-6rduCX#_(c`0H$sn^V z5pINMb?o~Y8(m?_A%8)Sk9A~OdguOMS5@Wf|8@?p4_q{)|HM$xhV|b$Iz7&;|I?E< z?Zf(ij`r}-{3luvud5A$DRDtAeK_;fwAgd#qJK6YA8R!Y*1z{Lb+ny8L(i`^Ep*C& zhI@FhkONaf0oAp=r{L3 z7=oy7#mIm<`rn$j+J*Kh8(s`|iq$aktc)NXN|-ETj*V}(Q7dL#gT%yqPNsh`v0q=`fYue!7$Hb%0x4YO_VVi0T>^&!B(sTyXfHoMQc0t zZ5pVslRgkr+kbS-YR__(jlaB%UpK+0dP%7522R;%N;tEb=QFAg3kmmn9?r*HT5P)Y z$U`gFnk>^}kAfT#@6+hZpRaNziV?2T9~8D+mp{;N0h6g&eSEB`vra?In_3Kuh7ihU zBgsy{l1u>Ex(?Zto|D9GRoB!2mw^|4L|5@4{ zp-va|v|;Z7m9=C-81{Y9T(oOZ$TN24sI?X;lUO`xxIGxC(Dhtq!q-r^TNrnh4w`O4 zi%@>saDP|Z#UF?g>HwAKSb+;4bhtSVVHKl~-Ww9pF4PTOxYC|qCfKaZXc4RV9WG}1 z)O3v2Ur0VN@!GkV;$#b6s!rU$a$RDropok4B2QMbk8$orppD2?)YQKCy5zwsQeyCj z+PQ?}Dli{u@XSUQzh5(i#0HQ?oyZ!9`y>($x_<>*XQta9C`mOlAR~b4q4uv8e5A;& zn;;)KdXu!*lgZ6m@iwVi(I%T6VXk+Yc*& z6@O%jBtBN|ZPK`nK6g4pW5v1vTN(D(oT)Qt{2U>59_^x0Kqc{@XHJl7$CrqN#Dr7f zku63X+qyANRw7)R%svc1^Bjw74*G=Gtf!Y%Bc1cmh?DPD+-Ab*7UOeDoXO?%y1oOSBChuVI{j>AESv+sX6 z@AXFg@%SGFfG%JMxCLQm*k*_duE+u;N>W)M&)S_Z46{ZXg4uUj8M;nni*6E+II}b@84De3)WOS~gqZlZ37SVq$Z<2=E51Nl z4&jN{aeN#V*cmBnF$B!8o3KFIog~B2q(vcj%cKrhP!_2m{eE@D8r1n^n+D!dQ|yak z6+MyG2xS6kAobT+t=J=x128EnmVeKexa2 zQ`~Npex2T1LFVDyD3_NUNw@h`~IKj-*8!xOPZdCV$;v22>XG z&ZLSIBZ@ItLVMW@5M{DUN;uNlbtZY*GR{vcJFLXS6z1M-hdH(~DxzOwfanxH3p`&g zQ}jyE7v=+IFf09+lhBoQY%=(sq8`y|e>M=mXWsdoNoF&*i*7EicVQzfyM>fPkcyNG zbLdQ)V&VoIZsr`soVkukWq*v@TuT2v$ZnRBQOzY2pvPA9j!+^Ac6;FB?l!r=D&ZNpw#sNP@Q zj(Z<&FR$L;UiLrtFRN0`=LVKc1N;{22&MB-T_>)LCAROh5-gnx!(e~-c`&*fe(Vo# zwAf#t-~3c^9s+46Dt~mEnnj`LXu^&E3Pv|i&}Qp#6?uEMGgG#6W}tanAN_;hROa!^(i=vKG!j_pYvi-#DaE4GayYc9dtU#^iVhihH_U>LQ#D81p=87P9cdI>-R<-j* zRmWMO+wD1=X@9rom^v<4MuAy`1_CVLobfZAXa4t>^;bo6|E(X^V^&L+n_;xT$z8~w zK3V9(lZX6zEmZMyCd^>cX6NRO!?^!@gzI?4@`{^GuNBNrJ4dl9-pFHKFPDcfx7i)` z#!mSzuo2mYEq`HC%rO(#DUbLRiEOrWVK6tr!AmDi{DoSD{GZUY-u+6LP5J*Pr^JWIWpWqWHIpH9F&>x_zV*pKX-{7!#b<}y9V{ib=Og$n zARL^y&e3PifKJ&JP1dmGD%TQ>h2gJ83c|2xw!TX;1zCG(yYP`YuqvUuR`VjHxjIEX z==*cL7?;7J!k ztzZ8~tY$Wy*_XOT zd;PbA^Wp8g(be##KkOB|Nb8hQLM3o)OR~r%DH=?J;eYt%{PObl?d8?mcb~?Ef#CGq zYne{n;nT?6!&Bf9HpC(#x1al?@!)E>6S2&VK&8a8BkSyAXq}B?>+EH4ooz?AF3g*w zS!52B5gPYzZa<#?`}XR(Kh!bP+u^5=JB=-O=~5|8Mej+ZylQlMgYo&>%l_@B>)!cI 
zzxQ@0T7Q`vp)$lahZ2V;V~XHDT*oOLP(=QQ$Nb7+e%fUY2K1sAaGSL!wr$V);| zmmMYWYf#@_8sqqR4lF#+tUj6!hWl8~&%XbmH-8#@?vHLq{r9)ytBVh}*XK7s@5Iz$L z4u_5&ADg}#%m@|D(hy1!6JI5%Anu&{BI{8kL3h=G1xxWzP4EoW1ZJPxR~RmiJqM*B z^Q@NL)LV>Mt5nO6ukFnt%--v}ZYqhsx_>zu_#G@QtZ#3&z7RRLhy!X%mHBm5?cV3L z6RvN>HNz=cwyD6tZuLlJLP{%5~URLr7pJW``F&s;PY;7xes4j0#y)K zU0Y6*1l<^0-GnI7b8m%u4@lb5OLi*I+YALMFjclD10 zV4vucKhZ_M)a$=$ObNUBmw&pys__dgzz}Ewe4+tnAnUJD-6cZ*Uz^bCA7-A^zZ|vi zgYLHjL45~jWvZsOsfbp(cC(0b2w}Teh>GK^I3`#Cl{?H_IGWd}?w66svw{LT6uN(Pl0`wk9E|-7`ymgUNrvmf@;3 zLm8*4cWSXYu+0_WRZ0?-wh3MKd6oAR{S}GDnt3n#cwbf3h8ODm8Iv3qB!3rBN|}kM ztdRDqkJJ|_{aUNZiPD{(Pbq~zYgHoTp-lf;G9i1A;O*w+e_)T@Nl>cD^uxp9_Mn1R z_5A;oOF%Z9|Jz4Lo$UQT$H#9D=l|zu8_xgFx)JC)gl}mJcp=4~oQH2k`p-{J+oBasiCAp%?E2nassPMY8sUw!wzn z{^F=Dl;E~^6IHvHD0TVIx^4!&MV!=xtmR@8#}X*6!m)wdNIHq`VbkyD9#rdJr){tQ zw?7GN)c+@MGWx%Ba(ejv|7U40rvGDu@}T(-n*Y!a?a&VG(4M*dUz0!=1qk+n%vk`a F006ykg027n diff --git a/charts/v1.24.0/blob-csi-driver/templates/csi-blob-controller.yaml b/charts/v1.24.0/blob-csi-driver/templates/csi-blob-controller.yaml index 9ece72de3..38ef78cfe 100644 --- a/charts/v1.24.0/blob-csi-driver/templates/csi-blob-controller.yaml +++ b/charts/v1.24.0/blob-csi-driver/templates/csi-blob-controller.yaml @@ -91,7 +91,7 @@ spec: args: - --csi-address=/csi/csi.sock - --probe-timeout=3s - - --health-port={{ .Values.controller.livenessProbe.healthPort }} + - --http-endpoint=localhost:{{ .Values.controller.livenessProbe.healthPort }} imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} volumeMounts: - name: socket-dir @@ -114,17 +114,15 @@ spec: - "--cloud-config-secret-namespace={{ .Values.controller.cloudConfigSecretNamespace }}" - "--allow-empty-cloud-config={{ .Values.controller.allowEmptyCloudConfig }}" ports: - - containerPort: {{ .Values.controller.livenessProbe.healthPort }} - name: healthz - protocol: TCP - containerPort: {{ .Values.controller.metricsPort }} name: metrics protocol: TCP livenessProbe: failureThreshold: 5 httpGet: + host: localhost path: /healthz - port: healthz + port: {{ .Values.controller.livenessProbe.healthPort }} initialDelaySeconds: 30 timeoutSeconds: 10 periodSeconds: 30 diff --git a/charts/v1.24.0/blob-csi-driver/templates/csi-blob-node.yaml b/charts/v1.24.0/blob-csi-driver/templates/csi-blob-node.yaml index 328a911e9..e5ac93ee1 100644 --- a/charts/v1.24.0/blob-csi-driver/templates/csi-blob-node.yaml +++ b/charts/v1.24.0/blob-csi-driver/templates/csi-blob-node.yaml @@ -118,7 +118,7 @@ spec: args: - --csi-address=/csi/csi.sock - --probe-timeout=3s - - --health-port={{ .Values.node.livenessProbe.healthPort }} + - --http-endpoint=localhost:{{ .Values.node.livenessProbe.healthPort }} - --v=2 resources: {{- toYaml .Values.node.resources.livenessProbe | nindent 12 }} - name: node-driver-registrar @@ -174,15 +174,12 @@ spec: - "--allow-inline-volume-key-access-with-idenitity={{ .Values.node.allowInlineVolumeKeyAccessWithIdentity }}" - "--enable-aznfs-mount={{ .Values.node.enableAznfsMount }}" - "--metrics-address=0.0.0.0:{{ .Values.node.metricsPort }}" - ports: - - containerPort: {{ .Values.node.livenessProbe.healthPort }} - name: healthz - protocol: TCP livenessProbe: failureThreshold: 5 httpGet: + host: localhost path: /healthz - port: healthz + port: {{ .Values.node.livenessProbe.healthPort }} initialDelaySeconds: 30 timeoutSeconds: 10 periodSeconds: 30 diff --git a/deploy/csi-blob-controller.yaml b/deploy/csi-blob-controller.yaml index 9dc5738f9..70117c319 100644 --- a/deploy/csi-blob-controller.yaml +++ b/deploy/csi-blob-controller.yaml @@ -62,7 +62,7 @@ spec: 
args: - --csi-address=/csi/csi.sock - --probe-timeout=3s - - --health-port=29632 + - --http-endpoint=localhost:29632 volumeMounts: - name: socket-dir mountPath: /csi @@ -81,17 +81,15 @@ spec: - "--metrics-address=0.0.0.0:29634" - "--user-agent-suffix=OSS-kubectl" ports: - - containerPort: 29632 - name: healthz - protocol: TCP - containerPort: 29634 name: metrics protocol: TCP livenessProbe: failureThreshold: 5 httpGet: + host: localhost path: /healthz - port: healthz + port: 29632 initialDelaySeconds: 30 timeoutSeconds: 10 periodSeconds: 30 diff --git a/deploy/csi-blob-node.yaml b/deploy/csi-blob-node.yaml index ed540812f..791866e6a 100644 --- a/deploy/csi-blob-node.yaml +++ b/deploy/csi-blob-node.yaml @@ -81,7 +81,7 @@ spec: args: - --csi-address=/csi/csi.sock - --probe-timeout=3s - - --health-port=29633 + - --http-endpoint=localhost:29633 - --v=2 resources: limits: @@ -131,15 +131,12 @@ spec: - "--user-agent-suffix=OSS-kubectl" - "--metrics-address=0.0.0.0:29635" - "--enable-aznfs-mount=true" - ports: - - containerPort: 29633 - name: healthz - protocol: TCP livenessProbe: failureThreshold: 5 httpGet: + host: localhost path: /healthz - port: healthz + port: 29633 initialDelaySeconds: 30 timeoutSeconds: 10 periodSeconds: 30 diff --git a/deploy/v1.23.3/csi-blob-controller.yaml b/deploy/v1.23.3/csi-blob-controller.yaml index 06a11ae38..f7c42ea81 100644 --- a/deploy/v1.23.3/csi-blob-controller.yaml +++ b/deploy/v1.23.3/csi-blob-controller.yaml @@ -62,7 +62,7 @@ spec: args: - --csi-address=/csi/csi.sock - --probe-timeout=3s - - --health-port=29632 + - --http-endpoint=localhost:29632 volumeMounts: - name: socket-dir mountPath: /csi @@ -81,17 +81,15 @@ spec: - "--metrics-address=0.0.0.0:29634" - "--user-agent-suffix=OSS-kubectl" ports: - - containerPort: 29632 - name: healthz - protocol: TCP - containerPort: 29634 name: metrics protocol: TCP livenessProbe: failureThreshold: 5 httpGet: + host: localhost path: /healthz - port: healthz + port: 29632 initialDelaySeconds: 30 timeoutSeconds: 10 periodSeconds: 30 diff --git a/deploy/v1.23.3/csi-blob-node.yaml b/deploy/v1.23.3/csi-blob-node.yaml index da2c3e95b..da4781ea1 100644 --- a/deploy/v1.23.3/csi-blob-node.yaml +++ b/deploy/v1.23.3/csi-blob-node.yaml @@ -81,7 +81,7 @@ spec: args: - --csi-address=/csi/csi.sock - --probe-timeout=3s - - --health-port=29633 + - --http-endpoint=localhost:29633 - --v=2 resources: limits: @@ -129,15 +129,12 @@ spec: - "--blobfuse-proxy-endpoint=$(BLOBFUSE_PROXY_ENDPOINT)" - "--nodeid=$(KUBE_NODE_NAME)" - "--user-agent-suffix=OSS-kubectl" - ports: - - containerPort: 29633 - name: healthz - protocol: TCP livenessProbe: failureThreshold: 5 httpGet: + host: localhost path: /healthz - port: healthz + port: 29633 initialDelaySeconds: 30 timeoutSeconds: 10 periodSeconds: 30 diff --git a/deploy/v1.24.0/csi-blob-controller.yaml b/deploy/v1.24.0/csi-blob-controller.yaml index 415d5a055..9f9539c79 100644 --- a/deploy/v1.24.0/csi-blob-controller.yaml +++ b/deploy/v1.24.0/csi-blob-controller.yaml @@ -62,7 +62,7 @@ spec: args: - --csi-address=/csi/csi.sock - --probe-timeout=3s - - --health-port=29632 + - --http-endpoint=localhost:29632 volumeMounts: - name: socket-dir mountPath: /csi diff --git a/deploy/v1.24.0/csi-blob-node.yaml b/deploy/v1.24.0/csi-blob-node.yaml index 805d95e90..e2d237b9b 100644 --- a/deploy/v1.24.0/csi-blob-node.yaml +++ b/deploy/v1.24.0/csi-blob-node.yaml @@ -81,7 +81,7 @@ spec: args: - --csi-address=/csi/csi.sock - --probe-timeout=3s - - --health-port=29633 + - --http-endpoint=localhost:29633 - --v=2 resources: 
limits: @@ -131,15 +131,12 @@ spec: - "--user-agent-suffix=OSS-kubectl" - "--metrics-address=0.0.0.0:29635" - "--enable-aznfs-mount=true" - ports: - - containerPort: 29633 - name: healthz - protocol: TCP livenessProbe: failureThreshold: 5 httpGet: + host: localhost path: /healthz - port: healthz + port: 29633 initialDelaySeconds: 30 timeoutSeconds: 10 periodSeconds: 30 From ecfa4adf6594fcf35c23bff7835f034286ad0310 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Apr 2024 16:25:38 +0000 Subject: [PATCH 119/157] chore(deps): bump sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader Bumps [sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader](https://github.com/kubernetes-sigs/cloud-provider-azure) from 0.0.2 to 0.0.3. - [Release notes](https://github.com/kubernetes-sigs/cloud-provider-azure/releases) - [Changelog](https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/docs/release-versioning.md) - [Commits](https://github.com/kubernetes-sigs/cloud-provider-azure/compare/pkg/azclient/v0.0.2...pkg/azclient/v0.0.3) --- updated-dependencies: - dependency-name: sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 4 +- go.sum | 8 +- .../azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 17 +++ .../sdk/azcore/arm/policy/policy.go | 10 ++ .../sdk/azcore/arm/runtime/pipeline.go | 5 +- .../azcore/arm/runtime/policy_bearer_token.go | 1 + .../azcore/arm/runtime/policy_register_rp.go | 45 ++------ .../Azure/azure-sdk-for-go/sdk/azcore/ci.yml | 4 +- .../sdk/azcore/internal/exported/request.go | 10 +- .../internal/exported/response_error.go | 4 +- .../sdk/azcore/internal/log/log.go | 2 +- .../azcore/internal/pollers/async/async.go | 2 +- .../sdk/azcore/internal/pollers/body/body.go | 2 +- .../sdk/azcore/internal/pollers/fake/fake.go | 2 +- .../sdk/azcore/internal/pollers/loc/loc.go | 6 +- .../sdk/azcore/internal/pollers/op/op.go | 2 +- .../sdk/azcore/internal/pollers/util.go | 2 +- .../sdk/azcore/internal/shared/constants.go | 2 +- .../sdk/azcore/policy/policy.go | 10 ++ .../sdk/azcore/runtime/policy_bearer_token.go | 8 +- .../azcore/runtime/policy_key_credential.go | 21 ++-- .../azcore/runtime/policy_sas_credential.go | 20 ++-- .../sdk/azcore/runtime/poller.go | 4 +- .../sdk/azcore/runtime/request.go | 101 ++++++++++++++++-- .../sdk/azcore/runtime/response.go | 4 +- .../sdk/azcore/streaming/progress.go | 14 +++ vendor/modules.txt | 4 +- 27 files changed, 221 insertions(+), 93 deletions(-) diff --git a/go.mod b/go.mod index 9ba6358ca..d0bffe34b 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ toolchain go1.21.4 require ( github.com/Azure/azure-sdk-for-go v68.0.0+incompatible - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 @@ -38,7 +38,7 @@ require ( k8s.io/mount-utils v0.29.0 k8s.io/utils v0.0.0-20231127182322-b307cd553661 sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc - sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.2 + sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.3 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index b31f73702..93fe81b41 100644 --- 
a/go.sum +++ b/go.sum @@ -5,8 +5,8 @@ cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGB cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.0 h1:U/kwEXj0Y+1REAkV4kV8VO1CsEp8tSaQDG/7qC5XuqQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.0/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= @@ -466,8 +466,8 @@ sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc h1:y/jDBU sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc/go.mod h1:nS6tUQgopFVyqFQW1P/luDXNNQfsQOg3WFs9GhzNjm8= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.5 h1:ItSTRGcTTVArwQwNOyRVqyIZwaa8QHrYPOGb8Dg5CZM= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.5/go.mod h1:PAr/DSzJqB6Pwf609TVAtAMr8uErgASj5tY63w/SNEQ= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.2 h1:quGUBBYzHpEXd6PSNUS9UpheEcwW+AbDtZBIAuzS0jM= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.2/go.mod h1:XG0A5DEGP8oKo97GeDAcd2rXOCVX22JeHNLpiDVgfZg= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.3 h1:4Sk81u8kxM+yTjEfC9lPP3YyLpWeDTFRquI/3a5AzRY= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.3/go.mod h1:FWY5fW/WLg/S1Iul/Mh5YeRyQhl8Ii1Sk+YLLOj3KDk= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index bf0c3e1aa..6f32eda55 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,22 @@ # Release History +## 1.11.0 (2024-04-01) + +### Features Added + +* Added `StatusCodes` to `arm/policy.RegistrationOptions` to allow supporting non-standard HTTP status codes during registration. +* Added field `InsecureAllowCredentialWithHTTP` to `azcore.ClientOptions` and dependent authentication pipeline policies. +* Added type `MultipartContent` to the `streaming` package to support multipart/form payloads with custom Content-Type and file name. + +### Bugs Fixed + +* `runtime.SetMultipartFormData` won't try to stringify `[]byte` values. +* Pollers that use the `Location` header won't consider `http.StatusTooManyRequests` a terminal failure. + +### Other Changes + +* Update dependencies. 
+ ## 1.10.0 (2024-02-29) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go index 83cf91e3e..f18caf848 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go @@ -20,6 +20,11 @@ type BearerTokenOptions struct { // policy's credential must support multitenant authentication. AuxiliaryTenants []string + // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP. + // By default, authenticated requests to an HTTP endpoint are rejected by the client. + // WARNING: setting this to true will allow sending the authentication key in clear text. Use with caution. + InsecureAllowCredentialWithHTTP bool + // Scopes contains the list of permission scopes required for the token. Scopes []string } @@ -44,6 +49,11 @@ type RegistrationOptions struct { // The default valule is 5 minutes. // NOTE: Setting this to a small value might cause the policy to prematurely fail. PollingDuration time.Duration + + // StatusCodes contains the slice of custom HTTP status codes to use instead + // of the default http.StatusConflict. This should only be set if a service + // returns a non-standard HTTP status code when unregistered. + StatusCodes []int } // ClientOptions contains configuration settings for a client's pipeline. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go index 302c19cd4..039b758bf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go @@ -30,8 +30,9 @@ func NewPipeline(module, version string, cred azcore.TokenCredential, plOpts azr return azruntime.Pipeline{}, err } authPolicy := NewBearerTokenPolicy(cred, &armpolicy.BearerTokenOptions{ - AuxiliaryTenants: options.AuxiliaryTenants, - Scopes: []string{conf.Audience + "/.default"}, + AuxiliaryTenants: options.AuxiliaryTenants, + InsecureAllowCredentialWithHTTP: options.InsecureAllowCredentialWithHTTP, + Scopes: []string{conf.Audience + "/.default"}, }) perRetry := make([]azpolicy.Policy, len(plOpts.PerRetry), len(plOpts.PerRetry)+1) copy(perRetry, plOpts.PerRetry) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go index 54b3bb78d..765fbc684 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go @@ -64,6 +64,7 @@ func NewBearerTokenPolicy(cred azcore.TokenCredential, opts *armpolicy.BearerTok p.scopes = make([]string, len(opts.Scopes)) copy(p.scopes, opts.Scopes) p.btp = azruntime.NewBearerTokenPolicy(cred, opts.Scopes, &azpolicy.BearerTokenOptions{ + InsecureAllowCredentialWithHTTP: opts.InsecureAllowCredentialWithHTTP, AuthorizationHandler: azpolicy.AuthorizationHandler{ OnChallenge: p.onChallenge, OnRequest: p.onRequest, diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go index 83e15949a..810ac9d9f 100644 --- 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go @@ -8,7 +8,6 @@ package runtime import ( "context" - "errors" "fmt" "net/http" "net/url" @@ -16,6 +15,7 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource" armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" @@ -45,6 +45,9 @@ func setDefaults(r *armpolicy.RegistrationOptions) { if r.PollingDuration == 0 { r.PollingDuration = 5 * time.Minute } + if len(r.StatusCodes) == 0 { + r.StatusCodes = []int{http.StatusConflict} + } } // NewRPRegistrationPolicy creates a policy object configured using the specified options. @@ -88,7 +91,7 @@ func (r *rpRegistrationPolicy) Do(req *azpolicy.Request) (*http.Response, error) // make the original request resp, err = req.Next() // getting a 409 is the first indication that the RP might need to be registered, check error response - if err != nil || resp.StatusCode != http.StatusConflict { + if err != nil || !runtime.HasStatusCode(resp, r.options.StatusCodes...) { return resp, err } var reqErr requestError @@ -105,17 +108,12 @@ func (r *rpRegistrationPolicy) Do(req *azpolicy.Request) (*http.Response, error) // to the caller so its error unmarshalling will kick in return resp, err } - // RP needs to be registered. start by getting the subscription ID from the original request - subID, err := getSubscription(req.Raw().URL.Path) - if err != nil { - return resp, err - } - // now get the RP from the error - rp, err = getProvider(reqErr) + res, err := resource.ParseResourceID(req.Raw().URL.Path) if err != nil { return resp, err } - logRegistrationExit := func(v interface{}) { + rp = res.ResourceType.Namespace + logRegistrationExit := func(v any) { log.Writef(LogRPRegistration, "END registration for %s: %v", rp, v) } log.Writef(LogRPRegistration, "BEGIN registration for %s", rp) @@ -124,7 +122,7 @@ func (r *rpRegistrationPolicy) Do(req *azpolicy.Request) (*http.Response, error) rpOps := &providersOperations{ p: r.pipeline, u: r.endpoint, - subID: subID, + subID: res.SubscriptionID, } if _, err = rpOps.Register(&shared.ContextWithDeniedValues{Context: req.Raw().Context()}, rp); err != nil { logRegistrationExit(err) @@ -189,36 +187,13 @@ func isUnregisteredRPCode(errorCode string) bool { return false } -func getSubscription(path string) (string, error) { - parts := strings.Split(path, "/") - for i, v := range parts { - if v == "subscriptions" && (i+1) < len(parts) { - return parts[i+1], nil - } - } - return "", fmt.Errorf("failed to obtain subscription ID from %s", path) -} - -func getProvider(re requestError) (string, error) { - if len(re.ServiceError.Details) > 0 { - return re.ServiceError.Details[0].Target, nil - } - return "", errors.New("unexpected empty Details") -} - // minimal error definitions to simplify detection type requestError struct { ServiceError *serviceError `json:"error"` } type serviceError struct { - Code string `json:"code"` - Details []serviceErrorDetails `json:"details"` -} - -type serviceErrorDetails struct { - Code string `json:"code"` - Target string `json:"target"` + Code string `json:"code"` } /////////////////////////////////////////////////////////////////////////////////////////////// diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml index aab921853..99348527b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml @@ -23,7 +23,7 @@ pr: - sdk/azcore/ - eng/ -stages: -- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml +extends: + template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml parameters: ServiceDirectory: azcore diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go index 8d1ae213c..3041984d9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go @@ -51,15 +51,15 @@ type Request struct { values opValues } -type opValues map[reflect.Type]interface{} +type opValues map[reflect.Type]any // Set adds/changes a value -func (ov opValues) set(value interface{}) { +func (ov opValues) set(value any) { ov[reflect.TypeOf(value)] = value } // Get looks for a value set by SetValue first -func (ov opValues) get(value interface{}) bool { +func (ov opValues) get(value any) bool { v, ok := ov[reflect.ValueOf(value).Elem().Type()] if ok { reflect.ValueOf(value).Elem().Set(reflect.ValueOf(v)) @@ -108,7 +108,7 @@ func (req *Request) Next() (*http.Response, error) { } // SetOperationValue adds/changes a mutable key/value associated with a single operation. -func (req *Request) SetOperationValue(value interface{}) { +func (req *Request) SetOperationValue(value any) { if req.values == nil { req.values = opValues{} } @@ -116,7 +116,7 @@ func (req *Request) SetOperationValue(value interface{}) { } // OperationValue looks for a value set by SetOperationValue(). -func (req *Request) OperationValue(value interface{}) bool { +func (req *Request) OperationValue(value any) bool { if req.values == nil { return false } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go index bd348b868..08a954587 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go @@ -59,7 +59,7 @@ func NewResponseErrorWithErrorCode(resp *http.Response, errorCode string) error } func extractErrorCodeJSON(body []byte) string { - var rawObj map[string]interface{} + var rawObj map[string]any if err := json.Unmarshal(body, &rawObj); err != nil { // not a JSON object return "" @@ -68,7 +68,7 @@ func extractErrorCodeJSON(body []byte) string { // check if this is a wrapped error, i.e. { "error": { ... 
} } // if so then unwrap it if wrapped, ok := rawObj["error"]; ok { - unwrapped, ok := wrapped.(map[string]interface{}) + unwrapped, ok := wrapped.(map[string]any) if !ok { return "" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go index 5cb87de2c..6fc6d1400 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go @@ -30,7 +30,7 @@ func Write(cls log.Event, msg string) { // Writef invokes the underlying listener with the specified event and formatted message. // If the event shouldn't be logged or there is no listener then Writef does nothing. -func Writef(cls log.Event, format string, a ...interface{}) { +func Writef(cls log.Event, format string, a ...any) { log.Writef(cls, format, a...) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go index b05bd8b38..ccd4794e9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go @@ -27,7 +27,7 @@ func Applicable(resp *http.Response) bool { } // CanResume returns true if the token can rehydrate this poller type. -func CanResume(token map[string]interface{}) bool { +func CanResume(token map[string]any) bool { _, ok := token["asyncURL"] return ok } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go index 2bb9e105b..0d781b31d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go @@ -29,7 +29,7 @@ func Applicable(resp *http.Response) bool { } // CanResume returns true if the token can rehydrate this poller type. -func CanResume(token map[string]interface{}) bool { +func CanResume(token map[string]any) bool { t, ok := token["type"] if !ok { return false diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go index 259834718..51aede8a2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go @@ -26,7 +26,7 @@ func Applicable(resp *http.Response) bool { } // CanResume returns true if the token can rehydrate this poller type. -func CanResume(token map[string]interface{}) bool { +func CanResume(token map[string]any) bool { _, ok := token["fakeURL"] return ok } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go index d6be89876..b0e62b1df 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go @@ -28,7 +28,7 @@ func Applicable(resp *http.Response) bool { } // CanResume returns true if the token can rehydrate this poller type. 
-func CanResume(token map[string]interface{}) bool { +func CanResume(token map[string]any) bool { t, ok := token["type"] if !ok { return false @@ -103,6 +103,10 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { } else if resp.StatusCode > 199 && resp.StatusCode < 300 { // any 2xx other than a 202 indicates success p.CurState = poller.StatusSucceeded + } else if resp.StatusCode == http.StatusTooManyRequests { + // the request is being throttled. DO NOT include + // this as a terminal failure. preserve the + // existing state and return the response. } else { p.CurState = poller.StatusFailed } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go index 1bc7ad0ac..ac1c0efb5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go @@ -25,7 +25,7 @@ func Applicable(resp *http.Response) bool { } // CanResume returns true if the token can rehydrate this poller type. -func CanResume(token map[string]interface{}) bool { +func CanResume(token map[string]any) bool { _, ok := token["oplocURL"] return ok } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go index d8d86a46c..08e8d24e4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go @@ -74,7 +74,7 @@ func ExtractToken(token string) ([]byte, error) { // IsTokenValid returns an error if the specified token isn't applicable for generic type T. func IsTokenValid[T any](token string) error { - raw := map[string]interface{}{} + raw := map[string]any{} if err := json.Unmarshal([]byte(token), &raw); err != nil { return err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 330bf9a60..4d3465109 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -40,5 +40,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.10.0" + Version = "v1.11.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go index d934f1dc5..8d9845358 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -39,6 +39,11 @@ type ClientOptions struct { // Cloud specifies a cloud for the client. The default is Azure Public Cloud. Cloud cloud.Configuration + // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP. + // By default, authenticated requests to an HTTP endpoint are rejected by the client. + // WARNING: setting this to true will allow sending the credential in clear text. Use with caution. + InsecureAllowCredentialWithHTTP bool + // Logging configures the built-in logging policy. 
Logging LogOptions @@ -147,6 +152,11 @@ type BearerTokenOptions struct { // When this field isn't set, the policy follows its default behavior of authorizing every request with a bearer token from // its given credential. AuthorizationHandler AuthorizationHandler + + // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP. + // By default, authenticated requests to an HTTP endpoint are rejected by the client. + // WARNING: setting this to true will allow sending the bearer token in clear text. Use with caution. + InsecureAllowCredentialWithHTTP bool } // AuthorizationHandler allows SDK developers to insert custom logic that runs when BearerTokenPolicy must authorize a request. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go index f0f280355..cb2a69528 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go @@ -24,6 +24,7 @@ type BearerTokenPolicy struct { authzHandler policy.AuthorizationHandler cred exported.TokenCredential scopes []string + allowHTTP bool } type acquiringResourceState struct { @@ -55,6 +56,7 @@ func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts * cred: cred, scopes: scopes, mainResource: temporal.NewResource(acquire), + allowHTTP: opts.InsecureAllowCredentialWithHTTP, } } @@ -80,7 +82,7 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { return req.Next() } - if err := checkHTTPSForAuth(req); err != nil { + if err := checkHTTPSForAuth(req, b.allowHTTP); err != nil { return nil, err } @@ -113,8 +115,8 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { return res, err } -func checkHTTPSForAuth(req *policy.Request) error { - if strings.ToLower(req.Raw().URL.Scheme) != "https" { +func checkHTTPSForAuth(req *policy.Request, allowHTTP bool) error { + if strings.ToLower(req.Raw().URL.Scheme) != "https" && !allowHTTP { return errorinfo.NonRetriableError(errors.New("authenticated requests are not permitted for non TLS protected (https) endpoints")) } return nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go index 6f577fa7a..eeb1c09cc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go @@ -12,13 +12,19 @@ import ( // KeyCredentialPolicy authorizes requests with a [azcore.KeyCredential]. type KeyCredentialPolicy struct { - cred *exported.KeyCredential - header string - prefix string + cred *exported.KeyCredential + header string + prefix string + allowHTTP bool } // KeyCredentialPolicyOptions contains the optional values configuring [KeyCredentialPolicy]. type KeyCredentialPolicyOptions struct { + // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP. + // By default, authenticated requests to an HTTP endpoint are rejected by the client. + // WARNING: setting this to true will allow sending the authentication key in clear text. Use with caution. + InsecureAllowCredentialWithHTTP bool + // Prefix is used if the key requires a prefix before it's inserted into the HTTP request. 
Prefix string } @@ -32,9 +38,10 @@ func NewKeyCredentialPolicy(cred *exported.KeyCredential, header string, options options = &KeyCredentialPolicyOptions{} } return &KeyCredentialPolicy{ - cred: cred, - header: header, - prefix: options.Prefix, + cred: cred, + header: header, + prefix: options.Prefix, + allowHTTP: options.InsecureAllowCredentialWithHTTP, } } @@ -44,7 +51,7 @@ func (k *KeyCredentialPolicy) Do(req *policy.Request) (*http.Response, error) { // this prevents a panic that might be hard to diagnose and allows testing // against http endpoints that don't require authentication. if k.cred != nil { - if err := checkHTTPSForAuth(req); err != nil { + if err := checkHTTPSForAuth(req, k.allowHTTP); err != nil { return nil, err } val := exported.KeyCredentialGet(k.cred) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go index ebe2b7772..3964beea8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go @@ -12,13 +12,17 @@ import ( // SASCredentialPolicy authorizes requests with a [azcore.SASCredential]. type SASCredentialPolicy struct { - cred *exported.SASCredential - header string + cred *exported.SASCredential + header string + allowHTTP bool } // SASCredentialPolicyOptions contains the optional values configuring [SASCredentialPolicy]. type SASCredentialPolicyOptions struct { - // placeholder for future optional values + // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP. + // By default, authenticated requests to an HTTP endpoint are rejected by the client. + // WARNING: setting this to true will allow sending the authentication key in clear text. Use with caution. + InsecureAllowCredentialWithHTTP bool } // NewSASCredentialPolicy creates a new instance of [SASCredentialPolicy]. @@ -26,9 +30,13 @@ type SASCredentialPolicyOptions struct { // - header is the name of the HTTP request header in which the shared access signature is placed // - options contains optional configuration, pass nil to accept the default values func NewSASCredentialPolicy(cred *exported.SASCredential, header string, options *SASCredentialPolicyOptions) *SASCredentialPolicy { + if options == nil { + options = &SASCredentialPolicyOptions{} + } return &SASCredentialPolicy{ - cred: cred, - header: header, + cred: cred, + header: header, + allowHTTP: options.InsecureAllowCredentialWithHTTP, } } @@ -38,7 +46,7 @@ func (k *SASCredentialPolicy) Do(req *policy.Request) (*http.Response, error) { // this prevents a panic that might be hard to diagnose and allows testing // against http endpoints that don't require authentication. 
if k.cred != nil { - if err := checkHTTPSForAuth(req); err != nil { + if err := checkHTTPSForAuth(req, k.allowHTTP); err != nil { return nil, err } req.Raw().Header.Add(k.header, exported.SASCredentialGet(k.cred)) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go index c373f6896..b0be7e5e7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go @@ -154,7 +154,7 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options if err != nil { return nil, err } - var asJSON map[string]interface{} + var asJSON map[string]any if err := json.Unmarshal(raw, &asJSON); err != nil { return nil, err } @@ -240,7 +240,7 @@ func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOpt } start := time.Now() - logPollUntilDoneExit := func(v interface{}) { + logPollUntilDoneExit := func(v any) { log.Writef(log.EventLRO, "END PollUntilDone() for %T: %v, total time: %s", p.op, v, time.Since(start)) } log.Writef(log.EventLRO, "BEGIN PollUntilDone() for %T", p.op) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go index bef05f2a3..06ac95b1b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go @@ -11,9 +11,11 @@ import ( "context" "encoding/json" "encoding/xml" + "errors" "fmt" "io" "mime/multipart" + "net/textproto" "net/url" "path" "strings" @@ -21,6 +23,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" ) // Base64Encoding is usesd to specify which base-64 encoder/decoder to use when @@ -109,7 +112,7 @@ func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) er } // MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody. -func MarshalAsJSON(req *policy.Request, v interface{}) error { +func MarshalAsJSON(req *policy.Request, v any) error { b, err := json.Marshal(v) if err != nil { return fmt.Errorf("error marshalling type %T: %s", v, err) @@ -119,7 +122,7 @@ func MarshalAsJSON(req *policy.Request, v interface{}) error { } // MarshalAsXML calls xml.Marshal() to get the XML encoding of v then calls SetBody. -func MarshalAsXML(req *policy.Request, v interface{}) error { +func MarshalAsXML(req *policy.Request, v any) error { b, err := xml.Marshal(v) if err != nil { return fmt.Errorf("error marshalling type %T: %s", v, err) @@ -129,10 +132,10 @@ func MarshalAsXML(req *policy.Request, v interface{}) error { return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppXML) } -// SetMultipartFormData writes the specified keys/values as multi-part form -// fields with the specified value. File content must be specified as a ReadSeekCloser. -// All other values are treated as string values. -func SetMultipartFormData(req *policy.Request, formData map[string]interface{}) error { +// SetMultipartFormData writes the specified keys/values as multi-part form fields with the specified value. +// File content must be specified as an [io.ReadSeekCloser] or [streaming.MultipartContent]. 
+// Byte slices will be treated as JSON. All other values are treated as string values. +func SetMultipartFormData(req *policy.Request, formData map[string]any) error { body := bytes.Buffer{} writer := multipart.NewWriter(&body) @@ -148,6 +151,60 @@ func SetMultipartFormData(req *policy.Request, formData map[string]interface{}) return nil } + quoteEscaper := strings.NewReplacer("\\", "\\\\", `"`, "\\\"") + + writeMultipartContent := func(fieldname string, mpc streaming.MultipartContent) error { + if mpc.Body == nil { + return errors.New("streaming.MultipartContent.Body cannot be nil") + } + + // use fieldname for the file name when unspecified + filename := fieldname + + if mpc.ContentType == "" && mpc.Filename == "" { + return writeContent(fieldname, filename, mpc.Body) + } + if mpc.Filename != "" { + filename = mpc.Filename + } + // this is pretty much copied from multipart.Writer.CreateFormFile + // but lets us set the caller provided Content-Type and filename + h := make(textproto.MIMEHeader) + h.Set("Content-Disposition", + fmt.Sprintf(`form-data; name="%s"; filename="%s"`, + quoteEscaper.Replace(fieldname), quoteEscaper.Replace(filename))) + contentType := "application/octet-stream" + if mpc.ContentType != "" { + contentType = mpc.ContentType + } + h.Set("Content-Type", contentType) + fd, err := writer.CreatePart(h) + if err != nil { + return err + } + // copy the data to the form file + if _, err = io.Copy(fd, mpc.Body); err != nil { + return err + } + return nil + } + + // the same as multipart.Writer.WriteField but lets us specify the Content-Type + writeField := func(fieldname, contentType string, value string) error { + h := make(textproto.MIMEHeader) + h.Set("Content-Disposition", + fmt.Sprintf(`form-data; name="%s"`, quoteEscaper.Replace(fieldname))) + h.Set("Content-Type", contentType) + fd, err := writer.CreatePart(h) + if err != nil { + return err + } + if _, err = fd.Write([]byte(value)); err != nil { + return err + } + return nil + } + for k, v := range formData { if rsc, ok := v.(io.ReadSeekCloser); ok { if err := writeContent(k, k, rsc); err != nil { @@ -161,13 +218,35 @@ func SetMultipartFormData(req *policy.Request, formData map[string]interface{}) } } continue + } else if mpc, ok := v.(streaming.MultipartContent); ok { + if err := writeMultipartContent(k, mpc); err != nil { + return err + } + continue + } else if mpcs, ok := v.([]streaming.MultipartContent); ok { + for _, mpc := range mpcs { + if err := writeMultipartContent(k, mpc); err != nil { + return err + } + } + continue } - // ensure the value is in string format - s, ok := v.(string) - if !ok { - s = fmt.Sprintf("%v", v) + + var content string + contentType := shared.ContentTypeTextPlain + switch tt := v.(type) { + case []byte: + // JSON, don't quote it + content = string(tt) + contentType = shared.ContentTypeAppJSON + case string: + content = tt + default: + // ensure the value is in string format + content = fmt.Sprintf("%v", v) } - if err := writer.WriteField(k, s); err != nil { + + if err := writeField(k, contentType, content); err != nil { return err } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go index 003c875b1..048566e02 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go @@ -40,7 +40,7 @@ func UnmarshalAsByteArray(resp *http.Response, v *[]byte, format Base64Encoding) } // 
UnmarshalAsJSON calls json.Unmarshal() to unmarshal the received payload into the value pointed to by v. -func UnmarshalAsJSON(resp *http.Response, v interface{}) error { +func UnmarshalAsJSON(resp *http.Response, v any) error { payload, err := Payload(resp) if err != nil { return err @@ -61,7 +61,7 @@ func UnmarshalAsJSON(resp *http.Response, v interface{}) error { } // UnmarshalAsXML calls xml.Unmarshal() to unmarshal the received payload into the value pointed to by v. -func UnmarshalAsXML(resp *http.Response, v interface{}) error { +func UnmarshalAsXML(resp *http.Response, v any) error { payload, err := Payload(resp) if err != nil { return err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go index fbcd48311..2468540bd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go @@ -73,3 +73,17 @@ func (p *progress) Seek(offset int64, whence int) (int64, error) { func (p *progress) Close() error { return p.rc.Close() } + +// MultipartContent contains streaming content used in multipart/form payloads. +type MultipartContent struct { + // Body contains the required content body. + Body io.ReadSeekCloser + + // ContentType optionally specifies the HTTP Content-Type for this Body. + // The default value is application/octet-stream. + ContentType string + + // Filename optionally specifies the filename for this Body. + // The default value is the field name for the multipart/form section. + Filename string +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 49f8b7d85..0e25d5321 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -11,7 +11,7 @@ github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage github.com/Azure/azure-sdk-for-go/storage github.com/Azure/azure-sdk-for-go/version -# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azcore github.com/Azure/azure-sdk-for-go/sdk/azcore/arm @@ -1655,7 +1655,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient -# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.2 +# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.3 ## explicit; go 1.21 sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader # sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd From 9c4eccc410d3bbe2ab9d325bc1a6e6a6e6c31b6e Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Wed, 3 Apr 2024 02:48:52 +0000 Subject: [PATCH 120/157] fix: enable http with track2 sdk --- go.mod | 2 +- go.sum | 4 ++-- vendor/modules.txt | 2 +- .../pkg/azclient/utils/options.go | 5 +++-- .../pkg/azclient/utils/transport.go | 15 +++++++++------ 5 files changed, 16 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index d0bffe34b..70b16f6ba 100644 --- a/go.mod +++ b/go.mod @@ -156,7 +156,7 @@ require ( k8s.io/kubelet v0.29.2 // indirect k8s.io/pod-security-admission v0.29.0 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect - 
sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.5 + sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.7 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index 93fe81b41..bd9610621 100644 --- a/go.sum +++ b/go.sum @@ -464,8 +464,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2S sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc h1:y/jDBU+4EcPiHGPMQeK6B4IKEhOp3CCts/ZaRH1sa3w= sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc/go.mod h1:nS6tUQgopFVyqFQW1P/luDXNNQfsQOg3WFs9GhzNjm8= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.5 h1:ItSTRGcTTVArwQwNOyRVqyIZwaa8QHrYPOGb8Dg5CZM= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.5/go.mod h1:PAr/DSzJqB6Pwf609TVAtAMr8uErgASj5tY63w/SNEQ= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.7 h1:2i90v/+RL6UmN9zE8bDxEy8DpEdIK9WiwmrGS6XoDgk= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.7/go.mod h1:4rMNajTN5xEbeVGFWfAeqorkednAmXM5/d8OK+ux+tY= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.3 h1:4Sk81u8kxM+yTjEfC9lPP3YyLpWeDTFRquI/3a5AzRY= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.3/go.mod h1:FWY5fW/WLg/S1Iul/Mh5YeRyQhl8Ii1Sk+YLLOj3KDk= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/vendor/modules.txt b/vendor/modules.txt index 0e25d5321..546d46cf5 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1613,7 +1613,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/util/string sigs.k8s.io/cloud-provider-azure/pkg/util/taints sigs.k8s.io/cloud-provider-azure/pkg/util/vm sigs.k8s.io/cloud-provider-azure/pkg/version -# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.5 +# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.7 ## explicit; go 1.20 sigs.k8s.io/cloud-provider-azure/pkg/azclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/options.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/options.go index 435a48038..8f7d9c21b 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/options.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/options.go @@ -56,7 +56,8 @@ func GetDefaultAzCoreClientOption() policy.ClientOptions { Transport: &http.Client{ Transport: DefaultTransport, }, - TracingProvider: TracingProvider, - Cloud: cloud.AzurePublic, + TracingProvider: TracingProvider, + Cloud: cloud.AzurePublic, + InsecureAllowCredentialWithHTTP: true, } } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/transport.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/transport.go index bc2523188..67ddcedc9 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/transport.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/transport.go @@ -35,13 +35,16 @@ func init() { Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, }).DialContext, - ForceAttemptHTTP2: true, - MaxIdleConns: 100, - MaxConnsPerHost: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + MaxConnsPerHost: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 
1 * time.Second, // the same as default transport + ResponseHeaderTimeout: 60 * time.Second, TLSClientConfig: &tls.Config{ - MinVersion: tls.VersionTLS12, + MinVersion: tls.VersionTLS12, + Renegotiation: tls.RenegotiateNever, // the same as default transport https://pkg.go.dev/crypto/tls#RenegotiationSupport }, } }) From e279b7f0a438735f02323fd6325b3d8fdf2228ff Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 4 Apr 2024 16:11:09 +0000 Subject: [PATCH 121/157] chore(deps): bump sigs.k8s.io/cloud-provider-azure/pkg/azclient Bumps [sigs.k8s.io/cloud-provider-azure/pkg/azclient](https://github.com/kubernetes-sigs/cloud-provider-azure) from 0.0.7 to 0.0.9. - [Release notes](https://github.com/kubernetes-sigs/cloud-provider-azure/releases) - [Changelog](https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/docs/release-versioning.md) - [Commits](https://github.com/kubernetes-sigs/cloud-provider-azure/compare/pkg/azclient/v0.0.7...pkg/azclient/v0.0.9) --- updated-dependencies: - dependency-name: sigs.k8s.io/cloud-provider-azure/pkg/azclient dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 4 +- go.sum | 8 +- .../sdk/keyvault/azsecrets/CHANGELOG.md | 110 +++ .../sdk/keyvault/azsecrets/LICENSE.txt | 21 + .../sdk/keyvault/azsecrets/README.md | 144 ++++ .../sdk/keyvault/azsecrets/TROUBLESHOOTING.md | 4 + .../sdk/keyvault/azsecrets/autorest.md | 96 +++ .../sdk/keyvault/azsecrets/build.go | 10 + .../sdk/keyvault/azsecrets/ci.yml | 29 + .../sdk/keyvault/azsecrets/client.go | 650 ++++++++++++++++++ .../sdk/keyvault/azsecrets/constants.go | 63 ++ .../sdk/keyvault/azsecrets/custom_client.go | 63 ++ .../sdk/keyvault/azsecrets/models.go | 268 ++++++++ .../sdk/keyvault/azsecrets/models_serde.go | 501 ++++++++++++++ .../sdk/keyvault/azsecrets/response_types.go | 70 ++ .../keyvault/azsecrets/test-resources.json | 331 +++++++++ .../sdk/keyvault/azsecrets/time_unix.go | 62 ++ .../sdk/keyvault/azsecrets/version.go | 12 + .../sdk/keyvault/internal/CHANGELOG.md | 54 ++ .../sdk/keyvault/internal/LICENSE.txt | 21 + .../sdk/keyvault/internal/README.md | 23 + .../sdk/keyvault/internal/challenge_policy.go | 255 +++++++ .../sdk/keyvault/internal/ci.keyvault.yml | 28 + .../sdk/keyvault/internal/constants.go | 11 + .../sdk/keyvault/internal/doc.go | 7 + .../sdk/keyvault/internal/parse.go | 37 + vendor/modules.txt | 9 +- .../pkg/azclient/arm_conf.go | 4 + .../azclient/armauth/auxiliary_auth_policy.go | 59 ++ .../azclient/armauth/keyvault_credential.go | 102 +++ .../cloud-provider-azure/pkg/azclient/auth.go | 38 +- .../pkg/azclient/auth_conf.go | 8 + 32 files changed, 3091 insertions(+), 11 deletions(-) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/CHANGELOG.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/LICENSE.txt create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/TROUBLESHOOTING.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/autorest.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/build.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/ci.yml create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/client.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/custom_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/models_serde.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/response_types.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/test-resources.json create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/time_unix.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/CHANGELOG.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/LICENSE.txt create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/challenge_policy.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/ci.keyvault.yml create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/parse.go create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/armauth/auxiliary_auth_policy.go create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/armauth/keyvault_credential.go diff --git a/go.mod b/go.mod index 70b16f6ba..87eb8327c 100644 --- a/go.mod +++ b/go.mod @@ -156,12 +156,14 @@ require ( k8s.io/kubelet v0.29.2 // indirect k8s.io/pod-security-admission v0.29.0 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect - sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.7 + sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.9 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) require ( + github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 // indirect github.com/distribution/reference v0.5.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect diff --git a/go.sum b/go.sum index bd9610621..de3a950a7 100644 --- a/go.sum +++ b/go.sum @@ -11,6 +11,10 @@ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= +github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0 h1:xnO4sFyG8UH2fElBkcqLTOZsAajvKfnSlgBBW8dXYjw= +github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0/go.mod h1:XD3DIOOVgBCO03OleB1fHjgktVRFxlT++KwKgIOewdM= +github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 h1:FbH3BbSb4bvGluTesZZ+ttN/MDsnMmQP36OSnDuSXqw= +github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1/go.mod h1:9V2j0jn9jDEkCkv8w/bKTNppX/d0FVA1ud77xCIP4KA= 
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 h1:ui3YNbxfW7J3tTFIZMH6LIGRjCngp+J+nIFlnizfNTE= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0/go.mod h1:gZmgV+qBqygoznvqo2J9oKZAFziqhLZ2xE/WVUmzkHA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0 h1:DWlwvVV5r/Wy1561nZ3wrpI1/vDIBRY/Wd1HWaRBZWA= @@ -464,8 +468,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2S sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc h1:y/jDBU+4EcPiHGPMQeK6B4IKEhOp3CCts/ZaRH1sa3w= sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc/go.mod h1:nS6tUQgopFVyqFQW1P/luDXNNQfsQOg3WFs9GhzNjm8= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.7 h1:2i90v/+RL6UmN9zE8bDxEy8DpEdIK9WiwmrGS6XoDgk= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.7/go.mod h1:4rMNajTN5xEbeVGFWfAeqorkednAmXM5/d8OK+ux+tY= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.9 h1:Dqfn77jXMi78OrCA0clZyqJtTeyxj7dBeF23lp/nT2s= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.9/go.mod h1:/jwCAe6Nh26G687RvUbmd0CjW4GHHXI/xOAOCSF1XqQ= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.3 h1:4Sk81u8kxM+yTjEfC9lPP3YyLpWeDTFRquI/3a5AzRY= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.3/go.mod h1:FWY5fW/WLg/S1Iul/Mh5YeRyQhl8Ii1Sk+YLLOj3KDk= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/CHANGELOG.md new file mode 100644 index 000000000..8a5c4cc9f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/CHANGELOG.md @@ -0,0 +1,110 @@ +# Release History + +## 0.12.0 (2023-04-13) + +### Features Added +* upgraded to api version 7.4 + +### Breaking Changes +* This module is now DEPRECATED. The latest supported version of this module is at github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets + +## 0.11.0 (2022-11-08) + +### Breaking Changes +* `NewClient` returns an `error` + +## 0.10.1 (2022-09-20) + +### Features Added +* Added `ClientOptions.DisableChallengeResourceVerification`. + See https://aka.ms/azsdk/blog/vault-uri for more information. + +## 0.10.0 (2022-09-12) + +### Breaking Changes +* Verify the challenge resource matches the vault domain. + +## 0.9.0 (2022-08-09) + +### Breaking Changes +* Changed type of `NewClient` options parameter to `azsecrets.ClientOptions`, which embeds + the former type, `azcore.ClientOptions` + +## 0.8.0 (2022-07-07) + +### Breaking Changes +* The `Client` API now corresponds more directly to the Key Vault REST API. + Most method signatures and types have changed. See the + [module documentation](https://aka.ms/azsdk/go/keyvault-secrets/docs) + for updated code examples and more details. + +### Other Changes +* Upgrade to latest `azcore` + +## 0.7.1 (2022-05-12) + +### Other Changes +* Updated to latest `azcore` and `internal` modules. 
+ +## 0.7.0 (2022-04-06) + +### Features Added +* Added `PossibleDeletionRecoveryLevelValues` to iterate over all valid `DeletionRecoveryLevel` values +* Implemented generic pagers from `runtime.Pager` for all List operations +* Added `Name *string` to `DeletedSecret`, `Properties`, `Secret`, `SecretItem`, and `SecretItem` +* Added `Client.VaultURL` to determine the vault URL for debugging +* Adding `ResumeToken` method to pollers for resuming polling at a later date by using the added `ResumeToken` optional parameter on client polling methods + +### Breaking Changes +* Requires a minimum version of go 1.18 +* Removed `RawResponse` from pollers +* Removed `DeletionRecoveryLevel` +* Polling operations return a Poller struct directly instead of a Response envelope +* Removed `ToPtr` methods +* `Client.UpdateSecretProperties` takes a `Secret` +* Renamed `Client.ListSecrets` to `Client.ListPropertiesOfSecrets` +* Renamed `Client.ListSecretVersions` to `Client.ListPropertiesOfSecretVersions` +* Renamed `DeletedDate` to `DeletedOn` and `Managed` to `IsManaged` +* Moved `ContentType`, `Tags`, `KeyID`, and `IsManaged` to `Properties` + +## 0.6.0 (2022-03-08) + +### Breaking Changes +* Changes `Attributes` to `Properties` +* Changes `Secret.KID` to `Secret.KeyID` +* Changes `DeletedSecretBundle` to `DeletedSecret` +* Changes `DeletedDate` to `DeletedOn`, `Created` to `CreatedOn`, and `Updated` to `UpdatedOn` +* Changes the signature of `Client.UpdateSecretProperties` to have all alterable properties in the `UpdateSecretPropertiesOptions` parameter, removing the `parameters Properties` parameter. +* Changes `Item` to `SecretItem` +* Pollers and pagers are structs instead of interfaces +* Prefixed all `DeletionRecoveryLevel` constants with "DeletionRecoveryLevel" +* Changed pager APIs for `ListSecretVersionsPager`, `ListDeletedSecretsPager`, and `ListSecretsPager` + * Use the `More()` method to determine if there are more pages to fetch + * Use the `NextPage(context.Context)` to fetch the next page of results +* Removed all `RawResponse *http.Response` fields from response structs. + +## 0.5.0 (2022-02-08) + +### Breaking Changes +* Fixes a bug where `UpdateSecretProperties` will delete properties that are not explicitly set each time. This is only a breaking change at runtime, where the request body will change. + +## 0.4.0 (2022-01-11) + +### Other Changes +* Bumps `azcore` dependency from `v0.20.0` to `v0.21.0` + +## 0.3.0 (2021-11-09) + +### Features Added +* Clients can now connect to Key Vaults in any cloud + +## 0.2.0 (2021-11-02) + +### Other Changes +* Bumps `azcore` dependency to `v0.20.0` and `azidentity` to `v0.12.0` + +## 0.1.1 (2021-10-06) +* Adds the MIT License for redistribution + +## 0.1.0 (2021-10-05) +* This is the initial release of the `azsecrets` library diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/LICENSE.txt new file mode 100644 index 000000000..d1ca00f20 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/LICENSE.txt @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. 
+ + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/README.md new file mode 100644 index 000000000..06a23c841 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/README.md @@ -0,0 +1,144 @@ +# Azure Key Vault Secrets client module for Go +> Deprecated: use github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets instead + +Azure Key Vault helps solve the following problems: +* Secrets management (this module) - securely store and control access to tokens, passwords, certificates, API keys, and other secrets +* Cryptographic key management ([azkeys](https://azsdk/go/keyvault-keys/docs)) - create, store, and control access to the keys used to encrypt your data +* Certificate management ([azcertificates](https://aka.ms/azsdk/go/keyvault-certificates/docs)) - create, manage, and deploy public and private SSL/TLS certificates + +[Source code][module_source] | [Package (pkg.go.dev)][reference_docs] | [Product documentation][keyvault_docs] | [Samples][secrets_samples] + +## Getting started + +### Install packages + +Install `azsecrets` and `azidentity` with `go get`: +``` +go get github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets +go get github.com/Azure/azure-sdk-for-go/sdk/azidentity +``` +[azidentity][azure_identity] is used for Azure Active Directory authentication as demonstrated below. + + +### Prerequisites + +* An [Azure subscription][azure_sub] +* A supported Go version (the Azure SDK supports the two most recent Go releases) +* A key vault. If you need to create one, see the Key Vault documentation for instructions on doing so in the [Azure Portal][azure_keyvault_portal] or with the [Azure CLI][azure_keyvault_cli]. + +### Authentication + +This document demonstrates using [azidentity.NewDefaultAzureCredential][default_cred_ref] to authenticate. This credential type works in both local development and production environments. We recommend using a [managed identity][managed_identity] in production. + +[Client][client_docs] accepts any [azidentity][azure_identity] credential. See the [azidentity][azure_identity] documentation for more information about other credential types. + +#### Create a client + +Constructing the client also requires your vault's URL, which you can get from the Azure CLI or the Azure Portal. 
+ +```golang +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets" +) + +func main() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + // TODO: handle error + } + + client := azsecrets.NewClient("https://.vault.azure.net", cred, nil) +} +``` + +## Key concepts + +### Secret + +A secret consists of a secret value and its associated metadata and management information. This library handles secret values as strings, but Azure Key Vault doesn't store them as such. For more information about secrets and how Key Vault stores and manages them, see the [Key Vault documentation](https://docs.microsoft.com/azure/key-vault/general/about-keys-secrets-certificates). + +`azseecrets.Client` can set secret values in the vault, update secret metadata, and delete secrets, as shown in the examples below. + +## Examples + +Get started with our [examples](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets#pkg-examples). + +## Troubleshooting + +### Error Handling + +All methods which send HTTP requests return `*azcore.ResponseError` when these requests fail. `ResponseError` has error details and the raw response from Key Vault. + +```go +import "github.com/Azure/azure-sdk-for-go/sdk/azcore" + +resp, err := client.GetSecret(context.Background(), "secretName", nil) +if err != nil { + var httpErr *azcore.ResponseError + if errors.As(err, &httpErr) { + // TODO: investigate httpErr + } else { + // TODO: not an HTTP error + } +} +``` + +### Logging + +This module uses the logging implementation in `azcore`. To turn on logging for all Azure SDK modules, set `AZURE_SDK_GO_LOGGING` to `all`. By default the logger writes to stderr. Use the `azcore/log` package to control log output. For example, logging only HTTP request and response events, and printing them to stdout: + +```go +import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + +// Print log events to stdout +azlog.SetListener(func(cls azlog.Event, msg string) { + fmt.Println(msg) +}) + +// Includes only requests and responses in credential logs +azlog.SetEvents(azlog.EventRequest, azlog.EventResponse) +``` + +### Accessing `http.Response` + +You can access the raw `*http.Response` returned by Key Vault using the `runtime.WithCaptureResponse` method and a context passed to any client method. + +```go +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +var response *http.Response +ctx := runtime.WithCaptureResponse(context.TODO(), &response) +_, err = client.GetSecret(ctx, "secretName", nil) +if err != nil { + // TODO: handle error +} +// TODO: do something with response +``` + +### Additional Documentation + +See the [API reference documentation][reference_docs] for complete documentation of this module. + +## Contributing + +This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. + +This project has adopted the [Microsoft Open Source Code of Conduct][code_of_conduct]. 
For more information, see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact opencode@microsoft.com with any additional questions or comments. + +[azure_identity]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity +[azure_keyvault_cli]: https://docs.microsoft.com/azure/key-vault/general/quick-create-cli +[azure_keyvault_portal]: https://docs.microsoft.com/azure/key-vault/general/quick-create-portal +[azure_sub]: https://azure.microsoft.com/free/ +[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ +[default_cred_ref]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity#defaultazurecredential +[keyvault_docs]: https://docs.microsoft.com/azure/key-vault/ +[managed_identity]: https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview +[reference_docs]: https://aka.ms/azsdk/go/keyvault-secrets/docs +[client_docs]: https://aka.ms/azsdk/go/keyvault-secrets/docs#Client +[module_source]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/keyvault/azsecrets +[secrets_samples]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/keyvault/azsecrets/example_test.go + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fkeyvault%2Fazsecrets%2FREADME.png) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/TROUBLESHOOTING.md new file mode 100644 index 000000000..72269c7a0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/TROUBLESHOOTING.md @@ -0,0 +1,4 @@ +# Troubleshoot Azure Key Vault Secrets Client Module Issues + +See our [Azure Key Vault SDK Troubleshooting Guide](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/keyvault/TROUBLESHOOTING.md) +to troubleshoot issues common to Azure Key Vault client modules. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/autorest.md new file mode 100644 index 000000000..e6cfb9f8c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/autorest.md @@ -0,0 +1,96 @@ +## Go + +These settings apply only when `--go` is specified on the command line. 
+ +```yaml +clear-output-folder: false +export-clients: true +go: true +input-file: https://github.com/Azure/azure-rest-api-specs/blob/551275acb80e1f8b39036b79dfc35a8f63b601a7/specification/keyvault/data-plane/Microsoft.KeyVault/stable/7.4/secrets.json +license-header: MICROSOFT_MIT_NO_VERSION +module: github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets +openapi-type: "data-plane" +output-folder: ../azsecrets +override-client-name: Client +security: "AADToken" +security-scopes: "https://vault.azure.net/.default" +use: "@autorest/go@4.0.0-preview.46" +version: "^3.0.0" + +directive: + # delete unused model + - remove-model: SecretProperties + + # make vault URL a parameter of the client constructor + - from: swagger-document + where: $["x-ms-parameterized-host"] + transform: $.parameters[0]["x-ms-parameter-location"] = "client" + + # rename parameter models to match their methods + - rename-model: + from: SecretRestoreParameters + to: RestoreSecretParameters + - rename-model: + from: SecretSetParameters + to: SetSecretParameters + - rename-model: + from: SecretUpdateParameters + to: UpdateSecretParameters + + # rename paged operations from Get* to List* + - rename-operation: + from: GetDeletedSecrets + to: ListDeletedSecrets + - rename-operation: + from: GetSecrets + to: ListSecrets + - rename-operation: + from: GetSecretVersions + to: ListSecretVersions + + # delete unused error models + - from: models.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+type (?:Error|KeyVaultError).+\{(?:\s.+\s)+\}\s/g, ""); + - from: models_serde.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+func \(\w \*?(?:Error|KeyVaultError)\).*\{\s(?:.+\s)+\}\s/g, ""); + + # delete the Attributes model defined in common.json (it's used only with allOf) + - from: models.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+type Attributes.+\{(?:\s.+\s)+\}\s/g, ""); + - from: models_serde.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+func \(a \*?Attributes\).*\{\s(?:.+\s)+\}\s/g, ""); + + # delete the version path param check (version == "" is legal for Key Vault but indescribable by OpenAPI) + - from: client.go + where: $ + transform: return $.replace(/\sif secretVersion == "" \{\s+.+secretVersion cannot be empty"\)\s+\}\s/g, ""); + + # delete client name prefix from method options and response types + - from: + - client.go + - models.go + - response_types.go + where: $ + transform: return $.replace(/Client(\w+)((?:Options|Response))/g, "$1$2"); + + # make secret IDs a convenience type so we can add parsing methods + - from: models.go + where: $ + transform: return $.replace(/(\sID \*)string(\s+.*)/g, "$1ID$2") + + # Maxresults -> MaxResults + - from: + - client.go + - models.go + where: $ + transform: return $.replace(/Maxresults/g, "MaxResults") + + # secretName, secretVersion -> name, version + - from: client.go + - where: $ + - transform: return $.replace(/secretName/g, "name").replace(/secretVersion/g, "version") +``` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/build.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/build.go new file mode 100644 index 000000000..d6e487f2c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/build.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +//go:generate autorest ./autorest.md +//go:generate gofmt -w . + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. + +package azsecrets diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/ci.yml new file mode 100644 index 000000000..3e196e0c8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/ci.yml @@ -0,0 +1,29 @@ + +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/keyvault/azsecrets + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/keyvault/azsecrets + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: 'keyvault/azsecrets' + RunLiveTests: true diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/client.go new file mode 100644 index 000000000..fdfbabf96 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/client.go @@ -0,0 +1,650 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azsecrets + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Client contains the methods for the Client group. +// Don't use this type directly, use a constructor function instead. +type Client struct { + internal *azcore.Client + endpoint string +} + +// BackupSecret - Requests that a backup of the specified secret be downloaded to the client. All versions of the secret will +// be downloaded. This operation requires the secrets/backup permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the secret. +// - options - BackupSecretOptions contains the optional parameters for the Client.BackupSecret method. +func (client *Client) BackupSecret(ctx context.Context, name string, options *BackupSecretOptions) (BackupSecretResponse, error) { + req, err := client.backupSecretCreateRequest(ctx, name, options) + if err != nil { + return BackupSecretResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BackupSecretResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return BackupSecretResponse{}, runtime.NewResponseError(resp) + } + return client.backupSecretHandleResponse(resp) +} + +// backupSecretCreateRequest creates the BackupSecret request. 
+func (client *Client) backupSecretCreateRequest(ctx context.Context, name string, options *BackupSecretOptions) (*policy.Request, error) { + urlPath := "/secrets/{secret-name}/backup" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// backupSecretHandleResponse handles the BackupSecret response. +func (client *Client) backupSecretHandleResponse(resp *http.Response) (BackupSecretResponse, error) { + result := BackupSecretResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.BackupSecretResult); err != nil { + return BackupSecretResponse{}, err + } + return result, nil +} + +// DeleteSecret - The DELETE operation applies to any secret stored in Azure Key Vault. DELETE cannot be applied to an individual +// version of a secret. This operation requires the secrets/delete permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the secret. +// - options - DeleteSecretOptions contains the optional parameters for the Client.DeleteSecret method. +func (client *Client) DeleteSecret(ctx context.Context, name string, options *DeleteSecretOptions) (DeleteSecretResponse, error) { + req, err := client.deleteSecretCreateRequest(ctx, name, options) + if err != nil { + return DeleteSecretResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return DeleteSecretResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DeleteSecretResponse{}, runtime.NewResponseError(resp) + } + return client.deleteSecretHandleResponse(resp) +} + +// deleteSecretCreateRequest creates the DeleteSecret request. +func (client *Client) deleteSecretCreateRequest(ctx context.Context, name string, options *DeleteSecretOptions) (*policy.Request, error) { + urlPath := "/secrets/{secret-name}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// deleteSecretHandleResponse handles the DeleteSecret response. +func (client *Client) deleteSecretHandleResponse(resp *http.Response) (DeleteSecretResponse, error) { + result := DeleteSecretResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DeletedSecretBundle); err != nil { + return DeleteSecretResponse{}, err + } + return result, nil +} + +// GetDeletedSecret - The Get Deleted Secret operation returns the specified deleted secret along with its attributes. This +// operation requires the secrets/get permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the secret. 
+// - options - GetDeletedSecretOptions contains the optional parameters for the Client.GetDeletedSecret method. +func (client *Client) GetDeletedSecret(ctx context.Context, name string, options *GetDeletedSecretOptions) (GetDeletedSecretResponse, error) { + req, err := client.getDeletedSecretCreateRequest(ctx, name, options) + if err != nil { + return GetDeletedSecretResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GetDeletedSecretResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GetDeletedSecretResponse{}, runtime.NewResponseError(resp) + } + return client.getDeletedSecretHandleResponse(resp) +} + +// getDeletedSecretCreateRequest creates the GetDeletedSecret request. +func (client *Client) getDeletedSecretCreateRequest(ctx context.Context, name string, options *GetDeletedSecretOptions) (*policy.Request, error) { + urlPath := "/deletedsecrets/{secret-name}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getDeletedSecretHandleResponse handles the GetDeletedSecret response. +func (client *Client) getDeletedSecretHandleResponse(resp *http.Response) (GetDeletedSecretResponse, error) { + result := GetDeletedSecretResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DeletedSecretBundle); err != nil { + return GetDeletedSecretResponse{}, err + } + return result, nil +} + +// GetSecret - The GET operation is applicable to any secret stored in Azure Key Vault. This operation requires the secrets/get +// permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the secret. +// - version - The version of the secret. This URI fragment is optional. If not specified, the latest version of the secret +// is returned. +// - options - GetSecretOptions contains the optional parameters for the Client.GetSecret method. +func (client *Client) GetSecret(ctx context.Context, name string, version string, options *GetSecretOptions) (GetSecretResponse, error) { + req, err := client.getSecretCreateRequest(ctx, name, version, options) + if err != nil { + return GetSecretResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GetSecretResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GetSecretResponse{}, runtime.NewResponseError(resp) + } + return client.getSecretHandleResponse(resp) +} + +// getSecretCreateRequest creates the GetSecret request. 
+func (client *Client) getSecretCreateRequest(ctx context.Context, name string, version string, options *GetSecretOptions) (*policy.Request, error) { + urlPath := "/secrets/{secret-name}/{secret-version}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{secret-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getSecretHandleResponse handles the GetSecret response. +func (client *Client) getSecretHandleResponse(resp *http.Response) (GetSecretResponse, error) { + result := GetSecretResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SecretBundle); err != nil { + return GetSecretResponse{}, err + } + return result, nil +} + +// NewListDeletedSecretsPager - The Get Deleted Secrets operation returns the secrets that have been deleted for a vault enabled +// for soft-delete. This operation requires the secrets/list permission. +// +// Generated from API version 7.4 +// - options - ListDeletedSecretsOptions contains the optional parameters for the Client.NewListDeletedSecretsPager method. +func (client *Client) NewListDeletedSecretsPager(options *ListDeletedSecretsOptions) *runtime.Pager[ListDeletedSecretsResponse] { + return runtime.NewPager(runtime.PagingHandler[ListDeletedSecretsResponse]{ + More: func(page ListDeletedSecretsResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ListDeletedSecretsResponse) (ListDeletedSecretsResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listDeletedSecretsCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return ListDeletedSecretsResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ListDeletedSecretsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListDeletedSecretsResponse{}, runtime.NewResponseError(resp) + } + return client.listDeletedSecretsHandleResponse(resp) + }, + }) +} + +// listDeletedSecretsCreateRequest creates the ListDeletedSecrets request. +func (client *Client) listDeletedSecretsCreateRequest(ctx context.Context, options *ListDeletedSecretsOptions) (*policy.Request, error) { + urlPath := "/deletedsecrets" + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.MaxResults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.MaxResults), 10)) + } + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listDeletedSecretsHandleResponse handles the ListDeletedSecrets response. 
+func (client *Client) listDeletedSecretsHandleResponse(resp *http.Response) (ListDeletedSecretsResponse, error) { + result := ListDeletedSecretsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DeletedSecretListResult); err != nil { + return ListDeletedSecretsResponse{}, err + } + return result, nil +} + +// NewListSecretVersionsPager - The full secret identifier and attributes are provided in the response. No values are returned +// for the secrets. This operations requires the secrets/list permission. +// +// Generated from API version 7.4 +// - name - The name of the secret. +// - options - ListSecretVersionsOptions contains the optional parameters for the Client.NewListSecretVersionsPager method. +func (client *Client) NewListSecretVersionsPager(name string, options *ListSecretVersionsOptions) *runtime.Pager[ListSecretVersionsResponse] { + return runtime.NewPager(runtime.PagingHandler[ListSecretVersionsResponse]{ + More: func(page ListSecretVersionsResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ListSecretVersionsResponse) (ListSecretVersionsResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listSecretVersionsCreateRequest(ctx, name, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return ListSecretVersionsResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ListSecretVersionsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListSecretVersionsResponse{}, runtime.NewResponseError(resp) + } + return client.listSecretVersionsHandleResponse(resp) + }, + }) +} + +// listSecretVersionsCreateRequest creates the ListSecretVersions request. +func (client *Client) listSecretVersionsCreateRequest(ctx context.Context, name string, options *ListSecretVersionsOptions) (*policy.Request, error) { + urlPath := "/secrets/{secret-name}/versions" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.MaxResults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.MaxResults), 10)) + } + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listSecretVersionsHandleResponse handles the ListSecretVersions response. +func (client *Client) listSecretVersionsHandleResponse(resp *http.Response) (ListSecretVersionsResponse, error) { + result := ListSecretVersionsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SecretListResult); err != nil { + return ListSecretVersionsResponse{}, err + } + return result, nil +} + +// NewListSecretsPager - The Get Secrets operation is applicable to the entire vault. However, only the base secret identifier +// and its attributes are provided in the response. Individual secret versions are not listed in the +// response. This operation requires the secrets/list permission. +// +// Generated from API version 7.4 +// - options - ListSecretsOptions contains the optional parameters for the Client.NewListSecretsPager method. 
+func (client *Client) NewListSecretsPager(options *ListSecretsOptions) *runtime.Pager[ListSecretsResponse] { + return runtime.NewPager(runtime.PagingHandler[ListSecretsResponse]{ + More: func(page ListSecretsResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ListSecretsResponse) (ListSecretsResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listSecretsCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return ListSecretsResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ListSecretsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListSecretsResponse{}, runtime.NewResponseError(resp) + } + return client.listSecretsHandleResponse(resp) + }, + }) +} + +// listSecretsCreateRequest creates the ListSecrets request. +func (client *Client) listSecretsCreateRequest(ctx context.Context, options *ListSecretsOptions) (*policy.Request, error) { + urlPath := "/secrets" + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.MaxResults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.MaxResults), 10)) + } + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listSecretsHandleResponse handles the ListSecrets response. +func (client *Client) listSecretsHandleResponse(resp *http.Response) (ListSecretsResponse, error) { + result := ListSecretsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SecretListResult); err != nil { + return ListSecretsResponse{}, err + } + return result, nil +} + +// PurgeDeletedSecret - The purge deleted secret operation removes the secret permanently, without the possibility of recovery. +// This operation can only be enabled on a soft-delete enabled vault. This operation requires the +// secrets/purge permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the secret. +// - options - PurgeDeletedSecretOptions contains the optional parameters for the Client.PurgeDeletedSecret method. +func (client *Client) PurgeDeletedSecret(ctx context.Context, name string, options *PurgeDeletedSecretOptions) (PurgeDeletedSecretResponse, error) { + req, err := client.purgeDeletedSecretCreateRequest(ctx, name, options) + if err != nil { + return PurgeDeletedSecretResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PurgeDeletedSecretResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return PurgeDeletedSecretResponse{}, runtime.NewResponseError(resp) + } + return PurgeDeletedSecretResponse{}, nil +} + +// purgeDeletedSecretCreateRequest creates the PurgeDeletedSecret request. 
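A sketch of draining the pager returned by NewListSecretsPager above; the same More/NextPage pattern applies to NewListDeletedSecretsPager and NewListSecretVersionsPager. The package name and helper are hypothetical, and the client is assumed to be constructed as in the earlier sketch.

package secretsdemo

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets"
)

// listAllSecretIDs walks every page returned by the service and collects the secret IDs.
func listAllSecretIDs(ctx context.Context, client *azsecrets.Client) ([]string, error) {
	var ids []string
	pager := client.NewListSecretsPager(nil)
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return nil, err
		}
		for _, item := range page.Value {
			if item.ID != nil {
				ids = append(ids, string(*item.ID))
			}
		}
	}
	return ids, nil
}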
+func (client *Client) purgeDeletedSecretCreateRequest(ctx context.Context, name string, options *PurgeDeletedSecretOptions) (*policy.Request, error) { + urlPath := "/deletedsecrets/{secret-name}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// RecoverDeletedSecret - Recovers the deleted secret in the specified vault. This operation can only be performed on a soft-delete +// enabled vault. This operation requires the secrets/recover permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the deleted secret. +// - options - RecoverDeletedSecretOptions contains the optional parameters for the Client.RecoverDeletedSecret method. +func (client *Client) RecoverDeletedSecret(ctx context.Context, name string, options *RecoverDeletedSecretOptions) (RecoverDeletedSecretResponse, error) { + req, err := client.recoverDeletedSecretCreateRequest(ctx, name, options) + if err != nil { + return RecoverDeletedSecretResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RecoverDeletedSecretResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RecoverDeletedSecretResponse{}, runtime.NewResponseError(resp) + } + return client.recoverDeletedSecretHandleResponse(resp) +} + +// recoverDeletedSecretCreateRequest creates the RecoverDeletedSecret request. +func (client *Client) recoverDeletedSecretCreateRequest(ctx context.Context, name string, options *RecoverDeletedSecretOptions) (*policy.Request, error) { + urlPath := "/deletedsecrets/{secret-name}/recover" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// recoverDeletedSecretHandleResponse handles the RecoverDeletedSecret response. +func (client *Client) recoverDeletedSecretHandleResponse(resp *http.Response) (RecoverDeletedSecretResponse, error) { + result := RecoverDeletedSecretResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SecretBundle); err != nil { + return RecoverDeletedSecretResponse{}, err + } + return result, nil +} + +// RestoreSecret - Restores a backed up secret, and all its versions, to a vault. This operation requires the secrets/restore +// permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - parameters - The parameters to restore the secret. +// - options - RestoreSecretOptions contains the optional parameters for the Client.RestoreSecret method. 
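The recover and purge operations above combine into the usual soft-delete flow. A sketch under stated assumptions: the secret was already deleted via DeleteSecret (defined earlier in this file and not shown in this hunk), the vault has soft-delete enabled, and the helper name is hypothetical.

package secretsdemo

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets"
)

// recoverOrPurge either restores a deleted secret or removes it permanently.
// Deletion is not instantaneous on the service side, so GetDeletedSecret may
// fail until the secret is visible in the deleted state.
func recoverOrPurge(ctx context.Context, client *azsecrets.Client, name string, keep bool) error {
	if _, err := client.GetDeletedSecret(ctx, name, nil); err != nil {
		return err
	}
	if keep {
		_, err := client.RecoverDeletedSecret(ctx, name, nil)
		return err
	}
	// Requires the secrets/purge permission.
	_, err := client.PurgeDeletedSecret(ctx, name, nil)
	return err
}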
+func (client *Client) RestoreSecret(ctx context.Context, parameters RestoreSecretParameters, options *RestoreSecretOptions) (RestoreSecretResponse, error) { + req, err := client.restoreSecretCreateRequest(ctx, parameters, options) + if err != nil { + return RestoreSecretResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RestoreSecretResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RestoreSecretResponse{}, runtime.NewResponseError(resp) + } + return client.restoreSecretHandleResponse(resp) +} + +// restoreSecretCreateRequest creates the RestoreSecret request. +func (client *Client) restoreSecretCreateRequest(ctx context.Context, parameters RestoreSecretParameters, options *RestoreSecretOptions) (*policy.Request, error) { + urlPath := "/secrets/restore" + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// restoreSecretHandleResponse handles the RestoreSecret response. +func (client *Client) restoreSecretHandleResponse(resp *http.Response) (RestoreSecretResponse, error) { + result := RestoreSecretResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SecretBundle); err != nil { + return RestoreSecretResponse{}, err + } + return result, nil +} + +// SetSecret - The SET operation adds a secret to the Azure Key Vault. If the named secret already exists, Azure Key Vault +// creates a new version of that secret. This operation requires the secrets/set permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the secret. The value you provide may be copied globally for the purpose of running the service. +// The value provided should not include personally identifiable or sensitive information. +// - parameters - The parameters for setting the secret. +// - options - SetSecretOptions contains the optional parameters for the Client.SetSecret method. +func (client *Client) SetSecret(ctx context.Context, name string, parameters SetSecretParameters, options *SetSecretOptions) (SetSecretResponse, error) { + req, err := client.setSecretCreateRequest(ctx, name, parameters, options) + if err != nil { + return SetSecretResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return SetSecretResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return SetSecretResponse{}, runtime.NewResponseError(resp) + } + return client.setSecretHandleResponse(resp) +} + +// setSecretCreateRequest creates the SetSecret request. 
+func (client *Client) setSecretCreateRequest(ctx context.Context, name string, parameters SetSecretParameters, options *SetSecretOptions) (*policy.Request, error) { + urlPath := "/secrets/{secret-name}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// setSecretHandleResponse handles the SetSecret response. +func (client *Client) setSecretHandleResponse(resp *http.Response) (SetSecretResponse, error) { + result := SetSecretResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SecretBundle); err != nil { + return SetSecretResponse{}, err + } + return result, nil +} + +// UpdateSecret - The UPDATE operation changes specified attributes of an existing stored secret. Attributes that are not +// specified in the request are left unchanged. The value of a secret itself cannot be changed. +// This operation requires the secrets/set permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the secret. +// - version - The version of the secret. +// - parameters - The parameters for update secret operation. +// - options - UpdateSecretOptions contains the optional parameters for the Client.UpdateSecret method. +func (client *Client) UpdateSecret(ctx context.Context, name string, version string, parameters UpdateSecretParameters, options *UpdateSecretOptions) (UpdateSecretResponse, error) { + req, err := client.updateSecretCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return UpdateSecretResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return UpdateSecretResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return UpdateSecretResponse{}, runtime.NewResponseError(resp) + } + return client.updateSecretHandleResponse(resp) +} + +// updateSecretCreateRequest creates the UpdateSecret request. +func (client *Client) updateSecretCreateRequest(ctx context.Context, name string, version string, parameters UpdateSecretParameters, options *UpdateSecretOptions) (*policy.Request, error) { + urlPath := "/secrets/{secret-name}/{secret-version}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{secret-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// updateSecretHandleResponse handles the UpdateSecret response. 
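A sketch tying together SetSecret and UpdateSecret as defined above; the parameter types are the ones declared later in this package's models.go, the content type is an illustrative value, and the helper name is hypothetical.

package secretsdemo

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets"
)

// setThenUpdate stores a value with SetSecret (creating a new version if the name already
// exists) and then patches metadata on that exact version with UpdateSecret; the secret
// value itself cannot be changed by UpdateSecret.
func setThenUpdate(ctx context.Context, client *azsecrets.Client, name, value string) error {
	setResp, err := client.SetSecret(ctx, name, azsecrets.SetSecretParameters{Value: &value}, nil)
	if err != nil {
		return err
	}
	contentType := "text/plain" // illustrative content type
	_, err = client.UpdateSecret(ctx, name, setResp.ID.Version(), azsecrets.UpdateSecretParameters{
		ContentType: &contentType,
	}, nil)
	return err
}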
+func (client *Client) updateSecretHandleResponse(resp *http.Response) (UpdateSecretResponse, error) { + result := UpdateSecretResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SecretBundle); err != nil { + return UpdateSecretResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/constants.go new file mode 100644 index 000000000..d897d67a1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/constants.go @@ -0,0 +1,63 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azsecrets + +// DeletionRecoveryLevel - Reflects the deletion recovery level currently in effect for secrets in the current vault. If it +// contains 'Purgeable', the secret can be permanently deleted by a privileged user; otherwise, only the +// system can purge the secret, at the end of the retention interval. +type DeletionRecoveryLevel string + +const ( + // DeletionRecoveryLevelCustomizedRecoverable - Denotes a vault state in which deletion is recoverable without the possibility + // for immediate and permanent deletion (i.e. purge when 7<= SoftDeleteRetentionInDays < 90).This level guarantees the recoverability + // of the deleted entity during the retention interval and while the subscription is still available. + DeletionRecoveryLevelCustomizedRecoverable DeletionRecoveryLevel = "CustomizedRecoverable" + // DeletionRecoveryLevelCustomizedRecoverableProtectedSubscription - Denotes a vault and subscription state in which deletion + // is recoverable, immediate and permanent deletion (i.e. purge) is not permitted, and in which the subscription itself cannot + // be permanently canceled when 7<= SoftDeleteRetentionInDays < 90. This level guarantees the recoverability of the deleted + // entity during the retention interval, and also reflects the fact that the subscription itself cannot be cancelled. + DeletionRecoveryLevelCustomizedRecoverableProtectedSubscription DeletionRecoveryLevel = "CustomizedRecoverable+ProtectedSubscription" + // DeletionRecoveryLevelCustomizedRecoverablePurgeable - Denotes a vault state in which deletion is recoverable, and which + // also permits immediate and permanent deletion (i.e. purge when 7<= SoftDeleteRetentionInDays < 90). This level guarantees + // the recoverability of the deleted entity during the retention interval, unless a Purge operation is requested, or the subscription + // is cancelled. + DeletionRecoveryLevelCustomizedRecoverablePurgeable DeletionRecoveryLevel = "CustomizedRecoverable+Purgeable" + // DeletionRecoveryLevelPurgeable - Denotes a vault state in which deletion is an irreversible operation, without the possibility + // for recovery. This level corresponds to no protection being available against a Delete operation; the data is irretrievably + // lost upon accepting a Delete operation at the entity level or higher (vault, resource group, subscription etc.) 
+ DeletionRecoveryLevelPurgeable DeletionRecoveryLevel = "Purgeable" + // DeletionRecoveryLevelRecoverable - Denotes a vault state in which deletion is recoverable without the possibility for immediate + // and permanent deletion (i.e. purge). This level guarantees the recoverability of the deleted entity during the retention + // interval(90 days) and while the subscription is still available. System wil permanently delete it after 90 days, if not + // recovered + DeletionRecoveryLevelRecoverable DeletionRecoveryLevel = "Recoverable" + // DeletionRecoveryLevelRecoverableProtectedSubscription - Denotes a vault and subscription state in which deletion is recoverable + // within retention interval (90 days), immediate and permanent deletion (i.e. purge) is not permitted, and in which the subscription + // itself cannot be permanently canceled. System wil permanently delete it after 90 days, if not recovered + DeletionRecoveryLevelRecoverableProtectedSubscription DeletionRecoveryLevel = "Recoverable+ProtectedSubscription" + // DeletionRecoveryLevelRecoverablePurgeable - Denotes a vault state in which deletion is recoverable, and which also permits + // immediate and permanent deletion (i.e. purge). This level guarantees the recoverability of the deleted entity during the + // retention interval (90 days), unless a Purge operation is requested, or the subscription is cancelled. System wil permanently + // delete it after 90 days, if not recovered + DeletionRecoveryLevelRecoverablePurgeable DeletionRecoveryLevel = "Recoverable+Purgeable" +) + +// PossibleDeletionRecoveryLevelValues returns the possible values for the DeletionRecoveryLevel const type. +func PossibleDeletionRecoveryLevelValues() []DeletionRecoveryLevel { + return []DeletionRecoveryLevel{ + DeletionRecoveryLevelCustomizedRecoverable, + DeletionRecoveryLevelCustomizedRecoverableProtectedSubscription, + DeletionRecoveryLevelCustomizedRecoverablePurgeable, + DeletionRecoveryLevelPurgeable, + DeletionRecoveryLevelRecoverable, + DeletionRecoveryLevelRecoverableProtectedSubscription, + DeletionRecoveryLevelRecoverablePurgeable, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/custom_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/custom_client.go new file mode 100644 index 000000000..04500bd08 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/custom_client.go @@ -0,0 +1,63 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azsecrets + +// this file contains handwritten additions to the generated code + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal" +) + +// ClientOptions contains optional settings for Client. +type ClientOptions struct { + azcore.ClientOptions + + // DisableChallengeResourceVerification controls whether the policy requires the + // authentication challenge resource to match the Key Vault or Managed HSM domain. + // See https://aka.ms/azsdk/blog/vault-uri for more information. + DisableChallengeResourceVerification bool +} + +// NewClient creates a client that accesses a Key Vault's secrets. You should validate that +// vaultURL references a valid Key Vault. 
See https://aka.ms/azsdk/blog/vault-uri for details. +func NewClient(vaultURL string, credential azcore.TokenCredential, options *ClientOptions) (*Client, error) { + if options == nil { + options = &ClientOptions{} + } + authPolicy := internal.NewKeyVaultChallengePolicy( + credential, + &internal.KeyVaultChallengePolicyOptions{ + DisableChallengeResourceVerification: options.DisableChallengeResourceVerification, + }, + ) + azcoreClient, err := azcore.NewClient("azsecrets.Client", version, runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}, &options.ClientOptions) + if err != nil { + return nil, err + } + return &Client{endpoint: vaultURL, internal: azcoreClient}, nil +} + +// ID is a secret's unique ID, containing its name and version. +type ID string + +// Name of the secret. +func (i *ID) Name() string { + _, name, _ := internal.ParseID((*string)(i)) + return *name +} + +// Version of the secret. This returns an empty string when the ID contains no version. +func (i *ID) Version() string { + _, _, version := internal.ParseID((*string)(i)) + if version == nil { + return "" + } + return *version +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/models.go new file mode 100644 index 000000000..87be9ab52 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/models.go @@ -0,0 +1,268 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azsecrets + +import "time" + +// BackupSecretResult - The backup secret result, containing the backup blob. +type BackupSecretResult struct { + // READ-ONLY; The backup blob containing the backed up secret. + Value []byte `json:"value,omitempty" azure:"ro"` +} + +// BackupSecretOptions contains the optional parameters for the Client.BackupSecret method. +type BackupSecretOptions struct { + // placeholder for future optional parameters +} + +// DeleteSecretOptions contains the optional parameters for the Client.DeleteSecret method. +type DeleteSecretOptions struct { + // placeholder for future optional parameters +} + +// GetDeletedSecretOptions contains the optional parameters for the Client.GetDeletedSecret method. +type GetDeletedSecretOptions struct { + // placeholder for future optional parameters +} + +// GetSecretOptions contains the optional parameters for the Client.GetSecret method. +type GetSecretOptions struct { + // placeholder for future optional parameters +} + +// ListDeletedSecretsOptions contains the optional parameters for the Client.NewListDeletedSecretsPager method. +type ListDeletedSecretsOptions struct { + // Maximum number of results to return in a page. If not specified the service will return up to 25 results. + MaxResults *int32 +} + +// ListSecretVersionsOptions contains the optional parameters for the Client.NewListSecretVersionsPager method. +type ListSecretVersionsOptions struct { + // Maximum number of results to return in a page. If not specified, the service will return up to 25 results. + MaxResults *int32 +} + +// ListSecretsOptions contains the optional parameters for the Client.NewListSecretsPager method. 
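The ID helpers above split a secret identifier returned by the service into its name and version; a small sketch, with an illustrative identifier value.

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets"
)

func main() {
	// Illustrative identifier; in practice it comes from a response, e.g. *resp.ID.
	id := azsecrets.ID("https://myvault.vault.azure.net/secrets/my-secret/2f1c2f9d8e0b4a7e9c1d3b5a7f9e0c2d")
	fmt.Println(id.Name())    // my-secret
	fmt.Println(id.Version()) // 2f1c2f9d8e0b4a7e9c1d3b5a7f9e0c2d
}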
+type ListSecretsOptions struct { + // Maximum number of results to return in a page. If not specified, the service will return up to 25 results. + MaxResults *int32 +} + +// PurgeDeletedSecretOptions contains the optional parameters for the Client.PurgeDeletedSecret method. +type PurgeDeletedSecretOptions struct { + // placeholder for future optional parameters +} + +// RecoverDeletedSecretOptions contains the optional parameters for the Client.RecoverDeletedSecret method. +type RecoverDeletedSecretOptions struct { + // placeholder for future optional parameters +} + +// RestoreSecretOptions contains the optional parameters for the Client.RestoreSecret method. +type RestoreSecretOptions struct { + // placeholder for future optional parameters +} + +// SetSecretOptions contains the optional parameters for the Client.SetSecret method. +type SetSecretOptions struct { + // placeholder for future optional parameters +} + +// UpdateSecretOptions contains the optional parameters for the Client.UpdateSecret method. +type UpdateSecretOptions struct { + // placeholder for future optional parameters +} + +// DeletedSecretBundle - A Deleted Secret consisting of its previous id, attributes and its tags, as well as information on +// when it will be purged. +type DeletedSecretBundle struct { + // The secret management attributes. + Attributes *SecretAttributes `json:"attributes,omitempty"` + + // The content type of the secret. + ContentType *string `json:"contentType,omitempty"` + + // The secret id. + ID *ID `json:"id,omitempty"` + + // The url of the recovery object, used to identify and recover the deleted secret. + RecoveryID *string `json:"recoveryId,omitempty"` + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags,omitempty"` + + // The secret value. + Value *string `json:"value,omitempty"` + + // READ-ONLY; The time when the secret was deleted, in UTC + DeletedDate *time.Time `json:"deletedDate,omitempty" azure:"ro"` + + // READ-ONLY; If this is a secret backing a KV certificate, then this field specifies the corresponding key backing the KV + // certificate. + Kid *string `json:"kid,omitempty" azure:"ro"` + + // READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a secret backing a certificate, then managed + // will be true. + Managed *bool `json:"managed,omitempty" azure:"ro"` + + // READ-ONLY; The time when the secret is scheduled to be purged, in UTC + ScheduledPurgeDate *time.Time `json:"scheduledPurgeDate,omitempty" azure:"ro"` +} + +// DeletedSecretItem - The deleted secret item containing metadata about the deleted secret. +type DeletedSecretItem struct { + // The secret management attributes. + Attributes *SecretAttributes `json:"attributes,omitempty"` + + // Type of the secret value such as a password. + ContentType *string `json:"contentType,omitempty"` + + // Secret identifier. + ID *ID `json:"id,omitempty"` + + // The url of the recovery object, used to identify and recover the deleted secret. + RecoveryID *string `json:"recoveryId,omitempty"` + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; The time when the secret was deleted, in UTC + DeletedDate *time.Time `json:"deletedDate,omitempty" azure:"ro"` + + // READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a key backing a certificate, then managed + // will be true. 
+ Managed *bool `json:"managed,omitempty" azure:"ro"` + + // READ-ONLY; The time when the secret is scheduled to be purged, in UTC + ScheduledPurgeDate *time.Time `json:"scheduledPurgeDate,omitempty" azure:"ro"` +} + +// DeletedSecretListResult - The deleted secret list result +type DeletedSecretListResult struct { + // READ-ONLY; The URL to get the next set of deleted secrets. + NextLink *string `json:"nextLink,omitempty" azure:"ro"` + + // READ-ONLY; A response message containing a list of the deleted secrets in the vault along with a link to the next page + // of deleted secrets + Value []*DeletedSecretItem `json:"value,omitempty" azure:"ro"` +} + +// RestoreSecretParameters - The secret restore parameters. +type RestoreSecretParameters struct { + // REQUIRED; The backup blob associated with a secret bundle. + SecretBundleBackup []byte `json:"value,omitempty"` +} + +// SecretAttributes - The secret management attributes. +type SecretAttributes struct { + // Determines whether the object is enabled. + Enabled *bool `json:"enabled,omitempty"` + + // Expiry date in UTC. + Expires *time.Time `json:"exp,omitempty"` + + // Not before date in UTC. + NotBefore *time.Time `json:"nbf,omitempty"` + + // READ-ONLY; Creation time in UTC. + Created *time.Time `json:"created,omitempty" azure:"ro"` + + // READ-ONLY; softDelete data retention days. Value should be >=7 and <=90 when softDelete enabled, otherwise 0. + RecoverableDays *int32 `json:"recoverableDays,omitempty" azure:"ro"` + + // READ-ONLY; Reflects the deletion recovery level currently in effect for secrets in the current vault. If it contains 'Purgeable', + // the secret can be permanently deleted by a privileged user; otherwise, only the + // system can purge the secret, at the end of the retention interval. + RecoveryLevel *DeletionRecoveryLevel `json:"recoveryLevel,omitempty" azure:"ro"` + + // READ-ONLY; Last updated time in UTC. + Updated *time.Time `json:"updated,omitempty" azure:"ro"` +} + +// SecretBundle - A secret consisting of a value, id and its attributes. +type SecretBundle struct { + // The secret management attributes. + Attributes *SecretAttributes `json:"attributes,omitempty"` + + // The content type of the secret. + ContentType *string `json:"contentType,omitempty"` + + // The secret id. + ID *ID `json:"id,omitempty"` + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags,omitempty"` + + // The secret value. + Value *string `json:"value,omitempty"` + + // READ-ONLY; If this is a secret backing a KV certificate, then this field specifies the corresponding key backing the KV + // certificate. + Kid *string `json:"kid,omitempty" azure:"ro"` + + // READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a secret backing a certificate, then managed + // will be true. + Managed *bool `json:"managed,omitempty" azure:"ro"` +} + +// SecretItem - The secret item containing secret metadata. +type SecretItem struct { + // The secret management attributes. + Attributes *SecretAttributes `json:"attributes,omitempty"` + + // Type of the secret value such as a password. + ContentType *string `json:"contentType,omitempty"` + + // Secret identifier. + ID *ID `json:"id,omitempty"` + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a key backing a certificate, then managed + // will be true. 
+ Managed *bool `json:"managed,omitempty" azure:"ro"` +} + +// SecretListResult - The secret list result. +type SecretListResult struct { + // READ-ONLY; The URL to get the next set of secrets. + NextLink *string `json:"nextLink,omitempty" azure:"ro"` + + // READ-ONLY; A response message containing a list of secrets in the key vault along with a link to the next page of secrets. + Value []*SecretItem `json:"value,omitempty" azure:"ro"` +} + +// SetSecretParameters - The secret set parameters. +type SetSecretParameters struct { + // REQUIRED; The value of the secret. + Value *string `json:"value,omitempty"` + + // Type of the secret value such as a password. + ContentType *string `json:"contentType,omitempty"` + + // The secret management attributes. + SecretAttributes *SecretAttributes `json:"attributes,omitempty"` + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags,omitempty"` +} + +// UpdateSecretParameters - The secret update parameters. +type UpdateSecretParameters struct { + // Type of the secret value such as a password. + ContentType *string `json:"contentType,omitempty"` + + // The secret management attributes. + SecretAttributes *SecretAttributes `json:"attributes,omitempty"` + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/models_serde.go new file mode 100644 index 000000000..72d554127 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/models_serde.go @@ -0,0 +1,501 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azsecrets + +import ( + "encoding/json" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "reflect" +) + +// MarshalJSON implements the json.Marshaller interface for type BackupSecretResult. +func (b BackupSecretResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateByteArray(objectMap, "value", b.Value, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type BackupSecretResult. +func (b *BackupSecretResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = runtime.DecodeByteArray(string(val), &b.Value, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DeletedSecretBundle. 
+func (d DeletedSecretBundle) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", d.Attributes) + populate(objectMap, "contentType", d.ContentType) + populateTimeUnix(objectMap, "deletedDate", d.DeletedDate) + populate(objectMap, "id", d.ID) + populate(objectMap, "kid", d.Kid) + populate(objectMap, "managed", d.Managed) + populate(objectMap, "recoveryId", d.RecoveryID) + populateTimeUnix(objectMap, "scheduledPurgeDate", d.ScheduledPurgeDate) + populate(objectMap, "tags", d.Tags) + populate(objectMap, "value", d.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DeletedSecretBundle. +func (d *DeletedSecretBundle) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &d.Attributes) + delete(rawMsg, key) + case "contentType": + err = unpopulate(val, "ContentType", &d.ContentType) + delete(rawMsg, key) + case "deletedDate": + err = unpopulateTimeUnix(val, "DeletedDate", &d.DeletedDate) + delete(rawMsg, key) + case "id": + err = unpopulate(val, "ID", &d.ID) + delete(rawMsg, key) + case "kid": + err = unpopulate(val, "Kid", &d.Kid) + delete(rawMsg, key) + case "managed": + err = unpopulate(val, "Managed", &d.Managed) + delete(rawMsg, key) + case "recoveryId": + err = unpopulate(val, "RecoveryID", &d.RecoveryID) + delete(rawMsg, key) + case "scheduledPurgeDate": + err = unpopulateTimeUnix(val, "ScheduledPurgeDate", &d.ScheduledPurgeDate) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &d.Tags) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &d.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DeletedSecretItem. +func (d DeletedSecretItem) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", d.Attributes) + populate(objectMap, "contentType", d.ContentType) + populateTimeUnix(objectMap, "deletedDate", d.DeletedDate) + populate(objectMap, "id", d.ID) + populate(objectMap, "managed", d.Managed) + populate(objectMap, "recoveryId", d.RecoveryID) + populateTimeUnix(objectMap, "scheduledPurgeDate", d.ScheduledPurgeDate) + populate(objectMap, "tags", d.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DeletedSecretItem. 
+func (d *DeletedSecretItem) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &d.Attributes) + delete(rawMsg, key) + case "contentType": + err = unpopulate(val, "ContentType", &d.ContentType) + delete(rawMsg, key) + case "deletedDate": + err = unpopulateTimeUnix(val, "DeletedDate", &d.DeletedDate) + delete(rawMsg, key) + case "id": + err = unpopulate(val, "ID", &d.ID) + delete(rawMsg, key) + case "managed": + err = unpopulate(val, "Managed", &d.Managed) + delete(rawMsg, key) + case "recoveryId": + err = unpopulate(val, "RecoveryID", &d.RecoveryID) + delete(rawMsg, key) + case "scheduledPurgeDate": + err = unpopulateTimeUnix(val, "ScheduledPurgeDate", &d.ScheduledPurgeDate) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &d.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DeletedSecretListResult. +func (d DeletedSecretListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", d.NextLink) + populate(objectMap, "value", d.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DeletedSecretListResult. +func (d *DeletedSecretListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &d.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &d.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type RestoreSecretParameters. +func (r RestoreSecretParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateByteArray(objectMap, "value", r.SecretBundleBackup, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type RestoreSecretParameters. +func (r *RestoreSecretParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = runtime.DecodeByteArray(string(val), &r.SecretBundleBackup, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SecretAttributes. 
+func (s SecretAttributes) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateTimeUnix(objectMap, "created", s.Created) + populate(objectMap, "enabled", s.Enabled) + populateTimeUnix(objectMap, "exp", s.Expires) + populateTimeUnix(objectMap, "nbf", s.NotBefore) + populate(objectMap, "recoverableDays", s.RecoverableDays) + populate(objectMap, "recoveryLevel", s.RecoveryLevel) + populateTimeUnix(objectMap, "updated", s.Updated) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SecretAttributes. +func (s *SecretAttributes) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "created": + err = unpopulateTimeUnix(val, "Created", &s.Created) + delete(rawMsg, key) + case "enabled": + err = unpopulate(val, "Enabled", &s.Enabled) + delete(rawMsg, key) + case "exp": + err = unpopulateTimeUnix(val, "Expires", &s.Expires) + delete(rawMsg, key) + case "nbf": + err = unpopulateTimeUnix(val, "NotBefore", &s.NotBefore) + delete(rawMsg, key) + case "recoverableDays": + err = unpopulate(val, "RecoverableDays", &s.RecoverableDays) + delete(rawMsg, key) + case "recoveryLevel": + err = unpopulate(val, "RecoveryLevel", &s.RecoveryLevel) + delete(rawMsg, key) + case "updated": + err = unpopulateTimeUnix(val, "Updated", &s.Updated) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SecretBundle. +func (s SecretBundle) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", s.Attributes) + populate(objectMap, "contentType", s.ContentType) + populate(objectMap, "id", s.ID) + populate(objectMap, "kid", s.Kid) + populate(objectMap, "managed", s.Managed) + populate(objectMap, "tags", s.Tags) + populate(objectMap, "value", s.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SecretBundle. +func (s *SecretBundle) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &s.Attributes) + delete(rawMsg, key) + case "contentType": + err = unpopulate(val, "ContentType", &s.ContentType) + delete(rawMsg, key) + case "id": + err = unpopulate(val, "ID", &s.ID) + delete(rawMsg, key) + case "kid": + err = unpopulate(val, "Kid", &s.Kid) + delete(rawMsg, key) + case "managed": + err = unpopulate(val, "Managed", &s.Managed) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &s.Tags) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &s.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SecretItem. 
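The hand-rolled marshalers above encode attribute timestamps as Unix seconds under the wire names exp/nbf/created/updated (via the timeUnix helpers later in this patch). A quick round-trip sketch; the expiry timestamp is illustrative.

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets"
)

func main() {
	enabled := true
	expires := time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC) // illustrative expiry
	attrs := azsecrets.SecretAttributes{Enabled: &enabled, Expires: &expires}

	b, err := json.Marshal(attrs)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // e.g. {"enabled":true,"exp":1735689600}

	var decoded azsecrets.SecretAttributes
	if err := json.Unmarshal(b, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Expires.UTC()) // 2025-01-01 00:00:00 +0000 UTC
}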
+func (s SecretItem) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", s.Attributes) + populate(objectMap, "contentType", s.ContentType) + populate(objectMap, "id", s.ID) + populate(objectMap, "managed", s.Managed) + populate(objectMap, "tags", s.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SecretItem. +func (s *SecretItem) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &s.Attributes) + delete(rawMsg, key) + case "contentType": + err = unpopulate(val, "ContentType", &s.ContentType) + delete(rawMsg, key) + case "id": + err = unpopulate(val, "ID", &s.ID) + delete(rawMsg, key) + case "managed": + err = unpopulate(val, "Managed", &s.Managed) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &s.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SecretListResult. +func (s SecretListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", s.NextLink) + populate(objectMap, "value", s.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SecretListResult. +func (s *SecretListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &s.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &s.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SetSecretParameters. +func (s SetSecretParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "contentType", s.ContentType) + populate(objectMap, "attributes", s.SecretAttributes) + populate(objectMap, "tags", s.Tags) + populate(objectMap, "value", s.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SetSecretParameters. +func (s *SetSecretParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "contentType": + err = unpopulate(val, "ContentType", &s.ContentType) + delete(rawMsg, key) + case "attributes": + err = unpopulate(val, "SecretAttributes", &s.SecretAttributes) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &s.Tags) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &s.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type UpdateSecretParameters. 
+func (u UpdateSecretParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "contentType", u.ContentType) + populate(objectMap, "attributes", u.SecretAttributes) + populate(objectMap, "tags", u.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type UpdateSecretParameters. +func (u *UpdateSecretParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "contentType": + err = unpopulate(val, "ContentType", &u.ContentType) + delete(rawMsg, key) + case "attributes": + err = unpopulate(val, "SecretAttributes", &u.SecretAttributes) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &u.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + } + return nil +} + +func populate(m map[string]any, k string, v any) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else if !reflect.ValueOf(v).IsNil() { + m[k] = v + } +} + +func populateByteArray(m map[string]any, k string, b []byte, f runtime.Base64Encoding) { + if azcore.IsNullValue(b) { + m[k] = nil + } else if len(b) == 0 { + return + } else { + m[k] = runtime.EncodeByteArray(b, f) + } +} + +func unpopulate(data json.RawMessage, fn string, v any) error { + if data == nil { + return nil + } + if err := json.Unmarshal(data, v); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/response_types.go new file mode 100644 index 000000000..517acaabb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/response_types.go @@ -0,0 +1,70 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azsecrets + +// BackupSecretResponse contains the response from method Client.BackupSecret. +type BackupSecretResponse struct { + BackupSecretResult +} + +// DeleteSecretResponse contains the response from method Client.DeleteSecret. +type DeleteSecretResponse struct { + DeletedSecretBundle +} + +// GetDeletedSecretResponse contains the response from method Client.GetDeletedSecret. +type GetDeletedSecretResponse struct { + DeletedSecretBundle +} + +// GetSecretResponse contains the response from method Client.GetSecret. +type GetSecretResponse struct { + SecretBundle +} + +// ListDeletedSecretsResponse contains the response from method Client.NewListDeletedSecretsPager. +type ListDeletedSecretsResponse struct { + DeletedSecretListResult +} + +// ListSecretVersionsResponse contains the response from method Client.NewListSecretVersionsPager. +type ListSecretVersionsResponse struct { + SecretListResult +} + +// ListSecretsResponse contains the response from method Client.NewListSecretsPager. 
+type ListSecretsResponse struct { + SecretListResult +} + +// PurgeDeletedSecretResponse contains the response from method Client.PurgeDeletedSecret. +type PurgeDeletedSecretResponse struct { + // placeholder for future response values +} + +// RecoverDeletedSecretResponse contains the response from method Client.RecoverDeletedSecret. +type RecoverDeletedSecretResponse struct { + SecretBundle +} + +// RestoreSecretResponse contains the response from method Client.RestoreSecret. +type RestoreSecretResponse struct { + SecretBundle +} + +// SetSecretResponse contains the response from method Client.SetSecret. +type SetSecretResponse struct { + SecretBundle +} + +// UpdateSecretResponse contains the response from method Client.UpdateSecret. +type UpdateSecretResponse struct { + SecretBundle +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/test-resources.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/test-resources.json new file mode 100644 index 000000000..20f726f33 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/test-resources.json @@ -0,0 +1,331 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "baseName": { + "type": "string", + "defaultValue": "[resourceGroup().name]", + "metadata": { + "description": "The base resource name." + } + }, + "tenantId": { + "type": "string", + "defaultValue": "72f988bf-86f1-41af-91ab-2d7cd011db47", + "metadata": { + "description": "The tenant ID to which the application and resources belong." + } + }, + "testApplicationOid": { + "type": "string", + "metadata": { + "description": "The client OID to grant access to test resources." + } + }, + "provisionerApplicationOid": { + "type": "string", + "metadata": { + "description": "The provisioner OID to grant access to test resources." + } + }, + "location": { + "type": "string", + "defaultValue": "[resourceGroup().location]", + "metadata": { + "description": "The location of the resource. By default, this is the same as the resource group." + } + }, + "hsmLocation": { + "type": "string", + "defaultValue": "southcentralus", + "allowedValues": [ + "australiacentral", + "canadacentral", + "centralus", + "eastasia", + "eastus2", + "koreacentral", + "northeurope", + "southafricanorth", + "southcentralus", + "southeastasia", + "switzerlandnorth", + "uksouth", + "westeurope", + "westus" + ], + "metadata": { + "description": "The location of the Managed HSM. By default, this is 'southcentralus'." + } + }, + "enableHsm": { + "type": "bool", + "defaultValue": false, + "metadata": { + "description": "Whether to enable deployment of Managed HSM. The default is false." + } + }, + "keyVaultSku": { + "type": "string", + "defaultValue": "premium", + "metadata": { + "description": "Key Vault SKU to deploy. The default is 'premium'" + } + }, + "attestationImage": { + "type": "string", + "defaultValue": "keyvault-mock-attestation:latest", + "metadata": { + "description": "The container image name and tag to use for the attestation mock service." 
+ } + } + }, + "variables": { + "attestationFarm": "[concat(parameters('baseName'), 'farm')]", + "attestationSite": "[concat(parameters('baseName'), 'site')]", + "attestationUri": "[concat('DOCKER|azsdkengsys.azurecr.io/', parameters('attestationImage'))]", + "kvApiVersion": "2019-09-01", + "kvName": "[parameters('baseName')]", + "hsmApiVersion": "2021-04-01-preview", + "hsmName": "[concat(parameters('baseName'), 'hsm')]", + "mgmtApiVersion": "2019-04-01", + "blobContainerName": "backup", + "primaryAccountName": "[concat(parameters('baseName'), 'prim')]", + "encryption": { + "services": { + "blob": { + "enabled": true + } + }, + "keySource": "Microsoft.Storage" + }, + "networkAcls": { + "bypass": "AzureServices", + "virtualNetworkRules": [], + "ipRules": [], + "defaultAction": "Allow" + } + }, + "resources": [ + { + "type": "Microsoft.KeyVault/vaults", + "apiVersion": "[variables('kvApiVersion')]", + "name": "[variables('kvName')]", + "location": "[parameters('location')]", + "properties": { + "sku": { + "family": "A", + "name": "[parameters('keyVaultSku')]" + }, + "tenantId": "[parameters('tenantId')]", + "accessPolicies": [ + { + "tenantId": "[parameters('tenantId')]", + "objectId": "[parameters('testApplicationOid')]", + "permissions": { + "keys": [ + "backup", + "create", + "decrypt", + "delete", + "encrypt", + "get", + "import", + "list", + "purge", + "recover", + "release", + "restore", + "rotate", + "sign", + "unwrapKey", + "update", + "verify", + "wrapKey" + ], + "secrets": [ + "backup", + "delete", + "get", + "list", + "purge", + "recover", + "restore", + "set" + ], + "certificates": [ + "backup", + "create", + "delete", + "deleteissuers", + "get", + "getissuers", + "import", + "list", + "listissuers", + "managecontacts", + "manageissuers", + "purge", + "recover", + "restore", + "setissuers", + "update" + ] + } + } + ], + "enabledForDeployment": false, + "enabledForDiskEncryption": false, + "enabledForTemplateDeployment": false, + "enableSoftDelete": true, + "softDeleteRetentionInDays": 7 + } + }, + { + "type": "Microsoft.KeyVault/managedHSMs", + "apiVersion": "[variables('hsmApiVersion')]", + "name": "[variables('hsmName')]", + "condition": "[parameters('enableHsm')]", + "location": "[parameters('hsmLocation')]", + "sku": { + "family": "B", + "name": "Standard_B1" + }, + "properties": { + "tenantId": "[parameters('tenantId')]", + "initialAdminObjectIds": "[union(array(parameters('testApplicationOid')), array(parameters('provisionerApplicationOid')))]", + "enablePurgeProtection": false, + "enableSoftDelete": true, + "softDeleteRetentionInDays": 7, + "publicNetworkAccess": "Enabled", + "networkAcls": "[variables('networkAcls')]" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('primaryAccountName')]", + "location": "[parameters('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "2019-06-01", + "name": "[concat(variables('primaryAccountName'), '/default')]", + "dependsOn": [ + "[resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName'))]" + ], + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "properties": { + "cors": { + "corsRules": [] + }, + 
"deleteRetentionPolicy": { + "enabled": false + } + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices/containers", + "apiVersion": "2019-06-01", + "name": "[concat(variables('primaryAccountName'), '/default/', variables('blobContainerName'))]", + "dependsOn": [ + "[resourceId('Microsoft.Storage/storageAccounts/blobServices', variables('primaryAccountName'), 'default')]", + "[resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName'))]" + ], + "properties": { + "publicAccess": "None" + } + }, + { + + "type": "Microsoft.Web/serverfarms", + "apiVersion": "2020-12-01", + "name": "[variables('attestationFarm')]", + "condition": "[parameters('enableHsm')]", + "location": "[parameters('location')]", + "kind": "linux", + "sku": { + "name": "B1" + }, + "properties": { + "reserved": true + } + }, + { + + "type": "Microsoft.Web/sites", + "apiVersion": "2020-12-01", + "name": "[variables('attestationSite')]", + "condition": "[parameters('enableHsm')]", + "dependsOn": [ + "[resourceId('Microsoft.Web/serverfarms', variables('attestationFarm'))]" + ], + "location": "[parameters('location')]", + "properties": { + "httpsOnly": true, + "serverFarmId": "[resourceId('Microsoft.Web/serverfarms', variables('attestationFarm'))]", + "siteConfig": { + "name": "[variables('attestationSite')]", + "alwaysOn": true, + "linuxFxVersion": "[variables('attestationUri')]", + "appSettings": [ + { + "name": "WEBSITES_ENABLE_APP_SERVICE_STORAGE", + "value": "false" + } + ] + } + } + } + ], + "outputs": { + "AZURE_KEYVAULT_URL": { + "type": "string", + "value": "[reference(variables('kvName')).vaultUri]" + }, + "AZURE_MANAGEDHSM_URL": { + "type": "string", + "condition": "[parameters('enableHsm')]", + "value": "[reference(variables('hsmName')).hsmUri]" + }, + "KEYVAULT_SKU": { + "type": "string", + "value": "[reference(parameters('baseName')).sku.name]" + }, + "CLIENT_OBJECTID": { + "type": "string", + "value": "[parameters('testApplicationOid')]" + }, + "BLOB_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('primaryAccountName')]" + }, + "BLOB_PRIMARY_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(variables('primaryAccountName'), variables('mgmtApiVersion')).keys[0].value]" + }, + "BLOB_CONTAINER_NAME" : { + "type": "string", + "value": "[variables('blobContainerName')]" + }, + "AZURE_KEYVAULT_ATTESTATION_URL": { + "type": "string", + "condition": "[parameters('enableHsm')]", + "value": "[format('https://{0}/', reference(variables('attestationSite')).defaultHostName)]" + } + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/time_unix.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/time_unix.go new file mode 100644 index 000000000..ed8ce0f9d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/time_unix.go @@ -0,0 +1,62 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package azsecrets + +import ( + "encoding/json" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "reflect" + "strings" + "time" +) + +type timeUnix time.Time + +func (t timeUnix) MarshalJSON() ([]byte, error) { + return json.Marshal(time.Time(t).Unix()) +} + +func (t *timeUnix) UnmarshalJSON(data []byte) error { + var seconds int64 + if err := json.Unmarshal(data, &seconds); err != nil { + return err + } + *t = timeUnix(time.Unix(seconds, 0)) + return nil +} + +func (t timeUnix) String() string { + return fmt.Sprintf("%d", time.Time(t).Unix()) +} + +func populateTimeUnix(m map[string]any, k string, t *time.Time) { + if t == nil { + return + } else if azcore.IsNullValue(t) { + m[k] = nil + return + } else if reflect.ValueOf(t).IsNil() { + return + } + m[k] = (*timeUnix)(t) +} + +func unpopulateTimeUnix(data json.RawMessage, fn string, t **time.Time) error { + if data == nil || strings.EqualFold(string(data), "null") { + return nil + } + var aux timeUnix + if err := json.Unmarshal(data, &aux); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + *t = (*time.Time)(&aux) + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/version.go new file mode 100644 index 000000000..885158bcb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets/version.go @@ -0,0 +1,12 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azsecrets + +const ( + moduleName = "azsecrets" + version = "v0.12.0" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/CHANGELOG.md new file mode 100644 index 000000000..95d77dc95 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/CHANGELOG.md @@ -0,0 +1,54 @@ +# Release History + +## 0.7.1 (2022-11-14) + +### Bugs Fixed +* `KeyVaultChallengePolicy` uses incorrect authentication scope when challenge verification is disabled + +## 0.7.0 (2022-09-20) + +### Breaking Changes +* Added `*KeyVaultChallengePolicyOptions` parameter to `NewKeyVaultChallengePolicy` + +## 0.6.0 (2022-09-12) + +### Breaking Changes +* Verify the challenge resource matches the vault domain. See https://aka.ms/azsdk/blog/vault-uri for more information. +* `ParseID()` no longer appends a trailing slash to vault URLs + +## 0.5.0 (2022-05-12) + +### Breaking Changes +* Removed `ExpiringResource` and its dependencies in favor of shared implementation from `internal/temporal`. + +### Other Changes +* Updated to latest versions of `azcore` and `internal`. + +## 0.4.0 (2022-04-22) + +### Breaking Changes +* Updated `ExpiringResource` and its dependent types to use generics. + +### Other Changes +* Remove reference to `TokenRequestOptions.TenantID` as it's been removed and wasn't working anyways. + +## 0.3.0 (2022-04-04) + +### Features Added +* Adds the `ParseKeyvaultID` function to parse an ID into the Key Vault URL, item name, and item version + +### Breaking Changes +* Updates to azcore v0.23.0 + +## 0.2.1 (2022-01-31) + +### Bugs Fixed +* Avoid retries on terminal failures (#16932) + +## 0.2.0 (2022-01-12) + +### Bugs Fixed +* Fixes a bug with Managed HSMs that prevented correctly authorizing requests. 
+ +## 0.1.0 (2021-11-09) +* This is the initial release of the `internal` library for KeyVault diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/LICENSE.txt new file mode 100644 index 000000000..d1ca00f20 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/LICENSE.txt @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/README.md new file mode 100644 index 000000000..bd4826705 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/README.md @@ -0,0 +1,23 @@ +# Key Vault Internal Module for Go + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal) + +This module contains shared code for all the Key Vault SDKs, mainly the challenge authentication policy. + +## Contributing +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments. 
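For context on how the vendored azsecrets package above is meant to be consumed: the armauth.KeyVaultCredential added later in this patch reads a bearer token out of a Key Vault secret using exactly this client. The following is a minimal, illustrative sketch of that call pattern only; the vault URL, secret name, and the choice of azidentity.NewDefaultAzureCredential are placeholder assumptions, not values taken from this repository.

// Illustrative sketch, not part of the vendored patch content.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets"
)

func main() {
	// Placeholder credential: any azcore.TokenCredential works here; the
	// driver itself passes in its managed identity credential.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder vault URL and secret name.
	client, err := azsecrets.NewClient("https://example-vault.vault.azure.net/", cred, nil)
	if err != nil {
		log.Fatal(err)
	}

	// An empty version string asks for the latest version of the secret.
	resp, err := client.GetSecret(context.Background(), "example-secret", "", nil)
	if err != nil {
		log.Fatal(err)
	}
	if resp.Value != nil {
		fmt.Println("secret value:", *resp.Value)
	}
}

Passing an empty version string to GetSecret selects the latest version of the secret, which is the same behavior KeyVaultCredential.refreshToken relies on further down in this patch.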
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/challenge_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/challenge_policy.go new file mode 100644 index 000000000..4cc1e429a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/challenge_policy.go @@ -0,0 +1,255 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package internal + +import ( + "bytes" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" + "github.com/Azure/azure-sdk-for-go/sdk/internal/temporal" +) + +const ( + headerAuthorization = "Authorization" + challengeMatchError = `challenge resource "%s" doesn't match the requested domain. Set DisableChallengeResourceVerification to true in your client options to disable. See https://aka.ms/azsdk/blog/vault-uri for more information` + bearerHeader = "Bearer " +) + +type KeyVaultChallengePolicyOptions struct { + // DisableChallengeResourceVerification controls whether the policy requires the + // authentication challenge resource to match the Key Vault or Managed HSM domain + DisableChallengeResourceVerification bool +} + +type KeyVaultChallengePolicy struct { + // mainResource is the resource to be retrieved using the tenant specified in the credential + mainResource *temporal.Resource[azcore.AccessToken, acquiringResourceState] + cred azcore.TokenCredential + scope *string + tenantID *string + verifyChallengeResource bool +} + +func NewKeyVaultChallengePolicy(cred azcore.TokenCredential, opts *KeyVaultChallengePolicyOptions) *KeyVaultChallengePolicy { + if opts == nil { + opts = &KeyVaultChallengePolicyOptions{} + } + return &KeyVaultChallengePolicy{ + cred: cred, + mainResource: temporal.NewResource(acquire), + verifyChallengeResource: !opts.DisableChallengeResourceVerification, + } +} + +func (k *KeyVaultChallengePolicy) Do(req *policy.Request) (*http.Response, error) { + as := acquiringResourceState{ + p: k, + req: req, + } + + if k.scope == nil || k.tenantID == nil { + // First request, get both to get the token + challengeReq, err := k.getChallengeRequest(*req) + if err != nil { + return nil, err + } + + resp, err := challengeReq.Next() + if err != nil { + return nil, err + } + + if resp.StatusCode > 399 && resp.StatusCode != http.StatusUnauthorized { + // the request failed for some other reason, don't try any further + return resp, nil + } + err = k.findScopeAndTenant(resp, req.Raw()) + if err != nil { + return nil, err + } + } + + tk, err := k.mainResource.Get(as) + if err != nil { + return nil, err + } + + req.Raw().Header.Set( + headerAuthorization, + fmt.Sprintf("%s%s", bearerHeader, tk.Token), + ) + + // send a copy of the request + cloneReq := req.Clone(req.Raw().Context()) + resp, cloneReqErr := cloneReq.Next() + if cloneReqErr != nil { + return nil, cloneReqErr + } + + // If it fails and has a 401, try it with a new token + if resp.StatusCode == 401 { + // Force a new token + k.mainResource.Expire() + + // Find the scope and tenant again in case they have changed + err := 
k.findScopeAndTenant(resp, req.Raw()) + if err != nil { + // Error parsing challenge, doomed to fail. Return + return resp, cloneReqErr + } + + tk, err := k.mainResource.Get(as) + if err != nil { + return resp, err + } + + req.Raw().Header.Set( + headerAuthorization, + bearerHeader+tk.Token, + ) + + // send the original request now + return req.Next() + } + + return resp, err +} + +// parses Tenant ID from auth challenge +// https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000 +func parseTenant(url string) *string { + if url == "" { + return to.Ptr("") + } + parts := strings.Split(url, "/") + tenant := parts[3] + tenant = strings.ReplaceAll(tenant, ",", "") + return &tenant +} + +type challengePolicyError struct { + err error +} + +func (c *challengePolicyError) Error() string { + return c.err.Error() +} + +func (*challengePolicyError) NonRetriable() { + // marker method +} + +func (c *challengePolicyError) Unwrap() error { + return c.err +} + +var _ errorinfo.NonRetriable = (*challengePolicyError)(nil) + +// sets the k.scope and k.tenantID from the WWW-Authenticate header +func (k *KeyVaultChallengePolicy) findScopeAndTenant(resp *http.Response, req *http.Request) error { + authHeader := resp.Header.Get("WWW-Authenticate") + if authHeader == "" { + return &challengePolicyError{err: errors.New("response has no WWW-Authenticate header for challenge authentication")} + } + + // Strip down to auth and resource + // Format is "Bearer authorization=\"\" resource=\"\"" OR + // "Bearer authorization=\"\" scope=\"\" resource=\"\"" + authHeader = strings.ReplaceAll(authHeader, "Bearer ", "") + + parts := strings.Split(authHeader, " ") + + vals := map[string]string{} + for _, part := range parts { + subParts := strings.Split(part, "=") + if len(subParts) == 2 { + stripped := strings.ReplaceAll(subParts[1], "\"", "") + stripped = strings.TrimSuffix(stripped, ",") + vals[subParts[0]] = stripped + } + } + + k.tenantID = parseTenant(vals["authorization"]) + scope := "" + if v, ok := vals["scope"]; ok { + scope = v + } else if v, ok := vals["resource"]; ok { + scope = v + } + if scope == "" { + return &challengePolicyError{err: errors.New("could not find a valid resource in the WWW-Authenticate header")} + } + if k.verifyChallengeResource { + // the challenge resource's host must match the requested vault's host + parsed, err := url.Parse(scope) + if err != nil { + return &challengePolicyError{err: fmt.Errorf(`invalid challenge resource "%s": %v`, scope, err)} + } + if !strings.HasSuffix(req.URL.Host, "."+parsed.Host) { + return &challengePolicyError{err: fmt.Errorf(challengeMatchError, scope)} + } + } + if !strings.HasSuffix(scope, "/.default") { + scope += "/.default" + } + k.scope = &scope + return nil +} + +func (k KeyVaultChallengePolicy) getChallengeRequest(orig policy.Request) (*policy.Request, error) { + req, err := runtime.NewRequest(orig.Raw().Context(), orig.Raw().Method, orig.Raw().URL.String()) + if err != nil { + return nil, &challengePolicyError{err: err} + } + + req.Raw().Header = orig.Raw().Header + req.Raw().Header.Set("Content-Length", "0") + req.Raw().ContentLength = 0 + + copied := orig.Clone(orig.Raw().Context()) + copied.Raw().Body = req.Body() + copied.Raw().ContentLength = 0 + copied.Raw().Header.Set("Content-Length", "0") + err = copied.SetBody(streaming.NopCloser(bytes.NewReader([]byte{})), "application/json") + if err != nil { + return nil, &challengePolicyError{err: err} + } + copied.Raw().Header.Del("Content-Type") + + return copied, err +} + +type 
acquiringResourceState struct { + req *policy.Request + p *KeyVaultChallengePolicy +} + +// acquire acquires or updates the resource; only one +// thread/goroutine at a time ever calls this function +func acquire(state acquiringResourceState) (newResource azcore.AccessToken, newExpiration time.Time, err error) { + tk, err := state.p.cred.GetToken( + state.req.Raw().Context(), + policy.TokenRequestOptions{ + Scopes: []string{*state.p.scope}, + }, + ) + if err != nil { + return azcore.AccessToken{}, time.Time{}, err + } + return tk, tk.ExpiresOn, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/ci.keyvault.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/ci.keyvault.yml new file mode 100644 index 000000000..d72c65013 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/ci.keyvault.yml @@ -0,0 +1,28 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/keyvault/internal + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/keyvault/internal + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: 'keyvault/internal' + RunLiveTests: false diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/constants.go new file mode 100644 index 000000000..cd94eb0d8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/constants.go @@ -0,0 +1,11 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package internal + +const ( + version = "v0.7.1" //nolint +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/doc.go new file mode 100644 index 000000000..d8f93492f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package internal diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/parse.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/parse.go new file mode 100644 index 000000000..8511832d2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal/parse.go @@ -0,0 +1,37 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+package internal + +import ( + "fmt" + "net/url" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" +) + +// ParseID parses "https://myvaultname.vault.azure.net/keys/key1053998307/b86c2e6ad9054f4abf69cc185b99aa60" +// into "https://myvaultname.managedhsm.azure.net/", "key1053998307", and "b86c2e6ad9054f4abf69cc185b99aa60" +func ParseID(id *string) (*string, *string, *string) { + if id == nil { + return nil, nil, nil + } + parsed, err := url.Parse(*id) + if err != nil { + return nil, nil, nil + } + + url := fmt.Sprintf("%s://%s", parsed.Scheme, parsed.Host) + split := strings.Split(strings.TrimPrefix(parsed.Path, "/"), "/") + if len(split) < 3 { + if len(split) == 2 { + return &url, to.Ptr(split[1]), nil + } + return &url, nil, nil + } + + return &url, to.Ptr(split[1]), to.Ptr(split[2]) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 546d46cf5..c7feb66b7 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -47,6 +47,12 @@ github.com/Azure/azure-sdk-for-go/sdk/internal/log github.com/Azure/azure-sdk-for-go/sdk/internal/poller github.com/Azure/azure-sdk-for-go/sdk/internal/temporal github.com/Azure/azure-sdk-for-go/sdk/internal/uuid +# github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets +# github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal # github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 @@ -1613,11 +1619,12 @@ sigs.k8s.io/cloud-provider-azure/pkg/util/string sigs.k8s.io/cloud-provider-azure/pkg/util/taints sigs.k8s.io/cloud-provider-azure/pkg/util/vm sigs.k8s.io/cloud-provider-azure/pkg/version -# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.7 +# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.9 ## explicit; go 1.20 sigs.k8s.io/cloud-provider-azure/pkg/azclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient/mock_accountclient +sigs.k8s.io/cloud-provider-azure/pkg/azclient/armauth sigs.k8s.io/cloud-provider-azure/pkg/azclient/availabilitysetclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobcontainerclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/blobcontainerclient/mock_blobcontainerclient diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/arm_conf.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/arm_conf.go index e533cf669..d2e654c85 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/arm_conf.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/arm_conf.go @@ -62,3 +62,7 @@ func GetAzCoreClientOption(armConfig *ARMClientConfig) (*policy.ClientOptions, e } return &azCoreClientConfig, nil } + +func IsMultiTenant(armConfig *ARMClientConfig) bool { + return armConfig != nil && armConfig.NetworkResourceTenantID != "" && !strings.EqualFold(armConfig.NetworkResourceTenantID, armConfig.GetTenantID()) +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/armauth/auxiliary_auth_policy.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/armauth/auxiliary_auth_policy.go new file mode 100644 index 000000000..0af24a8bf --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/armauth/auxiliary_auth_policy.go @@ -0,0 +1,59 @@ +/* +Copyright 2024 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package armauth + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +const ( + HeaderAuthorizationAuxiliary = "x-ms-authorization-auxiliary" +) + +type AuxiliaryAuthPolicy struct { + credentials []azcore.TokenCredential + scope string +} + +func NewAuxiliaryAuthPolicy(credentials []azcore.TokenCredential, scope string) *AuxiliaryAuthPolicy { + return &AuxiliaryAuthPolicy{ + credentials: credentials, + scope: scope, + } +} + +func (p *AuxiliaryAuthPolicy) Do(req *policy.Request) (*http.Response, error) { + tokens := make([]string, 0, len(p.credentials)) + + for _, cred := range p.credentials { + token, err := cred.GetToken(context.TODO(), policy.TokenRequestOptions{ + Scopes: []string{p.scope}, + }) + if err != nil { + return nil, err + } + tokens = append(tokens, fmt.Sprintf("Bearer %s", token.Token)) + } + req.Raw().Header.Set(HeaderAuthorizationAuxiliary, strings.Join(tokens, ", ")) + return req.Next() +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/armauth/keyvault_credential.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/armauth/keyvault_credential.go new file mode 100644 index 000000000..13352d729 --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/armauth/keyvault_credential.go @@ -0,0 +1,102 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package armauth + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets" +) + +type KeyVaultCredential struct { + secretClient *azsecrets.Client + secretPath string + + token *azcore.AccessToken +} + +type KeyVaultCredentialSecret struct { + AccessToken string `json:"access_token"` + ExpiresOn time.Time `json:"expires_on"` +} + +func NewKeyVaultCredential( + msiCredential azcore.TokenCredential, + keyVaultURL string, + secretName string, +) (*KeyVaultCredential, error) { + cli, err := azsecrets.NewClient(keyVaultURL, msiCredential, nil) + if err != nil { + return nil, fmt.Errorf("create KeyVault client: %w", err) + } + + rv := &KeyVaultCredential{ + secretClient: cli, + secretPath: secretName, + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + if err := rv.refreshToken(ctx); err != nil { + return nil, fmt.Errorf("refresh token: %w", err) + } + + return rv, nil +} + +func (c *KeyVaultCredential) refreshToken(ctx context.Context) error { + const LatestVersion = "" + + resp, err := c.secretClient.GetSecret(ctx, c.secretPath, LatestVersion, nil) + if err != nil { + return err + } + if resp.Value == nil { + return fmt.Errorf("secret value is nil") + } + + var secret KeyVaultCredentialSecret + if err := json.Unmarshal([]byte(*resp.Value), &secret); err != nil { + return fmt.Errorf("unmarshal secret value `%s`: %w", *resp.Value, err) + } + + c.token = &azcore.AccessToken{ + Token: secret.AccessToken, + ExpiresOn: secret.ExpiresOn, + } + + return nil +} + +func (c *KeyVaultCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + const RefreshTokenOffset = 5 * time.Minute + + if c.token != nil && c.token.ExpiresOn.Add(RefreshTokenOffset).Before(time.Now()) { + return *c.token, nil + } + + if err := c.refreshToken(ctx); err != nil { + return azcore.AccessToken{}, fmt.Errorf("refresh token: %w", err) + } + + return *c.token, nil +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth.go index 503efd3db..df6316a16 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth.go @@ -24,15 +24,21 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + + "sigs.k8s.io/cloud-provider-azure/pkg/azclient/armauth" ) type AuthProvider struct { - FederatedIdentityCredential azcore.TokenCredential - ManagedIdentityCredential azcore.TokenCredential - ClientSecretCredential azcore.TokenCredential + FederatedIdentityCredential azcore.TokenCredential + + ManagedIdentityCredential azcore.TokenCredential + ClientSecretCredential azcore.TokenCredential + ClientCertificateCredential azcore.TokenCredential + + NetworkTokenCredential azcore.TokenCredential NetworkClientSecretCredential azcore.TokenCredential - MultiTenantCredential azcore.TokenCredential - ClientCertificateCredential azcore.TokenCredential + + MultiTenantCredential azcore.TokenCredential } func NewAuthProvider(armConfig *ARMClientConfig, config *AzureAuthConfig, clientOptionsMutFn ...func(option *policy.ClientOptions)) (*AuthProvider, error) { @@ -76,6 +82,20 @@ func NewAuthProvider(armConfig *ARMClientConfig, config 
*AzureAuthConfig, client } } + var ( + networkTokenCredential azcore.TokenCredential + ) + if config.UseManagedIdentityExtension && config.AuxiliaryTokenProvider != nil && IsMultiTenant(armConfig) { + networkTokenCredential, err = armauth.NewKeyVaultCredential( + managedIdentityCredential, + config.AuxiliaryTokenProvider.KeyVaultURL, + config.AuxiliaryTokenProvider.SecretName, + ) + if err != nil { + return nil, fmt.Errorf("create KeyVaultCredential for auxiliary token provider: %w", err) + } + } + // ClientSecretCredential is used for client secret var clientSecretCredential azcore.TokenCredential var networkClientSecretCredential azcore.TokenCredential @@ -88,7 +108,7 @@ func NewAuthProvider(armConfig *ARMClientConfig, config *AzureAuthConfig, client if err != nil { return nil, err } - if len(armConfig.NetworkResourceTenantID) > 0 && !strings.EqualFold(armConfig.NetworkResourceTenantID, armConfig.GetTenantID()) { + if IsMultiTenant(armConfig) { credOptions := &azidentity.ClientSecretCredentialOptions{ ClientOptions: *clientOption, } @@ -128,7 +148,7 @@ func NewAuthProvider(armConfig *ARMClientConfig, config *AzureAuthConfig, client if err != nil { return nil, err } - if len(armConfig.NetworkResourceTenantID) > 0 && !strings.EqualFold(armConfig.NetworkResourceTenantID, armConfig.GetTenantID()) { + if IsMultiTenant(armConfig) { networkClientSecretCredential, err = azidentity.NewClientCertificateCredential(armConfig.NetworkResourceTenantID, config.GetAADClientID(), certificate, privateKey, credOptions) if err != nil { return nil, err @@ -150,6 +170,7 @@ func NewAuthProvider(armConfig *ARMClientConfig, config *AzureAuthConfig, client ClientSecretCredential: clientSecretCredential, ClientCertificateCredential: clientCertificateCredential, NetworkClientSecretCredential: networkClientSecretCredential, + NetworkTokenCredential: networkTokenCredential, MultiTenantCredential: multiTenantCredential, }, nil } @@ -173,6 +194,9 @@ func (factory *AuthProvider) GetNetworkAzIdentity() azcore.TokenCredential { if factory.NetworkClientSecretCredential != nil { return factory.NetworkClientSecretCredential } + if factory.NetworkTokenCredential != nil { + return factory.NetworkTokenCredential + } return nil } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth_conf.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth_conf.go index 46a66e94f..aa7e464f2 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth_conf.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth_conf.go @@ -42,6 +42,14 @@ type AzureAuthConfig struct { AADFederatedTokenFile string `json:"aadFederatedTokenFile,omitempty" yaml:"aadFederatedTokenFile,omitempty"` // Use workload identity federation for the virtual machine to access Azure ARM APIs UseFederatedWorkloadIdentityExtension bool `json:"useFederatedWorkloadIdentityExtension,omitempty" yaml:"useFederatedWorkloadIdentityExtension,omitempty"` + // Auxiliary token provider for accessing resources from network tenant + // Require MSI to be enabled and have permission to access the KeyVault + AuxiliaryTokenProvider *AzureAuthAuxiliaryTokenProvider `json:"auxiliaryTokenProvider,omitempty" yaml:"auxiliaryTokenProvider,omitempty"` +} + +type AzureAuthAuxiliaryTokenProvider struct { + KeyVaultURL string `json:"keyVaultURL,omitempty" yaml:"keyVaultURL,omitempty"` + SecretName string `json:"secretName" yaml:"secretName"` } func (config *AzureAuthConfig) GetAADClientID() string { From f9b2c29146331e0150799a5c21d20c67d42f5216 Mon Sep 17 
00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 5 Apr 2024 16:24:53 +0000 Subject: [PATCH 122/157] chore(deps): bump sigs.k8s.io/cloud-provider-azure/pkg/azclient Bumps [sigs.k8s.io/cloud-provider-azure/pkg/azclient](https://github.com/kubernetes-sigs/cloud-provider-azure) from 0.0.9 to 0.0.10. - [Release notes](https://github.com/kubernetes-sigs/cloud-provider-azure/releases) - [Changelog](https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/docs/release-versioning.md) - [Commits](https://github.com/kubernetes-sigs/cloud-provider-azure/compare/pkg/azclient/v0.0.9...pkg/azclient/v0.0.10) --- updated-dependencies: - dependency-name: sigs.k8s.io/cloud-provider-azure/pkg/azclient dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 4 ++-- go.sum | 8 ++++---- vendor/modules.txt | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index 87eb8327c..990e5b436 100644 --- a/go.mod +++ b/go.mod @@ -131,7 +131,7 @@ require ( golang.org/x/crypto v0.21.0 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/oauth2 v0.11.0 // indirect - golang.org/x/sync v0.6.0 + golang.org/x/sync v0.7.0 golang.org/x/sys v0.18.0 // indirect golang.org/x/term v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect @@ -156,7 +156,7 @@ require ( k8s.io/kubelet v0.29.2 // indirect k8s.io/pod-security-admission v0.29.0 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect - sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.9 + sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.10 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index de3a950a7..7d7d16bba 100644 --- a/go.sum +++ b/go.sum @@ -354,8 +354,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -468,8 +468,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2S sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc h1:y/jDBU+4EcPiHGPMQeK6B4IKEhOp3CCts/ZaRH1sa3w= sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc/go.mod h1:nS6tUQgopFVyqFQW1P/luDXNNQfsQOg3WFs9GhzNjm8= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.9 h1:Dqfn77jXMi78OrCA0clZyqJtTeyxj7dBeF23lp/nT2s= 
-sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.9/go.mod h1:/jwCAe6Nh26G687RvUbmd0CjW4GHHXI/xOAOCSF1XqQ= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.10 h1:bI+ZLfnsvExyxk/mmwmOnFHbfRyv0kc0Zk5sVh+w75A= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.10/go.mod h1:WxbuPyPqXd+yNIkWNJ/gyjAZp/DXKGHDOdZH6DXsKRs= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.3 h1:4Sk81u8kxM+yTjEfC9lPP3YyLpWeDTFRquI/3a5AzRY= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.3/go.mod h1:FWY5fW/WLg/S1Iul/Mh5YeRyQhl8Ii1Sk+YLLOj3KDk= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/vendor/modules.txt b/vendor/modules.txt index c7feb66b7..457ce19ea 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -582,7 +582,7 @@ golang.org/x/net/websocket ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.6.0 +# golang.org/x/sync v0.7.0 ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/singleflight @@ -1619,7 +1619,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/util/string sigs.k8s.io/cloud-provider-azure/pkg/util/taints sigs.k8s.io/cloud-provider-azure/pkg/util/vm sigs.k8s.io/cloud-provider-azure/pkg/version -# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.9 +# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.10 ## explicit; go 1.20 sigs.k8s.io/cloud-provider-azure/pkg/azclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient From f0a6965072b072ea35217c2a93ebb99cbc1798c0 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Mon, 8 Apr 2024 11:02:08 +0000 Subject: [PATCH 123/157] feat: upgrade to azcopy v10.24.0 for volume clone feature --- .trivyignore | 1 - pkg/blobplugin/Dockerfile | 4 ++-- test/sanity/run-test.sh | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) delete mode 100644 .trivyignore diff --git a/.trivyignore b/.trivyignore deleted file mode 100644 index b0f56dcff..000000000 --- a/.trivyignore +++ /dev/null @@ -1 +0,0 @@ -CVE-2024-24786 diff --git a/pkg/blobplugin/Dockerfile b/pkg/blobplugin/Dockerfile index 5df86797d..b145383d9 100644 --- a/pkg/blobplugin/Dockerfile +++ b/pkg/blobplugin/Dockerfile @@ -44,9 +44,9 @@ else \ RUN tar xvzf aznfs.tar.gz -C / --keep-directory-symlink && rm aznfs.tar.gz # install azcopy -ARG azcopyURL=https://azcopyvnext.azureedge.net/releases/release-10.23.0-20240129/azcopy_linux_amd64_10.23.0.tar.gz +ARG azcopyURL=https://azcopyvnext.azureedge.net/releases/release-10.24.0-20240326/azcopy_linux_amd64_10.24.0.tar.gz RUN if [ "$ARCH" == "arm64" ] ; then \ - azcopyURL=https://azcopyvnext.azureedge.net/releases/release-10.23.0-20240129/azcopy_linux_arm64_10.23.0.tar.gz; fi + azcopyURL=https://azcopyvnext.azureedge.net/releases/release-10.24.0-20240326/azcopy_linux_arm64_10.24.0.tar.gz; fi RUN wget -O azcopy.tar.gz ${azcopyURL} && \ tar xvzf azcopy.tar.gz -C . && rm azcopy.tar.gz && \ mv ./azcopy_linux_$ARCH_*/azcopy /usr/local/bin/azcopy && \ diff --git a/test/sanity/run-test.sh b/test/sanity/run-test.sh index 17b37e1d2..af1bb91c7 100755 --- a/test/sanity/run-test.sh +++ b/test/sanity/run-test.sh @@ -36,7 +36,7 @@ azcopyPath="/usr/local/bin/azcopy" if [ ! -f "$azcopyPath" ]; then azcopyTarFile="azcopy.tar.gz" echo 'Downloading azcopy...' 
- wget -O $azcopyTarFile azcopyvnext.azureedge.net/releases/release-10.23.0-20240129/azcopy_linux_amd64_10.23.0.tar.gz + wget -O $azcopyTarFile azcopyvnext.azureedge.net/releases/release-10.24.0-20240326/azcopy_linux_amd64_10.24.0.tar.gz tar -zxvf $azcopyTarFile mv ./azcopy*/azcopy /usr/local/bin/azcopy rm -rf ./$azcopyTarFile From 0f08e585f9dc3270171af04a18f8fe180db25b0d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Apr 2024 16:26:30 +0000 Subject: [PATCH 124/157] chore(deps): bump golang.org/x/net from 0.22.0 to 0.24.0 Bumps [golang.org/x/net](https://github.com/golang/net) from 0.22.0 to 0.24.0. - [Commits](https://github.com/golang/net/compare/v0.22.0...v0.24.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 8 +- go.sum | 16 +- .../x/crypto/chacha20/chacha_ppc64le.s | 110 +++--- vendor/golang.org/x/crypto/ssh/server.go | 170 ++++++--- vendor/golang.org/x/net/http2/frame.go | 31 ++ vendor/golang.org/x/net/http2/pipe.go | 11 +- vendor/golang.org/x/net/http2/server.go | 13 +- vendor/golang.org/x/net/http2/testsync.go | 331 ++++++++++++++++++ vendor/golang.org/x/net/http2/transport.go | 298 ++++++++++++---- vendor/golang.org/x/sys/unix/mmap_nomremap.go | 2 +- .../x/sys/unix/syscall_zos_s390x.go | 8 + .../x/sys/windows/syscall_windows.go | 82 +++++ .../golang.org/x/sys/windows/types_windows.go | 24 ++ .../x/sys/windows/zsyscall_windows.go | 126 ++++++- vendor/modules.txt | 8 +- 15 files changed, 1036 insertions(+), 202 deletions(-) create mode 100644 vendor/golang.org/x/net/http2/testsync.go diff --git a/go.mod b/go.mod index 990e5b436..95ba73dfe 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/satori/go.uuid v1.2.0 github.com/stretchr/testify v1.9.0 go.uber.org/mock v0.4.0 - golang.org/x/net v0.22.0 + golang.org/x/net v0.24.0 google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.33.0 k8s.io/api v0.29.3 @@ -128,12 +128,12 @@ require ( go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.19.0 // indirect - golang.org/x/crypto v0.21.0 // indirect + golang.org/x/crypto v0.22.0 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/oauth2 v0.11.0 // indirect golang.org/x/sync v0.7.0 - golang.org/x/sys v0.18.0 // indirect - golang.org/x/term v0.18.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.17.0 // indirect diff --git a/go.sum b/go.sum index 7d7d16bba..59e0bace4 100644 --- a/go.sum +++ b/go.sum @@ -327,8 +327,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp 
v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -345,8 +345,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -366,13 +366,13 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s index 66aebae25..c672ccf69 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s @@ -33,6 +33,9 @@ #define CONSTBASE R16 #define BLOCKS R17 +// for VPERMXOR +#define MASK R18 + DATA consts<>+0x00(SB)/8, $0x3320646e61707865 DATA consts<>+0x08(SB)/8, $0x6b20657479622d32 DATA consts<>+0x10(SB)/8, $0x0000000000000001 @@ -53,7 +56,11 @@ DATA consts<>+0x80(SB)/8, $0x6b2065746b206574 DATA consts<>+0x88(SB)/8, $0x6b2065746b206574 DATA consts<>+0x90(SB)/8, $0x0000000100000000 DATA consts<>+0x98(SB)/8, $0x0000000300000002 -GLOBL consts<>(SB), RODATA, $0xa0 +DATA 
consts<>+0xa0(SB)/8, $0x5566774411223300 +DATA consts<>+0xa8(SB)/8, $0xddeeffcc99aabb88 +DATA consts<>+0xb0(SB)/8, $0x6677445522330011 +DATA consts<>+0xb8(SB)/8, $0xeeffccddaabb8899 +GLOBL consts<>(SB), RODATA, $0xc0 //func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 @@ -70,6 +77,9 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 MOVD $48, R10 MOVD $64, R11 SRD $6, LEN, BLOCKS + // for VPERMXOR + MOVD $consts<>+0xa0(SB), MASK + MOVD $16, R20 // V16 LXVW4X (CONSTBASE)(R0), VS48 ADD $80,CONSTBASE @@ -87,6 +97,10 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 // V28 LXVW4X (CONSTBASE)(R11), VS60 + // Load mask constants for VPERMXOR + LXVW4X (MASK)(R0), V20 + LXVW4X (MASK)(R20), V21 + // splat slot from V19 -> V26 VSPLTW $0, V19, V26 @@ -97,7 +111,7 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 MOVD $10, R14 MOVD R14, CTR - + PCALIGN $16 loop_outer_vsx: // V0, V1, V2, V3 LXVW4X (R0)(CONSTBASE), VS32 @@ -128,22 +142,17 @@ loop_outer_vsx: VSPLTISW $12, V28 VSPLTISW $8, V29 VSPLTISW $7, V30 - + PCALIGN $16 loop_vsx: VADDUWM V0, V4, V0 VADDUWM V1, V5, V1 VADDUWM V2, V6, V2 VADDUWM V3, V7, V3 - VXOR V12, V0, V12 - VXOR V13, V1, V13 - VXOR V14, V2, V14 - VXOR V15, V3, V15 - - VRLW V12, V27, V12 - VRLW V13, V27, V13 - VRLW V14, V27, V14 - VRLW V15, V27, V15 + VPERMXOR V12, V0, V21, V12 + VPERMXOR V13, V1, V21, V13 + VPERMXOR V14, V2, V21, V14 + VPERMXOR V15, V3, V21, V15 VADDUWM V8, V12, V8 VADDUWM V9, V13, V9 @@ -165,15 +174,10 @@ loop_vsx: VADDUWM V2, V6, V2 VADDUWM V3, V7, V3 - VXOR V12, V0, V12 - VXOR V13, V1, V13 - VXOR V14, V2, V14 - VXOR V15, V3, V15 - - VRLW V12, V29, V12 - VRLW V13, V29, V13 - VRLW V14, V29, V14 - VRLW V15, V29, V15 + VPERMXOR V12, V0, V20, V12 + VPERMXOR V13, V1, V20, V13 + VPERMXOR V14, V2, V20, V14 + VPERMXOR V15, V3, V20, V15 VADDUWM V8, V12, V8 VADDUWM V9, V13, V9 @@ -195,15 +199,10 @@ loop_vsx: VADDUWM V2, V7, V2 VADDUWM V3, V4, V3 - VXOR V15, V0, V15 - VXOR V12, V1, V12 - VXOR V13, V2, V13 - VXOR V14, V3, V14 - - VRLW V15, V27, V15 - VRLW V12, V27, V12 - VRLW V13, V27, V13 - VRLW V14, V27, V14 + VPERMXOR V15, V0, V21, V15 + VPERMXOR V12, V1, V21, V12 + VPERMXOR V13, V2, V21, V13 + VPERMXOR V14, V3, V21, V14 VADDUWM V10, V15, V10 VADDUWM V11, V12, V11 @@ -225,15 +224,10 @@ loop_vsx: VADDUWM V2, V7, V2 VADDUWM V3, V4, V3 - VXOR V15, V0, V15 - VXOR V12, V1, V12 - VXOR V13, V2, V13 - VXOR V14, V3, V14 - - VRLW V15, V29, V15 - VRLW V12, V29, V12 - VRLW V13, V29, V13 - VRLW V14, V29, V14 + VPERMXOR V15, V0, V20, V15 + VPERMXOR V12, V1, V20, V12 + VPERMXOR V13, V2, V20, V13 + VPERMXOR V14, V3, V20, V14 VADDUWM V10, V15, V10 VADDUWM V11, V12, V11 @@ -249,48 +243,48 @@ loop_vsx: VRLW V6, V30, V6 VRLW V7, V30, V7 VRLW V4, V30, V4 - BC 16, LT, loop_vsx + BDNZ loop_vsx VADDUWM V12, V26, V12 - WORD $0x13600F8C // VMRGEW V0, V1, V27 - WORD $0x13821F8C // VMRGEW V2, V3, V28 + VMRGEW V0, V1, V27 + VMRGEW V2, V3, V28 - WORD $0x10000E8C // VMRGOW V0, V1, V0 - WORD $0x10421E8C // VMRGOW V2, V3, V2 + VMRGOW V0, V1, V0 + VMRGOW V2, V3, V2 - WORD $0x13A42F8C // VMRGEW V4, V5, V29 - WORD $0x13C63F8C // VMRGEW V6, V7, V30 + VMRGEW V4, V5, V29 + VMRGEW V6, V7, V30 XXPERMDI VS32, VS34, $0, VS33 XXPERMDI VS32, VS34, $3, VS35 XXPERMDI VS59, VS60, $0, VS32 XXPERMDI VS59, VS60, $3, VS34 - WORD $0x10842E8C // VMRGOW V4, V5, V4 - WORD $0x10C63E8C // VMRGOW V6, V7, V6 + VMRGOW V4, V5, V4 + VMRGOW V6, V7, V6 - WORD $0x13684F8C // VMRGEW V8, V9, V27 - WORD $0x138A5F8C // VMRGEW V10, V11, V28 + VMRGEW V8, V9, V27 + VMRGEW V10, 
V11, V28 XXPERMDI VS36, VS38, $0, VS37 XXPERMDI VS36, VS38, $3, VS39 XXPERMDI VS61, VS62, $0, VS36 XXPERMDI VS61, VS62, $3, VS38 - WORD $0x11084E8C // VMRGOW V8, V9, V8 - WORD $0x114A5E8C // VMRGOW V10, V11, V10 + VMRGOW V8, V9, V8 + VMRGOW V10, V11, V10 - WORD $0x13AC6F8C // VMRGEW V12, V13, V29 - WORD $0x13CE7F8C // VMRGEW V14, V15, V30 + VMRGEW V12, V13, V29 + VMRGEW V14, V15, V30 XXPERMDI VS40, VS42, $0, VS41 XXPERMDI VS40, VS42, $3, VS43 XXPERMDI VS59, VS60, $0, VS40 XXPERMDI VS59, VS60, $3, VS42 - WORD $0x118C6E8C // VMRGOW V12, V13, V12 - WORD $0x11CE7E8C // VMRGOW V14, V15, V14 + VMRGOW V12, V13, V12 + VMRGOW V14, V15, V14 VSPLTISW $4, V27 VADDUWM V26, V27, V26 @@ -431,7 +425,7 @@ tail_vsx: ADD $-1, R11, R12 ADD $-1, INP ADD $-1, OUT - + PCALIGN $16 looptail_vsx: // Copying the result to OUT // in bytes. @@ -439,7 +433,7 @@ looptail_vsx: MOVBZU 1(INP), TMP XOR KEY, TMP, KEY MOVBU KEY, 1(OUT) - BC 16, LT, looptail_vsx + BDNZ looptail_vsx // Clear the stack values STXVW4X VS48, (R11)(R0) diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index c2dfe3268..e2ae4f891 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -426,6 +426,35 @@ func (l ServerAuthError) Error() string { return "[" + strings.Join(errs, ", ") + "]" } +// ServerAuthCallbacks defines server-side authentication callbacks. +type ServerAuthCallbacks struct { + // PasswordCallback behaves like [ServerConfig.PasswordCallback]. + PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) + + // PublicKeyCallback behaves like [ServerConfig.PublicKeyCallback]. + PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) + + // KeyboardInteractiveCallback behaves like [ServerConfig.KeyboardInteractiveCallback]. + KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) + + // GSSAPIWithMICConfig behaves like [ServerConfig.GSSAPIWithMICConfig]. + GSSAPIWithMICConfig *GSSAPIWithMICConfig +} + +// PartialSuccessError can be returned by any of the [ServerConfig] +// authentication callbacks to indicate to the client that authentication has +// partially succeeded, but further steps are required. +type PartialSuccessError struct { + // Next defines the authentication callbacks to apply to further steps. The + // available methods communicated to the client are based on the non-nil + // ServerAuthCallbacks fields. + Next ServerAuthCallbacks +} + +func (p *PartialSuccessError) Error() string { + return "ssh: authenticated with partial success" +} + // ErrNoAuth is the error value returned if no // authentication method has been passed yet. This happens as a normal // part of the authentication loop, since the client first tries @@ -439,8 +468,18 @@ func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, err var perms *Permissions authFailures := 0 + noneAuthCount := 0 var authErrs []error var displayedBanner bool + partialSuccessReturned := false + // Set the initial authentication callbacks from the config. They can be + // changed if a PartialSuccessError is returned. 
+ authConfig := ServerAuthCallbacks{ + PasswordCallback: config.PasswordCallback, + PublicKeyCallback: config.PublicKeyCallback, + KeyboardInteractiveCallback: config.KeyboardInteractiveCallback, + GSSAPIWithMICConfig: config.GSSAPIWithMICConfig, + } userAuthLoop: for { @@ -471,6 +510,11 @@ userAuthLoop: return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service) } + if s.user != userAuthReq.User && partialSuccessReturned { + return nil, fmt.Errorf("ssh: client changed the user after a partial success authentication, previous user %q, current user %q", + s.user, userAuthReq.User) + } + s.user = userAuthReq.User if !displayedBanner && config.BannerCallback != nil { @@ -491,20 +535,18 @@ userAuthLoop: switch userAuthReq.Method { case "none": - if config.NoClientAuth { + noneAuthCount++ + // We don't allow none authentication after a partial success + // response. + if config.NoClientAuth && !partialSuccessReturned { if config.NoClientAuthCallback != nil { perms, authErr = config.NoClientAuthCallback(s) } else { authErr = nil } } - - // allow initial attempt of 'none' without penalty - if authFailures == 0 { - authFailures-- - } case "password": - if config.PasswordCallback == nil { + if authConfig.PasswordCallback == nil { authErr = errors.New("ssh: password auth not configured") break } @@ -518,17 +560,17 @@ userAuthLoop: return nil, parseError(msgUserAuthRequest) } - perms, authErr = config.PasswordCallback(s, password) + perms, authErr = authConfig.PasswordCallback(s, password) case "keyboard-interactive": - if config.KeyboardInteractiveCallback == nil { + if authConfig.KeyboardInteractiveCallback == nil { authErr = errors.New("ssh: keyboard-interactive auth not configured") break } prompter := &sshClientKeyboardInteractive{s} - perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge) + perms, authErr = authConfig.KeyboardInteractiveCallback(s, prompter.Challenge) case "publickey": - if config.PublicKeyCallback == nil { + if authConfig.PublicKeyCallback == nil { authErr = errors.New("ssh: publickey auth not configured") break } @@ -562,11 +604,18 @@ userAuthLoop: if !ok { candidate.user = s.user candidate.pubKeyData = pubKeyData - candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey) - if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { - candidate.result = checkSourceAddress( + candidate.perms, candidate.result = authConfig.PublicKeyCallback(s, pubKey) + _, isPartialSuccessError := candidate.result.(*PartialSuccessError) + + if (candidate.result == nil || isPartialSuccessError) && + candidate.perms != nil && + candidate.perms.CriticalOptions != nil && + candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { + if err := checkSourceAddress( s.RemoteAddr(), - candidate.perms.CriticalOptions[sourceAddressCriticalOption]) + candidate.perms.CriticalOptions[sourceAddressCriticalOption]); err != nil { + candidate.result = err + } } cache.add(candidate) } @@ -578,8 +627,8 @@ userAuthLoop: if len(payload) > 0 { return nil, parseError(msgUserAuthRequest) } - - if candidate.result == nil { + _, isPartialSuccessError := candidate.result.(*PartialSuccessError) + if candidate.result == nil || isPartialSuccessError { okMsg := userAuthPubKeyOkMsg{ Algo: algo, PubKey: pubKeyData, @@ -629,11 +678,11 @@ userAuthLoop: perms = candidate.perms } case "gssapi-with-mic": - if config.GSSAPIWithMICConfig == 
nil { + if authConfig.GSSAPIWithMICConfig == nil { authErr = errors.New("ssh: gssapi-with-mic auth not configured") break } - gssapiConfig := config.GSSAPIWithMICConfig + gssapiConfig := authConfig.GSSAPIWithMICConfig userAuthRequestGSSAPI, err := parseGSSAPIPayload(userAuthReq.Payload) if err != nil { return nil, parseError(msgUserAuthRequest) @@ -689,49 +738,70 @@ userAuthLoop: break userAuthLoop } - authFailures++ - if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries { - // If we have hit the max attempts, don't bother sending the - // final SSH_MSG_USERAUTH_FAILURE message, since there are - // no more authentication methods which can be attempted, - // and this message may cause the client to re-attempt - // authentication while we send the disconnect message. - // Continue, and trigger the disconnect at the start of - // the loop. - // - // The SSH specification is somewhat confusing about this, - // RFC 4252 Section 5.1 requires each authentication failure - // be responded to with a respective SSH_MSG_USERAUTH_FAILURE - // message, but Section 4 says the server should disconnect - // after some number of attempts, but it isn't explicit which - // message should take precedence (i.e. should there be a failure - // message than a disconnect message, or if we are going to - // disconnect, should we only send that message.) - // - // Either way, OpenSSH disconnects immediately after the last - // failed authnetication attempt, and given they are typically - // considered the golden implementation it seems reasonable - // to match that behavior. - continue + var failureMsg userAuthFailureMsg + + if partialSuccess, ok := authErr.(*PartialSuccessError); ok { + // After a partial success error we don't allow changing the user + // name and execute the NoClientAuthCallback. + partialSuccessReturned = true + + // In case a partial success is returned, the server may send + // a new set of authentication methods. + authConfig = partialSuccess.Next + + // Reset pubkey cache, as the new PublicKeyCallback might + // accept a different set of public keys. + cache = pubKeyCache{} + + // Send back a partial success message to the user. + failureMsg.PartialSuccess = true + } else { + // Allow initial attempt of 'none' without penalty. + if authFailures > 0 || userAuthReq.Method != "none" || noneAuthCount != 1 { + authFailures++ + } + if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries { + // If we have hit the max attempts, don't bother sending the + // final SSH_MSG_USERAUTH_FAILURE message, since there are + // no more authentication methods which can be attempted, + // and this message may cause the client to re-attempt + // authentication while we send the disconnect message. + // Continue, and trigger the disconnect at the start of + // the loop. + // + // The SSH specification is somewhat confusing about this, + // RFC 4252 Section 5.1 requires each authentication failure + // be responded to with a respective SSH_MSG_USERAUTH_FAILURE + // message, but Section 4 says the server should disconnect + // after some number of attempts, but it isn't explicit which + // message should take precedence (i.e. should there be a failure + // message than a disconnect message, or if we are going to + // disconnect, should we only send that message.) + // + // Either way, OpenSSH disconnects immediately after the last + // failed authentication attempt, and given they are typically + // considered the golden implementation it seems reasonable + // to match that behavior. 
+ continue + } } - var failureMsg userAuthFailureMsg - if config.PasswordCallback != nil { + if authConfig.PasswordCallback != nil { failureMsg.Methods = append(failureMsg.Methods, "password") } - if config.PublicKeyCallback != nil { + if authConfig.PublicKeyCallback != nil { failureMsg.Methods = append(failureMsg.Methods, "publickey") } - if config.KeyboardInteractiveCallback != nil { + if authConfig.KeyboardInteractiveCallback != nil { failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") } - if config.GSSAPIWithMICConfig != nil && config.GSSAPIWithMICConfig.Server != nil && - config.GSSAPIWithMICConfig.AllowLogin != nil { + if authConfig.GSSAPIWithMICConfig != nil && authConfig.GSSAPIWithMICConfig.Server != nil && + authConfig.GSSAPIWithMICConfig.AllowLogin != nil { failureMsg.Methods = append(failureMsg.Methods, "gssapi-with-mic") } if len(failureMsg.Methods) == 0 { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") + return nil, errors.New("ssh: no authentication methods available") } if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil { diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index e2b298d85..43557ab7e 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1564,6 +1564,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { if size > remainSize { hdec.SetEmitEnabled(false) mh.Truncated = true + remainSize = 0 return } remainSize -= size @@ -1576,6 +1577,36 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { var hc headersOrContinuation = hf for { frag := hc.HeaderBlockFragment() + + // Avoid parsing large amounts of headers that we will then discard. + // If the sender exceeds the max header list size by too much, + // skip parsing the fragment and close the connection. + // + // "Too much" is either any CONTINUATION frame after we've already + // exceeded the max header list size (in which case remainSize is 0), + // or a frame whose encoded size is more than twice the remaining + // header list bytes we're willing to accept. + if int64(len(frag)) > int64(2*remainSize) { + if VerboseLogs { + log.Printf("http2: header list too large") + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. + return nil, ConnectionError(ErrCodeProtocol) + } + + // Also close the connection after any CONTINUATION frame following an + // invalid header, since we stop tracking the size of the headers after + // an invalid one. + if invalid != nil { + if VerboseLogs { + log.Printf("http2: invalid header: %v", invalid) + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. 
+ return nil, ConnectionError(ErrCodeProtocol) + } + if _, err := hdec.Write(frag); err != nil { return nil, ConnectionError(ErrCodeCompression) } diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go index 684d984fd..3b9f06b96 100644 --- a/vendor/golang.org/x/net/http2/pipe.go +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -77,7 +77,10 @@ func (p *pipe) Read(d []byte) (n int, err error) { } } -var errClosedPipeWrite = errors.New("write on closed buffer") +var ( + errClosedPipeWrite = errors.New("write on closed buffer") + errUninitializedPipeWrite = errors.New("write on uninitialized buffer") +) // Write copies bytes from p into the buffer and wakes a reader. // It is an error to write more data than the buffer can hold. @@ -91,6 +94,12 @@ func (p *pipe) Write(d []byte) (n int, err error) { if p.err != nil || p.breakErr != nil { return 0, errClosedPipeWrite } + // pipe.setBuffer is never invoked, leaving the buffer uninitialized. + // We shouldn't try to write to an uninitialized pipe, + // but returning an error is better than panicking. + if p.b == nil { + return 0, errUninitializedPipeWrite + } return p.b.Write(d) } diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index ae94c6408..ce2e8b40e 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -124,6 +124,7 @@ type Server struct { // IdleTimeout specifies how long until idle clients should be // closed with a GOAWAY frame. PING frames are not considered // activity for the purposes of IdleTimeout. + // If zero or negative, there is no timeout. IdleTimeout time.Duration // MaxUploadBufferPerConnection is the size of the initial flow @@ -434,7 +435,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { // passes the connection off to us with the deadline already set. // Write deadlines are set per stream in serverConn.newStream. // Disarm the net.Conn write deadline here. - if sc.hs.WriteTimeout != 0 { + if sc.hs.WriteTimeout > 0 { sc.conn.SetWriteDeadline(time.Time{}) } @@ -924,7 +925,7 @@ func (sc *serverConn) serve() { sc.setConnState(http.StateActive) sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { + if sc.srv.IdleTimeout > 0 { sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } @@ -1637,7 +1638,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { delete(sc.streams, st.id) if len(sc.streams) == 0 { sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { + if sc.srv.IdleTimeout > 0 { sc.idleTimer.Reset(sc.srv.IdleTimeout) } if h1ServerKeepAlivesDisabled(sc.hs) { @@ -2017,7 +2018,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // similar to how the http1 server works. Here it's // technically more like the http1 Server's ReadHeaderTimeout // (in Go 1.8), though. That's a more sane option anyway. - if sc.hs.ReadTimeout != 0 { + if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } @@ -2038,7 +2039,7 @@ func (sc *serverConn) upgradeRequest(req *http.Request) { // Disable any read deadline set by the net/http package // prior to the upgrade. 
- if sc.hs.ReadTimeout != 0 { + if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) } @@ -2116,7 +2117,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) st.inflow.init(sc.srv.initialStreamRecvWindowSize()) - if sc.hs.WriteTimeout != 0 { + if sc.hs.WriteTimeout > 0 { st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } diff --git a/vendor/golang.org/x/net/http2/testsync.go b/vendor/golang.org/x/net/http2/testsync.go new file mode 100644 index 000000000..61075bd16 --- /dev/null +++ b/vendor/golang.org/x/net/http2/testsync.go @@ -0,0 +1,331 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package http2 + +import ( + "context" + "sync" + "time" +) + +// testSyncHooks coordinates goroutines in tests. +// +// For example, a call to ClientConn.RoundTrip involves several goroutines, including: +// - the goroutine running RoundTrip; +// - the clientStream.doRequest goroutine, which writes the request; and +// - the clientStream.readLoop goroutine, which reads the response. +// +// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines +// are blocked waiting for some condition such as reading the Request.Body or waiting for +// flow control to become available. +// +// The testSyncHooks also manage timers and synthetic time in tests. +// This permits us to, for example, start a request and cause it to time out waiting for +// response headers without resorting to time.Sleep calls. +type testSyncHooks struct { + // active/inactive act as a mutex and condition variable. + // + // - neither chan contains a value: testSyncHooks is locked. + // - active contains a value: unlocked, and at least one goroutine is not blocked + // - inactive contains a value: unlocked, and all goroutines are blocked + active chan struct{} + inactive chan struct{} + + // goroutine counts + total int // total goroutines + condwait map[*sync.Cond]int // blocked in sync.Cond.Wait + blocked []*testBlockedGoroutine // otherwise blocked + + // fake time + now time.Time + timers []*fakeTimer + + // Transport testing: Report various events. + newclientconn func(*ClientConn) + newstream func(*clientStream) +} + +// testBlockedGoroutine is a blocked goroutine. +type testBlockedGoroutine struct { + f func() bool // blocked until f returns true + ch chan struct{} // closed when unblocked +} + +func newTestSyncHooks() *testSyncHooks { + h := &testSyncHooks{ + active: make(chan struct{}, 1), + inactive: make(chan struct{}, 1), + condwait: map[*sync.Cond]int{}, + } + h.inactive <- struct{}{} + h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + return h +} + +// lock acquires the testSyncHooks mutex. +func (h *testSyncHooks) lock() { + select { + case <-h.active: + case <-h.inactive: + } +} + +// waitInactive waits for all goroutines to become inactive. +func (h *testSyncHooks) waitInactive() { + for { + <-h.inactive + if !h.unlock() { + break + } + } +} + +// unlock releases the testSyncHooks mutex. +// It reports whether any goroutines are active. +func (h *testSyncHooks) unlock() (active bool) { + // Look for a blocked goroutine which can be unblocked. 
+ blocked := h.blocked[:0] + unblocked := false + for _, b := range h.blocked { + if !unblocked && b.f() { + unblocked = true + close(b.ch) + } else { + blocked = append(blocked, b) + } + } + h.blocked = blocked + + // Count goroutines blocked on condition variables. + condwait := 0 + for _, count := range h.condwait { + condwait += count + } + + if h.total > condwait+len(blocked) { + h.active <- struct{}{} + return true + } else { + h.inactive <- struct{}{} + return false + } +} + +// goRun starts a new goroutine. +func (h *testSyncHooks) goRun(f func()) { + h.lock() + h.total++ + h.unlock() + go func() { + defer func() { + h.lock() + h.total-- + h.unlock() + }() + f() + }() +} + +// blockUntil indicates that a goroutine is blocked waiting for some condition to become true. +// It waits until f returns true before proceeding. +// +// Example usage: +// +// h.blockUntil(func() bool { +// // Is the context done yet? +// select { +// case <-ctx.Done(): +// default: +// return false +// } +// return true +// }) +// // Wait for the context to become done. +// <-ctx.Done() +// +// The function f passed to blockUntil must be non-blocking and idempotent. +func (h *testSyncHooks) blockUntil(f func() bool) { + if f() { + return + } + ch := make(chan struct{}) + h.lock() + h.blocked = append(h.blocked, &testBlockedGoroutine{ + f: f, + ch: ch, + }) + h.unlock() + <-ch +} + +// broadcast is sync.Cond.Broadcast. +func (h *testSyncHooks) condBroadcast(cond *sync.Cond) { + h.lock() + delete(h.condwait, cond) + h.unlock() + cond.Broadcast() +} + +// broadcast is sync.Cond.Wait. +func (h *testSyncHooks) condWait(cond *sync.Cond) { + h.lock() + h.condwait[cond]++ + h.unlock() +} + +// newTimer creates a new fake timer. +func (h *testSyncHooks) newTimer(d time.Duration) timer { + h.lock() + defer h.unlock() + t := &fakeTimer{ + hooks: h, + when: h.now.Add(d), + c: make(chan time.Time), + } + h.timers = append(h.timers, t) + return t +} + +// afterFunc creates a new fake AfterFunc timer. +func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer { + h.lock() + defer h.unlock() + t := &fakeTimer{ + hooks: h, + when: h.now.Add(d), + f: f, + } + h.timers = append(h.timers, t) + return t +} + +func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(ctx) + t := h.afterFunc(d, cancel) + return ctx, func() { + t.Stop() + cancel() + } +} + +func (h *testSyncHooks) timeUntilEvent() time.Duration { + h.lock() + defer h.unlock() + var next time.Time + for _, t := range h.timers { + if next.IsZero() || t.when.Before(next) { + next = t.when + } + } + if d := next.Sub(h.now); d > 0 { + return d + } + return 0 +} + +// advance advances time and causes synthetic timers to fire. +func (h *testSyncHooks) advance(d time.Duration) { + h.lock() + defer h.unlock() + h.now = h.now.Add(d) + timers := h.timers[:0] + for _, t := range h.timers { + t := t // remove after go.mod depends on go1.22 + t.mu.Lock() + switch { + case t.when.After(h.now): + timers = append(timers, t) + case t.when.IsZero(): + // stopped timer + default: + t.when = time.Time{} + if t.c != nil { + close(t.c) + } + if t.f != nil { + h.total++ + go func() { + defer func() { + h.lock() + h.total-- + h.unlock() + }() + t.f() + }() + } + } + t.mu.Unlock() + } + h.timers = timers +} + +// A timer wraps a time.Timer, or a synthetic equivalent in tests. +// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires. 
+type timer interface { + C() <-chan time.Time + Stop() bool + Reset(d time.Duration) bool +} + +// timeTimer implements timer using real time. +type timeTimer struct { + t *time.Timer + c chan time.Time +} + +// newTimeTimer creates a new timer using real time. +func newTimeTimer(d time.Duration) timer { + ch := make(chan time.Time) + t := time.AfterFunc(d, func() { + close(ch) + }) + return &timeTimer{t, ch} +} + +// newTimeAfterFunc creates an AfterFunc timer using real time. +func newTimeAfterFunc(d time.Duration, f func()) timer { + return &timeTimer{ + t: time.AfterFunc(d, f), + } +} + +func (t timeTimer) C() <-chan time.Time { return t.c } +func (t timeTimer) Stop() bool { return t.t.Stop() } +func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) } + +// fakeTimer implements timer using fake time. +type fakeTimer struct { + hooks *testSyncHooks + + mu sync.Mutex + when time.Time // when the timer will fire + c chan time.Time // closed when the timer fires; mutually exclusive with f + f func() // called when the timer fires; mutually exclusive with c +} + +func (t *fakeTimer) C() <-chan time.Time { return t.c } + +func (t *fakeTimer) Stop() bool { + t.mu.Lock() + defer t.mu.Unlock() + stopped := t.when.IsZero() + t.when = time.Time{} + return stopped +} + +func (t *fakeTimer) Reset(d time.Duration) bool { + if t.c != nil || t.f == nil { + panic("fakeTimer only supports Reset on AfterFunc timers") + } + t.mu.Lock() + defer t.mu.Unlock() + t.hooks.lock() + defer t.hooks.unlock() + active := !t.when.IsZero() + t.when = t.hooks.now.Add(d) + if !active { + t.hooks.timers = append(t.hooks.timers, t) + } + return active +} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index c2a5b44b3..ce375c8c7 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -147,6 +147,12 @@ type Transport struct { // waiting for their turn. StrictMaxConcurrentStreams bool + // IdleConnTimeout is the maximum amount of time an idle + // (keep-alive) connection will remain idle before closing + // itself. + // Zero means no limit. + IdleConnTimeout time.Duration + // ReadIdleTimeout is the timeout after which a health check using ping // frame will be carried out if no frame is received on the connection. // Note that a ping response will is considered a received frame, so if @@ -178,6 +184,8 @@ type Transport struct { connPoolOnce sync.Once connPoolOrDef ClientConnPool // non-nil version of ConnPool + + syncHooks *testSyncHooks } func (t *Transport) maxHeaderListSize() uint32 { @@ -302,7 +310,7 @@ type ClientConn struct { readerErr error // set before readerDone is closed idleTimeout time.Duration // or 0 for never - idleTimer *time.Timer + idleTimer timer mu sync.Mutex // guards following cond *sync.Cond // hold mu; broadcast on flow/closed changes @@ -344,6 +352,60 @@ type ClientConn struct { werr error // first write error that has occurred hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder + + syncHooks *testSyncHooks // can be nil +} + +// Hook points used for testing. +// Outside of tests, cc.syncHooks is nil and these all have minimal implementations. +// Inside tests, see the testSyncHooks function docs. + +// goRun starts a new goroutine. +func (cc *ClientConn) goRun(f func()) { + if cc.syncHooks != nil { + cc.syncHooks.goRun(f) + return + } + go f() +} + +// condBroadcast is cc.cond.Broadcast. 
+func (cc *ClientConn) condBroadcast() { + if cc.syncHooks != nil { + cc.syncHooks.condBroadcast(cc.cond) + } + cc.cond.Broadcast() +} + +// condWait is cc.cond.Wait. +func (cc *ClientConn) condWait() { + if cc.syncHooks != nil { + cc.syncHooks.condWait(cc.cond) + } + cc.cond.Wait() +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (cc *ClientConn) newTimer(d time.Duration) timer { + if cc.syncHooks != nil { + return cc.syncHooks.newTimer(d) + } + return newTimeTimer(d) +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer { + if cc.syncHooks != nil { + return cc.syncHooks.afterFunc(d, f) + } + return newTimeAfterFunc(d, f) +} + +func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + if cc.syncHooks != nil { + return cc.syncHooks.contextWithTimeout(ctx, d) + } + return context.WithTimeout(ctx, d) } // clientStream is the state for a single HTTP/2 stream. One of these @@ -425,7 +487,7 @@ func (cs *clientStream) abortStreamLocked(err error) { // TODO(dneil): Clean up tests where cs.cc.cond is nil. if cs.cc.cond != nil { // Wake up writeRequestBody if it is waiting on flow control. - cs.cc.cond.Broadcast() + cs.cc.condBroadcast() } } @@ -435,7 +497,7 @@ func (cs *clientStream) abortRequestBodyWrite() { defer cc.mu.Unlock() if cs.reqBody != nil && cs.reqBodyClosed == nil { cs.closeReqBodyLocked() - cc.cond.Broadcast() + cc.condBroadcast() } } @@ -445,10 +507,10 @@ func (cs *clientStream) closeReqBodyLocked() { } cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed - go func() { + cs.cc.goRun(func() { cs.reqBody.Close() close(reqBodyClosed) - }() + }) } type stickyErrWriter struct { @@ -537,15 +599,6 @@ func authorityAddr(scheme string, authority string) (addr string) { return net.JoinHostPort(host, port) } -var retryBackoffHook func(time.Duration) *time.Timer - -func backoffNewTimer(d time.Duration) *time.Timer { - if retryBackoffHook != nil { - return retryBackoffHook(d) - } - return time.NewTimer(d) -} - // RoundTripOpt is like RoundTrip, but takes options. 
func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { @@ -573,13 +626,27 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - timer := backoffNewTimer(d) + var tm timer + if t.syncHooks != nil { + tm = t.syncHooks.newTimer(d) + t.syncHooks.blockUntil(func() bool { + select { + case <-tm.C(): + case <-req.Context().Done(): + default: + return false + } + return true + }) + } else { + tm = newTimeTimer(d) + } select { - case <-timer.C: + case <-tm.C(): t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): - timer.Stop() + tm.Stop() err = req.Context().Err() } } @@ -658,6 +725,9 @@ func canRetryError(err error) bool { } func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { + if t.syncHooks != nil { + return t.newClientConn(nil, singleUse, t.syncHooks) + } host, _, err := net.SplitHostPort(addr) if err != nil { return nil, err @@ -666,7 +736,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - return t.newClientConn(tconn, singleUse) + return t.newClientConn(tconn, singleUse, nil) } func (t *Transport) newTLSConfig(host string) *tls.Config { @@ -732,10 +802,10 @@ func (t *Transport) maxEncoderHeaderTableSize() uint32 { } func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, t.disableKeepAlives()) + return t.newClientConn(c, t.disableKeepAlives(), nil) } -func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { +func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) { cc := &ClientConn{ t: t, tconn: c, @@ -750,10 +820,15 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro wantSettingsAck: true, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), + syncHooks: hooks, + } + if hooks != nil { + hooks.newclientconn(cc) + c = cc.tconn } if d := t.idleConnTimeout(); d != 0 { cc.idleTimeout = d - cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout) } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -818,7 +893,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro return nil, cc.werr } - go cc.readLoop() + cc.goRun(cc.readLoop) return cc, nil } @@ -826,7 +901,7 @@ func (cc *ClientConn) healthCheck() { pingTimeout := cc.t.pingTimeout() // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. 
- ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) + ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -1056,7 +1131,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { // Wait for all in-flight streams to complete or connection to close done := make(chan struct{}) cancelled := false // guarded by cc.mu - go func() { + cc.goRun(func() { cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1068,9 +1143,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { if cancelled { break } - cc.cond.Wait() + cc.condWait() } - }() + }) shutdownEnterWaitStateHook() select { case <-done: @@ -1080,7 +1155,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { cc.mu.Lock() // Free the goroutine above cancelled = true - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() return ctx.Err() } @@ -1118,7 +1193,7 @@ func (cc *ClientConn) closeForError(err error) { for _, cs := range cc.streams { cs.abortStreamLocked(err) } - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() cc.closeConn() } @@ -1215,6 +1290,10 @@ func (cc *ClientConn) decrStreamReservationsLocked() { } func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + return cc.roundTrip(req, nil) +} + +func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) (*http.Response, error) { ctx := req.Context() cs := &clientStream{ cc: cc, @@ -1229,9 +1308,23 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { respHeaderRecv: make(chan struct{}), donec: make(chan struct{}), } - go cs.doRequest(req) + cc.goRun(func() { + cs.doRequest(req) + }) waitDone := func() error { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.donec: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.donec: return nil @@ -1292,7 +1385,24 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { return err } + if streamf != nil { + streamf(cs) + } + for { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.respHeaderRecv: + case <-cs.abort: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.respHeaderRecv: return handleResponseHeaders() @@ -1348,6 +1458,21 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } + var newStreamHook func(*clientStream) + if cc.syncHooks != nil { + newStreamHook = cc.syncHooks.newstream + cc.syncHooks.blockUntil(func() bool { + select { + case cc.reqHeaderMu <- struct{}{}: + <-cc.reqHeaderMu + case <-cs.reqCancel: + case <-ctx.Done(): + default: + return false + } + return true + }) + } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1372,6 +1497,10 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { } cc.mu.Unlock() + if newStreamHook != nil { + newStreamHook(cs) + } + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? 
if !cc.t.disableCompression() && req.Header.Get("Accept-Encoding") == "" && @@ -1452,15 +1581,30 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) + timer := cc.newTimer(d) defer timer.Stop() - respHeaderTimer = timer.C + respHeaderTimer = timer.C() respHeaderRecv = cs.respHeaderRecv } // Wait until the peer half-closes its end of the stream, // or until the request is aborted (via context, error, or otherwise), // whichever comes first. for { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.peerClosed: + case <-respHeaderTimer: + case <-respHeaderRecv: + case <-cs.abort: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.peerClosed: return nil @@ -1609,7 +1753,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { return nil } cc.pendingRequests++ - cc.cond.Wait() + cc.condWait() cc.pendingRequests-- select { case <-cs.abort: @@ -1871,8 +2015,24 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) cs.flow.take(take) return take, nil } - cc.cond.Wait() + cc.condWait() + } +} + +func validateHeaders(hdrs http.Header) string { + for k, vv := range hdrs { + if !httpguts.ValidHeaderFieldName(k) { + return fmt.Sprintf("name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // Don't include the value in the error, + // because it may be sensitive. + return fmt.Sprintf("value for header %q", k) + } + } } + return "" } var errNilRequestURL = errors.New("http2: Request.URI is nil") @@ -1912,19 +2072,14 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } } - // Check for any invalid headers and return an error before we + // Check for any invalid headers+trailers and return an error before we // potentially pollute our hpack state. (We want to be able to // continue to reuse the hpack encoder for future requests) - for k, vv := range req.Header { - if !httpguts.ValidHeaderFieldName(k) { - return nil, fmt.Errorf("invalid HTTP header name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - // Don't include the value in the error, because it may be sensitive. - return nil, fmt.Errorf("invalid HTTP header value for header %q", k) - } - } + if err := validateHeaders(req.Header); err != "" { + return nil, fmt.Errorf("invalid HTTP header %s", err) + } + if err := validateHeaders(req.Trailer); err != "" { + return nil, fmt.Errorf("invalid HTTP trailer %s", err) } enumerateHeaders := func(f func(name, value string)) { @@ -2143,7 +2298,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) { } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. 
- cc.cond.Broadcast() + cc.condBroadcast() closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { @@ -2231,7 +2386,7 @@ func (rl *clientConnReadLoop) cleanup() { cs.abortStreamLocked(err) } } - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() } @@ -2266,10 +2421,9 @@ func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false readIdleTimeout := cc.t.ReadIdleTimeout - var t *time.Timer + var t timer if readIdleTimeout != 0 { - t = time.AfterFunc(readIdleTimeout, cc.healthCheck) - defer t.Stop() + t = cc.afterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -2684,7 +2838,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { }) return nil } - if !cs.firstByte { + if !cs.pastHeaders { cc.logf("protocol error: received DATA before a HEADERS frame") rl.endStreamError(cs, StreamError{ StreamID: f.StreamID, @@ -2867,7 +3021,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { for _, cs := range cc.streams { cs.flow.add(delta) } - cc.cond.Broadcast() + cc.condBroadcast() cc.initialWindowSize = s.Val case SettingHeaderTableSize: @@ -2922,7 +3076,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { return ConnectionError(ErrCodeFlowControl) } - cc.cond.Broadcast() + cc.condBroadcast() return nil } @@ -2964,24 +3118,38 @@ func (cc *ClientConn) Ping(ctx context.Context) error { } cc.mu.Unlock() } - errc := make(chan error, 1) - go func() { + var pingError error + errc := make(chan struct{}) + cc.goRun(func() { cc.wmu.Lock() defer cc.wmu.Unlock() - if err := cc.fr.WritePing(false, p); err != nil { - errc <- err + if pingError = cc.fr.WritePing(false, p); pingError != nil { + close(errc) return } - if err := cc.bw.Flush(); err != nil { - errc <- err + if pingError = cc.bw.Flush(); pingError != nil { + close(errc) return } - }() + }) + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-c: + case <-errc: + case <-ctx.Done(): + case <-cc.readerDone: + default: + return false + } + return true + }) + } select { case <-c: return nil - case err := <-errc: - return err + case <-errc: + return pingError case <-ctx.Done(): return ctx.Err() case <-cc.readerDone: @@ -3150,9 +3318,17 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err } func (t *Transport) idleConnTimeout() time.Duration { + // to keep things backwards compatible, we use non-zero values of + // IdleConnTimeout, followed by using the IdleConnTimeout on the underlying + // http1 transport, followed by 0 + if t.IdleConnTimeout != 0 { + return t.IdleConnTimeout + } + if t.t1 != nil { return t.t1.IdleConnTimeout } + return 0 } diff --git a/vendor/golang.org/x/sys/unix/mmap_nomremap.go b/vendor/golang.org/x/sys/unix/mmap_nomremap.go index 4b68e5978..7f602ffd2 100644 --- a/vendor/golang.org/x/sys/unix/mmap_nomremap.go +++ b/vendor/golang.org/x/sys/unix/mmap_nomremap.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris +//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris || zos package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index b473038c6..27c41b6f0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -1520,6 +1520,14 @@ func (m *mmapper) Munmap(data []byte) (err error) { return nil } +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 6395a031d..6525c62f3 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -165,6 +165,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) +//sys DisconnectNamedPipe(pipe Handle) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState @@ -348,8 +349,19 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost //sys GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) //sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) +//sys ClearCommBreak(handle Handle) (err error) +//sys ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) +//sys EscapeCommFunction(handle Handle, dwFunc uint32) (err error) +//sys GetCommState(handle Handle, lpDCB *DCB) (err error) +//sys GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) //sys GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys PurgeComm(handle Handle, dwFlags uint32) (err error) +//sys SetCommBreak(handle Handle) (err error) +//sys SetCommMask(handle Handle, dwEvtMask uint32) (err error) +//sys SetCommState(handle Handle, lpDCB *DCB) (err error) //sys SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) +//sys WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) //sys 
GetActiveProcessorCount(groupNumber uint16) (ret uint32) //sys GetMaximumProcessorCount(groupNumber uint16) (ret uint32) //sys EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) = user32.EnumWindows @@ -1834,3 +1846,73 @@ func ResizePseudoConsole(pconsole Handle, size Coord) error { // accept arguments that can be casted to uintptr, and Coord can't. return resizePseudoConsole(pconsole, *((*uint32)(unsafe.Pointer(&size)))) } + +// DCB constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-dcb. +const ( + CBR_110 = 110 + CBR_300 = 300 + CBR_600 = 600 + CBR_1200 = 1200 + CBR_2400 = 2400 + CBR_4800 = 4800 + CBR_9600 = 9600 + CBR_14400 = 14400 + CBR_19200 = 19200 + CBR_38400 = 38400 + CBR_57600 = 57600 + CBR_115200 = 115200 + CBR_128000 = 128000 + CBR_256000 = 256000 + + DTR_CONTROL_DISABLE = 0x00000000 + DTR_CONTROL_ENABLE = 0x00000010 + DTR_CONTROL_HANDSHAKE = 0x00000020 + + RTS_CONTROL_DISABLE = 0x00000000 + RTS_CONTROL_ENABLE = 0x00001000 + RTS_CONTROL_HANDSHAKE = 0x00002000 + RTS_CONTROL_TOGGLE = 0x00003000 + + NOPARITY = 0 + ODDPARITY = 1 + EVENPARITY = 2 + MARKPARITY = 3 + SPACEPARITY = 4 + + ONESTOPBIT = 0 + ONE5STOPBITS = 1 + TWOSTOPBITS = 2 +) + +// EscapeCommFunction constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-escapecommfunction. +const ( + SETXOFF = 1 + SETXON = 2 + SETRTS = 3 + CLRRTS = 4 + SETDTR = 5 + CLRDTR = 6 + SETBREAK = 8 + CLRBREAK = 9 +) + +// PurgeComm constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-purgecomm. +const ( + PURGE_TXABORT = 0x0001 + PURGE_RXABORT = 0x0002 + PURGE_TXCLEAR = 0x0004 + PURGE_RXCLEAR = 0x0008 +) + +// SetCommMask constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-setcommmask. 
+const ( + EV_RXCHAR = 0x0001 + EV_RXFLAG = 0x0002 + EV_TXEMPTY = 0x0004 + EV_CTS = 0x0008 + EV_DSR = 0x0010 + EV_RLSD = 0x0020 + EV_BREAK = 0x0040 + EV_ERR = 0x0080 + EV_RING = 0x0100 +) diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 359780f6a..d8cb71db0 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -3380,3 +3380,27 @@ type BLOB struct { Size uint32 BlobData *byte } + +type ComStat struct { + Flags uint32 + CBInQue uint32 + CBOutQue uint32 +} + +type DCB struct { + DCBlength uint32 + BaudRate uint32 + Flags uint32 + wReserved uint16 + XonLim uint16 + XoffLim uint16 + ByteSize uint8 + Parity uint8 + StopBits uint8 + XonChar byte + XoffChar byte + ErrorChar byte + EofChar byte + EvtChar byte + wReserved1 uint16 +} diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index e8791c82c..5c6035ddf 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -188,6 +188,8 @@ var ( procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procClearCommBreak = modkernel32.NewProc("ClearCommBreak") + procClearCommError = modkernel32.NewProc("ClearCommError") procCloseHandle = modkernel32.NewProc("CloseHandle") procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") @@ -212,7 +214,9 @@ var ( procDeleteProcThreadAttributeList = modkernel32.NewProc("DeleteProcThreadAttributeList") procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") + procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") + procEscapeCommFunction = modkernel32.NewProc("EscapeCommFunction") procExitProcess = modkernel32.NewProc("ExitProcess") procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") procFindClose = modkernel32.NewProc("FindClose") @@ -236,6 +240,8 @@ var ( procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") procGetACP = modkernel32.NewProc("GetACP") procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") + procGetCommModemStatus = modkernel32.NewProc("GetCommModemStatus") + procGetCommState = modkernel32.NewProc("GetCommState") procGetCommTimeouts = modkernel32.NewProc("GetCommTimeouts") procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") @@ -322,6 +328,7 @@ var ( procProcess32NextW = modkernel32.NewProc("Process32NextW") procProcessIdToSessionId = modkernel32.NewProc("ProcessIdToSessionId") procPulseEvent = modkernel32.NewProc("PulseEvent") + procPurgeComm = modkernel32.NewProc("PurgeComm") procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") procQueryFullProcessImageNameW = modkernel32.NewProc("QueryFullProcessImageNameW") procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") @@ -335,6 +342,9 @@ var ( procResetEvent = modkernel32.NewProc("ResetEvent") procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") procResumeThread = modkernel32.NewProc("ResumeThread") + procSetCommBreak = 
modkernel32.NewProc("SetCommBreak") + procSetCommMask = modkernel32.NewProc("SetCommMask") + procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") @@ -342,7 +352,6 @@ var ( procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") - procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") procSetErrorMode = modkernel32.NewProc("SetErrorMode") procSetEvent = modkernel32.NewProc("SetEvent") @@ -351,6 +360,7 @@ var ( procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") procSetFilePointer = modkernel32.NewProc("SetFilePointer") procSetFileTime = modkernel32.NewProc("SetFileTime") + procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") procSetNamedPipeHandleState = modkernel32.NewProc("SetNamedPipeHandleState") @@ -361,6 +371,7 @@ var ( procSetStdHandle = modkernel32.NewProc("SetStdHandle") procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") + procSetupComm = modkernel32.NewProc("SetupComm") procSizeofResource = modkernel32.NewProc("SizeofResource") procSleepEx = modkernel32.NewProc("SleepEx") procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") @@ -379,6 +390,7 @@ var ( procVirtualQueryEx = modkernel32.NewProc("VirtualQueryEx") procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") procWTSGetActiveConsoleSessionId = modkernel32.NewProc("WTSGetActiveConsoleSessionId") + procWaitCommEvent = modkernel32.NewProc("WaitCommEvent") procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") @@ -1641,6 +1653,22 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) { return } +func ClearCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func CloseHandle(handle Handle) (err error) { r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { @@ -1845,6 +1873,14 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff return } +func DisconnectNamedPipe(pipe Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { var _p0 uint32 if bInheritHandle { @@ -1857,6 +1893,14 @@ func 
DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP return } +func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { + r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ExitProcess(exitcode uint32) { syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) return @@ -2058,6 +2102,22 @@ func GetActiveProcessorCount(groupNumber uint16) (ret uint32) { return } +func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2810,6 +2870,14 @@ func PulseEvent(event Handle) (err error) { return } +func PurgeComm(handle Handle, dwFlags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) n = uint32(r0) @@ -2924,6 +2992,30 @@ func ResumeThread(thread Handle) (ret uint32, err error) { return } +func SetCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2989,14 +3081,6 @@ func SetEndOfFile(handle Handle) (err error) { return } -func SetFileValidData(handle Handle, validDataLength int64) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) if r1 == 0 { @@ -3060,6 +3144,14 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim return } +func SetFileValidData(handle Handle, validDataLength int64) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func 
SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) if r1 == 0 { @@ -3145,6 +3237,14 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro return } +func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) size = uint32(r0) @@ -3291,6 +3391,14 @@ func WTSGetActiveConsoleSessionId() (sessionID uint32) { return } +func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { var _p0 uint32 if waitAll { diff --git a/vendor/modules.txt b/vendor/modules.txt index 457ce19ea..7718d9481 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -542,7 +542,7 @@ go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.21.0 +# golang.org/x/crypto v0.22.0 ## explicit; go 1.18 golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 @@ -563,7 +563,7 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/slices -# golang.org/x/net v0.22.0 +# golang.org/x/net v0.24.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html @@ -586,14 +586,14 @@ golang.org/x/oauth2/internal ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/singleflight -# golang.org/x/sys v0.18.0 +# golang.org/x/sys v0.19.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.18.0 +# golang.org/x/term v0.19.0 ## explicit; go 1.18 golang.org/x/term # golang.org/x/text v0.14.0 From 77ef1d8576f882e143738c235d4eea2f4261c0f0 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Tue, 9 Apr 2024 06:09:44 +0000 Subject: [PATCH 125/157] chore: vendor azblob InsecureAllowCredentialWithHTTP change --- go.mod | 4 +- go.sum | 8 +- .../azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 7 + .../sdk/azcore/internal/pollers/loc/loc.go | 8 +- .../sdk/azcore/internal/pollers/util.go | 13 + .../sdk/azcore/internal/shared/constants.go | 2 +- .../sdk/azcore/runtime/poller.go | 5 + .../sdk/storage/azblob/CHANGELOG.md | 81 +- .../sdk/storage/azblob/README.md | 2 +- .../sdk/storage/azblob/appendblob/client.go | 51 +- .../sdk/storage/azblob/assets.json | 2 +- .../sdk/storage/azblob/blob/client.go | 52 +- .../sdk/storage/azblob/blob/constants.go | 6 + .../sdk/storage/azblob/blob/models.go | 12 +- .../storage/azblob/bloberror/error_codes.go | 3 + .../storage/azblob/blockblob/chunkwriting.go | 72 +- .../sdk/storage/azblob/blockblob/client.go | 78 +- .../sdk/storage/azblob/blockblob/constants.go | 13 + .../sdk/storage/azblob/blockblob/models.go | 25 +- .../sdk/storage/azblob/ci.yml | 5 +- .../sdk/storage/azblob/client.go 
| 19 +- .../sdk/storage/azblob/common.go | 2 +- .../sdk/storage/azblob/container/client.go | 61 +- .../sdk/storage/azblob/container/models.go | 28 + .../sdk/storage/azblob/container/responses.go | 3 + .../storage/azblob/internal/base/clients.go | 59 +- .../azblob/internal/exported/blob_batch.go | 19 +- .../azblob/internal/exported/exported.go | 2 +- .../exported/shared_key_credential.go | 9 +- .../azblob/internal/exported/version.go | 4 +- .../internal/generated/appendblob_client.go | 19 +- .../azblob/internal/generated/autorest.md | 48 +- .../azblob/internal/generated/blob_client.go | 17 +- .../internal/generated/block_blob_client.go | 19 +- .../azblob/internal/generated/constants.go | 9 + .../internal/generated/container_client.go | 19 +- .../azblob/internal/generated/models.go | 76 + .../internal/generated/pageblob_client.go | 19 +- .../internal/generated/service_client.go | 19 +- .../generated/zz_appendblob_client.go | 283 ++-- .../internal/generated/zz_blob_client.go | 1461 ++++++++-------- .../internal/generated/zz_blockblob_client.go | 391 ++--- .../azblob/internal/generated/zz_constants.go | 129 +- .../internal/generated/zz_container_client.go | 701 ++++---- .../azblob/internal/generated/zz_models.go | 1217 +------------- .../internal/generated/zz_models_serde.go | 125 +- .../azblob/internal/generated/zz_options.go | 1469 +++++++++++++++++ .../internal/generated/zz_pageblob_client.go | 534 +++--- .../internal/generated/zz_response_types.go | 170 +- .../internal/generated/zz_service_client.go | 206 +-- .../internal/generated/zz_time_rfc1123.go | 23 +- .../internal/generated/zz_time_rfc3339.go | 35 +- .../internal/generated/zz_xml_helper.go | 26 +- .../azblob/internal/shared/batch_transfer.go | 16 +- .../azblob/internal/shared/buffer_manager.go | 70 + .../internal/shared/challenge_policy.go | 12 +- .../shared}/mmf_unix.go | 10 +- .../shared}/mmf_windows.go | 18 +- .../storage/azblob/internal/shared/shared.go | 27 +- .../sdk/storage/azblob/pageblob/client.go | 43 +- .../sdk/storage/azblob/pageblob/models.go | 2 +- .../sdk/storage/azblob/sas/account.go | 29 +- .../sdk/storage/azblob/sas/query_params.go | 14 +- .../sdk/storage/azblob/sas/service.go | 58 +- .../sdk/storage/azblob/service/client.go | 50 +- .../sdk/storage/azblob/service/models.go | 3 + vendor/modules.txt | 4 +- 67 files changed, 4611 insertions(+), 3415 deletions(-) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{blockblob => internal/shared}/mmf_unix.go (89%) rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{blockblob => internal/shared}/mmf_windows.go (82%) diff --git a/go.mod b/go.mod index 95ba73dfe..7e2e51706 100644 --- a/go.mod +++ b/go.mod @@ -6,12 +6,12 @@ toolchain go1.21.4 require ( github.com/Azure/azure-sdk-for-go v68.0.0+incompatible - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 - 
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 github.com/Azure/go-autorest/autorest v0.11.29 github.com/Azure/go-autorest/autorest/adal v0.9.23 github.com/container-storage-interface/spec v1.8.0 diff --git a/go.sum b/go.sum index 59e0bace4..d0451345b 100644 --- a/go.sum +++ b/go.sum @@ -5,8 +5,8 @@ cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGB cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.0 h1:U/kwEXj0Y+1REAkV4kV8VO1CsEp8tSaQDG/7qC5XuqQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.0/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= @@ -35,8 +35,8 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1. github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0/go.mod h1:5kakwfW5CjC9KK+Q4wjXAg+ShuIm2mBMua0ZFj2C8PE= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0 h1:nVocQV40OQne5613EeLayJiRAJuKlBGy+m22qWG+WRg= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0/go.mod h1:7QJP7dr2wznCMeqIrhMgWGf7XpAQnVrJqDm9nvV3Cu4= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 h1:YUUxeiOWgdAQE3pXt2H7QXzZs0q8UBjgRbl56qo8GYM= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2/go.mod h1:dmXQgZuiSubAecswZE+Sm8jkvEa7kQgTPVRvwL/nd0E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index 6f32eda55..a6675492b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,12 @@ # Release History +## 1.11.1 (2024-04-02) + +### Bugs Fixed + +* Pollers that use the `Location` header won't consider `http.StatusRequestTimeout` a terminal failure. +* `runtime.Poller[T].Result` won't consider non-terminal error responses as terminal. 
+ ## 1.11.0 (2024-04-01) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go index b0e62b1df..7a56c5211 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go @@ -103,10 +103,10 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { } else if resp.StatusCode > 199 && resp.StatusCode < 300 { // any 2xx other than a 202 indicates success p.CurState = poller.StatusSucceeded - } else if resp.StatusCode == http.StatusTooManyRequests { - // the request is being throttled. DO NOT include - // this as a terminal failure. preserve the - // existing state and return the response. + } else if pollers.IsNonTerminalHTTPStatusCode(resp) { + // the request timed out or is being throttled. + // DO NOT include this as a terminal failure. preserve + // the existing state and return the response. } else { p.CurState = poller.StatusFailed } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go index 08e8d24e4..eb3cf651d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go @@ -185,3 +185,16 @@ func ResultHelper[T any](resp *http.Response, failed bool, out *T) error { } return nil } + +// IsNonTerminalHTTPStatusCode returns true if the HTTP status code should be +// considered non-terminal thus eligible for retry. +func IsNonTerminalHTTPStatusCode(resp *http.Response) bool { + return exported.HasStatusCode(resp, + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + ) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 4d3465109..03691cbf0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -40,5 +40,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.11.0" + Version = "v1.11.1" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go index b0be7e5e7..03f76c9aa 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go @@ -334,6 +334,11 @@ func (p *Poller[T]) Result(ctx context.Context) (res T, err error) { err = p.op.Result(ctx, p.result) var respErr *exported.ResponseError if errors.As(err, &respErr) { + if pollers.IsNonTerminalHTTPStatusCode(respErr.RawResponse) { + // the request failed in a non-terminal way. + // don't cache the error or mark the Poller as done + return + } // the LRO failed. 
record the error p.err = err } else if err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md index 169e46972..f6c99441b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md @@ -1,5 +1,85 @@ # Release History +## 1.3.2 (2024-04-09) + +### Bugs Fixed +* Fixed an issue where GetSASURL() was providing HTTPS SAS, instead of the default http+https SAS. Fixes [#22448](https://github.com/Azure/azure-sdk-for-go/issues/22448) + +### Other Changes +* Integrate `InsecureAllowCredentialWithHTTP` client options. +* Update dependencies. + +## 1.3.1 (2024-02-28) + +### Bugs Fixed + +* Re-enabled `SharedKeyCredential` authentication mode for non TLS protected endpoints. +* Use random write in `DownloadFile` method. Fixes [#22426](https://github.com/Azure/azure-sdk-for-go/issues/22426). + +## 1.3.0 (2024-02-12) + +### Bugs Fixed +* Fix concurrency issue while Downloading File. Fixes [#22156](https://github.com/Azure/azure-sdk-for-go/issues/22156). +* Fix panic when nil options bag is passed to NewGetPageRangesPager. Fixes [22356](https://github.com/Azure/azure-sdk-for-go/issues/22356). +* Fix file offset update after Download file. Fixes [#22297](https://github.com/Azure/azure-sdk-for-go/issues/22297). + +### Other Changes +* Updated the version of `azcore` to `1.9.2` + +## 1.3.0-beta.1 (2024-01-09) + +### Features Added + +* Updated service version to `2023-11-03`. +* Added support for Audience when OAuth is used. + +### Bugs Fixed + +* Block `SharedKeyCredential` authentication mode for non TLS protected endpoints. Fixes [#21841](https://github.com/Azure/azure-sdk-for-go/issues/21841). + +## 1.2.1 (2023-12-13) + +### Features Added + +* Exposed GetSASURL from specialized clients + +### Bugs Fixed + +* Fixed case in Blob Batch API when blob path has / in it. Fixes [#21649](https://github.com/Azure/azure-sdk-for-go/issues/21649). +* Fixed SharedKeyMissingError when using client.BlobClient().GetSASURL() method +* Fixed an issue that would cause metadata keys with empty values to be omitted when enumerating blobs. +* Fixed an issue where passing empty map to set blob tags API was causing panic. Fixes [#21869](https://github.com/Azure/azure-sdk-for-go/issues/21869). +* Fixed an issue where downloaded file has incorrect size when not a multiple of block size. Fixes [#21995](https://github.com/Azure/azure-sdk-for-go/issues/21995). +* Fixed case where `io.ErrUnexpectedEOF` was treated as expected error in `UploadStream`. Fixes [#21837](https://github.com/Azure/azure-sdk-for-go/issues/21837). + +### Other Changes + +* Updated the version of `azcore` to `1.9.1` and `azidentity` to `1.4.0`. + +## 1.2.0 (2023-10-11) + +### Bugs Fixed +* Fixed null pointer exception when `SetImmutabilityPolicyOptions` is passed as `nil`. + +## 1.2.0-beta.1 (2023-09-18) + +### Features Added +* Added support for service version 2020-12-06, 2021-02-12, 2021-04-10, 2021-06-08, 2021-08-06 , 2021-10-04, 2021-12-02, 2022-11-02, 2023-01-03, 2023-05-03, and 2023-08-03 +* Added support for [Cold Tier](https://learn.microsoft.com/azure/storage/blobs/access-tiers-overview?tabs=azure-portal). +* Added `CopySourceTag` option for `UploadBlobFromURLOptions` +* Added [FilterBlobs by Tags](https://learn.microsoft.com/rest/api/storageservices/find-blobs-by-tags-container) API for container client. 
+* Added `System` option to `ListContainersInclude` to allow listing of system containers (i.e, $web). +* Updated the SAS Version to `2021-12-02` and added `Encryption Scope` to Account SAS, Service SAS, and User Delegation SAS +* Added `ArchiveStatusRehydratePendingToCold` value to `ArchiveStatus` enum. +* Content length limit for `AppendBlob.AppendBlock()` and `AppendBlob.AppendBlockFromURL()` raised from 4 MB to 100 MB. + +### Bugs Fixed +* Fixed issue where some requests fail with mismatch in string to sign. +* Fixed service SAS creation where expiry time or permissions can be omitted when stored access policy is used. Fixes [#21229](https://github.com/Azure/azure-sdk-for-go/issues/21229). + +### Other Changes +* Updating version of azcore to 1.6.0. + ## 1.1.0 (2023-07-13) ### Features Added @@ -15,7 +95,6 @@ * Fixed time formatting for the conditional request headers. Fixes [#20475](https://github.com/Azure/azure-sdk-for-go/issues/20475). * Fixed an issue where passing a blob tags map of length 0 would result in the x-ms-tags header to be sent to the service with an empty string as value. - * Fixed block size and number of blocks calculation in `UploadBuffer` and `UploadFile`. Fixes [#20735](https://github.com/Azure/azure-sdk-for-go/issues/20735). ### Other Changes diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md index 2759340f5..1f51959fa 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md @@ -1,6 +1,6 @@ # Azure Blob Storage module for Go -> Service Version: 2020-10-02 +> Service Version: 2023-11-03 Azure Blob Storage is Microsoft's object storage solution for the cloud. Blob Storage is optimized for storing massive amounts of unstructured data - data that does not adhere to a particular data model or diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go index fc96b67de..06b0fd419 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go @@ -14,12 +14,14 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" ) // ClientOptions contains the optional parameters when creating a Client. 
@@ -33,14 +35,17 @@ type Client base.CompositeClient[generated.BlobClient, generated.AppendBlobClien // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := shared.NewStorageChallengePolicy(cred) + audience := base.GetAudience((*base.ClientOptions)(options)) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, - exported.ModuleVersion, runtime.PipelineOptions{}, - &conOptions.ClientOptions) + authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewAppendBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + return (*Client)(base.NewAppendBlobClient(blobURL, azClient, nil)), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -49,12 +54,13 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - pl := runtime.NewPipeline(exported.ModuleName, - exported.ModuleVersion, - runtime.PipelineOptions{}, - &conOptions.ClientOptions) - return (*Client)(base.NewAppendBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + return (*Client)(base.NewAppendBlobClient(blobURL, azClient, nil)), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. @@ -64,13 +70,14 @@ func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) { authPolicy := exported.NewSharedKeyCredPolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, - exported.ModuleVersion, - runtime.PipelineOptions{}, - &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewAppendBlobClient(blobURL, pl, cred)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + return (*Client)(base.NewAppendBlobClient(blobURL, azClient, cred)), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. 
@@ -130,7 +137,7 @@ func (ab *Client) WithSnapshot(snapshot string) (*Client, error) { } p.Snapshot = snapshot - return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().Pipeline(), ab.sharedKey())), nil + return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().InternalClient(), ab.sharedKey())), nil } // WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. @@ -142,7 +149,7 @@ func (ab *Client) WithVersionID(versionID string) (*Client, error) { } p.VersionID = versionID - return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().Pipeline(), ab.sharedKey())), nil + return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().InternalClient(), ab.sharedKey())), nil } // Create creates a 0-size append blob. Call AppendBlock to append data to an append blob. @@ -333,6 +340,12 @@ func (ab *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.Co return blob.CopyFromURLResponse{}, errors.New("operation will not work on this blob type. CopyFromURL works only with block blob") } +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at append blob. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (ab *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) { + return ab.BlobClient().GetSASURL(permissions, expiry, o) +} + // Concurrent Download Functions ----------------------------------------------------------------------------------------- // DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json index 4db1d7209..d971ff1ec 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/storage/azblob", - "Tag": "go/storage/azblob_a772b9c866" + "Tag": "go/storage/azblob_71b0a04c12" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go index 98c166aaf..7b55cd143 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go @@ -8,15 +8,16 @@ package blob import ( "context" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "io" "os" "sync" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" @@ -35,12 +36,16 @@ type Client base.Client[generated.BlobClient] // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(blobURL string, cred azcore.TokenCredential, 
options *ClientOptions) (*Client, error) { - authPolicy := shared.NewStorageChallengePolicy(cred) + audience := base.GetAudience((*base.ClientOptions)(options)) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewBlobClient(blobURL, pl, &cred)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewBlobClient(blobURL, azClient, &cred, (*base.ClientOptions)(conOptions))), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -49,9 +54,12 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - return (*Client)(base.NewBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewBlobClient(blobURL, azClient, nil, (*base.ClientOptions)(conOptions))), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. @@ -61,10 +69,13 @@ func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, func NewClientWithSharedKeyCredential(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { authPolicy := exported.NewSharedKeyCredPolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewBlobClient(blobURL, pl, cred)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewBlobClient(blobURL, azClient, cred, (*base.ClientOptions)(conOptions))), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. @@ -102,6 +113,10 @@ func (b *Client) credential() any { return base.Credential((*base.Client[generated.BlobClient])(b)) } +func (b *Client) getClientOptions() *base.ClientOptions { + return base.GetClientOptions((*base.Client[generated.BlobClient])(b)) +} + // URL returns the URL endpoint used by the Client object. 
func (b *Client) URL() string { return b.generated().Endpoint() @@ -116,7 +131,7 @@ func (b *Client) WithSnapshot(snapshot string) (*Client, error) { } p.Snapshot = snapshot - return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.credential())), nil + return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential(), b.getClientOptions())), nil } // WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. @@ -128,7 +143,7 @@ func (b *Client) WithVersionID(versionID string) (*Client, error) { } p.VersionID = versionID - return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.credential())), nil + return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential(), b.getClientOptions())), nil } // Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. @@ -261,8 +276,8 @@ func (b *Client) SetLegalHold(ctx context.Context, legalHold bool, options *SetL // CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. // For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. func (b *Client) CopyFromURL(ctx context.Context, copySource string, options *CopyFromURLOptions) (CopyFromURLResponse, error) { - copyOptions, smac, mac, lac := options.format() - resp, err := b.generated().CopyFromURL(ctx, copySource, copyOptions, smac, mac, lac) + copyOptions, smac, mac, lac, cpkScopeInfo := options.format() + resp, err := b.generated().CopyFromURL(ctx, copySource, copyOptions, smac, mac, lac, cpkScopeInfo) return resp, err } @@ -314,8 +329,8 @@ func (b *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o // Concurrent Download Functions ----------------------------------------------------------------------------------------- -// download downloads an Azure blob to a WriterAt in parallel. -func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOptions) (int64, error) { +// downloadBuffer downloads an Azure blob to a WriterAt in parallel. +func (b *Client) downloadBuffer(ctx context.Context, writer io.WriterAt, o downloadOptions) (int64, error) { if o.BlockSize == 0 { o.BlockSize = DefaultDownloadBlockSize } @@ -343,6 +358,7 @@ func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOpt OperationName: "downloadBlobToWriterAt", TransferSize: count, ChunkSize: o.BlockSize, + NumChunks: uint64(((count - 1) / o.BlockSize) + 1), Concurrency: o.Concurrency, Operation: func(ctx context.Context, chunkStart int64, count int64) error { downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{ @@ -409,7 +425,7 @@ func (b *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *DownloadB if o == nil { o = &DownloadBufferOptions{} } - return b.download(ctx, shared.NewBytesWriter(buffer), (downloadOptions)(*o)) + return b.downloadBuffer(ctx, shared.NewBytesWriter(buffer), (downloadOptions)(*o)) } // DownloadFile downloads an Azure blob to a local file. 
@@ -448,7 +464,7 @@ func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFil } if size > 0 { - return b.download(ctx, file, *do) + return b.downloadBuffer(ctx, file, *do) } else { // if the blob's size is 0, there is no need in downloading it return 0, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go index c15635448..daef800ed 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go @@ -9,6 +9,7 @@ package blob import ( "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" ) const ( @@ -18,6 +19,9 @@ const ( // DefaultDownloadBlockSize is default block size DefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB + + // DefaultConcurrency is the default number of blocks downloaded or uploaded in parallel + DefaultConcurrency = shared.DefaultConcurrency ) // BlobType defines values for BlobType @@ -53,6 +57,7 @@ type AccessTier = generated.AccessTier const ( AccessTierArchive AccessTier = generated.AccessTierArchive AccessTierCool AccessTier = generated.AccessTierCool + AccessTierCold AccessTier = generated.AccessTierCold AccessTierHot AccessTier = generated.AccessTierHot AccessTierP10 AccessTier = generated.AccessTierP10 AccessTierP15 AccessTier = generated.AccessTierP15 @@ -148,6 +153,7 @@ type ArchiveStatus = generated.ArchiveStatus const ( ArchiveStatusRehydratePendingToCool ArchiveStatus = generated.ArchiveStatusRehydratePendingToCool ArchiveStatusRehydratePendingToHot ArchiveStatus = generated.ArchiveStatusRehydratePendingToHot + ArchiveStatusRehydratePendingToCold ArchiveStatus = generated.ArchiveStatusRehydratePendingToCold ) // PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go index fea2f1ec5..d73346889 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go @@ -51,7 +51,7 @@ type Tags = generated.BlobTag // HTTPRange defines a range of bytes within an HTTP resource, starting at offset and // ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange -// which has an offset but no zero value count indicates from the offset to the resource's end. +// which has an offset and zero value count indicates from the offset to the resource's end. 
type HTTPRange = exported.HTTPRange // Request Model Declaration ------------------------------------------------------------------------------------------- @@ -458,7 +458,7 @@ type SetImmutabilityPolicyOptions struct { func (o *SetImmutabilityPolicyOptions) format() (*generated.BlobClientSetImmutabilityPolicyOptions, *ModifiedAccessConditions) { if o == nil { - return nil, nil + return &generated.BlobClientSetImmutabilityPolicyOptions{}, nil } ac := &exported.BlobAccessConditions{ ModifiedAccessConditions: o.ModifiedAccessConditions, @@ -544,11 +544,13 @@ type CopyFromURLOptions struct { SourceModifiedAccessConditions *SourceModifiedAccessConditions BlobAccessConditions *AccessConditions + + CPKScopeInfo *CPKScopeInfo } -func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions, *generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { +func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions, *generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions, *generated.CPKScopeInfo) { if o == nil { - return nil, nil, nil, nil + return nil, nil, nil, nil, nil } options := &generated.BlobClientCopyFromURLOptions{ @@ -563,7 +565,7 @@ func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions, } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.BlobAccessConditions) - return options, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions + return options, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions, o.CPKScopeInfo } // --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go index ad653c1c4..07fad6061 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go @@ -69,6 +69,7 @@ const ( CopyIDMismatch Code = "CopyIdMismatch" EmptyMetadataKey Code = "EmptyMetadataKey" FeatureVersionMismatch Code = "FeatureVersionMismatch" + ImmutabilityPolicyDeleteOnLockedPolicy Code = "ImmutabilityPolicyDeleteOnLockedPolicy" IncrementalCopyBlobMismatch Code = "IncrementalCopyBlobMismatch" IncrementalCopyOfEralierVersionSnapshotNotAllowed Code = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" IncrementalCopySourceMustBeSnapshot Code = "IncrementalCopySourceMustBeSnapshot" @@ -122,6 +123,7 @@ const ( NoAuthenticationInformation Code = "NoAuthenticationInformation" NoPendingCopyOperation Code = "NoPendingCopyOperation" OperationNotAllowedOnIncrementalCopyBlob Code = "OperationNotAllowedOnIncrementalCopyBlob" + OperationNotAllowedOnRootBlob Code = "OperationNotAllowedOnRootBlob" OperationTimedOut Code = "OperationTimedOut" OutOfRangeInput Code = "OutOfRangeInput" OutOfRangeQueryParameterValue Code = "OutOfRangeQueryParameterValue" @@ -153,4 +155,5 @@ const ( var ( // MissingSharedKeyCredential - Error is returned when SAS URL is being created without SharedKeyCredential. 
MissingSharedKeyCredential = errors.New("SAS can only be signed with a SharedKeyCredential") + UnsupportedChecksum = errors.New("for multi-part uploads, user generated checksums cannot be validated") ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go index 340d4bc76..24df42c75 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go @@ -18,6 +18,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" ) // blockWriter provides methods to upload blocks that represent a file to a server and commit them. @@ -28,27 +29,8 @@ type blockWriter interface { CommitBlockList(context.Context, []string, *CommitBlockListOptions) (CommitBlockListResponse, error) } -// bufferManager provides an abstraction for the management of buffers. -// this is mostly for testing purposes, but does allow for different implementations without changing the algorithm. -type bufferManager[T ~[]byte] interface { - // Acquire returns the channel that contains the pool of buffers. - Acquire() <-chan T - - // Release releases the buffer back to the pool for reuse/cleanup. - Release(T) - - // Grow grows the number of buffers, up to the predefined max. - // It returns the total number of buffers or an error. - // No error is returned if the number of buffers has reached max. - // This is called only from the reading goroutine. - Grow() (int, error) - - // Free cleans up all buffers. - Free() -} - // copyFromReader copies a source io.Reader to blob storage using concurrent uploads. -func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst blockWriter, options UploadStreamOptions, getBufferManager func(maxBuffers int, bufferSize int64) bufferManager[T]) (CommitBlockListResponse, error) { +func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst blockWriter, options UploadStreamOptions, getBufferManager func(maxBuffers int, bufferSize int64) shared.BufferManager[T]) (CommitBlockListResponse, error) { options.setDefaults() wg := sync.WaitGroup{} // Used to know when all outgoing blocks have finished processing @@ -93,7 +75,7 @@ func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst blockWrit } var n int - n, err = io.ReadFull(src, buffer) + n, err = shared.ReadAtLeast(src, buffer, len(buffer)) if n > 0 { // some data was read, upload it @@ -126,7 +108,7 @@ func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst blockWrit } if err != nil { // The reader is done, no more outgoing buffers - if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + if errors.Is(err, io.EOF) { // these are expected errors, we don't surface those err = nil } else { @@ -265,49 +247,3 @@ func (ubi uuidBlockID) WithBlockNumber(blockNumber uint32) uuidBlockID { func (ubi uuidBlockID) ToBase64() string { return blockID(ubi).ToBase64() } - -// mmbPool implements the bufferManager interface. -// it uses anonymous memory mapped files for buffers. -// don't use this type directly, use newMMBPool() instead. 
-type mmbPool struct { - buffers chan mmb - count int - max int - size int64 -} - -func newMMBPool(maxBuffers int, bufferSize int64) bufferManager[mmb] { - return &mmbPool{ - buffers: make(chan mmb, maxBuffers), - max: maxBuffers, - size: bufferSize, - } -} - -func (pool *mmbPool) Acquire() <-chan mmb { - return pool.buffers -} - -func (pool *mmbPool) Grow() (int, error) { - if pool.count < pool.max { - buffer, err := newMMB(pool.size) - if err != nil { - return 0, err - } - pool.buffers <- buffer - pool.count++ - } - return pool.count, nil -} - -func (pool *mmbPool) Release(buffer mmb) { - pool.buffers <- buffer -} - -func (pool *mmbPool) Free() { - for i := 0; i < pool.count; i++ { - buffer := <-pool.buffers - buffer.delete() - } - pool.count = 0 -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go index d7e5aa0dc..8901f1dbd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go @@ -14,20 +14,24 @@ import ( "io" "math" "os" + "reflect" "sync" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" ) // ClientOptions contains the optional parameters when creating a Client. @@ -41,12 +45,16 @@ type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := shared.NewStorageChallengePolicy(cred) + audience := base.GetAudience((*base.ClientOptions)(options)) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewBlockBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewBlockBlobClient(blobURL, azClient, nil)), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. 
@@ -55,9 +63,13 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - return (*Client)(base.NewBlockBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + return (*Client)(base.NewBlockBlobClient(blobURL, azClient, nil)), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. @@ -67,10 +79,14 @@ func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) { authPolicy := exported.NewSharedKeyCredPolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewBlockBlobClient(blobURL, pl, cred)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + return (*Client)(base.NewBlockBlobClient(blobURL, azClient, cred)), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. @@ -115,7 +131,7 @@ func (bb *Client) URL() string { return bb.generated().Endpoint() } -// BlobClient returns the embedded blob client for this AppendBlob client. +// BlobClient returns the embedded blob client for this BlockBlob client. func (bb *Client) BlobClient() *blob.Client { blobClient, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb)) return (*blob.Client)(blobClient) @@ -130,7 +146,7 @@ func (bb *Client) WithSnapshot(snapshot string) (*Client, error) { } p.Snapshot = snapshot - return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Pipeline(), bb.sharedKey())), nil + return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Internal(), bb.sharedKey())), nil } // WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. @@ -142,7 +158,7 @@ func (bb *Client) WithVersionID(versionID string) (*Client, error) { } p.VersionID = versionID - return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Pipeline(), bb.sharedKey())), nil + return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Internal(), bb.sharedKey())), nil } // Upload creates a new block blob or overwrites an existing block blob. 
@@ -160,6 +176,13 @@ func (bb *Client) Upload(ctx context.Context, body io.ReadSeekCloser, options *U opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions := options.format() + if options != nil && options.TransactionalValidation != nil { + body, err = options.TransactionalValidation.Apply(body, opts) + if err != nil { + return UploadResponse{}, err + } + } + resp, err := bb.generated().Upload(ctx, count, body, opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions) return resp, err } @@ -248,6 +271,11 @@ func (bb *Client) CommitBlockList(ctx context.Context, base64BlockIDs []string, ImmutabilityPolicyExpiry: options.ImmutabilityPolicyExpiryTime, } + // If user attempts to pass in their own checksum, errors out. + if options.TransactionalContentMD5 != nil || options.TransactionalContentCRC64 != nil { + return CommitBlockListResponse{}, bloberror.UnsupportedChecksum + } + headers = options.HTTPHeaders leaseAccess, modifiedAccess = exported.FormatBlobAccessConditions(options.AccessConditions) cpkInfo = options.CPKInfo @@ -384,6 +412,12 @@ func (bb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.Co return bb.BlobClient().CopyFromURL(ctx, copySource, o) } +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at block blob. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (bb *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) { + return bb.BlobClient().GetSASURL(permissions, expiry, o) +} + // Concurrent Upload Functions ----------------------------------------------------------------------------------------- // uploadFromReader uploads a buffer in blocks to a block blob. @@ -440,6 +474,7 @@ func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, actu OperationName: "uploadFromReader", TransferSize: actualSize, ChunkSize: o.BlockSize, + NumChunks: uint64(((actualSize - 1) / o.BlockSize) + 1), Concurrency: o.Concurrency, Operation: func(ctx context.Context, offset int64, chunkSize int64) error { // This function is called once per block. @@ -494,6 +529,12 @@ func (bb *Client) UploadBuffer(ctx context.Context, buffer []byte, o *UploadBuff if o != nil { uploadOptions = *o } + + // If user attempts to pass in their own checksum, errors out. + if uploadOptions.TransactionalValidation != nil && reflect.TypeOf(uploadOptions.TransactionalValidation).Kind() != reflect.Func { + return UploadBufferResponse{}, bloberror.UnsupportedChecksum + } + return bb.uploadFromReader(ctx, bytes.NewReader(buffer), int64(len(buffer)), &uploadOptions) } @@ -507,6 +548,12 @@ func (bb *Client) UploadFile(ctx context.Context, file *os.File, o *UploadFileOp if o != nil { uploadOptions = *o } + + // If user attempts to pass in their own checksum, errors out. + if uploadOptions.TransactionalValidation != nil && reflect.TypeOf(uploadOptions.TransactionalValidation).Kind() != reflect.Func { + return UploadFileResponse{}, bloberror.UnsupportedChecksum + } + return bb.uploadFromReader(ctx, file, stat.Size(), &uploadOptions) } @@ -517,7 +564,12 @@ func (bb *Client) UploadStream(ctx context.Context, body io.Reader, o *UploadStr o = &UploadStreamOptions{} } - result, err := copyFromReader(ctx, body, bb, *o, newMMBPool) + // If user attempts to pass in their own checksum, errors out. 
+ if o.TransactionalValidation != nil && reflect.TypeOf(o.TransactionalValidation).Kind() != reflect.Func { + return UploadStreamResponse{}, bloberror.UnsupportedChecksum + } + + result, err := copyFromReader(ctx, body, bb, *o, shared.NewMMBPool) if err != nil { return CommitBlockListResponse{}, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go index cb1162640..ce3a5d8de 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go @@ -37,3 +37,16 @@ const ( func PossibleBlockListTypeValues() []BlockListType { return generated.PossibleBlockListTypeValues() } + +// BlobCopySourceTags - can be 'COPY' or 'REPLACE' +type BlobCopySourceTags = generated.BlobCopySourceTags + +const ( + BlobCopySourceTagsCopy = generated.BlobCopySourceTagsCOPY + BlobCopySourceTagsReplace = generated.BlobCopySourceTagsREPLACE +) + +// PossibleBlobCopySourceTagsValues returns the possible values for the BlobCopySourceTags const type. +func PossibleBlobCopySourceTagsValues() []BlobCopySourceTags { + return generated.PossibleBlobCopySourceTagsValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go index ba1b9ee9f..453d569e5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go @@ -36,8 +36,9 @@ type UploadOptions struct { // Optional. Indicates the tier to be set on the blob. Tier *blob.AccessTier - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte + // TransactionalValidation specifies the transfer validation type to use. + // The default is nil (no transfer validation). + TransactionalValidation blob.TransferValidationType HTTPHeaders *blob.HTTPHeaders CPKInfo *blob.CPKInfo @@ -46,6 +47,9 @@ type UploadOptions struct { LegalHold *bool ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting ImmutabilityPolicyExpiryTime *time.Time + + // Deprecated: TransactionalContentMD5 can be set by using TransactionalValidation instead + TransactionalContentMD5 []byte } func (o *UploadOptions) format() (*generated.BlockBlobClientUploadOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions, @@ -81,6 +85,9 @@ type UploadBlobFromURLOptions struct { // Optional, default is true. Indicates if properties from the source blob should be copied. CopySourceBlobProperties *bool + // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. + CopySourceTags *BlobCopySourceTags + // Optional. Specifies a user-defined name-value pair associated with the blob. 
Metadata map[string]*string @@ -109,6 +116,7 @@ func (o *UploadBlobFromURLOptions) format() (*generated.BlockBlobClientPutBlobFr BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), CopySourceAuthorization: o.CopySourceAuthorization, CopySourceBlobProperties: o.CopySourceBlobProperties, + CopySourceTags: o.CopySourceTags, Metadata: o.Metadata, SourceContentMD5: o.SourceContentMD5, Tier: o.Tier, @@ -190,8 +198,6 @@ type CommitBlockListOptions struct { RequestID *string Tier *blob.AccessTier Timeout *int32 - TransactionalContentCRC64 []byte - TransactionalContentMD5 []byte HTTPHeaders *blob.HTTPHeaders CPKInfo *blob.CPKInfo CPKScopeInfo *blob.CPKScopeInfo @@ -199,6 +205,12 @@ type CommitBlockListOptions struct { LegalHold *bool ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting ImmutabilityPolicyExpiryTime *time.Time + + // Deprecated: TransactionalContentCRC64 cannot be generated + TransactionalContentCRC64 []byte + + // Deprecated: TransactionalContentMD5 cannot be generated + TransactionalContentMD5 []byte } // --------------------------------------------------------------------------------------------------------------------- @@ -253,9 +265,10 @@ type uploadFromReaderOptions struct { TransactionalValidation blob.TransferValidationType - // Optional header, Specifies the transactional crc64 for the body, to be validated by the service. + // Deprecated: TransactionalContentCRC64 cannot be generated at block level TransactionalContentCRC64 uint64 - // Specify the transactional md5 for the body, to be validated by the service. + + // Deprecated: TransactionalContentMD5 cannot be generated at block level TransactionalContentMD5 []byte } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml index f5100e131..2259336b2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml @@ -21,11 +21,12 @@ pr: - sdk/storage/azblob -stages: - - template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml +extends: + template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml parameters: ServiceDirectory: 'storage/azblob' RunLiveTests: true + UsePipelineProxy: false EnvVars: AZURE_CLIENT_ID: $(AZBLOB_CLIENT_ID) AZURE_TENANT_ID: $(AZBLOB_TENANT_ID) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go index 5c4b719c4..c511d8a79 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go @@ -31,11 +31,7 @@ type Client struct { // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - var clientOptions *service.ClientOptions - if options != nil { - clientOptions = &service.ClientOptions{ClientOptions: options.ClientOptions} - } - svcClient, err := service.NewClient(serviceURL, cred, clientOptions) + svcClient, err := service.NewClient(serviceURL, cred, (*service.ClientOptions)(options)) if err != nil { return nil, err } @@ -50,11 +46,7 @@ func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOp // - serviceURL - the URL of the storage account e.g. https://.blob.core.windows.net/? 
// - options - client options; pass nil to accept the default values func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) { - var clientOptions *service.ClientOptions - if options != nil { - clientOptions = &service.ClientOptions{ClientOptions: options.ClientOptions} - } - svcClient, err := service.NewClientWithNoCredential(serviceURL, clientOptions) + svcClient, err := service.NewClientWithNoCredential(serviceURL, (*service.ClientOptions)(options)) if err != nil { return nil, err } @@ -83,15 +75,12 @@ func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredenti // - connectionString - a connection string for the desired storage account // - options - client options; pass nil to accept the default values func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) { - if options == nil { - options = &ClientOptions{} - } - containerClient, err := service.NewClientFromConnectionString(connectionString, (*service.ClientOptions)(options)) + svcClient, err := service.NewClientFromConnectionString(connectionString, (*service.ClientOptions)(options)) if err != nil { return nil, err } return &Client{ - svc: containerClient, + svc: svcClient, }, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go index 560e151d5..48771e8c9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go @@ -32,5 +32,5 @@ func ParseURL(u string) (URLParts, error) { // HTTPRange defines a range of bytes within an HTTP resource, starting at offset and // ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange -// which has an offset but no zero value count indicates from the offset to the resource's end. +// which has an offset and zero value count indicates from the offset to the resource's end. 
type HTTPRange = exported.HTTPRange diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go index 2ac5a820a..0e43ed015 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go @@ -11,8 +11,6 @@ import ( "context" "errors" "fmt" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "net/http" "net/url" "time" @@ -20,8 +18,10 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" @@ -42,12 +42,16 @@ type Client base.Client[generated.ContainerClient] // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := shared.NewStorageChallengePolicy(cred) + audience := base.GetAudience((*base.ClientOptions)(options)) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewContainerClient(containerURL, pl, &cred)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewContainerClient(containerURL, azClient, &cred, (*base.ClientOptions)(conOptions))), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -56,9 +60,12 @@ func NewClient(containerURL string, cred azcore.TokenCredential, options *Client // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(containerURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - return (*Client)(base.NewContainerClient(containerURL, pl, nil)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewContainerClient(containerURL, azClient, nil, (*base.ClientOptions)(conOptions))), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. 
@@ -68,10 +75,13 @@ func NewClientWithNoCredential(containerURL string, options *ClientOptions) (*Cl func NewClientWithSharedKeyCredential(containerURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { authPolicy := exported.NewSharedKeyCredPolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewContainerClient(containerURL, pl, cred)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewContainerClient(containerURL, azClient, cred, (*base.ClientOptions)(conOptions))), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. @@ -113,6 +123,10 @@ func getGeneratedBlobClient(b *blob.Client) *generated.BlobClient { return base.InnerClient((*base.Client[generated.BlobClient])(b)) } +func (c *Client) getClientOptions() *base.ClientOptions { + return base.GetClientOptions((*base.Client[generated.ContainerClient])(c)) +} + // URL returns the URL endpoint used by the Client object. func (c *Client) URL() string { return c.generated().Endpoint() @@ -124,7 +138,7 @@ func (c *Client) URL() string { func (c *Client) NewBlobClient(blobName string) *blob.Client { blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().Pipeline(), c.credential())) + return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.credential(), c.getClientOptions())) } // NewAppendBlobClient creates a new appendblob.Client object by concatenating blobName to the end of @@ -133,7 +147,7 @@ func (c *Client) NewBlobClient(blobName string) *blob.Client { func (c *Client) NewAppendBlobClient(blobName string) *appendblob.Client { blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*appendblob.Client)(base.NewAppendBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey())) + return (*appendblob.Client)(base.NewAppendBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.sharedKey())) } // NewBlockBlobClient creates a new blockblob.Client object by concatenating blobName to the end of @@ -142,7 +156,7 @@ func (c *Client) NewAppendBlobClient(blobName string) *appendblob.Client { func (c *Client) NewBlockBlobClient(blobName string) *blockblob.Client { blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*blockblob.Client)(base.NewBlockBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey())) + return (*blockblob.Client)(base.NewBlockBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.sharedKey())) } // NewPageBlobClient creates a new pageblob.Client object by concatenating blobName to the end of @@ -151,7 +165,7 @@ func (c *Client) NewBlockBlobClient(blobName string) *blockblob.Client { func (c *Client) NewPageBlobClient(blobName string) *pageblob.Client { blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*pageblob.Client)(base.NewPageBlobClient(blobURL, 
c.generated().Pipeline(), c.sharedKey())) + return (*pageblob.Client)(base.NewPageBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.sharedKey())) } // Create creates a new container within a storage account. If a container with the same name already exists, the operation fails. @@ -272,7 +286,7 @@ func (c *Client) NewListBlobsFlatPager(o *ListBlobsFlatOptions) *runtime.Pager[L if err != nil { return ListBlobsFlatResponse{}, err } - resp, err := c.generated().Pipeline().Do(req) + resp, err := c.generated().InternalClient().Pipeline().Do(req) if err != nil { return ListBlobsFlatResponse{}, err } @@ -308,7 +322,7 @@ func (c *Client) NewListBlobsHierarchyPager(delimiter string, o *ListBlobsHierar if err != nil { return ListBlobsHierarchyResponse{}, err } - resp, err := c.generated().Pipeline().Do(req) + resp, err := c.generated().InternalClient().Pipeline().Do(req) if err != nil { return ListBlobsHierarchyResponse{}, err } @@ -334,7 +348,6 @@ func (c *Client) GetSASURL(permissions sas.ContainerPermissions, expiry time.Tim // Containers do not have snapshots, nor versions. qps, err := sas.BlobSignatureValues{ Version: sas.Version, - Protocol: sas.ProtocolHTTPS, ContainerName: urlParts.ContainerName, Permissions: permissions.String(), StartTime: st, @@ -357,7 +370,8 @@ func (c *Client) NewBatchBuilder() (*BatchBuilder, error) { switch cred := c.credential().(type) { case *azcore.TokenCredential: - authPolicy = shared.NewStorageChallengePolicy(*cred) + conOptions := c.getClientOptions() + authPolicy = shared.NewStorageChallengePolicy(*cred, base.GetAudience(conOptions), conOptions.InsecureAllowCredentialWithHTTP) case *SharedKeyCredential: authPolicy = exported.NewSharedKeyCredPolicy(cred) case nil: @@ -412,3 +426,12 @@ func (c *Client) SubmitBatch(ctx context.Context, bb *BatchBuilder, options *Sub Version: resp.Version, }, nil } + +// FilterBlobs operation finds all blobs in the container whose tags match a given search expression. +// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags-container +// eg. "dog='germanshepherd' and penguin='emperorpenguin'" +func (c *Client) FilterBlobs(ctx context.Context, where string, o *FilterBlobsOptions) (FilterBlobsResponse, error) { + containerClientFilterBlobsOptions := o.format() + resp, err := c.generated().FilterBlobs(ctx, where, containerClientFilterBlobsOptions) + return resp, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go index f1724861e..61d936ab7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go @@ -397,3 +397,31 @@ type SubmitBatchOptions struct { func (o *SubmitBatchOptions) format() *generated.ContainerClientSubmitBatchOptions { return nil } + +// --------------------------------------------------------------------------------------------------------------------- + +// FilterBlobsOptions provides set of options for Client.FilterBlobs. +type FilterBlobsOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. 
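The new container-level FilterBlobs helper introduced above takes the raw tag `where` expression directly. A minimal usage sketch, with client construction elided; the Blobs/Name field names come from the generated FilterBlobSegment model that backs FilterBlobsResponse and should be treated as an assumption of this sketch:

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
)

// printTaggedBlobs lists blobs in one container whose tags match the expression,
// e.g. "project='blobcsi' AND env='test'".
func printTaggedBlobs(ctx context.Context, c *container.Client, where string) error {
	resp, err := c.FilterBlobs(ctx, where, nil)
	if err != nil {
		return err
	}
	for _, b := range resp.Blobs {
		if b != nil && b.Name != nil {
			fmt.Println(*b.Name)
		}
	}
	return nil
}
```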
The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + MaxResults *int32 +} + +func (o *FilterBlobsOptions) format() *generated.ContainerClientFilterBlobsOptions { + if o == nil { + return nil + } + return &generated.ContainerClientFilterBlobsOptions{ + Marker: o.Marker, + Maxresults: o.MaxResults, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go index 4d1e406ea..9aaefe277 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go @@ -64,3 +64,6 @@ type SubmitBatchResponse struct { // BatchResponseItem contains the response for the individual sub-requests. type BatchResponseItem = exported.BatchResponseItem + +// FilterBlobsResponse contains the response from method Client.FilterBlobs. +type FilterBlobsResponse = generated.ContainerClientFilterBlobsResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go index 6c3cc5c93..073de855b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go @@ -8,19 +8,26 @@ package base import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "strings" ) // ClientOptions contains the optional parameters when creating a Client. type ClientOptions struct { azcore.ClientOptions + + // Audience to use when requesting tokens for Azure Active Directory authentication. + // Only has an effect when credential is of type TokenCredential. The value could be + // https://storage.azure.com/ (default) or https://.blob.core.windows.net. 
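The Audience option documented above is only consulted for token credentials; GetAudience (in the next hunk) turns it into an OAuth scope. A standalone restatement of that rule for illustration only, where defaultTokenScope stands in for the module's internal shared.TokenScope:

```go
package main

import (
	"fmt"
	"strings"
)

// defaultTokenScope mirrors the module's internal shared.TokenScope value.
const defaultTokenScope = "https://storage.azure.com/.default"

// audienceToScope restates the GetAudience rule: an empty or blank audience falls
// back to the default storage scope; otherwise "/.default" is appended to the
// audience after trimming trailing slashes.
func audienceToScope(audience string) string {
	if strings.TrimSpace(audience) == "" {
		return defaultTokenScope
	}
	return strings.TrimRight(audience, "/") + "/.default"
}

func main() {
	fmt.Println(audienceToScope(""))
	// https://storage.azure.com/.default
	fmt.Println(audienceToScope("https://myaccount.blob.core.windows.net/"))
	// https://myaccount.blob.core.windows.net/.default
}
```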
+ Audience string } type Client[T any] struct { inner *T credential any + options *ClientOptions } func InnerClient[T any](client *Client[T]) *T { @@ -40,28 +47,43 @@ func Credential[T any](client *Client[T]) any { return client.credential } +func GetClientOptions[T any](client *Client[T]) *ClientOptions { + return client.options +} + +func GetAudience(clOpts *ClientOptions) string { + if clOpts == nil || len(strings.TrimSpace(clOpts.Audience)) == 0 { + return shared.TokenScope + } else { + return strings.TrimRight(clOpts.Audience, "/") + "/.default" + } +} + func NewClient[T any](inner *T) *Client[T] { return &Client[T]{inner: inner} } -func NewServiceClient(containerURL string, pipeline runtime.Pipeline, credential any) *Client[generated.ServiceClient] { +func NewServiceClient(containerURL string, azClient *azcore.Client, credential any, options *ClientOptions) *Client[generated.ServiceClient] { return &Client[generated.ServiceClient]{ - inner: generated.NewServiceClient(containerURL, pipeline), + inner: generated.NewServiceClient(containerURL, azClient), credential: credential, + options: options, } } -func NewContainerClient(containerURL string, pipeline runtime.Pipeline, credential any) *Client[generated.ContainerClient] { +func NewContainerClient(containerURL string, azClient *azcore.Client, credential any, options *ClientOptions) *Client[generated.ContainerClient] { return &Client[generated.ContainerClient]{ - inner: generated.NewContainerClient(containerURL, pipeline), + inner: generated.NewContainerClient(containerURL, azClient), credential: credential, + options: options, } } -func NewBlobClient(blobURL string, pipeline runtime.Pipeline, credential any) *Client[generated.BlobClient] { +func NewBlobClient(blobURL string, azClient *azcore.Client, credential any, options *ClientOptions) *Client[generated.BlobClient] { return &Client[generated.BlobClient]{ - inner: generated.NewBlobClient(blobURL, pipeline), + inner: generated.NewBlobClient(blobURL, azClient), credential: credential, + options: options, } } @@ -72,29 +94,32 @@ type CompositeClient[T, U any] struct { } func InnerClients[T, U any](client *CompositeClient[T, U]) (*Client[T], *U) { - return &Client[T]{inner: client.innerT}, client.innerU + return &Client[T]{ + inner: client.innerT, + credential: client.sharedKey, + }, client.innerU } -func NewAppendBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.AppendBlobClient] { +func NewAppendBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.AppendBlobClient] { return &CompositeClient[generated.BlobClient, generated.AppendBlobClient]{ - innerT: generated.NewBlobClient(blobURL, pipeline), - innerU: generated.NewAppendBlobClient(blobURL, pipeline), + innerT: generated.NewBlobClient(blobURL, azClient), + innerU: generated.NewAppendBlobClient(blobURL, azClient), sharedKey: sharedKey, } } -func NewBlockBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.BlockBlobClient] { +func NewBlockBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.BlockBlobClient] { return &CompositeClient[generated.BlobClient, generated.BlockBlobClient]{ - innerT: generated.NewBlobClient(blobURL, pipeline), - innerU: 
generated.NewBlockBlobClient(blobURL, pipeline), + innerT: generated.NewBlobClient(blobURL, azClient), + innerU: generated.NewBlockBlobClient(blobURL, azClient), sharedKey: sharedKey, } } -func NewPageBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.PageBlobClient] { +func NewPageBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.PageBlobClient] { return &CompositeClient[generated.BlobClient, generated.PageBlobClient]{ - innerT: generated.NewBlobClient(blobURL, pipeline), - innerU: generated.NewPageBlobClient(blobURL, pipeline), + innerT: generated.NewBlobClient(blobURL, azClient), + innerU: generated.NewPageBlobClient(blobURL, azClient), sharedKey: sharedKey, } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go index 64a88688a..c26c62aa8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go @@ -11,12 +11,6 @@ import ( "bytes" "errors" "fmt" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/internal/log" - "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" "io" "mime" "mime/multipart" @@ -24,6 +18,13 @@ import ( "net/textproto" "strconv" "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" ) const ( @@ -45,11 +46,11 @@ func createBatchID() (string, error) { // buildSubRequest is used for building the sub-request. Example: // DELETE /container0/blob0 HTTP/1.1 // x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT -// Authorization: SharedKey account:G4jjBXA7LI/RnWKIOQ8i9xH4p76pAQ+4Fs4R1VxasaE= +// Authorization: SharedKey account: // Content-Length: 0 func buildSubRequest(req *policy.Request) []byte { var batchSubRequest strings.Builder - blobPath := req.Raw().URL.Path + blobPath := req.Raw().URL.EscapedPath() if len(req.Raw().URL.RawQuery) > 0 { blobPath += "?" 
+ req.Raw().URL.RawQuery } @@ -80,7 +81,7 @@ func buildSubRequest(req *policy.Request) []byte { // // DELETE /container0/blob0 HTTP/1.1 // x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT -// Authorization: SharedKey account:G4jjBXA7LI/RnWKIOQ8i9xH4p76pAQ+4Fs4R1VxasaE= +// Authorization: SharedKey account: // Content-Length: 0 func CreateBatchRequest(bb *BlobBatchBuilder) ([]byte, string, error) { batchID, err := createBatchID() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go index 9bc1ca47d..d0355727c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go @@ -13,7 +13,7 @@ import ( // HTTPRange defines a range of bytes within an HTTP resource, starting at offset and // ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange -// which has an offset but no zero value count indicates from the offset to the resource's end. +// which has an offset and zero value count indicates from the offset to the resource's end. type HTTPRange struct { Offset int64 Count int64 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go index d15631054..adf46b068 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go @@ -172,7 +172,7 @@ func (c *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, er // Join the sorted key values separated by ',' // Then prepend "keyName:"; then add this string to the buffer - cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ",")) + cr.WriteString("\n" + strings.ToLower(paramName) + ":" + strings.Join(paramValues, ",")) } } return cr.String(), nil @@ -195,6 +195,13 @@ func NewSharedKeyCredPolicy(cred *SharedKeyCredential) *SharedKeyCredPolicy { } func (s *SharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) { + // skip adding the authorization header if no SharedKeyCredential was provided. + // this prevents a panic that might be hard to diagnose and allows testing + // against http endpoints that don't require authentication. 
+ if s.cred == nil { + return req.Next() + } + if d := getHeader(shared.HeaderXmsDate, req.Raw().Header); d == "" { req.Raw().Header.Set(shared.HeaderXmsDate, time.Now().UTC().Format(http.TimeFormat)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go index f27f52983..720d6e8fd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go @@ -7,6 +7,6 @@ package exported const ( - ModuleName = "azblob" - ModuleVersion = "v1.1.0" + ModuleName = "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + ModuleVersion = "v1.3.2" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go index 3b6184fea..288df7edd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go @@ -8,12 +8,25 @@ package generated -import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) func (client *AppendBlobClient) Endpoint() string { return client.endpoint } -func (client *AppendBlobClient) Pipeline() runtime.Pipeline { - return client.pl +func (client *AppendBlobClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewAppendBlobClient creates a new instance of AppendBlobClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. 
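The nil-credential guard added to SharedKeyCredPolicy.Do above is a general defensive pattern for azcore pipeline policies: when the policy has nothing to contribute, hand the request straight to the next policy instead of panicking. A hedged sketch of the same pattern in a custom policy; the header name is illustrative only:

```go
package policies

import (
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

// optionalHeaderPolicy injects a header only when a value was configured;
// otherwise it is a pass-through, mirroring the s.cred == nil guard above.
type optionalHeaderPolicy struct {
	value string // empty means "not configured"
}

func (p *optionalHeaderPolicy) Do(req *policy.Request) (*http.Response, error) {
	if p.value == "" {
		// Nothing to add: forward the request unchanged.
		return req.Next()
	}
	req.Raw().Header.Set("x-example-header", p.value) // illustrative header name
	return req.Next()
}
```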
+func NewAppendBlobClient(endpoint string, azClient *azcore.Client) *AppendBlobClient { + client := &AppendBlobClient{ + internal: azClient, + endpoint: endpoint, + } + return client } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md index 3044a608a..92dc7e2d3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md @@ -7,7 +7,7 @@ go: true clear-output-folder: false version: "^3.0.0" license-header: MICROSOFT_MIT_NO_VERSION -input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/e515b6251fdc21015282d2e84b85beec7c091763/specification/storage/data-plane/Microsoft.BlobStorage/preview/2020-10-02/blob.json" +input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/a32d0b2423d19835246bb2ef92941503bfd5e734/specification/storage/data-plane/Microsoft.BlobStorage/preview/2021-12-02/blob.json" credential-scope: "https://storage.azure.com/.default" output-folder: ../generated file-prefix: "zz_" @@ -19,7 +19,43 @@ modelerfour: seal-single-value-enum-by-default: true lenient-model-deduplication: true export-clients: true -use: "@autorest/go@4.0.0-preview.45" +use: "@autorest/go@4.0.0-preview.61" +``` + +### Updating service version to 2023-11-03 +```yaml +directive: +- from: + - zz_appendblob_client.go + - zz_blob_client.go + - zz_blockblob_client.go + - zz_container_client.go + - zz_pageblob_client.go + - zz_service_client.go + where: $ + transform: >- + return $. + replaceAll(`[]string{"2021-12-02"}`, `[]string{ServiceVersion}`). + replaceAll(`2021-12-02`, `2023-11-03`); +``` + +### Undo breaking change with BlobName +``` yaml +directive: +- from: zz_models.go + where: $ + transform: >- + return $. + replace(/Name\s+\*BlobName/g, `Name *string`); +``` + +### Removing UnmarshalXML for BlobItems to create customer UnmarshalXML function +```yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + $.BlobItemInternal["x-ms-go-omit-serde-methods"] = true; ``` ### Remove pager methods and export various generated methods in container client @@ -244,7 +280,9 @@ directive: ``` yaml directive: -- from: zz_models.go +- from: + - zz_models.go + - zz_options.go where: $ transform: >- return $. @@ -407,8 +445,8 @@ directive: where: $ transform: >- return $. 
- replace(/if\s+!runtime\.HasStatusCode\(resp,\s+http\.StatusOK\)\s+\{\s*\n\t\treturn\s+ServiceClientSubmitBatchResponse\{\}\,\s+runtime\.NewResponseError\(resp\)\s*\n\t\}/g, - `if !runtime.HasStatusCode(resp, http.StatusAccepted) {\n\t\treturn ServiceClientSubmitBatchResponse{}, runtime.NewResponseError(resp)\n\t}`); + replace(/if\s+!runtime\.HasStatusCode\(httpResp,\s+http\.StatusOK\)\s+\{\s+err\s+=\s+runtime\.NewResponseError\(httpResp\)\s+return ServiceClientSubmitBatchResponse\{\}\,\s+err\s+}/g, + `if !runtime.HasStatusCode(httpResp, http.StatusAccepted) {\n\t\terr = runtime.NewResponseError(httpResp)\n\t\treturn ServiceClientSubmitBatchResponse{}, err\n\t}`); ``` ### Convert time to GMT for If-Modified-Since and If-Unmodified-Since request headers diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go index 478962de1..343073b2e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go @@ -8,8 +8,8 @@ package generated import ( "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "time" ) @@ -20,8 +20,8 @@ func (client *BlobClient) Endpoint() string { return client.endpoint } -func (client *BlobClient) Pipeline() runtime.Pipeline { - return client.pl +func (client *BlobClient) InternalClient() *azcore.Client { + return client.internal } func (client *BlobClient) DeleteCreateRequest(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { @@ -31,3 +31,14 @@ func (client *BlobClient) DeleteCreateRequest(ctx context.Context, options *Blob func (client *BlobClient) SetTierCreateRequest(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { return client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions) } + +// NewBlobClient creates a new instance of BlobClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. 
+func NewBlobClient(endpoint string, azClient *azcore.Client) *BlobClient { + client := &BlobClient{ + internal: azClient, + endpoint: endpoint, + } + return client +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go index a43e327ec..873d9a419 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go @@ -8,12 +8,25 @@ package generated -import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) func (client *BlockBlobClient) Endpoint() string { return client.endpoint } -func (client *BlockBlobClient) Pipeline() runtime.Pipeline { - return client.pl +func (client *BlockBlobClient) Internal() *azcore.Client { + return client.internal +} + +// NewBlockBlobClient creates a new instance of BlockBlobClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. +func NewBlockBlobClient(endpoint string, azClient *azcore.Client) *BlockBlobClient { + client := &BlockBlobClient{ + internal: azClient, + endpoint: endpoint, + } + return client } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go new file mode 100644 index 000000000..8f2bbbb7c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go @@ -0,0 +1,9 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +const ServiceVersion = "2023-11-03" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go index bbbf828a0..d43b2c782 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go @@ -6,12 +6,25 @@ package generated -import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) func (client *ContainerClient) Endpoint() string { return client.endpoint } -func (client *ContainerClient) Pipeline() runtime.Pipeline { - return client.pl +func (client *ContainerClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewContainerClient creates a new instance of ContainerClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - pl - the pipeline used for sending requests and handling responses. 
+func NewContainerClient(endpoint string, azClient *azcore.Client) *ContainerClient { + client := &ContainerClient{ + internal: azClient, + endpoint: endpoint, + } + return client } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go index 759d92630..aaef9f53b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go @@ -6,6 +6,12 @@ package generated +import ( + "encoding/xml" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "net/url" +) + type TransactionalContentSetter interface { SetCRC64([]byte) SetMD5([]byte) @@ -35,6 +41,14 @@ func (p *PageBlobClientUploadPagesOptions) SetMD5(v []byte) { p.TransactionalContentMD5 = v } +func (b *BlockBlobClientUploadOptions) SetCRC64(v []byte) { + b.TransactionalContentCRC64 = v +} + +func (b *BlockBlobClientUploadOptions) SetMD5(v []byte) { + b.TransactionalContentMD5 = v +} + type SourceContentSetter interface { SetSourceContentCRC64(v []byte) SetSourceContentMD5(v []byte) @@ -63,3 +77,65 @@ func (p *PageBlobClientUploadPagesFromURLOptions) SetSourceContentCRC64(v []byte func (p *PageBlobClientUploadPagesFromURLOptions) SetSourceContentMD5(v []byte) { p.SourceContentMD5 = v } + +// Custom UnmarshalXML functions for types that need special handling. + +// UnmarshalXML implements the xml.Unmarshaller interface for type BlobPrefix. +func (b *BlobPrefix) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias BlobPrefix + aux := &struct { + *alias + BlobName *BlobName `xml:"Name"` + }{ + alias: (*alias)(b), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + if aux.BlobName != nil { + if aux.BlobName.Encoded != nil && *aux.BlobName.Encoded { + name, err := url.QueryUnescape(*aux.BlobName.Content) + + // name, err := base64.StdEncoding.DecodeString(*aux.BlobName.Content) + if err != nil { + return err + } + b.Name = to.Ptr(string(name)) + } else { + b.Name = aux.BlobName.Content + } + } + return nil +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type BlobItem. 
+func (b *BlobItem) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias BlobItem + aux := &struct { + *alias + BlobName *BlobName `xml:"Name"` + Metadata additionalProperties `xml:"Metadata"` + OrMetadata additionalProperties `xml:"OrMetadata"` + }{ + alias: (*alias)(b), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + b.Metadata = (map[string]*string)(aux.Metadata) + b.OrMetadata = (map[string]*string)(aux.OrMetadata) + if aux.BlobName != nil { + if aux.BlobName.Encoded != nil && *aux.BlobName.Encoded { + name, err := url.QueryUnescape(*aux.BlobName.Content) + + // name, err := base64.StdEncoding.DecodeString(*aux.BlobName.Content) + if err != nil { + return err + } + b.Name = to.Ptr(string(name)) + } else { + b.Name = aux.BlobName.Content + } + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go index 8a212cc3d..a7c76208a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go @@ -6,12 +6,25 @@ package generated -import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) func (client *PageBlobClient) Endpoint() string { return client.endpoint } -func (client *PageBlobClient) Pipeline() runtime.Pipeline { - return client.pl +func (client *PageBlobClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewPageBlobClient creates a new instance of PageBlobClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. +func NewPageBlobClient(endpoint string, azClient *azcore.Client) *PageBlobClient { + client := &PageBlobClient{ + internal: azClient, + endpoint: endpoint, + } + return client } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go index 1f449b955..32c15a2b0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go @@ -6,12 +6,25 @@ package generated -import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) func (client *ServiceClient) Endpoint() string { return client.endpoint } -func (client *ServiceClient) Pipeline() runtime.Pipeline { - return client.pl +func (client *ServiceClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewServiceClient creates a new instance of ServiceClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. 
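The custom UnmarshalXML hooks above exist because listing results can carry `<Name Encoded="true">` elements; in that case the name content is URL-query-unescaped before being surfaced as a plain string. The decode step in isolation, with illustrative values:

```go
package main

import (
	"fmt"
	"log"
	"net/url"
)

func main() {
	// What the service may return inside <Name Encoded="true">...</Name>.
	encoded := "folder%2Fmy%20blob.txt"

	// Same call the UnmarshalXML hooks use when Encoded is true.
	decoded, err := url.QueryUnescape(encoded)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded) // folder/my blob.txt
}
```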
+func NewServiceClient(endpoint string, azClient *azcore.Client) *ServiceClient { + client := &ServiceClient{ + internal: azClient, + endpoint: endpoint, + } + return client } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go index d0ddb9064..797318611 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -22,21 +21,10 @@ import ( ) // AppendBlobClient contains the methods for the AppendBlob group. -// Don't use this type directly, use NewAppendBlobClient() instead. +// Don't use this type directly, use a constructor function instead. type AppendBlobClient struct { + internal *azcore.Client endpoint string - pl runtime.Pipeline -} - -// NewAppendBlobClient creates a new instance of AppendBlobClient with the specified values. -// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// - pl - the pipeline used for sending requests and handling responses. -func NewAppendBlobClient(endpoint string, pl runtime.Pipeline) *AppendBlobClient { - client := &AppendBlobClient{ - endpoint: endpoint, - pl: pl, - } - return client } // AppendBlock - The Append Block operation commits a new block of data to the end of an existing append blob. The Append @@ -44,7 +32,7 @@ func NewAppendBlobClient(endpoint string, pl runtime.Pipeline) *AppendBlobClient // AppendBlob. Append Block is supported only on version 2015-02-21 version or later. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - contentLength - The length of the request. // - body - Initial data // - options - AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. @@ -55,18 +43,21 @@ func NewAppendBlobClient(endpoint string, pl runtime.Pipeline) *AppendBlobClient // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *AppendBlobClient) AppendBlock(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientAppendBlockResponse, error) { + var err error req, err := client.appendBlockCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return AppendBlobClientAppendBlockResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return AppendBlobClientAppendBlockResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return AppendBlobClientAppendBlockResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientAppendBlockResponse{}, err } - return client.appendBlockHandleResponse(resp) + resp, err := client.appendBlockHandleResponse(httpResp) + return resp, err } // appendBlockCreateRequest creates the AppendBlock request. @@ -124,33 +115,33 @@ func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, co if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, "application/octet-stream") + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil } // appendBlockHandleResponse handles the AppendBlock response. 
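All of the regenerated methods in this file now funnel unexpected status codes through runtime.NewResponseError before returning, so callers can inspect failures uniformly. A hedged sketch of that inspection on the caller side, using errors.As plus the module's bloberror helpers; the error value err is assumed to come from any azblob or appendblob call:

```go
package example

import (
	"errors"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
)

// describeStorageError shows how a caller can unwrap the *azcore.ResponseError
// produced by the regenerated clients and branch on well-known blob error codes.
func describeStorageError(err error) {
	if err == nil {
		return
	}
	var respErr *azcore.ResponseError
	if errors.As(err, &respErr) {
		fmt.Printf("HTTP %d, service error code %q\n", respErr.StatusCode, respErr.ErrorCode)
	}
	if bloberror.HasCode(err, bloberror.BlobNotFound, bloberror.ContainerNotFound) {
		fmt.Println("target blob or container does not exist")
	}
}
```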
func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockResponse, error) { result := AppendBlobClientAppendBlockResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { return AppendBlobClientAppendBlockResponse{}, err } - result.LastModified = &lastModified + result.BlobCommittedBlockCount = &blobCommittedBlockCount } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return AppendBlobClientAppendBlockResponse{}, err - } - result.ContentMD5 = contentMD5 + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { contentCRC64, err := base64.StdEncoding.DecodeString(val) @@ -159,14 +150,12 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( } result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -175,16 +164,14 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( } result.Date = &date } - if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { - result.BlobAppendOffset = &val + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return AppendBlobClientAppendBlockResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) @@ -193,11 +180,18 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - 
result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -207,7 +201,7 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( // created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - sourceURL - Specify a URL to the copy source. // - contentLength - The length of the request. // - options - AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL @@ -221,18 +215,21 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( // - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL // method. func (client *AppendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (AppendBlobClientAppendBlockFromURLResponse, error) { + var err error req, err := client.appendBlockFromURLCreateRequest(ctx, sourceURL, contentLength, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) if err != nil { return AppendBlobClientAppendBlockFromURLResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return AppendBlobClientAppendBlockFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return AppendBlobClientAppendBlockFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientAppendBlockFromURLResponse{}, err } - return client.appendBlockFromURLHandleResponse(resp) + resp, err := client.appendBlockFromURLHandleResponse(httpResp) + return resp, err } // appendBlockFromURLCreateRequest creates the AppendBlockFromURL request. @@ -309,7 +306,7 @@ func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -323,22 +320,16 @@ func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont // appendBlockFromURLHandleResponse handles the AppendBlockFromURL response. 
func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockFromURLResponse, error) { result := AppendBlobClientAppendBlockFromURLResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return AppendBlobClientAppendBlockFromURLResponse{}, err - } - result.LastModified = &lastModified + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { return AppendBlobClientAppendBlockFromURLResponse{}, err } - result.ContentMD5 = contentMD5 + result.BlobCommittedBlockCount = &blobCommittedBlockCount } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { contentCRC64, err := base64.StdEncoding.DecodeString(val) @@ -347,11 +338,12 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp } result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -360,16 +352,8 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp } result.Date = &date } - if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { - result.BlobAppendOffset = &val - } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return AppendBlobClientAppendBlockFromURLResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { result.EncryptionKeySHA256 = &val @@ -384,13 +368,26 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp } result.IsServerEncrypted = &isServerEncrypted } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // Create - The Create Append Blob operation creates a new append blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - contentLength - The length of the request. 
// - options - AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. // - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. @@ -399,18 +396,21 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *AppendBlobClient) Create(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientCreateResponse, error) { + var err error req, err := client.createCreateRequest(ctx, contentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return AppendBlobClientCreateResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return AppendBlobClientCreateResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return AppendBlobClientCreateResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientCreateResponse{}, err } - return client.createHandleResponse(resp) + resp, err := client.createHandleResponse(httpResp) + return resp, err } // createCreateRequest creates the Create request. @@ -481,7 +481,7 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -504,15 +504,8 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content // createHandleResponse handles the Create response. 
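Create and AppendBlock, as refactored here, are reached through the public appendblob client. A minimal end-to-end sketch under the assumption that containerClient is an already-constructed *container.Client; streaming.NopCloser turns a reader into the io.ReadSeekCloser the API expects:

```go
package example

import (
	"bytes"
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
)

// appendLine creates the append blob and appends one block to it.
func appendLine(ctx context.Context, containerClient *container.Client, blobName string, line []byte) error {
	abClient := containerClient.NewAppendBlobClient(blobName)

	// Note: calling Create on an existing append blob resets it to zero length;
	// real callers may want access conditions or an existence check first.
	if _, err := abClient.Create(ctx, nil); err != nil {
		return err
	}

	body := streaming.NopCloser(bytes.NewReader(line))
	_, err := abClient.AppendBlock(ctx, body, nil)
	return err
}
```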
func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (AppendBlobClientCreateResponse, error) { result := AppendBlobClientCreateResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return AppendBlobClientCreateResponse{}, err - } - result.LastModified = &lastModified + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -521,18 +514,6 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -540,6 +521,15 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -547,11 +537,21 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -560,25 +560,28 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen // or later. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. // - AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock // method. 
func (client *AppendBlobClient) Seal(ctx context.Context, options *AppendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (AppendBlobClientSealResponse, error) { + var err error req, err := client.sealCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions, appendPositionAccessConditions) if err != nil { return AppendBlobClientSealResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return AppendBlobClientSealResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return AppendBlobClientSealResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientSealResponse{}, err } - return client.sealHandleResponse(resp) + resp, err := client.sealHandleResponse(httpResp) + return resp, err } // sealCreateRequest creates the Seal request. @@ -593,7 +596,7 @@ func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options * reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -622,25 +625,9 @@ func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options * // sealHandleResponse handles the Seal response. func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendBlobClientSealResponse, error) { result := AppendBlobClientSealResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return AppendBlobClientSealResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -648,6 +635,9 @@ func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendB } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { isSealed, err := strconv.ParseBool(val) if err != nil { @@ -655,5 +645,18 @@ func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendB } result.IsSealed = &isSealed } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go index 67402b5a5..fe568a96c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -23,44 +22,36 @@ import ( ) // BlobClient contains the methods for the Blob group. -// Don't use this type directly, use NewBlobClient() instead. +// Don't use this type directly, use a constructor function instead. type BlobClient struct { + internal *azcore.Client endpoint string - pl runtime.Pipeline -} - -// NewBlobClient creates a new instance of BlobClient with the specified values. -// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// - pl - the pipeline used for sending requests and handling responses. -func NewBlobClient(endpoint string, pl runtime.Pipeline) *BlobClient { - client := &BlobClient{ - endpoint: endpoint, - pl: pl, - } - return client } // AbortCopyFromURL - The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination // blob with zero length and full metadata. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - copyID - The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. // - options - BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *BlobClient) AbortCopyFromURL(ctx context.Context, copyID string, options *BlobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (BlobClientAbortCopyFromURLResponse, error) { + var err error req, err := client.abortCopyFromURLCreateRequest(ctx, copyID, options, leaseAccessConditions) if err != nil { return BlobClientAbortCopyFromURLResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientAbortCopyFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusNoContent) { - return BlobClientAbortCopyFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return BlobClientAbortCopyFromURLResponse{}, err } - return client.abortCopyFromURLHandleResponse(resp) + resp, err := client.abortCopyFromURLHandleResponse(httpResp) + return resp, err } // abortCopyFromURLCreateRequest creates the AbortCopyFromURL request. 
@@ -80,7 +71,7 @@ func (client *BlobClient) abortCopyFromURLCreateRequest(ctx context.Context, cop if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -94,12 +85,6 @@ func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (B if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -107,31 +92,40 @@ func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (B } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // AcquireLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite // lease can be between 15 and 60 seconds. A lease duration cannot be changed using // renew or change. // - options - BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) AcquireLease(ctx context.Context, duration int32, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientAcquireLeaseResponse, error) { + var err error req, err := client.acquireLeaseCreateRequest(ctx, duration, options, modifiedAccessConditions) if err != nil { return BlobClientAcquireLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientAcquireLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlobClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlobClientAcquireLeaseResponse{}, err } - return client.acquireLeaseHandleResponse(resp) + resp, err := client.acquireLeaseHandleResponse(httpResp) + return resp, err } // acquireLeaseCreateRequest creates the AcquireLease request. 
@@ -166,7 +160,7 @@ func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, duratio if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -177,6 +171,16 @@ func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, duratio // acquireLeaseHandleResponse handles the AcquireLease response. func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobClientAcquireLeaseResponse, error) { result := BlobClientAcquireLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientAcquireLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -190,44 +194,37 @@ func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobC if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientAcquireLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // BreakLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) BreakLease(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientBreakLeaseResponse, error) { + var err error req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { return BlobClientBreakLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientBreakLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return BlobClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientBreakLeaseResponse{}, err } - return client.breakLeaseHandleResponse(resp) + resp, err := client.breakLeaseHandleResponse(httpResp) + return resp, err } // breakLeaseCreateRequest creates the BreakLease request. 
@@ -261,7 +258,7 @@ func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options * if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -272,6 +269,16 @@ func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options * // breakLeaseHandleResponse handles the BreakLease response. func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobClientBreakLeaseResponse, error) { result := BlobClientBreakLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -290,29 +297,19 @@ func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobCli } result.LeaseTime = &leaseTime } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientBreakLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // ChangeLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - leaseID - Specifies the current lease ID on the resource. // - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed // lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID @@ -320,18 +317,21 @@ func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobCli // - options - BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *BlobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *BlobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientChangeLeaseResponse, error) { + var err error req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) if err != nil { return BlobClientChangeLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientChangeLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientChangeLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientChangeLeaseResponse{}, err } - return client.changeLeaseHandleResponse(resp) + resp, err := client.changeLeaseHandleResponse(httpResp) + return resp, err } // changeLeaseCreateRequest creates the ChangeLease request. @@ -364,7 +364,7 @@ func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -375,6 +375,16 @@ func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID // changeLeaseHandleResponse handles the ChangeLease response. func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobClientChangeLeaseResponse, error) { result := BlobClientChangeLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientChangeLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -385,25 +395,15 @@ func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobCl } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } - if val := resp.Header.Get("x-ms-lease-id"); val != "" { - result.LeaseID = &val - } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientChangeLeaseResponse{}, err - } - result.Date = &date - } return result, nil } @@ -411,7 +411,7 @@ func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobCl // until the copy is complete. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies // a page blob snapshot. The value should be URL-encoded as it would appear in a request // URI. 
The source blob must either be public or must be authenticated via a shared access signature. @@ -420,23 +420,27 @@ func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobCl // method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -func (client *BlobClient) CopyFromURL(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCopyFromURLResponse, error) { - req, err := client.copyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +func (client *BlobClient) CopyFromURL(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions, cpkScopeInfo *CPKScopeInfo) (BlobClientCopyFromURLResponse, error) { + var err error + req, err := client.copyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions, cpkScopeInfo) if err != nil { return BlobClientCopyFromURLResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientCopyFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return BlobClientCopyFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientCopyFromURLResponse{}, err } - return client.copyFromURLHandleResponse(resp) + resp, err := client.copyFromURLHandleResponse(httpResp) + return resp, err } // copyFromURLCreateRequest creates the CopyFromURL request. 
-func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { +func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions, cpkScopeInfo *CPKScopeInfo) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -488,7 +492,7 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -510,6 +514,12 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour if options != nil && options.CopySourceAuthorization != nil { req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if options != nil && options.CopySourceTags != nil { + req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)} + } req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -517,34 +527,22 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour // copyFromURLHandleResponse handles the CopyFromURL response. 
func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobClientCopyFromURLResponse, error) { result := BlobClientCopyFromURLResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlobClientCopyFromURLResponse{}, err } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val + result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlobClientCopyFromURLResponse{}, err } - result.Date = &date + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("x-ms-copy-id"); val != "" { result.CopyID = &val @@ -552,19 +550,34 @@ func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobCl if val := resp.Header.Get("x-ms-copy-status"); val != "" { result.CopyStatus = &val } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientCopyFromURLResponse{}, err } - result.ContentMD5 = contentMD5 + result.Date = &date } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientCopyFromURLResponse{}, err } - result.ContentCRC64 = contentCRC64 + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -572,25 +585,28 @@ func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobCl // CreateSnapshot - The Create Snapshot operation creates a read-only snapshot of a blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. 
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *BlobClient) CreateSnapshot(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCreateSnapshotResponse, error) { + var err error req, err := client.createSnapshotCreateRequest(ctx, options, cpkInfo, cpkScopeInfo, modifiedAccessConditions, leaseAccessConditions) if err != nil { return BlobClientCreateSnapshotResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientCreateSnapshotResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlobClientCreateSnapshotResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlobClientCreateSnapshotResponse{}, err } - return client.createSnapshotHandleResponse(resp) + resp, err := client.createSnapshotHandleResponse(httpResp) + return resp, err } // createSnapshotCreateRequest creates the CreateSnapshot request. @@ -642,7 +658,7 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -653,12 +669,26 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio // createSnapshotHandleResponse handles the CreateSnapshot response. 
func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (BlobClientCreateSnapshotResponse, error) { result := BlobClientCreateSnapshotResponse{} - if val := resp.Header.Get("x-ms-snapshot"); val != "" { - result.Snapshot = &val + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientCreateSnapshotResponse{}, err + } + result.Date = &date } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientCreateSnapshotResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { @@ -666,32 +696,18 @@ func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (Blo } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } + if val := resp.Header.Get("x-ms-snapshot"); val != "" { + result.Snapshot = &val + } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } if val := resp.Header.Get("x-ms-version-id"); val != "" { result.VersionID = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientCreateSnapshotResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return BlobClientCreateSnapshotResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } return result, nil } @@ -708,23 +724,26 @@ func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (Blo // return an HTTP status code of 404 (ResourceNotFound). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *BlobClient) Delete(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDeleteResponse, error) { + var err error req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return BlobClientDeleteResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientDeleteResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return BlobClientDeleteResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientDeleteResponse{}, err } - return client.deleteHandleResponse(resp) + resp, err := client.deleteHandleResponse(httpResp) + return resp, err } // deleteCreateRequest creates the Delete request. @@ -768,7 +787,7 @@ func (client *BlobClient) deleteCreateRequest(ctx context.Context, options *Blob if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -782,12 +801,6 @@ func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientD if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -795,28 +808,37 @@ func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientD } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // DeleteImmutabilityPolicy - The Delete Immutability Policy operation deletes the immutability policy on the blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy // method. 
func (client *BlobClient) DeleteImmutabilityPolicy(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (BlobClientDeleteImmutabilityPolicyResponse, error) { + var err error req, err := client.deleteImmutabilityPolicyCreateRequest(ctx, options) if err != nil { return BlobClientDeleteImmutabilityPolicyResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientDeleteImmutabilityPolicyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientDeleteImmutabilityPolicyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientDeleteImmutabilityPolicyResponse{}, err } - return client.deleteImmutabilityPolicyHandleResponse(resp) + resp, err := client.deleteImmutabilityPolicyHandleResponse(httpResp) + return resp, err } // deleteImmutabilityPolicyCreateRequest creates the DeleteImmutabilityPolicy request. @@ -831,7 +853,7 @@ func (client *BlobClient) deleteImmutabilityPolicyCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -845,12 +867,6 @@ func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -858,6 +874,12 @@ func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -865,24 +887,27 @@ func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp // can also call Download to read a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *BlobClient) Download(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDownloadResponse, error) { + var err error req, err := client.downloadCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) if err != nil { return BlobClientDownloadResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientDownloadResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent, http.StatusNotModified) { - return BlobClientDownloadResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusPartialContent, http.StatusNotModified) { + err = runtime.NewResponseError(httpResp) + return BlobClientDownloadResponse{}, err } - return client.downloadHandleResponse(resp) + resp, err := client.downloadHandleResponse(httpResp) + return resp, err } // downloadCreateRequest creates the Download request. @@ -939,7 +964,7 @@ func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *Bl if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -950,76 +975,75 @@ func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *Bl // downloadHandleResponse handles the Download response. 
func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClientDownloadResponse, error) { result := BlobClientDownloadResponse{Body: resp.Body} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { return BlobClientDownloadResponse{}, err } - result.LastModified = &lastModified - } - for hh := range resp.Header { - if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) - } - } - if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { - result.ObjectReplicationPolicyID = &val + result.BlobCommittedBlockCount = &blobCommittedBlockCount } - for hh := range resp.Header { - if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) + if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { + blobContentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientDownloadResponse{}, err } + result.BlobContentMD5 = blobContentMD5 } - if val := resp.Header.Get("Content-Length"); val != "" { - contentLength, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientDownloadResponse{}, err } - result.ContentLength = &contentLength + result.BlobSequenceNumber = &blobSequenceNumber } - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) } - if val := resp.Header.Get("Content-Range"); val != "" { - result.ContentRange = &val + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlobClientDownloadResponse{}, err } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Content-Encoding"); val != "" { - result.ContentEncoding = &val - } - if val := resp.Header.Get("Cache-Control"); val != "" { - result.CacheControl = &val + result.ContentCRC64 = contentCRC64 } if val := resp.Header.Get("Content-Disposition"); val != "" { result.ContentDisposition = &val } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } if val := resp.Header.Get("Content-Language"); val != "" { result.ContentLanguage = &val } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := 
resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientDownloadResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber + result.ContentLength = &contentLength } - if val := resp.Header.Get("x-ms-blob-type"); val != "" { - result.BlobType = (*BlobType)(&val) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Range"); val != "" { + result.ContentRange = &val + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val } if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { copyCompletionTime, err := time.Parse(time.RFC1123, val) @@ -1028,9 +1052,6 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } result.CopyCompletionTime = ©CompletionTime } - if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { - result.CopyStatusDescription = &val - } if val := resp.Header.Get("x-ms-copy-id"); val != "" { result.CopyID = &val } @@ -1043,36 +1064,15 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien if val := resp.Header.Get("x-ms-copy-status"); val != "" { result.CopyStatus = (*CopyStatusType)(&val) } - if val := resp.Header.Get("x-ms-lease-duration"); val != "" { - result.LeaseDuration = (*LeaseDurationType)(&val) - } - if val := resp.Header.Get("x-ms-lease-state"); val != "" { - result.LeaseState = (*LeaseStateType)(&val) - } - if val := resp.Header.Get("x-ms-lease-status"); val != "" { - result.LeaseStatus = (*LeaseStatusType)(&val) - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val } - if val := resp.Header.Get("x-ms-is-current-version"); val != "" { - isCurrentVersion, err := strconv.ParseBool(val) + if val := resp.Header.Get("x-ms-creation-time"); val != "" { + creationTime, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientDownloadResponse{}, err } - result.IsCurrentVersion = &isCurrentVersion - } - if val := resp.Header.Get("Accept-Ranges"); val != "" { - result.AcceptRanges = &val + result.CreationTime = &creationTime } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -1081,20 +1081,8 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } result.Date = &date } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount - } - if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted + if val := resp.Header.Get("ETag"); val != "" { + 
result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { result.EncryptionKeySHA256 = &val @@ -1102,19 +1090,25 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { result.EncryptionScope = &val } - if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { - blobContentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-error-code"); val != "" { + result.ErrorCode = &val + } + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientDownloadResponse{}, err } - result.BlobContentMD5 = blobContentMD5 + result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn } - if val := resp.Header.Get("x-ms-tag-count"); val != "" { - tagCount, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + } + if val := resp.Header.Get("x-ms-is-current-version"); val != "" { + isCurrentVersion, err := strconv.ParseBool(val) if err != nil { return BlobClientDownloadResponse{}, err } - result.TagCount = &tagCount + result.IsCurrentVersion = &isCurrentVersion } if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { isSealed, err := strconv.ParseBool(val) @@ -1123,6 +1117,13 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } result.IsSealed = &isSealed } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } if val := resp.Header.Get("x-ms-last-access-time"); val != "" { lastAccessed, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1130,15 +1131,21 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } result.LastAccessed = &lastAccessed } - if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { - immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientDownloadResponse{}, err } - result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { - result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) } if val := resp.Header.Get("x-ms-legal-hold"); val != "" { legalHold, err := strconv.ParseBool(val) @@ -1147,15 +1154,40 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } result.LegalHold = &legalHold } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { 
+ result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { + result.ObjectReplicationPolicyID = &val + } + for hh := range resp.Header { + if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-tag-count"); val != "" { + tagCount, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientDownloadResponse{}, err } - result.ContentCRC64 = contentCRC64 + result.TagCount = &tagCount } - if val := resp.Header.Get("x-ms-error-code"); val != "" { - result.ErrorCode = &val + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -1163,21 +1195,24 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. func (client *BlobClient) GetAccountInfo(ctx context.Context, options *BlobClientGetAccountInfoOptions) (BlobClientGetAccountInfoResponse, error) { + var err error req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { return BlobClientGetAccountInfoResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientGetAccountInfoResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientGetAccountInfoResponse{}, err } - return client.getAccountInfoHandleResponse(resp) + resp, err := client.getAccountInfoHandleResponse(httpResp) + return resp, err } // getAccountInfoCreateRequest creates the GetAccountInfo request. @@ -1190,7 +1225,7 @@ func (client *BlobClient) getAccountInfoCreateRequest(ctx context.Context, optio reqQP.Set("restype", "account") reqQP.Set("comp", "properties") req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -1198,15 +1233,12 @@ func (client *BlobClient) getAccountInfoCreateRequest(ctx context.Context, optio // getAccountInfoHandleResponse handles the GetAccountInfo response. 
func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (BlobClientGetAccountInfoResponse, error) { result := BlobClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1214,11 +1246,14 @@ func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (Blo } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } if val := resp.Header.Get("x-ms-sku-name"); val != "" { result.SKUName = (*SKUName)(&val) } - if val := resp.Header.Get("x-ms-account-kind"); val != "" { - result.AccountKind = (*AccountKind)(&val) + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -1227,24 +1262,27 @@ func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (Blo // for the blob. It does not return the content of the blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) GetProperties(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientGetPropertiesResponse, error) { + var err error req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) if err != nil { return BlobClientGetPropertiesResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientGetPropertiesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientGetPropertiesResponse{}, err } - return client.getPropertiesHandleResponse(resp) + resp, err := client.getPropertiesHandleResponse(httpResp) + return resp, err } // getPropertiesCreateRequest creates the GetProperties request. 
@@ -1291,7 +1329,7 @@ func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, option if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1302,82 +1340,61 @@ func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, option // getPropertiesHandleResponse handles the GetProperties response. func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (BlobClientGetPropertiesResponse, error) { result := BlobClientGetPropertiesResponse{} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("x-ms-access-tier"); val != "" { + result.AccessTier = &val + } + if val := resp.Header.Get("x-ms-access-tier-change-time"); val != "" { + accessTierChangeTime, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.LastModified = &lastModified + result.AccessTierChangeTime = &accessTierChangeTime } - if val := resp.Header.Get("x-ms-creation-time"); val != "" { - creationTime, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-access-tier-inferred"); val != "" { + accessTierInferred, err := strconv.ParseBool(val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.CreationTime = &creationTime - } - for hh := range resp.Header { - if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) - } - } - if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { - result.ObjectReplicationPolicyID = &val - } - for hh := range resp.Header { - if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) - } + result.AccessTierInferred = &accessTierInferred } - if val := resp.Header.Get("x-ms-blob-type"); val != "" { - result.BlobType = (*BlobType)(&val) + if val := resp.Header.Get("x-ms-archive-status"); val != "" { + result.ArchiveStatus = &val } - if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { - copyCompletionTime, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.CopyCompletionTime = ©CompletionTime - } - if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { - result.CopyStatusDescription = &val - } - if val := resp.Header.Get("x-ms-copy-id"); val != "" { - result.CopyID = &val - } - if val := resp.Header.Get("x-ms-copy-progress"); val != "" { - result.CopyProgress = &val - } - if val := resp.Header.Get("x-ms-copy-source"); val != "" { - result.CopySource = &val - } - if val := 
resp.Header.Get("x-ms-copy-status"); val != "" { - result.CopyStatus = (*CopyStatusType)(&val) + result.BlobCommittedBlockCount = &blobCommittedBlockCount } - if val := resp.Header.Get("x-ms-incremental-copy"); val != "" { - isIncrementalCopy, err := strconv.ParseBool(val) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.IsIncrementalCopy = &isIncrementalCopy + result.BlobSequenceNumber = &blobSequenceNumber } - if val := resp.Header.Get("x-ms-copy-destination-snapshot"); val != "" { - result.DestinationSnapshot = &val + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) } - if val := resp.Header.Get("x-ms-lease-duration"); val != "" { - result.LeaseDuration = (*LeaseDurationType)(&val) + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val } - if val := resp.Header.Get("x-ms-lease-state"); val != "" { - result.LeaseState = (*LeaseStateType)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-lease-status"); val != "" { - result.LeaseStatus = (*LeaseStatusType)(&val) + if val := resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val } if val := resp.Header.Get("Content-Length"); val != "" { contentLength, err := strconv.ParseInt(val, 10, 64) @@ -1386,12 +1403,6 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob } result.ContentLength = &contentLength } - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { @@ -1399,58 +1410,50 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("Content-Encoding"); val != "" { - result.ContentEncoding = &val - } - if val := resp.Header.Get("Content-Disposition"); val != "" { - result.ContentDisposition = &val - } - if val := resp.Header.Get("Content-Language"); val != "" { - result.ContentLanguage = &val - } - if val := resp.Header.Get("Cache-Control"); val != "" { - result.CacheControl = &val + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { + copyCompletionTime, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber + result.CopyCompletionTime = ©CompletionTime } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val + if val := resp.Header.Get("x-ms-copy-progress"); val != "" { + result.CopyProgress = &val } - if 
val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("x-ms-copy-source"); val != "" { + result.CopySource = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.Date = &date + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) } - if val := resp.Header.Get("Accept-Ranges"); val != "" { - result.AcceptRanges = &val + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if val := resp.Header.Get("x-ms-creation-time"); val != "" { + creationTime, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.BlobCommittedBlockCount = &blobCommittedBlockCount + result.CreationTime = &creationTime } - if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.IsServerEncrypted = &isServerEncrypted + result.Date = &date + } + if val := resp.Header.Get("x-ms-copy-destination-snapshot"); val != "" { + result.DestinationSnapshot = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { result.EncryptionKeySHA256 = &val @@ -1458,28 +1461,22 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { result.EncryptionScope = &val } - if val := resp.Header.Get("x-ms-access-tier"); val != "" { - result.AccessTier = &val - } - if val := resp.Header.Get("x-ms-access-tier-inferred"); val != "" { - accessTierInferred, err := strconv.ParseBool(val) + if val := resp.Header.Get("x-ms-expiry-time"); val != "" { + expiresOn, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.AccessTierInferred = &accessTierInferred - } - if val := resp.Header.Get("x-ms-archive-status"); val != "" { - result.ArchiveStatus = &val + result.ExpiresOn = &expiresOn } - if val := resp.Header.Get("x-ms-access-tier-change-time"); val != "" { - accessTierChangeTime, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.AccessTierChangeTime = &accessTierChangeTime + result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) } if val := resp.Header.Get("x-ms-is-current-version"); val != "" { isCurrentVersion, err := strconv.ParseBool(val) @@ -1488,19 +1485,12 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob } result.IsCurrentVersion = &isCurrentVersion } - if val := 
resp.Header.Get("x-ms-tag-count"); val != "" { - tagCount, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.TagCount = &tagCount - } - if val := resp.Header.Get("x-ms-expiry-time"); val != "" { - expiresOn, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-incremental-copy"); val != "" { + isIncrementalCopy, err := strconv.ParseBool(val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.ExpiresOn = &expiresOn + result.IsIncrementalCopy = &isIncrementalCopy } if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { isSealed, err := strconv.ParseBool(val) @@ -1509,8 +1499,12 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob } result.IsSealed = &isSealed } - if val := resp.Header.Get("x-ms-rehydrate-priority"); val != "" { - result.RehydratePriority = &val + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted } if val := resp.Header.Get("x-ms-last-access-time"); val != "" { lastAccessed, err := time.Parse(time.RFC1123, val) @@ -1519,15 +1513,21 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob } result.LastAccessed = &lastAccessed } - if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { - immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { - result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) } if val := resp.Header.Get("x-ms-legal-hold"); val != "" { legalHold, err := strconv.ParseBool(val) @@ -1536,29 +1536,70 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob } result.LegalHold = &legalHold } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { + result.ObjectReplicationPolicyID = &val + } + for hh := range resp.Header { + if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-rehydrate-priority"); val != "" { + result.RehydratePriority = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-tag-count"); val != "" { + tagCount, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return 
BlobClientGetPropertiesResponse{}, err + } + result.TagCount = &tagCount + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } return result, nil } // GetTags - The Get Tags operation enables users to get the tags associated with a blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *BlobClient) GetTags(ctx context.Context, options *BlobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientGetTagsResponse, error) { + var err error req, err := client.getTagsCreateRequest(ctx, options, modifiedAccessConditions, leaseAccessConditions) if err != nil { return BlobClientGetTagsResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientGetTagsResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientGetTagsResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientGetTagsResponse{}, err } - return client.getTagsHandleResponse(resp) + resp, err := client.getTagsHandleResponse(httpResp) + return resp, err } // getTagsCreateRequest creates the GetTags request. @@ -1579,7 +1620,7 @@ func (client *BlobClient) getTagsCreateRequest(ctx context.Context, options *Blo reqQP.Set("versionid", *options.VersionID) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1599,12 +1640,6 @@ func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClient if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1612,6 +1647,12 @@ func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClient } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.BlobTags); err != nil { return BlobClientGetTagsResponse{}, err } @@ -1621,24 +1662,27 @@ func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClient // Query - The Query operation enables users to select/project on blob data by providing simple query expressions. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) Query(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientQueryResponse, error) { + var err error req, err := client.queryCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) if err != nil { return BlobClientQueryResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientQueryResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent) { - return BlobClientQueryResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusPartialContent) { + err = runtime.NewResponseError(httpResp) + return BlobClientQueryResponse{}, err } - return client.queryHandleResponse(resp) + resp, err := client.queryHandleResponse(httpResp) + return resp, err } // queryCreateRequest creates the Query request. @@ -1684,13 +1728,16 @@ func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobC if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.QueryRequest != nil { - return req, runtime.MarshalAsXML(req, *options.QueryRequest) + if err := runtime.MarshalAsXML(req, *options.QueryRequest); err != nil { + return nil, err + } + return req, nil } return req, nil } @@ -1698,65 +1745,75 @@ func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobC // queryHandleResponse handles the Query response. 
func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQueryResponse, error) { result := BlobClientQueryResponse{Body: resp.Body} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { return BlobClientQueryResponse{}, err } - result.LastModified = &lastModified + result.BlobCommittedBlockCount = &blobCommittedBlockCount } - for hh := range resp.Header { - if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { + blobContentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientQueryResponse{}, err } + result.BlobContentMD5 = blobContentMD5 } - if val := resp.Header.Get("Content-Length"); val != "" { - contentLength, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientQueryResponse{}, err } - result.ContentLength = &contentLength + result.BlobSequenceNumber = &blobSequenceNumber } - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) } - if val := resp.Header.Get("Content-Range"); val != "" { - result.ContentRange = &val + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlobClientQueryResponse{}, err } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Content-Encoding"); val != "" { - result.ContentEncoding = &val - } - if val := resp.Header.Get("Cache-Control"); val != "" { - result.CacheControl = &val + result.ContentCRC64 = contentCRC64 } if val := resp.Header.Get("Content-Disposition"); val != "" { result.ContentDisposition = &val } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } if val := resp.Header.Get("Content-Language"); val != "" { result.ContentLanguage = &val } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientQueryResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber + result.ContentLength = &contentLength } - if val := resp.Header.Get("x-ms-blob-type"); val != "" { - result.BlobType = (*BlobType)(&val) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err 
:= base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Range"); val != "" { + result.ContentRange = &val + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val } if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { copyCompletionTime, err := time.Parse(time.RFC1123, val) @@ -1765,9 +1822,6 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu } result.CopyCompletionTime = ©CompletionTime } - if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { - result.CopyStatusDescription = &val - } if val := resp.Header.Get("x-ms-copy-id"); val != "" { result.CopyID = &val } @@ -1780,26 +1834,8 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu if val := resp.Header.Get("x-ms-copy-status"); val != "" { result.CopyStatus = (*CopyStatusType)(&val) } - if val := resp.Header.Get("x-ms-lease-duration"); val != "" { - result.LeaseDuration = (*LeaseDurationType)(&val) - } - if val := resp.Header.Get("x-ms-lease-state"); val != "" { - result.LeaseState = (*LeaseStateType)(&val) - } - if val := resp.Header.Get("x-ms-lease-status"); val != "" { - result.LeaseStatus = (*LeaseStatusType)(&val) - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Accept-Ranges"); val != "" { - result.AcceptRanges = &val + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -1808,13 +1844,14 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu } result.Date = &date } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return BlobClientQueryResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val } if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) @@ -1823,25 +1860,35 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = 
(*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) } - if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { - blobContentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlobClientQueryResponse{}, err + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) } - result.BlobContentMD5 = blobContentMD5 } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlobClientQueryResponse{}, err - } - result.ContentCRC64 = contentCRC64 + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -1849,23 +1896,26 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu // ReleaseLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - leaseID - Specifies the current lease ID on the resource. // - options - BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) ReleaseLease(ctx context.Context, leaseID string, options *BlobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientReleaseLeaseResponse, error) { + var err error req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { return BlobClientReleaseLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientReleaseLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientReleaseLeaseResponse{}, err } - return client.releaseLeaseHandleResponse(resp) + resp, err := client.releaseLeaseHandleResponse(httpResp) + return resp, err } // releaseLeaseCreateRequest creates the ReleaseLease request. @@ -1897,7 +1947,7 @@ func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1908,6 +1958,16 @@ func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID // releaseLeaseHandleResponse handles the ReleaseLease response. 
func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobClientReleaseLeaseResponse, error) { result := BlobClientReleaseLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientReleaseLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -1918,45 +1978,38 @@ func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobC } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientReleaseLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // RenewLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - leaseID - Specifies the current lease ID on the resource. // - options - BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) RenewLease(ctx context.Context, leaseID string, options *BlobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientRenewLeaseResponse, error) { + var err error req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { return BlobClientRenewLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientRenewLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientRenewLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientRenewLeaseResponse{}, err } - return client.renewLeaseHandleResponse(resp) + resp, err := client.renewLeaseHandleResponse(httpResp) + return resp, err } // renewLeaseCreateRequest creates the RenewLease request. @@ -1988,7 +2041,7 @@ func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID s if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1999,6 +2052,16 @@ func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID s // renewLeaseHandleResponse handles the RenewLease response. 
func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobClientRenewLeaseResponse, error) { result := BlobClientRenewLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientRenewLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -2012,44 +2075,37 @@ func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobCli if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientRenewLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // SetExpiry - Sets the time a blob will expire and be deleted. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - expiryOptions - Required. Indicates mode of the expiry time // - options - BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. func (client *BlobClient) SetExpiry(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (BlobClientSetExpiryResponse, error) { + var err error req, err := client.setExpiryCreateRequest(ctx, expiryOptions, options) if err != nil { return BlobClientSetExpiryResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetExpiryResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetExpiryResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetExpiryResponse{}, err } - return client.setExpiryHandleResponse(resp) + resp, err := client.setExpiryHandleResponse(httpResp) + return resp, err } // setExpiryCreateRequest creates the SetExpiry request. @@ -2064,7 +2120,7 @@ func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOpti reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2079,6 +2135,16 @@ func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOpti // setExpiryHandleResponse handles the SetExpiry response. 
func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClientSetExpiryResponse, error) { result := BlobClientSetExpiryResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetExpiryResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -2089,46 +2155,39 @@ func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClie } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetExpiryResponse{}, err - } - result.Date = &date - } return result, nil } // SetHTTPHeaders - The Set HTTP Headers operation sets system properties on the blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. // - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) SetHTTPHeaders(ctx context.Context, options *BlobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetHTTPHeadersResponse, error) { + var err error req, err := client.setHTTPHeadersCreateRequest(ctx, options, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions) if err != nil { return BlobClientSetHTTPHeadersResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetHTTPHeadersResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetHTTPHeadersResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetHTTPHeadersResponse{}, err } - return client.setHTTPHeadersHandleResponse(resp) + resp, err := client.setHTTPHeadersHandleResponse(httpResp) + return resp, err } // setHTTPHeadersCreateRequest creates the SetHTTPHeaders request. 
@@ -2179,7 +2238,7 @@ func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, optio if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2190,16 +2249,6 @@ func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, optio // setHTTPHeadersHandleResponse handles the SetHTTPHeaders response. func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (BlobClientSetHTTPHeadersResponse, error) { result := BlobClientSetHTTPHeadersResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetHTTPHeadersResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -2210,12 +2259,6 @@ func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (Blo if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2223,29 +2266,48 @@ func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (Blo } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetHTTPHeadersResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // SetImmutabilityPolicy - The Set Immutability Policy operation sets the immutability policy on the blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy // method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *BlobClient) SetImmutabilityPolicy(ctx context.Context, options *BlobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetImmutabilityPolicyResponse, error) { + var err error req, err := client.setImmutabilityPolicyCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { return BlobClientSetImmutabilityPolicyResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetImmutabilityPolicyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetImmutabilityPolicyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetImmutabilityPolicyResponse{}, err } - return client.setImmutabilityPolicyHandleResponse(resp) + resp, err := client.setImmutabilityPolicyHandleResponse(httpResp) + return resp, err } // setImmutabilityPolicyCreateRequest creates the SetImmutabilityPolicy request. @@ -2260,7 +2322,7 @@ func (client *BlobClient) setImmutabilityPolicyCreateRequest(ctx context.Context reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2283,12 +2345,6 @@ func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Respons if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2306,28 +2362,37 @@ func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Respons if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // SetLegalHold - The Set Legal Hold operation sets a legal hold on the blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - legalHold - Specified if a legal hold should be set on the blob. // - options - BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. 
func (client *BlobClient) SetLegalHold(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (BlobClientSetLegalHoldResponse, error) { + var err error req, err := client.setLegalHoldCreateRequest(ctx, legalHold, options) if err != nil { return BlobClientSetLegalHoldResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetLegalHoldResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetLegalHoldResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetLegalHoldResponse{}, err } - return client.setLegalHoldHandleResponse(resp) + resp, err := client.setLegalHoldHandleResponse(httpResp) + return resp, err } // setLegalHoldCreateRequest creates the SetLegalHold request. @@ -2342,7 +2407,7 @@ func (client *BlobClient) setLegalHoldCreateRequest(ctx context.Context, legalHo reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2357,12 +2422,6 @@ func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobC if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2377,6 +2436,12 @@ func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobC } result.LegalHold = &legalHold } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -2384,25 +2449,28 @@ func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobC // pairs // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *BlobClient) SetMetadata(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetMetadataResponse, error) { + var err error req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return BlobClientSetMetadataResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetMetadataResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetMetadataResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetMetadataResponse{}, err } - return client.setMetadataHandleResponse(resp) + resp, err := client.setMetadataHandleResponse(httpResp) + return resp, err } // setMetadataCreateRequest creates the SetMetadata request. @@ -2454,7 +2522,7 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2465,28 +2533,9 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options // setMetadataHandleResponse handles the SetMetadata response. func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobClientSetMetadataResponse, error) { result := BlobClientSetMetadataResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetMetadataResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2494,6 +2543,15 @@ func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobCl } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -2501,11 +2559,21 @@ func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobCl } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := 
resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetMetadataResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -2513,24 +2581,27 @@ func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobCl // SetTags - The Set Tags operation enables users to set tags on a blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - tags - Blob tags // - options - BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *BlobClient) SetTags(ctx context.Context, tags BlobTags, options *BlobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientSetTagsResponse, error) { + var err error req, err := client.setTagsCreateRequest(ctx, tags, options, modifiedAccessConditions, leaseAccessConditions) if err != nil { return BlobClientSetTagsResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetTagsResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusNoContent) { - return BlobClientSetTagsResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetTagsResponse{}, err } - return client.setTagsHandleResponse(resp) + resp, err := client.setTagsHandleResponse(httpResp) + return resp, err } // setTagsCreateRequest creates the SetTags request. @@ -2548,7 +2619,7 @@ func (client *BlobClient) setTagsCreateRequest(ctx context.Context, tags BlobTag reqQP.Set("versionid", *options.VersionID) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.TransactionalContentMD5 != nil { req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } @@ -2565,7 +2636,10 @@ func (client *BlobClient) setTagsCreateRequest(ctx context.Context, tags BlobTag req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, runtime.MarshalAsXML(req, tags) + if err := runtime.MarshalAsXML(req, tags); err != nil { + return nil, err + } + return req, nil } // setTagsHandleResponse handles the SetTags response. 
@@ -2574,12 +2648,6 @@ func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClient if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2587,6 +2655,12 @@ func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClient } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -2596,24 +2670,27 @@ func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClient // storage type. This operation does not update the blob's ETag. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - tier - Indicates the tier to be set on the blob. // - options - BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) SetTier(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetTierResponse, error) { + var err error req, err := client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return BlobClientSetTierResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetTierResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { - return BlobClientSetTierResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetTierResponse{}, err } - return client.setTierHandleResponse(resp) + resp, err := client.setTierHandleResponse(httpResp) + return resp, err } // setTierCreateRequest creates the SetTier request. @@ -2638,7 +2715,7 @@ func (client *BlobClient) setTierCreateRequest(ctx context.Context, tier AccessT if options != nil && options.RehydratePriority != nil { req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2670,7 +2747,7 @@ func (client *BlobClient) setTierHandleResponse(resp *http.Response) (BlobClient // StartCopyFromURL - The Start Copy From URL operation copies a blob or an internet resource to a new blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - copySource - Specifies the name of the source page blob snapshot. 
This value is a URL of up to 2 KB in length that specifies // a page blob snapshot. The value should be URL-encoded as it would appear in a request // URI. The source blob must either be public or must be authenticated via a shared access signature. @@ -2680,18 +2757,21 @@ func (client *BlobClient) setTierHandleResponse(resp *http.Response) (BlobClient // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *BlobClient) StartCopyFromURL(ctx context.Context, copySource string, options *BlobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientStartCopyFromURLResponse, error) { + var err error req, err := client.startCopyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) if err != nil { return BlobClientStartCopyFromURLResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientStartCopyFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return BlobClientStartCopyFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientStartCopyFromURLResponse{}, err } - return client.startCopyFromURLHandleResponse(resp) + resp, err := client.startCopyFromURLHandleResponse(httpResp) + return resp, err } // startCopyFromURLCreateRequest creates the StartCopyFromURL request. @@ -2752,7 +2832,7 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2778,6 +2858,22 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop // startCopyFromURLHandleResponse handles the StartCopyFromURL response. 
func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (BlobClientStartCopyFromURLResponse, error) { result := BlobClientStartCopyFromURLResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientStartCopyFromURLResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -2788,9 +2884,6 @@ func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (B } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } @@ -2800,40 +2893,30 @@ func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (B if val := resp.Header.Get("x-ms-version-id"); val != "" { result.VersionID = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientStartCopyFromURLResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-copy-id"); val != "" { - result.CopyID = &val - } - if val := resp.Header.Get("x-ms-copy-status"); val != "" { - result.CopyStatus = (*CopyStatusType)(&val) - } return result, nil } // Undelete - Undelete a blob that was previously soft deleted // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. func (client *BlobClient) Undelete(ctx context.Context, options *BlobClientUndeleteOptions) (BlobClientUndeleteResponse, error) { + var err error req, err := client.undeleteCreateRequest(ctx, options) if err != nil { return BlobClientUndeleteResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientUndeleteResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientUndeleteResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientUndeleteResponse{}, err } - return client.undeleteHandleResponse(resp) + resp, err := client.undeleteHandleResponse(httpResp) + return resp, err } // undeleteCreateRequest creates the Undelete request. 
@@ -2848,7 +2931,7 @@ func (client *BlobClient) undeleteCreateRequest(ctx context.Context, options *Bl reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2862,12 +2945,6 @@ func (client *BlobClient) undeleteHandleResponse(resp *http.Response) (BlobClien if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2875,5 +2952,11 @@ func (client *BlobClient) undeleteHandleResponse(resp *http.Response) (BlobClien } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go index 7e81d1540..b6115b50a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -22,21 +21,10 @@ import ( ) // BlockBlobClient contains the methods for the BlockBlob group. -// Don't use this type directly, use NewBlockBlobClient() instead. +// Don't use this type directly, use a constructor function instead. type BlockBlobClient struct { + internal *azcore.Client endpoint string - pl runtime.Pipeline -} - -// NewBlockBlobClient creates a new instance of BlockBlobClient with the specified values. -// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// - pl - the pipeline used for sending requests and handling responses. -func NewBlockBlobClient(endpoint string, pl runtime.Pipeline) *BlockBlobClient { - client := &BlockBlobClient{ - endpoint: endpoint, - pl: pl, - } - return client } // CommitBlockList - The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob. @@ -48,7 +36,7 @@ func NewBlockBlobClient(endpoint string, pl runtime.Pipeline) *BlockBlobClient { // belong to. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - blocks - Blob Blocks. // - options - BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList // method. 
@@ -58,18 +46,21 @@ func NewBlockBlobClient(endpoint string, pl runtime.Pipeline) *BlockBlobClient { // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientCommitBlockListResponse, error) { + var err error req, err := client.commitBlockListCreateRequest(ctx, blocks, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlockBlobClientCommitBlockListResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientCommitBlockListResponse{}, err } - return client.commitBlockListHandleResponse(resp) + resp, err := client.commitBlockListHandleResponse(httpResp) + return resp, err } // commitBlockListCreateRequest creates the CommitBlockList request. @@ -148,7 +139,7 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -165,21 +156,24 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, runtime.MarshalAsXML(req, blocks) + if err := runtime.MarshalAsXML(req, blocks); err != nil { + return nil, err + } + return req, nil } // commitBlockListHandleResponse handles the CommitBlockList response. 
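commitBlockListCreateRequest now checks the error from runtime.MarshalAsXML explicitly rather than returning it alongside the request. The body it marshals is the block lookup list; a hedged sketch of that kind of XML payload, using local types that merely stand in for the SDK's generated models (the element names follow the documented Put Block List request body):

package main

import (
	"encoding/xml"
	"fmt"
)

// blockList is an illustrative local type, not the SDK's BlockLookupList.
type blockList struct {
	XMLName xml.Name `xml:"BlockList"`
	Latest  []string `xml:"Latest"` // block IDs to commit from the uncommitted list
}

func main() {
	body, err := xml.MarshalIndent(blockList{Latest: []string{"AAAAAA==", "AAAAAQ=="}}, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(xml.Header + string(body))
}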
func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response) (BlockBlobClientCommitBlockListResponse, error) { result := BlockBlobClientCommitBlockListResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - result.LastModified = &lastModified + result.ContentCRC64 = contentCRC64 } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -188,44 +182,44 @@ func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - result.ContentCRC64 = contentCRC64 - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val + result.Date = &date } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - result.Date = &date + result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - result.IsServerEncrypted = &isServerEncrypted + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -233,24 +227,27 @@ func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response // GetBlockList - The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob // If the operation fails it returns an *azcore.ResponseError type. 
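The handler above base64-decodes the binary headers Content-MD5 and x-ms-content-crc64 before storing them on the result. The same decoding step in isolation, with a fabricated header value:

package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
)

// decodeBinaryHeader returns the raw bytes of a base64-encoded header, or nil if absent.
func decodeBinaryHeader(h http.Header, name string) ([]byte, error) {
	v := h.Get(name)
	if v == "" {
		return nil, nil
	}
	return base64.StdEncoding.DecodeString(v)
}

func main() {
	h := http.Header{}
	// 16 fake bytes standing in for an MD5 digest.
	h.Set("Content-MD5", base64.StdEncoding.EncodeToString([]byte("0123456789abcdef")))
	md5sum, err := decodeBinaryHeader(h, "Content-MD5")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes: %x\n", len(md5sum), md5sum)
}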
// -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together. // - options - BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, options *BlockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientGetBlockListResponse, error) { + var err error req, err := client.getBlockListCreateRequest(ctx, listType, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return BlockBlobClientGetBlockListResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientGetBlockListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlockBlobClientGetBlockListResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientGetBlockListResponse{}, err } - return client.getBlockListHandleResponse(resp) + resp, err := client.getBlockListHandleResponse(httpResp) + return resp, err } // getBlockListCreateRequest creates the GetBlockList request. @@ -275,7 +272,7 @@ func (client *BlockBlobClient) getBlockListCreateRequest(ctx context.Context, li if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -286,28 +283,35 @@ func (client *BlockBlobClient) getBlockListCreateRequest(ctx context.Context, li // getBlockListHandleResponse handles the GetBlockList response. 
func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) (BlockBlobClientGetBlockListResponse, error) { result := BlockBlobClientGetBlockListResponse{} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { + blobContentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlockBlobClientGetBlockListResponse{}, err } - result.LastModified = &lastModified + result.BlobContentLength = &blobContentLength } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("Content-Type"); val != "" { result.ContentType = &val } - if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { - blobContentLength, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) if err != nil { return BlockBlobClientGetBlockListResponse{}, err } - result.BlobContentLength = &blobContentLength + result.Date = &date } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + result.LastModified = &lastModified } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val @@ -315,13 +319,6 @@ func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) ( if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlockBlobClientGetBlockListResponse{}, err - } - result.Date = &date - } if err := runtime.UnmarshalAsXML(resp, &result.BlockList); err != nil { return BlockBlobClientGetBlockListResponse{}, err } @@ -335,7 +332,7 @@ func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) ( // Block from URL API in conjunction with Put Block List. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - contentLength - The length of the request. // - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies // a page blob snapshot. The value should be URL-encoded as it would appear in a request @@ -350,18 +347,21 @@ func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) ( // - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL // method. 
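getBlockListHandleResponse finishes by unmarshalling the XML payload into result.BlockList. A sketch of what that decode step looks like against a sample Get Block List document; the local types and the sample payload are illustrative, not the SDK's generated models:

package main

import (
	"encoding/xml"
	"fmt"
)

type block struct {
	Name string `xml:"Name"`
	Size int64  `xml:"Size"`
}

type blockListResult struct {
	Committed   []block `xml:"CommittedBlocks>Block"`
	Uncommitted []block `xml:"UncommittedBlocks>Block"`
}

func main() {
	payload := []byte(`<BlockList>
  <CommittedBlocks><Block><Name>AAAAAA==</Name><Size>4194304</Size></Block></CommittedBlocks>
  <UncommittedBlocks/>
</BlockList>`)
	var out blockListResult
	if err := xml.Unmarshal(payload, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%d committed, %d uncommitted\n", len(out.Committed), len(out.Uncommitted))
}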
func (client *BlockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientPutBlobFromURLResponse, error) { + var err error req, err := client.putBlobFromURLCreateRequest(ctx, contentLength, copySource, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, sourceModifiedAccessConditions) if err != nil { return BlockBlobClientPutBlobFromURLResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientPutBlobFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlockBlobClientPutBlobFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientPutBlobFromURLResponse{}, err } - return client.putBlobFromURLHandleResponse(resp) + resp, err := client.putBlobFromURLHandleResponse(httpResp) + return resp, err } // putBlobFromURLCreateRequest creates the PutBlobFromURL request. @@ -453,7 +453,7 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -470,6 +470,9 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, if options != nil && options.CopySourceAuthorization != nil { req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} } + if options != nil && options.CopySourceTags != nil { + req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)} + } req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -477,15 +480,8 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, // putBlobFromURLHandleResponse handles the PutBlobFromURL response. 
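putBlobFromURLCreateRequest, like the other create-request helpers, maps optional pointer fields to headers only when they are non-nil, which is how the new x-ms-copy-source-tag-option header gets added. A stripped-down sketch of that pattern with illustrative option names:

package main

import (
	"fmt"
	"net/http"
)

// copyOptions is an illustrative options bag; nil pointer fields mean "omit the header".
type copyOptions struct {
	Metadata      map[string]string
	SourceTagMode *string // e.g. "COPY" or "REPLACE"
}

func setCopyHeaders(h http.Header, o *copyOptions) {
	if o == nil {
		return
	}
	if o.SourceTagMode != nil {
		h.Set("x-ms-copy-source-tag-option", *o.SourceTagMode)
	}
	for k, v := range o.Metadata {
		h.Set("x-ms-meta-"+k, v)
	}
}

func main() {
	mode := "COPY"
	h := http.Header{}
	setCopyHeaders(h, &copyOptions{SourceTagMode: &mode, Metadata: map[string]string{"owner": "demo"}})
	fmt.Println(h)
}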
func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) (BlockBlobClientPutBlobFromURLResponse, error) { result := BlockBlobClientPutBlobFromURLResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlockBlobClientPutBlobFromURLResponse{}, err - } - result.LastModified = &lastModified + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -494,18 +490,6 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -513,6 +497,15 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -520,11 +513,21 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -532,7 +535,7 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) // StageBlock - The Stage Block operation creates a new block to be committed as part of a blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal // to 64 bytes in size. For a given blob, the length of the value specified for the blockid // parameter must be the same size for each block. 
@@ -543,18 +546,21 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. func (client *BlockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo) (BlockBlobClientStageBlockResponse, error) { + var err error req, err := client.stageBlockCreateRequest(ctx, blockID, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo) if err != nil { return BlockBlobClientStageBlockResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientStageBlockResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlockBlobClientStageBlockResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientStageBlockResponse{}, err } - return client.stageBlockHandleResponse(resp) + resp, err := client.stageBlockHandleResponse(httpResp) + return resp, err } // stageBlockCreateRequest creates the StageBlock request. @@ -592,17 +598,30 @@ func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, bloc if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, "application/octet-stream") + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil } // stageBlockHandleResponse handles the StageBlock response. 
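StageBlock's doc comment requires every block ID to be a Base64 string whose pre-encoding value is the same length for all blocks of a blob and at most 64 bytes. One common way to satisfy that, sketched with a fixed-width counter (an assumption, not something this package prescribes):

package main

import (
	"encoding/base64"
	"fmt"
)

// blockID produces same-length IDs: the fixed-width counter keeps every
// pre-encoding value identical in size and well under the 64-byte limit.
func blockID(i int) string {
	raw := fmt.Sprintf("%08d", i)
	return base64.StdEncoding.EncodeToString([]byte(raw))
}

func main() {
	for i := 0; i < 3; i++ {
		fmt.Println(blockID(i))
	}
}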
func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (BlockBlobClientStageBlockResponse, error) { result := BlockBlobClientStageBlockResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { @@ -610,15 +629,6 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -626,12 +636,11 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl } result.Date = &date } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlockBlobClientStageBlockResponse{}, err - } - result.ContentCRC64 = contentCRC64 + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) @@ -640,11 +649,11 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -653,7 +662,7 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl // are read from a URL. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal // to 64 bytes in size. For a given blob, the length of the value specified for the blockid // parameter must be the same size for each block. @@ -667,18 +676,21 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl // - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL // method. 
func (client *BlockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientStageBlockFromURLResponse, error) { + var err error req, err := client.stageBlockFromURLCreateRequest(ctx, blockID, contentLength, sourceURL, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions) if err != nil { return BlockBlobClientStageBlockFromURLResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientStageBlockFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlockBlobClientStageBlockFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientStageBlockFromURLResponse{}, err } - return client.stageBlockFromURLHandleResponse(resp) + resp, err := client.stageBlockFromURLHandleResponse(httpResp) + return resp, err } // stageBlockFromURLCreateRequest creates the StageBlockFromURL request. @@ -732,7 +744,7 @@ func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -746,12 +758,8 @@ func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex // stageBlockFromURLHandleResponse handles the StageBlockFromURL response. 
func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Response) (BlockBlobClientStageBlockFromURLResponse, error) { result := BlockBlobClientStageBlockFromURLResponse{} - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlockBlobClientStageBlockFromURLResponse{}, err - } - result.ContentMD5 = contentMD5 + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { contentCRC64, err := base64.StdEncoding.DecodeString(val) @@ -760,14 +768,12 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon } result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -776,6 +782,12 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon } result.Date = &date } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -783,11 +795,11 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -798,7 +810,7 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon // the content of a block blob, use the Put Block List operation. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - contentLength - The length of the request. // - body - Initial data // - options - BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. @@ -808,18 +820,21 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *BlockBlobClient) Upload(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientUploadResponse, error) { + var err error req, err := client.uploadCreateRequest(ctx, contentLength, body, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return BlockBlobClientUploadResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientUploadResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlockBlobClientUploadResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientUploadResponse{}, err } - return client.uploadHandleResponse(resp) + resp, err := client.uploadHandleResponse(httpResp) + return resp, err } // uploadCreateRequest creates the Upload request. @@ -896,7 +911,7 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -912,22 +927,21 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL if options != nil && options.LegalHold != nil { req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, "application/octet-stream") + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil } // uploadHandleResponse handles the Upload response. 
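uploadCreateRequest now also forwards TransactionalContentCRC64 as x-ms-content-crc64, alongside the existing transactional Content-MD5. Computing the MD5 variant is plain standard-library work, sketched below with a placeholder body (the CRC64 variant uses a service-specific polynomial and is not shown):

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	body := []byte("hello, blob") // placeholder upload body
	sum := md5.Sum(body)          // hash the exact bytes being sent
	fmt.Println("Content-MD5:", base64.StdEncoding.EncodeToString(sum[:]))
}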
func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockBlobClientUploadResponse, error) { result := BlockBlobClientUploadResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlockBlobClientUploadResponse{}, err - } - result.LastModified = &lastModified + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -936,18 +950,6 @@ func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockB } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -955,6 +957,15 @@ func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockB } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -962,11 +973,21 @@ func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockB } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go index 74e6cf1e8..95af9e154 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-// DO NOT EDIT. package generated @@ -13,6 +12,7 @@ type AccessTier string const ( AccessTierArchive AccessTier = "Archive" + AccessTierCold AccessTier = "Cold" AccessTierCool AccessTier = "Cool" AccessTierHot AccessTier = "Hot" AccessTierP10 AccessTier = "P10" @@ -33,6 +33,7 @@ const ( func PossibleAccessTierValues() []AccessTier { return []AccessTier{ AccessTierArchive, + AccessTierCold, AccessTierCool, AccessTierHot, AccessTierP10, @@ -53,27 +54,28 @@ func PossibleAccessTierValues() []AccessTier { type AccountKind string const ( - AccountKindStorage AccountKind = "Storage" AccountKindBlobStorage AccountKind = "BlobStorage" - AccountKindStorageV2 AccountKind = "StorageV2" - AccountKindFileStorage AccountKind = "FileStorage" AccountKindBlockBlobStorage AccountKind = "BlockBlobStorage" + AccountKindFileStorage AccountKind = "FileStorage" + AccountKindStorage AccountKind = "Storage" + AccountKindStorageV2 AccountKind = "StorageV2" ) // PossibleAccountKindValues returns the possible values for the AccountKind const type. func PossibleAccountKindValues() []AccountKind { return []AccountKind{ - AccountKindStorage, AccountKindBlobStorage, - AccountKindStorageV2, - AccountKindFileStorage, AccountKindBlockBlobStorage, + AccountKindFileStorage, + AccountKindStorage, + AccountKindStorageV2, } } type ArchiveStatus string const ( + ArchiveStatusRehydratePendingToCold ArchiveStatus = "rehydrate-pending-to-cold" ArchiveStatusRehydratePendingToCool ArchiveStatus = "rehydrate-pending-to-cool" ArchiveStatusRehydratePendingToHot ArchiveStatus = "rehydrate-pending-to-hot" ) @@ -81,25 +83,41 @@ const ( // PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. func PossibleArchiveStatusValues() []ArchiveStatus { return []ArchiveStatus{ + ArchiveStatusRehydratePendingToCold, ArchiveStatusRehydratePendingToCool, ArchiveStatusRehydratePendingToHot, } } +type BlobCopySourceTags string + +const ( + BlobCopySourceTagsCOPY BlobCopySourceTags = "COPY" + BlobCopySourceTagsREPLACE BlobCopySourceTags = "REPLACE" +) + +// PossibleBlobCopySourceTagsValues returns the possible values for the BlobCopySourceTags const type. +func PossibleBlobCopySourceTagsValues() []BlobCopySourceTags { + return []BlobCopySourceTags{ + BlobCopySourceTagsCOPY, + BlobCopySourceTagsREPLACE, + } +} + // BlobGeoReplicationStatus - The status of the secondary location type BlobGeoReplicationStatus string const ( - BlobGeoReplicationStatusLive BlobGeoReplicationStatus = "live" BlobGeoReplicationStatusBootstrap BlobGeoReplicationStatus = "bootstrap" + BlobGeoReplicationStatusLive BlobGeoReplicationStatus = "live" BlobGeoReplicationStatusUnavailable BlobGeoReplicationStatus = "unavailable" ) // PossibleBlobGeoReplicationStatusValues returns the possible values for the BlobGeoReplicationStatus const type. func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus { return []BlobGeoReplicationStatus{ - BlobGeoReplicationStatusLive, BlobGeoReplicationStatusBootstrap, + BlobGeoReplicationStatusLive, BlobGeoReplicationStatusUnavailable, } } @@ -107,53 +125,53 @@ func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus { type BlobType string const ( + BlobTypeAppendBlob BlobType = "AppendBlob" BlobTypeBlockBlob BlobType = "BlockBlob" BlobTypePageBlob BlobType = "PageBlob" - BlobTypeAppendBlob BlobType = "AppendBlob" ) // PossibleBlobTypeValues returns the possible values for the BlobType const type. 
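The regenerated constants above are typed string values (note the new AccessTierCold and the rehydrate-pending-to-cold archive status), each paired with a Possible*Values helper. A small illustrative sketch, using a local stand-in enum rather than the SDK's types, of how such a pair supports validating user input:

package main

import "fmt"

type tier string

const (
	tierCold tier = "Cold"
	tierCool tier = "Cool"
	tierHot  tier = "Hot"
)

func possibleTierValues() []tier { return []tier{tierCold, tierCool, tierHot} }

// parseTier accepts only values listed by the Possible*Values-style helper.
func parseTier(s string) (tier, error) {
	for _, t := range possibleTierValues() {
		if string(t) == s {
			return t, nil
		}
	}
	return "", fmt.Errorf("unknown tier %q", s)
}

func main() {
	t, err := parseTier("Cold")
	fmt.Println(t, err)
}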
func PossibleBlobTypeValues() []BlobType { return []BlobType{ + BlobTypeAppendBlob, BlobTypeBlockBlob, BlobTypePageBlob, - BlobTypeAppendBlob, } } type BlockListType string const ( + BlockListTypeAll BlockListType = "all" BlockListTypeCommitted BlockListType = "committed" BlockListTypeUncommitted BlockListType = "uncommitted" - BlockListTypeAll BlockListType = "all" ) // PossibleBlockListTypeValues returns the possible values for the BlockListType const type. func PossibleBlockListTypeValues() []BlockListType { return []BlockListType{ + BlockListTypeAll, BlockListTypeCommitted, BlockListTypeUncommitted, - BlockListTypeAll, } } type CopyStatusType string const ( - CopyStatusTypePending CopyStatusType = "pending" - CopyStatusTypeSuccess CopyStatusType = "success" CopyStatusTypeAborted CopyStatusType = "aborted" CopyStatusTypeFailed CopyStatusType = "failed" + CopyStatusTypePending CopyStatusType = "pending" + CopyStatusTypeSuccess CopyStatusType = "success" ) // PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. func PossibleCopyStatusTypeValues() []CopyStatusType { return []CopyStatusType{ - CopyStatusTypePending, - CopyStatusTypeSuccess, CopyStatusTypeAborted, CopyStatusTypeFailed, + CopyStatusTypePending, + CopyStatusTypeSuccess, } } @@ -190,15 +208,15 @@ func PossibleDeleteTypeValues() []DeleteType { type EncryptionAlgorithmType string const ( - EncryptionAlgorithmTypeNone EncryptionAlgorithmType = "None" EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = "AES256" + EncryptionAlgorithmTypeNone EncryptionAlgorithmType = "None" ) // PossibleEncryptionAlgorithmTypeValues returns the possible values for the EncryptionAlgorithmType const type. func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { return []EncryptionAlgorithmType{ - EncryptionAlgorithmTypeNone, EncryptionAlgorithmTypeAES256, + EncryptionAlgorithmTypeNone, } } @@ -221,50 +239,65 @@ func PossibleExpiryOptionsValues() []ExpiryOptions { } } +type FilterBlobsIncludeItem string + +const ( + FilterBlobsIncludeItemNone FilterBlobsIncludeItem = "none" + FilterBlobsIncludeItemVersions FilterBlobsIncludeItem = "versions" +) + +// PossibleFilterBlobsIncludeItemValues returns the possible values for the FilterBlobsIncludeItem const type. +func PossibleFilterBlobsIncludeItemValues() []FilterBlobsIncludeItem { + return []FilterBlobsIncludeItem{ + FilterBlobsIncludeItemNone, + FilterBlobsIncludeItemVersions, + } +} + type ImmutabilityPolicyMode string const ( + ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = "Locked" ImmutabilityPolicyModeMutable ImmutabilityPolicyMode = "Mutable" ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = "Unlocked" - ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = "Locked" ) // PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type. func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode { return []ImmutabilityPolicyMode{ + ImmutabilityPolicyModeLocked, ImmutabilityPolicyModeMutable, ImmutabilityPolicyModeUnlocked, - ImmutabilityPolicyModeLocked, } } type ImmutabilityPolicySetting string const ( - ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = "Unlocked" ImmutabilityPolicySettingLocked ImmutabilityPolicySetting = "Locked" + ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = "Unlocked" ) // PossibleImmutabilityPolicySettingValues returns the possible values for the ImmutabilityPolicySetting const type. 
func PossibleImmutabilityPolicySettingValues() []ImmutabilityPolicySetting { return []ImmutabilityPolicySetting{ - ImmutabilityPolicySettingUnlocked, ImmutabilityPolicySettingLocked, + ImmutabilityPolicySettingUnlocked, } } type LeaseDurationType string const ( - LeaseDurationTypeInfinite LeaseDurationType = "infinite" LeaseDurationTypeFixed LeaseDurationType = "fixed" + LeaseDurationTypeInfinite LeaseDurationType = "infinite" ) // PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type. func PossibleLeaseDurationTypeValues() []LeaseDurationType { return []LeaseDurationType{ - LeaseDurationTypeInfinite, LeaseDurationTypeFixed, + LeaseDurationTypeInfinite, } } @@ -272,20 +305,20 @@ type LeaseStateType string const ( LeaseStateTypeAvailable LeaseStateType = "available" - LeaseStateTypeLeased LeaseStateType = "leased" - LeaseStateTypeExpired LeaseStateType = "expired" LeaseStateTypeBreaking LeaseStateType = "breaking" LeaseStateTypeBroken LeaseStateType = "broken" + LeaseStateTypeExpired LeaseStateType = "expired" + LeaseStateTypeLeased LeaseStateType = "leased" ) // PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type. func PossibleLeaseStateTypeValues() []LeaseStateType { return []LeaseStateType{ LeaseStateTypeAvailable, - LeaseStateTypeLeased, - LeaseStateTypeExpired, LeaseStateTypeBreaking, LeaseStateTypeBroken, + LeaseStateTypeExpired, + LeaseStateTypeLeased, } } @@ -309,14 +342,14 @@ type ListBlobsIncludeItem string const ( ListBlobsIncludeItemCopy ListBlobsIncludeItem = "copy" ListBlobsIncludeItemDeleted ListBlobsIncludeItem = "deleted" + ListBlobsIncludeItemDeletedwithversions ListBlobsIncludeItem = "deletedwithversions" + ListBlobsIncludeItemImmutabilitypolicy ListBlobsIncludeItem = "immutabilitypolicy" + ListBlobsIncludeItemLegalhold ListBlobsIncludeItem = "legalhold" ListBlobsIncludeItemMetadata ListBlobsIncludeItem = "metadata" ListBlobsIncludeItemSnapshots ListBlobsIncludeItem = "snapshots" + ListBlobsIncludeItemTags ListBlobsIncludeItem = "tags" ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItem = "uncommittedblobs" ListBlobsIncludeItemVersions ListBlobsIncludeItem = "versions" - ListBlobsIncludeItemTags ListBlobsIncludeItem = "tags" - ListBlobsIncludeItemImmutabilitypolicy ListBlobsIncludeItem = "immutabilitypolicy" - ListBlobsIncludeItemLegalhold ListBlobsIncludeItem = "legalhold" - ListBlobsIncludeItemDeletedwithversions ListBlobsIncludeItem = "deletedwithversions" ) // PossibleListBlobsIncludeItemValues returns the possible values for the ListBlobsIncludeItem const type. 
@@ -324,30 +357,30 @@ func PossibleListBlobsIncludeItemValues() []ListBlobsIncludeItem { return []ListBlobsIncludeItem{ ListBlobsIncludeItemCopy, ListBlobsIncludeItemDeleted, + ListBlobsIncludeItemDeletedwithversions, + ListBlobsIncludeItemImmutabilitypolicy, + ListBlobsIncludeItemLegalhold, ListBlobsIncludeItemMetadata, ListBlobsIncludeItemSnapshots, + ListBlobsIncludeItemTags, ListBlobsIncludeItemUncommittedblobs, ListBlobsIncludeItemVersions, - ListBlobsIncludeItemTags, - ListBlobsIncludeItemImmutabilitypolicy, - ListBlobsIncludeItemLegalhold, - ListBlobsIncludeItemDeletedwithversions, } } type ListContainersIncludeType string const ( - ListContainersIncludeTypeMetadata ListContainersIncludeType = "metadata" ListContainersIncludeTypeDeleted ListContainersIncludeType = "deleted" + ListContainersIncludeTypeMetadata ListContainersIncludeType = "metadata" ListContainersIncludeTypeSystem ListContainersIncludeType = "system" ) // PossibleListContainersIncludeTypeValues returns the possible values for the ListContainersIncludeType const type. func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType { return []ListContainersIncludeType{ - ListContainersIncludeTypeMetadata, ListContainersIncludeTypeDeleted, + ListContainersIncludeTypeMetadata, ListContainersIncludeTypeSystem, } } @@ -404,18 +437,18 @@ func PossiblePublicAccessTypeValues() []PublicAccessType { type QueryFormatType string const ( + QueryFormatTypeArrow QueryFormatType = "arrow" QueryFormatTypeDelimited QueryFormatType = "delimited" QueryFormatTypeJSON QueryFormatType = "json" - QueryFormatTypeArrow QueryFormatType = "arrow" QueryFormatTypeParquet QueryFormatType = "parquet" ) // PossibleQueryFormatTypeValues returns the possible values for the QueryFormatType const type. func PossibleQueryFormatTypeValues() []QueryFormatType { return []QueryFormatType{ + QueryFormatTypeArrow, QueryFormatTypeDelimited, QueryFormatTypeJSON, - QueryFormatTypeArrow, QueryFormatTypeParquet, } } @@ -440,38 +473,38 @@ func PossibleRehydratePriorityValues() []RehydratePriority { type SKUName string const ( - SKUNameStandardLRS SKUName = "Standard_LRS" + SKUNamePremiumLRS SKUName = "Premium_LRS" SKUNameStandardGRS SKUName = "Standard_GRS" + SKUNameStandardLRS SKUName = "Standard_LRS" SKUNameStandardRAGRS SKUName = "Standard_RAGRS" SKUNameStandardZRS SKUName = "Standard_ZRS" - SKUNamePremiumLRS SKUName = "Premium_LRS" ) // PossibleSKUNameValues returns the possible values for the SKUName const type. func PossibleSKUNameValues() []SKUName { return []SKUName{ - SKUNameStandardLRS, + SKUNamePremiumLRS, SKUNameStandardGRS, + SKUNameStandardLRS, SKUNameStandardRAGRS, SKUNameStandardZRS, - SKUNamePremiumLRS, } } type SequenceNumberActionType string const ( + SequenceNumberActionTypeIncrement SequenceNumberActionType = "increment" SequenceNumberActionTypeMax SequenceNumberActionType = "max" SequenceNumberActionTypeUpdate SequenceNumberActionType = "update" - SequenceNumberActionTypeIncrement SequenceNumberActionType = "increment" ) // PossibleSequenceNumberActionTypeValues returns the possible values for the SequenceNumberActionType const type. 
func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType { return []SequenceNumberActionType{ + SequenceNumberActionTypeIncrement, SequenceNumberActionTypeMax, SequenceNumberActionTypeUpdate, - SequenceNumberActionTypeIncrement, } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go index 5fcac5135..dbc2a293e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -25,46 +24,38 @@ import ( ) // ContainerClient contains the methods for the Container group. -// Don't use this type directly, use NewContainerClient() instead. +// Don't use this type directly, use a constructor function instead. type ContainerClient struct { + internal *azcore.Client endpoint string - pl runtime.Pipeline -} - -// NewContainerClient creates a new instance of ContainerClient with the specified values. -// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// - pl - the pipeline used for sending requests and handling responses. -func NewContainerClient(endpoint string, pl runtime.Pipeline) *ContainerClient { - client := &ContainerClient{ - endpoint: endpoint, - pl: pl, - } - return client } // AcquireLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite // lease can be between 15 and 60 seconds. A lease duration cannot be changed using // renew or change. // - options - ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *ContainerClient) AcquireLease(ctx context.Context, duration int32, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientAcquireLeaseResponse, error) { + var err error req, err := client.acquireLeaseCreateRequest(ctx, duration, options, modifiedAccessConditions) if err != nil { return ContainerClientAcquireLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientAcquireLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return ContainerClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return ContainerClientAcquireLeaseResponse{}, err } - return client.acquireLeaseHandleResponse(resp) + resp, err := client.acquireLeaseHandleResponse(httpResp) + return resp, err } // acquireLeaseCreateRequest creates the AcquireLease request. @@ -91,7 +82,7 @@ func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, du if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -102,6 +93,16 @@ func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, du // acquireLeaseHandleResponse handles the AcquireLease response. func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) (ContainerClientAcquireLeaseResponse, error) { result := ContainerClientAcquireLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientAcquireLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -115,22 +116,12 @@ func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) ( if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientAcquireLeaseResponse{}, err - } - result.Date = &date - } return result, nil } @@ -138,22 +129,25 @@ func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) ( // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
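AcquireLease documents the duration rule quoted above: negative one for an infinite lease, otherwise 15 to 60 seconds. A tiny sketch of checking that rule before issuing the call (the helper name is made up):

package main

import (
	"errors"
	"fmt"
)

func validateLeaseDuration(d int32) error {
	if d == -1 || (d >= 15 && d <= 60) {
		return nil
	}
	return errors.New("lease duration must be -1 (infinite) or between 15 and 60 seconds")
}

func main() {
	for _, d := range []int32{-1, 30, 5} {
		fmt.Println(d, validateLeaseDuration(d))
	}
}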
func (client *ContainerClient) BreakLease(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientBreakLeaseResponse, error) { + var err error req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { return ContainerClientBreakLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientBreakLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return ContainerClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ContainerClientBreakLeaseResponse{}, err } - return client.breakLeaseHandleResponse(resp) + resp, err := client.breakLeaseHandleResponse(httpResp) + return resp, err } // breakLeaseCreateRequest creates the BreakLease request. @@ -179,7 +173,7 @@ func (client *ContainerClient) breakLeaseCreateRequest(ctx context.Context, opti if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -190,6 +184,16 @@ func (client *ContainerClient) breakLeaseCreateRequest(ctx context.Context, opti // breakLeaseHandleResponse handles the BreakLease response. func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (ContainerClientBreakLeaseResponse, error) { result := ContainerClientBreakLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -208,22 +212,12 @@ func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (Co } result.LeaseTime = &leaseTime } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientBreakLeaseResponse{}, err - } - result.Date = &date - } return result, nil } @@ -231,7 +225,7 @@ func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (Co // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - leaseID - Specifies the current lease ID on the resource. // - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed // lease ID is not in the correct format. 
See Guid Constructor (String) for a list of valid GUID @@ -239,18 +233,21 @@ func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (Co // - options - ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *ContainerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientChangeLeaseResponse, error) { + var err error req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) if err != nil { return ContainerClientChangeLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientChangeLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientChangeLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientChangeLeaseResponse{}, err } - return client.changeLeaseHandleResponse(resp) + resp, err := client.changeLeaseHandleResponse(httpResp) + return resp, err } // changeLeaseCreateRequest creates the ChangeLease request. @@ -275,7 +272,7 @@ func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, lea if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -286,6 +283,16 @@ func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, lea // changeLeaseHandleResponse handles the ChangeLease response. 
func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (ContainerClientChangeLeaseResponse, error) { result := ContainerClientChangeLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientChangeLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -299,22 +306,12 @@ func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (C if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientChangeLeaseResponse{}, err - } - result.Date = &date - } return result, nil } @@ -322,22 +319,25 @@ func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (C // fails // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. // - ContainerCPKScopeInfo - ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method. func (client *ContainerClient) Create(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (ContainerClientCreateResponse, error) { + var err error req, err := client.createCreateRequest(ctx, options, containerCPKScopeInfo) if err != nil { return ContainerClientCreateResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientCreateResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return ContainerClientCreateResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return ContainerClientCreateResponse{}, err } - return client.createHandleResponse(resp) + resp, err := client.createHandleResponse(httpResp) + return resp, err } // createCreateRequest creates the Create request. @@ -362,7 +362,7 @@ func (client *ContainerClient) createCreateRequest(ctx context.Context, options if options != nil && options.Access != nil { req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -379,6 +379,16 @@ func (client *ContainerClient) createCreateRequest(ctx context.Context, options // createHandleResponse handles the Create response. 
func (client *ContainerClient) createHandleResponse(resp *http.Response) (ContainerClientCreateResponse, error) { result := ContainerClientCreateResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientCreateResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -389,22 +399,12 @@ func (client *ContainerClient) createHandleResponse(resp *http.Response) (Contai } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientCreateResponse{}, err - } - result.Date = &date - } return result, nil } @@ -412,23 +412,26 @@ func (client *ContainerClient) createHandleResponse(resp *http.Response) (Contai // deleted during garbage collection // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) Delete(ctx context.Context, options *ContainerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientDeleteResponse, error) { + var err error req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return ContainerClientDeleteResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientDeleteResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return ContainerClientDeleteResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ContainerClientDeleteResponse{}, err } - return client.deleteHandleResponse(resp) + resp, err := client.deleteHandleResponse(httpResp) + return resp, err } // deleteCreateRequest creates the Delete request. 
@@ -452,7 +455,7 @@ func (client *ContainerClient) deleteCreateRequest(ctx context.Context, options if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -466,19 +469,100 @@ func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (Contai if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientDeleteResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } + return result, nil +} + +// FilterBlobs - The Filter Blobs operation enables callers to list blobs in a container whose tags match a given search expression. +// Filter blobs searches within the given container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - where - Filters the results to return only to return only blobs whose tags match the specified expression. +// - options - ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method. +func (client *ContainerClient) FilterBlobs(ctx context.Context, where string, options *ContainerClientFilterBlobsOptions) (ContainerClientFilterBlobsResponse, error) { + var err error + req, err := client.filterBlobsCreateRequest(ctx, where, options) + if err != nil { + return ContainerClientFilterBlobsResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientFilterBlobsResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientFilterBlobsResponse{}, err + } + resp, err := client.filterBlobsHandleResponse(httpResp) + return resp, err +} + +// filterBlobsCreateRequest creates the FilterBlobs request. 
+func (client *ContainerClient) filterBlobsCreateRequest(ctx context.Context, where string, options *ContainerClientFilterBlobsOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "blobs") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + reqQP.Set("where", where) + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// filterBlobsHandleResponse handles the FilterBlobs response. +func (client *ContainerClient) filterBlobsHandleResponse(resp *http.Response) (ContainerClientFilterBlobsResponse, error) { + result := ContainerClientFilterBlobsResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return ContainerClientDeleteResponse{}, err + return ContainerClientFilterBlobsResponse{}, err } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil { + return ContainerClientFilterBlobsResponse{}, err + } return result, nil } @@ -486,23 +570,26 @@ func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (Contai // be accessed publicly. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy // method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
func (client *ContainerClient) GetAccessPolicy(ctx context.Context, options *ContainerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetAccessPolicyResponse, error) { + var err error req, err := client.getAccessPolicyCreateRequest(ctx, options, leaseAccessConditions) if err != nil { return ContainerClientGetAccessPolicyResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientGetAccessPolicyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientGetAccessPolicyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientGetAccessPolicyResponse{}, err } - return client.getAccessPolicyHandleResponse(resp) + resp, err := client.getAccessPolicyHandleResponse(httpResp) + return resp, err } // getAccessPolicyCreateRequest creates the GetAccessPolicy request. @@ -521,7 +608,7 @@ func (client *ContainerClient) getAccessPolicyCreateRequest(ctx context.Context, if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -535,6 +622,16 @@ func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { result.BlobPublicAccess = (*PublicAccessType)(&val) } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -545,22 +642,12 @@ func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientGetAccessPolicyResponse{}, err - } - result.Date = &date - } if err := runtime.UnmarshalAsXML(resp, &result); err != nil { return ContainerClientGetAccessPolicyResponse{}, err } @@ -570,22 +657,25 @@ func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo // method. 
func (client *ContainerClient) GetAccountInfo(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (ContainerClientGetAccountInfoResponse, error) { + var err error req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { return ContainerClientGetAccountInfoResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientGetAccountInfoResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientGetAccountInfoResponse{}, err } - return client.getAccountInfoHandleResponse(resp) + resp, err := client.getAccountInfoHandleResponse(httpResp) + return resp, err } // getAccountInfoCreateRequest creates the GetAccountInfo request. @@ -598,7 +688,7 @@ func (client *ContainerClient) getAccountInfoCreateRequest(ctx context.Context, reqQP.Set("restype", "account") reqQP.Set("comp", "properties") req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -606,15 +696,12 @@ func (client *ContainerClient) getAccountInfoCreateRequest(ctx context.Context, // getAccountInfoHandleResponse handles the GetAccountInfo response. func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) (ContainerClientGetAccountInfoResponse, error) { result := ContainerClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -622,11 +709,14 @@ func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } if val := resp.Header.Get("x-ms-sku-name"); val != "" { result.SKUName = (*SKUName)(&val) } - if val := resp.Header.Get("x-ms-account-kind"); val != "" { - result.AccountKind = (*AccountKind)(&val) + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -635,22 +725,25 @@ func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) // does not include the container's list of blobs // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
func (client *ContainerClient) GetProperties(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetPropertiesResponse, error) { + var err error req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions) if err != nil { return ContainerClientGetPropertiesResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientGetPropertiesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientGetPropertiesResponse{}, err } - return client.getPropertiesHandleResponse(resp) + resp, err := client.getPropertiesHandleResponse(httpResp) + return resp, err } // getPropertiesCreateRequest creates the GetProperties request. @@ -668,7 +761,7 @@ func (client *ContainerClient) getPropertiesCreateRequest(ctx context.Context, o if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -679,42 +772,12 @@ func (client *ContainerClient) getPropertiesCreateRequest(ctx context.Context, o // getPropertiesHandleResponse handles the GetProperties response. func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) (ContainerClientGetPropertiesResponse, error) { result := ContainerClientGetPropertiesResponse{} - for hh := range resp.Header { - if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) - } - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientGetPropertiesResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-lease-duration"); val != "" { - result.LeaseDuration = (*LeaseDurationType)(&val) - } - if val := resp.Header.Get("x-ms-lease-state"); val != "" { - result.LeaseState = (*LeaseStateType)(&val) - } - if val := resp.Header.Get("x-ms-lease-status"); val != "" { - result.LeaseStatus = (*LeaseStatusType)(&val) + if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { + result.BlobPublicAccess = (*PublicAccessType)(&val) } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -722,8 +785,18 @@ func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) } result.Date = &date } - if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { - result.BlobPublicAccess = (*PublicAccessType)(&val) + if val 
:= resp.Header.Get("x-ms-default-encryption-scope"); val != "" { + result.DefaultEncryptionScope = &val + } + if val := resp.Header.Get("x-ms-deny-encryption-scope-override"); val != "" { + denyEncryptionScopeOverride, err := strconv.ParseBool(val) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + result.DenyEncryptionScopeOverride = &denyEncryptionScopeOverride + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("x-ms-has-immutability-policy"); val != "" { hasImmutabilityPolicy, err := strconv.ParseBool(val) @@ -739,29 +812,49 @@ func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) } result.HasLegalHold = &hasLegalHold } - if val := resp.Header.Get("x-ms-default-encryption-scope"); val != "" { - result.DefaultEncryptionScope = &val - } - if val := resp.Header.Get("x-ms-deny-encryption-scope-override"); val != "" { - denyEncryptionScopeOverride, err := strconv.ParseBool(val) + if val := resp.Header.Get("x-ms-immutable-storage-with-versioning-enabled"); val != "" { + isImmutableStorageWithVersioningEnabled, err := strconv.ParseBool(val) if err != nil { return ContainerClientGetPropertiesResponse{}, err } - result.DenyEncryptionScopeOverride = &denyEncryptionScopeOverride + result.IsImmutableStorageWithVersioningEnabled = &isImmutableStorageWithVersioningEnabled } - if val := resp.Header.Get("x-ms-immutable-storage-with-versioning-enabled"); val != "" { - isImmutableStorageWithVersioningEnabled, err := strconv.ParseBool(val) + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return ContainerClientGetPropertiesResponse{}, err } - result.IsImmutableStorageWithVersioningEnabled = &isImmutableStorageWithVersioningEnabled + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } // NewListBlobFlatSegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager // method. 
// @@ -790,7 +883,7 @@ func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -801,17 +894,11 @@ func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Cont // listBlobFlatSegmentHandleResponse handles the ListBlobFlatSegment response. func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Response) (ContainerClientListBlobFlatSegmentResponse, error) { result := ContainerClientListBlobFlatSegmentResponse{} - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val - } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -820,6 +907,12 @@ func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Resp } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsFlatSegmentResponse); err != nil { return ContainerClientListBlobFlatSegmentResponse{}, err } @@ -828,7 +921,7 @@ func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Resp // NewListBlobHierarchySegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - delimiter - When the request includes this parameter, the operation returns a BlobPrefix element in the response body that // acts as a placeholder for all blobs whose names begin with the same substring up to the // appearance of the delimiter character. The delimiter may be a single character or a string. 
@@ -840,23 +933,16 @@ func (client *ContainerClient) NewListBlobHierarchySegmentPager(delimiter string return page.NextMarker != nil && len(*page.NextMarker) > 0 }, Fetcher: func(ctx context.Context, page *ContainerClientListBlobHierarchySegmentResponse) (ContainerClientListBlobHierarchySegmentResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.ListBlobHierarchySegmentCreateRequest(ctx, delimiter, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker) + nextLink := "" + if page != nil { + nextLink = *page.NextMarker } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.ListBlobHierarchySegmentCreateRequest(ctx, delimiter, options) + }, nil) if err != nil { return ContainerClientListBlobHierarchySegmentResponse{}, err } - resp, err := client.pl.Do(req) - if err != nil { - return ContainerClientListBlobHierarchySegmentResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientListBlobHierarchySegmentResponse{}, runtime.NewResponseError(resp) - } return client.ListBlobHierarchySegmentHandleResponse(resp) }, }) @@ -888,7 +974,7 @@ func (client *ContainerClient) ListBlobHierarchySegmentCreateRequest(ctx context reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -899,17 +985,11 @@ func (client *ContainerClient) ListBlobHierarchySegmentCreateRequest(ctx context // ListBlobHierarchySegmentHandleResponse handles the ListBlobHierarchySegment response. func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http.Response) (ContainerClientListBlobHierarchySegmentResponse, error) { result := ContainerClientListBlobHierarchySegmentResponse{} - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val - } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -918,6 +998,12 @@ func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsHierarchySegmentResponse); err != nil { return ContainerClientListBlobHierarchySegmentResponse{}, err } @@ -928,23 +1014,26 @@ func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - leaseID - Specifies the current lease ID on the resource. 
// - options - ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) ReleaseLease(ctx context.Context, leaseID string, options *ContainerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientReleaseLeaseResponse, error) { + var err error req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { return ContainerClientReleaseLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientReleaseLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientReleaseLeaseResponse{}, err } - return client.releaseLeaseHandleResponse(resp) + resp, err := client.releaseLeaseHandleResponse(httpResp) + return resp, err } // releaseLeaseCreateRequest creates the ReleaseLease request. @@ -968,7 +1057,7 @@ func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, le if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -979,6 +1068,16 @@ func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, le // releaseLeaseHandleResponse handles the ReleaseLease response. func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) (ContainerClientReleaseLeaseResponse, error) { result := ContainerClientReleaseLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientReleaseLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -989,44 +1088,37 @@ func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) ( } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientReleaseLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // Rename - Renames an existing container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - sourceContainerName - Required. Specifies the name of the container to rename. 
// - options - ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. func (client *ContainerClient) Rename(ctx context.Context, sourceContainerName string, options *ContainerClientRenameOptions) (ContainerClientRenameResponse, error) { + var err error req, err := client.renameCreateRequest(ctx, sourceContainerName, options) if err != nil { return ContainerClientRenameResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientRenameResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientRenameResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientRenameResponse{}, err } - return client.renameHandleResponse(resp) + resp, err := client.renameHandleResponse(httpResp) + return resp, err } // renameCreateRequest creates the Rename request. @@ -1042,7 +1134,7 @@ func (client *ContainerClient) renameCreateRequest(ctx context.Context, sourceCo reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1060,12 +1152,6 @@ func (client *ContainerClient) renameHandleResponse(resp *http.Response) (Contai if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1073,6 +1159,12 @@ func (client *ContainerClient) renameHandleResponse(resp *http.Response) (Contai } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -1080,23 +1172,26 @@ func (client *ContainerClient) renameHandleResponse(resp *http.Response) (Contai // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - leaseID - Specifies the current lease ID on the resource. // - options - ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *ContainerClient) RenewLease(ctx context.Context, leaseID string, options *ContainerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientRenewLeaseResponse, error) { + var err error req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { return ContainerClientRenewLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientRenewLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientRenewLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientRenewLeaseResponse{}, err } - return client.renewLeaseHandleResponse(resp) + resp, err := client.renewLeaseHandleResponse(httpResp) + return resp, err } // renewLeaseCreateRequest creates the RenewLease request. @@ -1120,7 +1215,7 @@ func (client *ContainerClient) renewLeaseCreateRequest(ctx context.Context, leas if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1131,6 +1226,16 @@ func (client *ContainerClient) renewLeaseCreateRequest(ctx context.Context, leas // renewLeaseHandleResponse handles the RenewLease response. func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (ContainerClientRenewLeaseResponse, error) { result := ContainerClientRenewLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientRenewLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -1144,43 +1249,36 @@ func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (Co if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientRenewLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // Restore - Restores a previously-deleted container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. 
func (client *ContainerClient) Restore(ctx context.Context, options *ContainerClientRestoreOptions) (ContainerClientRestoreResponse, error) { + var err error req, err := client.restoreCreateRequest(ctx, options) if err != nil { return ContainerClientRestoreResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientRestoreResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return ContainerClientRestoreResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return ContainerClientRestoreResponse{}, err } - return client.restoreHandleResponse(resp) + resp, err := client.restoreHandleResponse(httpResp) + return resp, err } // restoreCreateRequest creates the Restore request. @@ -1196,7 +1294,7 @@ func (client *ContainerClient) restoreCreateRequest(ctx context.Context, options reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1216,12 +1314,6 @@ func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (Conta if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1229,6 +1321,12 @@ func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (Conta } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -1236,25 +1334,28 @@ func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (Conta // may be accessed publicly. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - containerACL - the acls for the container // - options - ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy // method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *ContainerClient) SetAccessPolicy(ctx context.Context, containerACL []*SignedIdentifier, options *ContainerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetAccessPolicyResponse, error) { + var err error req, err := client.setAccessPolicyCreateRequest(ctx, containerACL, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return ContainerClientSetAccessPolicyResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientSetAccessPolicyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientSetAccessPolicyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientSetAccessPolicyResponse{}, err } - return client.setAccessPolicyHandleResponse(resp) + resp, err := client.setAccessPolicyHandleResponse(httpResp) + return resp, err } // setAccessPolicyCreateRequest creates the SetAccessPolicy request. @@ -1282,7 +1383,7 @@ func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context, if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1291,12 +1392,25 @@ func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context, XMLName xml.Name `xml:"SignedIdentifiers"` ContainerACL *[]*SignedIdentifier `xml:"SignedIdentifier"` } - return req, runtime.MarshalAsXML(req, wrapper{ContainerACL: &containerACL}) + if err := runtime.MarshalAsXML(req, wrapper{ContainerACL: &containerACL}); err != nil { + return nil, err + } + return req, nil } // setAccessPolicyHandleResponse handles the SetAccessPolicy response. func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response) (ContainerClientSetAccessPolicyResponse, error) { result := ContainerClientSetAccessPolicyResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientSetAccessPolicyResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -1307,45 +1421,38 @@ func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientSetAccessPolicyResponse{}, err - } - result.Date = &date - } return result, nil } // SetMetadata - operation sets one or more user-defined name-value pairs for the specified container. 
// If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) SetMetadata(ctx context.Context, options *ContainerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetMetadataResponse, error) { + var err error req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return ContainerClientSetMetadataResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientSetMetadataResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientSetMetadataResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientSetMetadataResponse{}, err } - return client.setMetadataHandleResponse(resp) + resp, err := client.setMetadataHandleResponse(httpResp) + return resp, err } // setMetadataCreateRequest creates the SetMetadata request. @@ -1374,7 +1481,7 @@ func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, opt if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1385,6 +1492,16 @@ func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, opt // setMetadataHandleResponse handles the SetMetadata response. 
func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (ContainerClientSetMetadataResponse, error) { result := ContainerClientSetMetadataResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientSetMetadataResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -1395,47 +1512,40 @@ func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (C } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientSetMetadataResponse{}, err - } - result.Date = &date - } return result, nil } // SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - contentLength - The length of the request. // - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header // value: multipart/mixed; boundary=batch_ // - body - Initial data // - options - ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. func (client *ContainerClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ContainerClientSubmitBatchOptions) (ContainerClientSubmitBatchResponse, error) { + var err error req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) if err != nil { return ContainerClientSubmitBatchResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientSubmitBatchResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return ContainerClientSubmitBatchResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ContainerClientSubmitBatchResponse{}, err } - return client.submitBatchHandleResponse(resp) + resp, err := client.submitBatchHandleResponse(httpResp) + return resp, err } // submitBatchCreateRequest creates the SubmitBatch request. 
@@ -1454,12 +1564,15 @@ func (client *ContainerClient) submitBatchCreateRequest(ctx context.Context, con runtime.SkipBodyDownload(req) req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} req.Raw().Header["Content-Type"] = []string{multipartContentType} - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, multipartContentType) + if err := req.SetBody(body, multipartContentType); err != nil { + return nil, err + } + return req, nil } // submitBatchHandleResponse handles the SubmitBatch response. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go index 022807f53..7251de839 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -26,89 +25,6 @@ type AccessPolicy struct { Start *time.Time `xml:"Start"` } -// AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL -// method. -type AppendBlobClientAppendBlockFromURLOptions struct { - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Specify the crc64 calculated for the range of bytes that must be read from the copy source. - SourceContentcrc64 []byte - // Bytes of source data in the specified range. - SourceRange *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. -type AppendBlobClientAppendBlockOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. -type AppendBlobClientCreateOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. -type AppendBlobClientSealOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock method. -type AppendPositionAccessConditions struct { - // Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare. - // Append Block will succeed only if the append position is equal to this number. If - // it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). - AppendPosition *int64 - // Optional conditional header. The max length in bytes permitted for the append blob. If the Append Block operation would - // cause the blob to exceed that limit or if the blob size is already greater than - // the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 - - // Precondition Failed). 
- MaxSize *int64 -} - // ArrowConfiguration - Groups the settings used for formatting the response if the response should be Arrow formatted. type ArrowConfiguration struct { // REQUIRED @@ -124,403 +40,11 @@ type ArrowField struct { Scale *int32 `xml:"Scale"` } -// BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. -type BlobClientAbortCopyFromURLOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. -type BlobClientAcquireLeaseOptions struct { - // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is - // not in the correct format. See Guid Constructor (String) for a list of valid GUID - // string formats. - ProposedLeaseID *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. -type BlobClientBreakLeaseOptions struct { - // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This - // break period is only used if it is shorter than the time remaining on the - // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has - // expired, but the lease may be held for longer than the break period. If this - // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, - // and an infinite lease breaks immediately. - BreakPeriod *int32 - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. -type BlobClientChangeLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. -type BlobClientCopyFromURLOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. -type BlobClientCreateSnapshotOptions struct { - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy -// method. 
-type BlobClientDeleteImmutabilityPolicyOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. -type BlobClientDeleteOptions struct { - // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob - // and all of its snapshots. only: Delete only the blob's snapshots and not the blob - // itself - DeleteSnapshots *DeleteSnapshotsOptionType - // Optional. Only possible value is 'permanent', which specifies to permanently delete a blob if blob soft delete is enabled. - DeleteType *DeleteType - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. -type BlobClientDownloadOptions struct { - // Return only the bytes of the blob in the specified range. - Range *string - // When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the - // range is less than or equal to 4 MB in size. - RangeGetContentCRC64 *bool - // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the - // range is less than or equal to 4 MB in size. - RangeGetContentMD5 *bool - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. -type BlobClientGetAccountInfoOptions struct { - // placeholder for future optional parameters -} - -// BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. -type BlobClientGetPropertiesOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. -type BlobClientGetTagsOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. -type BlobClientQueryOptions struct { - // the query request - QueryRequest *QueryRequest - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. -type BlobClientReleaseLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. -type BlobClientRenewLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. -type BlobClientSetExpiryOptions struct { - // The time to set the blob to expiry - ExpiresOn *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. -type BlobClientSetHTTPHeadersOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy method. -type BlobClientSetImmutabilityPolicyOptions struct { - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. -type BlobClientSetLegalHoldOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. -type BlobClientSetMetadataOptions struct { - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. -type BlobClientSetTagsOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. -type BlobClientSetTierOptions struct { - // Optional: Indicates the priority with which to rehydrate an archived blob. - RehydratePriority *RehydratePriority - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. 
- RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method. -type BlobClientStartCopyFromURLOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Optional: Indicates the priority with which to rehydrate an archived blob. - RehydratePriority *RehydratePriority - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer. - SealBlob *bool - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. -type BlobClientUndeleteOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - type BlobFlatListSegment struct { // REQUIRED BlobItems []*BlobItem `xml:"Blob"` } -// BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. 
-type BlobHTTPHeaders struct { - // Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request. - BlobCacheControl *string - // Optional. Sets the blob's Content-Disposition header. - BlobContentDisposition *string - // Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read - // request. - BlobContentEncoding *string - // Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read - // request. - BlobContentLanguage *string - // Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks - // were validated when each was uploaded. - BlobContentMD5 []byte - // Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request. - BlobContentType *string -} - type BlobHierarchyListSegment struct { // REQUIRED BlobItems []*BlobItem `xml:"Blob"` @@ -554,6 +78,14 @@ type BlobItem struct { VersionID *string `xml:"VersionId"` } +type BlobName struct { + // The name of the blob. + Content *string `xml:",chardata"` + + // Indicates if the blob name is encoded. + Encoded *bool `xml:"Encoded,attr"` +} + type BlobPrefix struct { // REQUIRED Name *string `xml:"Name"` @@ -636,141 +168,6 @@ type Block struct { Size *int64 `xml:"Size"` } -// BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList method. -type BlockBlobClientCommitBlockListOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. 
-type BlockBlobClientGetBlockListOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL method. -type BlockBlobClientPutBlobFromURLOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - // Optional, default is true. Indicates if properties from the source blob should be copied. - CopySourceBlobProperties *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL method. -type BlockBlobClientStageBlockFromURLOptions struct { - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Specify the crc64 calculated for the range of bytes that must be read from the copy source. - SourceContentcrc64 []byte - // Bytes of source data in the specified range. 
- SourceRange *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method. -type BlockBlobClientStageBlockOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. -type BlockBlobClientUploadOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - type BlockList struct { CommittedBlocks []*Block `xml:"CommittedBlocks>Block"` UncommittedBlocks []*Block `xml:"UncommittedBlocks>Block"` @@ -790,250 +187,6 @@ type ClearRange struct { Start *int64 `xml:"Start"` } -// ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. -type ContainerClientAcquireLeaseOptions struct { - // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is - // not in the correct format. 
See Guid Constructor (String) for a list of valid GUID - // string formats. - ProposedLeaseID *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. -type ContainerClientBreakLeaseOptions struct { - // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This - // break period is only used if it is shorter than the time remaining on the - // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has - // expired, but the lease may be held for longer than the break period. If this - // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, - // and an infinite lease breaks immediately. - BreakPeriod *int32 - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. -type ContainerClientChangeLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. -type ContainerClientCreateOptions struct { - // Specifies whether data in the container may be accessed publicly and the level of access - Access *PublicAccessType - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. -type ContainerClientDeleteOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy method. -type ContainerClientGetAccessPolicyOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo method. -type ContainerClientGetAccountInfoOptions struct { - // placeholder for future optional parameters -} - -// ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. -type ContainerClientGetPropertiesOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager -// method. -type ContainerClientListBlobFlatSegmentOptions struct { - // Include this parameter to specify one or more datasets to include in the response. - Include []ListBlobsIncludeItem - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. 
For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Filters the results to return only containers whose name begins with the specified prefix. - Prefix *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.NewListBlobHierarchySegmentPager -// method. -type ContainerClientListBlobHierarchySegmentOptions struct { - // Include this parameter to specify one or more datasets to include in the response. - Include []ListBlobsIncludeItem - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Filters the results to return only containers whose name begins with the specified prefix. - Prefix *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. -type ContainerClientReleaseLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. 
-type ContainerClientRenameOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match. - SourceLeaseID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. -type ContainerClientRenewLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. -type ContainerClientRestoreOptions struct { - // Optional. Version 2019-12-12 and later. Specifies the name of the deleted container to restore. - DeletedContainerName *string - // Optional. Version 2019-12-12 and later. Specifies the version of the deleted container to restore. - DeletedContainerVersion *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy method. -type ContainerClientSetAccessPolicyOptions struct { - // Specifies whether data in the container may be accessed publicly and the level of access - Access *PublicAccessType - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. -type ContainerClientSetMetadataOptions struct { - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. 
See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. -type ContainerClientSubmitBatchOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method. -type ContainerCPKScopeInfo struct { - // Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all - // future writes. - DefaultEncryptionScope *string - // Optional. Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than - // the scope set on the container. - PreventEncryptionScopeOverride *bool -} - // ContainerItem - An Azure Storage container type ContainerItem struct { // REQUIRED @@ -1095,27 +248,6 @@ type CORSRule struct { MaxAgeInSeconds *int32 `xml:"MaxAgeInSeconds"` } -// CPKInfo contains a group of parameters for the BlobClient.Download method. -type CPKInfo struct { - // The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided - // if the x-ms-encryption-key header is provided. - EncryptionAlgorithm *EncryptionAlgorithmType - // Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption - // is performed with the root account encryption key. For more information, see - // Encryption at Rest for Azure Storage Services. - EncryptionKey *string - // The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. - EncryptionKeySHA256 *string -} - -// CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -type CPKScopeInfo struct { - // Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided - // in the request. If not specified, encryption is performed with the default - // account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. - EncryptionScope *string -} - // DelimitedTextConfiguration - Groups the settings used for interpreting the blob data if the blob is delimited text formatted. type DelimitedTextConfiguration struct { // The string used to separate columns. 
@@ -1140,10 +272,12 @@ type FilterBlobItem struct { ContainerName *string `xml:"ContainerName"` // REQUIRED - Name *string `xml:"Name"` + Name *string `xml:"Name"` + IsCurrentVersion *bool `xml:"IsCurrentVersion"` // Blob tags - Tags *BlobTags `xml:"Tags"` + Tags *BlobTags `xml:"Tags"` + VersionID *string `xml:"VersionId"` } // FilterBlobSegment - The result of a Filter Blobs API call @@ -1185,12 +319,6 @@ type KeyInfo struct { Start *string `xml:"Start"` } -// LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -type LeaseAccessConditions struct { - // If specified, the operation only succeeds if the resource's lease is active and matches this ID. - LeaseID *string -} - // ListBlobsFlatSegmentResponse - An enumeration of blobs type ListBlobsFlatSegmentResponse struct { // REQUIRED @@ -1270,195 +398,6 @@ type Metrics struct { Version *string `xml:"Version"` } -// ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -type ModifiedAccessConditions struct { - // Specify an ETag value to operate only on blobs with a matching value. - IfMatch *azcore.ETag - // Specify this header value to operate only on a blob if it has been modified since the specified date/time. - IfModifiedSince *time.Time - // Specify an ETag value to operate only on blobs without a matching value. - IfNoneMatch *azcore.ETag - // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. - IfTags *string - // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. - IfUnmodifiedSince *time.Time -} - -// PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. -type PageBlobClientClearPagesOptions struct { - // Return only the bytes of the blob in the specified range. - Range *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental method. -type PageBlobClientCopyIncrementalOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. -type PageBlobClientCreateOptions struct { - // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of - // the sequence number must be between 0 and 2^63 - 1. - BlobSequenceNumber *int64 - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Specifies the date time when the blobs immutability policy is set to expire. 
- ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Optional. Indicates the tier to be set on the page blob. - Tier *PremiumPageBlobAccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager -// method. -type PageBlobClientGetPageRangesDiffOptions struct { - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot - // of the target blob. The response will only contain pages that were changed - // between the target blob and its previous snapshot. - PrevSnapshotURL *string - // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response - // will contain only pages that were changed between target blob and previous - // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot - // specified by prevsnapshot is the older of the two. Note that incremental - // snapshots are currently supported only for blobs created on or after January 1, 2016. - Prevsnapshot *string - // Return only the bytes of the blob in the specified range. 
- Range *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager method. -type PageBlobClientGetPageRangesOptions struct { - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Return only the bytes of the blob in the specified range. - Range *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. -type PageBlobClientResizeOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber -// method. -type PageBlobClientUpdateSequenceNumberOptions struct { - // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of - // the sequence number must be between 0 and 2^63 - 1. - BlobSequenceNumber *int64 - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL method. -type PageBlobClientUploadPagesFromURLOptions struct { - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Specify the crc64 calculated for the range of bytes that must be read from the copy source. - SourceContentcrc64 []byte - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. -type PageBlobClientUploadPagesOptions struct { - // Return only the bytes of the blob in the specified range. - Range *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - // PageList - the list of pages type PageList struct { ClearRange []*ClearRange `xml:"ClearRange"` @@ -1521,120 +460,6 @@ type RetentionPolicy struct { Days *int32 `xml:"Days"` } -// SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages method. -type SequenceNumberAccessConditions struct { - // Specify this header value to operate only on a blob if it has the specified sequence number. - IfSequenceNumberEqualTo *int64 - // Specify this header value to operate only on a blob if it has a sequence number less than the specified. 
- IfSequenceNumberLessThan *int64 - // Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified. - IfSequenceNumberLessThanOrEqualTo *int64 -} - -// ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. -type ServiceClientFilterBlobsOptions struct { - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. -type ServiceClientGetAccountInfoOptions struct { - // placeholder for future optional parameters -} - -// ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. -type ServiceClientGetPropertiesOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. -type ServiceClientGetStatisticsOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey method. 
-type ServiceClientGetUserDelegationKeyOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager -// method. -type ServiceClientListContainersSegmentOptions struct { - // Include this parameter to specify that the container's metadata be returned as part of the response body. - Include []ListContainersIncludeType - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Filters the results to return only containers whose name begins with the specified prefix. - Prefix *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. -type ServiceClientSetPropertiesOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. -type ServiceClientSubmitBatchOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - // SignedIdentifier - signed identifier type SignedIdentifier struct { // REQUIRED; An Access policy @@ -1644,20 +469,6 @@ type SignedIdentifier struct { ID *string `xml:"Id"` } -// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method. -type SourceModifiedAccessConditions struct { - // Specify an ETag value to operate only on blobs with a matching value. - SourceIfMatch *azcore.ETag - // Specify this header value to operate only on a blob if it has been modified since the specified date/time. - SourceIfModifiedSince *time.Time - // Specify an ETag value to operate only on blobs without a matching value. - SourceIfNoneMatch *azcore.ETag - // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. - SourceIfTags *string - // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. - SourceIfUnmodifiedSince *time.Time -} - // StaticWebsite - The properties that enable an account to host a static website type StaticWebsite struct { // REQUIRED; Indicates whether this account is hosting a static website @@ -1674,7 +485,7 @@ type StaticWebsite struct { } type StorageError struct { - Message *string `json:"Message,omitempty"` + Message *string } // StorageServiceProperties - Storage Service Properties. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go index e5b6cda2b..7e094db87 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -24,12 +23,12 @@ func (a AccessPolicy) MarshalXML(enc *xml.Encoder, start xml.StartElement) error type alias AccessPolicy aux := &struct { *alias - Expiry *timeRFC3339 `xml:"Expiry"` - Start *timeRFC3339 `xml:"Start"` + Expiry *dateTimeRFC3339 `xml:"Expiry"` + Start *dateTimeRFC3339 `xml:"Start"` }{ alias: (*alias)(&a), - Expiry: (*timeRFC3339)(a.Expiry), - Start: (*timeRFC3339)(a.Start), + Expiry: (*dateTimeRFC3339)(a.Expiry), + Start: (*dateTimeRFC3339)(a.Start), } return enc.EncodeElement(aux, start) } @@ -39,8 +38,8 @@ func (a *AccessPolicy) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) er type alias AccessPolicy aux := &struct { *alias - Expiry *timeRFC3339 `xml:"Expiry"` - Start *timeRFC3339 `xml:"Start"` + Expiry *dateTimeRFC3339 `xml:"Expiry"` + Start *dateTimeRFC3339 `xml:"Start"` }{ alias: (*alias)(a), } @@ -101,48 +100,30 @@ func (b BlobHierarchyListSegment) MarshalXML(enc *xml.Encoder, start xml.StartEl return enc.EncodeElement(aux, start) } -// UnmarshalXML implements the xml.Unmarshaller interface for type BlobItem. 
-func (b *BlobItem) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { - type alias BlobItem - aux := &struct { - *alias - Metadata additionalProperties `xml:"Metadata"` - OrMetadata additionalProperties `xml:"OrMetadata"` - }{ - alias: (*alias)(b), - } - if err := dec.DecodeElement(aux, &start); err != nil { - return err - } - b.Metadata = (map[string]*string)(aux.Metadata) - b.OrMetadata = (map[string]*string)(aux.OrMetadata) - return nil -} - // MarshalXML implements the xml.Marshaller interface for type BlobProperties. func (b BlobProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias BlobProperties aux := &struct { *alias - AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` - ContentMD5 *string `xml:"Content-MD5"` - CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` - CreationTime *timeRFC1123 `xml:"Creation-Time"` - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` - ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` - LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` - LastModified *timeRFC1123 `xml:"Last-Modified"` + AccessTierChangeTime *dateTimeRFC1123 `xml:"AccessTierChangeTime"` + ContentMD5 *string `xml:"Content-MD5"` + CopyCompletionTime *dateTimeRFC1123 `xml:"CopyCompletionTime"` + CreationTime *dateTimeRFC1123 `xml:"Creation-Time"` + DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` + ExpiresOn *dateTimeRFC1123 `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *dateTimeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` + LastAccessedOn *dateTimeRFC1123 `xml:"LastAccessTime"` + LastModified *dateTimeRFC1123 `xml:"Last-Modified"` }{ alias: (*alias)(&b), - AccessTierChangeTime: (*timeRFC1123)(b.AccessTierChangeTime), - CopyCompletionTime: (*timeRFC1123)(b.CopyCompletionTime), - CreationTime: (*timeRFC1123)(b.CreationTime), - DeletedTime: (*timeRFC1123)(b.DeletedTime), - ExpiresOn: (*timeRFC1123)(b.ExpiresOn), - ImmutabilityPolicyExpiresOn: (*timeRFC1123)(b.ImmutabilityPolicyExpiresOn), - LastAccessedOn: (*timeRFC1123)(b.LastAccessedOn), - LastModified: (*timeRFC1123)(b.LastModified), + AccessTierChangeTime: (*dateTimeRFC1123)(b.AccessTierChangeTime), + CopyCompletionTime: (*dateTimeRFC1123)(b.CopyCompletionTime), + CreationTime: (*dateTimeRFC1123)(b.CreationTime), + DeletedTime: (*dateTimeRFC1123)(b.DeletedTime), + ExpiresOn: (*dateTimeRFC1123)(b.ExpiresOn), + ImmutabilityPolicyExpiresOn: (*dateTimeRFC1123)(b.ImmutabilityPolicyExpiresOn), + LastAccessedOn: (*dateTimeRFC1123)(b.LastAccessedOn), + LastModified: (*dateTimeRFC1123)(b.LastModified), } if b.ContentMD5 != nil { encodedContentMD5 := runtime.EncodeByteArray(b.ContentMD5, runtime.Base64StdFormat) @@ -156,15 +137,15 @@ func (b *BlobProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) type alias BlobProperties aux := &struct { *alias - AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` - ContentMD5 *string `xml:"Content-MD5"` - CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` - CreationTime *timeRFC1123 `xml:"Creation-Time"` - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` - ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` - LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` - LastModified *timeRFC1123 `xml:"Last-Modified"` + AccessTierChangeTime *dateTimeRFC1123 `xml:"AccessTierChangeTime"` + ContentMD5 *string `xml:"Content-MD5"` + CopyCompletionTime *dateTimeRFC1123 `xml:"CopyCompletionTime"` + 
CreationTime *dateTimeRFC1123 `xml:"Creation-Time"` + DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` + ExpiresOn *dateTimeRFC1123 `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *dateTimeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` + LastAccessedOn *dateTimeRFC1123 `xml:"LastAccessTime"` + LastModified *dateTimeRFC1123 `xml:"Last-Modified"` }{ alias: (*alias)(b), } @@ -267,12 +248,12 @@ func (c ContainerProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement type alias ContainerProperties aux := &struct { *alias - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - LastModified *timeRFC1123 `xml:"Last-Modified"` + DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` + LastModified *dateTimeRFC1123 `xml:"Last-Modified"` }{ alias: (*alias)(&c), - DeletedTime: (*timeRFC1123)(c.DeletedTime), - LastModified: (*timeRFC1123)(c.LastModified), + DeletedTime: (*dateTimeRFC1123)(c.DeletedTime), + LastModified: (*dateTimeRFC1123)(c.LastModified), } return enc.EncodeElement(aux, start) } @@ -282,8 +263,8 @@ func (c *ContainerProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElem type alias ContainerProperties aux := &struct { *alias - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - LastModified *timeRFC1123 `xml:"Last-Modified"` + DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` + LastModified *dateTimeRFC1123 `xml:"Last-Modified"` }{ alias: (*alias)(c), } @@ -315,10 +296,10 @@ func (g GeoReplication) MarshalXML(enc *xml.Encoder, start xml.StartElement) err type alias GeoReplication aux := &struct { *alias - LastSyncTime *timeRFC1123 `xml:"LastSyncTime"` + LastSyncTime *dateTimeRFC1123 `xml:"LastSyncTime"` }{ alias: (*alias)(&g), - LastSyncTime: (*timeRFC1123)(g.LastSyncTime), + LastSyncTime: (*dateTimeRFC1123)(g.LastSyncTime), } return enc.EncodeElement(aux, start) } @@ -328,7 +309,7 @@ func (g *GeoReplication) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) type alias GeoReplication aux := &struct { *alias - LastSyncTime *timeRFC1123 `xml:"LastSyncTime"` + LastSyncTime *dateTimeRFC1123 `xml:"LastSyncTime"` }{ alias: (*alias)(g), } @@ -432,12 +413,12 @@ func (u UserDelegationKey) MarshalXML(enc *xml.Encoder, start xml.StartElement) type alias UserDelegationKey aux := &struct { *alias - SignedExpiry *timeRFC3339 `xml:"SignedExpiry"` - SignedStart *timeRFC3339 `xml:"SignedStart"` + SignedExpiry *dateTimeRFC3339 `xml:"SignedExpiry"` + SignedStart *dateTimeRFC3339 `xml:"SignedStart"` }{ alias: (*alias)(&u), - SignedExpiry: (*timeRFC3339)(u.SignedExpiry), - SignedStart: (*timeRFC3339)(u.SignedStart), + SignedExpiry: (*dateTimeRFC3339)(u.SignedExpiry), + SignedStart: (*dateTimeRFC3339)(u.SignedStart), } return enc.EncodeElement(aux, start) } @@ -447,8 +428,8 @@ func (u *UserDelegationKey) UnmarshalXML(dec *xml.Decoder, start xml.StartElemen type alias UserDelegationKey aux := &struct { *alias - SignedExpiry *timeRFC3339 `xml:"SignedExpiry"` - SignedStart *timeRFC3339 `xml:"SignedStart"` + SignedExpiry *dateTimeRFC3339 `xml:"SignedExpiry"` + SignedStart *dateTimeRFC3339 `xml:"SignedStart"` }{ alias: (*alias)(u), } @@ -470,6 +451,16 @@ func populate(m map[string]any, k string, v any) { } } +func populateAny(m map[string]any, k string, v any) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else { + m[k] = v + } +} + func unpopulate(data json.RawMessage, fn string, v any) error { if data == nil { return nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go new file mode 100644 index 000000000..216f8b73a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go @@ -0,0 +1,1469 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "time" +) + +// AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL +// method. +type AppendBlobClientAppendBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + + // Bytes of source data in the specified range. + SourceRange *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. +type AppendBlobClientAppendBlockOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. +type AppendBlobClientCreateOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. 
If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. +type AppendBlobClientSealOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock method. +type AppendPositionAccessConditions struct { + // Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare. + // Append Block will succeed only if the append position is equal to this number. If + // it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). + AppendPosition *int64 + + // Optional conditional header. The max length in bytes permitted for the append blob. If the Append Block operation would + // cause the blob to exceed that limit or if the blob size is already greater than + // the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 - + // Precondition Failed). + MaxSize *int64 +} + +// BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. +type BlobClientAbortCopyFromURLOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. +type BlobClientAcquireLeaseOptions struct { + // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. 
+ ProposedLeaseID *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. +type BlobClientBreakLeaseOptions struct { + // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This + // break period is only used if it is shorter than the time remaining on the + // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has + // expired, but the lease may be held for longer than the break period. If this + // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, + // and an infinite lease breaks immediately. + BreakPeriod *int32 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. +type BlobClientChangeLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. +type BlobClientCopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. + CopySourceTags *BlobCopySourceTags + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. 
Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. +type BlobClientCreateSnapshotOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy +// method. +type BlobClientDeleteImmutabilityPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. +type BlobClientDeleteOptions struct { + // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob + // and all of its snapshots. only: Delete only the blob's snapshots and not the blob + // itself + DeleteSnapshots *DeleteSnapshotsOptionType + + // Optional. Only possible value is 'permanent', which specifies to permanently delete a blob if blob soft delete is enabled. + DeleteType *DeleteType + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. 
+ RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. +type BlobClientDownloadOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + + // When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentCRC64 *bool + + // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentMD5 *bool + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. +type BlobClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. +type BlobClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. +type BlobClientGetTagsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. +type BlobClientQueryOptions struct { + // the query request + QueryRequest *QueryRequest + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. +type BlobClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. +type BlobClientRenewLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. +type BlobClientSetExpiryOptions struct { + // The time to set the blob to expiry + ExpiresOn *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. +type BlobClientSetHTTPHeadersOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy method. +type BlobClientSetImmutabilityPolicyOptions struct { + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. +type BlobClientSetLegalHoldOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. +type BlobClientSetMetadataOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. 
See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. +type BlobClientSetTagsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. +type BlobClientSetTierOptions struct { + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method. +type BlobClientStartCopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. 
If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer. + SealBlob *bool + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. +type BlobClientUndeleteOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +type BlobHTTPHeaders struct { + // Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request. + BlobCacheControl *string + + // Optional. Sets the blob's Content-Disposition header. + BlobContentDisposition *string + + // Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read + // request. + BlobContentEncoding *string + + // Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read + // request. + BlobContentLanguage *string + + // Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks + // were validated when each was uploaded. + BlobContentMD5 []byte + + // Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request. + BlobContentType *string +} + +// BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList method. +type BlockBlobClientCommitBlockListOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. 
+ LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. +type BlockBlobClientGetBlockListOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL method. +type BlockBlobClientPutBlobFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Optional, default is true. Indicates if properties from the source blob should be copied. + CopySourceBlobProperties *bool + + // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. + CopySourceTags *BlobCopySourceTags + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. 
Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL method. +type BlockBlobClientStageBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + + // Bytes of source data in the specified range. + SourceRange *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method. +type BlockBlobClientStageBlockOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. +type BlockBlobClientUploadOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. 
Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. +type ContainerClientAcquireLeaseOptions struct { + // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. +type ContainerClientBreakLeaseOptions struct { + // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This + // break period is only used if it is shorter than the time remaining on the + // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has + // expired, but the lease may be held for longer than the break period. If this + // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, + // and an infinite lease breaks immediately. + BreakPeriod *int32 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. +type ContainerClientChangeLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. +type ContainerClientCreateOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. +type ContainerClientDeleteOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method. +type ContainerClientFilterBlobsOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []FilterBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. 
+ Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy method. +type ContainerClientGetAccessPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo method. +type ContainerClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. +type ContainerClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager +// method. +type ContainerClientListBlobFlatSegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. 
Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.NewListBlobHierarchySegmentPager +// method. +type ContainerClientListBlobHierarchySegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. +type ContainerClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. 
+type ContainerClientRenameOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match. + SourceLeaseID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. +type ContainerClientRenewLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. +type ContainerClientRestoreOptions struct { + // Optional. Version 2019-12-12 and later. Specifies the name of the deleted container to restore. + DeletedContainerName *string + + // Optional. Version 2019-12-12 and later. Specifies the version of the deleted container to restore. + DeletedContainerVersion *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy method. +type ContainerClientSetAccessPolicyOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. +type ContainerClientSetMetadataOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. 
Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. +type ContainerClientSubmitBatchOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method. +type ContainerCPKScopeInfo struct { + // Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all + // future writes. + DefaultEncryptionScope *string + + // Optional. Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than + // the scope set on the container. + PreventEncryptionScopeOverride *bool +} + +// CPKInfo contains a group of parameters for the BlobClient.Download method. +type CPKInfo struct { + // The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided + // if the x-ms-encryption-key header is provided. + EncryptionAlgorithm *EncryptionAlgorithmType + + // Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption + // is performed with the root account encryption key. For more information, see + // Encryption at Rest for Azure Storage Services. + EncryptionKey *string + + // The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. + EncryptionKeySHA256 *string +} + +// CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +type CPKScopeInfo struct { + // Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided + // in the request. If not specified, encryption is performed with the default + // account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. + EncryptionScope *string +} + +// LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +type LeaseAccessConditions struct { + // If specified, the operation only succeeds if the resource's lease is active and matches this ID. + LeaseID *string +} + +// ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +type ModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. 
+ IfMatch *azcore.ETag + + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + IfModifiedSince *time.Time + + // Specify an ETag value to operate only on blobs without a matching value. + IfNoneMatch *azcore.ETag + + // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + IfTags *string + + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. + IfUnmodifiedSince *time.Time +} + +// PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. +type PageBlobClientClearPagesOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental method. +type PageBlobClientCopyIncrementalOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. +type PageBlobClientCreateOptions struct { + // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of + // the sequence number must be between 0 and 2^63 - 1. + BlobSequenceNumber *int64 + + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Optional. Indicates the tier to be set on the page blob. 
+ Tier *PremiumPageBlobAccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager +// method. +type PageBlobClientGetPageRangesDiffOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot + // of the target blob. The response will only contain pages that were changed + // between the target blob and its previous snapshot. + PrevSnapshotURL *string + + // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response + // will contain only pages that were changed between target blob and previous + // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot + // specified by prevsnapshot is the older of the two. Note that incremental + // snapshots are currently supported only for blobs created on or after January 1, 2016. + Prevsnapshot *string + + // Return only the bytes of the blob in the specified range. + Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager method. +type PageBlobClientGetPageRangesOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. 
The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Return only the bytes of the blob in the specified range. + Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. +type PageBlobClientResizeOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber +// method. +type PageBlobClientUpdateSequenceNumberOptions struct { + // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of + // the sequence number must be between 0 and 2^63 - 1. + BlobSequenceNumber *int64 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL method. +type PageBlobClientUploadPagesFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. 
+ CopySourceAuthorization *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. +type PageBlobClientUploadPagesOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages method. +type SequenceNumberAccessConditions struct { + // Specify this header value to operate only on a blob if it has the specified sequence number. + IfSequenceNumberEqualTo *int64 + + // Specify this header value to operate only on a blob if it has a sequence number less than the specified. + IfSequenceNumberLessThan *int64 + + // Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified. + IfSequenceNumberLessThanOrEqualTo *int64 +} + +// ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. +type ServiceClientFilterBlobsOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []FilterBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. 
For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. +type ServiceClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. +type ServiceClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. +type ServiceClientGetStatisticsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey method. +type ServiceClientGetUserDelegationKeyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager +// method. +type ServiceClientListContainersSegmentOptions struct { + // Include this parameter to specify that the container's metadata be returned as part of the response body. + Include []ListContainersIncludeType + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. 
+ Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. +type ServiceClientSetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. +type ServiceClientSubmitBatchOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method. +type SourceModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. + SourceIfMatch *azcore.ETag + + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + SourceIfModifiedSince *time.Time + + // Specify an ETag value to operate only on blobs without a matching value. + SourceIfNoneMatch *azcore.ETag + + // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + SourceIfTags *string + + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. 
+ SourceIfUnmodifiedSince *time.Time +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go index b209e99f0..cb6a19f7a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -22,27 +21,16 @@ import ( ) // PageBlobClient contains the methods for the PageBlob group. -// Don't use this type directly, use NewPageBlobClient() instead. +// Don't use this type directly, use a constructor function instead. type PageBlobClient struct { + internal *azcore.Client endpoint string - pl runtime.Pipeline -} - -// NewPageBlobClient creates a new instance of PageBlobClient with the specified values. -// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// - pl - the pipeline used for sending requests and handling responses. -func NewPageBlobClient(endpoint string, pl runtime.Pipeline) *PageBlobClient { - client := &PageBlobClient{ - endpoint: endpoint, - pl: pl, - } - return client } // ClearPages - The Clear Pages operation clears a set of pages from a page blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - contentLength - The length of the request. // - options - PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. @@ -52,18 +40,21 @@ func NewPageBlobClient(endpoint string, pl runtime.Pipeline) *PageBlobClient { // method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *PageBlobClient) ClearPages(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientClearPagesResponse, error) { + var err error req, err := client.clearPagesCreateRequest(ctx, contentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) if err != nil { return PageBlobClientClearPagesResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientClearPagesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return PageBlobClientClearPagesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientClearPagesResponse{}, err } - return client.clearPagesHandleResponse(resp) + resp, err := client.clearPagesHandleResponse(httpResp) + return resp, err } // clearPagesCreateRequest creates the ClearPages request. @@ -122,7 +113,7 @@ func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, conte if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -133,15 +124,22 @@ func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, conte // clearPagesHandleResponse handles the ClearPages response. 
func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (PageBlobClientClearPagesResponse, error) { result := PageBlobClientClearPagesResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return PageBlobClientClearPagesResponse{}, err } - result.LastModified = &lastModified + result.ContentCRC64 = contentCRC64 } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -150,22 +148,22 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) if err != nil { return PageBlobClientClearPagesResponse{}, err } - result.ContentCRC64 = contentCRC64 + result.Date = &date } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return PageBlobClientClearPagesResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val + result.LastModified = &lastModified } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val @@ -173,13 +171,6 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientClearPagesResponse{}, err - } - result.Date = &date - } return result, nil } @@ -190,7 +181,7 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag // 2016-05-31. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies // a page blob snapshot. The value should be URL-encoded as it would appear in a request // URI. The source blob must either be public or must be authenticated via a shared access signature. @@ -198,18 +189,21 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag // method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *PageBlobClient) CopyIncremental(ctx context.Context, copySource string, options *PageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCopyIncrementalResponse, error) { + var err error req, err := client.copyIncrementalCreateRequest(ctx, copySource, options, modifiedAccessConditions) if err != nil { return PageBlobClientCopyIncrementalResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientCopyIncrementalResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return PageBlobClientCopyIncrementalResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientCopyIncrementalResponse{}, err } - return client.copyIncrementalHandleResponse(resp) + resp, err := client.copyIncrementalHandleResponse(httpResp) + return resp, err } // copyIncrementalCreateRequest creates the CopyIncremental request. @@ -240,7 +234,7 @@ func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context, req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } req.Raw().Header["x-ms-copy-source"] = []string{copySource} - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -251,6 +245,22 @@ func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context, // copyIncrementalHandleResponse handles the CopyIncremental response. func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) (PageBlobClientCopyIncrementalResponse, error) { result := PageBlobClientCopyIncrementalResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -261,35 +271,19 @@ func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientCopyIncrementalResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-copy-id"); val != "" { - result.CopyID = &val - } - if val := resp.Header.Get("x-ms-copy-status"); val != "" { - result.CopyStatus = (*CopyStatusType)(&val) - } return result, nil } // Create - The Create operation creates a new page blob. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - contentLength - The length of the request. // - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned // to a 512-byte boundary. @@ -300,18 +294,21 @@ func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *PageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCreateResponse, error) { + var err error req, err := client.createCreateRequest(ctx, contentLength, blobContentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return PageBlobClientCreateResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientCreateResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return PageBlobClientCreateResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientCreateResponse{}, err } - return client.createHandleResponse(resp) + resp, err := client.createHandleResponse(httpResp) + return resp, err } // createCreateRequest creates the Create request. @@ -389,7 +386,7 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe if options != nil && options.BlobSequenceNumber != nil { req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -412,15 +409,8 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe // createHandleResponse handles the Create response. 
func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlobClientCreateResponse, error) { result := PageBlobClientCreateResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientCreateResponse{}, err - } - result.LastModified = &lastModified + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -429,18 +419,6 @@ func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlo } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -448,6 +426,15 @@ func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlo } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -455,11 +442,21 @@ func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlo } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -467,7 +464,7 @@ func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlo // NewGetPageRangesPager - The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot // of a page blob // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager // method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
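For orientation, a caller-side sketch of the regenerated Create/createHandleResponse pair above. This is illustrative only and not part of the patch; it assumes an existing *PageBlobClient from this generated package plus the usual context/fmt imports, and uses placeholder sizes.

// createEmptyPageBlob is a hedged usage sketch, not generated code.
func createEmptyPageBlob(ctx context.Context, client *PageBlobClient) error {
	// contentLength is 0 because Create sends no body; blobContentLength
	// (here 512 KiB) must be aligned to a 512-byte boundary.
	resp, err := client.Create(ctx, 0, 512*1024, nil, nil, nil, nil, nil, nil)
	if err != nil {
		return err
	}
	if resp.ETag != nil {
		fmt.Println("created page blob, ETag:", string(*resp.ETag))
	}
	return nil
}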
@@ -478,23 +475,16 @@ func (client *PageBlobClient) NewGetPageRangesPager(options *PageBlobClientGetPa return page.NextMarker != nil && len(*page.NextMarker) > 0 }, Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesResponse) (PageBlobClientGetPageRangesResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.GetPageRangesCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker) - } - if err != nil { - return PageBlobClientGetPageRangesResponse{}, err + nextLink := "" + if page != nil { + nextLink = *page.NextMarker } - resp, err := client.pl.Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.GetPageRangesCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + }, nil) if err != nil { return PageBlobClientGetPageRangesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PageBlobClientGetPageRangesResponse{}, runtime.NewResponseError(resp) - } return client.GetPageRangesHandleResponse(resp) }, }) @@ -542,7 +532,7 @@ func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, op if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -553,16 +543,6 @@ func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, op // GetPageRangesHandleResponse handles the GetPageRanges response. 
func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesResponse, error) { result := PageBlobClientGetPageRangesResponse{} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientGetPageRangesResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { blobContentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -573,12 +553,6 @@ func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) ( if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -586,6 +560,22 @@ func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) ( } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { return PageBlobClientGetPageRangesResponse{}, err } @@ -595,7 +585,7 @@ func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) ( // NewGetPageRangesDiffPager - The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that // were changed between target blob and previous snapshot. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager // method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
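The pager rewrite above swaps the hand-rolled request/Do/HasStatusCode loop for runtime.FetcherForNextLink; consumption is unchanged. A minimal sketch, not part of the patch, assuming an existing *PageBlobClient and that the embedded PageList exposes PageRange entries with Start/End pointers, as in the generated models:

// listPageRanges is a hedged usage sketch, not generated code.
func listPageRanges(ctx context.Context, client *PageBlobClient) error {
	pager := client.NewGetPageRangesPager(nil, nil, nil)
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, r := range page.PageRange { // promoted from the embedded PageList
			fmt.Printf("valid range: %d-%d\n", *r.Start, *r.End)
		}
	}
	return nil
}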
@@ -606,23 +596,16 @@ func (client *PageBlobClient) NewGetPageRangesDiffPager(options *PageBlobClientG return page.NextMarker != nil && len(*page.NextMarker) > 0 }, Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesDiffResponse) (PageBlobClientGetPageRangesDiffResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.GetPageRangesDiffCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker) + nextLink := "" + if page != nil { + nextLink = *page.NextMarker } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.GetPageRangesDiffCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + }, nil) if err != nil { return PageBlobClientGetPageRangesDiffResponse{}, err } - resp, err := client.pl.Do(req) - if err != nil { - return PageBlobClientGetPageRangesDiffResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PageBlobClientGetPageRangesDiffResponse{}, runtime.NewResponseError(resp) - } return client.GetPageRangesDiffHandleResponse(resp) }, }) @@ -676,7 +659,7 @@ func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -687,16 +670,6 @@ func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context // GetPageRangesDiffHandleResponse handles the GetPageRangesDiff response. 
func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesDiffResponse, error) { result := PageBlobClientGetPageRangesDiffResponse{} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientGetPageRangesDiffResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { blobContentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -707,12 +680,6 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -720,6 +687,22 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { return PageBlobClientGetPageRangesDiffResponse{}, err } @@ -729,7 +712,7 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons // Resize - Resize the Blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned // to a 512-byte boundary. // - options - PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. @@ -738,18 +721,21 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *PageBlobClient) Resize(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientResizeResponse, error) { + var err error req, err := client.resizeCreateRequest(ctx, blobContentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return PageBlobClientResizeResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientResizeResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PageBlobClientResizeResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientResizeResponse{}, err } - return client.resizeHandleResponse(resp) + resp, err := client.resizeHandleResponse(httpResp) + return resp, err } // resizeCreateRequest creates the Resize request. @@ -795,7 +781,7 @@ func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobConte req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -806,16 +792,6 @@ func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobConte // resizeHandleResponse handles the Resize response. 
func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlobClientResizeResponse, error) { result := PageBlobClientResizeResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientResizeResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -826,12 +802,6 @@ func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -839,13 +809,29 @@ func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // UpdateSequenceNumber - Update the sequence number of the blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - sequenceNumberAction - Required if the x-ms-blob-sequence-number header is set for the request. This property applies to // page blobs only. This property indicates how the service should modify the blob's sequence number // - options - PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber @@ -853,18 +839,21 @@ func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *PageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, options *PageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUpdateSequenceNumberResponse, error) { + var err error req, err := client.updateSequenceNumberCreateRequest(ctx, sequenceNumberAction, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return PageBlobClientUpdateSequenceNumberResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientUpdateSequenceNumberResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PageBlobClientUpdateSequenceNumberResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientUpdateSequenceNumberResponse{}, err } - return client.updateSequenceNumberHandleResponse(resp) + resp, err := client.updateSequenceNumberHandleResponse(httpResp) + return resp, err } // updateSequenceNumberCreateRequest creates the UpdateSequenceNumber request. @@ -901,7 +890,7 @@ func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Cont if options != nil && options.BlobSequenceNumber != nil { req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -912,16 +901,6 @@ func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Cont // updateSequenceNumberHandleResponse handles the UpdateSequenceNumber response. 
func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Response) (PageBlobClientUpdateSequenceNumberResponse, error) { result := PageBlobClientUpdateSequenceNumberResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientUpdateSequenceNumberResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -932,12 +911,6 @@ func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -945,13 +918,29 @@ func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // UploadPages - The Upload Pages operation writes a range of pages to a page blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - contentLength - The length of the request. // - body - Initial data // - options - PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. @@ -962,18 +951,21 @@ func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp // method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *PageBlobClient) UploadPages(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUploadPagesResponse, error) { + var err error req, err := client.uploadPagesCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) if err != nil { return PageBlobClientUploadPagesResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientUploadPagesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return PageBlobClientUploadPagesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientUploadPagesResponse{}, err } - return client.uploadPagesHandleResponse(resp) + resp, err := client.uploadPagesHandleResponse(httpResp) + return resp, err } // uploadPagesCreateRequest creates the UploadPages request. @@ -1038,33 +1030,29 @@ func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, cont if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, "application/octet-stream") + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil } // uploadPagesHandleResponse handles the UploadPages response. 
func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (PageBlobClientUploadPagesResponse, error) { result := PageBlobClientUploadPagesResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { return PageBlobClientUploadPagesResponse{}, err } - result.LastModified = &lastModified + result.BlobSequenceNumber = &blobSequenceNumber } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return PageBlobClientUploadPagesResponse{}, err - } - result.ContentMD5 = contentMD5 + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { contentCRC64, err := base64.StdEncoding.DecodeString(val) @@ -1073,21 +1061,12 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa } result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { return PageBlobClientUploadPagesResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -1096,6 +1075,15 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -1103,11 +1091,18 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -1116,7 +1111,7 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa // a URL // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - sourceURL - Specify a URL to the copy source. // - sourceRange - Bytes of source data in the specified range. The length of this range should match the ContentLength header // and x-ms-range/Range destination range header. @@ -1134,18 +1129,21 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa // - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL // method. func (client *PageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (PageBlobClientUploadPagesFromURLResponse, error) { + var err error req, err := client.uploadPagesFromURLCreateRequest(ctx, sourceURL, sourceRange, contentLength, rangeParam, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) if err != nil { return PageBlobClientUploadPagesFromURLResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientUploadPagesFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return PageBlobClientUploadPagesFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientUploadPagesFromURLResponse{}, err } - return client.uploadPagesFromURLHandleResponse(resp) + resp, err := client.uploadPagesFromURLHandleResponse(httpResp) + return resp, err } // uploadPagesFromURLCreateRequest creates the UploadPagesFromURL request. @@ -1222,7 +1220,7 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1236,22 +1234,12 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex // uploadPagesFromURLHandleResponse handles the UploadPagesFromURL response. 
func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Response) (PageBlobClientUploadPagesFromURLResponse, error) { result := PageBlobClientUploadPagesFromURLResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientUploadPagesFromURLResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { return PageBlobClientUploadPagesFromURLResponse{}, err } - result.ContentMD5 = contentMD5 + result.BlobSequenceNumber = &blobSequenceNumber } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { contentCRC64, err := base64.StdEncoding.DecodeString(val) @@ -1260,18 +1248,12 @@ func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Respon } result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { return PageBlobClientUploadPagesFromURLResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -1280,6 +1262,15 @@ func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Respon } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -1287,11 +1278,18 @@ func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Respon } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go index 386c943e4..738d23c8f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -266,6 +265,9 @@ type BlobClientCopyFromURLResponse struct { // ETag contains the information returned from the ETag header response. ETag *azcore.ETag + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -407,6 +409,9 @@ type BlobClientDownloadResponse struct { // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. CopyStatusDescription *string + // CreationTime contains the information returned from the x-ms-creation-time header response. + CreationTime *time.Time + // Date contains the information returned from the Date header response. Date *time.Time @@ -650,18 +655,20 @@ type BlobClientGetPropertiesResponse struct { // BlobClientGetTagsResponse contains the response from method BlobClient.GetTags. type BlobClientGetTagsResponse struct { + // Blob tags BlobTags + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // BlobClientQueryResponse contains the response from method BlobClient.Query. @@ -1045,29 +1052,30 @@ type BlockBlobClientCommitBlockListResponse struct { // BlockBlobClientGetBlockListResponse contains the response from method BlockBlobClient.GetBlockList. type BlockBlobClientGetBlockListResponse struct { BlockList + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. - BlobContentLength *int64 `xml:"BlobContentLength"` + BlobContentLength *int64 // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // ContentType contains the information returned from the Content-Type header response. - ContentType *string `xml:"ContentType"` + ContentType *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag `xml:"ETag"` + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time `xml:"LastModified"` + LastModified *time.Time // RequestID contains the information returned from the x-ms-request-id header response. 
- RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // BlockBlobClientPutBlobFromURLResponse contains the response from method BlockBlobClient.PutBlobFromURL. @@ -1310,31 +1318,49 @@ type ContainerClientDeleteResponse struct { Version *string } +// ContainerClientFilterBlobsResponse contains the response from method ContainerClient.FilterBlobs. +type ContainerClientFilterBlobsResponse struct { + // The result of a Filter Blobs API call + FilterBlobSegment + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + // ContainerClientGetAccessPolicyResponse contains the response from method ContainerClient.GetAccessPolicy. type ContainerClientGetAccessPolicyResponse struct { // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. - BlobPublicAccess *PublicAccessType `xml:"BlobPublicAccess"` + BlobPublicAccess *PublicAccessType // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag `xml:"ETag"` + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time `xml:"LastModified"` + LastModified *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // a collection of signed identifiers SignedIdentifiers []*SignedIdentifier `xml:"SignedIdentifier"` // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ContainerClientGetAccountInfoResponse contains the response from method ContainerClient.GetAccountInfo. @@ -1412,40 +1438,44 @@ type ContainerClientGetPropertiesResponse struct { // ContainerClientListBlobFlatSegmentResponse contains the response from method ContainerClient.NewListBlobFlatSegmentPager. type ContainerClientListBlobFlatSegmentResponse struct { + // An enumeration of blobs ListBlobsFlatSegmentResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // ContentType contains the information returned from the Content-Type header response. - ContentType *string `xml:"ContentType"` + ContentType *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. 
- Version *string `xml:"Version"` + Version *string } // ContainerClientListBlobHierarchySegmentResponse contains the response from method ContainerClient.NewListBlobHierarchySegmentPager. type ContainerClientListBlobHierarchySegmentResponse struct { + // An enumeration of blobs ListBlobsHierarchySegmentResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // ContentType contains the information returned from the Content-Type header response. - ContentType *string `xml:"ContentType"` + ContentType *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ContainerClientReleaseLeaseResponse contains the response from method ContainerClient.ReleaseLease. @@ -1675,52 +1705,56 @@ type PageBlobClientCreateResponse struct { // PageBlobClientGetPageRangesDiffResponse contains the response from method PageBlobClient.NewGetPageRangesDiffPager. type PageBlobClientGetPageRangesDiffResponse struct { + // the list of pages PageList + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. - BlobContentLength *int64 `xml:"BlobContentLength"` + BlobContentLength *int64 // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag `xml:"ETag"` + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time `xml:"LastModified"` + LastModified *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // PageBlobClientGetPageRangesResponse contains the response from method PageBlobClient.NewGetPageRangesPager. type PageBlobClientGetPageRangesResponse struct { + // the list of pages PageList + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. - BlobContentLength *int64 `xml:"BlobContentLength"` + BlobContentLength *int64 // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag `xml:"ETag"` + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time `xml:"LastModified"` + LastModified *time.Time // RequestID contains the information returned from the x-ms-request-id header response. 
- RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // PageBlobClientResizeResponse contains the response from method PageBlobClient.Resize. @@ -1848,18 +1882,20 @@ type PageBlobClientUploadPagesResponse struct { // ServiceClientFilterBlobsResponse contains the response from method ServiceClient.FilterBlobs. type ServiceClientFilterBlobsResponse struct { + // The result of a Filter Blobs API call FilterBlobSegment + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ServiceClientGetAccountInfoResponse contains the response from method ServiceClient.GetAccountInfo. @@ -1888,60 +1924,68 @@ type ServiceClientGetAccountInfoResponse struct { // ServiceClientGetPropertiesResponse contains the response from method ServiceClient.GetProperties. type ServiceClientGetPropertiesResponse struct { + // Storage Service Properties. StorageServiceProperties + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ServiceClientGetStatisticsResponse contains the response from method ServiceClient.GetStatistics. type ServiceClientGetStatisticsResponse struct { + // Stats for the storage service. StorageServiceStats + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ServiceClientGetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey. type ServiceClientGetUserDelegationKeyResponse struct { + // A user delegation key UserDelegationKey + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. 
- Version *string `xml:"Version"` + Version *string } // ServiceClientListContainersSegmentResponse contains the response from method ServiceClient.NewListContainersSegmentPager. type ServiceClientListContainersSegmentResponse struct { + // An enumeration of containers ListContainersSegmentResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ServiceClientSetPropertiesResponse contains the response from method ServiceClient.SetProperties. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go index b700211c5..c792fbf09 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go @@ -3,15 +3,15 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated import ( "context" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "io" @@ -22,21 +22,10 @@ import ( ) // ServiceClient contains the methods for the Service group. -// Don't use this type directly, use NewServiceClient() instead. +// Don't use this type directly, use a constructor function instead. type ServiceClient struct { + internal *azcore.Client endpoint string - pl runtime.Pipeline -} - -// NewServiceClient creates a new instance of ServiceClient with the specified values. -// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// - pl - the pipeline used for sending requests and handling responses. -func NewServiceClient(endpoint string, pl runtime.Pipeline) *ServiceClient { - client := &ServiceClient{ - endpoint: endpoint, - pl: pl, - } - return client } // FilterBlobs - The Filter Blobs operation enables callers to list blobs across all containers whose tags match a given search @@ -44,22 +33,25 @@ func NewServiceClient(endpoint string, pl runtime.Pipeline) *ServiceClient { // be scoped within the expression to a single container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - where - Filters the results to return only to return only blobs whose tags match the specified expression. // - options - ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. 
func (client *ServiceClient) FilterBlobs(ctx context.Context, where string, options *ServiceClientFilterBlobsOptions) (ServiceClientFilterBlobsResponse, error) { + var err error req, err := client.filterBlobsCreateRequest(ctx, where, options) if err != nil { return ServiceClientFilterBlobsResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientFilterBlobsResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientFilterBlobsResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientFilterBlobsResponse{}, err } - return client.filterBlobsHandleResponse(resp) + resp, err := client.filterBlobsHandleResponse(httpResp) + return resp, err } // filterBlobsCreateRequest creates the FilterBlobs request. @@ -80,8 +72,11 @@ func (client *ServiceClient) filterBlobsCreateRequest(ctx context.Context, where if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -95,12 +90,6 @@ func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (Ser if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -108,6 +97,12 @@ func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (Ser } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil { return ServiceClientFilterBlobsResponse{}, err } @@ -117,21 +112,24 @@ func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (Ser // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. 
func (client *ServiceClient) GetAccountInfo(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (ServiceClientGetAccountInfoResponse, error) { + var err error req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { return ServiceClientGetAccountInfoResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientGetAccountInfoResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetAccountInfoResponse{}, err } - return client.getAccountInfoHandleResponse(resp) + resp, err := client.getAccountInfoHandleResponse(httpResp) + return resp, err } // getAccountInfoCreateRequest creates the GetAccountInfo request. @@ -144,7 +142,7 @@ func (client *ServiceClient) getAccountInfoCreateRequest(ctx context.Context, op reqQP.Set("restype", "account") reqQP.Set("comp", "properties") req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -152,15 +150,12 @@ func (client *ServiceClient) getAccountInfoCreateRequest(ctx context.Context, op // getAccountInfoHandleResponse handles the GetAccountInfo response. func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) (ServiceClientGetAccountInfoResponse, error) { result := ServiceClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -168,12 +163,6 @@ func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) ( } result.Date = &date } - if val := resp.Header.Get("x-ms-sku-name"); val != "" { - result.SKUName = (*SKUName)(&val) - } - if val := resp.Header.Get("x-ms-account-kind"); val != "" { - result.AccountKind = (*AccountKind)(&val) - } if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" { isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val) if err != nil { @@ -181,6 +170,15 @@ func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) ( } result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-sku-name"); val != "" { + result.SKUName = (*SKUName)(&val) + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -188,21 +186,24 @@ func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) ( // CORS (Cross-Origin Resource Sharing) rules. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. 
func (client *ServiceClient) GetProperties(ctx context.Context, options *ServiceClientGetPropertiesOptions) (ServiceClientGetPropertiesResponse, error) { + var err error req, err := client.getPropertiesCreateRequest(ctx, options) if err != nil { return ServiceClientGetPropertiesResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientGetPropertiesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetPropertiesResponse{}, err } - return client.getPropertiesHandleResponse(resp) + resp, err := client.getPropertiesHandleResponse(httpResp) + return resp, err } // getPropertiesCreateRequest creates the GetProperties request. @@ -218,7 +219,7 @@ func (client *ServiceClient) getPropertiesCreateRequest(ctx context.Context, opt reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -248,21 +249,24 @@ func (client *ServiceClient) getPropertiesHandleResponse(resp *http.Response) (S // location endpoint when read-access geo-redundant replication is enabled for the storage account. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. func (client *ServiceClient) GetStatistics(ctx context.Context, options *ServiceClientGetStatisticsOptions) (ServiceClientGetStatisticsResponse, error) { + var err error req, err := client.getStatisticsCreateRequest(ctx, options) if err != nil { return ServiceClientGetStatisticsResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientGetStatisticsResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientGetStatisticsResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetStatisticsResponse{}, err } - return client.getStatisticsHandleResponse(resp) + resp, err := client.getStatisticsHandleResponse(httpResp) + return resp, err } // getStatisticsCreateRequest creates the GetStatistics request. 
@@ -278,7 +282,7 @@ func (client *ServiceClient) getStatisticsCreateRequest(ctx context.Context, opt reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -292,12 +296,6 @@ func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (S if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -305,6 +303,12 @@ func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (S } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceStats); err != nil { return ServiceClientGetStatisticsResponse{}, err } @@ -315,23 +319,26 @@ func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (S // bearer token authentication. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - keyInfo - Key information // - options - ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey // method. func (client *ServiceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, options *ServiceClientGetUserDelegationKeyOptions) (ServiceClientGetUserDelegationKeyResponse, error) { + var err error req, err := client.getUserDelegationKeyCreateRequest(ctx, keyInfo, options) if err != nil { return ServiceClientGetUserDelegationKeyResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientGetUserDelegationKeyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientGetUserDelegationKeyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetUserDelegationKeyResponse{}, err } - return client.getUserDelegationKeyHandleResponse(resp) + resp, err := client.getUserDelegationKeyHandleResponse(httpResp) + return resp, err } // getUserDelegationKeyCreateRequest creates the GetUserDelegationKey request. 
@@ -347,12 +354,15 @@ func (client *ServiceClient) getUserDelegationKeyCreateRequest(ctx context.Conte reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, runtime.MarshalAsXML(req, keyInfo) + if err := runtime.MarshalAsXML(req, keyInfo); err != nil { + return nil, err + } + return req, nil } // getUserDelegationKeyHandleResponse handles the GetUserDelegationKey response. @@ -361,12 +371,6 @@ func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Respo if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -374,6 +378,12 @@ func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Respo } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.UserDelegationKey); err != nil { return ServiceClientGetUserDelegationKeyResponse{}, err } @@ -383,7 +393,7 @@ func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Respo // NewListContainersSegmentPager - The List Containers Segment operation returns a list of the containers under the specified // account // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - options - ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager // method. // @@ -411,7 +421,7 @@ func (client *ServiceClient) ListContainersSegmentCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -441,22 +451,25 @@ func (client *ServiceClient) ListContainersSegmentHandleResponse(resp *http.Resp // and CORS (Cross-Origin Resource Sharing) rules // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - storageServiceProperties - The StorageService properties. // - options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. 
func (client *ServiceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (ServiceClientSetPropertiesResponse, error) { + var err error req, err := client.setPropertiesCreateRequest(ctx, storageServiceProperties, options) if err != nil { return ServiceClientSetPropertiesResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientSetPropertiesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return ServiceClientSetPropertiesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ServiceClientSetPropertiesResponse{}, err } - return client.setPropertiesHandleResponse(resp) + resp, err := client.setPropertiesHandleResponse(httpResp) + return resp, err } // setPropertiesCreateRequest creates the SetProperties request. @@ -472,12 +485,15 @@ func (client *ServiceClient) setPropertiesCreateRequest(ctx context.Context, sto reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, runtime.MarshalAsXML(req, storageServiceProperties) + if err := runtime.MarshalAsXML(req, storageServiceProperties); err != nil { + return nil, err + } + return req, nil } // setPropertiesHandleResponse handles the SetProperties response. @@ -498,25 +514,28 @@ func (client *ServiceClient) setPropertiesHandleResponse(resp *http.Response) (S // SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-11-03 // - contentLength - The length of the request. // - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header // value: multipart/mixed; boundary=batch_ // - body - Initial data // - options - ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. func (client *ServiceClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceClientSubmitBatchOptions) (ServiceClientSubmitBatchResponse, error) { + var err error req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) if err != nil { return ServiceClientSubmitBatchResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientSubmitBatchResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return ServiceClientSubmitBatchResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ServiceClientSubmitBatchResponse{}, err } - return client.submitBatchHandleResponse(resp) + resp, err := client.submitBatchHandleResponse(httpResp) + return resp, err } // submitBatchCreateRequest creates the SubmitBatch request. 
@@ -534,12 +553,15 @@ func (client *ServiceClient) submitBatchCreateRequest(ctx context.Context, conte runtime.SkipBodyDownload(req) req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} req.Raw().Header["Content-Type"] = []string{multipartContentType} - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, multipartContentType) + if err := req.SetBody(body, multipartContentType); err != nil { + return nil, err + } + return req, nil } // submitBatchHandleResponse handles the SubmitBatch response. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go index 4b4d51aa3..586650329 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -15,29 +14,29 @@ import ( ) const ( - rfc1123JSON = `"` + time.RFC1123 + `"` + dateTimeRFC1123JSON = `"` + time.RFC1123 + `"` ) -type timeRFC1123 time.Time +type dateTimeRFC1123 time.Time -func (t timeRFC1123) MarshalJSON() ([]byte, error) { - b := []byte(time.Time(t).Format(rfc1123JSON)) +func (t dateTimeRFC1123) MarshalJSON() ([]byte, error) { + b := []byte(time.Time(t).Format(dateTimeRFC1123JSON)) return b, nil } -func (t timeRFC1123) MarshalText() ([]byte, error) { +func (t dateTimeRFC1123) MarshalText() ([]byte, error) { b := []byte(time.Time(t).Format(time.RFC1123)) return b, nil } -func (t *timeRFC1123) UnmarshalJSON(data []byte) error { - p, err := time.Parse(rfc1123JSON, strings.ToUpper(string(data))) - *t = timeRFC1123(p) +func (t *dateTimeRFC1123) UnmarshalJSON(data []byte) error { + p, err := time.Parse(dateTimeRFC1123JSON, strings.ToUpper(string(data))) + *t = dateTimeRFC1123(p) return err } -func (t *timeRFC1123) UnmarshalText(data []byte) error { +func (t *dateTimeRFC1123) UnmarshalText(data []byte) error { p, err := time.Parse(time.RFC1123, string(data)) - *t = timeRFC1123(p) + *t = dateTimeRFC1123(p) return err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go index 1ce9d6211..82b370133 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. 
+// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -15,45 +14,45 @@ import ( "time" ) -const ( - utcLayoutJSON = `"2006-01-02T15:04:05.999999999"` - utcLayout = "2006-01-02T15:04:05.999999999" - rfc3339JSON = `"` + time.RFC3339Nano + `"` -) - // Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) -type timeRFC3339 time.Time +const ( + utcDateTimeJSON = `"2006-01-02T15:04:05.999999999"` + utcDateTime = "2006-01-02T15:04:05.999999999" + dateTimeJSON = `"` + time.RFC3339Nano + `"` +) + +type dateTimeRFC3339 time.Time -func (t timeRFC3339) MarshalJSON() (json []byte, err error) { +func (t dateTimeRFC3339) MarshalJSON() ([]byte, error) { tt := time.Time(t) return tt.MarshalJSON() } -func (t timeRFC3339) MarshalText() (text []byte, err error) { +func (t dateTimeRFC3339) MarshalText() ([]byte, error) { tt := time.Time(t) return tt.MarshalText() } -func (t *timeRFC3339) UnmarshalJSON(data []byte) error { - layout := utcLayoutJSON +func (t *dateTimeRFC3339) UnmarshalJSON(data []byte) error { + layout := utcDateTimeJSON if tzOffsetRegex.Match(data) { - layout = rfc3339JSON + layout = dateTimeJSON } return t.Parse(layout, string(data)) } -func (t *timeRFC3339) UnmarshalText(data []byte) (err error) { - layout := utcLayout +func (t *dateTimeRFC3339) UnmarshalText(data []byte) error { + layout := utcDateTime if tzOffsetRegex.Match(data) { layout = time.RFC3339Nano } return t.Parse(layout, string(data)) } -func (t *timeRFC3339) Parse(layout, value string) error { +func (t *dateTimeRFC3339) Parse(layout, value string) error { p, err := time.Parse(layout, strings.ToUpper(value)) - *t = timeRFC3339(p) + *t = dateTimeRFC3339(p) return err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go index 144ea18e1..1bd0e4de0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go @@ -3,14 +3,16 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated import ( "encoding/xml" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "io" "strings" ) @@ -19,22 +21,32 @@ type additionalProperties map[string]*string // UnmarshalXML implements the xml.Unmarshaler interface for additionalProperties. 
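// Illustrative sketch, not part of the upstream patch: the renamed dateTimeRFC3339
// wrapper above keeps the generator's parsing trick -- Azure sometimes reports UTC
// timestamps without a trailing 'Z', so the layout is picked by regex. A minimal,
// self-contained replay of that selection logic with made-up timestamps:
package main

import (
	"fmt"
	"regexp"
	"strings"
	"time"
)

// same regex and bare-UTC layout as the generated helper
var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`)

const utcDateTime = "2006-01-02T15:04:05.999999999"

func parseAzureTime(value string) (time.Time, error) {
	layout := utcDateTime
	if tzOffsetRegex.MatchString(value) {
		layout = time.RFC3339Nano // a zone suffix is present
	}
	return time.Parse(layout, strings.ToUpper(value))
}

func main() {
	for _, v := range []string{
		"2023-11-03T10:30:00.0000000",  // no suffix: bare UTC layout
		"2023-11-03T10:30:00.0000000Z", // suffix: RFC3339Nano
	} {
		fmt.Println(parseAzureTime(v))
	}
}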
func (ap *additionalProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { tokName := "" - for t, err := d.Token(); err == nil; t, err = d.Token() { + tokValue := "" + for { + t, err := d.Token() + if errors.Is(err, io.EOF) { + break + } else if err != nil { + return err + } switch tt := t.(type) { case xml.StartElement: tokName = strings.ToLower(tt.Name.Local) - break + tokValue = "" case xml.CharData: + if tokName == "" { + continue + } + tokValue = string(tt) + case xml.EndElement: if tokName == "" { continue } if *ap == nil { *ap = additionalProperties{} } - s := string(tt) - (*ap)[tokName] = &s + (*ap)[tokName] = to.Ptr(tokValue) tokName = "" - break } } return nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go index ec5541bfb..9f95ad8ae 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go @@ -11,10 +11,15 @@ import ( "errors" ) +const ( + DefaultConcurrency = 5 +) + // BatchTransferOptions identifies options used by doBatchTransfer. type BatchTransferOptions struct { TransferSize int64 ChunkSize int64 + NumChunks uint64 Concurrency uint16 Operation func(ctx context.Context, offset int64, chunkSize int64) error OperationName string @@ -28,13 +33,12 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { } if o.Concurrency == 0 { - o.Concurrency = 5 // default concurrency + o.Concurrency = DefaultConcurrency // default concurrency } // Prepare and do parallel operations. - numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1) operationChannel := make(chan func() error, o.Concurrency) // Create the channel that release 'concurrency' goroutines concurrently - operationResponseChannel := make(chan error, numChunks) // Holds each response + operationResponseChannel := make(chan error, o.NumChunks) // Holds each response ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -50,10 +54,10 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { } // Add each chunk's operation to the channel. - for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { + for chunkNum := uint64(0); chunkNum < o.NumChunks; chunkNum++ { curChunkSize := o.ChunkSize - if chunkNum == numChunks-1 { // Last chunk + if chunkNum == o.NumChunks-1 { // Last chunk curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total } offset := int64(chunkNum) * o.ChunkSize @@ -65,7 +69,7 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { // Wait for the operations to complete. 
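// Illustrative sketch, not part of the upstream patch: DoBatchTransfer no longer derives
// the chunk count itself -- callers now pass NumChunks (uint64) next to TransferSize and
// ChunkSize, with DefaultConcurrency = 5 as the fallback worker count. The value callers
// are expected to supply is the same ((size-1)/chunk)+1 formula the removed line used;
// the sizes below are made up.
package main

import "fmt"

func main() {
	transferSize := int64(10 * 1024 * 1024) // 10 MiB payload
	chunkSize := int64(4 * 1024 * 1024)     // 4 MiB chunks

	// what a caller would place into BatchTransferOptions.NumChunks
	numChunks := uint64(((transferSize - 1) / chunkSize) + 1)

	for chunkNum := uint64(0); chunkNum < numChunks; chunkNum++ {
		curChunkSize := chunkSize
		if chunkNum == numChunks-1 { // last chunk carries the remainder
			curChunkSize = transferSize - int64(chunkNum)*chunkSize
		}
		fmt.Printf("chunk %d: offset=%d size=%d\n", chunkNum, int64(chunkNum)*chunkSize, curChunkSize)
	}
}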
var firstErr error = nil - for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { + for chunkNum := uint64(0); chunkNum < o.NumChunks; chunkNum++ { responseError := <-operationResponseChannel // record the first error (the original error which should cause the other chunks to fail with canceled context) if responseError != nil && firstErr == nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go new file mode 100644 index 000000000..e3aa4a488 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go @@ -0,0 +1,70 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +type BufferManager[T ~[]byte] interface { + // Acquire returns the channel that contains the pool of buffers. + Acquire() <-chan T + + // Release releases the buffer back to the pool for reuse/cleanup. + Release(T) + + // Grow grows the number of buffers, up to the predefined max. + // It returns the total number of buffers or an error. + // No error is returned if the number of buffers has reached max. + // This is called only from the reading goroutine. + Grow() (int, error) + + // Free cleans up all buffers. + Free() +} + +// mmbPool implements the bufferManager interface. +// it uses anonymous memory mapped files for buffers. +// don't use this type directly, use newMMBPool() instead. +type mmbPool struct { + buffers chan Mmb + count int + max int + size int64 +} + +func NewMMBPool(maxBuffers int, bufferSize int64) BufferManager[Mmb] { + return &mmbPool{ + buffers: make(chan Mmb, maxBuffers), + max: maxBuffers, + size: bufferSize, + } +} + +func (pool *mmbPool) Acquire() <-chan Mmb { + return pool.buffers +} + +func (pool *mmbPool) Grow() (int, error) { + if pool.count < pool.max { + buffer, err := NewMMB(pool.size) + if err != nil { + return 0, err + } + pool.buffers <- buffer + pool.count++ + } + return pool.count, nil +} + +func (pool *mmbPool) Release(buffer Mmb) { + pool.buffers <- buffer +} + +func (pool *mmbPool) Free() { + for i := 0; i < pool.count; i++ { + buffer := <-pool.buffers + buffer.Delete() + } + pool.count = 0 +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go index e7c8e9213..fff61016c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go @@ -8,11 +8,12 @@ package shared import ( "errors" + "net/http" + "strings" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "net/http" - "strings" ) type storageAuthorizer struct { @@ -20,13 +21,14 @@ type storageAuthorizer struct { tenantID string } -func NewStorageChallengePolicy(cred azcore.TokenCredential) policy.Policy { - s := storageAuthorizer{scopes: []string{TokenScope}} - return runtime.NewBearerTokenPolicy(cred, []string{TokenScope}, &policy.BearerTokenOptions{ +func NewStorageChallengePolicy(cred azcore.TokenCredential, audience string, allowHTTP bool) 
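// Illustrative sketch, not part of the upstream patch: BufferManager/NewMMBPool live in
// the internal/shared package, so this only compiles from inside the azblob module (for
// example as a test in that package). It walks the intended lifecycle: Grow maps a new
// anonymous buffer up to the configured max, Acquire hands buffers out over a channel,
// Release returns them for reuse, Free unmaps everything.
package shared

import "testing"

func TestMMBPoolLifecycle(t *testing.T) {
	pool := NewMMBPool(2, 1<<20) // at most two 1 MiB buffers
	defer pool.Free()

	if _, err := pool.Grow(); err != nil { // map the first buffer
		t.Fatal(err)
	}

	buf := <-pool.Acquire() // take a buffer off the channel
	copy(buf, []byte("staged upload bytes"))
	pool.Release(buf) // hand it back so another goroutine can reuse it
}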
policy.Policy { + s := storageAuthorizer{scopes: []string{audience}} + return runtime.NewBearerTokenPolicy(cred, []string{audience}, &policy.BearerTokenOptions{ AuthorizationHandler: policy.AuthorizationHandler{ OnRequest: s.onRequest, OnChallenge: s.onChallenge, }, + InsecureAllowCredentialWithHTTP: allowHTTP, }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_unix.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go similarity index 89% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_unix.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go index 5f7f4d828..cdcadf311 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_unix.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go @@ -5,7 +5,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -package blockblob +package shared import ( "fmt" @@ -14,20 +14,20 @@ import ( ) // mmb is a memory mapped buffer -type mmb []byte +type Mmb []byte // newMMB creates a new memory mapped buffer with the specified size -func newMMB(size int64) (mmb, error) { +func NewMMB(size int64) (Mmb, error) { prot, flags := syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE addr, err := syscall.Mmap(-1, 0, int(size), prot, flags) if err != nil { return nil, os.NewSyscallError("Mmap", err) } - return mmb(addr), nil + return Mmb(addr), nil } // delete cleans up the memory mapped buffer -func (m *mmb) delete() { +func (m *Mmb) Delete() { err := syscall.Munmap(*m) *m = nil if err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_windows.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_windows.go similarity index 82% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_windows.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_windows.go index 3f966d65b..ef9fdc2a1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_windows.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_windows.go @@ -4,7 +4,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. 
-package blockblob +package shared import ( "fmt" @@ -14,11 +14,11 @@ import ( "unsafe" ) -// mmb is a memory mapped buffer -type mmb []byte +// Mmb is a memory mapped buffer +type Mmb []byte -// newMMB creates a new memory mapped buffer with the specified size -func newMMB(size int64) (mmb, error) { +// NewMMB creates a new memory mapped buffer with the specified size +func NewMMB(size int64) (Mmb, error) { const InvalidHandleValue = ^uintptr(0) // -1 prot, access := uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE) @@ -35,7 +35,7 @@ func newMMB(size int64) (mmb, error) { return nil, os.NewSyscallError("MapViewOfFile", err) } - m := mmb{} + m := Mmb{} h := (*reflect.SliceHeader)(unsafe.Pointer(&m)) h.Data = addr h.Len = int(size) @@ -43,10 +43,10 @@ func newMMB(size int64) (mmb, error) { return m, nil } -// delete cleans up the memory mapped buffer -func (m *mmb) delete() { +// Delete cleans up the memory mapped buffer +func (m *Mmb) Delete() { addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0]))) - *m = mmb{} + *m = Mmb{} err := syscall.UnmapViewOfFile(addr) if err != nil { // if we get here, there is likely memory corruption. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go index 02936ad03..c7922076f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go @@ -135,9 +135,6 @@ func ParseConnectionString(connectionString string) (ParsedConnectionString, err // SerializeBlobTags converts tags to generated.BlobTags func SerializeBlobTags(tagsMap map[string]string) *generated.BlobTags { - if len(tagsMap) == 0 { - return nil - } blobTagSet := make([]*generated.BlobTag, 0) for key, val := range tagsMap { newKey, newVal := key, val @@ -248,3 +245,27 @@ func IsIPEndpointStyle(host string) bool { } return net.ParseIP(host) != nil } + +// ReadAtLeast reads from r into buf until it has read at least min bytes. +// It returns the number of bytes copied and an error. +// The EOF error is returned if no bytes were read or +// EOF happened after reading fewer than min bytes. +// If min is greater than the length of buf, ReadAtLeast returns ErrShortBuffer. +// On return, n >= min if and only if err == nil. +// If r returns an error having read at least min bytes, the error is dropped. +// This method is same as io.ReadAtLeast except that it does not +// return io.ErrUnexpectedEOF when fewer than min bytes are read. 
+func ReadAtLeast(r io.Reader, buf []byte, min int) (n int, err error) { + if len(buf) < min { + return 0, io.ErrShortBuffer + } + for n < min && err == nil { + var nn int + nn, err = r.Read(buf[n:]) + n += nn + } + if n >= min { + err = nil + } + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go index 69328dbeb..30d0253af 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go @@ -22,6 +22,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" ) // ClientOptions contains the optional parameters when creating a Client. @@ -35,12 +36,16 @@ type Client base.CompositeClient[generated.BlobClient, generated.PageBlobClient] // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := shared.NewStorageChallengePolicy(cred) + audience := base.GetAudience((*base.ClientOptions)(options)) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewPageBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewPageBlobClient(blobURL, azClient, nil)), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -49,9 +54,12 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - return (*Client)(base.NewPageBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewPageBlobClient(blobURL, azClient, nil)), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. 
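// Illustrative sketch, not part of the upstream patch: shared.ReadAtLeast is internal, so
// the helper is copied verbatim below to show the one difference its doc comment calls
// out -- a short read surfaces as plain io.EOF rather than io.ErrUnexpectedEOF. The
// reader contents and sizes are made up.
package main

import (
	"fmt"
	"io"
	"strings"
)

// readAtLeast mirrors the helper added to internal/shared/shared.go.
func readAtLeast(r io.Reader, buf []byte, min int) (n int, err error) {
	if len(buf) < min {
		return 0, io.ErrShortBuffer
	}
	for n < min && err == nil {
		var nn int
		nn, err = r.Read(buf[n:])
		n += nn
	}
	if n >= min {
		err = nil
	}
	return
}

func main() {
	buf := make([]byte, 8)

	n, err := readAtLeast(strings.NewReader("abc"), buf, 5)
	fmt.Println(n, err) // 3 EOF

	n, err = io.ReadAtLeast(strings.NewReader("abc"), buf, 5)
	fmt.Println(n, err) // 3 unexpected EOF
}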
@@ -61,10 +69,13 @@ func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) { authPolicy := exported.NewSharedKeyCredPolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewPageBlobClient(blobURL, pl, cred)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewPageBlobClient(blobURL, azClient, cred)), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. @@ -119,7 +130,7 @@ func (pb *Client) WithSnapshot(snapshot string) (*Client, error) { } p.Snapshot = snapshot - return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().Pipeline(), pb.sharedKey())), nil + return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().InternalClient(), pb.sharedKey())), nil } // WithVersionID creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp. @@ -131,7 +142,7 @@ func (pb *Client) WithVersionID(versionID string) (*Client, error) { } p.VersionID = versionID - return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().Pipeline(), pb.sharedKey())), nil + return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().InternalClient(), pb.sharedKey())), nil } // Create creates a page blob of the specified length. Call PutPage to upload data to a page blob. @@ -228,7 +239,7 @@ func (pb *Client) NewGetPageRangesPager(o *GetPageRangesOptions) *runtime.Pager[ if err != nil { return GetPageRangesResponse{}, err } - resp, err := pb.generated().Pipeline().Do(req) + resp, err := pb.generated().InternalClient().Pipeline().Do(req) if err != nil { return GetPageRangesResponse{}, err } @@ -261,7 +272,7 @@ func (pb *Client) NewGetPageRangesDiffPager(o *GetPageRangesDiffOptions) *runtim if err != nil { return GetPageRangesDiffResponse{}, err } - resp, err := pb.generated().Pipeline().Do(req) + resp, err := pb.generated().InternalClient().Pipeline().Do(req) if err != nil { return GetPageRangesDiffResponse{}, err } @@ -417,6 +428,12 @@ func (pb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.Co return pb.BlobClient().CopyFromURL(ctx, copySource, o) } +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at Page blob. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (pb *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) { + return pb.BlobClient().GetSASURL(permissions, expiry, o) +} + // Concurrent Download Functions ----------------------------------------------------------------------------------------- // DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. 
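// Illustrative sketch, not part of the upstream patch: the page blob client now exposes a
// GetSASURL convenience method; it only works when the client was constructed with a
// shared key credential. Account name, key and URL below are placeholders.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)

func main() {
	cred, err := blob.NewSharedKeyCredential("<account>", "<base64-account-key>")
	if err != nil {
		log.Fatal(err)
	}
	client, err := pageblob.NewClientWithSharedKeyCredential(
		"https://<account>.blob.core.windows.net/<container>/<page-blob>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}

	// read-only SAS, valid for one hour, signed with the shared key held by the client
	sasURL, err := client.GetSASURL(sas.BlobPermissions{Read: true}, time.Now().Add(time.Hour), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sasURL)
}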
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go index e6148f173..39aef20ff 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go @@ -198,7 +198,7 @@ type GetPageRangesOptions struct { func (o *GetPageRangesOptions) format() (*generated.PageBlobClientGetPageRangesOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { if o == nil { - return nil, nil, nil + return &generated.PageBlobClientGetPageRangesOptions{}, nil, nil } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go index 454a08cb9..4069bb132 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go @@ -25,13 +25,14 @@ type UserDelegationCredential = exported.UserDelegationCredential // AccountSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account. // For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas type AccountSignatureValues struct { - Version string `param:"sv"` // If not specified, this format to SASVersion - Protocol Protocol `param:"spr"` // See the SASProtocol* constants - StartTime time.Time `param:"st"` // Not specified if IsZero - ExpiryTime time.Time `param:"se"` // Not specified if IsZero - Permissions string `param:"sp"` // Create by initializing AccountPermissions and then call String() - IPRange IPRange `param:"sip"` - ResourceTypes string `param:"srt"` // Create by initializing AccountResourceTypes and then call String() + Version string `param:"sv"` // If not specified, this format to SASVersion + Protocol Protocol `param:"spr"` // See the SASProtocol* constants + StartTime time.Time `param:"st"` // Not specified if IsZero + ExpiryTime time.Time `param:"se"` // Not specified if IsZero + Permissions string `param:"sp"` // Create by initializing AccountPermissions and then call String() + IPRange IPRange `param:"sip"` + ResourceTypes string `param:"srt"` // Create by initializing AccountResourceTypes and then call String() + EncryptionScope string `param:"ses"` } // SignWithSharedKey uses an account's shared key credential to sign this signature values to produce @@ -68,6 +69,7 @@ func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKey v.IPRange.String(), string(v.Protocol), v.Version, + v.EncryptionScope, ""}, // That is right, the account SAS requires a terminating extra newline "\n") @@ -77,12 +79,13 @@ func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKey } p := QueryParameters{ // Common SAS parameters - version: v.Version, - protocol: v.Protocol, - startTime: v.StartTime, - expiryTime: v.ExpiryTime, - permissions: v.Permissions, - ipRange: v.IPRange, + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + encryptionScope: v.EncryptionScope, // Account-specific SAS parameters services: "b", // will always be "b" diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go index 4d97372da..20f9875a9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go @@ -8,6 +8,7 @@ package sas import ( "errors" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" "net" "net/url" "strings" @@ -23,7 +24,7 @@ const ( var ( // Version is the default version encoded in the SAS token. - Version = "2020-02-10" + Version = generated.ServiceVersion ) // TimeFormats ISO 8601 format. @@ -143,6 +144,7 @@ type QueryParameters struct { authorizedObjectID string `param:"saoid"` unauthorizedObjectID string `param:"suoid"` correlationID string `param:"scid"` + encryptionScope string `param:"ses"` // private member used for startTime and expiryTime formatting. stTimeFormat string seTimeFormat string @@ -163,6 +165,11 @@ func (p *QueryParameters) SignedCorrelationID() string { return p.correlationID } +// EncryptionScope returns encryptionScope +func (p *QueryParameters) EncryptionScope() string { + return p.encryptionScope +} + // SignedOID returns signedOID. func (p *QueryParameters) SignedOID() string { return p.signedOID @@ -355,6 +362,9 @@ func (p *QueryParameters) Encode() string { if p.correlationID != "" { v.Add("scid", p.correlationID) } + if p.encryptionScope != "" { + v.Add("ses", p.encryptionScope) + } return v.Encode() } @@ -429,6 +439,8 @@ func NewQueryParameters(values url.Values, deleteSASParametersFromValues bool) Q p.unauthorizedObjectID = val case "scid": p.correlationID = val + case "ses": + p.encryptionScope = val default: isSASKey = false // We didn't recognize the query parameter } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go index b900e2b06..45f730847 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go @@ -40,6 +40,7 @@ type BlobSignatureValues struct { AuthorizedObjectID string // saoid UnauthorizedObjectID string // suoid CorrelationID string // scid + EncryptionScope string `param:"ses"` } func getDirectoryDepth(path string) string { @@ -51,17 +52,11 @@ func getDirectoryDepth(path string) string { // SignWithSharedKey uses an account's SharedKeyCredential to sign this signature values to produce the proper SAS query parameters. 
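// Illustrative sketch, not part of the upstream patch: with the EncryptionScope field and
// the new "ses" query parameter added above, an account SAS can pin an encryption scope.
// Account name, key and scope are placeholders.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)

func main() {
	cred, err := azblob.NewSharedKeyCredential("<account>", "<base64-account-key>")
	if err != nil {
		log.Fatal(err)
	}

	perms := sas.AccountPermissions{Read: true, List: true}
	resTypes := sas.AccountResourceTypes{Container: true, Object: true}

	qp, err := sas.AccountSignatureValues{
		Protocol:        sas.ProtocolHTTPS,
		ExpiryTime:      time.Now().Add(2 * time.Hour),
		Permissions:     perms.String(),
		ResourceTypes:   resTypes.String(),
		EncryptionScope: "<encryption-scope>", // new field, emitted as "ses"
	}.SignWithSharedKey(cred)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(qp.EncryptionScope()) // accessor added alongside the field
	fmt.Println(qp.Encode())          // query string now carries ses=<encryption-scope>
}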
func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) { - if v.ExpiryTime.IsZero() || v.Permissions == "" { + if v.Identifier == "" && (v.ExpiryTime.IsZero() || v.Permissions == "") { return QueryParameters{}, errors.New("service SAS is missing at least one of these: ExpiryTime or Permissions") } - //Make sure the permission characters are in the correct order - perms, err := parseBlobPermissions(v.Permissions) - if err != nil { - return QueryParameters{}, err - } - v.Permissions = perms.String() - + // Parse the resource resource := "c" if !v.SnapshotTime.IsZero() { resource = "bs" @@ -76,6 +71,21 @@ func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCre resource = "b" } + // make sure the permission characters are in the correct order + if resource == "c" { + perms, err := parseContainerPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + } else { + perms, err := parseBlobPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + } + if v.Version == "" { v.Version = Version } @@ -94,7 +104,8 @@ func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCre string(v.Protocol), v.Version, resource, - snapshotTime, // signed timestamp + snapshotTime, // signed timestamp + v.EncryptionScope, v.CacheControl, // rscc v.ContentDisposition, // rscd v.ContentEncoding, // rsce @@ -109,12 +120,13 @@ func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCre p := QueryParameters{ // Common SAS parameters - version: v.Version, - protocol: v.Protocol, - startTime: v.StartTime, - expiryTime: v.ExpiryTime, - permissions: v.Permissions, - ipRange: v.IPRange, + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + encryptionScope: v.EncryptionScope, // Container/Blob-specific SAS parameters resource: resource, @@ -202,7 +214,8 @@ func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *Us string(v.Protocol), v.Version, resource, - snapshotTime, // signed timestamp + snapshotTime, // signed timestamp + v.EncryptionScope, v.CacheControl, // rscc v.ContentDisposition, // rscd v.ContentEncoding, // rsce @@ -217,12 +230,13 @@ func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *Us p := QueryParameters{ // Common SAS parameters - version: v.Version, - protocol: v.Protocol, - startTime: v.StartTime, - expiryTime: v.ExpiryTime, - permissions: v.Permissions, - ipRange: v.IPRange, + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + encryptionScope: v.EncryptionScope, // Container/Blob-specific SAS parameters resource: resource, diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go index fb7b8696a..cf39c3d57 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go @@ -11,9 +11,6 @@ import ( "context" "errors" "fmt" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" - 
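// Illustrative sketch, not part of the upstream patch: the service SAS signer above now
// accepts an empty ExpiryTime/Permissions pair when a stored access policy Identifier is
// set, validates container-level permissions with the container parser, and folds the new
// EncryptionScope into the string-to-sign. Names below are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)

func main() {
	cred, err := azblob.NewSharedKeyCredential("<account>", "<base64-account-key>")
	if err != nil {
		log.Fatal(err)
	}

	// Container-scoped SAS: expiry and permissions come from the stored access policy
	// referenced by Identifier, which this version no longer rejects up front.
	qp, err := sas.BlobSignatureValues{
		ContainerName:   "<container>",
		Identifier:      "<stored-access-policy-id>",
		EncryptionScope: "<encryption-scope>", // new field, emitted as "ses"
	}.SignWithSharedKey(cred)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(qp.Encode())
}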
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "net/http" "strings" "time" @@ -21,8 +18,11 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" @@ -40,12 +40,16 @@ type Client base.Client[generated.ServiceClient] // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := shared.NewStorageChallengePolicy(cred) + audience := base.GetAudience((*base.ClientOptions)(options)) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewServiceClient(serviceURL, pl, &cred)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewServiceClient(serviceURL, azClient, &cred, (*base.ClientOptions)(conOptions))), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -54,9 +58,12 @@ func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOp // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - return (*Client)(base.NewServiceClient(serviceURL, pl, nil)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewServiceClient(serviceURL, azClient, nil, (*base.ClientOptions)(conOptions))), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. 
@@ -66,10 +73,14 @@ func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Clie func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { authPolicy := exported.NewSharedKeyCredPolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewServiceClient(serviceURL, pl, cred)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + return (*Client)(base.NewServiceClient(serviceURL, azClient, cred, (*base.ClientOptions)(conOptions))), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. @@ -126,6 +137,10 @@ func getGeneratedBlobClient(b *blob.Client) *generated.BlobClient { return base.InnerClient((*base.Client[generated.BlobClient])(b)) } +func (s *Client) getClientOptions() *base.ClientOptions { + return base.GetClientOptions((*base.Client[generated.ServiceClient])(s)) +} + // URL returns the URL endpoint used by the Client object. func (s *Client) URL() string { return s.generated().Endpoint() @@ -135,7 +150,7 @@ func (s *Client) URL() string { // this Client's URL. The new container.Client uses the same request policy pipeline as the Client. func (s *Client) NewContainerClient(containerName string) *container.Client { containerURL := runtime.JoinPaths(s.generated().Endpoint(), containerName) - return (*container.Client)(base.NewContainerClient(containerURL, s.generated().Pipeline(), s.credential())) + return (*container.Client)(base.NewContainerClient(containerURL, s.generated().InternalClient().WithClientName(exported.ModuleName), s.credential(), s.getClientOptions())) } // CreateContainer is a lifecycle method to creates a new container under the specified account. 
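// Illustrative sketch, not part of the upstream patch: the constructors above now build
// the bearer-token scope from the client options instead of a hard-coded one. Assuming
// the public service.ClientOptions exposes the same Audience and
// InsecureAllowCredentialWithHTTP fields the base options carry in this vendored version,
// a custom-audience setup would look roughly like this (endpoint and audience are
// placeholders).
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}

	opts := &service.ClientOptions{}
	opts.Audience = "https://<account>.blob.core.windows.net" // feeds base.GetAudience above
	// opts.InsecureAllowCredentialWithHTTP = true            // only for non-TLS test endpoints

	client, err := service.NewClient("https://<account>.blob.core.windows.net/", cred, opts)
	if err != nil {
		log.Fatal(err)
	}
	_ = client // use the client as usual; the challenge policy now carries the audience
}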
@@ -184,6 +199,9 @@ func (s *Client) NewListContainersPager(o *ListContainersOptions) *runtime.Pager if o.Include.Metadata { listOptions.Include = append(listOptions.Include, generated.ListContainersIncludeTypeMetadata) } + if o.Include.System { + listOptions.Include = append(listOptions.Include, generated.ListContainersIncludeTypeSystem) + } listOptions.Marker = o.Marker listOptions.Maxresults = o.MaxResults listOptions.Prefix = o.Prefix @@ -204,7 +222,7 @@ func (s *Client) NewListContainersPager(o *ListContainersOptions) *runtime.Pager if err != nil { return ListContainersResponse{}, err } - resp, err := s.generated().Pipeline().Do(req) + resp, err := s.generated().InternalClient().Pipeline().Do(req) if err != nil { return ListContainersResponse{}, err } @@ -262,7 +280,6 @@ func (s *Client) GetSASURL(resources sas.AccountResourceTypes, permissions sas.A st := o.format() qps, err := sas.AccountSignatureValues{ Version: sas.Version, - Protocol: sas.ProtocolHTTPS, Permissions: permissions.String(), ResourceTypes: resources.String(), StartTime: st, @@ -302,7 +319,8 @@ func (s *Client) NewBatchBuilder() (*BatchBuilder, error) { switch cred := s.credential().(type) { case *azcore.TokenCredential: - authPolicy = shared.NewStorageChallengePolicy(*cred) + conOptions := s.getClientOptions() + authPolicy = shared.NewStorageChallengePolicy(*cred, base.GetAudience(conOptions), conOptions.InsecureAllowCredentialWithHTTP) case *SharedKeyCredential: authPolicy = exported.NewSharedKeyCredPolicy(cred) case nil: diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go index 22f49474d..b70724d79 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go @@ -156,6 +156,9 @@ type ListContainersInclude struct { // Tells the service whether to return soft-deleted containers. Deleted bool + + // Tells the service whether to return system containers. 
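// Illustrative sketch, not part of the upstream patch: the listing options handled above
// gain a System flag so callers can ask the service to return system containers ($logs,
// $web, ...) next to metadata and soft-deleted entries. The endpoint is a placeholder and
// any azcore.TokenCredential works.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := service.NewClient("https://<account>.blob.core.windows.net/", cred, nil)
	if err != nil {
		log.Fatal(err)
	}

	pager := client.NewListContainersPager(&service.ListContainersOptions{
		Include: service.ListContainersInclude{Metadata: true, System: true}, // System is the new knob
	})
	for pager.More() {
		page, err := pager.NextPage(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		for _, c := range page.ContainerItems {
			fmt.Println(*c.Name)
		}
	}
}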
+ System bool } // --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/modules.txt b/vendor/modules.txt index 7718d9481..d6e1c86f0 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -11,7 +11,7 @@ github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage github.com/Azure/azure-sdk-for-go/storage github.com/Azure/azure-sdk-for-go/version -# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.0 +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azcore github.com/Azure/azure-sdk-for-go/sdk/azcore/arm @@ -77,7 +77,7 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources # github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage -# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0 +# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob From f42d3c812e9c3b7c653fe680fb754cfad327c222 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Tue, 9 Apr 2024 06:58:03 +0000 Subject: [PATCH 126/157] fix: allow http auth in volume clone --- pkg/blob/controllerserver.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/blob/controllerserver.go b/pkg/blob/controllerserver.go index 5d72f091e..d976a3920 100644 --- a/pkg/blob/controllerserver.go +++ b/pkg/blob/controllerserver.go @@ -923,7 +923,9 @@ func (d *Driver) generateSASToken(accountName, accountKey, storageEndpointSuffix if err != nil { return "", status.Errorf(codes.Internal, fmt.Sprintf("failed to generate sas token in creating new shared key credential, accountName: %s, err: %s", accountName, err.Error())) } - serviceClient, err := service.NewClientWithSharedKeyCredential(fmt.Sprintf("https://%s.blob.%s/", accountName, storageEndpointSuffix), credential, nil) + clientOptions := service.ClientOptions{} + clientOptions.InsecureAllowCredentialWithHTTP = true + serviceClient, err := service.NewClientWithSharedKeyCredential(fmt.Sprintf("https://%s.blob.%s/", accountName, storageEndpointSuffix), credential, &clientOptions) if err != nil { return "", status.Errorf(codes.Internal, fmt.Sprintf("failed to generate sas token in creating new client with shared key credential, accountName: %s, err: %s", accountName, err.Error())) } From 2f39c827cb17906695e9763c948d4991b9238986 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 9 Apr 2024 16:52:34 +0000 Subject: [PATCH 127/157] chore(deps): bump sigs.k8s.io/cloud-provider-azure/pkg/azclient Bumps [sigs.k8s.io/cloud-provider-azure/pkg/azclient](https://github.com/kubernetes-sigs/cloud-provider-azure) from 0.0.10 to 0.0.12. 
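// Illustrative sketch, not part of the upstream patch: the driver change above opts the
// SAS-generation client into shared-key auth over plain HTTP, and the SDK diff above also
// stops pinning account SAS URLs from GetSASURL to HTTPS-only. Against a non-TLS endpoint
// (for example a local Azurite emulator), the pattern looks roughly like this; account
// name, key and URL are placeholders.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
)

func main() {
	cred, err := service.NewSharedKeyCredential("<account>", "<base64-account-key>")
	if err != nil {
		log.Fatal(err)
	}

	opts := service.ClientOptions{}
	opts.InsecureAllowCredentialWithHTTP = true // same switch generateSASToken now sets

	client, err := service.NewClientWithSharedKeyCredential("http://127.0.0.1:10000/<account>", cred, &opts)
	if err != nil {
		log.Fatal(err)
	}

	sasURL, err := client.GetSASURL(
		sas.AccountResourceTypes{Container: true, Object: true},
		sas.AccountPermissions{Read: true, List: true},
		time.Now().Add(time.Hour),
		nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sasURL)
}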
- [Release notes](https://github.com/kubernetes-sigs/cloud-provider-azure/releases) - [Changelog](https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/docs/release-versioning.md) - [Commits](https://github.com/kubernetes-sigs/cloud-provider-azure/compare/pkg/azclient/v0.0.10...pkg/azclient/v0.0.12) --- updated-dependencies: - dependency-name: sigs.k8s.io/cloud-provider-azure/pkg/azclient dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 4 ++-- go.sum | 8 ++++---- .../Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 7 +++++++ .../sdk/azcore/internal/pollers/loc/loc.go | 8 ++++---- .../sdk/azcore/internal/pollers/util.go | 13 +++++++++++++ .../sdk/azcore/internal/shared/constants.go | 2 +- .../azure-sdk-for-go/sdk/azcore/runtime/poller.go | 5 +++++ vendor/modules.txt | 4 ++-- 8 files changed, 38 insertions(+), 13 deletions(-) diff --git a/go.mod b/go.mod index 95ba73dfe..958b597b4 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ toolchain go1.21.4 require ( github.com/Azure/azure-sdk-for-go v68.0.0+incompatible - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 @@ -156,7 +156,7 @@ require ( k8s.io/kubelet v0.29.2 // indirect k8s.io/pod-security-admission v0.29.0 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect - sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.10 + sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.12 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index 59e0bace4..c4584b2d1 100644 --- a/go.sum +++ b/go.sum @@ -5,8 +5,8 @@ cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGB cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.0 h1:U/kwEXj0Y+1REAkV4kV8VO1CsEp8tSaQDG/7qC5XuqQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.0/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= @@ -468,8 +468,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2S sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc h1:y/jDBU+4EcPiHGPMQeK6B4IKEhOp3CCts/ZaRH1sa3w= sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc/go.mod 
h1:nS6tUQgopFVyqFQW1P/luDXNNQfsQOg3WFs9GhzNjm8= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.10 h1:bI+ZLfnsvExyxk/mmwmOnFHbfRyv0kc0Zk5sVh+w75A= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.10/go.mod h1:WxbuPyPqXd+yNIkWNJ/gyjAZp/DXKGHDOdZH6DXsKRs= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.12 h1:hZAEae4yxdVjoRbntb2GjJKtE/ZFqAjb6bvqnsOpRZ0= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.12/go.mod h1:u336a/gqvvV5G8DXJNHyN6cRHPOzsvwvNmmBWYSrpI0= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.3 h1:4Sk81u8kxM+yTjEfC9lPP3YyLpWeDTFRquI/3a5AzRY= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.3/go.mod h1:FWY5fW/WLg/S1Iul/Mh5YeRyQhl8Ii1Sk+YLLOj3KDk= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index 6f32eda55..a6675492b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,12 @@ # Release History +## 1.11.1 (2024-04-02) + +### Bugs Fixed + +* Pollers that use the `Location` header won't consider `http.StatusRequestTimeout` a terminal failure. +* `runtime.Poller[T].Result` won't consider non-terminal error responses as terminal. + ## 1.11.0 (2024-04-01) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go index b0e62b1df..7a56c5211 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go @@ -103,10 +103,10 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { } else if resp.StatusCode > 199 && resp.StatusCode < 300 { // any 2xx other than a 202 indicates success p.CurState = poller.StatusSucceeded - } else if resp.StatusCode == http.StatusTooManyRequests { - // the request is being throttled. DO NOT include - // this as a terminal failure. preserve the - // existing state and return the response. + } else if pollers.IsNonTerminalHTTPStatusCode(resp) { + // the request timed out or is being throttled. + // DO NOT include this as a terminal failure. preserve + // the existing state and return the response. } else { p.CurState = poller.StatusFailed } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go index 08e8d24e4..eb3cf651d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go @@ -185,3 +185,16 @@ func ResultHelper[T any](resp *http.Response, failed bool, out *T) error { } return nil } + +// IsNonTerminalHTTPStatusCode returns true if the HTTP status code should be +// considered non-terminal thus eligible for retry. 
+func IsNonTerminalHTTPStatusCode(resp *http.Response) bool { + return exported.HasStatusCode(resp, + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + ) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 4d3465109..03691cbf0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -40,5 +40,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.11.0" + Version = "v1.11.1" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go index b0be7e5e7..03f76c9aa 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go @@ -334,6 +334,11 @@ func (p *Poller[T]) Result(ctx context.Context) (res T, err error) { err = p.op.Result(ctx, p.result) var respErr *exported.ResponseError if errors.As(err, &respErr) { + if pollers.IsNonTerminalHTTPStatusCode(respErr.RawResponse) { + // the request failed in a non-terminal way. + // don't cache the error or mark the Poller as done + return + } // the LRO failed. record the error p.err = err } else if err != nil { diff --git a/vendor/modules.txt b/vendor/modules.txt index 7718d9481..5833bd120 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -11,7 +11,7 @@ github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage github.com/Azure/azure-sdk-for-go/storage github.com/Azure/azure-sdk-for-go/version -# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.0 +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azcore github.com/Azure/azure-sdk-for-go/sdk/azcore/arm @@ -1619,7 +1619,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/util/string sigs.k8s.io/cloud-provider-azure/pkg/util/taints sigs.k8s.io/cloud-provider-azure/pkg/util/vm sigs.k8s.io/cloud-provider-azure/pkg/version -# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.10 +# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.12 ## explicit; go 1.20 sigs.k8s.io/cloud-provider-azure/pkg/azclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient From 3ccb8000286512a75f1538e28b1f687ee639e770 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Apr 2024 17:00:50 +0000 Subject: [PATCH 128/157] chore(deps): bump sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader Bumps [sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader](https://github.com/kubernetes-sigs/cloud-provider-azure) from 0.0.3 to 0.0.4. 
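IsNonTerminalHTTPStatusCode above lives in azcore's internal pollers package, so it cannot be imported by callers. As a rough standalone illustration of the classification azcore v1.11.1 applies while polling, using only net/http constants:

package main

import (
	"fmt"
	"net/http"
)

// retryableStatus mirrors the status-code classification introduced in
// azcore v1.11.1: these responses are treated as non-terminal, so a poller
// keeps polling rather than recording a terminal failure.
func retryableStatus(code int) bool {
	switch code {
	case http.StatusRequestTimeout, // 408
		http.StatusTooManyRequests,     // 429
		http.StatusInternalServerError, // 500
		http.StatusBadGateway,          // 502
		http.StatusServiceUnavailable,  // 503
		http.StatusGatewayTimeout:      // 504
		return true
	}
	return false
}

func main() {
	for _, code := range []int{http.StatusOK, http.StatusTooManyRequests, http.StatusNotFound} {
		fmt.Printf("HTTP %d -> retry: %v\n", code, retryableStatus(code))
	}
}

With this change a 408/429/5xx response seen during polling leaves the poller's state untouched instead of being cached as a terminal failure.
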
- [Release notes](https://github.com/kubernetes-sigs/cloud-provider-azure/releases) - [Changelog](https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/docs/release-versioning.md) - [Commits](https://github.com/kubernetes-sigs/cloud-provider-azure/compare/pkg/azclient/v0.0.3...pkg/azclient/v0.0.4) --- updated-dependencies: - dependency-name: sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- vendor/modules.txt | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 958b597b4..ee73c5194 100644 --- a/go.mod +++ b/go.mod @@ -38,7 +38,7 @@ require ( k8s.io/mount-utils v0.29.0 k8s.io/utils v0.0.0-20231127182322-b307cd553661 sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc - sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.3 + sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.4 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index c4584b2d1..67830610a 100644 --- a/go.sum +++ b/go.sum @@ -470,8 +470,8 @@ sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc h1:y/jDBU sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc/go.mod h1:nS6tUQgopFVyqFQW1P/luDXNNQfsQOg3WFs9GhzNjm8= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.12 h1:hZAEae4yxdVjoRbntb2GjJKtE/ZFqAjb6bvqnsOpRZ0= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.12/go.mod h1:u336a/gqvvV5G8DXJNHyN6cRHPOzsvwvNmmBWYSrpI0= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.3 h1:4Sk81u8kxM+yTjEfC9lPP3YyLpWeDTFRquI/3a5AzRY= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.3/go.mod h1:FWY5fW/WLg/S1Iul/Mh5YeRyQhl8Ii1Sk+YLLOj3KDk= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.4 h1:CFMHYo6/OQpLTycJGQIze2pchNeJQ7L2TQC6fDo4JGY= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.4/go.mod h1:PvXgFxPcfve6yBiWNIO/fqAMvGVC9W7qN6M2vIj4zmY= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/vendor/modules.txt b/vendor/modules.txt index 5833bd120..b953f868c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1662,7 +1662,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient -# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.3 +# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.4 ## explicit; go 1.21 sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader # sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd From 216b501013f01ba05dfaa4c68ad0842262a06ec4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 11 Apr 2024 16:10:50 +0000 Subject: [PATCH 129/157] chore(deps): bump sigs.k8s.io/cloud-provider-azure/pkg/azclient Bumps [sigs.k8s.io/cloud-provider-azure/pkg/azclient](https://github.com/kubernetes-sigs/cloud-provider-azure) from 0.0.12 to 0.0.13. 
- [Release notes](https://github.com/kubernetes-sigs/cloud-provider-azure/releases) - [Changelog](https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/docs/release-versioning.md) - [Commits](https://github.com/kubernetes-sigs/cloud-provider-azure/compare/pkg/azclient/v0.0.12...pkg/azclient/v0.0.13) --- updated-dependencies: - dependency-name: sigs.k8s.io/cloud-provider-azure/pkg/azclient dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- .../sdk/azidentity/CHANGELOG.md | 9 +++++++++ .../sdk/azidentity/assets.json | 2 +- .../sdk/azidentity/azidentity.go | 3 +++ .../sdk/azidentity/go.work.sum | 6 ++++-- .../sdk/azidentity/managed_identity_client.go | 18 +++++++++--------- .../azure-sdk-for-go/sdk/azidentity/version.go | 2 +- .../apps/confidential/confidential.go | 4 ++-- .../apps/public/public.go | 8 ++++++++ vendor/github.com/golang-jwt/jwt/v5/ecdsa.go | 4 ++-- vendor/github.com/golang-jwt/jwt/v5/hmac.go | 4 ++-- vendor/modules.txt | 8 ++++---- 13 files changed, 57 insertions(+), 35 deletions(-) diff --git a/go.mod b/go.mod index 1db9a05f1..3e0ae7384 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ toolchain go1.21.4 require ( github.com/Azure/azure-sdk-for-go v68.0.0+incompatible github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault v1.4.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 @@ -56,7 +56,7 @@ require ( github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect @@ -80,7 +80,7 @@ require ( github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - github.com/golang-jwt/jwt/v5 v5.2.0 // indirect + github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/cel-go v0.17.7 // indirect github.com/google/gnostic-models v0.6.8 // indirect @@ -156,7 +156,7 @@ require ( k8s.io/kubelet v0.29.2 // indirect k8s.io/pod-security-admission v0.29.0 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect - sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.12 + sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.13 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index 7b265c26c..33af417d8 100644 --- a/go.sum +++ b/go.sum @@ -7,8 +7,8 @@ github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0 github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go/sdk/azcore 
v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0 h1:xnO4sFyG8UH2fElBkcqLTOZsAajvKfnSlgBBW8dXYjw= @@ -57,8 +57,8 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= @@ -132,8 +132,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= -github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -468,8 +468,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2S sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc h1:y/jDBU+4EcPiHGPMQeK6B4IKEhOp3CCts/ZaRH1sa3w= sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc/go.mod h1:nS6tUQgopFVyqFQW1P/luDXNNQfsQOg3WFs9GhzNjm8= 
-sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.12 h1:hZAEae4yxdVjoRbntb2GjJKtE/ZFqAjb6bvqnsOpRZ0= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.12/go.mod h1:u336a/gqvvV5G8DXJNHyN6cRHPOzsvwvNmmBWYSrpI0= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.13 h1:dxpo41/N6m2R//9fmqKgqYZL2k0rQSE1NvQteIQ9pGA= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.13/go.mod h1:tN2BDTM6RDyQsae6JRvaaA14LVxDsRaLU3Ea2MRUBjg= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.4 h1:CFMHYo6/OQpLTycJGQIze2pchNeJQ7L2TQC6fDo4JGY= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.4/go.mod h1:PvXgFxPcfve6yBiWNIO/fqAMvGVC9W7qN6M2vIj4zmY= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md index 71dcb5f3e..f6749c030 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -1,5 +1,14 @@ # Release History +## 1.5.2 (2024-04-09) + +### Bugs Fixed +* `ManagedIdentityCredential` now specifies resource IDs correctly for Azure Container Instances + +### Other Changes +* Restored v1.4.0 error behavior for empty tenant IDs +* Upgraded dependencies + ## 1.5.1 (2024-01-17) ### Bugs Fixed diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json index 173ce2b3c..1be55a4bd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/azidentity", - "Tag": "go/azidentity_db4a26f583" + "Tag": "go/azidentity_98074050dc" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go index c3bcfb56c..b0965036b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -121,6 +121,9 @@ func alphanumeric(r rune) bool { } func validTenantID(tenantID string) bool { + if len(tenantID) < 1 { + return false + } for _, r := range tenantID { if !(alphanumeric(r) || r == '.' 
|| r == '-') { return false diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum index 7cd86b001..65bcba7df 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum @@ -3,8 +3,6 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1/go.mod h1:3Ug6Qzto9an github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -14,6 +12,7 @@ github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= @@ -23,6 +22,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -33,6 +34,7 @@ golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go index 7c25cb8bd..d129a1e91 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -34,14 +34,14 @@ const ( identityServerThumbprint = "IDENTITY_SERVER_THUMBPRINT" headerMetadata = "Metadata" imdsEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" + miResID = "mi_res_id" msiEndpoint = "MSI_ENDPOINT" + msiResID = "msi_res_id" msiSecret = "MSI_SECRET" imdsAPIVersion = "2018-02-01" azureArcAPIVersion = "2019-08-15" + qpClientID = "client_id" serviceFabricAPIVersion = "2019-07-01-preview" - - qpClientID = "client_id" - qpResID = "mi_res_id" ) type msiType int @@ -286,7 +286,7 @@ func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id Ma q.Add("resource", strings.Join(scopes, " ")) if id != nil { if id.idKind() == miResourceID { - q.Add(qpResID, id.String()) + q.Add(msiResID, id.String()) } else { q.Add(qpClientID, id.String()) } @@ -306,7 +306,7 @@ func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, q.Add("resource", scopes[0]) if id != nil { if id.idKind() == miResourceID { - q.Add(qpResID, id.String()) + q.Add(miResID, id.String()) } else { q.Add(qpClientID, id.String()) } @@ -329,7 +329,7 @@ func (c *managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id if id.idKind() == miResourceID { log.Write(EventAuthentication, "WARNING: Azure ML doesn't support specifying a managed identity by resource ID") q.Set("clientid", "") - q.Set(qpResID, id.String()) + q.Set(miResID, id.String()) } else { q.Set("clientid", id.String()) } @@ -351,7 +351,7 @@ func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Conte if id != nil { log.Write(EventAuthentication, "WARNING: Service Fabric doesn't support selecting a user-assigned identity at runtime") if id.idKind() == miResourceID { - q.Add(qpResID, id.String()) + q.Add(miResID, id.String()) } else { q.Add(qpClientID, id.String()) } @@ -411,7 +411,7 @@ func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, i if id != nil { log.Write(EventAuthentication, "WARNING: Azure Arc doesn't support user-assigned managed identities") if id.idKind() == miResourceID { - q.Add(qpResID, id.String()) + q.Add(miResID, id.String()) } else { q.Add(qpClientID, id.String()) } @@ -437,7 +437,7 @@ func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, log.Write(EventAuthentication, "WARNING: Cloud Shell doesn't support user-assigned managed identities") q := request.Raw().URL.Query() if id.idKind() == miResourceID { - q.Add(qpResID, id.String()) + q.Add(miResID, id.String()) } else { q.Add(qpClientID, id.String()) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go index e8caeea71..9b9d7ae0d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -14,5 +14,5 @@ const ( module = "github.com/Azure/azure-sdk-for-go/sdk/" + component // Version is the semantic version (see http://semver.org) of this module. 
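The managed_identity_client.go changes above adjust the query parameter used to pass a user-assigned identity's resource ID (msi_res_id for IMDS, mi_res_id for the other endpoints). From the caller's side the azidentity API is unchanged; a minimal sketch of selecting an identity by resource ID, where the resource ID and token scope are placeholders:

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// Placeholder resource ID of a user-assigned managed identity; the fix
	// above only changes how this ID is forwarded to the identity endpoints.
	resID := azidentity.ResourceID("/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.ManagedIdentity/userAssignedIdentities/<name>")

	cred, err := azidentity.NewManagedIdentityCredential(&azidentity.ManagedIdentityCredentialOptions{
		ID: resID,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Request a token for Azure Storage with the selected identity.
	tok, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://storage.azure.com/.default"},
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = tok.Token
}
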
- version = "v1.5.1" + version = "v1.5.2" ) diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go index 1841d146f..f86286051 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go @@ -48,8 +48,8 @@ duplication. .Net People, Take note on X509: This uses x509.Certificates and private keys. x509 does not store private keys. .Net -has some x509.Certificate2 thing that has private keys, but that is just some bullcrap that .Net -added, it doesn't exist in real life. As such I've put a PEM decoder into here. +has a x509.Certificate2 abstraction that has private keys, but that just a strange invention. +As such I've put a PEM decoder into here. */ // TODO(msal): This should have example code for each method on client using Go's example doc framework. diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go index e346ff3df..392e5e43f 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go @@ -217,11 +217,13 @@ func WithClaims(claims string) interface { func WithAuthenticationScheme(authnScheme AuthenticationScheme) interface { AcquireSilentOption AcquireInteractiveOption + AcquireByUsernamePasswordOption options.CallOption } { return struct { AcquireSilentOption AcquireInteractiveOption + AcquireByUsernamePasswordOption options.CallOption }{ CallOption: options.NewCallOption( @@ -231,6 +233,8 @@ func WithAuthenticationScheme(authnScheme AuthenticationScheme) interface { t.authnScheme = authnScheme case *interactiveAuthOptions: t.authnScheme = authnScheme + case *acquireTokenByUsernamePasswordOptions: + t.authnScheme = authnScheme default: return fmt.Errorf("unexpected options type %T", a) } @@ -349,6 +353,7 @@ func (pca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts // acquireTokenByUsernamePasswordOptions contains optional configuration for AcquireTokenByUsernamePassword type acquireTokenByUsernamePasswordOptions struct { claims, tenantID string + authnScheme AuthenticationScheme } // AcquireByUsernamePasswordOption is implemented by options for AcquireTokenByUsernamePassword @@ -374,6 +379,9 @@ func (pca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []s authParams.Claims = o.claims authParams.Username = username authParams.Password = password + if o.authnScheme != nil { + authParams.AuthnScheme = o.authnScheme + } token, err := pca.base.Token.UsernamePassword(ctx, authParams) if err != nil { diff --git a/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go index ca85659ba..c929e4a02 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go +++ b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go @@ -62,7 +62,7 @@ func (m *SigningMethodECDSA) Verify(signingString string, sig []byte, key interf case *ecdsa.PublicKey: ecdsaKey = k default: - return newError("ECDSA verify expects *ecsda.PublicKey", ErrInvalidKeyType) + return newError("ECDSA verify expects *ecdsa.PublicKey", ErrInvalidKeyType) } if len(sig) != 2*m.KeySize { @@ -96,7 +96,7 
@@ func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) ([]byte case *ecdsa.PrivateKey: ecdsaKey = k default: - return nil, newError("ECDSA sign expects *ecsda.PrivateKey", ErrInvalidKeyType) + return nil, newError("ECDSA sign expects *ecdsa.PrivateKey", ErrInvalidKeyType) } // Create the hasher diff --git a/vendor/github.com/golang-jwt/jwt/v5/hmac.go b/vendor/github.com/golang-jwt/jwt/v5/hmac.go index 96c62722d..aca600ce1 100644 --- a/vendor/github.com/golang-jwt/jwt/v5/hmac.go +++ b/vendor/github.com/golang-jwt/jwt/v5/hmac.go @@ -91,7 +91,7 @@ func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key interfa func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) ([]byte, error) { if keyBytes, ok := key.([]byte); ok { if !m.Hash.Available() { - return nil, newError("HMAC sign expects []byte", ErrInvalidKeyType) + return nil, ErrHashUnavailable } hasher := hmac.New(m.Hash.New, keyBytes) @@ -100,5 +100,5 @@ func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) ([]byte, return hasher.Sum(nil), nil } - return nil, ErrInvalidKeyType + return nil, newError("HMAC sign expects []byte", ErrInvalidKeyType) } diff --git a/vendor/modules.txt b/vendor/modules.txt index 04a907486..bf672882e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -34,7 +34,7 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming github.com/Azure/azure-sdk-for-go/sdk/azcore/to github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing -# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 +# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azidentity github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal @@ -120,7 +120,7 @@ github.com/Azure/go-autorest/logger # github.com/Azure/go-autorest/tracing v0.6.0 ## explicit; go 1.12 github.com/Azure/go-autorest/tracing -# github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 +# github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 ## explicit; go 1.18 github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential @@ -228,7 +228,7 @@ github.com/gogo/protobuf/sortkeys # github.com/golang-jwt/jwt/v4 v4.5.0 ## explicit; go 1.16 github.com/golang-jwt/jwt/v4 -# github.com/golang-jwt/jwt/v5 v5.2.0 +# github.com/golang-jwt/jwt/v5 v5.2.1 ## explicit; go 1.18 github.com/golang-jwt/jwt/v5 # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da @@ -1619,7 +1619,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/util/string sigs.k8s.io/cloud-provider-azure/pkg/util/taints sigs.k8s.io/cloud-provider-azure/pkg/util/vm sigs.k8s.io/cloud-provider-azure/pkg/version -# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.12 +# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.13 ## explicit; go 1.20 sigs.k8s.io/cloud-provider-azure/pkg/azclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient From 4c7ff2202c9b143731be24819e2c625ac09044da Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Fri, 12 Apr 2024 07:19:12 +0000 Subject: [PATCH 130/157] doc: add new charts --- charts/index.yaml | 111 +++--- charts/v1.22.6/blob-csi-driver-v1.22.6.tgz | Bin 0 -> 5869 bytes charts/v1.22.6/blob-csi-driver/Chart.yaml | 5 + .../blob-csi-driver/templates/NOTES.txt | 5 + .../blob-csi-driver/templates/_helpers.tpl | 49 +++ .../templates/csi-blob-controller.yaml | 215 ++++++++++++ 
.../templates/csi-blob-driver.yaml | 14 + .../templates/csi-blob-node.yaml | 323 +++++++++++++++++ .../templates/rbac-csi-blob-controller.yaml | 115 ++++++ .../templates/rbac-csi-blob-node.yaml | 29 ++ .../serviceaccount-csi-blob-controller.yaml | 17 + .../serviceaccount-csi-blob-node.yaml | 17 + charts/v1.22.6/blob-csi-driver/values.yaml | 180 ++++++++++ charts/v1.23.4/blob-csi-driver-v1.23.4.tgz | Bin 0 -> 5944 bytes charts/v1.23.4/blob-csi-driver/Chart.yaml | 5 + .../blob-csi-driver/templates/NOTES.txt | 5 + .../blob-csi-driver/templates/_helpers.tpl | 49 +++ .../templates/csi-blob-controller.yaml | 216 ++++++++++++ .../templates/csi-blob-driver.yaml | 16 + .../templates/csi-blob-node.yaml | 326 ++++++++++++++++++ .../templates/rbac-csi-blob-controller.yaml | 115 ++++++ .../templates/rbac-csi-blob-node.yaml | 29 ++ .../serviceaccount-csi-blob-controller.yaml | 17 + .../serviceaccount-csi-blob-node.yaml | 17 + charts/v1.23.4/blob-csi-driver/values.yaml | 180 ++++++++++ charts/v1.24.1/blob-csi-driver-v1.24.1.tgz | Bin 0 -> 5981 bytes charts/v1.24.1/blob-csi-driver/Chart.yaml | 5 + .../blob-csi-driver/templates/NOTES.txt | 5 + .../blob-csi-driver/templates/_helpers.tpl | 49 +++ .../templates/csi-blob-controller.yaml | 214 ++++++++++++ .../templates/csi-blob-driver.yaml | 16 + .../templates/csi-blob-node.yaml | 326 ++++++++++++++++++ .../templates/rbac-csi-blob-controller.yaml | 115 ++++++ .../templates/rbac-csi-blob-node.yaml | 29 ++ .../serviceaccount-csi-blob-controller.yaml | 17 + .../serviceaccount-csi-blob-node.yaml | 17 + charts/v1.24.1/blob-csi-driver/values.yaml | 181 ++++++++++ deploy/v1.22.6/csi-blob-controller.yaml | 143 ++++++++ deploy/v1.22.6/csi-blob-driver.yaml | 12 + deploy/v1.22.6/csi-blob-node.yaml | 203 +++++++++++ deploy/v1.22.6/kustomization.yaml | 10 + deploy/v1.22.6/rbac-csi-blob-controller.yaml | 108 ++++++ deploy/v1.22.6/rbac-csi-blob-node.yaml | 30 ++ deploy/v1.23.4/csi-blob-controller.yaml | 144 ++++++++ deploy/v1.23.4/csi-blob-driver.yaml | 12 + deploy/v1.23.4/csi-blob-node.yaml | 208 +++++++++++ deploy/v1.23.4/kustomization.yaml | 10 + deploy/v1.23.4/rbac-csi-blob-controller.yaml | 108 ++++++ deploy/v1.23.4/rbac-csi-blob-node.yaml | 30 ++ deploy/v1.24.1/csi-blob-controller.yaml | 142 ++++++++ deploy/v1.24.1/csi-blob-driver.yaml | 12 + deploy/v1.24.1/csi-blob-node.yaml | 239 +++++++++++++ deploy/v1.24.1/kustomization.yaml | 10 + deploy/v1.24.1/rbac-csi-blob-controller.yaml | 108 ++++++ deploy/v1.24.1/rbac-csi-blob-node.yaml | 30 ++ docs/install-csi-driver-v1.22.6.md | 47 +++ docs/install-csi-driver-v1.23.4.md | 47 +++ docs/install-csi-driver-v1.24.1.md | 47 +++ 58 files changed, 4687 insertions(+), 42 deletions(-) create mode 100644 charts/v1.22.6/blob-csi-driver-v1.22.6.tgz create mode 100644 charts/v1.22.6/blob-csi-driver/Chart.yaml create mode 100644 charts/v1.22.6/blob-csi-driver/templates/NOTES.txt create mode 100644 charts/v1.22.6/blob-csi-driver/templates/_helpers.tpl create mode 100644 charts/v1.22.6/blob-csi-driver/templates/csi-blob-controller.yaml create mode 100644 charts/v1.22.6/blob-csi-driver/templates/csi-blob-driver.yaml create mode 100644 charts/v1.22.6/blob-csi-driver/templates/csi-blob-node.yaml create mode 100644 charts/v1.22.6/blob-csi-driver/templates/rbac-csi-blob-controller.yaml create mode 100644 charts/v1.22.6/blob-csi-driver/templates/rbac-csi-blob-node.yaml create mode 100644 charts/v1.22.6/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml create mode 100644 
charts/v1.22.6/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml create mode 100644 charts/v1.22.6/blob-csi-driver/values.yaml create mode 100644 charts/v1.23.4/blob-csi-driver-v1.23.4.tgz create mode 100644 charts/v1.23.4/blob-csi-driver/Chart.yaml create mode 100644 charts/v1.23.4/blob-csi-driver/templates/NOTES.txt create mode 100644 charts/v1.23.4/blob-csi-driver/templates/_helpers.tpl create mode 100644 charts/v1.23.4/blob-csi-driver/templates/csi-blob-controller.yaml create mode 100644 charts/v1.23.4/blob-csi-driver/templates/csi-blob-driver.yaml create mode 100644 charts/v1.23.4/blob-csi-driver/templates/csi-blob-node.yaml create mode 100644 charts/v1.23.4/blob-csi-driver/templates/rbac-csi-blob-controller.yaml create mode 100644 charts/v1.23.4/blob-csi-driver/templates/rbac-csi-blob-node.yaml create mode 100644 charts/v1.23.4/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml create mode 100644 charts/v1.23.4/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml create mode 100644 charts/v1.23.4/blob-csi-driver/values.yaml create mode 100644 charts/v1.24.1/blob-csi-driver-v1.24.1.tgz create mode 100644 charts/v1.24.1/blob-csi-driver/Chart.yaml create mode 100644 charts/v1.24.1/blob-csi-driver/templates/NOTES.txt create mode 100644 charts/v1.24.1/blob-csi-driver/templates/_helpers.tpl create mode 100644 charts/v1.24.1/blob-csi-driver/templates/csi-blob-controller.yaml create mode 100644 charts/v1.24.1/blob-csi-driver/templates/csi-blob-driver.yaml create mode 100644 charts/v1.24.1/blob-csi-driver/templates/csi-blob-node.yaml create mode 100644 charts/v1.24.1/blob-csi-driver/templates/rbac-csi-blob-controller.yaml create mode 100644 charts/v1.24.1/blob-csi-driver/templates/rbac-csi-blob-node.yaml create mode 100644 charts/v1.24.1/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml create mode 100644 charts/v1.24.1/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml create mode 100644 charts/v1.24.1/blob-csi-driver/values.yaml create mode 100644 deploy/v1.22.6/csi-blob-controller.yaml create mode 100644 deploy/v1.22.6/csi-blob-driver.yaml create mode 100644 deploy/v1.22.6/csi-blob-node.yaml create mode 100644 deploy/v1.22.6/kustomization.yaml create mode 100644 deploy/v1.22.6/rbac-csi-blob-controller.yaml create mode 100644 deploy/v1.22.6/rbac-csi-blob-node.yaml create mode 100644 deploy/v1.23.4/csi-blob-controller.yaml create mode 100644 deploy/v1.23.4/csi-blob-driver.yaml create mode 100644 deploy/v1.23.4/csi-blob-node.yaml create mode 100644 deploy/v1.23.4/kustomization.yaml create mode 100644 deploy/v1.23.4/rbac-csi-blob-controller.yaml create mode 100644 deploy/v1.23.4/rbac-csi-blob-node.yaml create mode 100644 deploy/v1.24.1/csi-blob-controller.yaml create mode 100644 deploy/v1.24.1/csi-blob-driver.yaml create mode 100644 deploy/v1.24.1/csi-blob-node.yaml create mode 100644 deploy/v1.24.1/kustomization.yaml create mode 100644 deploy/v1.24.1/rbac-csi-blob-controller.yaml create mode 100644 deploy/v1.24.1/rbac-csi-blob-node.yaml create mode 100644 docs/install-csi-driver-v1.22.6.md create mode 100644 docs/install-csi-driver-v1.23.4.md create mode 100644 docs/install-csi-driver-v1.24.1.md diff --git a/charts/index.yaml b/charts/index.yaml index 0ec29f462..833c15ba2 100644 --- a/charts/index.yaml +++ b/charts/index.yaml @@ -1,27 +1,45 @@ apiVersion: v1 entries: blob-csi-driver: + - apiVersion: v1 + appVersion: v1.24.1 + created: "2024-04-12T07:18:31.462033556Z" + description: Azure Blob Storage CSI driver + digest: 
82537068e57177cf6d0697c7c942aaf496de9280fbc7b96ab326776fa4db8481 + name: blob-csi-driver + urls: + - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.24.1/blob-csi-driver-v1.24.1.tgz + version: v1.24.1 - apiVersion: v1 appVersion: v1.24.0 - created: "2024-02-22T13:38:43.33899957Z" + created: "2024-04-12T07:18:31.461360864Z" description: Azure Blob Storage CSI driver - digest: c9ecca2dde77d9557f3917513c8f5adcb861c2cfa9fe1e08f5bef056ab583efd + digest: 3b3b1a4fee786f0c9445da2f35bd1e251265cec8ee03c3cf99fe9f0bf9575e09 name: blob-csi-driver urls: - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.24.0/blob-csi-driver-v1.24.0.tgz version: v1.24.0 + - apiVersion: v1 + appVersion: v1.23.4 + created: "2024-04-12T07:18:31.460274467Z" + description: Azure Blob Storage CSI driver + digest: 20d0157476ecf0a38b2d3982587b9657b27e1603b2185709faeff73b6d671ceb + name: blob-csi-driver + urls: + - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.23.4/blob-csi-driver-v1.23.4.tgz + version: v1.23.4 - apiVersion: v1 appVersion: v1.23.3 - created: "2024-02-22T13:38:43.338374858Z" + created: "2024-04-12T07:18:31.459269166Z" description: Azure Blob Storage CSI driver - digest: 4acef5e84bcbc01fa624b73f14d1331ee14b14e52b4db018f0037fd2ada015c7 + digest: 21c8355faf362c527e40ac417b17d3c28fa041941633254358387997b9bee8a6 name: blob-csi-driver urls: - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.23.3/blob-csi-driver-v1.23.3.tgz version: v1.23.3 - apiVersion: v1 appVersion: v1.23.2 - created: "2024-02-22T13:38:43.33777157Z" + created: "2024-04-12T07:18:31.458611981Z" description: Azure Blob Storage CSI driver digest: 057d6658c5879ee7e564d59275366521dc0a2e311c0527e570eaccd544622e60 name: blob-csi-driver @@ -30,7 +48,7 @@ entries: version: v1.23.2 - apiVersion: v1 appVersion: v1.23.1 - created: "2024-02-22T13:38:43.337162249Z" + created: "2024-04-12T07:18:31.457969298Z" description: Azure Blob Storage CSI driver digest: 66215f12a4e3acdcf09416d817b737e14546058b081a2cfd8bf9ef507229ca07 name: blob-csi-driver @@ -39,16 +57,25 @@ entries: version: v1.23.1 - apiVersion: v1 appVersion: v1.23.0 - created: "2024-02-22T13:38:43.336524751Z" + created: "2024-04-12T07:18:31.457353804Z" description: Azure Blob Storage CSI driver digest: 57151e21e33660522f25694bd8ae985e5e17c7ffe09904ad2af4025e8bf1da72 name: blob-csi-driver urls: - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.23.0/blob-csi-driver-v1.23.0.tgz version: v1.23.0 + - apiVersion: v1 + appVersion: v1.22.6 + created: "2024-04-12T07:18:31.456725713Z" + description: Azure Blob Storage CSI driver + digest: 82e30775aa05e093c30884f480d505b408023dfa32944cf21ebffe9d88afb1f2 + name: blob-csi-driver + urls: + - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.22.6/blob-csi-driver-v1.22.6.tgz + version: v1.22.6 - apiVersion: v1 appVersion: v1.22.5 - created: "2024-02-22T13:38:43.335903602Z" + created: "2024-04-12T07:18:31.456077167Z" description: Azure Blob Storage CSI driver digest: ff3c2c2e05dd048dd0af3e5c7d002eae2928a5d17fb269a1e4d5cadd30e8ab51 name: blob-csi-driver @@ -57,7 +84,7 @@ entries: version: v1.22.5 - apiVersion: v1 appVersion: v1.22.4 - created: "2024-02-22T13:38:43.335229669Z" + created: "2024-04-12T07:18:31.45545871Z" description: Azure Blob Storage CSI driver digest: 6c38e79d2f50616daac0658cfa5b1a569e6ff8ce8f24ed40f563e87fb1d1340a name: blob-csi-driver @@ -66,7 +93,7 @@ 
entries: version: v1.22.4 - apiVersion: v1 appVersion: v1.22.3 - created: "2024-02-22T13:38:43.333709892Z" + created: "2024-04-12T07:18:31.454792765Z" description: Azure Blob Storage CSI driver digest: 6cdee296d22ecd330f477f2ca6da51b07320c546c04ae46c23eef48146b772c1 name: blob-csi-driver @@ -75,7 +102,7 @@ entries: version: v1.22.3 - apiVersion: v1 appVersion: v1.22.2 - created: "2024-02-22T13:38:43.333082873Z" + created: "2024-04-12T07:18:31.454147089Z" description: Azure Blob Storage CSI driver digest: 259e66dc12db7310fe1c51e49c964398e0a6b7d511133916dd7d25f748f0b791 name: blob-csi-driver @@ -84,7 +111,7 @@ entries: version: v1.22.2 - apiVersion: v1 appVersion: v1.22.1 - created: "2024-02-22T13:38:43.332470917Z" + created: "2024-04-12T07:18:31.453303054Z" description: Azure Blob Storage CSI driver digest: 8329d477d55c82f97bb09fb172c5f39a1677bedc13c7410bd93b306194516438 name: blob-csi-driver @@ -93,7 +120,7 @@ entries: version: v1.22.1 - apiVersion: v1 appVersion: v1.21.7 - created: "2024-02-22T13:38:43.331849146Z" + created: "2024-04-12T07:18:31.451715932Z" description: Azure Blob Storage CSI driver digest: 1095721182d611e2556c611dd330758d8130fe66493db4f9189586a9219896d3 name: blob-csi-driver @@ -102,7 +129,7 @@ entries: version: v1.21.7 - apiVersion: v1 appVersion: v1.21.6 - created: "2024-02-22T13:38:43.33123788Z" + created: "2024-04-12T07:18:31.451094812Z" description: Azure Blob Storage CSI driver digest: d5ba1f92795ec45970eb6e5fc54aa13a5684f9936216c064f8a3843bf722bf54 name: blob-csi-driver @@ -111,7 +138,7 @@ entries: version: v1.21.6 - apiVersion: v1 appVersion: v1.21.5 - created: "2024-02-22T13:38:43.330670488Z" + created: "2024-04-12T07:18:31.450468305Z" description: Azure Blob Storage CSI driver digest: b403e9d49abfe076ecd83d6dd50166347ee4305f33dc840019474b2876723b9b name: blob-csi-driver @@ -120,7 +147,7 @@ entries: version: v1.21.5 - apiVersion: v1 appVersion: v1.21.4 - created: "2024-02-22T13:38:43.330079576Z" + created: "2024-04-12T07:18:31.449862625Z" description: Azure Blob Storage CSI driver digest: e4fa13670caf6b0d3e9fefa55d100daa439cd7187dabd45318ab03c7d4b17710 name: blob-csi-driver @@ -129,7 +156,7 @@ entries: version: v1.21.4 - apiVersion: v1 appVersion: v1.20.3 - created: "2024-02-22T13:38:43.329453983Z" + created: "2024-04-12T07:18:31.449232951Z" description: Azure Blob Storage CSI driver digest: 8c2c20547b2e0e1b39d2f2efd04c1bd778f14af5feae2bda86d722dac3c02643 name: blob-csi-driver @@ -138,7 +165,7 @@ entries: version: v1.20.3 - apiVersion: v1 appVersion: v1.19.6 - created: "2024-02-22T13:38:43.32837523Z" + created: "2024-04-12T07:18:31.448183785Z" description: Azure Blob Storage CSI driver digest: 0007ef225b5658d3989aa6fdc3a91a4b33696a438eee46ad9a675af615cbdf21 name: blob-csi-driver @@ -147,7 +174,7 @@ entries: version: v1.19.6 - apiVersion: v1 appVersion: v1.19.5 - created: "2024-02-22T13:38:43.327646408Z" + created: "2024-04-12T07:18:31.447588419Z" description: Azure Blob Storage CSI driver digest: 183c3e5cd84b709f1455cc7c84ed5bd573e8a24149fd6442d38999835b0a1711 name: blob-csi-driver @@ -156,7 +183,7 @@ entries: version: v1.19.5 - apiVersion: v1 appVersion: v1.18.0 - created: "2024-02-22T13:38:43.32640302Z" + created: "2024-04-12T07:18:31.446939966Z" description: Azure Blob Storage CSI driver digest: 3eac15488da5be7d1e78431929f7cda35bceb1af3fe107ffbd84606e047c9204 name: blob-csi-driver @@ -165,7 +192,7 @@ entries: version: v1.18.0 - apiVersion: v1 appVersion: v1.17.0 - created: "2024-02-22T13:38:43.325246938Z" + created: "2024-04-12T07:18:31.44632176Z" 
description: Azure Blob Storage CSI driver digest: 22cfa17fc5e8d771ff8edd26729266a9a8ee55c0e150df85ef15698f7fe985e9 name: blob-csi-driver @@ -174,7 +201,7 @@ entries: version: v1.17.0 - apiVersion: v1 appVersion: v1.16.0 - created: "2024-02-22T13:38:43.32465342Z" + created: "2024-04-12T07:18:31.445647499Z" description: Azure Blob Storage CSI driver digest: bf6249c0e3e3d3d009d4c79ceb7fda9a56c0565b969de753628792ea3ea5ece8 name: blob-csi-driver @@ -183,7 +210,7 @@ entries: version: v1.16.0 - apiVersion: v1 appVersion: v1.15.0 - created: "2024-02-22T13:38:43.324067023Z" + created: "2024-04-12T07:18:31.443843725Z" description: Azure Blob Storage CSI driver digest: 8daa35cd4957695cb64b45da05a15b4020df5545a8ac44c4668dad4bba82c8a9 name: blob-csi-driver @@ -192,7 +219,7 @@ entries: version: v1.15.0 - apiVersion: v1 appVersion: v1.14.0 - created: "2024-02-22T13:38:43.323452746Z" + created: "2024-04-12T07:18:31.443242253Z" description: Azure Blob Storage CSI driver digest: 442bc579b231aab626b9e474e2c0ed3f101d47d61c99aa9a7f863af7ce268d9d name: blob-csi-driver @@ -201,7 +228,7 @@ entries: version: v1.14.0 - apiVersion: v1 appVersion: v1.13.0 - created: "2024-02-22T13:38:43.322864744Z" + created: "2024-04-12T07:18:31.442607498Z" description: Azure Blob Storage CSI driver digest: b577b0b771138109aa90eb09d56fc07273ca0b584a263ee8f789e35796279f31 name: blob-csi-driver @@ -210,7 +237,7 @@ entries: version: v1.13.0 - apiVersion: v1 appVersion: v1.12.0 - created: "2024-02-22T13:38:43.322285362Z" + created: "2024-04-12T07:18:31.44197167Z" description: Azure Blob Storage CSI driver digest: 124e87af2581b374b89a39940698620c23d3eae6dcee518d302461ffea93e9a8 name: blob-csi-driver @@ -219,7 +246,7 @@ entries: version: v1.12.0 - apiVersion: v1 appVersion: v1.11.0 - created: "2024-02-22T13:38:43.321685494Z" + created: "2024-04-12T07:18:31.441339836Z" description: Azure Blob Storage CSI driver digest: 07c4d76017491b3d0bdd70de90e814096938bf7916da0c149c3805294bd57560 name: blob-csi-driver @@ -228,7 +255,7 @@ entries: version: v1.11.0 - apiVersion: v1 appVersion: v1.10.0 - created: "2024-02-22T13:38:43.32108312Z" + created: "2024-04-12T07:18:31.440704203Z" description: Azure Blob Storage CSI driver digest: 79716efa958385adf57eb3570843e1b4512d8c801e8e070625e94264f3e917a9 name: blob-csi-driver @@ -237,7 +264,7 @@ entries: version: v1.10.0 - apiVersion: v1 appVersion: v1.9.0 - created: "2024-02-22T13:38:43.344668206Z" + created: "2024-04-12T07:18:31.466447348Z" description: Azure Blob Storage CSI driver digest: fca0b9215d3277346f68c643fb3ead75158971f0d1945ab01ec559196f3cf842 name: blob-csi-driver @@ -246,7 +273,7 @@ entries: version: v1.9.0 - apiVersion: v1 appVersion: v1.8.0 - created: "2024-02-22T13:38:43.344048475Z" + created: "2024-04-12T07:18:31.465869124Z" description: Azure Blob Storage CSI driver digest: 3b78e2ab4f33577c54d4f57276c824717d2ad2aa3741210e938fcaf927bc751f name: blob-csi-driver @@ -255,7 +282,7 @@ entries: version: v1.8.0 - apiVersion: v1 appVersion: v1.7.0 - created: "2024-02-22T13:38:43.343397477Z" + created: "2024-04-12T07:18:31.465292778Z" description: Azure Blob Storage CSI driver digest: 28da5b55c3d2689d6da85eb7da344385e9cb99bdb2af18c24fea93670abfe7ea name: blob-csi-driver @@ -264,7 +291,7 @@ entries: version: v1.7.0 - apiVersion: v1 appVersion: v1.6.0 - created: "2024-02-22T13:38:43.342587905Z" + created: "2024-04-12T07:18:31.464694601Z" description: Azure Blob Storage CSI driver digest: 6f24f2e6623f6f8862e47d4fbdf13b5f351ceec6bb9a4591ef7fc2fca9fc1eef name: blob-csi-driver @@ -273,7 +300,7 @@ 
entries: version: v1.6.0 - apiVersion: v1 appVersion: v1.5.0 - created: "2024-02-22T13:38:43.34103753Z" + created: "2024-04-12T07:18:31.464152794Z" description: Azure Blob Storage CSI driver digest: 95d14c9b70b319760d388ea47727c8c97e9287867a8852aeb67b7175b52fe8f5 name: blob-csi-driver @@ -282,7 +309,7 @@ entries: version: v1.5.0 - apiVersion: v1 appVersion: v1.4.1 - created: "2024-02-22T13:38:43.340523852Z" + created: "2024-04-12T07:18:31.46361856Z" description: Azure Blob Storage CSI driver digest: 5fcf69c449f065fa1d5722e5a7fed8a28000efa790907e9ff4b552c5fbd16d22 name: blob-csi-driver @@ -291,7 +318,7 @@ entries: version: v1.4.1 - apiVersion: v1 appVersion: v1.4.0 - created: "2024-02-22T13:38:43.340000695Z" + created: "2024-04-12T07:18:31.463097533Z" description: Azure Blob Storage CSI driver digest: b466543344a6411f6130ba87b093955d39ab8614c6b4ed8505a0a0c96073cb33 name: blob-csi-driver @@ -300,7 +327,7 @@ entries: version: v1.4.0 - apiVersion: v1 appVersion: v1.3.0 - created: "2024-02-22T13:38:43.339467398Z" + created: "2024-04-12T07:18:31.462518429Z" description: Azure Blob Storage CSI driver digest: 58d02cb70a3a966b349d62e880b7149fb06ac009474e35e580784fd3c98a5b07 name: blob-csi-driver @@ -309,7 +336,7 @@ entries: version: v1.3.0 - apiVersion: v1 appVersion: v1.2.0 - created: "2024-02-22T13:38:43.328869209Z" + created: "2024-04-12T07:18:31.448639791Z" description: Azure Blob Storage CSI driver digest: 27fb89f20b5fddc7329e6d7c2374857b22c1d61592e397a53f47121eea68c344 name: blob-csi-driver @@ -318,7 +345,7 @@ entries: version: v1.2.0 - apiVersion: v1 appVersion: v1.1.0 - created: "2024-02-22T13:38:43.320462033Z" + created: "2024-04-12T07:18:31.440082082Z" description: Azure Blob Storage CSI driver digest: a251a55243de207c69ef53f72abee45e93b72fa4fc43dc204b7f1cdfd459acdb name: blob-csi-driver @@ -327,7 +354,7 @@ entries: version: v1.1.0 - apiVersion: v1 appVersion: v1.0.0 - created: "2024-02-22T13:38:43.32000298Z" + created: "2024-04-12T07:18:31.439612191Z" description: Azure Blob Storage CSI driver digest: e83f037a165eafc83a978bd7e6bf6221b052ac34363aecb12e6a73607dc58b89 name: blob-csi-driver @@ -336,11 +363,11 @@ entries: version: v1.0.0 - apiVersion: v1 appVersion: latest - created: "2024-02-22T13:38:43.319662364Z" + created: "2024-04-12T07:18:31.439263063Z" description: Azure Blob Storage CSI driver - digest: a8a337da1fb52e80e8bf9e7cd1d6da9179de91c7577a50822547e476324efdfe + digest: c124c5652331e6b3e47e600740a65aa86b1cd31a899a586dec4002c59d51268e name: blob-csi-driver urls: - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/latest/blob-csi-driver-v0.0.0.tgz version: v0.0.0 -generated: "2024-02-22T13:38:43.318844561Z" +generated: "2024-04-12T07:18:31.438375493Z" diff --git a/charts/v1.22.6/blob-csi-driver-v1.22.6.tgz b/charts/v1.22.6/blob-csi-driver-v1.22.6.tgz new file mode 100644 index 0000000000000000000000000000000000000000..a5c9a50a4a40e51f66605f38468e8ccab15ff1cf GIT binary patch literal 5869 zcmVDc zVQyr3R8em|NM&qo0PH>6a@#nv^Q^BxDYLb+dqQ1&%dD#W;8;pl6-QQCPGGL;{UDo+39!fle5<8o0Ilg`{=t?`=ot#@*QgJkA>3^P9)M@Nmbng_z=B$WE$#cvS@^wwu%G?I+tDWJQ(mp!O|K}(qz*Fed z5E^3vHxRN83S8a@h|8H^6WMTCpx*jyte_C$5Hdj|bYW+G4; zSmJ3q>(zrW^8F$6iM!~a{$#-9kb?j!YY4f53^|(<;+b$C)e$HI^6IdBEJ z0YQG-riZzykQ>ppq-w6uA}?MrhSAt3?xlKCtG#-KZeRi&s0&Ck#3&%fQNTDL0WJqA z)Z;OrNPvgF&VXt%gietbSD{dFm~_xg%20Hg8KxVAH9h8vro`flXu6C}$TZ^Mgv@hX zMkOW`cvInXG%_dTWR4{yUAk!2^z&il`y<0m2mSm@?GA$0Ja~%_urMG496+rm zfX|5w7p}`9Dpmiw9I%8Ak~{+R3jL&-qW*r3VpxO?us~ml@2fVODju3JjutFxpb<#b 
z9TV&eKxCpGqOX|hhmipFnHWWQZC1<`5yrV|iF_Urm&=DBDLW)_$CTJA15c3=N zc|xCY-Tkr9X3%Ld+yPOIkFi_q6or0iGicGcRGhlX*ifi3rA%V=OUnfIJpG^VBRK;U zGEdY(=3S&v9rW;6M&si#_^U8nX#i6hIS#svgk(XX9-)$<03`OX#5GrgWGW)`3Vr!e zI5-T;Hjv`!3g)0KCX9xhFizy+(#JyM2P%|dr3X_ng+#>XWDbafU=eo#i5cT^Q|WgZ zbs?1L1kkAAuVwXf1DW zFVR$Ss``_KA>%Tl>ZN3eM-(k59l`=pMmb{=jTcCOlxj+2HR(0FOM^{O7%vdq;~?|_ zb=2*$wc6;hRt9>k)iP6JDQuRhq2yAjr%T;m>8$P`tQ44|Uz)YX ztA-XKcAA2O&@oF^WEp`bqT~QWDFFD~62A4m$eB+3|^e>6-!e9^oBP9hvsoGIe%hKLzV(ljqORc498b?p0QC!HsRoCIkgsABp zt2m|oN1#+I*W=aay-kJF?U}N-G{UT1iC#@*}>#4uQTSJ`AEDQ!^;MsQ<%k z15M3XbAJjwETQ{$Sude?iQO3tv7B{KGtg!_4wWvyB>{{i4#GawAYGEtyMeN`hQI?N zgt2^DZ{EBq>)B`3e3cuV7gn}UC4zsW6Y&*GcjmF_w{6i*3fHQ4%!Im7tbf!y5+irD z31Asjha z6{Mk6RqR4A_OdeAE~cQEsVg3%fPL2-zbwy+0C(j(M+fZ z>F=6PXmqbyqtvDkvV)p)%$q(LH{&NYJ*OpS9pu57U|Opw1I}mITPjGck_k}R@5W?a zp`V$yx3T{V>QA)6d~wlLlU*5EX0xJqOWeUikSPTZ1(@QgGWue?h!|^+P+ecBJ(vWl zHiTH{N3q4O2}-r&6lw>lLHb?vmB^U}+m32XqTf~W7vs{+dT9sNp**P+{l5}|(8m%) zb8vmz8#Uy;d@_x7@t@OttU1!xb7_Ef~G zrctZiGUU#{{jvs`acxK}Bk>xAK41ZA1KFv?+SZjmavYjiuICRAsBr@tuCcUd(c`0H z$slta5wDKtb#5R_wb0{Z9XXC(zYhv4SE}d#-36LD0K*K^>Ucz}LamMV48*|GkU1jaf< zbtgpz)YE@!-fEZHscd*L+$mP$$g{GHbSz=KNI5pn-A2vMxCTjyg`7}1LG`}~=P#n3 zZ-Dtb-ZG-!wa-jxsrvuaQT?vIOJi6NFlDzH#U`qlOaP1%+F-+BSa4JroBZJcC38DB zHVtI0q<6{GH5H4>w3=n(Ft6g*&+w^U66*SqSJj#d&RiCRjOrak#=l;M3o(}tn{3_k z*z|SAi+tOYC`ZEkBpLaOWzK{h;WGU}VJlVn9sL?Hnd{ZZ$C}yzHN>o`#n9G-SUwwB zcH)_o4NREVNV0$9z+VBeB%B*rGws93_m_dW3FE0pUm*K9hiU9AGuSDI#Q>WkTnf7B zN*vQEtgMHYaXHn!6|KoP?4@sbIAXCmhy#A zuY3Md#*C!YbahVj1NmkP1m7C zsJv|W%jM#CL<#kPN_3*YMF=|HoW`*1=#%$`M6wEXV!y6*XIB|EH)hnvYI%iASw1rr zlld2u&vd-@IOaInf|pG@?#EJvx%%>tN2CCcZ&iwqSUV5nQSL}HlggF2Hhl_6arZ*b> z%>w8H_JBJOr-rSE*uWKOL82s;Me@9T)Do80a4$J_Od6b%Eb-_0wQ&ioL-#l&&aa_3 zKW(jo7)M;l^L8r-1aw~PMkBp(H|5?T6He=(W}N%k5X}8j6og?W=X2c{r}?4z;hpO+ zq7F-zhS@~6O*lNsBumUsH~gEeSwL`yv9=3mKvE{QDm+NBz6rpgX+o4{mMCTAbmzoGh(|{@1aS&E$jHDH~Ui8uBdT z=4rC8w5`J0tLT_Sf+cKup)FKo&!NB7dd(r=&f0O1IA?T%QfZZ-sXuXOg+h z@1Wbu;Vx|CW!FkM2HB9ZH9O9f2_}BT;daJB%$V<)EXB0UqzvDK>=Y>()m+g5x^1?1 zYS&M&-vu8pM&L5)i4Hn$Me;ooR%(GXnHUt0n(@pR0@}}4A4Q~4vS9kCF zS3R3i_>Qxn*jBwinvm&79Dabs4NS@a$*{y(s~kK>?eHvcI^9$W2jy)3SPp%KW}e`qr2hF^}qgYr(f1$8N3Cg*7ao# zhnq0CJ%8J#s`s{*S&{ zPg$*)ZiZ2dr`pP&-dX4)B@gBKYE|(wCQM`UX6O2a!}##KgzKQg;+mUuttHG(D~DYb zujR3CA^i`EIlPpV%tj z{Ysd1`Tr;7{GWE~kpKTI#c`Zm%8xKP`;9L@n%u>9&148&j{4?Y-|A$bypyny;JEraP0q==&udtKV=>~=|ZU0^Z&SYd|bT$>CNf!A^-1LN}T_PAz*X_ zJEZ?<>DXuDUjgAxY9rDotw)k$3DfwvxH6{+oti_~i3L?Wyk6uZ|32X;PD#lk+$D3c z)Y2Ep8MJV5$QFFDY(ZpMkZ zPPmM)@1&>fQz^+z2i@=4)LF}1&%>;r@8-cDH@{K0XHS`pw(hje>r~57A}?1YLdI9B zILUn`PN?c1GDWIAxh213;hM&zxGV@T^-50PX1=SNYJ?hMmi4?=iefD2bK=8P?WqcZ zfl3E#xA(SxF}Qnob3M524Z3zmY@ISns05B(N#>=5MuVw87~Ni6UERICx_h$_R_ecb*&rq zZtp%`{Oj&|*c<48>D}Pd$DK%)?oz6xspvh4RInMHZhv&~_NsUHY1qBE?RDSoM5}Po zQ-Rp}ID)c%oJg#SA}Ek%5hPb1q`R)Z0@*6ny^!~sqJEBq$Sq^rhosy;cT%pNSDWUW zDLnJ@qm2V=8u_mIul5_5H;^~WwP^d5y0rw#l3=T{qXd2n>f1|W8v8DR#T$y{py@z% zh~@13`yaYD{m;FdyPMwoyV3RKhr8j$?LW&H@-ns}YY^_!ZG2L!R-D1S=4S8xHYC9k zFb~e{s0!+R$GpVTP3*~?0{WQ++tOS+u8i=RC~n+z^!V5e{b)+4XjaBBY)pKUgn+bi zZbVk2$b#;w11(eWa1!7dP6C*9ZeM2r@*Bcz&E*D2r+%(PV!QwaTqU0Ez5)w61x>v3 z?|=C4>22?BaNX_Q4K6;<0;?bc*?E;hvzufvT6%x-Cl7P;wd)YJbX($ow=;FoRtM#N88)q?sQEaQIHE0 zrIk%Smoj$tE0KqzIWK-R#~uf-&bLSy&oakC*<60xuc@uYrv92$p0z{}l`&a*oyX{t zaWi0Ez~S9DLrztu*k&D!dYp5tz@mrq}${a=izwapQ}`#|KVfoI(xYUe9ig4v*WYl!uh}U zo7S7d`Jd+~g(<2Ay1hyToHer}i#xdTEP8b@-*IQdPl!n zz`X=cv7fz6RbPC&`}skxlvhW88~}$zANdm<^mD!bi^i0(|9`HB%QJqlB^v|HfsZx7 z6lDE1s{2If|MgZt{bBk^{pBgk26Vk02)aHd!Nye0*(MvU{Mk(#Cu@D=!bHg^l z9H`u(=i=79M)k0Y-1=s@f~e(quMK(Jd8q 
z7L&Gc`LQ#ON$q~*HyZy3Tp7<=)09!}^iC}`c(%DBe3K)IO54PthqB6hivCK(Vnx4K z4c<3Zweej%e+D(Xfr{9(mVFY1run>Gk{1F8^6o&7gOPm+O#~T&&|*0>ztfY~a?C&Z2u*_x;?1YW?pi+w1@B zcLHnm|LL28{y#cBJ$(QFS;~v)|J0y7X#RudKa@i`ltVd`XDDc zVQyr3R8em|NM&qo0PH<&b0Rm=`OL3SvAMe3Jv)H$%jT%^2d`l_)Os-m-p$>UO1Z>L z13J%06G_9GIR5WXNt$8aVZf}t$%FDC>zP(}OVet#`bA7!HnD6$EQgZ?@aE+lbJzjdl{JKHL57?yh*A~6s}WOz>&KlEG>;pnjn*cT zE?EEtA%>hypc=)fi?6yQ=(QV1t<3}}bD*mi+6ZPuNRGD{sOlUpQ;SF4Q!MKy5&hAw~f)jy%Qz39vax zp&m~F1p*xOWe!x6A#{ecxC(`W!?c6uQu?CPOfcObtm!aYG$j^yqG>ZaC9{Boy8aK9rVkuwO7bOW_$CN;m2EkfTaN$U=M0F z0enGhxUg*&P^sqE=71%1kmLcNSLkOo6!q`dD1=4W01NbmxUL$r>EfU%<7ml(1{#A@ z(=o-a07RzhA^L);ei#T)e{;mX(1xPNsN~Fbfp^rGw#;H&J+uTyuaIIEu_+pdEQI`q za~{!WTu*-}vmoS^ zF@;3H7i0m5gJ1!-0f{-|va0mk zjN0JKcme3yX{trwhomi^19;l3Yu5ZEKLh1v!fRt)UW3}itx=VviQJ5?d++)vo z0d>^h8ITToP`EF|C!s|BsMR_`kF`9|W38505-VXVSQ<($m3q3=^A)e^4#G--S^A~f zYn*au;bR**NPQ5?%wG*DSz}*%p08$i)n_ICb0*{fvg^3(6q^ zPh4=65{MZ`4g)o>5;@=lbr7TIxa^?zzs`=pg-9K(FaXtmL6%6jJo?((p5MF@^Y*%VtXE-3~g{xQrwVL#}AQSA2Oyis&!J zyr=i3(^l&v37>NKEdY^(vi(3wftTynj(Qc0x189~6N$yvQaO$G6KP~O@~<^@cseC& zIfp7vY5y@O)yjDINHQ0AsC_jS3*Eu8gX(&x;LL?aT0?nSg6bhf=BwTcTfKKrgcQ*W zm`=f#9aJB%u{{STaG_oaf4De&C5&I4iuD9CHNCaw)slMrVhTyW?NgUf_{`kEgAcI0 zux$|HXCmi)2dLEI5G`^K-+!Wbfr+cMQwJSY(=Nw3{Qy)Uc z>i;sQK+`i++@E|0OX$AM4eAoxa~NVd@1Ulqt#s@wRenP}7)$K=eX2paB%yXaWoiw9 zdqfCh__W@Lu;zggJ8^MWv^Y#KrvHSJOetQ2J^#|{+U!$KdMuYy~{cF@4jjl(^;!OuT>%_TIrSEEr{(*@Z<%?0L7mrR=BlbT-BlCutSU_vmh)s+3_GVHAMqt?g-sO)z` zGOy4tOq<)-{T216+Fri6=&Hr8Y%FtD(T62&VIjzjf`dFv@l4r#Aznm`u}7%xFVqoC z0#)ZhEcBz$VAlktI&cbgfYc!UF8V^`T!U>#H6+pRD)|d>>0!OJ|LV}5*0TOz37+p_ z38Fc;zUhq{@?Jif#-{ks@mV|L|2;iBYaRT*&(Us_5?UA2Q3_`k9z`ASUA6@6LD8Oy zP}ekSwHt=)IoNj_kQvv8#4-@Ck?#T)pbn6oTBvSq=_1Rbk>z^+@PHa4&~S~FJ&PV6 z6-x%0Wr=WmJg;*Dnd^lfAM41n^ag&=SgBUN{%_~t`oKj)`cDi6ZCL;9%%#Pq zTaP@neXYqdJ@zQd5%E5azWl{1XQCM4D*ZuW%XRq!{T48pn$^e0nmPeB#Js7+uxJRO zd^VEogexf-m@u!AWdFgQy9Q!KIQ3=CviAemT?OW*jK>~*fr`JgAIHuTA3NqS8(>p} zb3r%ViDNp0wf)d4E~loqWHjl4y|m8#mRkA#Ul1hOF)d{M{@-r3v-w}`)|)r&!~Xvq z?T%2VgSy(V_khY;G9e86zGyCvYEjHHcJZjS7AcchJZQK*7^u+qTxP=8P`FzdCzg(y zZbFMte%o+Y+r=M<66yez=tO}FA9TDqj$sv}kKP*+(Js^p9lO$=UnSVA%xDp-`5i81 z`OI{T)?Y|IG4a~vnBrs$UaC&qkGU?f*3P=P8j*i1*~hs2BG5+UKx%3aeO>Zk6)G|K zL+xBbauu16Ge6Oc$-wMXp_y3FxNXxyyVlR!q~+UnE;9VrE_KmS>=*`oxm1jNO{Ot zA3kk@G0{QVty?B&Np%nhxf6o4K9(7JcvEvzF4b9L$>4bflBvLB3FUP{mz=PSeYi=` z3erRpA1n7ZY1~GiJDsAjVqJi(4ErHx>I@n`M@XGVJ7^S8Nj&J96Xe?QCE_D7;Z)FM zixJ1RZp@RFh}R~w55vzq$70AqpYWRX^s;KClO7sz_FYnGh3m9~>WRrU{i7^R^!S*2 zw+?q_6STxL^!Qje6W|J{$j|<0ruPAaFrSH}7^rElTZ{8{J@-)Cuh?-o2yy=X4;S6; zs5c(}qX5ta>;ShQOby!%QNa~ifJ8|u3*`AxyVVi}@8Mo@Y?(YbE864F)0^W8SjXC1asOkUFH9lMoXXH=*<>2{~?td&L(>%OO1R zI*yv70y`sRErx&@b`uuJQ9H?SG-**F-ZH7f6_iCPbiZF+u?BU1*`|Sa)D-)oSVd2y zHA0y{8c6*$Rx9>cMF*{i7MG3Mn>!|W*7yWy|4-C2t=kqh%7iuWI&j?V~{=imQuIqu)~2HoLx ze{fT@BZkSLtHsi4=>Iy!WD}{R#gvT~NqzDpY+futBw zjKLDx%U*yelXFtSkxs8O+0&MBep=aKB_^gY`EEPRv6WE~{Tc&A=kQtJ`Er?}SAxDU zAuxkk>A#$WuB>B|*Y_0lh*tZvf%rZ1&gV=ro4Flyb2;3FjkN3*QVu~XQZCG_Gi{2A z8*sRpa}aapIwrv}ZgVO9_aHl2N=7x8On@F+(K|wkB-rhOi-GjW{fn#Hcm1ngkx}@LGcP+?l!r=D&ZNpw#sNP@Sj=LXjudd(U zUiCiruBuYaXAG811N;{22&MB-T_>)LCAROh5-gnx!(ealxj(udeC!QwwAc?XZhk5` z4}r83B|1&bqEK`+;l_Ujqnjsav-P-&ygl2QDcd?+QLaGN%8 znN{x`uyak_Wr2QJzmgKnYTshR7b=Y{QnY zDdw07?371*ibOWsxiFZU;NYc`CjLULLjF%^TJL@(%%=SR)3dW|{?F0j{+H)zmSv?< zeuT-{Z`}Q8au<(kCPV0Q+&6`N>yv@fp1w?q&kngeSW0ZqNAO!fI5=_jqtBc^ow941 ztZd8Gvn3b{!(WXQgkjNaeOF`(_R@CYBXeL?ST|JjBBZ%HMLp>GbMSy;R~O>l0Uf0( z{49h6hk#C;#XjjYn|d3%=-w#U-n~7?bOzs$0;{n8DRbaS7ecLH|HrMa0R)8j+_ z|Fg6({||%5=oofL|I^a3&&0nB!kyGcpi8aCl4A+8uwGo*#DvaF6?S9>WsBFde5Bt; z9EB+zNh^x-iDT2d(H|0)R 
zpGTMgbf}1S^(Y4{I$vy)K&V*UHFYqTb7oe~?%8y4u4hDAA-l*uHP;9z1E?EtLTBLx zB81NRGs;$vd-q@m`j*br886A;4RB9n11*-(RlR$kLwJ`&K^P^x4n>;#0P-!qf?&X8 zpBBP|#rlQs6om!hGQh4Cm)XZsl2{IU-jk`b1#3MGvw6Ln2j6afqaM#*GaJ>+T&~G( z3@=KtqFYKVUoG}S4oA6DuH8Ll0_~_(O~Ki#y1yNSGRAkuHU}< zG%gGTr{`YFbn0fGM&@>&0*|mE77@Aq+#8Mi*MpsiW$p84$f)hyXMdqUjSV}-U_Z|+ppAZ zB#@VcqAoj1;MbtOy)?%0^Bh=so>_e~9SrxeoS%RHLwD5w+#B7Fdhc(?*Own|hZi?L z@ z5Iz$L4u_5&ADg}#%m@|D(hy1!6JI5%Anu&{BI{8kL3h=G1xxWzP4EoW1ZJPxR~cC*b`)0^6%fTR+CyA+V5H~Z@1 zIwjybC32eimApKF?re-=;e)N!lm1blA`TBFE?=X>W!KQibCkxcI|3FSuQ>DZ44a=^ zd`ldi{j9ZIAq%>Xwzt1tg-#lgmkMabolQEKGIsVWk%#i07ccKA4#StHdqj*UnPa|e zu6|musjbDP{xPjIYmp!-V>0(TjnO5OrpKJXh3srbk%Ng<{C>aXO&O;~VPVs@^JgU| zBKq}-gWIFr?NJt3PfcL`<+q%zL@C95sb{SEKDPHY_`I82?!%W>E)|4B*H*zKK{tj( zH(|T4rlmD>L@0)QBF6 z7Ykc?{STjD+uF+|;2Y}y&W_KHGxdK*Z(4`@zn`ULR)YHI4RbN2zPLXu{xd)GGh!~J z3WPK?4&Dvq<_m|y`^?l5!VXkMebUq){8uxvrpuD90#Jqe-B7oqdf(o4NehnuMfwFZm57{{>rytJVx`sDGMW!DfzRd&p3%9E0|EF96vf=!HbbQ>--v4uQe0(_nKS$ee{(shu zK-VFBOIyGTDgNXvkw{ra-3IH|&@Dxw*CjH;a{r=-g>)`)= zo|cP*qz%1zC&;8g7AlgpC$tSVdz0 zaTSgY+(yz#bPt<;Klh+o|2l1Z{lEQ5V59y&eUs7u?bDOP{eREWUQGYT2IWEXA2k1= a9onHC+Mzvj`@aAH0RR6FDUVzLr~m-Ri?A91 literal 0 HcmV?d00001 diff --git a/charts/v1.23.4/blob-csi-driver/Chart.yaml b/charts/v1.23.4/blob-csi-driver/Chart.yaml new file mode 100644 index 000000000..28ef49829 --- /dev/null +++ b/charts/v1.23.4/blob-csi-driver/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: v1.23.4 +description: Azure Blob Storage CSI driver +name: blob-csi-driver +version: v1.23.4 diff --git a/charts/v1.23.4/blob-csi-driver/templates/NOTES.txt b/charts/v1.23.4/blob-csi-driver/templates/NOTES.txt new file mode 100644 index 000000000..9ad135dd4 --- /dev/null +++ b/charts/v1.23.4/blob-csi-driver/templates/NOTES.txt @@ -0,0 +1,5 @@ +The Azure Blob Storage CSI driver is getting deployed to your cluster. + +To check Azure Blob Storage CSI driver pods status, please run: + + kubectl --namespace={{ .Release.Namespace }} get pods --selector="release={{ .Release.Name }}" --watch diff --git a/charts/v1.23.4/blob-csi-driver/templates/_helpers.tpl b/charts/v1.23.4/blob-csi-driver/templates/_helpers.tpl new file mode 100644 index 000000000..d99392f32 --- /dev/null +++ b/charts/v1.23.4/blob-csi-driver/templates/_helpers.tpl @@ -0,0 +1,49 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* Expand the name of the chart.*/}} +{{- define "blob.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "blob.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common selectors. +*/}} +{{- define "blob.selectorLabels" -}} +app.kubernetes.io/name: {{ template "blob.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Common labels. +*/}} +{{- define "blob.labels" -}} +{{- include "blob.selectorLabels" . }} +app.kubernetes.io/component: csi-driver +app.kubernetes.io/part-of: {{ template "blob.name" . }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +helm.sh/chart: {{ template "blob.chart" . 
}} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels }} +{{- end }} +{{- end -}} + + +{{/* pull secrets for containers */}} +{{- define "blob.pullSecrets" -}} +{{- if .Values.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- end }} +{{- end -}} \ No newline at end of file diff --git a/charts/v1.23.4/blob-csi-driver/templates/csi-blob-controller.yaml b/charts/v1.23.4/blob-csi-driver/templates/csi-blob-controller.yaml new file mode 100644 index 000000000..9ece72de3 --- /dev/null +++ b/charts/v1.23.4/blob-csi-driver/templates/csi-blob-controller.yaml @@ -0,0 +1,216 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ .Values.controller.name }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.controller.name }} + {{- include "blob.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.controller.replicas }} + selector: + matchLabels: + app: {{ .Values.controller.name }} + {{- include "blob.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + app: {{ .Values.controller.name }} + {{- include "blob.labels" . | nindent 8 }} + {{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + {{- end }} + {{- if .Values.podLabels }} +{{- toYaml .Values.podLabels | nindent 8 }} + {{- end }} +{{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} + spec: +{{- with .Values.controller.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + hostNetwork: {{ .Values.controller.hostNetwork }} + serviceAccountName: {{ .Values.serviceAccount.controller }} + nodeSelector: + kubernetes.io/os: linux + {{- if .Values.controller.runOnMaster}} + node-role.kubernetes.io/master: "" + {{- end}} + {{- if .Values.controller.runOnControlPlane}} + node-role.kubernetes.io/control-plane: "" + {{- end}} +{{- with .Values.controller.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + priorityClassName: {{ .Values.priorityClassName | quote }} + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.controller.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} +{{- end }} + containers: + - name: csi-provisioner +{{- if hasPrefix "/" .Values.image.csiProvisioner.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" +{{- else }} + image: "{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" +{{- end }} + args: + - "-v=2" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - "--timeout=1200s" + - "--extra-create-metadata=true" + - "--kube-api-qps=50" + - "--kube-api-burst=100" + - "--feature-gates=HonorPVReclaimPolicy=true" + env: + - name: ADDRESS + value: /csi/csi.sock + imagePullPolicy: {{ .Values.image.csiProvisioner.pullPolicy }} + volumeMounts: + - mountPath: /csi + name: socket-dir + resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }} + - name: liveness-probe +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port={{ .Values.controller.livenessProbe.healthPort }} + imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }} + - name: blob +{{- if hasPrefix "/" .Values.image.blob.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- else }} + image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- end }} + args: + - "--v={{ .Values.controller.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--metrics-address=0.0.0.0:{{ .Values.controller.metricsPort }}" + - "--drivername={{ .Values.driver.name }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--cloud-config-secret-name={{ .Values.controller.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.controller.cloudConfigSecretNamespace }}" + - "--allow-empty-cloud-config={{ .Values.controller.allowEmptyCloudConfig }}" + ports: + - containerPort: {{ .Values.controller.livenessProbe.healthPort }} + name: healthz + protocol: TCP + - containerPort: {{ .Values.controller.metricsPort }} + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: /etc/kubernetes/azurestackcloud.json + {{- end }} + imagePullPolicy: {{ .Values.image.blob.pullPolicy }} + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: 
/etc/kubernetes/ + name: azure-cred + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + - name: ssl-pki + mountPath: /etc/pki/ca-trust/extracted + readOnly: true + {{- end }} + resources: {{- toYaml .Values.controller.resources.blob | nindent 12 }} + - name: csi-resizer +{{- if hasPrefix "/" .Values.image.csiResizer.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" +{{- else }} + image: "{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" +{{- end }} + args: + - "-csi-address=$(ADDRESS)" + - "-v=2" + - "-leader-election" + - "--leader-election-namespace={{ .Release.Namespace }}" + - '-handle-volume-inuse-error=false' + env: + - name: ADDRESS + value: /csi/csi.sock + imagePullPolicy: {{ .Values.image.csiResizer.pullPolicy }} + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: {{- toYaml .Values.controller.resources.csiResizer | nindent 12 }} + volumes: + - name: socket-dir + emptyDir: {} + - name: azure-cred + hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + - name: ssl-pki + hostPath: + path: /etc/pki/ca-trust/extracted + {{- end }} + {{- if .Values.securityContext }} + securityContext: {{- toYaml .Values.securityContext | nindent 8 }} + {{- end }} diff --git a/charts/v1.23.4/blob-csi-driver/templates/csi-blob-driver.yaml b/charts/v1.23.4/blob-csi-driver/templates/csi-blob-driver.yaml new file mode 100644 index 000000000..9c5de5b91 --- /dev/null +++ b/charts/v1.23.4/blob-csi-driver/templates/csi-blob-driver.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: {{ .Values.driver.name }} + labels: + {{- include "blob.labels" . | nindent 4 }} +spec: + attachRequired: false + podInfoOnMount: true + fsGroupPolicy: {{ .Values.feature.fsGroupPolicy }} + volumeLifecycleModes: + - Persistent + - Ephemeral + tokenRequests: + - audience: api://AzureADTokenExchange diff --git a/charts/v1.23.4/blob-csi-driver/templates/csi-blob-node.yaml b/charts/v1.23.4/blob-csi-driver/templates/csi-blob-node.yaml new file mode 100644 index 000000000..842f76e47 --- /dev/null +++ b/charts/v1.23.4/blob-csi-driver/templates/csi-blob-node.yaml @@ -0,0 +1,326 @@ +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.node.name }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.node.name }} + {{- include "blob.labels" . | nindent 4 }} +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: {{ .Values.node.maxUnavailable }} + type: RollingUpdate + selector: + matchLabels: + app: {{ .Values.node.name }} + {{- include "blob.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + app: {{ .Values.node.name }} + {{- include "blob.labels" . 
| nindent 8 }} + {{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + {{- end }} + {{- if .Values.podLabels }} +{{- toYaml .Values.podLabels | nindent 8 }} + {{- end }} +{{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if or .Values.node.enableBlobfuseProxy .Values.node.enableAznfsMount }} + hostPID: true + {{- end }} + hostNetwork: true + dnsPolicy: Default + serviceAccountName: {{ .Values.serviceAccount.node }} + nodeSelector: + kubernetes.io/os: linux +{{- with .Values.node.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: NotIn + values: + - virtual-kubelet + {{- if .Values.node.affinity }} +{{- toYaml .Values.node.affinity | nindent 8 }} + {{- end }} + priorityClassName: {{ .Values.priorityClassName | quote }} + securityContext: + seccompProfile: + type: RuntimeDefault +{{- with .Values.node.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + initContainers: + - name: install-blobfuse-proxy +{{- if hasPrefix "/" .Values.image.blob.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- else }} + image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- end }} + imagePullPolicy: IfNotPresent + command: + - "/blobfuse-proxy/init.sh" + securityContext: + privileged: true + env: + - name: DEBIAN_FRONTEND + value: "noninteractive" + - name: INSTALL_BLOBFUSE + value: "{{ .Values.node.blobfuseProxy.installBlobfuse }}" + - name: BLOBFUSE_VERSION + value: "{{ .Values.node.blobfuseProxy.blobfuseVersion }}" + - name: INSTALL_BLOBFUSE2 + value: "{{ .Values.node.blobfuseProxy.installBlobfuse2 }}" + - name: BLOBFUSE2_VERSION + value: "{{ .Values.node.blobfuseProxy.blobfuse2Version }}" + - name: INSTALL_BLOBFUSE_PROXY + value: "{{ .Values.node.enableBlobfuseProxy }}" + - name: SET_MAX_OPEN_FILE_NUM + value: "{{ .Values.node.blobfuseProxy.setMaxOpenFileNum }}" + - name: MAX_FILE_NUM + value: "{{ .Values.node.blobfuseProxy.maxOpenFileNum }}" + - name: DISABLE_UPDATEDB + value: "{{ .Values.node.blobfuseProxy.disableUpdateDB }}" + volumeMounts: + - name: host-usr + mountPath: /host/usr + - name: host-usr-local + mountPath: /host/usr/local + - name: host-etc + mountPath: /host/etc + containers: + - name: liveness-probe + imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} + volumeMounts: + - mountPath: /csi + name: socket-dir +{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- else }} + image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" +{{- end }} + args: + - --csi-address=/csi/csi.sock + - --probe-timeout=3s + - --health-port={{ .Values.node.livenessProbe.healthPort }} + - --v=2 + resources: {{- toYaml .Values.node.resources.livenessProbe | nindent 12 }} + - name: node-driver-registrar +{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- else }} + image: "{{ 
.Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" +{{- end }} + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }}/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + resources: {{- toYaml .Values.node.resources.nodeDriverRegistrar | nindent 12 }} + - name: blob +{{- if hasPrefix "/" .Values.image.blob.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- else }} + image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- end }} + args: + - "--v={{ .Values.node.logLevel }}" + - "--endpoint=$(CSI_ENDPOINT)" + - "--blobfuse-proxy-endpoint=$(BLOBFUSE_PROXY_ENDPOINT)" + - "--enable-blobfuse-proxy={{ .Values.node.enableBlobfuseProxy }}" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--drivername={{ .Values.driver.name }}" + - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" + - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" + - "--custom-user-agent={{ .Values.driver.customUserAgent }}" + - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" + - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" + - "--enable-get-volume-stats={{ .Values.feature.enableGetVolumeStats }}" + - "--append-timestamp-cache-dir={{ .Values.node.appendTimeStampInCacheDir }}" + - "--mount-permissions={{ .Values.node.mountPermissions }}" + - "--allow-inline-volume-key-access-with-idenitity={{ .Values.node.allowInlineVolumeKeyAccessWithIdentity }}" + - "--enable-aznfs-mount={{ .Values.node.enableAznfsMount }}" + ports: + - containerPort: {{ .Values.node.livenessProbe.healthPort }} + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 30 + env: + - name: AZURE_CREDENTIAL_FILE + valueFrom: + configMapKeyRef: + name: azure-cred-file + key: path + optional: true + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: BLOBFUSE_PROXY_ENDPOINT + value: unix:///csi/blobfuse-proxy.sock + {{- if ne .Values.driver.httpsProxy "" }} + - name: HTTPS_PROXY + value: {{ .Values.driver.httpsProxy }} + {{- end }} + {{- if ne .Values.driver.httpProxy "" }} + - name: HTTP_PROXY + value: {{ .Values.driver.httpProxy }} + {{- end }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: AZURE_GO_SDK_LOG_LEVEL + value: {{ .Values.driver.azureGoSDKLogLevel }} + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: AZURE_ENVIRONMENT_FILEPATH + value: /etc/kubernetes/azurestackcloud.json + {{- end }} + imagePullPolicy: {{ .Values.image.blob.pullPolicy }} + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: {{ .Values.linux.kubelet }}/ + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /etc/kubernetes/ + name: azure-cred + - mountPath: /mnt + name: blob-cache + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + mountPath: 
/etc/ssl/certs + readOnly: true + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + mountPath: /etc/ssl/certs + readOnly: true + - name: ssl-pki + mountPath: /etc/pki/ca-trust/extracted + readOnly: true + {{- end }} + {{- if .Values.node.enableAznfsMount }} + - mountPath: /opt/microsoft/aznfs/data + name: aznfs-data + - mountPath: /lib/modules + name: lib-modules + readOnly: true + {{- end }} + resources: {{- toYaml .Values.node.resources.blob | nindent 12 }} +{{- if .Values.node.enableAznfsMount }} + - name: aznfswatchdog +{{- if hasPrefix "/" .Values.image.blob.repository }} + image: "{{ .Values.image.baseRepo }}{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- else }} + image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" +{{- end }} + command: + - "aznfswatchdog" + imagePullPolicy: {{ .Values.image.blob.pullPolicy }} + securityContext: + privileged: true + resources: {{- toYaml .Values.node.resources.aznfswatchdog | nindent 12 }} + volumeMounts: + - mountPath: /opt/microsoft/aznfs/data + name: aznfs-data + - mountPath: {{ .Values.linux.kubelet }}/ + mountPropagation: Bidirectional + name: mountpoint-dir +{{- end }} + volumes: + - name: host-usr + hostPath: + path: /usr + - name: host-usr-local + hostPath: + path: /usr/local + - name: host-etc + hostPath: + path: /etc + - hostPath: + path: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }} + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: {{ .Values.linux.kubelet }}/ + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: {{ .Values.linux.kubelet }}/plugins_registry/ + type: DirectoryOrCreate + name: registration-dir + - hostPath: + path: /etc/kubernetes/ + type: DirectoryOrCreate + name: azure-cred + - hostPath: + path: {{ .Values.node.blobfuseCachePath }} + name: blob-cache + {{- if eq .Values.cloud "AzureStackCloud" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + {{- end }} + {{- if eq .Values.linux.distro "fedora" }} + - name: ssl + hostPath: + path: /etc/ssl/certs + - name: ssl-pki + hostPath: + path: /etc/pki/ca-trust/extracted + {{- end }} + {{- if .Values.node.enableAznfsMount }} + - hostPath: + path: /opt/microsoft/aznfs/data + type: DirectoryOrCreate + name: aznfs-data + - name: lib-modules + hostPath: + path: /lib/modules + type: DirectoryOrCreate + {{- end }} + {{- if .Values.securityContext }} + securityContext: {{- toYaml .Values.securityContext | nindent 8 }} + {{- end }} diff --git a/charts/v1.23.4/blob-csi-driver/templates/rbac-csi-blob-controller.yaml b/charts/v1.23.4/blob-csi-driver/templates/rbac-csi-blob-controller.yaml new file mode 100644 index 000000000..833dcc640 --- /dev/null +++ b/charts/v1.23.4/blob-csi-driver/templates/rbac-csi-blob-controller.yaml @@ -0,0 +1,115 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-provisioner-role + labels: + {{- include "blob.labels" . 
| nindent 4 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-provisioner-binding + labels: + {{- include "blob.labels" . | nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-external-resizer-role + labels: + {{- include "blob.labels" . | nindent 4 }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.rbac.name }}-csi-resizer-role + labels: + {{- include "blob.labels" . | nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Values.rbac.name }}-external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-controller-secret-role + labels: + {{- include "blob.labels" . | nindent 4 }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "create"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-controller-secret-binding + labels: + {{- include "blob.labels" . 
| nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-{{ .Values.rbac.name }}-controller-secret-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.23.4/blob-csi-driver/templates/rbac-csi-blob-node.yaml b/charts/v1.23.4/blob-csi-driver/templates/rbac-csi-blob-node.yaml new file mode 100644 index 000000000..c041cf8db --- /dev/null +++ b/charts/v1.23.4/blob-csi-driver/templates/rbac-csi-blob-node.yaml @@ -0,0 +1,29 @@ +{{- if .Values.rbac.create -}} +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-node-secret-role + labels: + {{- include "blob.labels" . | nindent 4 }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-{{ .Values.rbac.name }}-node-secret-binding + labels: + {{- include "blob.labels" . | nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount.node }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-{{ .Values.rbac.name }}-node-secret-role + apiGroup: rbac.authorization.k8s.io +{{ end }} diff --git a/charts/v1.23.4/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml b/charts/v1.23.4/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml new file mode 100644 index 000000000..7433bccf1 --- /dev/null +++ b/charts/v1.23.4/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml @@ -0,0 +1,17 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.controller }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "blob.labels" . | nindent 4 }} +{{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + annotations: + azure.workload.identity/client-id: {{ .Values.workloadIdentity.clientID }} +{{- if .Values.workloadIdentity.tenantID }} + azure.workload.identity/tenant-id: {{ .Values.workloadIdentity.tenantID }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/v1.23.4/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml b/charts/v1.23.4/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml new file mode 100644 index 000000000..a25090e30 --- /dev/null +++ b/charts/v1.23.4/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml @@ -0,0 +1,17 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.node }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "blob.labels" . 
| nindent 4 }} +{{- if .Values.workloadIdentity.clientID }} + azure.workload.identity/use: "true" + annotations: + azure.workload.identity/client-id: {{ .Values.workloadIdentity.clientID }} +{{- if .Values.workloadIdentity.tenantID }} + azure.workload.identity/tenant-id: {{ .Values.workloadIdentity.tenantID }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/v1.23.4/blob-csi-driver/values.yaml b/charts/v1.23.4/blob-csi-driver/values.yaml new file mode 100644 index 000000000..7fd3638bf --- /dev/null +++ b/charts/v1.23.4/blob-csi-driver/values.yaml @@ -0,0 +1,180 @@ +image: + baseRepo: mcr.microsoft.com + blob: + repository: /oss/kubernetes-csi/blob-csi + tag: v1.23.4 + pullPolicy: IfNotPresent + csiProvisioner: + repository: /oss/kubernetes-csi/csi-provisioner + tag: v3.5.0 + pullPolicy: IfNotPresent + livenessProbe: + repository: /oss/kubernetes-csi/livenessprobe + tag: v2.10.0 + pullPolicy: IfNotPresent + nodeDriverRegistrar: + repository: /oss/kubernetes-csi/csi-node-driver-registrar + tag: v2.8.0 + pullPolicy: IfNotPresent + csiResizer: + repository: /oss/kubernetes-csi/csi-resizer + tag: v1.8.0 + pullPolicy: IfNotPresent + +cloud: AzurePublicCloud + +## Reference to one or more secrets to be used when pulling images +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: [] +# - name: myRegistryKeySecretName + +serviceAccount: + create: true # When true, service accounts will be created for you. Set to false if you want to use your own. + controller: csi-blob-controller-sa # Name of Service Account to be created or used + node: csi-blob-node-sa # Name of Service Account to be created or used + +rbac: + create: true + name: blob + +## Collection of annotations to add to all the pods +podAnnotations: {} +## Collection of labels to add to all the pods +podLabels: {} +# -- Custom labels to add into metadata +customLabels: {} + # k8s-app: blob-csi-driver + +## Leverage a PriorityClass to ensure your pods survive resource shortages +## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +priorityClassName: system-cluster-critical +## Security context give the opportunity to run container as nonroot by setting a securityContext +## by example : +## securityContext: { runAsUser: 1001 } +securityContext: {} + +controller: + name: csi-blob-controller + cloudConfigSecretName: azure-cloud-provider + cloudConfigSecretNamespace: kube-system + allowEmptyCloudConfig: true + hostNetwork: true # this setting could be disabled if controller does not depend on MSI setting + metricsPort: 29634 + livenessProbe: + healthPort: 29632 + replicas: 2 + runOnMaster: false + runOnControlPlane: false + logLevel: 5 + resources: + csiProvisioner: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + livenessProbe: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + blob: + limits: + memory: 200Mi + requests: + cpu: 10m + memory: 20Mi + csiResizer: + limits: + memory: 500Mi + requests: + cpu: 10m + memory: 20Mi + affinity: {} + nodeSelector: {} + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/controlplane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + +node: + name: csi-blob-node + cloudConfigSecretName: azure-cloud-provider + cloudConfigSecretNamespace: kube-system + allowEmptyCloudConfig: true + 
allowInlineVolumeKeyAccessWithIdentity: false + maxUnavailable: 1 + livenessProbe: + healthPort: 29633 + logLevel: 5 + enableBlobfuseProxy: true + blobfuseProxy: + installBlobfuse: true + blobfuseVersion: "1.4.5" + installBlobfuse2: true + blobfuse2Version: "2.1.2" + setMaxOpenFileNum: true + maxOpenFileNum: "9000000" + disableUpdateDB: true + blobfuseCachePath: /mnt + appendTimeStampInCacheDir: false + mountPermissions: 0777 + resources: + livenessProbe: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + nodeDriverRegistrar: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + blob: + limits: + memory: 2100Mi + requests: + cpu: 10m + memory: 20Mi + aznfswatchdog: + limits: + memory: 100Mi + requests: + cpu: 10m + memory: 20Mi + affinity: {} + nodeSelector: {} + tolerations: + - operator: "Exists" + enableAznfsMount: true + +feature: + fsGroupPolicy: ReadWriteOnceWithFSType + enableGetVolumeStats: false + +driver: + name: blob.csi.azure.com + customUserAgent: "" + userAgentSuffix: "OSS-helm" + azureGoSDKLogLevel: "" # available values: ""(no logs), DEBUG, INFO, WARNING, ERROR + httpsProxy: "" + httpProxy: "" + +linux: + kubelet: /var/lib/kubelet + distro: debian + +workloadIdentity: + clientID: "" + # [optional] If the AAD application or user-assigned managed identity is not in the same tenant as the cluster + # then set tenantID with the application or user-assigned managed identity tenant ID + tenantID: "" diff --git a/charts/v1.24.1/blob-csi-driver-v1.24.1.tgz b/charts/v1.24.1/blob-csi-driver-v1.24.1.tgz new file mode 100644 index 0000000000000000000000000000000000000000..cbf94c473dcf302fc9f2679f815d76fcbcc3a9bd GIT binary patch literal 5981 zcmV-j7ozANiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PKBjbKAJ__`TMrz$n-0wf|60en_hszpyPO)x?oKl9OxyH#Z)L zTuG=&fCqqzbv}Rh-vA)RXNi&(J8i|hh(#`T7vN&C*avXpvWaC2VmX{FfHyDan9Ifz zd+rZ=yIQSQ>-6|o|KDo0vj4Z*rzbzOk55}C?~dE8&J}}G>;ptMr#vE zmn?vS5I3Anpc=)fiLaU@=ye+H&Srv?IndJ+Z3HtSB*$BHR5cE#sl}t_X%;H<*%o93 zg8aVC5OdQY^-BNVI9gAtX1gqK!WH8tn7G8gRCj8%H*e4groe&PfFwhV0%9C_i~|y2 zbC5#ao&X92IOyvfs3t?`3~6~43I&I07tN*gMYow?xYl3ZJEsSw1HVEFl`xWwWO5-vq8ZHr#a4FTd8_APbrG&0B_VZ}}0H24sjm zsMQ4U1+n4Wwpl=>8ef|Ome56#2Y}w7pH)}X-)~U}i?9I}=qqtu)o0VhK~u)jk_8Pk z2C0T)id_MSOw~>F6;pjP5TJf@#J-6lzQ_WisOq{bCvth|VxjQ^70R;GFHWO0k;8(Ipeab z^xKTu;LCUd=+SAaN#KX1EuRB;+OBKX{3Jg^{YJ!6NboG%4TJ=4*_=pXV^>ihgB>Vn zEpPB3(M)lw#*_Iz<1(P?p=5{$6s;y5!U9o7Ib#w{mPmksL&Yz0$8$)!?HmwLS7S=~igDKJYv zG>eT>4lR6aLl>zRVwv%)Ath_GqUJTEB$g9f+%PV?sPp&JqvPVWZw}a%^BAmC z)60WPY%ID+{|e|e{e+b~bd5raZeALGZd^(Lx-nR zqNa1G;*|CugHo-Ghqokifrr{xW3kXZEW4<#R|?KtXrwiirzNN!Qe?hrt+3Vm4@5{2 zJ%H&HY}rNiAsgFsZ~_+pngO^wR-#3574r4LtY= z%X8ZXA$}%uK5&3aEe_En_wd7KiWiu;N;`E?yGZ4nR8u)eIi&$qVZCtKWEu##;p}0# z;&hU_s}T|^BzD~hegzr>ulyVJcH_8lQZH)ODQMS;8+IC_v4&dj-o4A~-WSz|rMsS2R>03Cf`6w|@fAyZ?y%Xn?a@w3xKJ0F`E;&W z|D<;*M(%1;z%t-k7N+6@XMrE>>PCQ_pE;3mO>Iyr;Qjb^>B9=w2arZjj3rikB_oS9 zWA}Db27bd9q@mSU=vOc{v@+t(XP}s=D;|UB_c)kN$wL>_ug7C+4z8!3>h>SlxcBjD zr77xl^aibT5HemYb^9mEltK}Id5e1ei_ah4qQUU}^;`7w`Di#8et3)eqtW$9nPJM7 z4%LfhLfwde*IYt_2h|&;N?nj$)LdZRbjhR{-l^$1EjjBV2POp5T1^>sF2l}BX=*i1 zfXYEHB=ZLS!n6&J-CxmQs*ULL^PZaQ%J4E97`=ny78ZicC^*Q&6wj0)7~)05Sb&7; z_CoE;Bv7?U#6sT+t$0mPs$Hm1yGji*=%KGf&NbL}R6`Pjo|3;1m+sbUd$kT%)hp}& zmEid)ZagAs^(kX>5xB9JksT|L@7^$;rY0`x4i!Qa9^jI!d+7!kwrCzRQ-N z9VpsS5o(o2t#-?hJqP=K12W^;kXQ!dE%IH!0@VJoTMHGdEnQ?;G%{Sz9v@L-1RAcf zGH21#lVZsrvn&xVglBc^`xzTuVag#vPfvAZS$gIEUsqM-^Z#xRt`A%^r2ouN(1!Wn zIXXSg%>UDq)7D}Bzr^+U*!(M65U;BZf+=x9E`2!j)Ueod=%T+ipPp(p4c33~F?F<^ 
zKts>3HZ63@fQEZ~w2%W+LIKsay{F*n$ci7~z?GRx)QTC`ATcqY z6Dp^u{wHDmNz~IFFn@=;L-dDsnJFz*|9^EA+(i>uGn}%8KS~rBgy`SJ$DVnig4=9nrZI`uDgoMO&O0J`T`ZdXFm>|C0=&S zVK%;|2`9eZt^`#oLd>wiIvWXH6S_3M8}{Y$U^oz}b4 zo?j){tjuT; ztN9f!X8F`KjOJfRJ~8mxxtQW)3tp-Y+~0CdVy&HZW;G&DR@Q2#DgybqPA8GK+LKZ(?(}lzWkVc)z8i?B@5)QfrOJ}CpA1FyR(;*{(>ZbOu z6}+U#t(zbpIeL?{*OSSOTJbWeuA)UY+rwPzH1U#Gmx^K+OJo8hZkJA(8D^DB`f&nV zj3MOBxo7KecQ-*xJVQ@Ubu$32fQtO=k7jxuKnU}hNQ!|P_PVt=>(p~Mwf%}6hl3Dj zKmK&y>y7&3@m~r6UBC`-3&PB>%@7q_kp)PUq_RMswL7hrD0mJJl4HxH!CBE7f0kYx zSHL=Sk9}hO=8LnF);fquz=b?(w~Bzm_?|UW#{Jnp7-jtC%Lr^26VF8Yatr`nWZ!@h z{BVR*&yWeabx||S8f^$>-)Cj$CXp?=Nj&1r(zIkObg)qeFLM%N;@>7{9wi~im*HOV z1=4Z|Pdtv}lTuR>p>iCRMdFC=3yULR378^Nf&g1l!f>ByK7t(lk znF?!!U0vlfWrwMd;&$Uq(5b!^%s80Xmt#ewpJ6T)zJS+Bt4PhRZQ7q9HCaWTZw@w%){Fbb8K-`M8C!u z(dm2^c)nbw=#`)^%ni(7_8HIXo0X+(viYhAP2ut!f@GfgoJnRgw~KBsZxYW66WPo) z#l#Ib+|D_OIddJ8PZ(cHv3~&B%}~xF6A*(LpBr#O4XWv}Jz&~zpZ^m~wqw9bEtC)UX4JGhqkQOcw8#t`O;P(7& z!(LjbK3w08dmrzvu0Pye^}qD5s#47-`jrd={1(qRrL<68XQ7NGw(qqTD;)~MV1M{! zFuERo>JM+V*x#Jr{-b1b1JX{E+cY(cLebHL8~+xJZXTe`=HDue_F@~LY~xHv3lYV0 zdB1N(2zQOOsyGwEZQ8(PcA4OSoonhY3-rVKnUr8wI|>^nl@ryc;4qF~memAu%fHXQ z#4G{-o-}Q2NgfE<)UE|vX5Ps51GB>>K*{Vz+do-OTFqY;7P;)*S5}C(&}S7v?(QRd zCar24gsNV!LLb^oc*g#0%`tUcu#5t;2n_^Sz&Yb*I%)jRuj_n@#{PTXZO5#ZOgF=5 zfycU#KfN{3RV0u3`C6#rm0hemlQugyFC4~~-y>WHAC}kLq;0KWc3L@#Rq;k1^Ln{F z9=Xlxus3$fSAmVlHY^F70*IL?O?mjHNMy5(3nQ}$4qiJ0;(z<9kpB}}_drLfT0RTmz#*U$XR%MZ&8A+t&U?2Cw*O$yF`dD8r1L7wf65$q z)`3v#=l@ab^xeBm{{Op^E{tgVMWSm`to#&;LV_y@<6T6BTN7~R79(KlmixRNw!HKR4neAI+)8jGc#xRY`Qqt zGa{{!o#*bF0|S%+)C)MFv+(>7LT7^+Wvko$2e1QuN@r?Ym}Kx4xF@o)8O!LZ{)5jU zJT;;q@)DkgB2DFg{)!J97&1Aag)m`Re&I7kVL`YIuxrHy^|6#Brh^{$Wa@0eTu;Mn zp6}+y_nY6S`?J@~Ml~{*YqA@|BT=mAJQB-Sll_plQSO$_ITP_wSF9zqsyZ58(;cb7|EB30^DWil+;MkUA zkxNoEm)r74r=9wi zJ9Md(rlR*OQeHJWy}|hW;;Mi5`KEV%+wWcML@RS5R4K8KpD+4X{oA|pFm;lUaae~- zY6j=#h=#JqtU#=bXDEMNM{; zz;8i)dufcr_&Kmpp|4(=j>-F2&d+}QsW%#Y>5uM4{SSBJ>&uUNp~z!M%h;MkN_dDp z6VX*@!y3#R4lCbpMAS&y3O>!IJ#;G^59z_y#R~=X|6%SPjFHn_W*13JvBc!*($(HH8%3365hmygSC zrfzAyMx3)Be^N&H-SE2CzZ;%^>K9Sowf><(wSngOThtkpBVJQs@cmV6RH=C|9>NOM z4rxJ}fsFFkgv3&u6$F`!&P6R@R3GW9ElMG==UXQk8uE`8Y zX+_ibU1hS>?&45i^7{29IWJ~<8QY9Cy_y{gSSs^c0Y`))kK^&cZt+iYs3%ZRq zw7*@2P#Td(v+G~-JCe%E*{?(%%8OpTyr?*)U!FS>F`i_O`LenCPGM7|_c)EwC6lJdoWOSf>&Q_w7V!qTRR(&1Y`y71P#Vz;ZODm@e;-G7*aFU=KW1*WcWn!Xh;-Q-=gAzE_ zfj-NHLmlAj)B$d9mTi#2O6x;)am=kB)>_MKEo)`QUX>cv|DNK>^7!M3%RL%=uG z|D7J49%btP+V5J2@Bh5Wl@VU`(i`SrN_}vDSo~)`^D|;DqzZ&IG!EpPaq_!fp%Qh2 zYpaHI%g|o1;6VbX*i9a$st>;1y?&6ZY2DQy2f#kjC4ZueeyP`g)tC}C^DlLORpS?0 ze<9EU_(TKDK-S-)x=V!qUz^YB4>L~cFGsEUpxf<0P@lnBnX0L6Dx#IH-7KOULf9@A zqGCTQ_6Zh1ADNuts=p}Rh>@}8r= zBC%LA?qx6Uo2uII44(f4HCsVN>{-h`i$c?U-Y!dM$6gfc)#oYH7uQZozF%55?MJ}> z3zSl3ASx@Qz3LSu{#M$6`6i~ zJRCSwa8=#^Kj#pT4g3H0(NXUEU&lwMhyDLcTpRZPFS-!unt|_W33w&NpB#tpMdc?C zPIwVzUmy$zHNXF>T>pmxfQdz0aTSgY+(yz#bdQ_9pLXageE `blobfuse-proxy` is only available for debian based agent nodes, remove `blobfuse-proxy` parameter in installation steps if it's not applicable. +> +If you have already installed Helm, you can also use it to install this driver. Please check [Installation with Helm](../charts/README.md). + +## Install with kubectl + - Option#1. remote install +```console +curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/v1.22.6/deploy/install-driver.sh | bash -s v1.22.6 blobfuse-proxy -- +``` + + - Option#2. 
local install +```console +git clone https://github.com/kubernetes-sigs/blob-csi-driver.git +cd blob-csi-driver +./deploy/install-driver.sh v1.22.6 local,blobfuse-proxy +``` + +- check pods status: +```console +kubectl -n kube-system get pod -o wide -l app=csi-blob-controller +kubectl -n kube-system get pod -o wide -l app=csi-blob-node +``` + +example output: + +```console +NAME READY STATUS RESTARTS AGE IP NODE +csi-blob-controller-56bfddd689-dh5tk 4/4 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0 +csi-blob-controller-56bfddd689-8pgr4 4/4 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1 +csi-blob-node-cvgbs 3/3 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1 +csi-blob-node-dr4s4 3/3 Running 0 35s 10.240.0.4 k8s-agentpool-22533604-0 +``` + +### clean up Blob CSI driver +- Option#1. remote uninstall +```console +curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/v1.22.6/deploy/uninstall-driver.sh | bash -s v1.22.6 -- +``` + + - Option#2. local uninstall +```console +git clone https://github.com/kubernetes-sigs/blob-csi-driver.git +cd blob-csi-driver +git checkout v1.22.6 +./deploy/uninstall-driver.sh v1.22.6 local +``` diff --git a/docs/install-csi-driver-v1.23.4.md b/docs/install-csi-driver-v1.23.4.md new file mode 100644 index 000000000..1f40b2b08 --- /dev/null +++ b/docs/install-csi-driver-v1.23.4.md @@ -0,0 +1,47 @@ +# Install Azure Blob Storage CSI driver v1.23.4 version on a kubernetes cluster +> `blobfuse-proxy` is supported on CoreOS(OpenShift) from v1.23.2 +> +If you have already installed Helm, you can also use it to install this driver. Please check [Installation with Helm](../charts/README.md). + +## Install with kubectl + - Option#1. remote install +```console +curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/v1.23.4/deploy/install-driver.sh | bash -s v1.23.4 blobfuse-proxy -- +``` + + - Option#2. local install +```console +git clone https://github.com/kubernetes-sigs/blob-csi-driver.git +cd blob-csi-driver +./deploy/install-driver.sh v1.23.4 local,blobfuse-proxy +``` + +- check pods status: +```console +kubectl -n kube-system get pod -o wide -l app=csi-blob-controller +kubectl -n kube-system get pod -o wide -l app=csi-blob-node +``` + +example output: + +```console +NAME READY STATUS RESTARTS AGE IP NODE +csi-blob-controller-56bfddd689-dh5tk 4/4 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0 +csi-blob-controller-56bfddd689-8pgr4 4/4 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1 +csi-blob-node-cvgbs 3/3 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1 +csi-blob-node-dr4s4 3/3 Running 0 35s 10.240.0.4 k8s-agentpool-22533604-0 +``` + +### clean up Blob CSI driver +- Option#1. remote uninstall +```console +curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/v1.23.4/deploy/uninstall-driver.sh | bash -s v1.23.4 -- +``` + + - Option#2. local uninstall +```console +git clone https://github.com/kubernetes-sigs/blob-csi-driver.git +cd blob-csi-driver +git checkout v1.23.4 +./deploy/uninstall-driver.sh v1.23.4 local +``` diff --git a/docs/install-csi-driver-v1.24.1.md b/docs/install-csi-driver-v1.24.1.md new file mode 100644 index 000000000..ff499deae --- /dev/null +++ b/docs/install-csi-driver-v1.24.1.md @@ -0,0 +1,47 @@ +# Install Azure Blob Storage CSI driver v1.24.1 version on a kubernetes cluster +> `blobfuse-proxy` is supported on CoreOS(OpenShift) from v1.23.2 +> +If you have already installed Helm, you can also use it to install this driver. 
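For example, a minimal Helm-based install is sketched below; the chart repo URL, release name, and namespace are assumptions based on the chart's usual conventions, so treat the linked chart README as authoritative:

```console
# add the chart repo served from the master branch of this repository (assumed URL)
helm repo add blob-csi-driver https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts
helm repo update
# install the v1.24.1 chart into kube-system (release name and namespace are illustrative)
helm install blob-csi-driver blob-csi-driver/blob-csi-driver --namespace kube-system --version v1.24.1
```
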
Please check [Installation with Helm](../charts/README.md). + +## Install with kubectl + - Option#1. remote install +```console +curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/v1.24.1/deploy/install-driver.sh | bash -s v1.24.1 blobfuse-proxy -- +``` + + - Option#2. local install +```console +git clone https://github.com/kubernetes-sigs/blob-csi-driver.git +cd blob-csi-driver +./deploy/install-driver.sh v1.24.1 local,blobfuse-proxy +``` + +- check pods status: +```console +kubectl -n kube-system get pod -o wide -l app=csi-blob-controller +kubectl -n kube-system get pod -o wide -l app=csi-blob-node +``` + +example output: + +```console +NAME READY STATUS RESTARTS AGE IP NODE +csi-blob-controller-56bfddd689-dh5tk 4/4 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0 +csi-blob-controller-56bfddd689-8pgr4 4/4 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1 +csi-blob-node-cvgbs 3/3 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1 +csi-blob-node-dr4s4 3/3 Running 0 35s 10.240.0.4 k8s-agentpool-22533604-0 +``` + +### clean up Blob CSI driver +- Option#1. remote uninstall +```console +curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/v1.24.1/deploy/uninstall-driver.sh | bash -s v1.24.1 -- +``` + + - Option#2. local uninstall +```console +git clone https://github.com/kubernetes-sigs/blob-csi-driver.git +cd blob-csi-driver +git checkout v1.24.1 +./deploy/uninstall-driver.sh v1.24.1 local +``` From a0d0c0eb5b7cb1964333802b2296f152ebe27b50 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Fri, 12 Apr 2024 07:57:50 +0000 Subject: [PATCH 131/157] cleanup: remove deprecated version in charts and kubectl install fix --- charts/index.yaml | 161 +++---------- charts/v1.0.0/blob-csi-driver-v1.0.0.tgz | Bin 3656 -> 0 bytes charts/v1.0.0/blob-csi-driver/Chart.yaml | 5 - .../blob-csi-driver/templates/NOTES.txt | 5 - .../blob-csi-driver/templates/_helpers.tpl | 21 -- .../templates/csi-blob-controller.yaml | 164 ------------- .../templates/csi-blob-driver.yaml | 8 - .../templates/csi-blob-node.yaml | 178 -------------- .../templates/rbac-csi-blob-controller.yaml | 192 --------------- .../templates/rbac-csi-blob-node.yaml | 27 --- .../serviceaccount-csi-blob-controller.yaml | 8 - .../serviceaccount-csi-blob-node.yaml | 8 - charts/v1.0.0/blob-csi-driver/values.yaml | 47 ---- charts/v1.1.0/blob-csi-driver-v1.1.0.tgz | Bin 4228 -> 0 bytes charts/v1.1.0/blob-csi-driver/Chart.yaml | 5 - .../blob-csi-driver/templates/NOTES.txt | 5 - .../blob-csi-driver/templates/_helpers.tpl | 26 -- .../templates/csi-blob-controller.yaml | 163 ------------- .../templates/csi-blob-driver.yaml | 8 - .../templates/csi-blob-node.yaml | 183 --------------- .../templates/rbac-csi-blob-controller.yaml | 186 --------------- .../templates/rbac-csi-blob-node.yaml | 26 -- .../serviceaccount-csi-blob-controller.yaml | 8 - .../serviceaccount-csi-blob-node.yaml | 8 - charts/v1.1.0/blob-csi-driver/values.yaml | 117 --------- charts/v1.2.0/blob-csi-driver-v1.2.0.tgz | Bin 4220 -> 0 bytes charts/v1.2.0/blob-csi-driver/Chart.yaml | 5 - .../blob-csi-driver/templates/NOTES.txt | 5 - .../blob-csi-driver/templates/_helpers.tpl | 26 -- .../templates/csi-blob-controller.yaml | 180 -------------- .../templates/csi-blob-driver.yaml | 11 - .../templates/csi-blob-node.yaml | 199 ---------------- .../templates/rbac-csi-blob-controller.yaml | 107 --------- .../templates/rbac-csi-blob-node.yaml | 25 -- .../serviceaccount-csi-blob-controller.yaml | 8 - .../serviceaccount-csi-blob-node.yaml | 8 - 
charts/v1.2.0/blob-csi-driver/values.yaml | 121 ---------- charts/v1.3.0/blob-csi-driver-v1.3.0.tgz | Bin 4251 -> 0 bytes charts/v1.3.0/blob-csi-driver/Chart.yaml | 5 - .../blob-csi-driver/templates/NOTES.txt | 5 - .../blob-csi-driver/templates/_helpers.tpl | 26 -- .../templates/csi-blob-controller.yaml | 172 -------------- .../templates/csi-blob-driver.yaml | 14 -- .../templates/csi-blob-node.yaml | 198 ---------------- .../templates/rbac-csi-blob-controller.yaml | 107 --------- .../templates/rbac-csi-blob-node.yaml | 25 -- .../serviceaccount-csi-blob-controller.yaml | 8 - .../serviceaccount-csi-blob-node.yaml | 8 - charts/v1.3.0/blob-csi-driver/values.yaml | 133 ----------- charts/v1.4.0/blob-csi-driver-v1.4.0.tgz | Bin 4501 -> 0 bytes charts/v1.4.0/blob-csi-driver/Chart.yaml | 5 - .../blob-csi-driver/templates/NOTES.txt | 5 - .../blob-csi-driver/templates/_helpers.tpl | 26 -- .../templates/csi-blob-controller.yaml | 175 -------------- .../templates/csi-blob-driver.yaml | 14 -- .../templates/csi-blob-node.yaml | 198 ---------------- .../templates/rbac-csi-blob-controller.yaml | 107 --------- .../templates/rbac-csi-blob-node.yaml | 25 -- .../serviceaccount-csi-blob-controller.yaml | 8 - .../serviceaccount-csi-blob-node.yaml | 8 - charts/v1.4.0/blob-csi-driver/values.yaml | 148 ------------ charts/v1.4.1/blob-csi-driver-v1.4.1.tgz | Bin 4502 -> 0 bytes charts/v1.4.1/blob-csi-driver/Chart.yaml | 5 - .../blob-csi-driver/templates/NOTES.txt | 5 - .../blob-csi-driver/templates/_helpers.tpl | 26 -- .../templates/csi-blob-controller.yaml | 175 -------------- .../templates/csi-blob-driver.yaml | 14 -- .../templates/csi-blob-node.yaml | 198 ---------------- .../templates/rbac-csi-blob-controller.yaml | 107 --------- .../templates/rbac-csi-blob-node.yaml | 25 -- .../serviceaccount-csi-blob-controller.yaml | 8 - .../serviceaccount-csi-blob-node.yaml | 8 - charts/v1.4.1/blob-csi-driver/values.yaml | 148 ------------ charts/v1.5.0/blob-csi-driver-v1.5.0.tgz | Bin 4690 -> 0 bytes charts/v1.5.0/blob-csi-driver/Chart.yaml | 5 - .../blob-csi-driver/templates/NOTES.txt | 5 - .../blob-csi-driver/templates/_helpers.tpl | 26 -- .../templates/csi-blob-controller.yaml | 193 --------------- .../templates/csi-blob-driver.yaml | 14 -- .../templates/csi-blob-node.yaml | 216 ----------------- .../templates/rbac-csi-blob-controller.yaml | 107 --------- .../templates/rbac-csi-blob-node.yaml | 25 -- .../serviceaccount-csi-blob-controller.yaml | 8 - .../serviceaccount-csi-blob-node.yaml | 8 - charts/v1.5.0/blob-csi-driver/values.yaml | 152 ------------ charts/v1.6.0/blob-csi-driver-v1.6.0.tgz | Bin 5251 -> 0 bytes charts/v1.6.0/blob-csi-driver/Chart.yaml | 5 - .../blob-csi-driver/templates/NOTES.txt | 5 - .../blob-csi-driver/templates/_helpers.tpl | 26 -- .../templates/blobfuse-proxy.yaml | 88 ------- .../templates/csi-blob-controller.yaml | 187 --------------- .../templates/csi-blob-driver.yaml | 14 -- .../templates/csi-blob-node.yaml | 218 ----------------- .../templates/rbac-csi-blob-controller.yaml | 107 --------- .../templates/rbac-csi-blob-node.yaml | 25 -- .../serviceaccount-csi-blob-controller.yaml | 8 - .../serviceaccount-csi-blob-node.yaml | 8 - charts/v1.6.0/blob-csi-driver/values.yaml | 152 ------------ charts/v1.7.0/blob-csi-driver-v1.7.0.tgz | Bin 5580 -> 0 bytes charts/v1.7.0/blob-csi-driver/Chart.yaml | 5 - .../blob-csi-driver/templates/NOTES.txt | 5 - .../blob-csi-driver/templates/_helpers.tpl | 49 ---- .../templates/blobfuse-proxy.yaml | 114 --------- .../templates/csi-blob-controller.yaml | 192 
--------------- .../templates/csi-blob-driver.yaml | 16 -- .../templates/csi-blob-node.yaml | 220 ----------------- .../templates/rbac-csi-blob-controller.yaml | 115 --------- .../templates/rbac-csi-blob-node.yaml | 29 --- .../serviceaccount-csi-blob-controller.yaml | 9 - .../serviceaccount-csi-blob-node.yaml | 9 - charts/v1.7.0/blob-csi-driver/values.yaml | 164 ------------- charts/v1.8.0/blob-csi-driver-v1.8.0.tgz | Bin 5815 -> 0 bytes charts/v1.8.0/blob-csi-driver/Chart.yaml | 5 - .../blob-csi-driver/templates/NOTES.txt | 5 - .../blob-csi-driver/templates/_helpers.tpl | 49 ---- .../templates/blobfuse-proxy.yaml | 126 ---------- .../templates/csi-blob-controller.yaml | 192 --------------- .../templates/csi-blob-driver.yaml | 16 -- .../templates/csi-blob-node.yaml | 222 ------------------ .../templates/rbac-csi-blob-controller.yaml | 115 --------- .../templates/rbac-csi-blob-node.yaml | 29 --- .../serviceaccount-csi-blob-controller.yaml | 9 - .../serviceaccount-csi-blob-node.yaml | 9 - charts/v1.8.0/blob-csi-driver/values.yaml | 160 ------------- charts/v1.9.0/blob-csi-driver-v1.9.0.tgz | Bin 5815 -> 0 bytes charts/v1.9.0/blob-csi-driver/Chart.yaml | 5 - .../blob-csi-driver/templates/NOTES.txt | 5 - .../blob-csi-driver/templates/_helpers.tpl | 49 ---- .../templates/blobfuse-proxy.yaml | 126 ---------- .../templates/csi-blob-controller.yaml | 192 --------------- .../templates/csi-blob-driver.yaml | 16 -- .../templates/csi-blob-node.yaml | 222 ------------------ .../templates/rbac-csi-blob-controller.yaml | 115 --------- .../templates/rbac-csi-blob-node.yaml | 29 --- .../serviceaccount-csi-blob-controller.yaml | 9 - .../serviceaccount-csi-blob-node.yaml | 9 - charts/v1.9.0/blob-csi-driver/values.yaml | 160 ------------- deploy/v1.0.0/csi-blob-controller.yaml | 140 ----------- deploy/v1.0.0/csi-blob-driver.yaml | 8 - deploy/v1.0.0/csi-blob-node.yaml | 160 ------------- deploy/v1.0.0/rbac-csi-blob-controller.yaml | 150 ------------ deploy/v1.0.0/rbac-csi-blob-node.yaml | 32 --- deploy/v1.1.0/csi-blob-controller.yaml | 143 ----------- deploy/v1.1.0/csi-blob-driver.yaml | 8 - deploy/v1.1.0/csi-blob-node.yaml | 163 ------------- deploy/v1.1.0/kustomization.yaml | 9 - deploy/v1.1.0/rbac-csi-blob-controller.yaml | 148 ------------ deploy/v1.1.0/rbac-csi-blob-node.yaml | 30 --- deploy/v1.2.0/csi-blob-controller.yaml | 144 ------------ deploy/v1.2.0/csi-blob-driver.yaml | 11 - deploy/v1.2.0/csi-blob-node.yaml | 163 ------------- deploy/v1.2.0/kustomization.yaml | 9 - deploy/v1.2.0/rbac-csi-blob-controller.yaml | 108 --------- deploy/v1.2.0/rbac-csi-blob-node.yaml | 30 --- deploy/v1.3.0/csi-blob-controller.yaml | 144 ------------ deploy/v1.3.0/csi-blob-driver.yaml | 11 - deploy/v1.3.0/csi-blob-node.yaml | 163 ------------- deploy/v1.3.0/kustomization.yaml | 9 - deploy/v1.3.0/rbac-csi-blob-controller.yaml | 108 --------- deploy/v1.3.0/rbac-csi-blob-node.yaml | 30 --- deploy/v1.4.0/csi-blob-controller.yaml | 142 ----------- deploy/v1.4.0/csi-blob-driver.yaml | 11 - deploy/v1.4.0/csi-blob-node.yaml | 160 ------------- deploy/v1.4.0/kustomization.yaml | 9 - deploy/v1.4.0/rbac-csi-blob-controller.yaml | 108 --------- deploy/v1.4.0/rbac-csi-blob-node.yaml | 30 --- deploy/v1.4.1/csi-blob-controller.yaml | 142 ----------- deploy/v1.4.1/csi-blob-driver.yaml | 11 - deploy/v1.4.1/csi-blob-node.yaml | 160 ------------- deploy/v1.4.1/kustomization.yaml | 9 - deploy/v1.4.1/rbac-csi-blob-controller.yaml | 108 --------- deploy/v1.4.1/rbac-csi-blob-node.yaml | 30 --- deploy/v1.5.0/csi-blob-controller.yaml | 143 
----------- deploy/v1.5.0/csi-blob-driver.yaml | 11 - deploy/v1.5.0/csi-blob-node.yaml | 166 ------------- deploy/v1.5.0/kustomization.yaml | 9 - deploy/v1.5.0/rbac-csi-blob-controller.yaml | 108 --------- deploy/v1.5.0/rbac-csi-blob-node.yaml | 30 --- deploy/v1.6.0/blobfuse-proxy.yaml | 87 ------- deploy/v1.6.0/csi-blob-controller.yaml | 137 ----------- deploy/v1.6.0/csi-blob-driver.yaml | 11 - deploy/v1.6.0/csi-blob-node.yaml | 169 ------------- deploy/v1.6.0/kustomization.yaml | 9 - deploy/v1.6.0/rbac-csi-blob-controller.yaml | 108 --------- deploy/v1.6.0/rbac-csi-blob-node.yaml | 30 --- deploy/v1.7.0/blobfuse-proxy.yaml | 103 -------- deploy/v1.7.0/csi-blob-controller.yaml | 137 ----------- deploy/v1.7.0/csi-blob-driver.yaml | 11 - deploy/v1.7.0/csi-blob-node.yaml | 165 ------------- deploy/v1.7.0/kustomization.yaml | 9 - deploy/v1.7.0/rbac-csi-blob-controller.yaml | 108 --------- deploy/v1.7.0/rbac-csi-blob-node.yaml | 30 --- deploy/v1.8.0/blobfuse-proxy.yaml | 115 --------- deploy/v1.8.0/csi-blob-controller.yaml | 133 ----------- deploy/v1.8.0/csi-blob-driver.yaml | 11 - deploy/v1.8.0/csi-blob-node.yaml | 162 ------------- deploy/v1.8.0/kustomization.yaml | 10 - deploy/v1.8.0/rbac-csi-blob-controller.yaml | 108 --------- deploy/v1.8.0/rbac-csi-blob-node.yaml | 30 --- deploy/v1.9.0/blobfuse-proxy.yaml | 115 --------- deploy/v1.9.0/csi-blob-controller.yaml | 133 ----------- deploy/v1.9.0/csi-blob-driver.yaml | 11 - deploy/v1.9.0/csi-blob-node.yaml | 162 ------------- deploy/v1.9.0/kustomization.yaml | 10 - deploy/v1.9.0/rbac-csi-blob-controller.yaml | 108 --------- deploy/v1.9.0/rbac-csi-blob-node.yaml | 30 --- docs/install-csi-driver-v1.0.0.md | 37 --- docs/install-csi-driver-v1.1.0.md | 37 --- docs/install-csi-driver-v1.2.0.md | 37 --- docs/install-csi-driver-v1.3.0.md | 37 --- docs/install-csi-driver-v1.4.0.md | 37 --- docs/install-csi-driver-v1.5.0.md | 37 --- docs/install-csi-driver-v1.6.0.md | 37 --- docs/install-csi-driver-v1.7.0.md | 38 --- docs/install-csi-driver-v1.8.0.md | 38 --- docs/install-csi-driver-v1.9.0.md | 47 ---- 216 files changed, 31 insertions(+), 14707 deletions(-) delete mode 100644 charts/v1.0.0/blob-csi-driver-v1.0.0.tgz delete mode 100755 charts/v1.0.0/blob-csi-driver/Chart.yaml delete mode 100755 charts/v1.0.0/blob-csi-driver/templates/NOTES.txt delete mode 100755 charts/v1.0.0/blob-csi-driver/templates/_helpers.tpl delete mode 100755 charts/v1.0.0/blob-csi-driver/templates/csi-blob-controller.yaml delete mode 100755 charts/v1.0.0/blob-csi-driver/templates/csi-blob-driver.yaml delete mode 100755 charts/v1.0.0/blob-csi-driver/templates/csi-blob-node.yaml delete mode 100755 charts/v1.0.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml delete mode 100755 charts/v1.0.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml delete mode 100755 charts/v1.0.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml delete mode 100755 charts/v1.0.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml delete mode 100755 charts/v1.0.0/blob-csi-driver/values.yaml delete mode 100644 charts/v1.1.0/blob-csi-driver-v1.1.0.tgz delete mode 100755 charts/v1.1.0/blob-csi-driver/Chart.yaml delete mode 100755 charts/v1.1.0/blob-csi-driver/templates/NOTES.txt delete mode 100755 charts/v1.1.0/blob-csi-driver/templates/_helpers.tpl delete mode 100755 charts/v1.1.0/blob-csi-driver/templates/csi-blob-controller.yaml delete mode 100755 charts/v1.1.0/blob-csi-driver/templates/csi-blob-driver.yaml delete mode 100755 
charts/v1.1.0/blob-csi-driver/templates/csi-blob-node.yaml delete mode 100755 charts/v1.1.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml delete mode 100755 charts/v1.1.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml delete mode 100755 charts/v1.1.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml delete mode 100755 charts/v1.1.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml delete mode 100755 charts/v1.1.0/blob-csi-driver/values.yaml delete mode 100644 charts/v1.2.0/blob-csi-driver-v1.2.0.tgz delete mode 100755 charts/v1.2.0/blob-csi-driver/Chart.yaml delete mode 100755 charts/v1.2.0/blob-csi-driver/templates/NOTES.txt delete mode 100755 charts/v1.2.0/blob-csi-driver/templates/_helpers.tpl delete mode 100755 charts/v1.2.0/blob-csi-driver/templates/csi-blob-controller.yaml delete mode 100755 charts/v1.2.0/blob-csi-driver/templates/csi-blob-driver.yaml delete mode 100755 charts/v1.2.0/blob-csi-driver/templates/csi-blob-node.yaml delete mode 100755 charts/v1.2.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml delete mode 100755 charts/v1.2.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml delete mode 100755 charts/v1.2.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml delete mode 100755 charts/v1.2.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml delete mode 100755 charts/v1.2.0/blob-csi-driver/values.yaml delete mode 100644 charts/v1.3.0/blob-csi-driver-v1.3.0.tgz delete mode 100755 charts/v1.3.0/blob-csi-driver/Chart.yaml delete mode 100755 charts/v1.3.0/blob-csi-driver/templates/NOTES.txt delete mode 100755 charts/v1.3.0/blob-csi-driver/templates/_helpers.tpl delete mode 100755 charts/v1.3.0/blob-csi-driver/templates/csi-blob-controller.yaml delete mode 100755 charts/v1.3.0/blob-csi-driver/templates/csi-blob-driver.yaml delete mode 100755 charts/v1.3.0/blob-csi-driver/templates/csi-blob-node.yaml delete mode 100755 charts/v1.3.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml delete mode 100755 charts/v1.3.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml delete mode 100755 charts/v1.3.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml delete mode 100755 charts/v1.3.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml delete mode 100755 charts/v1.3.0/blob-csi-driver/values.yaml delete mode 100644 charts/v1.4.0/blob-csi-driver-v1.4.0.tgz delete mode 100755 charts/v1.4.0/blob-csi-driver/Chart.yaml delete mode 100755 charts/v1.4.0/blob-csi-driver/templates/NOTES.txt delete mode 100755 charts/v1.4.0/blob-csi-driver/templates/_helpers.tpl delete mode 100755 charts/v1.4.0/blob-csi-driver/templates/csi-blob-controller.yaml delete mode 100755 charts/v1.4.0/blob-csi-driver/templates/csi-blob-driver.yaml delete mode 100755 charts/v1.4.0/blob-csi-driver/templates/csi-blob-node.yaml delete mode 100755 charts/v1.4.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml delete mode 100755 charts/v1.4.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml delete mode 100755 charts/v1.4.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml delete mode 100755 charts/v1.4.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml delete mode 100755 charts/v1.4.0/blob-csi-driver/values.yaml delete mode 100644 charts/v1.4.1/blob-csi-driver-v1.4.1.tgz delete mode 100755 charts/v1.4.1/blob-csi-driver/Chart.yaml delete mode 100755 charts/v1.4.1/blob-csi-driver/templates/NOTES.txt delete mode 100755 charts/v1.4.1/blob-csi-driver/templates/_helpers.tpl delete mode 100755 
charts/v1.4.1/blob-csi-driver/templates/csi-blob-controller.yaml delete mode 100755 charts/v1.4.1/blob-csi-driver/templates/csi-blob-driver.yaml delete mode 100755 charts/v1.4.1/blob-csi-driver/templates/csi-blob-node.yaml delete mode 100755 charts/v1.4.1/blob-csi-driver/templates/rbac-csi-blob-controller.yaml delete mode 100755 charts/v1.4.1/blob-csi-driver/templates/rbac-csi-blob-node.yaml delete mode 100755 charts/v1.4.1/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml delete mode 100755 charts/v1.4.1/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml delete mode 100755 charts/v1.4.1/blob-csi-driver/values.yaml delete mode 100644 charts/v1.5.0/blob-csi-driver-v1.5.0.tgz delete mode 100755 charts/v1.5.0/blob-csi-driver/Chart.yaml delete mode 100644 charts/v1.5.0/blob-csi-driver/templates/NOTES.txt delete mode 100644 charts/v1.5.0/blob-csi-driver/templates/_helpers.tpl delete mode 100644 charts/v1.5.0/blob-csi-driver/templates/csi-blob-controller.yaml delete mode 100644 charts/v1.5.0/blob-csi-driver/templates/csi-blob-driver.yaml delete mode 100644 charts/v1.5.0/blob-csi-driver/templates/csi-blob-node.yaml delete mode 100644 charts/v1.5.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml delete mode 100644 charts/v1.5.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml delete mode 100644 charts/v1.5.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml delete mode 100644 charts/v1.5.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml delete mode 100755 charts/v1.5.0/blob-csi-driver/values.yaml delete mode 100644 charts/v1.6.0/blob-csi-driver-v1.6.0.tgz delete mode 100755 charts/v1.6.0/blob-csi-driver/Chart.yaml delete mode 100644 charts/v1.6.0/blob-csi-driver/templates/NOTES.txt delete mode 100644 charts/v1.6.0/blob-csi-driver/templates/_helpers.tpl delete mode 100644 charts/v1.6.0/blob-csi-driver/templates/blobfuse-proxy.yaml delete mode 100644 charts/v1.6.0/blob-csi-driver/templates/csi-blob-controller.yaml delete mode 100644 charts/v1.6.0/blob-csi-driver/templates/csi-blob-driver.yaml delete mode 100644 charts/v1.6.0/blob-csi-driver/templates/csi-blob-node.yaml delete mode 100644 charts/v1.6.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml delete mode 100644 charts/v1.6.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml delete mode 100644 charts/v1.6.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml delete mode 100644 charts/v1.6.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml delete mode 100755 charts/v1.6.0/blob-csi-driver/values.yaml delete mode 100644 charts/v1.7.0/blob-csi-driver-v1.7.0.tgz delete mode 100644 charts/v1.7.0/blob-csi-driver/Chart.yaml delete mode 100644 charts/v1.7.0/blob-csi-driver/templates/NOTES.txt delete mode 100644 charts/v1.7.0/blob-csi-driver/templates/_helpers.tpl delete mode 100644 charts/v1.7.0/blob-csi-driver/templates/blobfuse-proxy.yaml delete mode 100644 charts/v1.7.0/blob-csi-driver/templates/csi-blob-controller.yaml delete mode 100644 charts/v1.7.0/blob-csi-driver/templates/csi-blob-driver.yaml delete mode 100644 charts/v1.7.0/blob-csi-driver/templates/csi-blob-node.yaml delete mode 100644 charts/v1.7.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml delete mode 100644 charts/v1.7.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml delete mode 100644 charts/v1.7.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml delete mode 100644 charts/v1.7.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml delete mode 100644 
charts/v1.7.0/blob-csi-driver/values.yaml delete mode 100644 charts/v1.8.0/blob-csi-driver-v1.8.0.tgz delete mode 100644 charts/v1.8.0/blob-csi-driver/Chart.yaml delete mode 100644 charts/v1.8.0/blob-csi-driver/templates/NOTES.txt delete mode 100644 charts/v1.8.0/blob-csi-driver/templates/_helpers.tpl delete mode 100644 charts/v1.8.0/blob-csi-driver/templates/blobfuse-proxy.yaml delete mode 100644 charts/v1.8.0/blob-csi-driver/templates/csi-blob-controller.yaml delete mode 100644 charts/v1.8.0/blob-csi-driver/templates/csi-blob-driver.yaml delete mode 100644 charts/v1.8.0/blob-csi-driver/templates/csi-blob-node.yaml delete mode 100644 charts/v1.8.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml delete mode 100644 charts/v1.8.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml delete mode 100644 charts/v1.8.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml delete mode 100644 charts/v1.8.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml delete mode 100644 charts/v1.8.0/blob-csi-driver/values.yaml delete mode 100644 charts/v1.9.0/blob-csi-driver-v1.9.0.tgz delete mode 100644 charts/v1.9.0/blob-csi-driver/Chart.yaml delete mode 100644 charts/v1.9.0/blob-csi-driver/templates/NOTES.txt delete mode 100644 charts/v1.9.0/blob-csi-driver/templates/_helpers.tpl delete mode 100644 charts/v1.9.0/blob-csi-driver/templates/blobfuse-proxy.yaml delete mode 100644 charts/v1.9.0/blob-csi-driver/templates/csi-blob-controller.yaml delete mode 100644 charts/v1.9.0/blob-csi-driver/templates/csi-blob-driver.yaml delete mode 100644 charts/v1.9.0/blob-csi-driver/templates/csi-blob-node.yaml delete mode 100644 charts/v1.9.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml delete mode 100644 charts/v1.9.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml delete mode 100644 charts/v1.9.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml delete mode 100644 charts/v1.9.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml delete mode 100644 charts/v1.9.0/blob-csi-driver/values.yaml delete mode 100644 deploy/v1.0.0/csi-blob-controller.yaml delete mode 100644 deploy/v1.0.0/csi-blob-driver.yaml delete mode 100644 deploy/v1.0.0/csi-blob-node.yaml delete mode 100644 deploy/v1.0.0/rbac-csi-blob-controller.yaml delete mode 100644 deploy/v1.0.0/rbac-csi-blob-node.yaml delete mode 100644 deploy/v1.1.0/csi-blob-controller.yaml delete mode 100644 deploy/v1.1.0/csi-blob-driver.yaml delete mode 100644 deploy/v1.1.0/csi-blob-node.yaml delete mode 100644 deploy/v1.1.0/kustomization.yaml delete mode 100644 deploy/v1.1.0/rbac-csi-blob-controller.yaml delete mode 100644 deploy/v1.1.0/rbac-csi-blob-node.yaml delete mode 100644 deploy/v1.2.0/csi-blob-controller.yaml delete mode 100644 deploy/v1.2.0/csi-blob-driver.yaml delete mode 100644 deploy/v1.2.0/csi-blob-node.yaml delete mode 100644 deploy/v1.2.0/kustomization.yaml delete mode 100644 deploy/v1.2.0/rbac-csi-blob-controller.yaml delete mode 100644 deploy/v1.2.0/rbac-csi-blob-node.yaml delete mode 100644 deploy/v1.3.0/csi-blob-controller.yaml delete mode 100644 deploy/v1.3.0/csi-blob-driver.yaml delete mode 100644 deploy/v1.3.0/csi-blob-node.yaml delete mode 100644 deploy/v1.3.0/kustomization.yaml delete mode 100644 deploy/v1.3.0/rbac-csi-blob-controller.yaml delete mode 100644 deploy/v1.3.0/rbac-csi-blob-node.yaml delete mode 100644 deploy/v1.4.0/csi-blob-controller.yaml delete mode 100644 deploy/v1.4.0/csi-blob-driver.yaml delete mode 100644 deploy/v1.4.0/csi-blob-node.yaml delete mode 100644 
deploy/v1.4.0/kustomization.yaml delete mode 100644 deploy/v1.4.0/rbac-csi-blob-controller.yaml delete mode 100644 deploy/v1.4.0/rbac-csi-blob-node.yaml delete mode 100644 deploy/v1.4.1/csi-blob-controller.yaml delete mode 100644 deploy/v1.4.1/csi-blob-driver.yaml delete mode 100644 deploy/v1.4.1/csi-blob-node.yaml delete mode 100644 deploy/v1.4.1/kustomization.yaml delete mode 100644 deploy/v1.4.1/rbac-csi-blob-controller.yaml delete mode 100644 deploy/v1.4.1/rbac-csi-blob-node.yaml delete mode 100644 deploy/v1.5.0/csi-blob-controller.yaml delete mode 100644 deploy/v1.5.0/csi-blob-driver.yaml delete mode 100644 deploy/v1.5.0/csi-blob-node.yaml delete mode 100644 deploy/v1.5.0/kustomization.yaml delete mode 100644 deploy/v1.5.0/rbac-csi-blob-controller.yaml delete mode 100644 deploy/v1.5.0/rbac-csi-blob-node.yaml delete mode 100644 deploy/v1.6.0/blobfuse-proxy.yaml delete mode 100644 deploy/v1.6.0/csi-blob-controller.yaml delete mode 100644 deploy/v1.6.0/csi-blob-driver.yaml delete mode 100644 deploy/v1.6.0/csi-blob-node.yaml delete mode 100644 deploy/v1.6.0/kustomization.yaml delete mode 100644 deploy/v1.6.0/rbac-csi-blob-controller.yaml delete mode 100644 deploy/v1.6.0/rbac-csi-blob-node.yaml delete mode 100644 deploy/v1.7.0/blobfuse-proxy.yaml delete mode 100644 deploy/v1.7.0/csi-blob-controller.yaml delete mode 100644 deploy/v1.7.0/csi-blob-driver.yaml delete mode 100644 deploy/v1.7.0/csi-blob-node.yaml delete mode 100644 deploy/v1.7.0/kustomization.yaml delete mode 100644 deploy/v1.7.0/rbac-csi-blob-controller.yaml delete mode 100644 deploy/v1.7.0/rbac-csi-blob-node.yaml delete mode 100644 deploy/v1.8.0/blobfuse-proxy.yaml delete mode 100644 deploy/v1.8.0/csi-blob-controller.yaml delete mode 100644 deploy/v1.8.0/csi-blob-driver.yaml delete mode 100644 deploy/v1.8.0/csi-blob-node.yaml delete mode 100644 deploy/v1.8.0/kustomization.yaml delete mode 100644 deploy/v1.8.0/rbac-csi-blob-controller.yaml delete mode 100644 deploy/v1.8.0/rbac-csi-blob-node.yaml delete mode 100644 deploy/v1.9.0/blobfuse-proxy.yaml delete mode 100644 deploy/v1.9.0/csi-blob-controller.yaml delete mode 100644 deploy/v1.9.0/csi-blob-driver.yaml delete mode 100644 deploy/v1.9.0/csi-blob-node.yaml delete mode 100644 deploy/v1.9.0/kustomization.yaml delete mode 100644 deploy/v1.9.0/rbac-csi-blob-controller.yaml delete mode 100644 deploy/v1.9.0/rbac-csi-blob-node.yaml delete mode 100644 docs/install-csi-driver-v1.0.0.md delete mode 100644 docs/install-csi-driver-v1.1.0.md delete mode 100644 docs/install-csi-driver-v1.2.0.md delete mode 100644 docs/install-csi-driver-v1.3.0.md delete mode 100644 docs/install-csi-driver-v1.4.0.md delete mode 100644 docs/install-csi-driver-v1.5.0.md delete mode 100644 docs/install-csi-driver-v1.6.0.md delete mode 100644 docs/install-csi-driver-v1.7.0.md delete mode 100644 docs/install-csi-driver-v1.8.0.md delete mode 100644 docs/install-csi-driver-v1.9.0.md diff --git a/charts/index.yaml b/charts/index.yaml index 833c15ba2..4a03dd7db 100644 --- a/charts/index.yaml +++ b/charts/index.yaml @@ -3,7 +3,7 @@ entries: blob-csi-driver: - apiVersion: v1 appVersion: v1.24.1 - created: "2024-04-12T07:18:31.462033556Z" + created: "2024-04-12T08:55:58.602497989Z" description: Azure Blob Storage CSI driver digest: 82537068e57177cf6d0697c7c942aaf496de9280fbc7b96ab326776fa4db8481 name: blob-csi-driver @@ -12,7 +12,7 @@ entries: version: v1.24.1 - apiVersion: v1 appVersion: v1.24.0 - created: "2024-04-12T07:18:31.461360864Z" + created: "2024-04-12T08:55:58.601837129Z" description: Azure Blob Storage CSI 
driver digest: 3b3b1a4fee786f0c9445da2f35bd1e251265cec8ee03c3cf99fe9f0bf9575e09 name: blob-csi-driver @@ -21,7 +21,7 @@ entries: version: v1.24.0 - apiVersion: v1 appVersion: v1.23.4 - created: "2024-04-12T07:18:31.460274467Z" + created: "2024-04-12T08:55:58.601188521Z" description: Azure Blob Storage CSI driver digest: 20d0157476ecf0a38b2d3982587b9657b27e1603b2185709faeff73b6d671ceb name: blob-csi-driver @@ -30,7 +30,7 @@ entries: version: v1.23.4 - apiVersion: v1 appVersion: v1.23.3 - created: "2024-04-12T07:18:31.459269166Z" + created: "2024-04-12T08:55:58.600377379Z" description: Azure Blob Storage CSI driver digest: 21c8355faf362c527e40ac417b17d3c28fa041941633254358387997b9bee8a6 name: blob-csi-driver @@ -39,7 +39,7 @@ entries: version: v1.23.3 - apiVersion: v1 appVersion: v1.23.2 - created: "2024-04-12T07:18:31.458611981Z" + created: "2024-04-12T08:55:58.598642302Z" description: Azure Blob Storage CSI driver digest: 057d6658c5879ee7e564d59275366521dc0a2e311c0527e570eaccd544622e60 name: blob-csi-driver @@ -48,7 +48,7 @@ entries: version: v1.23.2 - apiVersion: v1 appVersion: v1.23.1 - created: "2024-04-12T07:18:31.457969298Z" + created: "2024-04-12T08:55:58.598025748Z" description: Azure Blob Storage CSI driver digest: 66215f12a4e3acdcf09416d817b737e14546058b081a2cfd8bf9ef507229ca07 name: blob-csi-driver @@ -57,7 +57,7 @@ entries: version: v1.23.1 - apiVersion: v1 appVersion: v1.23.0 - created: "2024-04-12T07:18:31.457353804Z" + created: "2024-04-12T08:55:58.597382253Z" description: Azure Blob Storage CSI driver digest: 57151e21e33660522f25694bd8ae985e5e17c7ffe09904ad2af4025e8bf1da72 name: blob-csi-driver @@ -66,7 +66,7 @@ entries: version: v1.23.0 - apiVersion: v1 appVersion: v1.22.6 - created: "2024-04-12T07:18:31.456725713Z" + created: "2024-04-12T08:55:58.596756053Z" description: Azure Blob Storage CSI driver digest: 82e30775aa05e093c30884f480d505b408023dfa32944cf21ebffe9d88afb1f2 name: blob-csi-driver @@ -75,7 +75,7 @@ entries: version: v1.22.6 - apiVersion: v1 appVersion: v1.22.5 - created: "2024-04-12T07:18:31.456077167Z" + created: "2024-04-12T08:55:58.596140123Z" description: Azure Blob Storage CSI driver digest: ff3c2c2e05dd048dd0af3e5c7d002eae2928a5d17fb269a1e4d5cadd30e8ab51 name: blob-csi-driver @@ -84,7 +84,7 @@ entries: version: v1.22.5 - apiVersion: v1 appVersion: v1.22.4 - created: "2024-04-12T07:18:31.45545871Z" + created: "2024-04-12T08:55:58.59548611Z" description: Azure Blob Storage CSI driver digest: 6c38e79d2f50616daac0658cfa5b1a569e6ff8ce8f24ed40f563e87fb1d1340a name: blob-csi-driver @@ -93,7 +93,7 @@ entries: version: v1.22.4 - apiVersion: v1 appVersion: v1.22.3 - created: "2024-04-12T07:18:31.454792765Z" + created: "2024-04-12T08:55:58.594821407Z" description: Azure Blob Storage CSI driver digest: 6cdee296d22ecd330f477f2ca6da51b07320c546c04ae46c23eef48146b772c1 name: blob-csi-driver @@ -102,7 +102,7 @@ entries: version: v1.22.3 - apiVersion: v1 appVersion: v1.22.2 - created: "2024-04-12T07:18:31.454147089Z" + created: "2024-04-12T08:55:58.594152673Z" description: Azure Blob Storage CSI driver digest: 259e66dc12db7310fe1c51e49c964398e0a6b7d511133916dd7d25f748f0b791 name: blob-csi-driver @@ -111,7 +111,7 @@ entries: version: v1.22.2 - apiVersion: v1 appVersion: v1.22.1 - created: "2024-04-12T07:18:31.453303054Z" + created: "2024-04-12T08:55:58.592491498Z" description: Azure Blob Storage CSI driver digest: 8329d477d55c82f97bb09fb172c5f39a1677bedc13c7410bd93b306194516438 name: blob-csi-driver @@ -120,7 +120,7 @@ entries: version: v1.22.1 - apiVersion: v1 
appVersion: v1.21.7 - created: "2024-04-12T07:18:31.451715932Z" + created: "2024-04-12T08:55:58.591863979Z" description: Azure Blob Storage CSI driver digest: 1095721182d611e2556c611dd330758d8130fe66493db4f9189586a9219896d3 name: blob-csi-driver @@ -129,7 +129,7 @@ entries: version: v1.21.7 - apiVersion: v1 appVersion: v1.21.6 - created: "2024-04-12T07:18:31.451094812Z" + created: "2024-04-12T08:55:58.591242134Z" description: Azure Blob Storage CSI driver digest: d5ba1f92795ec45970eb6e5fc54aa13a5684f9936216c064f8a3843bf722bf54 name: blob-csi-driver @@ -138,7 +138,7 @@ entries: version: v1.21.6 - apiVersion: v1 appVersion: v1.21.5 - created: "2024-04-12T07:18:31.450468305Z" + created: "2024-04-12T08:55:58.590628223Z" description: Azure Blob Storage CSI driver digest: b403e9d49abfe076ecd83d6dd50166347ee4305f33dc840019474b2876723b9b name: blob-csi-driver @@ -147,7 +147,7 @@ entries: version: v1.21.5 - apiVersion: v1 appVersion: v1.21.4 - created: "2024-04-12T07:18:31.449862625Z" + created: "2024-04-12T08:55:58.590049005Z" description: Azure Blob Storage CSI driver digest: e4fa13670caf6b0d3e9fefa55d100daa439cd7187dabd45318ab03c7d4b17710 name: blob-csi-driver @@ -156,7 +156,7 @@ entries: version: v1.21.4 - apiVersion: v1 appVersion: v1.20.3 - created: "2024-04-12T07:18:31.449232951Z" + created: "2024-04-12T08:55:58.589465013Z" description: Azure Blob Storage CSI driver digest: 8c2c20547b2e0e1b39d2f2efd04c1bd778f14af5feae2bda86d722dac3c02643 name: blob-csi-driver @@ -165,7 +165,7 @@ entries: version: v1.20.3 - apiVersion: v1 appVersion: v1.19.6 - created: "2024-04-12T07:18:31.448183785Z" + created: "2024-04-12T08:55:58.588813646Z" description: Azure Blob Storage CSI driver digest: 0007ef225b5658d3989aa6fdc3a91a4b33696a438eee46ad9a675af615cbdf21 name: blob-csi-driver @@ -174,7 +174,7 @@ entries: version: v1.19.6 - apiVersion: v1 appVersion: v1.19.5 - created: "2024-04-12T07:18:31.447588419Z" + created: "2024-04-12T08:55:58.588220429Z" description: Azure Blob Storage CSI driver digest: 183c3e5cd84b709f1455cc7c84ed5bd573e8a24149fd6442d38999835b0a1711 name: blob-csi-driver @@ -183,7 +183,7 @@ entries: version: v1.19.5 - apiVersion: v1 appVersion: v1.18.0 - created: "2024-04-12T07:18:31.446939966Z" + created: "2024-04-12T08:55:58.587601765Z" description: Azure Blob Storage CSI driver digest: 3eac15488da5be7d1e78431929f7cda35bceb1af3fe107ffbd84606e047c9204 name: blob-csi-driver @@ -192,7 +192,7 @@ entries: version: v1.18.0 - apiVersion: v1 appVersion: v1.17.0 - created: "2024-04-12T07:18:31.44632176Z" + created: "2024-04-12T08:55:58.586876831Z" description: Azure Blob Storage CSI driver digest: 22cfa17fc5e8d771ff8edd26729266a9a8ee55c0e150df85ef15698f7fe985e9 name: blob-csi-driver @@ -201,7 +201,7 @@ entries: version: v1.17.0 - apiVersion: v1 appVersion: v1.16.0 - created: "2024-04-12T07:18:31.445647499Z" + created: "2024-04-12T08:55:58.585341647Z" description: Azure Blob Storage CSI driver digest: bf6249c0e3e3d3d009d4c79ceb7fda9a56c0565b969de753628792ea3ea5ece8 name: blob-csi-driver @@ -210,7 +210,7 @@ entries: version: v1.16.0 - apiVersion: v1 appVersion: v1.15.0 - created: "2024-04-12T07:18:31.443843725Z" + created: "2024-04-12T08:55:58.58473786Z" description: Azure Blob Storage CSI driver digest: 8daa35cd4957695cb64b45da05a15b4020df5545a8ac44c4668dad4bba82c8a9 name: blob-csi-driver @@ -219,7 +219,7 @@ entries: version: v1.15.0 - apiVersion: v1 appVersion: v1.14.0 - created: "2024-04-12T07:18:31.443242253Z" + created: "2024-04-12T08:55:58.584123662Z" description: Azure Blob Storage CSI driver 
digest: 442bc579b231aab626b9e474e2c0ed3f101d47d61c99aa9a7f863af7ce268d9d name: blob-csi-driver @@ -228,7 +228,7 @@ entries: version: v1.14.0 - apiVersion: v1 appVersion: v1.13.0 - created: "2024-04-12T07:18:31.442607498Z" + created: "2024-04-12T08:55:58.58353028Z" description: Azure Blob Storage CSI driver digest: b577b0b771138109aa90eb09d56fc07273ca0b584a263ee8f789e35796279f31 name: blob-csi-driver @@ -237,7 +237,7 @@ entries: version: v1.13.0 - apiVersion: v1 appVersion: v1.12.0 - created: "2024-04-12T07:18:31.44197167Z" + created: "2024-04-12T08:55:58.582895435Z" description: Azure Blob Storage CSI driver digest: 124e87af2581b374b89a39940698620c23d3eae6dcee518d302461ffea93e9a8 name: blob-csi-driver @@ -246,7 +246,7 @@ entries: version: v1.12.0 - apiVersion: v1 appVersion: v1.11.0 - created: "2024-04-12T07:18:31.441339836Z" + created: "2024-04-12T08:55:58.58228131Z" description: Azure Blob Storage CSI driver digest: 07c4d76017491b3d0bdd70de90e814096938bf7916da0c149c3805294bd57560 name: blob-csi-driver @@ -255,119 +255,20 @@ entries: version: v1.11.0 - apiVersion: v1 appVersion: v1.10.0 - created: "2024-04-12T07:18:31.440704203Z" + created: "2024-04-12T08:55:58.58165317Z" description: Azure Blob Storage CSI driver digest: 79716efa958385adf57eb3570843e1b4512d8c801e8e070625e94264f3e917a9 name: blob-csi-driver urls: - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.10.0/blob-csi-driver-v1.10.0.tgz version: v1.10.0 - - apiVersion: v1 - appVersion: v1.9.0 - created: "2024-04-12T07:18:31.466447348Z" - description: Azure Blob Storage CSI driver - digest: fca0b9215d3277346f68c643fb3ead75158971f0d1945ab01ec559196f3cf842 - name: blob-csi-driver - urls: - - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.9.0/blob-csi-driver-v1.9.0.tgz - version: v1.9.0 - - apiVersion: v1 - appVersion: v1.8.0 - created: "2024-04-12T07:18:31.465869124Z" - description: Azure Blob Storage CSI driver - digest: 3b78e2ab4f33577c54d4f57276c824717d2ad2aa3741210e938fcaf927bc751f - name: blob-csi-driver - urls: - - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.8.0/blob-csi-driver-v1.8.0.tgz - version: v1.8.0 - - apiVersion: v1 - appVersion: v1.7.0 - created: "2024-04-12T07:18:31.465292778Z" - description: Azure Blob Storage CSI driver - digest: 28da5b55c3d2689d6da85eb7da344385e9cb99bdb2af18c24fea93670abfe7ea - name: blob-csi-driver - urls: - - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.7.0/blob-csi-driver-v1.7.0.tgz - version: v1.7.0 - - apiVersion: v1 - appVersion: v1.6.0 - created: "2024-04-12T07:18:31.464694601Z" - description: Azure Blob Storage CSI driver - digest: 6f24f2e6623f6f8862e47d4fbdf13b5f351ceec6bb9a4591ef7fc2fca9fc1eef - name: blob-csi-driver - urls: - - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.6.0/blob-csi-driver-v1.6.0.tgz - version: v1.6.0 - - apiVersion: v1 - appVersion: v1.5.0 - created: "2024-04-12T07:18:31.464152794Z" - description: Azure Blob Storage CSI driver - digest: 95d14c9b70b319760d388ea47727c8c97e9287867a8852aeb67b7175b52fe8f5 - name: blob-csi-driver - urls: - - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.5.0/blob-csi-driver-v1.5.0.tgz - version: v1.5.0 - - apiVersion: v1 - appVersion: v1.4.1 - created: "2024-04-12T07:18:31.46361856Z" - description: Azure Blob Storage CSI driver - digest: 5fcf69c449f065fa1d5722e5a7fed8a28000efa790907e9ff4b552c5fbd16d22 - name: 
blob-csi-driver - urls: - - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.4.1/blob-csi-driver-v1.4.1.tgz - version: v1.4.1 - - apiVersion: v1 - appVersion: v1.4.0 - created: "2024-04-12T07:18:31.463097533Z" - description: Azure Blob Storage CSI driver - digest: b466543344a6411f6130ba87b093955d39ab8614c6b4ed8505a0a0c96073cb33 - name: blob-csi-driver - urls: - - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.4.0/blob-csi-driver-v1.4.0.tgz - version: v1.4.0 - - apiVersion: v1 - appVersion: v1.3.0 - created: "2024-04-12T07:18:31.462518429Z" - description: Azure Blob Storage CSI driver - digest: 58d02cb70a3a966b349d62e880b7149fb06ac009474e35e580784fd3c98a5b07 - name: blob-csi-driver - urls: - - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.3.0/blob-csi-driver-v1.3.0.tgz - version: v1.3.0 - - apiVersion: v1 - appVersion: v1.2.0 - created: "2024-04-12T07:18:31.448639791Z" - description: Azure Blob Storage CSI driver - digest: 27fb89f20b5fddc7329e6d7c2374857b22c1d61592e397a53f47121eea68c344 - name: blob-csi-driver - urls: - - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.2.0/blob-csi-driver-v1.2.0.tgz - version: v1.2.0 - - apiVersion: v1 - appVersion: v1.1.0 - created: "2024-04-12T07:18:31.440082082Z" - description: Azure Blob Storage CSI driver - digest: a251a55243de207c69ef53f72abee45e93b72fa4fc43dc204b7f1cdfd459acdb - name: blob-csi-driver - urls: - - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.1.0/blob-csi-driver-v1.1.0.tgz - version: v1.1.0 - - apiVersion: v1 - appVersion: v1.0.0 - created: "2024-04-12T07:18:31.439612191Z" - description: Azure Blob Storage CSI driver - digest: e83f037a165eafc83a978bd7e6bf6221b052ac34363aecb12e6a73607dc58b89 - name: blob-csi-driver - urls: - - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/v1.0.0/blob-csi-driver-v1.0.0.tgz - version: v1.0.0 - apiVersion: v1 appVersion: latest - created: "2024-04-12T07:18:31.439263063Z" + created: "2024-04-12T08:55:58.580991358Z" description: Azure Blob Storage CSI driver digest: c124c5652331e6b3e47e600740a65aa86b1cd31a899a586dec4002c59d51268e name: blob-csi-driver urls: - https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/master/charts/latest/blob-csi-driver-v0.0.0.tgz version: v0.0.0 -generated: "2024-04-12T07:18:31.438375493Z" +generated: "2024-04-12T08:55:58.580173888Z" diff --git a/charts/v1.0.0/blob-csi-driver-v1.0.0.tgz b/charts/v1.0.0/blob-csi-driver-v1.0.0.tgz deleted file mode 100644 index 56880b8860e35a33ceb8fd091bc4e7a313f8bbe6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3656 zcmV-O4!7|iiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PH+}bLzIT=WjlRhIZ!m-KoJ4lGE#%{BfGlHq#!G3AugqUZ;~0 z_5!+*C0CMZIC*^cHNB)Nm_JfP7-;wTCMh%b}bJmA2_}wP9VqxMDt>dg=$Q3 zNI&eYxvuM;bvov!>$>@$ZtLviL#yMqPCK3U$;Yz~ZmZKeYkxrQ{zxcqu~b-maCe?7 zzqohOa72CuAqi(5nzU*-ic)_z+=g2VK>C72%JhBu5(_}zQ$9d_#RVP#y69gaD?BxZ zL-0^eq-v8K!q;B%v87sG|0kHnAoptkD)hhIYP)&;@3c;i`hSQ%F*zLQN~lIC1+%n*e~3-#82M=_;0oDzTPp{rq!s~Z6l zn5rSF(XJ+d@c=H3!rj7%NG0$toY2@3{dL3~Uxp;N(b`~NZb6bSJMD?E7&80Os$xg& ztFO>43_$?%0V$3+1H=Uixd0@=7oeo>9sr6Z1nAE(FoTV-5i;^CH3|Vk4~>CZ5=AUzyHttLTCh!3B9pT|sTzWM^N0uLz>L#-wT*kA3{L%U@l%|2tPKzL|~snqgF`REsz0QJymt)?ZU z5ZP%SQZtT%p_LLAKi*(9_E2+zMU#?2GkILA`IN^&5)a(O14{f0-E(yA@2uthzk)EL zSb=QzuJ5}2hI&*x&rucspPaRG{{Qss?DXjW57F+%us*0qM4}NWrHyR>5#>_|v^Adc 
zj8d}_AxKVqam~2&Q1@TQ2Zs&4IF$HUc&MxXN}{Xi0~QjvhlZF6C6APi-opf_hmQ0B z!4mF+FpFzbLZucVkz)22LVlchsMBtT^CuyMS|ASE?GGe<%Hd-I!ZNNptUDWJQNZV* z?`()Zvvmuwc5PU@8)7YM*zOpPheHw(IrY%vQ)+$sK*5)cr;j9~z_If2jBqzpAFKEs zRb&eyJfNU_!eJudnzM&#CWeFb@fr5VaE;~ILvE-IPDv!cvPdkn$gfH+o84x0^k!I} zFB{gi8UvgG(N}~ZHbgjzm^5Um-2n%I{)bW|$ADrMh(^qU)2x@CP6Z6b1E9+w zyw@FOgO%LtV-H5xoG?!0^o(Mm0YD_w>1fIlu16!0@Cli~T-1>mGcH%%uzVKz5X)@O z($jF&z7?maotOiJ`fTPl;ygWdlXZ^ z0+bQ(8tFFSONtyvxz!@ZKAb#0qSg)Qeyz*dQ}pztNHV}2N2J@;Nt2tNl~L&Fsfirt z6PEsXZ%qnqvHstUfyTf^OU4_9f@<`?b9m4$>HqQ3QFpKZyJ(M(?my9lgr3^V(U4G( z(-=-d#TI`I9{Q*I^wenRZoP*XM}hVcl!hwrN&WV9%0Tyfd~{F%LlOaMD#>c8eof?L zuW&*oYMCs9R?XILCgP}iUQK{=5&-(5j>3_Tjt}%t684kfkUXHKlL3P$(7z3BsT((B z9F2j7+35n=2{Zi-H1k_|syL2YMNf=GLgL5=4>fI8#X2U$5gtL{45p<~eG?B@IM9R> z)R37P7TDNYIB*_f*XRs)Pv@chbeQ5xON?E;UWKmdY@F=LY~&XmYEU+}`QJ^{ylZM$ zRrbdTrSn*gT7yfGcB;({&x;Lol^d<+kX3Ypa0uQyC+fLp>yrng%HU`J|T$X zSh$l;BLs;9EU_|?MvpAQk-pO&no%^HTD8tMM zN%n6XQp@`WW0;dBC zaklw77I$?fJfwqYi{vJ+1z8RtCl^I{oKf&ls5_NKC*}8|70i>HYVoS8W(iB>6uI&i zeL)`+CLsfmk3Y`IcF;X2_e$r!)Wj-2n3`o4Ek%gM3JT*MYW6-RnC9Q>-Yc5(vboV zfIE-|g)KcK34tZ@}_tGpjAZ{QEMy zuX7pII#y=c*CIzJj3yy`P*$tL*-$-Ab2hU~f7T53iY|~&K=Q(nrFM{IH!JMjX~z<| zxqYtF%@aIh+AIU%Wa;-DW75M-fggpOMV%ZLI~y@<*2rN|;ubBO>8Yuni`9XsbTH`@ zCuUrpbpLgH(0$h07s+2|>6a|7YKtXfjioP8Tf6dVM_pyTwWC(E%(~BNtJ`#($%)+o zon=EyP-fO43SvegdGhUdXZ?%2-sSoA)y3tl%@C!z*0~I|TlzoGjy9K>En!wtQ<=cr ztY3(IrW`}~lMKH&HlnBKs!+F`aV_;bQR?QelI zBor+Dv>&9nXL-kTb`sNqyIh<|UN7w1U=ooB&vh3Y)zmSDkD2IEmISD&(-D1%{d=89 zZkA=Q$jsj5Zx=ULmmhkUx0;*Rr?)>Z*qA~3xumVDnIse*jkxuvU{Qs_f}e#}GUfAF z+0Hia^5D`$t$;ESSpsO5{QrdU2qNizFk#XQW$cNZI(?I6D(gQ799%_oY7wBh;V8{- zV`&hAF4^%EM}(lR4_rQjTB`)uh}CDlmZ^+D%^?Jg)1MbB4W#K6!i0_H(V@L8t zNLPEqzAQ-BcXJq)_*9MDVny>jmV0&Rye!NdamkHta?PT9&l=oE&p)FI0`5@ z#z;6MN(4B-IpZg~IO`u@_i!@f)ob@oGg23bmLb$~Shms!VJxTTWL8^M9AP!1VMFqpgttGjBGy5>#9N)jlfc|JrZg?(=`UXpZ9) zQhvhJY_;xxFogx3hRG0~^)F0$v?Up+&nU_d(EC@*XW=|~ho!{+_y#^EghP;(g~u$o zh=%Mc(i{BTCp3HMdol!n>Qnf@g4xFEpld~0XwZc zHP?F726~=w5{=RfFo{MNqlnEP_a4Ac^fmp$X(CVHE$~p3BfXB%t=>b-L5O0SJ>N@5 zdDWrP@;0B1v>-CpV_ah(y9NHyidB z2^(jF3(~P1M84~~8flUHze<6GFc0s({W(kIJa5TVIkCt zb6?G|uja5@``T)b8K?!iT@SUW;)@X!HFtGx4++p<0h>!-RI7H-gUu$P&fZ#~^!H-& zo&GZmTFPmulIuh!gu)1diuCQG5N4T=Vs!&Ji}47X@8-QDfzHEf<*}PfHAZi0f4_R1 zBHl2P)=?%Djs5YKHDO=O^U8~PN>1;RG9F8<#j+IoddG_5R$nZlxZ-EJ&Og^@p#@o= zwzVi&U_dtJ&|UT&v~|c@cw$;k*jkk>OUz#lwl!>F)zq!bYAx`UFJl!|W}!{om)7h{ zYqr-G=YRPC`_5J_f!BTi_2{U4|GV?HeZ0^A?xK~XS}l6ZTxHWAWk`!ZOv3GkQ79yc zb#I)=G2`U3w(z;S5qq780}m2-glYaBp}sx2dV-iI)c637I&A&KSrQ8m{oYK2nmK3V z_hvk=8cBVJw9^FmKzA5{Y<`cLlnDK={zj1cVVIzP1!_YCJ#GzzKE)(l8EX;;*p91M zm}A47_V6hdV#Rr`I477uB-h|vI-BoNGhT$QeplRmLzh5o zg=r1PaT--sd`F^yM5B#0vUI7}8KiJmFW5*1e}XSll{9qr3t9L^=Fu#<6~9UqiLw-F z;H7GtRx~tdzMe}MwutN7Gl1zWgx6^NR#4n6+rEu3xaNy;WkhSv!fx$0Gjlt)8A^Cx z)MBhBy|3I<6ejfyGqF`GVV$-vp)zzm#{#6E8=AF9`T`@e%$iE8I9RemQZl{Hum z+lqc4-u8`={P$?THbehn?fLrud{3}e|0~~r?H(N-?Dc;aZCCoAIXHXG-?x3+w|#pJ S?f(M+0RR8FFiT|sdH?{v$wd4B diff --git a/charts/v1.2.0/blob-csi-driver/Chart.yaml b/charts/v1.2.0/blob-csi-driver/Chart.yaml deleted file mode 100755 index 7b6e49f93..000000000 --- a/charts/v1.2.0/blob-csi-driver/Chart.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: v1 -appVersion: v1.2.0 -description: Azure Blob Storage CSI driver -name: blob-csi-driver -version: v1.2.0 diff --git a/charts/v1.2.0/blob-csi-driver/templates/NOTES.txt b/charts/v1.2.0/blob-csi-driver/templates/NOTES.txt deleted file mode 100755 index 9ad135dd4..000000000 --- a/charts/v1.2.0/blob-csi-driver/templates/NOTES.txt +++ /dev/null @@ -1,5 +0,0 @@ -The Azure Blob Storage CSI driver is getting deployed 
to your cluster. - -To check Azure Blob Storage CSI driver pods status, please run: - - kubectl --namespace={{ .Release.Namespace }} get pods --selector="release={{ .Release.Name }}" --watch diff --git a/charts/v1.2.0/blob-csi-driver/templates/_helpers.tpl b/charts/v1.2.0/blob-csi-driver/templates/_helpers.tpl deleted file mode 100755 index 5231cd26f..000000000 --- a/charts/v1.2.0/blob-csi-driver/templates/_helpers.tpl +++ /dev/null @@ -1,26 +0,0 @@ -{{/* vim: set filetype=mustache: */}} - -{{/* Expand the name of the chart.*/}} -{{- define "blob.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* labels for helm resources */}} -{{- define "blob.labels" -}} -labels: - app.kubernetes.io/instance: "{{ .Release.Name }}" - app.kubernetes.io/managed-by: "{{ .Release.Service }}" - app.kubernetes.io/name: "{{ template "blob.name" . }}" - app.kubernetes.io/version: "{{ .Chart.AppVersion }}" - helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" -{{- end -}} - -{{/* pull secrets for containers */}} -{{- define "blob.pullSecrets" -}} -{{- if .Values.imagePullSecrets }} -imagePullSecrets: -{{- range .Values.imagePullSecrets }} - - name: {{ . }} -{{- end }} -{{- end }} -{{- end -}} \ No newline at end of file diff --git a/charts/v1.2.0/blob-csi-driver/templates/csi-blob-controller.yaml b/charts/v1.2.0/blob-csi-driver/templates/csi-blob-controller.yaml deleted file mode 100755 index 25dd949aa..000000000 --- a/charts/v1.2.0/blob-csi-driver/templates/csi-blob-controller.yaml +++ /dev/null @@ -1,180 +0,0 @@ -kind: Deployment -apiVersion: apps/v1 -metadata: - name: csi-blob-controller - namespace: {{ .Release.Namespace }} -{{ include "blob.labels" . | indent 2 }} -spec: - replicas: {{ .Values.controller.replicas }} - selector: - matchLabels: - app: csi-blob-controller - template: - metadata: -{{ include "blob.labels" . | indent 6 }} - app: csi-blob-controller - {{- if .Values.podLabels }} -{{- toYaml .Values.podLabels | nindent 8 }} - {{- end }} -{{- if .Values.podAnnotations }} - annotations: -{{ toYaml .Values.podAnnotations | indent 8 }} -{{- end }} - spec: -{{- with .Values.controller.affinity }} - affinity: -{{ toYaml . | indent 8 }} -{{- end }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: -{{ toYaml .Values.imagePullSecrets | indent 8 }} - {{- end }} - hostNetwork: true - serviceAccountName: csi-blob-controller-sa - nodeSelector: - kubernetes.io/os: linux - {{- if .Values.controller.runOnMaster}} - kubernetes.io/role: master - {{- end}} -{{- with .Values.controller.nodeSelector }} -{{ toYaml . | indent 8 }} -{{- end }} - priorityClassName: {{ .Values.priorityClassName | quote }} - tolerations: - - key: "node-role.kubernetes.io/master" - operator: "Equal" - value: "true" - effect: "NoSchedule" - - key: "node-role.kubernetes.io/controlplane" - operator: "Equal" - value: "true" - effect: "NoSchedule" -{{- with .Values.controller.tolerations }} -{{ toYaml . 
| indent 8 }} -{{- end }} - containers: - - name: csi-provisioner - image: {{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }} - args: - - "-v=2" - - "--csi-address=$(ADDRESS)" - - "--leader-election" - - "--timeout=60s" - - "--extra-create-metadata=true" - env: - - name: ADDRESS - value: /csi/csi.sock - imagePullPolicy: {{ .Values.image.csiProvisioner.pullPolicy }} - volumeMounts: - - mountPath: /csi - name: socket-dir - resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }} - - name: liveness-probe - image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" - args: - - --csi-address=/csi/csi.sock - - --probe-timeout=3s - - --health-port=29632 - imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} - volumeMounts: - - name: socket-dir - mountPath: /csi - resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }} - - name: blob - image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" - args: - - "--v={{ .Values.controller.logLevel }}" - - "--endpoint=$(CSI_ENDPOINT)" - - "--metrics-address=0.0.0.0:{{ .Values.controller.metricsPort }}" - ports: - - containerPort: 29632 - name: healthz - protocol: TCP - - containerPort: {{ .Values.controller.metricsPort }} - name: metrics - protocol: TCP - livenessProbe: - failureThreshold: 5 - httpGet: - path: /healthz - port: healthz - initialDelaySeconds: 30 - timeoutSeconds: 10 - periodSeconds: 30 - env: - - name: AZURE_CREDENTIAL_FILE - valueFrom: - configMapKeyRef: - name: azure-cred-file - key: path - optional: true - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: AZURE_ENVIRONMENT_FILEPATH - value: /etc/kubernetes/azurestackcloud.json - {{- end }} - imagePullPolicy: {{ .Values.image.blob.pullPolicy }} - volumeMounts: - - mountPath: /csi - name: socket-dir - - mountPath: /etc/kubernetes/ - name: azure-cred - - mountPath: /var/lib/waagent/ManagedIdentity-Settings - readOnly: true - name: msi - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: ssl - mountPath: /etc/ssl/certs - readOnly: true - {{- end }} - {{- if eq .Values.linux.distro "fedora" }} - - name: ssl - mountPath: /etc/ssl/certs - readOnly: true - - name: ssl-pki - mountPath: /etc/pki/ca-trust/extracted - readOnly: true - {{- end }} - resources: {{- toYaml .Values.controller.resources.blob | nindent 12 }} - - name: csi-resizer - image: "{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" - args: - - "-csi-address=$(ADDRESS)" - - "-v=2" - - "-leader-election" - - '-handle-volume-inuse-error=false' - env: - - name: ADDRESS - value: /csi/csi.sock - imagePullPolicy: {{ .Values.image.csiResizer.pullPolicy }} - volumeMounts: - - name: socket-dir - mountPath: /csi - resources: {{- toYaml .Values.controller.resources.csiResizer | nindent 12 }} - volumes: - - name: socket-dir - emptyDir: {} - - name: azure-cred - hostPath: - path: /etc/kubernetes/ - type: Directory - - name: msi - hostPath: - path: /var/lib/waagent/ManagedIdentity-Settings - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: ssl - hostPath: - path: /etc/ssl/certs - {{- end }} - {{- if eq .Values.linux.distro "fedora" }} - - name: ssl - hostPath: - path: /etc/ssl/certs - - name: ssl-pki - hostPath: - path: /etc/pki/ca-trust/extracted - {{- end }} - {{- if .Values.securityContext }} - securityContext: {{- toYaml .Values.securityContext | nindent 8 }} - {{- end }} diff --git 
a/charts/v1.2.0/blob-csi-driver/templates/csi-blob-driver.yaml b/charts/v1.2.0/blob-csi-driver/templates/csi-blob-driver.yaml deleted file mode 100755 index 7b216feab..000000000 --- a/charts/v1.2.0/blob-csi-driver/templates/csi-blob-driver.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -apiVersion: storage.k8s.io/v1 -kind: CSIDriver -metadata: - name: blob.csi.azure.com -spec: - attachRequired: false - podInfoOnMount: true - volumeLifecycleModes: - - Persistent - - Ephemeral diff --git a/charts/v1.2.0/blob-csi-driver/templates/csi-blob-node.yaml b/charts/v1.2.0/blob-csi-driver/templates/csi-blob-node.yaml deleted file mode 100755 index b20b36461..000000000 --- a/charts/v1.2.0/blob-csi-driver/templates/csi-blob-node.yaml +++ /dev/null @@ -1,199 +0,0 @@ -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: csi-blob-node - namespace: {{ .Release.Namespace }} -{{ include "blob.labels" . | indent 2 }} -spec: - selector: - matchLabels: - app: csi-blob-node - template: - metadata: -{{ include "blob.labels" . | indent 6 }} - app: csi-blob-node - {{- if .Values.podLabels }} -{{- toYaml .Values.podLabels | nindent 8 }} - {{- end }} -{{- if .Values.podAnnotations }} - annotations: -{{ toYaml .Values.podAnnotations | indent 8 }} -{{- end }} - spec: - {{- if .Values.imagePullSecrets }} - imagePullSecrets: -{{ toYaml .Values.imagePullSecrets | indent 8 }} - {{- end }} - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - serviceAccountName: csi-blob-node-sa - nodeSelector: - kubernetes.io/os: linux -{{- with .Values.node.nodeSelector }} -{{ toYaml . | indent 8 }} -{{- end }} - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: type - operator: NotIn - values: - - virtual-kubelet - {{- if .Values.node.affinity }} -{{- toYaml .Values.node.affinity | nindent 8 }} - {{- end }} - priorityClassName: {{ .Values.priorityClassName | quote }} - tolerations: - - operator: "Exists" - {{- if .Values.node.tolerations }} -{{- toYaml .Values.node.tolerations | nindent 8 }} - {{- end }} - containers: - - name: liveness-probe - imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} - volumeMounts: - - mountPath: /csi - name: socket-dir - image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" - args: - - --csi-address=/csi/csi.sock - - --probe-timeout=3s - - --health-port={{ .Values.node.livenessProbe.healthPort }} - - --v=2 - resources: {{- toYaml .Values.node.resources.livenessProbe | nindent 12 }} - - name: node-driver-registrar - image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" - args: - - --csi-address=$(ADDRESS) - - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) - - --v=2 - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "rm -rf /registration/blob.csi.azure.com-reg.sock /csi/csi.sock"] - env: - - name: ADDRESS - value: /csi/csi.sock - - name: DRIVER_REG_SOCK_PATH - value: {{ .Values.linux.kubelet }}/plugins/blob.csi.azure.com/csi.sock - volumeMounts: - - name: socket-dir - mountPath: /csi - - name: registration-dir - mountPath: /registration - resources: {{- toYaml .Values.node.resources.nodeDriverRegistrar | nindent 12 }} - - name: blob - image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" - args: - - "--v={{ .Values.node.logLevel }}" - - "--endpoint=$(CSI_ENDPOINT)" - - "--blobfuse-proxy-endpoint=$(BLOBFUSE_PROXY_ENDPOINT)" - - "--enable-blobfuse-proxy={{ .Values.node.enableBlobfuseProxy }}" - - 
"--nodeid=$(KUBE_NODE_NAME)" - - "--metrics-address=0.0.0.0:{{ .Values.node.metricsPort }}" - ports: - - containerPort: 29633 - name: healthz - protocol: TCP - - containerPort: {{ .Values.node.metricsPort }} - name: metrics - protocol: TCP - livenessProbe: - failureThreshold: 5 - httpGet: - path: /healthz - port: healthz - initialDelaySeconds: 30 - timeoutSeconds: 10 - periodSeconds: 30 - env: - - name: AZURE_CREDENTIAL_FILE - valueFrom: - configMapKeyRef: - name: azure-cred-file - key: path - optional: true - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - - name: BLOBFUSE_PROXY_ENDPOINT - value: unix:///csi/blobfuse-proxy.sock - - name: KUBE_NODE_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: AZURE_ENVIRONMENT_FILEPATH - value: /etc/kubernetes/azurestackcloud.json - {{- end }} - imagePullPolicy: {{ .Values.image.blob.pullPolicy }} - securityContext: - privileged: true - volumeMounts: - - mountPath: /csi - name: socket-dir - - mountPath: {{ .Values.linux.kubelet }}/ - mountPropagation: Bidirectional - name: mountpoint-dir - - mountPath: /etc/kubernetes/ - name: azure-cred - - mountPath: /var/lib/waagent/ManagedIdentity-Settings - readOnly: true - name: msi - - mountPath: /mnt - name: blob-cache - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: ssl - mountPath: /etc/ssl/certs - readOnly: true - {{- end }} - {{- if eq .Values.linux.distro "fedora" }} - - name: ssl - mountPath: /etc/ssl/certs - readOnly: true - - name: ssl-pki - mountPath: /etc/pki/ca-trust/extracted - readOnly: true - {{- end }} - resources: {{- toYaml .Values.node.resources.blob | nindent 12 }} - volumes: - - hostPath: - path: {{ .Values.linux.kubelet }}/plugins/blob.csi.azure.com - type: DirectoryOrCreate - name: socket-dir - - hostPath: - path: {{ .Values.linux.kubelet }}/ - type: DirectoryOrCreate - name: mountpoint-dir - - hostPath: - path: {{ .Values.linux.kubelet }}/plugins_registry/ - type: DirectoryOrCreate - name: registration-dir - - hostPath: - path: /etc/kubernetes/ - type: Directory - name: azure-cred - - hostPath: - path: /var/lib/waagent/ManagedIdentity-Settings - name: msi - - hostPath: - path: {{ .Values.node.blobfuseCachePath }} - name: blob-cache - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: ssl - hostPath: - path: /etc/ssl/certs - {{- end }} - {{- if eq .Values.linux.distro "fedora" }} - - name: ssl - hostPath: - path: /etc/ssl/certs - - name: ssl-pki - hostPath: - path: /etc/pki/ca-trust/extracted - {{- end }} - {{- if .Values.securityContext }} - securityContext: {{- toYaml .Values.securityContext | nindent 8 }} - {{- end }} diff --git a/charts/v1.2.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml b/charts/v1.2.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml deleted file mode 100755 index c453e1dcd..000000000 --- a/charts/v1.2.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml +++ /dev/null @@ -1,107 +0,0 @@ -{{- if .Values.rbac.create -}} -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: blob-external-provisioner-role -{{ include "blob.labels" . 
| indent 2 }} -rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "delete"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["get", "list", "watch", "create", "update", "patch"] - - apiGroups: ["storage.k8s.io"] - resources: ["csinodes"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "watch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "list", "watch", "create", "update", "patch"] - ---- - -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: blob-csi-provisioner-binding -{{ include "blob.labels" . | indent 2 }} -subjects: - - kind: ServiceAccount - name: csi-blob-controller-sa - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: blob-external-provisioner-role - apiGroup: rbac.authorization.k8s.io - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: blob-external-resizer-role -{{ include "blob.labels" . | indent 2 }} -rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update", "patch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["update", "patch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "list", "watch", "create", "update", "patch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: blob-csi-resizer-role -{{ include "blob.labels" . 
| indent 2 }} -subjects: - - kind: ServiceAccount - name: csi-blob-controller-sa - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: blob-external-resizer-role - apiGroup: rbac.authorization.k8s.io - ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-blob-controller-secret-role -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "create"] - ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-blob-controller-secret-binding -subjects: - - kind: ServiceAccount - name: csi-blob-controller-sa - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: csi-blob-controller-secret-role - apiGroup: rbac.authorization.k8s.io -{{ end }} diff --git a/charts/v1.2.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml b/charts/v1.2.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml deleted file mode 100755 index aa24d7089..000000000 --- a/charts/v1.2.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml +++ /dev/null @@ -1,25 +0,0 @@ -{{- if .Values.rbac.create -}} ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-blob-node-secret-role -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list"] - ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-blob-node-secret-binding -subjects: - - kind: ServiceAccount - name: csi-blob-node-sa - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: csi-blob-node-secret-role - apiGroup: rbac.authorization.k8s.io -{{ end }} diff --git a/charts/v1.2.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml b/charts/v1.2.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml deleted file mode 100755 index 33266b1c3..000000000 --- a/charts/v1.2.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml +++ /dev/null @@ -1,8 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: csi-blob-controller-sa - namespace: {{ .Release.Namespace }} -{{ include "blob.labels" . | indent 2 }} -{{- end -}} diff --git a/charts/v1.2.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml b/charts/v1.2.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml deleted file mode 100755 index 4ca48c523..000000000 --- a/charts/v1.2.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml +++ /dev/null @@ -1,8 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: csi-blob-node-sa - namespace: {{ .Release.Namespace }} -{{ include "blob.labels" . 
| indent 2 }} -{{- end -}} diff --git a/charts/v1.2.0/blob-csi-driver/values.yaml b/charts/v1.2.0/blob-csi-driver/values.yaml deleted file mode 100755 index 154d5c02a..000000000 --- a/charts/v1.2.0/blob-csi-driver/values.yaml +++ /dev/null @@ -1,121 +0,0 @@ -image: - blob: - repository: mcr.microsoft.com/k8s/csi/blob-csi - tag: v1.2.0 - pullPolicy: IfNotPresent - csiProvisioner: - repository: mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner - tag: v2.1.0 - pullPolicy: IfNotPresent - livenessProbe: - repository: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe - tag: v2.3.0 - pullPolicy: IfNotPresent - nodeDriverRegistrar: - repository: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar - tag: v2.2.0 - pullPolicy: IfNotPresent - csiResizer: - repository: mcr.microsoft.com/oss/kubernetes-csi/csi-resizer - tag: v1.1.0 - pullPolicy: IfNotPresent - -## Reference to one or more secrets to be used when pulling images -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ -imagePullSecrets: [] -# - name: myRegistryKeySecretName - -serviceAccount: - create: true - -rbac: - create: true - -controller: - metricsPort: 29634 - replicas: 2 - runOnMaster: false - logLevel: 5 - resources: - csiProvisioner: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 10m - memory: 20Mi - livenessProbe: - limits: - cpu: 100m - memory: 100Mi - requests: - cpu: 10m - memory: 20Mi - blob: - limits: - cpu: 200m - memory: 200Mi - requests: - cpu: 10m - memory: 20Mi - csiResizer: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 10m - memory: 20Mi - affinity: {} - nodeSelector: {} - tolerations: [] - -node: - metricsPort: 29635 - logLevel: 5 - enableBlobfuseProxy: false - blobfuseCachePath: /mnt - resources: - livenessProbe: - limits: - cpu: 100m - memory: 100Mi - requests: - cpu: 10m - memory: 20Mi - nodeDriverRegistrar: - limits: - cpu: 100m - memory: 100Mi - requests: - cpu: 10m - memory: 20Mi - blob: - limits: - cpu: "2" - memory: 2100Mi - requests: - cpu: 10m - memory: 20Mi - affinity: {} - nodeSelector: {} - tolerations: [] - livenessProbe: - healthPort: 29633 - -linux: - kubelet: /var/lib/kubelet - distro: debian - -cloud: AzurePublicCloud - -## Collection of annotations to add to all the pods -podAnnotations: {} -## Collection of labels to add to all the pods -podLabels: {} -## Leverage a PriorityClass to ensure your pods survive resource shortages -## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ -priorityClassName: system-cluster-critical -## Security context give the opportunity to run container as nonroot by setting a securityContext -## by example : -## securityContext: { runAsUser: 1001 } -securityContext: {} diff --git a/charts/v1.3.0/blob-csi-driver-v1.3.0.tgz b/charts/v1.3.0/blob-csi-driver-v1.3.0.tgz deleted file mode 100644 index 4749a12f797846d65473b8768bdf907e9c0c2ec9..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4251 zcmV;M5M=KkiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PKBjZ{xPIa6kK35N?6f=RB2PlWdQGe8?u=Ht22=#A)w6w#`4n^6 zn&W_ev$N9abUG(TNBXnV>6AY^-IM+|-J{M)_vEmGm8=^k~DkH0~k-I1`o#6n{J zO=oLd<-z?$3XaHczy)EUi)P&hj-uSpR=?G0_#iw^BB|@nKE)i+TgpagC>h5SKe)v(eJwe(jen&B-H;fW*?xM@_ z6_Ym{1cb7IkSE9uXEUNy0`PS(G9lUz@d$Vb5`@C32ymhdN{)tJt6NV4B{K*?h#SsE zuptR4j7Y&K5U|E1gv^HvEpWFmAwqJz%}gk0iTpY|g;yY{x7NDcf*_x^S`%&{6nLU` zJsRJBi*8{I96}F}WQc`;7)Jr)fCP9Pq)^o(K(PQHeVjt5p^qPU~Pi22Z-EdBuj_6#SSbkLpk+ 
zauHD+!p;yk6gI1z9an@1LOi0NjNfrA;D)ouc_we8r1~6tQ@Fu$>Y{d_HET)szB2DG z-FjY?05*HgYVYl^ZX^VG{(2-ril*Ism;$D9nlVqm(HH}kG1t!xABiE zJT(f$#t4TYlV(h655>N(KPW|V3MgW}Xhh6E%UbEVsDPn(1auXI_qsvaSqY6k9Ab3C z3FAc0&nXrf0E9xFA?GaSx;GLrpOG0XJ&d;6-J=Ax6LEk* zpJ3aKC{NFo-oM3MNC+H{#!9C-9w(A`m@4c;@M6^(Ddhu?5=|7PYTzu27?-i)C^ae` zhiG92%R^quS_s!((;%{3juZD3&1FSJ!ugVM{*Bt1!+bi3A7T8o~a6-fq|5zeL_w?i-AI2mE)Kr|+Qtg_^$sXaDO4Kq@1}&Pc-;Bpm zwY;7I=fnr}SsjH#51sV&R}u{4@t8cKrjr4K(AVD$ZHa+0W*kj{2AR=~?1Y*22AcV< zY%m-}r9%e^g~Xw!&Js4QV(k;)5Kq8&M)Ojyq45VS?P%Nys!J+{g=uUp?YMBUYjkFI z&laxyq??+TmYBBndKH=`qj9pDwC88-YEU}2`Tv`!dEeBqs?#6ElrDTVY7rK;(6R-H zGIvV0rewU}hqBKFJwI3dFuD`R;RIIpLkm}p8X?Ltko~Lk{QR}$a$jY+{#TXe>B1ki zVOU_b{_pmW-n=R5e|6I99`ye{$^!{~7hPz>E`U%r@&Q2{MWQ|HHUf~?#}X?QY2?Vl z9LY0np&3E58E%cICqzPz#(r57x6o%qLLWkjda9v_Ks!e>U#EhZNO_JY)8|IeQ)AE^ z%L8TcyyUh6(AVNHrKIZ?=jT)jN%n6XP|NerC`=Ikm`7OB zVMWln!+q8#=NHfetYXY}TMYBZlEze^%tEQkk3>#wOO(1^a_CcMOdj`YG?iJiYkE~| z`$Tz!mEJ|%xJ6{9PyERu$vpWV9F5>N7yjS%LO994$0rZYRuc}UYr zy^&+z=OD!Cci*2~T-*+Z!~bV#NCEr69Y|b!mKKr(z+!oN(h-)Y@F+QUOx(&zHRWmE z+pY?PvohsoP7}%s&lXPmb`ni&3FbYNr~4)sMXa03EaF(4(HWz003Vd3R%SL(wTb&a zjc#YfP_O6$>G&ki44Ic7Su}WIze*aG$j#~YZZA)8I%%of*~voAxrs>(`oeq^9MyGv zSZr+Auvx>0MUGpxaHgfEdcjtGQE6a&=uX7AJnjAKq~Ckd+84>+Wa(Edt}4qpV}qqH zP+Pn5Y8P2&y|r;tv&_29daK)XoY|?}=t=`)B`B$O2>pnWP@aDG{rT|nesFbhbA5Sr zXH!H;*loc>otFOR+R^5MbS2D+Ybpa+sP!x`)3ZvnVZBeKXAx(Td5pT~?)+vo=yk}o zVosW@iP;)EG-ZqljXB&+IfyBvz6n`oWh!O#6G*p=$uRVi3D9G+JyXUT!Sn(so)5ue zp)Xw2?-bf3qP}?CEk2Hb6Xx4HFW)%+=hxf8{rT6JwZHk!m{72^)3ItbdzNQR$C@xLn25!J4PAW^9z!u*%W(N)##K7Gnk^qP_LZjJOUS85UxEH^9r`2)?A6^#j1NF z4Qy9wwd1eqH;Y2qK+EoooaNfI7FdmHt#b>Oa$~Y~)ufrKIFvhE{jE6)zP9^*>#|1t z$2{5MN=RM)&r$EB6#waT-n==)fA&!v$0>w-gvr@yJ^W|_%iV^F5S@`JL z{ZOQO{@i4pz9U@7{~~hwv+1%h^Hzjn_1%&UgRqroq*jbuL`Jh9c#2v9JFC>1+YV|2 zy@)vpC&}#<5>75BAzM@r9>I(CHSL0BohR@Pcp%DNPshmC;4xB%^6d6Qxs~>Kssong zY5pe86_b~twT-!4Y=dqv%m|k;rcQS8A!CvpTk3hw=M-3EuGpqt?o7olW7V31y+x^4 z2M51!)$YPnWIi~K#{oYo>0}d={U2~-r9~E4Sdsp;_@5AHS1!zJyy3mwl15j zQRXbHG)wfwVkk382>++O}RxV^s}{B%FOKL6$Z=Irk0Dum+9)AYPh@=61K&(`2%V?p8B5$$l+tu&ePkjo}$!vG)ES_j?F*5 zm=`naZkskctlWYd_foPN==r%F(Re~aVHukZ`xkM4W}OS%u^L3a>828C;rqWzpx-F2 zU4{75o3TodKF;$!-@dfBE(8 z;Qs3RVsL+T_F=GLhJwbueQrYEx^eC@H^D-v73U!<;gFSJ*Y>qp2`Q*0vRx0gDB{Zz z6eV~4*&Y+1{*q}fy$!3{KzBBqggQ5Cfzt2#$T#}GnV3-yuarzDG9wfw;8(bB9|;|+ zY!s^*z*&SR*u3-XE%9|cQ45b6u0_fGQc%I*ZJzXHcK_g=Co}@!4d_s^$gu-@37j0tfeQW)r4)7 zp|L{!)nMB|7gkl>+Ec9=zVfo4qR1@OiH8)CLyE}jD$C=)e1tt`CzpWh-v4=g+%Mn% zebeb3;=lVSCCN~W-ZIy^^rieH`NIUK7Yds{z1%wuZo2uB^*bP9-Y(eKT~F`6@2es4yLB#*@YNE*$6k930x$mS2INr}*( z`W-RqiWAeCqW0XK4rnWY9%I}mVV>V@ICKicPA0A4I8LJ~if>8ilW?-NM3yeNI->;c z`UxBH=s)1eL?jKJ!$K6kky%A6Zbh$?MWQrC;=HNqrZp7}TCC@a30pSnyHkKkM*eG5 zerpKsj&4|(wo`L^*BOd=zo^F8jC)_XswfQV1!`iaTEaSPokC^k zdWi-|UddF$Q3)gcv1BkiV4D?VO1GO=#~O9(upWOrJ-zCK;JW9(ZIXcN-v8G*s^tG4 z^$&mlWiO@X{P*RQo5{rda%ycK$}B&I?bKL4FLn0{tTNYs3)OWfJ1EQbf09Ry4byJp z39P&S)$esn`oI6?&Cx;s@1s;a<9SV$-w8^o5mv>vrrn2kePbm5KGLr}p?|URa`}I` xC0Hx}-Ln6`cicbR|J+O2m;7f2&O!1IDc zVQyr3R8em|NM&qo0PH<~Z{s$y{ac@c@D{kfdn^BllWd=W{EO72a=NY!;-P40hrRDm_xV_;Z?RNZ zeAj(4t@7l)k%lAkD+ozA^U$p4;3&%d+1_t=odBdSNTf{P@t?5(^q%q&8Y(XE1klOw z3|ZlE7!JWhC6RJwC4{d%1Uv^B zt<^sf5)g4o6c=+3g}!Kq#1~xhv1vv z2sUIP#Sv*71qS-f3@{$RsZqEqm=LK1e#%N{Y>EE5BE?r=X>YB4xdKW4eA1o>i=iMB zjr-xed4sNC3<8)BNO8m&ATChI1t1B&03~(z2v95`K%b|;3^u|h$jGnMCG*v2+ zUMI&}oA6G+ec4f1-pY>8*_cdX0dB+t*XK-O!a#I1KGz^~BSB_ZLAsRlj$`^SV@ijX z8xQ^VyYmLQ$R2p%JlVzh&oH+j=Qsq%ksxNohhyL8F;jY7eF0d3hm?o`y+QxfQ_;V_ zMF}jz7FeRsgi<|cJ0w73F3_CEZ8QX>mt%~n1VqNV6Me?abjA|&-vaT^tgY#BrUa)H zgr|4dwOYIAbR|Y_kY*O~F&ZW;B>X08p3>*SEPo=sIdn$+HbCTv5%$ZAqR}tB2*=6s 
zzj#tH4~>NKI5;u8n{9!Iuy$Bn^Uzw1pdMOA*ar`3*>UAuDhM5frd+CXP@lQDU2L10 z5{V=zr6thkF%9(m0wVE5+6$snZIIo!9HQB^mZHh(3tq-Bg}w$=)eq+JQhAki?hla zYvLhMGW`o7KZ-rn>vqFMPY9v04E=8R6G?gn{1Jn!7`HrZFCA4e&Bu`MJ&}BN=+#i& ztD?I1M5<*|yS)X+<1t}G%{_GgkSOvHDENwtq)TxM0&8bKLF=Mhn0u&Yv|Mls?V@cB zjkvWEiagSzHMZ8^jz}fb2QVIkuRPQ`=RpbQYMlEBewkjwM_QemBq5dz5saV1EjiSWP|D3C)RKQuS9>wOW_wtJ3$@ zUTb;u)2MEwbCnXlmhd=Zz$zBzxfPrb!+!`KN2%{+1;?@eqE|R?+rA`iBlz0Ma~w)o ze5XOQWK&Qc>ddg{P%`QyJr2^xo)X+c0gMP{j$INa=dE1?90cYErASQyMLdvB#Dn7u%BQ0Mfx)u}t6sqf0@!AZmU> zu`~#P$yg*Flpc*_EM{Z|3td8T%7t3j<@7o8AyS#wX{OQ9o@bPhb|V20n&?W$iSqnd zGi1$)>(f}9S=Sdt5g$`ceF%Q6p^dWvcM453r+VT%invg*mO4Ev5i_*#bP?9x2oszu zG@38G7_2>QgYLv)(-cN?1a~-$C?HS&T>|mYy~cekf05eZ=ykh2^x!N5J?uh0Uu`SL ze-wlf#R_ES{Nj2rY^yu*;O_5T6u_7;KrQ1?YPc41vqw0l3bk#P(WqwoSDSIrkQXx$f&_rRASGhvqr<-W zm4w51JSKOjYi&S1ROs`8@PCI753ZThs z+^i=Z`Nc$-w9Re(cMG*{S_aqh@<%bHi&(ARgoVFZw*gTWPR;g|j2H4yj=7?Thvhh| z86+^9z{+uG5vtWQL^%Pf{?&bWcx}DhR;#!F6=Bv5(}Jt*e{a9vFXjLDy6@f{?Ck$G z+AU#$hfa-S7XnjGz90Zck?hQRP6!GISm7*Mp!FzGxSV(}Yey2OBq~l{cn zu9zo0FKbxwaSmICp%7JEY<$s4;L<}TqE{zqc~#0XNjX6(;uD;Lhiv@6ENjUI)opp6 z-NY2EyUi6bvfNTFJw;#8A2C;e8aT?uU^7~U!0aP<%$%QWFWg-Ep6}C9rGrz;0?}kCccZ4-9Np=G?An!dc-NLZ+0-L!G2J zp)I+8TVC$VTv0;PQf37V#TA`#8i(-7WbMoRg}OJ%%B00IS+m@0zCgJF$qPf4yg?Nv zzi{X!fF*Wwhe2;I&v3eEX*R*lDp+z0liUvq>ru!*G|6ER*otARK@N)&SGRE%(r~>H ztG-+Uu&EI@;zAu&+u|aNSt_Ntz0Gn`#IuQ*tPlj<>ZND{F)47{C?%#0V-49bK2U>5 z7?|}Wq2;=>x?M|hqa`e!|=aU$X#aZdK2;>r5~`1&ZfG@Hq>lhx*+DOky_|k9)<(5eUMA>cRD= z@c;dCHMlvs8k`Q!ug{J@-h4RwIH)?4KL{R{=W3j$F`0bA(a$iyg7Gpyx-Cg%hJe5w z6ABe@rlLfzUd1zRa;=yy%FUK1k~bXHK1IyPo!9BqpHPWs4S!^!r!@<;OnPLfuzzc^ z;;pjm6&G`G{_EM*#rdbf`L$Wi%j4^xYP=j!el7*wF^eQM9YeVNZ^_vbhXtnX(_{Jj3>ZU=aUWFp6Sz&sJT0|S+BDG3&6nzqw@*@S{RPf z{GKcgQqmPJfnE_QX~zel9>Hy;1XM9=E{$zl=Yd*d2)Mx8Wi^M~jc&`rQzj7Il8%pE zC1R;M=4#=qk|aESVK(rzw5~KdSmm;5+iLx+tafmXi(m@5uFtjZeXsJi*l-8@n>)oU zpx{~~;Sv^0a6t$zj!a3?-(L5om#)>AJ1mTJTD5zunc*s?m52UkYBa~!;OYUP_~YvliuE9~a41UA)w?e`DL`M+Mbzsvt^ zqq(kINcl-yv(>)+(H0i;9Gf9L8J^kd?bXS^Vz^_Gxx-3fe|iOf#6&=l6|YA;IAdde z!OXe3aK%biBdKU^SClgwy&ua$f%6A@Da6!SEjKQD#6h83)7Nypgdg@Zyx8QWg zAeur50@MEl3$lj(GakT`UI=c;{~z|cy;A+xyMz9&{%b2O$^T;rIUB+T>3<^~FP!*) zbSl5Z)QctYOLQuIp1(r+zH(x(o?*{j+yf?4^M7J%l0Oh3)jx@v{$z(N9L>k1+V%c! 
znq*(fHd|rYJoBAZMWm!Pvg4&*`;wAAXisB7*d%!qj`TQO0tFvDRS2tIKf7}c& zPJX_*Jih*E8AEa9X?m|Hg-rwu6_3jH3+}QLHETW(L#$n~-&&m!VI4Vby+tk8*Z&tG za>W>R^89vcFI{?CMlg%<(AMuS7GKSVSFK((t{nR|@Re)|dU)tWG@cM9>!(=7#EZmd zv%v*fS&bqOx~YCvXu-Eqs$tF8a}{Z%ueWQfdNIoE$%KEE4CP0IlWiEZ?aT&l2w&$0=@f+}L9 z{bv@nlv7hBACJrkg$V>p(zlIbm{nd>rH3MTgeTa3%kw=6OfJ?aPbCXxj5%-rdNnyk zx*=1QqilE_`_pIEgk3?;S6JAYtV*Ll>Ecs7@r(5o7vjXcKaqo}H6)v} zxDf|6cI3$mb(4LfaTB&`?{upf+o;H8g%xa|wt-1pws>nVwUPU!Z;BN~uhul~s%&;u zHoNvswfg+O7-8RimX825)c+jzdZqmTUhi=C{{MDb$#m#VZ`=1U%||(s;vbt*z2X!K z1p)>d$7;$2`O_Suxw@g1RRd;Z8nA{t1%hFke=pX23UvKS0TEN{g1P#xYs!J7hkk1% zQSY3u_ggDk6#OKvTLLr#F){!XP_4J9MTs;&&1a+ZFLqt@Z-I71z>M2~pieOg*QOdr z0j{Q1tktTboIv;#OS$Ge*PIi~fT;~uE}6|+)QalJHBZZzMEfbvI-w^a^f?y+VR;p( zqG4)bo@LP-*L9s`?R-yIK-lCZl(h8Tx;siptzXO&kNyq5O0?9nJ}i`5IGJ<#n6DqK zQ*xxONfH}dHffvNC$tcQ73*BLSubizlUGWJZy#uwoll$i+Y&Oua`WRc1d^xT`HWQKjy=H#T zAb^jXW_kpE{X(|E_N^yr_xHOvz&G92z5hSuC4i>yzxEEh<@bN~`-i*x|2A5~{ePWRB5tS-sl><-DiDqEdPP(*IwbTyFFh2AMXh^>VJQ~r2l&d n`#b+{EA7?vKXYAnn!jtiwrjig3hn;`00960fz-aN0D=Gj<>I!5 diff --git a/charts/v1.4.0/blob-csi-driver/Chart.yaml b/charts/v1.4.0/blob-csi-driver/Chart.yaml deleted file mode 100755 index 1202d4e92..000000000 --- a/charts/v1.4.0/blob-csi-driver/Chart.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: v1 -appVersion: v1.4.0 -description: Azure Blob Storage CSI driver -name: blob-csi-driver -version: v1.4.0 diff --git a/charts/v1.4.0/blob-csi-driver/templates/NOTES.txt b/charts/v1.4.0/blob-csi-driver/templates/NOTES.txt deleted file mode 100755 index 9ad135dd4..000000000 --- a/charts/v1.4.0/blob-csi-driver/templates/NOTES.txt +++ /dev/null @@ -1,5 +0,0 @@ -The Azure Blob Storage CSI driver is getting deployed to your cluster. - -To check Azure Blob Storage CSI driver pods status, please run: - - kubectl --namespace={{ .Release.Namespace }} get pods --selector="release={{ .Release.Name }}" --watch diff --git a/charts/v1.4.0/blob-csi-driver/templates/_helpers.tpl b/charts/v1.4.0/blob-csi-driver/templates/_helpers.tpl deleted file mode 100755 index 5231cd26f..000000000 --- a/charts/v1.4.0/blob-csi-driver/templates/_helpers.tpl +++ /dev/null @@ -1,26 +0,0 @@ -{{/* vim: set filetype=mustache: */}} - -{{/* Expand the name of the chart.*/}} -{{- define "blob.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* labels for helm resources */}} -{{- define "blob.labels" -}} -labels: - app.kubernetes.io/instance: "{{ .Release.Name }}" - app.kubernetes.io/managed-by: "{{ .Release.Service }}" - app.kubernetes.io/name: "{{ template "blob.name" . }}" - app.kubernetes.io/version: "{{ .Chart.AppVersion }}" - helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" -{{- end -}} - -{{/* pull secrets for containers */}} -{{- define "blob.pullSecrets" -}} -{{- if .Values.imagePullSecrets }} -imagePullSecrets: -{{- range .Values.imagePullSecrets }} - - name: {{ . }} -{{- end }} -{{- end }} -{{- end -}} \ No newline at end of file diff --git a/charts/v1.4.0/blob-csi-driver/templates/csi-blob-controller.yaml b/charts/v1.4.0/blob-csi-driver/templates/csi-blob-controller.yaml deleted file mode 100755 index f01ccf9eb..000000000 --- a/charts/v1.4.0/blob-csi-driver/templates/csi-blob-controller.yaml +++ /dev/null @@ -1,175 +0,0 @@ -kind: Deployment -apiVersion: apps/v1 -metadata: - name: {{ .Values.controller.name }} - namespace: {{ .Release.Namespace }} -{{ include "blob.labels" . 
| indent 2 }} -spec: - replicas: {{ .Values.controller.replicas }} - selector: - matchLabels: - app: {{ .Values.controller.name }} - template: - metadata: -{{ include "blob.labels" . | indent 6 }} - app: {{ .Values.controller.name }} - {{- if .Values.podLabels }} -{{- toYaml .Values.podLabels | nindent 8 }} - {{- end }} -{{- if .Values.podAnnotations }} - annotations: -{{ toYaml .Values.podAnnotations | indent 8 }} -{{- end }} - spec: -{{- with .Values.controller.affinity }} - affinity: -{{ toYaml . | indent 8 }} -{{- end }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: -{{ toYaml .Values.imagePullSecrets | indent 8 }} - {{- end }} - hostNetwork: {{ .Values.controller.hostNetwork }} - serviceAccountName: {{ .Values.serviceAccount.controller }} - nodeSelector: - kubernetes.io/os: linux - {{- if .Values.controller.runOnMaster}} - kubernetes.io/role: master - {{- end}} -{{- with .Values.controller.nodeSelector }} -{{ toYaml . | indent 8 }} -{{- end }} - priorityClassName: {{ .Values.priorityClassName | quote }} -{{- with .Values.controller.tolerations }} - tolerations: -{{ toYaml . | indent 8 }} -{{- end }} - containers: - - name: csi-provisioner - image: {{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }} - args: - - "-v=2" - - "--csi-address=$(ADDRESS)" - - "--leader-election" - - "--timeout=60s" - - "--extra-create-metadata=true" - env: - - name: ADDRESS - value: /csi/csi.sock - imagePullPolicy: {{ .Values.image.csiProvisioner.pullPolicy }} - volumeMounts: - - mountPath: /csi - name: socket-dir - resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }} - - name: liveness-probe - image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" - args: - - --csi-address=/csi/csi.sock - - --probe-timeout=3s - - --health-port={{ .Values.controller.livenessProbe.healthPort }} - imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} - volumeMounts: - - name: socket-dir - mountPath: /csi - resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }} - - name: blob - image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" - args: - - "--v={{ .Values.controller.logLevel }}" - - "--endpoint=$(CSI_ENDPOINT)" - - "--metrics-address=0.0.0.0:{{ .Values.controller.metricsPort }}" - - "--drivername={{ .Values.driver.name }}" - - "--cloud-config-secret-name={{ .Values.controller.cloudConfigSecretName }}" - - "--cloud-config-secret-namespace={{ .Values.controller.cloudConfigSecretNamespace }}" - ports: - - containerPort: {{ .Values.controller.livenessProbe.healthPort }} - name: healthz - protocol: TCP - - containerPort: {{ .Values.controller.metricsPort }} - name: metrics - protocol: TCP - livenessProbe: - failureThreshold: 5 - httpGet: - path: /healthz - port: healthz - initialDelaySeconds: 30 - timeoutSeconds: 10 - periodSeconds: 30 - env: - - name: AZURE_CREDENTIAL_FILE - valueFrom: - configMapKeyRef: - name: azure-cred-file - key: path - optional: true - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: AZURE_ENVIRONMENT_FILEPATH - value: /etc/kubernetes/azurestackcloud.json - {{- end }} - imagePullPolicy: {{ .Values.image.blob.pullPolicy }} - volumeMounts: - - mountPath: /csi - name: socket-dir - - mountPath: /etc/kubernetes/ - name: azure-cred - - mountPath: /var/lib/waagent/ManagedIdentity-Settings - readOnly: true - name: msi - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: ssl - mountPath: 
/etc/ssl/certs - readOnly: true - {{- end }} - {{- if eq .Values.linux.distro "fedora" }} - - name: ssl - mountPath: /etc/ssl/certs - readOnly: true - - name: ssl-pki - mountPath: /etc/pki/ca-trust/extracted - readOnly: true - {{- end }} - resources: {{- toYaml .Values.controller.resources.blob | nindent 12 }} - - name: csi-resizer - image: "{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" - args: - - "-csi-address=$(ADDRESS)" - - "-v=2" - - "-leader-election" - - '-handle-volume-inuse-error=false' - env: - - name: ADDRESS - value: /csi/csi.sock - imagePullPolicy: {{ .Values.image.csiResizer.pullPolicy }} - volumeMounts: - - name: socket-dir - mountPath: /csi - resources: {{- toYaml .Values.controller.resources.csiResizer | nindent 12 }} - volumes: - - name: socket-dir - emptyDir: {} - - name: azure-cred - hostPath: - path: /etc/kubernetes/ - type: Directory - - name: msi - hostPath: - path: /var/lib/waagent/ManagedIdentity-Settings - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: ssl - hostPath: - path: /etc/ssl/certs - {{- end }} - {{- if eq .Values.linux.distro "fedora" }} - - name: ssl - hostPath: - path: /etc/ssl/certs - - name: ssl-pki - hostPath: - path: /etc/pki/ca-trust/extracted - {{- end }} - {{- if .Values.securityContext }} - securityContext: {{- toYaml .Values.securityContext | nindent 8 }} - {{- end }} diff --git a/charts/v1.4.0/blob-csi-driver/templates/csi-blob-driver.yaml b/charts/v1.4.0/blob-csi-driver/templates/csi-blob-driver.yaml deleted file mode 100755 index a742b506a..000000000 --- a/charts/v1.4.0/blob-csi-driver/templates/csi-blob-driver.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -apiVersion: storage.k8s.io/v1 -kind: CSIDriver -metadata: - name: {{ .Values.driver.name }} -spec: - attachRequired: false - podInfoOnMount: true - {{- if .Values.feature.enableFSGroupPolicy}} - fsGroupPolicy: File - {{- end}} - volumeLifecycleModes: - - Persistent - - Ephemeral diff --git a/charts/v1.4.0/blob-csi-driver/templates/csi-blob-node.yaml b/charts/v1.4.0/blob-csi-driver/templates/csi-blob-node.yaml deleted file mode 100755 index f95a1fcda..000000000 --- a/charts/v1.4.0/blob-csi-driver/templates/csi-blob-node.yaml +++ /dev/null @@ -1,198 +0,0 @@ -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: {{ .Values.node.name }} - namespace: {{ .Release.Namespace }} -{{ include "blob.labels" . | indent 2 }} -spec: - selector: - matchLabels: - app: {{ .Values.node.name }} - template: - metadata: -{{ include "blob.labels" . | indent 6 }} - app: {{ .Values.node.name }} - {{- if .Values.podLabels }} -{{- toYaml .Values.podLabels | nindent 8 }} - {{- end }} -{{- if .Values.podAnnotations }} - annotations: -{{ toYaml .Values.podAnnotations | indent 8 }} -{{- end }} - spec: - {{- if .Values.imagePullSecrets }} - imagePullSecrets: -{{ toYaml .Values.imagePullSecrets | indent 8 }} - {{- end }} - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - serviceAccountName: {{ .Values.serviceAccount.node }} - nodeSelector: - kubernetes.io/os: linux -{{- with .Values.node.nodeSelector }} -{{ toYaml . | indent 8 }} -{{- end }} - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: type - operator: NotIn - values: - - virtual-kubelet - {{- if .Values.node.affinity }} -{{- toYaml .Values.node.affinity | nindent 8 }} - {{- end }} - priorityClassName: {{ .Values.priorityClassName | quote }} -{{- with .Values.node.tolerations }} - tolerations: -{{ toYaml . 
| indent 8 }} -{{- end }} - containers: - - name: liveness-probe - imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} - volumeMounts: - - mountPath: /csi - name: socket-dir - image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" - args: - - --csi-address=/csi/csi.sock - - --probe-timeout=3s - - --health-port={{ .Values.node.livenessProbe.healthPort }} - - --v=2 - resources: {{- toYaml .Values.node.resources.livenessProbe | nindent 12 }} - - name: node-driver-registrar - image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" - args: - - --csi-address=$(ADDRESS) - - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) - - --v=2 - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "rm -rf /registration/{{ .Values.driver.name }}-reg.sock /csi/csi.sock"] - env: - - name: ADDRESS - value: /csi/csi.sock - - name: DRIVER_REG_SOCK_PATH - value: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }}/csi.sock - volumeMounts: - - name: socket-dir - mountPath: /csi - - name: registration-dir - mountPath: /registration - resources: {{- toYaml .Values.node.resources.nodeDriverRegistrar | nindent 12 }} - - name: blob - image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" - args: - - "--v={{ .Values.node.logLevel }}" - - "--endpoint=$(CSI_ENDPOINT)" - - "--blobfuse-proxy-endpoint=$(BLOBFUSE_PROXY_ENDPOINT)" - - "--enable-blobfuse-proxy={{ .Values.node.enableBlobfuseProxy }}" - - "--nodeid=$(KUBE_NODE_NAME)" - - "--metrics-address=0.0.0.0:{{ .Values.node.metricsPort }}" - - "--drivername={{ .Values.driver.name }}" - - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" - - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" - ports: - - containerPort: {{ .Values.node.livenessProbe.healthPort }} - name: healthz - protocol: TCP - livenessProbe: - failureThreshold: 5 - httpGet: - path: /healthz - port: healthz - initialDelaySeconds: 30 - timeoutSeconds: 10 - periodSeconds: 30 - env: - - name: AZURE_CREDENTIAL_FILE - valueFrom: - configMapKeyRef: - name: azure-cred-file - key: path - optional: true - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - - name: BLOBFUSE_PROXY_ENDPOINT - value: unix:///csi/blobfuse-proxy.sock - - name: KUBE_NODE_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: AZURE_ENVIRONMENT_FILEPATH - value: /etc/kubernetes/azurestackcloud.json - {{- end }} - imagePullPolicy: {{ .Values.image.blob.pullPolicy }} - securityContext: - privileged: true - volumeMounts: - - mountPath: /csi - name: socket-dir - - mountPath: {{ .Values.linux.kubelet }}/ - mountPropagation: Bidirectional - name: mountpoint-dir - - mountPath: /etc/kubernetes/ - name: azure-cred - - mountPath: /var/lib/waagent/ManagedIdentity-Settings - readOnly: true - name: msi - - mountPath: /mnt - name: blob-cache - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: ssl - mountPath: /etc/ssl/certs - readOnly: true - {{- end }} - {{- if eq .Values.linux.distro "fedora" }} - - name: ssl - mountPath: /etc/ssl/certs - readOnly: true - - name: ssl-pki - mountPath: /etc/pki/ca-trust/extracted - readOnly: true - {{- end }} - resources: {{- toYaml .Values.node.resources.blob | nindent 12 }} - volumes: - - hostPath: - path: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }} - type: DirectoryOrCreate - name: socket-dir - - hostPath: - path: {{ .Values.linux.kubelet }}/ - 
type: DirectoryOrCreate - name: mountpoint-dir - - hostPath: - path: {{ .Values.linux.kubelet }}/plugins_registry/ - type: DirectoryOrCreate - name: registration-dir - - hostPath: - path: /etc/kubernetes/ - type: Directory - name: azure-cred - - hostPath: - path: /var/lib/waagent/ManagedIdentity-Settings - name: msi - - hostPath: - path: {{ .Values.node.blobfuseCachePath }} - name: blob-cache - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: ssl - hostPath: - path: /etc/ssl/certs - {{- end }} - {{- if eq .Values.linux.distro "fedora" }} - - name: ssl - hostPath: - path: /etc/ssl/certs - - name: ssl-pki - hostPath: - path: /etc/pki/ca-trust/extracted - {{- end }} - {{- if .Values.securityContext }} - securityContext: {{- toYaml .Values.securityContext | nindent 8 }} - {{- end }} diff --git a/charts/v1.4.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml b/charts/v1.4.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml deleted file mode 100755 index 575437fb8..000000000 --- a/charts/v1.4.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml +++ /dev/null @@ -1,107 +0,0 @@ -{{- if .Values.rbac.create -}} -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ .Values.rbac.name }}-external-provisioner-role -{{ include "blob.labels" . | indent 2 }} -rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "delete"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["get", "list", "watch", "create", "update", "patch"] - - apiGroups: ["storage.k8s.io"] - resources: ["csinodes"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "watch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "list", "watch", "create", "update", "patch"] - ---- - -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ .Values.rbac.name }}-csi-provisioner-binding -{{ include "blob.labels" . | indent 2 }} -subjects: - - kind: ServiceAccount - name: {{ .Values.serviceAccount.controller }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: {{ .Values.rbac.name }}-external-provisioner-role - apiGroup: rbac.authorization.k8s.io - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ .Values.rbac.name }}-external-resizer-role -{{ include "blob.labels" . | indent 2 }} -rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update", "patch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["update", "patch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "list", "watch", "create", "update", "patch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ .Values.rbac.name }}-csi-resizer-role -{{ include "blob.labels" . 
| indent 2 }} -subjects: - - kind: ServiceAccount - name: {{ .Values.serviceAccount.controller }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: {{ .Values.rbac.name }}-external-resizer-role - apiGroup: rbac.authorization.k8s.io - ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-{{ .Values.rbac.name }}-controller-secret-role -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "create"] - ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-{{ .Values.rbac.name }}-controller-secret-binding -subjects: - - kind: ServiceAccount - name: {{ .Values.serviceAccount.controller }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: csi-{{ .Values.rbac.name }}-controller-secret-role - apiGroup: rbac.authorization.k8s.io -{{ end }} diff --git a/charts/v1.4.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml b/charts/v1.4.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml deleted file mode 100755 index b4a30373c..000000000 --- a/charts/v1.4.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml +++ /dev/null @@ -1,25 +0,0 @@ -{{- if .Values.rbac.create -}} ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-{{ .Values.rbac.name }}-node-secret-role -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list"] - ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-{{ .Values.rbac.name }}-node-secret-binding -subjects: - - kind: ServiceAccount - name: {{ .Values.serviceAccount.node }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: csi-{{ .Values.rbac.name }}-node-secret-role - apiGroup: rbac.authorization.k8s.io -{{ end }} diff --git a/charts/v1.4.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml b/charts/v1.4.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml deleted file mode 100755 index 93c5c0149..000000000 --- a/charts/v1.4.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml +++ /dev/null @@ -1,8 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ .Values.serviceAccount.controller }} - namespace: {{ .Release.Namespace }} -{{ include "blob.labels" . | indent 2 }} -{{- end -}} diff --git a/charts/v1.4.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml b/charts/v1.4.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml deleted file mode 100755 index ef4c7d754..000000000 --- a/charts/v1.4.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml +++ /dev/null @@ -1,8 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ .Values.serviceAccount.node }} - namespace: {{ .Release.Namespace }} -{{ include "blob.labels" . 
| indent 2 }} -{{- end -}} diff --git a/charts/v1.4.0/blob-csi-driver/values.yaml b/charts/v1.4.0/blob-csi-driver/values.yaml deleted file mode 100755 index d34fdb367..000000000 --- a/charts/v1.4.0/blob-csi-driver/values.yaml +++ /dev/null @@ -1,148 +0,0 @@ -image: - blob: - repository: mcr.microsoft.com/k8s/csi/blob-csi - tag: v1.4.0 - pullPolicy: IfNotPresent - csiProvisioner: - repository: mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner - tag: v2.1.0 - pullPolicy: IfNotPresent - livenessProbe: - repository: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe - tag: v2.3.0 - pullPolicy: IfNotPresent - nodeDriverRegistrar: - repository: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar - tag: v2.2.0 - pullPolicy: IfNotPresent - csiResizer: - repository: mcr.microsoft.com/oss/kubernetes-csi/csi-resizer - tag: v1.1.0 - pullPolicy: IfNotPresent - -## Reference to one or more secrets to be used when pulling images -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ -imagePullSecrets: [] -# - name: myRegistryKeySecretName - -serviceAccount: - create: true # When true, service accounts will be created for you. Set to false if you want to use your own. - controller: csi-blob-controller-sa # Name of Service Account to be created or used - node: csi-blob-node-sa # Name of Service Account to be created or used - -rbac: - create: true - name: blob - -controller: - name: csi-blob-controller - cloudConfigSecretName: azure-cloud-provider - cloudConfigSecretNamespace: kube-system - hostNetwork: true # this setting could be disabled if controller does not depend on MSI setting - metricsPort: 29634 - livenessProbe: - healthPort: 29632 - replicas: 2 - runOnMaster: false - logLevel: 5 - resources: - csiProvisioner: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 10m - memory: 20Mi - livenessProbe: - limits: - cpu: 100m - memory: 100Mi - requests: - cpu: 10m - memory: 20Mi - blob: - limits: - cpu: 200m - memory: 200Mi - requests: - cpu: 10m - memory: 20Mi - csiResizer: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 10m - memory: 20Mi - affinity: {} - nodeSelector: {} - tolerations: - - key: "node-role.kubernetes.io/master" - operator: "Exists" - effect: "NoSchedule" - - key: "node-role.kubernetes.io/controlplane" - operator: "Exists" - effect: "NoSchedule" - -node: - name: csi-blob-node - cloudConfigSecretName: azure-cloud-provider - cloudConfigSecretNamespace: kube-system - metricsPort: 29635 - livenessProbe: - healthPort: 29633 - logLevel: 5 - enableBlobfuseProxy: false - blobfuseCachePath: /mnt - resources: - livenessProbe: - limits: - cpu: 100m - memory: 100Mi - requests: - cpu: 10m - memory: 20Mi - nodeDriverRegistrar: - limits: - cpu: 100m - memory: 100Mi - requests: - cpu: 10m - memory: 20Mi - blob: - limits: - cpu: "2" - memory: 2100Mi - requests: - cpu: 10m - memory: 20Mi - affinity: {} - nodeSelector: {} - tolerations: - - operator: "Exists" - livenessProbe: - healthPort: 29633 - -feature: - enableFSGroupPolicy: false - -driver: - name: blob.csi.azure.com - -linux: - kubelet: /var/lib/kubelet - distro: debian - -cloud: AzurePublicCloud - -## Collection of annotations to add to all the pods -podAnnotations: {} -## Collection of labels to add to all the pods -podLabels: {} -## Leverage a PriorityClass to ensure your pods survive resource shortages -## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ -priorityClassName: system-cluster-critical -## Security context give the opportunity to run 
container as nonroot by setting a securityContext -## by example : -## securityContext: { runAsUser: 1001 } -securityContext: {} diff --git a/charts/v1.4.1/blob-csi-driver-v1.4.1.tgz b/charts/v1.4.1/blob-csi-driver-v1.4.1.tgz deleted file mode 100644 index 4f488cd26802e4a8423edf9b71b5ce315bc7523a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4502 zcmV;H5ozupiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PH>ebK5qu{h5CSM(ND;-3#^QkMwcIKRC8>^)#_PmfL&xIvo#0 zt|Xj6fCWIss^kCt4FFR75Gl&CEhjx8Gm%6tb{D|zVzIlVN0g6TUlKPEWCo&hGQ~o* z=QyO_JzMK`yWPY6ee>DvcFUjL-r?SNz5U)n|FGBZ9qfJA)j#&XL*3_Np}xgZVewt} z$+XIY`$ig$$gdzI;mkv`o`a((_p`mQPM`)SZ!d=0HNG0%7WWOg8=3OQXFvxhzk^Q0Z4)`KuLXh1SpmepwCla1{+}$WaL+B6avN`nkp4Z zuao1gO?W5ZzU(M0Z)L~lY)mGx05{@+>vN_sVIVphpKFl0ksvdyAf3v2$1&G0V@ijX z8xQ^VyYmLQ$ZmMyJXyv0&oH+j=Qsq%ksxNohhyL8F;jY8eF0d3hm?o`y+QxfL($*g zq68LU3oOxRLa83J?Gm6d7iiAoHX4G`(=oC=yNcc_WJf+Wtnf^q2bLfotZGgxTBkY$aMWbJM5ss7N ze{rW`92yDbad2W*H(LS^VePQE=BBk6K|QpLun!*6vg69RR1i7{O}SL(pgwbPyI3|g zB@#(cN=u;6V;bo31w`Tzg+Q-K#u^GZNW{280Ytz8#2NZDJWIPf2!)^o@#Q5K%0vDC zIo#W?Hc?Z+R82FmzGJkM5+6$snO`xxV4tv5dby2!XiR+a5oSO=bYMD29t$6&#aU&I zHSv%rnf^k^k75t?y4`SbC4|sehP`h06G^TL_#*~c(Qmn1KkZd9%*T-LJ&}C2>D5r} zS5fUhk!so0Zg0ZzcuW{ka}V7=B#Jx)3clhZxuiG+fwi-rpmotL%stdHS}r(+cG0$m zM%-EnMIPzi8e3~{N2HSJ4H%EXR~~Ae^PxY5Ag0h-3xA?X5ycEPqv9BQwQN50rf z|0oC}iWSJt`Nj2M*j9Jy$uyeczk7%MQvB!O@ZjAp{1InKh~kt60885d|zkn-I@QHW^=0}l11jaGTBX!TEapu^+FF1E#$hNvGS)oTU2QE`hPP88i9~)6+JN&+@Sxx{k?8U z{|^uL4|e*$jdp+E`8%4C(9@m(8WRd?9>Gzlr^TOwhyLDqcyJs8Hn@v03yg=sl1PvK z?wgdMfxEwVQ2=AY0JV%mso`44&2HhCD%7@FMx&bTUv0)gLte~42oeDLf|Q7vj}G_D zR}v26@tEA9mYX317MSmrx5Nn?bAhHn!|d>bsu{EJ7MuO9{ahSHrQ;`IQemb~cdaU` zQrRcO3{N0%NApszG%i-x(R$*#OR9#2X>8YaT%_0xIXk<@i&S3HP0vePPCI733ZTho z-0Vu)^NWcvX`9>n?-pv^v<$B0>5pPc7qMEs2n&C+ZULe!oSLmE8876a9CJkv56f{_ z(@9`9ftCHxB2=qKh;jl{{ndSVcx}DhR;#!F6=Bv5(}Jt*e{X+puay7qci+A1?(F|I z+AU#$hfa-S7XnjGz90Zck?hQRP6!GISm7*Mp!FzGxSV(}Yey2OBq~3aJqNY{DPXlB*@@dE{PkS|)rCF=q2Xw32Mam;w>RlwhG_UqTAy-tr);W-@ zUolU3Ue>VU?HslYLm{fTSooroz@>*wM6XWJ@~V_&l5&Dn#3wif583#CS=N#Ts$23r zyNM}ScbO|DS=&^2X<2R1rCTm~jFVt6)tV~)glQqk|<_nY?ki0Nt$s1H* z@(Y_@0$5@6{+nB;s=n2$pKp-B#lz*Y=f4RTnNxVnY2kcR7p zSnbIr0Gk?dBQDfYwJk2Pn59yR%iAm`MLe5`$qGTxtzL>Y5R(G8jZ$LDFxHR_;{!E_ zgn?O45?Zc1t6Q}sH(J7yS`rpSoG^9t{SPO@vzx*B>E*@Q`E`}iN)ogSdF!^#Kd+i> zE}dIV*&d+utmPTES5p_S%Ep=uvrQ1W_5|&gk>uE@%s)R4a~4@$6MgEj=V@rQT3WTm zzGoAXPtiKGFpn1E&(gg}aK(L2J#>9?xd|J^^{k|vfUHTmGQV!y7!w)`xSk4-Q%(b$ z*3a5hspub|yfP)L@ih}*##VJtyUqmDQ=oW01fR1&dT6g(fJyA;;&!iiI|4y?P~EwH z7yiFrt_C+JSA)~R`SscH$D0pl9|u)?@(01g@>q@2G$xZzIQkjpS1?`%NS7t4%n%T` zV?v<<&Qz4>*{it6O|BKwMY-AXK=Ov8+NX#ax$`=m`W-6qtl^JL^t5K7mPwBc74~mU zR=ib~z2amJ&VN0-x;XzdIKMWtd3k*OQ;nAc%Fm^sJ7$ukreg@V|1CLN;;`VfqP6zA zMeJ^Co=~}SX`q#eG7(vUC@p39jPV4R>U^?c+cUiz5;b>+HtSW^e*rkSV02y~KnufB z8sC$pK}x#9CD1b>CGGei)FZg9lz=K`&7rYv>pW0v3;`E-yR7DryU}e~c*+E#Thj5d zt3)hS#~dwuRg#3qPs|3Mme!R<2di8*ZCkCMmDLWeaS%))*Y&y9J?~YX78}lhe{-jp z1r%IsBwWH`2`&i1#gQpV`rGTC^wPQd+THhz)f&;Vj8;S*D*1yjQu9-imZBAVRGQH# zmv%5kkD=K(t9b+-Iwit{edbHlidJ(anMa%MjVy4rNoyScvVE&CC>vPWXJe<{nl>V9 zFs)5N*-~v-J_1@U8e3L}a_7oucaDSCPOW?sZH@e2a)jOdk-(<
    D)a{jOT?r@j? z+eUL;w~+Fawq~n+`=c!^=s7k+crrY*)!VC+fyHpgB6Ek8!v6FM{)mZyAS+&vcyPwX z{DPT%b>WJYtVUAN+^#5RHhMpng#zaf_E4B*BGNtaBN>A~_bGhh0W3-I|ZU+)h3yZW!Kv?TwJA>?cb8>Ih@bi8ol z|Iw-Z5>qdh#4pjQ^m+aY>HEroy?TZ{cW@7wOwIp^tx5hsgjD||YWkDyvT!sXlWN!N zyJ?brDcfv?W%JB;Ruz$w*2s>RuG)u`^hSFc3&JMJlW>Gh&L)g6E)VX&kIh>>3eJ9> z!D|qqEC*s;tAK;MNPv{tLrY~S6NJ@gzKXl~NBGXUI%AbE_FWWJ*e!(_5h}*i&7MWd zq$Ve)X5RBfCKXCpglSepd*L~25ex@gt3p_v{C(kU=fo6Fs&VDmw}G!@QP9IfC!+C$Fj+suDkfee zKAUwe$jWLIdC*PuvqB5Ll~N6B#-6K4BYnJGTh+UHzUTXo7w`yYP(<^AC1{Ni+QbAJ43uwjjfq5YcGC5C~Qs4J;Q{0tQ( zkJ>Fat0mc0bnGfRmQdB0oo1{>>r+plXjImp?J)rw)J${fEsx6prnB9@^oieSmwAU( z9_T-_sHL2mD*1S1Mkq`mSdzYN6vM3YqAEQU!6Q7u_FbOuNnmoZMtLe(Fn!E^``4?< zDbfv@s_bRMrVj|HTOV?z6lEprQWfu-7Z)|NFhe-Shw3X(iL47rkwt!!$4DNQ!@KO7)6U zC=>`7XdJ647vxW~jppixR#pv|o@u}u?i2`yY5uub^D5BwD+NSMtqbPrv#u!zk{?Gsvv!HRjVTdWr~rO6{DU!xga!vs9D6@3zgj(uHWnb3x#sJ5F= zGniYqK3bl)ZF@Eo=)Zu`DGdA(2IpDrjb4G;)ye%i37ZWxtUkt;ByWyukj+FSf3F$e zGYH`0hM8`GU%!xTuzc%D+Wq}54)9I4bYv(93Nhue5RGhxN=`M!r6ME1SjA679B$Z@bmo|3uW1zeo0zNO05hpL_eI_y70z z-tC_M+)i6c9Ti>DY$qs{5LKG2l_$CeZ}(Xr9Lv9;`n6~H>u!(N|HpfRjrza0U()~n o!T!$w+e&*i{m)#No#yY_uI<{cy+Zr{00030|3t6bf&hX50F#K?ApigX diff --git a/charts/v1.4.1/blob-csi-driver/Chart.yaml b/charts/v1.4.1/blob-csi-driver/Chart.yaml deleted file mode 100755 index 71b770425..000000000 --- a/charts/v1.4.1/blob-csi-driver/Chart.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: v1 -appVersion: v1.4.1 -description: Azure Blob Storage CSI driver -name: blob-csi-driver -version: v1.4.1 diff --git a/charts/v1.4.1/blob-csi-driver/templates/NOTES.txt b/charts/v1.4.1/blob-csi-driver/templates/NOTES.txt deleted file mode 100755 index 9ad135dd4..000000000 --- a/charts/v1.4.1/blob-csi-driver/templates/NOTES.txt +++ /dev/null @@ -1,5 +0,0 @@ -The Azure Blob Storage CSI driver is getting deployed to your cluster. - -To check Azure Blob Storage CSI driver pods status, please run: - - kubectl --namespace={{ .Release.Namespace }} get pods --selector="release={{ .Release.Name }}" --watch diff --git a/charts/v1.4.1/blob-csi-driver/templates/_helpers.tpl b/charts/v1.4.1/blob-csi-driver/templates/_helpers.tpl deleted file mode 100755 index 5231cd26f..000000000 --- a/charts/v1.4.1/blob-csi-driver/templates/_helpers.tpl +++ /dev/null @@ -1,26 +0,0 @@ -{{/* vim: set filetype=mustache: */}} - -{{/* Expand the name of the chart.*/}} -{{- define "blob.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* labels for helm resources */}} -{{- define "blob.labels" -}} -labels: - app.kubernetes.io/instance: "{{ .Release.Name }}" - app.kubernetes.io/managed-by: "{{ .Release.Service }}" - app.kubernetes.io/name: "{{ template "blob.name" . }}" - app.kubernetes.io/version: "{{ .Chart.AppVersion }}" - helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" -{{- end -}} - -{{/* pull secrets for containers */}} -{{- define "blob.pullSecrets" -}} -{{- if .Values.imagePullSecrets }} -imagePullSecrets: -{{- range .Values.imagePullSecrets }} - - name: {{ . }} -{{- end }} -{{- end }} -{{- end -}} \ No newline at end of file diff --git a/charts/v1.4.1/blob-csi-driver/templates/csi-blob-controller.yaml b/charts/v1.4.1/blob-csi-driver/templates/csi-blob-controller.yaml deleted file mode 100755 index f01ccf9eb..000000000 --- a/charts/v1.4.1/blob-csi-driver/templates/csi-blob-controller.yaml +++ /dev/null @@ -1,175 +0,0 @@ -kind: Deployment -apiVersion: apps/v1 -metadata: - name: {{ .Values.controller.name }} - namespace: {{ .Release.Namespace }} -{{ include "blob.labels" . 
| indent 2 }} -spec: - replicas: {{ .Values.controller.replicas }} - selector: - matchLabels: - app: {{ .Values.controller.name }} - template: - metadata: -{{ include "blob.labels" . | indent 6 }} - app: {{ .Values.controller.name }} - {{- if .Values.podLabels }} -{{- toYaml .Values.podLabels | nindent 8 }} - {{- end }} -{{- if .Values.podAnnotations }} - annotations: -{{ toYaml .Values.podAnnotations | indent 8 }} -{{- end }} - spec: -{{- with .Values.controller.affinity }} - affinity: -{{ toYaml . | indent 8 }} -{{- end }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: -{{ toYaml .Values.imagePullSecrets | indent 8 }} - {{- end }} - hostNetwork: {{ .Values.controller.hostNetwork }} - serviceAccountName: {{ .Values.serviceAccount.controller }} - nodeSelector: - kubernetes.io/os: linux - {{- if .Values.controller.runOnMaster}} - kubernetes.io/role: master - {{- end}} -{{- with .Values.controller.nodeSelector }} -{{ toYaml . | indent 8 }} -{{- end }} - priorityClassName: {{ .Values.priorityClassName | quote }} -{{- with .Values.controller.tolerations }} - tolerations: -{{ toYaml . | indent 8 }} -{{- end }} - containers: - - name: csi-provisioner - image: {{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }} - args: - - "-v=2" - - "--csi-address=$(ADDRESS)" - - "--leader-election" - - "--timeout=60s" - - "--extra-create-metadata=true" - env: - - name: ADDRESS - value: /csi/csi.sock - imagePullPolicy: {{ .Values.image.csiProvisioner.pullPolicy }} - volumeMounts: - - mountPath: /csi - name: socket-dir - resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }} - - name: liveness-probe - image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" - args: - - --csi-address=/csi/csi.sock - - --probe-timeout=3s - - --health-port={{ .Values.controller.livenessProbe.healthPort }} - imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} - volumeMounts: - - name: socket-dir - mountPath: /csi - resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }} - - name: blob - image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" - args: - - "--v={{ .Values.controller.logLevel }}" - - "--endpoint=$(CSI_ENDPOINT)" - - "--metrics-address=0.0.0.0:{{ .Values.controller.metricsPort }}" - - "--drivername={{ .Values.driver.name }}" - - "--cloud-config-secret-name={{ .Values.controller.cloudConfigSecretName }}" - - "--cloud-config-secret-namespace={{ .Values.controller.cloudConfigSecretNamespace }}" - ports: - - containerPort: {{ .Values.controller.livenessProbe.healthPort }} - name: healthz - protocol: TCP - - containerPort: {{ .Values.controller.metricsPort }} - name: metrics - protocol: TCP - livenessProbe: - failureThreshold: 5 - httpGet: - path: /healthz - port: healthz - initialDelaySeconds: 30 - timeoutSeconds: 10 - periodSeconds: 30 - env: - - name: AZURE_CREDENTIAL_FILE - valueFrom: - configMapKeyRef: - name: azure-cred-file - key: path - optional: true - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: AZURE_ENVIRONMENT_FILEPATH - value: /etc/kubernetes/azurestackcloud.json - {{- end }} - imagePullPolicy: {{ .Values.image.blob.pullPolicy }} - volumeMounts: - - mountPath: /csi - name: socket-dir - - mountPath: /etc/kubernetes/ - name: azure-cred - - mountPath: /var/lib/waagent/ManagedIdentity-Settings - readOnly: true - name: msi - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: ssl - mountPath: 
/etc/ssl/certs - readOnly: true - {{- end }} - {{- if eq .Values.linux.distro "fedora" }} - - name: ssl - mountPath: /etc/ssl/certs - readOnly: true - - name: ssl-pki - mountPath: /etc/pki/ca-trust/extracted - readOnly: true - {{- end }} - resources: {{- toYaml .Values.controller.resources.blob | nindent 12 }} - - name: csi-resizer - image: "{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" - args: - - "-csi-address=$(ADDRESS)" - - "-v=2" - - "-leader-election" - - '-handle-volume-inuse-error=false' - env: - - name: ADDRESS - value: /csi/csi.sock - imagePullPolicy: {{ .Values.image.csiResizer.pullPolicy }} - volumeMounts: - - name: socket-dir - mountPath: /csi - resources: {{- toYaml .Values.controller.resources.csiResizer | nindent 12 }} - volumes: - - name: socket-dir - emptyDir: {} - - name: azure-cred - hostPath: - path: /etc/kubernetes/ - type: Directory - - name: msi - hostPath: - path: /var/lib/waagent/ManagedIdentity-Settings - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: ssl - hostPath: - path: /etc/ssl/certs - {{- end }} - {{- if eq .Values.linux.distro "fedora" }} - - name: ssl - hostPath: - path: /etc/ssl/certs - - name: ssl-pki - hostPath: - path: /etc/pki/ca-trust/extracted - {{- end }} - {{- if .Values.securityContext }} - securityContext: {{- toYaml .Values.securityContext | nindent 8 }} - {{- end }} diff --git a/charts/v1.4.1/blob-csi-driver/templates/csi-blob-driver.yaml b/charts/v1.4.1/blob-csi-driver/templates/csi-blob-driver.yaml deleted file mode 100755 index a742b506a..000000000 --- a/charts/v1.4.1/blob-csi-driver/templates/csi-blob-driver.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -apiVersion: storage.k8s.io/v1 -kind: CSIDriver -metadata: - name: {{ .Values.driver.name }} -spec: - attachRequired: false - podInfoOnMount: true - {{- if .Values.feature.enableFSGroupPolicy}} - fsGroupPolicy: File - {{- end}} - volumeLifecycleModes: - - Persistent - - Ephemeral diff --git a/charts/v1.4.1/blob-csi-driver/templates/csi-blob-node.yaml b/charts/v1.4.1/blob-csi-driver/templates/csi-blob-node.yaml deleted file mode 100755 index f95a1fcda..000000000 --- a/charts/v1.4.1/blob-csi-driver/templates/csi-blob-node.yaml +++ /dev/null @@ -1,198 +0,0 @@ -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: {{ .Values.node.name }} - namespace: {{ .Release.Namespace }} -{{ include "blob.labels" . | indent 2 }} -spec: - selector: - matchLabels: - app: {{ .Values.node.name }} - template: - metadata: -{{ include "blob.labels" . | indent 6 }} - app: {{ .Values.node.name }} - {{- if .Values.podLabels }} -{{- toYaml .Values.podLabels | nindent 8 }} - {{- end }} -{{- if .Values.podAnnotations }} - annotations: -{{ toYaml .Values.podAnnotations | indent 8 }} -{{- end }} - spec: - {{- if .Values.imagePullSecrets }} - imagePullSecrets: -{{ toYaml .Values.imagePullSecrets | indent 8 }} - {{- end }} - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - serviceAccountName: {{ .Values.serviceAccount.node }} - nodeSelector: - kubernetes.io/os: linux -{{- with .Values.node.nodeSelector }} -{{ toYaml . | indent 8 }} -{{- end }} - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: type - operator: NotIn - values: - - virtual-kubelet - {{- if .Values.node.affinity }} -{{- toYaml .Values.node.affinity | nindent 8 }} - {{- end }} - priorityClassName: {{ .Values.priorityClassName | quote }} -{{- with .Values.node.tolerations }} - tolerations: -{{ toYaml . 
| indent 8 }} -{{- end }} - containers: - - name: liveness-probe - imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} - volumeMounts: - - mountPath: /csi - name: socket-dir - image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" - args: - - --csi-address=/csi/csi.sock - - --probe-timeout=3s - - --health-port={{ .Values.node.livenessProbe.healthPort }} - - --v=2 - resources: {{- toYaml .Values.node.resources.livenessProbe | nindent 12 }} - - name: node-driver-registrar - image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" - args: - - --csi-address=$(ADDRESS) - - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) - - --v=2 - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "rm -rf /registration/{{ .Values.driver.name }}-reg.sock /csi/csi.sock"] - env: - - name: ADDRESS - value: /csi/csi.sock - - name: DRIVER_REG_SOCK_PATH - value: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }}/csi.sock - volumeMounts: - - name: socket-dir - mountPath: /csi - - name: registration-dir - mountPath: /registration - resources: {{- toYaml .Values.node.resources.nodeDriverRegistrar | nindent 12 }} - - name: blob - image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" - args: - - "--v={{ .Values.node.logLevel }}" - - "--endpoint=$(CSI_ENDPOINT)" - - "--blobfuse-proxy-endpoint=$(BLOBFUSE_PROXY_ENDPOINT)" - - "--enable-blobfuse-proxy={{ .Values.node.enableBlobfuseProxy }}" - - "--nodeid=$(KUBE_NODE_NAME)" - - "--metrics-address=0.0.0.0:{{ .Values.node.metricsPort }}" - - "--drivername={{ .Values.driver.name }}" - - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" - - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" - ports: - - containerPort: {{ .Values.node.livenessProbe.healthPort }} - name: healthz - protocol: TCP - livenessProbe: - failureThreshold: 5 - httpGet: - path: /healthz - port: healthz - initialDelaySeconds: 30 - timeoutSeconds: 10 - periodSeconds: 30 - env: - - name: AZURE_CREDENTIAL_FILE - valueFrom: - configMapKeyRef: - name: azure-cred-file - key: path - optional: true - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - - name: BLOBFUSE_PROXY_ENDPOINT - value: unix:///csi/blobfuse-proxy.sock - - name: KUBE_NODE_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: AZURE_ENVIRONMENT_FILEPATH - value: /etc/kubernetes/azurestackcloud.json - {{- end }} - imagePullPolicy: {{ .Values.image.blob.pullPolicy }} - securityContext: - privileged: true - volumeMounts: - - mountPath: /csi - name: socket-dir - - mountPath: {{ .Values.linux.kubelet }}/ - mountPropagation: Bidirectional - name: mountpoint-dir - - mountPath: /etc/kubernetes/ - name: azure-cred - - mountPath: /var/lib/waagent/ManagedIdentity-Settings - readOnly: true - name: msi - - mountPath: /mnt - name: blob-cache - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: ssl - mountPath: /etc/ssl/certs - readOnly: true - {{- end }} - {{- if eq .Values.linux.distro "fedora" }} - - name: ssl - mountPath: /etc/ssl/certs - readOnly: true - - name: ssl-pki - mountPath: /etc/pki/ca-trust/extracted - readOnly: true - {{- end }} - resources: {{- toYaml .Values.node.resources.blob | nindent 12 }} - volumes: - - hostPath: - path: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }} - type: DirectoryOrCreate - name: socket-dir - - hostPath: - path: {{ .Values.linux.kubelet }}/ - 
type: DirectoryOrCreate - name: mountpoint-dir - - hostPath: - path: {{ .Values.linux.kubelet }}/plugins_registry/ - type: DirectoryOrCreate - name: registration-dir - - hostPath: - path: /etc/kubernetes/ - type: Directory - name: azure-cred - - hostPath: - path: /var/lib/waagent/ManagedIdentity-Settings - name: msi - - hostPath: - path: {{ .Values.node.blobfuseCachePath }} - name: blob-cache - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: ssl - hostPath: - path: /etc/ssl/certs - {{- end }} - {{- if eq .Values.linux.distro "fedora" }} - - name: ssl - hostPath: - path: /etc/ssl/certs - - name: ssl-pki - hostPath: - path: /etc/pki/ca-trust/extracted - {{- end }} - {{- if .Values.securityContext }} - securityContext: {{- toYaml .Values.securityContext | nindent 8 }} - {{- end }} diff --git a/charts/v1.4.1/blob-csi-driver/templates/rbac-csi-blob-controller.yaml b/charts/v1.4.1/blob-csi-driver/templates/rbac-csi-blob-controller.yaml deleted file mode 100755 index 575437fb8..000000000 --- a/charts/v1.4.1/blob-csi-driver/templates/rbac-csi-blob-controller.yaml +++ /dev/null @@ -1,107 +0,0 @@ -{{- if .Values.rbac.create -}} -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ .Values.rbac.name }}-external-provisioner-role -{{ include "blob.labels" . | indent 2 }} -rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "delete"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["get", "list", "watch", "create", "update", "patch"] - - apiGroups: ["storage.k8s.io"] - resources: ["csinodes"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "watch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "list", "watch", "create", "update", "patch"] - ---- - -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ .Values.rbac.name }}-csi-provisioner-binding -{{ include "blob.labels" . | indent 2 }} -subjects: - - kind: ServiceAccount - name: {{ .Values.serviceAccount.controller }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: {{ .Values.rbac.name }}-external-provisioner-role - apiGroup: rbac.authorization.k8s.io - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ .Values.rbac.name }}-external-resizer-role -{{ include "blob.labels" . | indent 2 }} -rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update", "patch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["update", "patch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "list", "watch", "create", "update", "patch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ .Values.rbac.name }}-csi-resizer-role -{{ include "blob.labels" . 
| indent 2 }} -subjects: - - kind: ServiceAccount - name: {{ .Values.serviceAccount.controller }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: {{ .Values.rbac.name }}-external-resizer-role - apiGroup: rbac.authorization.k8s.io - ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-{{ .Values.rbac.name }}-controller-secret-role -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "create"] - ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-{{ .Values.rbac.name }}-controller-secret-binding -subjects: - - kind: ServiceAccount - name: {{ .Values.serviceAccount.controller }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: csi-{{ .Values.rbac.name }}-controller-secret-role - apiGroup: rbac.authorization.k8s.io -{{ end }} diff --git a/charts/v1.4.1/blob-csi-driver/templates/rbac-csi-blob-node.yaml b/charts/v1.4.1/blob-csi-driver/templates/rbac-csi-blob-node.yaml deleted file mode 100755 index b4a30373c..000000000 --- a/charts/v1.4.1/blob-csi-driver/templates/rbac-csi-blob-node.yaml +++ /dev/null @@ -1,25 +0,0 @@ -{{- if .Values.rbac.create -}} ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-{{ .Values.rbac.name }}-node-secret-role -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list"] - ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-{{ .Values.rbac.name }}-node-secret-binding -subjects: - - kind: ServiceAccount - name: {{ .Values.serviceAccount.node }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: csi-{{ .Values.rbac.name }}-node-secret-role - apiGroup: rbac.authorization.k8s.io -{{ end }} diff --git a/charts/v1.4.1/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml b/charts/v1.4.1/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml deleted file mode 100755 index 93c5c0149..000000000 --- a/charts/v1.4.1/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml +++ /dev/null @@ -1,8 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ .Values.serviceAccount.controller }} - namespace: {{ .Release.Namespace }} -{{ include "blob.labels" . | indent 2 }} -{{- end -}} diff --git a/charts/v1.4.1/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml b/charts/v1.4.1/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml deleted file mode 100755 index ef4c7d754..000000000 --- a/charts/v1.4.1/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml +++ /dev/null @@ -1,8 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ .Values.serviceAccount.node }} - namespace: {{ .Release.Namespace }} -{{ include "blob.labels" . 
| indent 2 }} -{{- end -}} diff --git a/charts/v1.4.1/blob-csi-driver/values.yaml b/charts/v1.4.1/blob-csi-driver/values.yaml deleted file mode 100755 index dfdd2a2c0..000000000 --- a/charts/v1.4.1/blob-csi-driver/values.yaml +++ /dev/null @@ -1,148 +0,0 @@ -image: - blob: - repository: mcr.microsoft.com/k8s/csi/blob-csi - tag: v1.4.1 - pullPolicy: IfNotPresent - csiProvisioner: - repository: mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner - tag: v2.1.0 - pullPolicy: IfNotPresent - livenessProbe: - repository: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe - tag: v2.3.0 - pullPolicy: IfNotPresent - nodeDriverRegistrar: - repository: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar - tag: v2.2.0 - pullPolicy: IfNotPresent - csiResizer: - repository: mcr.microsoft.com/oss/kubernetes-csi/csi-resizer - tag: v1.1.0 - pullPolicy: IfNotPresent - -## Reference to one or more secrets to be used when pulling images -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ -imagePullSecrets: [] -# - name: myRegistryKeySecretName - -serviceAccount: - create: true # When true, service accounts will be created for you. Set to false if you want to use your own. - controller: csi-blob-controller-sa # Name of Service Account to be created or used - node: csi-blob-node-sa # Name of Service Account to be created or used - -rbac: - create: true - name: blob - -controller: - name: csi-blob-controller - cloudConfigSecretName: azure-cloud-provider - cloudConfigSecretNamespace: kube-system - hostNetwork: true # this setting could be disabled if controller does not depend on MSI setting - metricsPort: 29634 - livenessProbe: - healthPort: 29632 - replicas: 2 - runOnMaster: false - logLevel: 5 - resources: - csiProvisioner: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 10m - memory: 20Mi - livenessProbe: - limits: - cpu: 100m - memory: 100Mi - requests: - cpu: 10m - memory: 20Mi - blob: - limits: - cpu: 200m - memory: 200Mi - requests: - cpu: 10m - memory: 20Mi - csiResizer: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 10m - memory: 20Mi - affinity: {} - nodeSelector: {} - tolerations: - - key: "node-role.kubernetes.io/master" - operator: "Exists" - effect: "NoSchedule" - - key: "node-role.kubernetes.io/controlplane" - operator: "Exists" - effect: "NoSchedule" - -node: - name: csi-blob-node - cloudConfigSecretName: azure-cloud-provider - cloudConfigSecretNamespace: kube-system - metricsPort: 29635 - livenessProbe: - healthPort: 29633 - logLevel: 5 - enableBlobfuseProxy: false - blobfuseCachePath: /mnt - resources: - livenessProbe: - limits: - cpu: 100m - memory: 100Mi - requests: - cpu: 10m - memory: 20Mi - nodeDriverRegistrar: - limits: - cpu: 100m - memory: 100Mi - requests: - cpu: 10m - memory: 20Mi - blob: - limits: - cpu: "2" - memory: 2100Mi - requests: - cpu: 10m - memory: 20Mi - affinity: {} - nodeSelector: {} - tolerations: - - operator: "Exists" - livenessProbe: - healthPort: 29633 - -feature: - enableFSGroupPolicy: false - -driver: - name: blob.csi.azure.com - -linux: - kubelet: /var/lib/kubelet - distro: debian - -cloud: AzurePublicCloud - -## Collection of annotations to add to all the pods -podAnnotations: {} -## Collection of labels to add to all the pods -podLabels: {} -## Leverage a PriorityClass to ensure your pods survive resource shortages -## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ -priorityClassName: system-cluster-critical -## Security context give the opportunity to run 
container as nonroot by setting a securityContext -## by example : -## securityContext: { runAsUser: 1001 } -securityContext: {} diff --git a/charts/v1.5.0/blob-csi-driver-v1.5.0.tgz b/charts/v1.5.0/blob-csi-driver-v1.5.0.tgz deleted file mode 100644 index a23a369b62bee65b5c1be3d16937d07a1471a93c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4690 zcmV-Y60PkYiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PH<$Z{xPI{p?>sxCKt{xs@MzZLffQ$R=)2(A^}6v%UA+q6lbd zZ1aR7Rg&_?P4nLmNJ`d=e#UXOJ>~)%TN=&`hcm;`a3~KbA9#TzUMR=}MEiVvTGulcOW^x6|oV|8}}3hhKG%x(D6Eqj$##-LE>`qwew1SE#c+7Us7^DlES0 zyqH#XaDS49WAZZyNjUS-q}#x8T=?2LZgmgxWFSo=lx4$ zm8ZdQ1U{;$RAW*>_|`+-w?eDh{{+(npK8ga-ibx>1#xx*3_$=304a_*1H=W2xBw&}5TK;KJOGqP2+`*;FoTV-5i*J> zH3|VkAB~lYrQa^_)*`$e@<6r~mJhNWa5f~PM1U9b&uu)1P2!L9TEFjfMm-PHv~2@K4Kct$ zRVW($EF5Sw3jELRIL4uoP@aV6W_I%_@DbLLhi7hD^AG1k%NPgXBdt50oJs}Sg^jsX zSD-#~@i3boZ z@4k?HzCO*Ndf=jZ@ItCp&$>H-Jw@xG2blV( zX{=mu3azqljf}cAQ;Iy+y*0LG?~zC)vl}oRLZE!qyyE>}3}Hf{xf1@=l46P(Y(}MF z{8in3=*zF@I3oP`iQx$*RC@&`jZO2BWZ(PAa7Z3~)V%KZy)n?J*=SJ0 zl1GhJTL=Z^qxJ-gHYJ00dZmH%@~H&(Q3wNqS)*YrElnNVBm+uhawsF?8XAfRKo?>7U;wht+89kH1EU*3xFBkJ zPO&ryfXPIpk(lm{WFjVH0yFzVa?FKVHFOO)3m{f`^lFCD))8!+lJ;T&5SesKyAc=p zvF^;eB`=_fcGq4Yh#~=|ntC6CL_?dP1s)X|X-@UPc^q@060NCvR3c$$7BM5NqZ%eS zS7;~Pr#il6?rU)K!6jMOH{$2s`(UZn~Cbd!-gYKf|MjhyRp9A_*S~dTp zAc`qgAlp~hcfEc~J*pSeSeE}iJUOW3e~wR%-|h21yJ&Y~Se(>FM50mZ6GDh7pMvqJ z#wDg+sMToPaTJUpcvynW`cQ>cBELg11uQ{(48M`CDS@KM^H>&&ojyIG)-4#g)+|6n z&(E4Ai_G(6y5OERg@vs3LeI}lzW16j>?T&+nyqz(Vuc@|MPYLoSexRrBW~N6=$Ca zGdzOO8%!&`vUJ+Kjy71=T{1H)iLo`e<7|t~kh9V~n{DN#-Lza%NJF>GIZVN8<|J{>= zMX?4o+bW)LmaxcNl;Z!y5p~@E1F@9!_f>oa4_8KA$fo3d3W*pq)H=mtx7mQdulQ4q z%1)lX_?f7&Yxr3Xtw27@sugK3CR3A|dmYftwTe_nSnFLTUX!c6P}*X0&vgML?^oVl zysBDQ`E~(Yj-ixzoK1W=NZ`^(CV@9k(Tb*&Z$K3ksf3O=&i231{4CurW37uii+ z!K%~jNQo6hRlAG+K))qi0b0UQAx)dpD&=n1;4u$=^11K|yB5Xrg>(+aSl$RQF>bqA zR>M|#Z?Bc_-xrNM)$25_EA`JxJJ9oU?cO5X{xWEVM|nI*C9KHL_GlJ`ZXAag6+~{u z-t;D?g~We$(F8p>3ut}A3t1u~NrF#gOIxQcy(}Inbjb&oZi#o~@op72& z@X=H`IWh26qR*F7jQoCz)4P4R5a z&I(3W+JD)ujV(Z3SNAgTh8DIhimTXH*^=fdq=huIF*BD#Sp%?JUS7VFx}ChEUH!F8K&9UG{BiQA>xZ%S@f(0LQMZdKLm80-3GU^Z0j&G4_bOn8%d zDwl59r+Ji0FJNKJ2~(%ve0$!%yzgCI++1H?-MLnhv}#~xES;A5=ex-kY7E?z?IXLM z@*?B*YW7g!TKLSqua`&=9v%Txj^35MVK<^2gC|XX&Vi`1ojivlQ=DP zIVqzxJ6)y|cHIuYe6vH($5z4I9xKi>Xmw=OPq za6#16>)V~*s{ISV;WeXEhXTzUM`e63)&?o*f(T1XL`vEYK&Umib!vc%S(7+v+0rJ| z974zi-Yu&I?x5*)izI{fvRZ2`hwXYaMiZ5G+r&9I9M$ol9t_W zJs0A{>&!`HkelZ~UPx=Tpmx7rnnJ|Cy={IY z6Wf>maF??>$5Je#j%1>fKZs&Ay&zcuYPpAVRTy(=2U8A!majEx8iS86h%lMlX^mQW z5m-oO?UiVw1;y>5myW;gt{o=0!J@G>cIGb*OOY*EDVFWCRH{3Rn;O-!&$>BOJ3BY9 z3LL!kZs}jIt+4-}KFM!>BzRf<=h5MDb^pJ!-~ZoD^E|J#*b0I3 z{-r(OvA7tZoP3(?++(G1FusM~5+WeX&$q-pyktXu&CDV^3qva|CO!1fa-4WDPGTra zXCQvC55)OSWVV2QAVUbI0fmn|gxP6~8$EQX>{(fTp%;%K0)gp&g1cG4{uvM9MK1(c zQvdVrq;~%Q-SN@B{(C1at^dIgan^?o%72V@ymH}B5{FnpUkR*WG)+&jb8~L|lev`z zH`SwhRX)r2vk=2ZvA0Epo74|gB=KsMLlrvCsO+y$GvplDo0s(zG9@9C*_v}hB7dOkO;uP+GR$(rvlq5m@MJjHS*cT{2qTy3E_UbF%(d?;ME=|rB1W@U z-B#6jCZ`80XYCB~O^Y$y@9Q2_ zIeIZxbK@$o zZdafwG%hUD`teG@K*m%Da{80H(7I2V@L^u`ct%$s*8yW*J>|_qY-=lL?_Pg0QB2 zyC}a~6qGsxKEY!=!uAc??@4INC5-VnSulM}1n}3Z0xZ*wm~!^An^HI!Z`l&|C4^si z31Q{&W;xWOEib;$V&+d9k4G^YXkx&~9c>1Z!l!Hkh{nAX+*@dX%mu5UG_)3F_6lels zU;sv-n%|)&CDQyguMN~6tX%Y`P%nvK`fWfkpqNA}Q?a zm|y}-Z6LXHG~c0SJdfP+VFi-t6?3ZtdJ)2aa}g3&GyxY4Qwy~fp*1|uYt*gtJz*hX zqt`If%6m(^K}u@%VyrIz)6sS>7-#coxO^k9#ny&% zTKGh_B--8T!NKAW6uh;BzwEZY{jVPhF17!|ql*0>93SoDzn!!Dc zVQyr3R8em|NM&qo0PH<$ZyUF=`K(_-D8)(dxmi8@N{;}4aBQW=Xkr_d(|gY?3WM4m 
zNvwCtEy-1^CjRdiB=^as9+qV#ZN&hw#3g5j!8hbJ?2V zD15Q8(&=Q6-|8}}Z`!Bi&-Tm&Hqwbsi!xx?ILHFq31?p^$g!v^A67v_G zC*!gQ_a`YhCO-oggwYAQYyCoPjQ%l*sK9qqW^EYM{jKXKiEIq>HjtgiSQVD4TOeR zz%|6Iha#W1BI0u<*hsc~7OAHp8>&_aafq29lJQv&weNlqZC{XfCPli1#N%Ao)%Q3F z!z&gNf7V0iqYEalI0&G!fsilA6=zeT>;rHI0wzTJE*S!+AVDY;3ZEAjok}S8T6?X% zWhg^31qwo3aW;h2FlJq1)un>(VEGU!3*b!a)-{ZYkQ_f^oT_uGnI6x&S18zDiOJU> z$RCekFgG14^SKmN~W6^7uXloJH4wx_65{o<0_8A?Kal*liS>XAMN=zv5wu0wrU|!706idiB zH)}U^{Z$f%14B&@{qk$$CGwD2xuRJ*N3)+`W|~~!2pSCmd`f&c@qLz1so3aqz!G{$ z@&wRJ^gn7S>ho)qwnf-97U&ZRLp5g8B|sy_(TpW6GytjCF~Xq$L`JF-eZo{%CIZxV zj@T#KQur8^oP{Cqo|;h4Xk1?D35;GMMJ!??G)PHE=}mc_;b&a4Kb6fAI4x~!0BZ0d z_HCvp@VPtCXq4#BpE$;$fP^dwPW9{-Q_w?Lc^;mAXw*NL-&(}jhaOV0x1RjPEs5vP}LjePUD3d6F7-)bPMIQ#|S@#A) z5lBvaamBdop}l_{?H{=9%>-~LCq*-5vpB>d@v-P3{Vk!F^aB=BO}Ca01&NK{!xX|E zI@BEmOSlihz;szsg*+lsWZw|-lNf!IzAuRg#6kvGUqeupYXD zSr0X}mU9+Dt7=;#EpE*eBa2mU1+Ce?Cqjzs0gOiA%N}Z8uz^2;APJ$l68uz?;t*3< z4@*PatGfA6)z4@+B7Fam;wdJfvI;%ab*cK{YO20p(_^3ts@hSF5&>77-Omb@AEwo( z*q^`^mXjW8M|x4W)c-51-&`l>o3a7Ty=J}l)3C0khfb+qNq8C~z%t?bxe=UqgC99d z;>;g2g3~ZAW3RFXw|qfbS{YKG-cN*N(MJLNWDHa(o1!Dhz7LYoh}`#3^KvloCJ;u= zMk6FNxmRG7hJ+w{s6EBJ9g<-?t!W^&d`ixGD1afsw9(L-mWB?ll3_^vQ&pqg#Z$Gk z`ieWs-N2MGX?Uev4IBjePZ%OO0Ti=9G-4K<6sgM8Y@cGz*!tKE)ylGYE(R-DEF8V zR$dL|j7c<{<=zlh9=is2YM?0$!x@5m9K|7^p89Sz>7maG_K8qJr4_o19vbyV54#x9 zw^Ho*kAx@=u>{e+xV-5PTJl~#8OO5t@BY!A75_OrI(oB<|7@e&Oki#UWq;%3|~y>6+rp5P2TWT(RTNpHb@? zbh}pWprMBcMUnyLc_LkKkDJ`Ito1?<4^8BGpRn{NJJVAs^Y#CB0$~hXv}F9mQ1BA{ z?;h-TEd4(^Jlw^9w^BZTZvPWaNz_vdKN^t`FCTiX`HLN=O<0K685U-kq+`XJP0dWz9E#{Pra(S>rwdmoY z9=_F`IHqG**bn6qUNu5g5+L_S@8RKV>*cn}eEXlp9=w>d``J@Y1ee(V!@cfd)&B2w zx}Dwm|5l2nb(96ra)hL`hL&>$a2$*FwA;8NH0Yr-3=yLPkc|i=4zR@aFkK-_=_5z5 zO~jxN5;B+0#6cqEx4qAev?@4`=RnQ`IPF>@N5_RKP!_>KC?iI<%i=tFPsOQ|TCIs8 z5e~sw!U-Lx!AnBN=VQw9>i#|WiO#4f^{j@EZh%Lk`dFDU&5Qnh%t44ErewF$kyy2m zJD!%Kt`|(6Q)e5K2jGHkFiZ)T2@bs??4yd}#%XT(i{6>}jXA`agEu4;a{{6S-W)`m zRjF*sTY9x9&MeWxL(knt;&HmtWyNPvgq3{$@=8ozy+*G*|J7^M^kZaMxAs3<0Sr-F zMsd6OYYD=Xg-HY-^g&*g86JvMZ5ou7QEaiw3|L077ZBda=y9I;PSkf1ddv}N=#~PYAC)~j8lH7cW+B>5Z{fF zN6Zw(*uTSL5MG|9pr$dTrWGl( zj;gX^AJLdnKdd#yb!3}?w-0Sfkh*gLbt_=(7=X3eX# z)I(A$U6n8HwNLpSJ#A-%Z77*7aufC}(EmkG*peBw~-QUd%;VZo-)DM%uj>)yq z7er_PRH8lAFx!X~IZ|%)*>gj}YJ13K1DUs-T!r`}1L6_bOtJRl`GIv7qLS=CI0_xl zzaW~T{=tSk|5WUtPkz z7)_09?l>SSJ5Z%4qCLV|?<(>dUmYHSd#+1OihkAf(hFO|s*g+13JB}?7d4iZRjEio zED2H-9%mu+kVzY}Wi6ecc~f2{H#G&TPP4-+DzU2e6n#OzCrkobLQ%;@DsWZ&rbp1| z-sczLuyXo(ItLRht~ls3j&{@92vl=BtJ{W^HXCVbKYDnmJzE63z9q3*XPXnvfKb4S z^lT1ind#ba@Nq3n)AXjtd(B#4ni{q#jH~EZRg>lkq-m*VBUcwI0;3w3lt(XSTpqg@&?>b>ill=x3+2BGXC3ih zAmFOZ*N!#Bze4R&>DN~-R*?C66zZv>{^+>YBL#as_^T}w-gKVGr3u?sATBfSG>93Y z^7y;&PY36>{fo1!%kzsH*GiIRjpxeJY3YAGH`-E;xf`?Dva2aC6K-m=O@(XW(|NyM zB7l2%3{*OLTe-n*Km`O(===-^AS<1C4n;aS-{i!4CX2wchgD*XGj;!Qkc;fH6~Sk_ z)-n!drD}4N`+g>G`zb~Y<*TS^@rU&+=1el5g*|k0dbJK2RS9!qPMfTV*?A4Ylo2Lj z!r^AZK}=W}nA;CUnMfJ`2-1$?rZL7j4A5h9JyY99g2OWi@oWG-qk-t5{pv|Vx(wxy zyVb`raKZw2=lRas|9!mf-=1Fg&-xcP=O^!P-<`kjyIi+-oJIB1RU@b$aQqX@u3_Xl zex>D0EgS-GL_*gwuRa1++;co*I=vi+l@N7xAZ1hLc0ChH?tAU_`~^QWveNWYYI-_}eL3XewI`b{ue zqp-F(EO#C9{H3iq%ywsMpiV%Ah-`LNga{XSI*KwO!nO}w+E#-D4lZdpbBLXDCS1ho z07lDPF+t4{1dQYDl3D`p#dr23+6dx1()O_@c_L(6?*x5m&B)^!vxWnyWOk|FSibqa zT3jN%{kt414~v5F-Th*#bO(;x)T*dCA2M1TjJGk&Zu zguVK@afXa--})x2Aa#zWI8H_v&<9a0XJ;h42w3gmw9X5USr^&-Wk;@S8GW}ET}or! 
zq&n1O4a2@Ma^@TGrNEY0^JQleR+d5e0KmQ?R~K2kv-7a0M8Rt3e=D5-raQUy6M&c9 z|2o)rDY_%y7RaBk*T_2p?Dg`EBeg zHFP1f{ohKl^YPOjvZNI9+rnCAyTQ-gNbX^9xPX36+iD%0kvG6!O zjlV+9h;v|HJ*}g(Ll3CP)|@kQvoUw)9+S_Z=5yUlj#^5C5svlKD|Q!MEDY|8E;YH- zd@pA*Ih3y2X1U^-icKcGXbQHL>S7(j7hK)O>Fgt8t-B0?KX-WLjwgo+HORvsq_x{vk%o-bpr3Mi_)WTEJ64$`nc*|0?J z((S(SbUSOCu^`dzkuO$w(tfVG@~-$s28TkgPvTk>%+cp3GwQbZ4VZPvn!8n9@@kBy z}g#&$_#yYd$=^YefB5c}RnUIMcu|L>^V?b`2udDH3a z@_)8bEMuc4y=AVd>sNlJ$$#cG@7FAZN`jDXjT1Rxocy5|$5P(l!mJ_PGdqcZdkLK4 zu>4*G{R={?XA1EoG>(<0c9J?BQ1s9*&Ga~<b^FIzwPpP{eQeCxK#i54=nxPJ3QR^e_JVEP5%qmWvBVOvMal?D_^1fe*gdg|Nr^% JVJQHX003$oK(+t? diff --git a/charts/v1.6.0/blob-csi-driver/Chart.yaml b/charts/v1.6.0/blob-csi-driver/Chart.yaml deleted file mode 100755 index 4e2744440..000000000 --- a/charts/v1.6.0/blob-csi-driver/Chart.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: v1 -appVersion: v1.6.0 -description: Azure Blob Storage CSI driver -name: blob-csi-driver -version: v1.6.0 diff --git a/charts/v1.6.0/blob-csi-driver/templates/NOTES.txt b/charts/v1.6.0/blob-csi-driver/templates/NOTES.txt deleted file mode 100644 index 9ad135dd4..000000000 --- a/charts/v1.6.0/blob-csi-driver/templates/NOTES.txt +++ /dev/null @@ -1,5 +0,0 @@ -The Azure Blob Storage CSI driver is getting deployed to your cluster. - -To check Azure Blob Storage CSI driver pods status, please run: - - kubectl --namespace={{ .Release.Namespace }} get pods --selector="release={{ .Release.Name }}" --watch diff --git a/charts/v1.6.0/blob-csi-driver/templates/_helpers.tpl b/charts/v1.6.0/blob-csi-driver/templates/_helpers.tpl deleted file mode 100644 index 5231cd26f..000000000 --- a/charts/v1.6.0/blob-csi-driver/templates/_helpers.tpl +++ /dev/null @@ -1,26 +0,0 @@ -{{/* vim: set filetype=mustache: */}} - -{{/* Expand the name of the chart.*/}} -{{- define "blob.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* labels for helm resources */}} -{{- define "blob.labels" -}} -labels: - app.kubernetes.io/instance: "{{ .Release.Name }}" - app.kubernetes.io/managed-by: "{{ .Release.Service }}" - app.kubernetes.io/name: "{{ template "blob.name" . }}" - app.kubernetes.io/version: "{{ .Chart.AppVersion }}" - helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" -{{- end -}} - -{{/* pull secrets for containers */}} -{{- define "blob.pullSecrets" -}} -{{- if .Values.imagePullSecrets }} -imagePullSecrets: -{{- range .Values.imagePullSecrets }} - - name: {{ . 
}} -{{- end }} -{{- end }} -{{- end -}} \ No newline at end of file diff --git a/charts/v1.6.0/blob-csi-driver/templates/blobfuse-proxy.yaml b/charts/v1.6.0/blob-csi-driver/templates/blobfuse-proxy.yaml deleted file mode 100644 index 5008d9142..000000000 --- a/charts/v1.6.0/blob-csi-driver/templates/blobfuse-proxy.yaml +++ /dev/null @@ -1,88 +0,0 @@ -{{- if .Values.node.enableBlobfuseProxy -}} -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: csi-blobfuse-proxy - namespace: kube-system -spec: - selector: - matchLabels: - app: csi-blobfuse-proxy - template: - metadata: - labels: - app: csi-blobfuse-proxy - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: type - operator: NotIn - values: - - virtual-kubelet - initContainers: - - name: prepare-binaries - image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" - command: ['sh', '-c', "cp /blobfuse-proxy/*.deb /tmp/"] - volumeMounts: - - mountPath: /tmp - name: tmp-dir - containers: - - command: - - nsenter - - '--target' - - '1' - - '--mount' - - '--uts' - - '--ipc' - - '--net' - - '--pid' - - '--' - - sh - - '-c' - - | - set -xe - dpkg -i /tmp/packages-microsoft-prod.deb && apt-get install -y blobfuse=1.4.1 - dpkg -i /tmp/blobfuse-proxy.deb - rm -f /tmp/packages-microsoft-prod.deb /tmp/blobfuse-proxy.deb - mkdir -p /var/lib/kubelet/plugins/blob.csi.azure.com - echo "Enabling blobfuse proxy systemctl service" - systemctl enable blobfuse-proxy - systemctl start blobfuse-proxy - echo "set max open file num" - sysctl -w fs.file-max=9000000 - echo "waiting for blobfuse-proxy service to start" - sleep 3s - # tail blobfuse proxy logs - journalctl -u blobfuse-proxy -f - image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" - imagePullPolicy: IfNotPresent - name: sysctl-install-blobfuse-proxy - env: - - name: "DEBIAN_FRONTEND" - value: "noninteractive" - resources: - limits: - cpu: 200m - memory: 200Mi - requests: - cpu: 10m - memory: 1Mi - securityContext: - privileged: true - hostNetwork: true - hostPID: true - nodeSelector: - kubernetes.io/os: linux - priorityClassName: system-node-critical - restartPolicy: Always - tolerations: - - operator: Exists - volumes: - - hostPath: - path: /tmp - type: DirectoryOrCreate - name: tmp-dir -{{ end }} diff --git a/charts/v1.6.0/blob-csi-driver/templates/csi-blob-controller.yaml b/charts/v1.6.0/blob-csi-driver/templates/csi-blob-controller.yaml deleted file mode 100644 index 4845f385a..000000000 --- a/charts/v1.6.0/blob-csi-driver/templates/csi-blob-controller.yaml +++ /dev/null @@ -1,187 +0,0 @@ -kind: Deployment -apiVersion: apps/v1 -metadata: - name: {{ .Values.controller.name }} - namespace: {{ .Release.Namespace }} -{{ include "blob.labels" . | indent 2 }} -spec: - replicas: {{ .Values.controller.replicas }} - selector: - matchLabels: - app: {{ .Values.controller.name }} - template: - metadata: -{{ include "blob.labels" . | indent 6 }} - app: {{ .Values.controller.name }} - {{- if .Values.podLabels }} -{{- toYaml .Values.podLabels | nindent 8 }} - {{- end }} -{{- if .Values.podAnnotations }} - annotations: -{{ toYaml .Values.podAnnotations | indent 8 }} -{{- end }} - spec: -{{- with .Values.controller.affinity }} - affinity: -{{ toYaml . 
| indent 8 }} -{{- end }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: -{{ toYaml .Values.imagePullSecrets | indent 8 }} - {{- end }} - hostNetwork: {{ .Values.controller.hostNetwork }} - serviceAccountName: {{ .Values.serviceAccount.controller }} - nodeSelector: - kubernetes.io/os: linux - {{- if .Values.controller.runOnMaster}} - kubernetes.io/role: master - {{- end}} -{{- with .Values.controller.nodeSelector }} -{{ toYaml . | indent 8 }} -{{- end }} - priorityClassName: {{ .Values.priorityClassName | quote }} -{{- with .Values.controller.tolerations }} - tolerations: -{{ toYaml . | indent 8 }} -{{- end }} - containers: - - name: csi-provisioner -{{- if hasPrefix "/" .Values.image.csiProvisioner.repository }} - image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" -{{- else }} - image: "{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" -{{- end }} - args: - - "-v=2" - - "--csi-address=$(ADDRESS)" - - "--leader-election" - - "--timeout=60s" - - "--extra-create-metadata=true" - env: - - name: ADDRESS - value: /csi/csi.sock - imagePullPolicy: {{ .Values.image.csiProvisioner.pullPolicy }} - volumeMounts: - - mountPath: /csi - name: socket-dir - resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }} - - name: liveness-probe -{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} - image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" -{{- else }} - image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" -{{- end }} - args: - - --csi-address=/csi/csi.sock - - --probe-timeout=3s - - --health-port={{ .Values.controller.livenessProbe.healthPort }} - imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} - volumeMounts: - - name: socket-dir - mountPath: /csi - resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }} - - name: blob -{{- if hasPrefix "/" .Values.image.blob.repository }} - image: "{{ .Values.image.baseRepo }}{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" -{{- else }} - image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" -{{- end }} - args: - - "--v={{ .Values.controller.logLevel }}" - - "--endpoint=$(CSI_ENDPOINT)" - - "--metrics-address=0.0.0.0:{{ .Values.controller.metricsPort }}" - - "--drivername={{ .Values.driver.name }}" - - "--custom-user-agent={{ .Values.driver.customUserAgent }}" - - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" - - "--cloud-config-secret-name={{ .Values.controller.cloudConfigSecretName }}" - - "--cloud-config-secret-namespace={{ .Values.controller.cloudConfigSecretNamespace }}" - ports: - - containerPort: {{ .Values.controller.livenessProbe.healthPort }} - name: healthz - protocol: TCP - - containerPort: {{ .Values.controller.metricsPort }} - name: metrics - protocol: TCP - livenessProbe: - failureThreshold: 5 - httpGet: - path: /healthz - port: healthz - initialDelaySeconds: 30 - timeoutSeconds: 10 - periodSeconds: 30 - env: - - name: AZURE_CREDENTIAL_FILE - valueFrom: - configMapKeyRef: - name: azure-cred-file - key: path - optional: true - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: AZURE_ENVIRONMENT_FILEPATH - value: /etc/kubernetes/azurestackcloud.json - {{- end }} - imagePullPolicy: {{ .Values.image.blob.pullPolicy }} - volumeMounts: - - mountPath: /csi - 
name: socket-dir - - mountPath: /etc/kubernetes/ - name: azure-cred - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: ssl - mountPath: /etc/ssl/certs - readOnly: true - {{- end }} - {{- if eq .Values.linux.distro "fedora" }} - - name: ssl - mountPath: /etc/ssl/certs - readOnly: true - - name: ssl-pki - mountPath: /etc/pki/ca-trust/extracted - readOnly: true - {{- end }} - resources: {{- toYaml .Values.controller.resources.blob | nindent 12 }} - - name: csi-resizer -{{- if hasPrefix "/" .Values.image.csiResizer.repository }} - image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" -{{- else }} - image: "{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" -{{- end }} - args: - - "-csi-address=$(ADDRESS)" - - "-v=2" - - "-leader-election" - - '-handle-volume-inuse-error=false' - env: - - name: ADDRESS - value: /csi/csi.sock - imagePullPolicy: {{ .Values.image.csiResizer.pullPolicy }} - volumeMounts: - - name: socket-dir - mountPath: /csi - resources: {{- toYaml .Values.controller.resources.csiResizer | nindent 12 }} - volumes: - - name: socket-dir - emptyDir: {} - - name: azure-cred - hostPath: - path: /etc/kubernetes/ - type: DirectoryOrCreate - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: ssl - hostPath: - path: /etc/ssl/certs - {{- end }} - {{- if eq .Values.linux.distro "fedora" }} - - name: ssl - hostPath: - path: /etc/ssl/certs - - name: ssl-pki - hostPath: - path: /etc/pki/ca-trust/extracted - {{- end }} - {{- if .Values.securityContext }} - securityContext: {{- toYaml .Values.securityContext | nindent 8 }} - {{- end }} diff --git a/charts/v1.6.0/blob-csi-driver/templates/csi-blob-driver.yaml b/charts/v1.6.0/blob-csi-driver/templates/csi-blob-driver.yaml deleted file mode 100644 index a742b506a..000000000 --- a/charts/v1.6.0/blob-csi-driver/templates/csi-blob-driver.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -apiVersion: storage.k8s.io/v1 -kind: CSIDriver -metadata: - name: {{ .Values.driver.name }} -spec: - attachRequired: false - podInfoOnMount: true - {{- if .Values.feature.enableFSGroupPolicy}} - fsGroupPolicy: File - {{- end}} - volumeLifecycleModes: - - Persistent - - Ephemeral diff --git a/charts/v1.6.0/blob-csi-driver/templates/csi-blob-node.yaml b/charts/v1.6.0/blob-csi-driver/templates/csi-blob-node.yaml deleted file mode 100644 index 5b3d37147..000000000 --- a/charts/v1.6.0/blob-csi-driver/templates/csi-blob-node.yaml +++ /dev/null @@ -1,218 +0,0 @@ -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: {{ .Values.node.name }} - namespace: {{ .Release.Namespace }} -{{ include "blob.labels" . | indent 2 }} -spec: - updateStrategy: - rollingUpdate: - maxUnavailable: {{ .Values.node.maxUnavailable }} - type: RollingUpdate - selector: - matchLabels: - app: {{ .Values.node.name }} - template: - metadata: -{{ include "blob.labels" . | indent 6 }} - app: {{ .Values.node.name }} - {{- if .Values.podLabels }} -{{- toYaml .Values.podLabels | nindent 8 }} - {{- end }} -{{- if .Values.podAnnotations }} - annotations: -{{ toYaml .Values.podAnnotations | indent 8 }} -{{- end }} - spec: - {{- if .Values.imagePullSecrets }} - imagePullSecrets: -{{ toYaml .Values.imagePullSecrets | indent 8 }} - {{- end }} - hostNetwork: true - dnsPolicy: Default - serviceAccountName: {{ .Values.serviceAccount.node }} - nodeSelector: - kubernetes.io/os: linux -{{- with .Values.node.nodeSelector }} -{{ toYaml . 
| indent 8 }} -{{- end }} - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: type - operator: NotIn - values: - - virtual-kubelet - {{- if .Values.node.affinity }} -{{- toYaml .Values.node.affinity | nindent 8 }} - {{- end }} - priorityClassName: {{ .Values.priorityClassName | quote }} -{{- with .Values.node.tolerations }} - tolerations: -{{ toYaml . | indent 8 }} -{{- end }} - containers: - - name: liveness-probe - imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} - volumeMounts: - - mountPath: /csi - name: socket-dir -{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} - image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" -{{- else }} - image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" -{{- end }} - args: - - --csi-address=/csi/csi.sock - - --probe-timeout=3s - - --health-port={{ .Values.node.livenessProbe.healthPort }} - - --v=2 - resources: {{- toYaml .Values.node.resources.livenessProbe | nindent 12 }} - - name: node-driver-registrar -{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} - image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" -{{- else }} - image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" -{{- end }} - args: - - --csi-address=$(ADDRESS) - - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) - - --v=2 - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "rm -rf /registration/{{ .Values.driver.name }}-reg.sock /csi/csi.sock"] - livenessProbe: - exec: - command: - - /csi-node-driver-registrar - - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) - - --mode=kubelet-registration-probe - initialDelaySeconds: 30 - timeoutSeconds: 15 - env: - - name: ADDRESS - value: /csi/csi.sock - - name: DRIVER_REG_SOCK_PATH - value: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }}/csi.sock - volumeMounts: - - name: socket-dir - mountPath: /csi - - name: registration-dir - mountPath: /registration - resources: {{- toYaml .Values.node.resources.nodeDriverRegistrar | nindent 12 }} - - name: blob -{{- if hasPrefix "/" .Values.image.blob.repository }} - image: "{{ .Values.image.baseRepo }}{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" -{{- else }} - image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" -{{- end }} - args: - - "--v={{ .Values.node.logLevel }}" - - "--endpoint=$(CSI_ENDPOINT)" - - "--blobfuse-proxy-endpoint=$(BLOBFUSE_PROXY_ENDPOINT)" - - "--enable-blobfuse-proxy={{ .Values.node.enableBlobfuseProxy }}" - - "--nodeid=$(KUBE_NODE_NAME)" - - "--metrics-address=0.0.0.0:{{ .Values.node.metricsPort }}" - - "--drivername={{ .Values.driver.name }}" - - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" - - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" - - "--custom-user-agent={{ .Values.driver.customUserAgent }}" - - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" - ports: - - containerPort: {{ .Values.node.livenessProbe.healthPort }} - name: healthz - protocol: TCP - livenessProbe: - failureThreshold: 5 - httpGet: - path: /healthz - port: healthz - initialDelaySeconds: 30 - timeoutSeconds: 10 - periodSeconds: 30 - env: - - name: AZURE_CREDENTIAL_FILE - valueFrom: - configMapKeyRef: - name: azure-cred-file - 
key: path - optional: true - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - - name: BLOBFUSE_PROXY_ENDPOINT - value: unix:///csi/blobfuse-proxy.sock - - name: KUBE_NODE_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: AZURE_ENVIRONMENT_FILEPATH - value: /etc/kubernetes/azurestackcloud.json - {{- end }} - imagePullPolicy: {{ .Values.image.blob.pullPolicy }} - securityContext: - privileged: true - volumeMounts: - - mountPath: /csi - name: socket-dir - - mountPath: {{ .Values.linux.kubelet }}/ - mountPropagation: Bidirectional - name: mountpoint-dir - - mountPath: /etc/kubernetes/ - name: azure-cred - - mountPath: /mnt - name: blob-cache - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: ssl - mountPath: /etc/ssl/certs - readOnly: true - {{- end }} - {{- if eq .Values.linux.distro "fedora" }} - - name: ssl - mountPath: /etc/ssl/certs - readOnly: true - - name: ssl-pki - mountPath: /etc/pki/ca-trust/extracted - readOnly: true - {{- end }} - resources: {{- toYaml .Values.node.resources.blob | nindent 12 }} - volumes: - - hostPath: - path: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }} - type: DirectoryOrCreate - name: socket-dir - - hostPath: - path: {{ .Values.linux.kubelet }}/ - type: DirectoryOrCreate - name: mountpoint-dir - - hostPath: - path: {{ .Values.linux.kubelet }}/plugins_registry/ - type: DirectoryOrCreate - name: registration-dir - - hostPath: - path: /etc/kubernetes/ - type: DirectoryOrCreate - name: azure-cred - - hostPath: - path: {{ .Values.node.blobfuseCachePath }} - name: blob-cache - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: ssl - hostPath: - path: /etc/ssl/certs - {{- end }} - {{- if eq .Values.linux.distro "fedora" }} - - name: ssl - hostPath: - path: /etc/ssl/certs - - name: ssl-pki - hostPath: - path: /etc/pki/ca-trust/extracted - {{- end }} - {{- if .Values.securityContext }} - securityContext: {{- toYaml .Values.securityContext | nindent 8 }} - {{- end }} diff --git a/charts/v1.6.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml b/charts/v1.6.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml deleted file mode 100644 index 575437fb8..000000000 --- a/charts/v1.6.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml +++ /dev/null @@ -1,107 +0,0 @@ -{{- if .Values.rbac.create -}} -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ .Values.rbac.name }}-external-provisioner-role -{{ include "blob.labels" . | indent 2 }} -rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "delete"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["get", "list", "watch", "create", "update", "patch"] - - apiGroups: ["storage.k8s.io"] - resources: ["csinodes"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "watch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "list", "watch", "create", "update", "patch"] - ---- - -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ .Values.rbac.name }}-csi-provisioner-binding -{{ include "blob.labels" . 
| indent 2 }} -subjects: - - kind: ServiceAccount - name: {{ .Values.serviceAccount.controller }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: {{ .Values.rbac.name }}-external-provisioner-role - apiGroup: rbac.authorization.k8s.io - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ .Values.rbac.name }}-external-resizer-role -{{ include "blob.labels" . | indent 2 }} -rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update", "patch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["update", "patch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "list", "watch", "create", "update", "patch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ .Values.rbac.name }}-csi-resizer-role -{{ include "blob.labels" . | indent 2 }} -subjects: - - kind: ServiceAccount - name: {{ .Values.serviceAccount.controller }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: {{ .Values.rbac.name }}-external-resizer-role - apiGroup: rbac.authorization.k8s.io - ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-{{ .Values.rbac.name }}-controller-secret-role -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "create"] - ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-{{ .Values.rbac.name }}-controller-secret-binding -subjects: - - kind: ServiceAccount - name: {{ .Values.serviceAccount.controller }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: csi-{{ .Values.rbac.name }}-controller-secret-role - apiGroup: rbac.authorization.k8s.io -{{ end }} diff --git a/charts/v1.6.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml b/charts/v1.6.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml deleted file mode 100644 index b4a30373c..000000000 --- a/charts/v1.6.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml +++ /dev/null @@ -1,25 +0,0 @@ -{{- if .Values.rbac.create -}} ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-{{ .Values.rbac.name }}-node-secret-role -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list"] - ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-{{ .Values.rbac.name }}-node-secret-binding -subjects: - - kind: ServiceAccount - name: {{ .Values.serviceAccount.node }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: csi-{{ .Values.rbac.name }}-node-secret-role - apiGroup: rbac.authorization.k8s.io -{{ end }} diff --git a/charts/v1.6.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml b/charts/v1.6.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml deleted file mode 100644 index 93c5c0149..000000000 --- a/charts/v1.6.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml +++ /dev/null @@ -1,8 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ .Values.serviceAccount.controller }} - namespace: {{ .Release.Namespace }} -{{ include "blob.labels" . 
| indent 2 }} -{{- end -}} diff --git a/charts/v1.6.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml b/charts/v1.6.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml deleted file mode 100644 index ef4c7d754..000000000 --- a/charts/v1.6.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml +++ /dev/null @@ -1,8 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ .Values.serviceAccount.node }} - namespace: {{ .Release.Namespace }} -{{ include "blob.labels" . | indent 2 }} -{{- end -}} diff --git a/charts/v1.6.0/blob-csi-driver/values.yaml b/charts/v1.6.0/blob-csi-driver/values.yaml deleted file mode 100755 index c970b080c..000000000 --- a/charts/v1.6.0/blob-csi-driver/values.yaml +++ /dev/null @@ -1,152 +0,0 @@ -image: - baseRepo: mcr.microsoft.com - blob: - repository: /k8s/csi/blob-csi - tag: v1.6.0 - pullPolicy: IfNotPresent - csiProvisioner: - repository: /oss/kubernetes-csi/csi-provisioner - tag: v2.2.2 - pullPolicy: IfNotPresent - livenessProbe: - repository: /oss/kubernetes-csi/livenessprobe - tag: v2.4.0 - pullPolicy: IfNotPresent - nodeDriverRegistrar: - repository: /oss/kubernetes-csi/csi-node-driver-registrar - tag: v2.3.0 - pullPolicy: IfNotPresent - csiResizer: - repository: /oss/kubernetes-csi/csi-resizer - tag: v1.3.0 - pullPolicy: IfNotPresent - -## Reference to one or more secrets to be used when pulling images -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ -imagePullSecrets: [] -# - name: myRegistryKeySecretName - -serviceAccount: - create: true # When true, service accounts will be created for you. Set to false if you want to use your own. - controller: csi-blob-controller-sa # Name of Service Account to be created or used - node: csi-blob-node-sa # Name of Service Account to be created or used - -rbac: - create: true - name: blob - -controller: - name: csi-blob-controller - cloudConfigSecretName: azure-cloud-provider - cloudConfigSecretNamespace: kube-system - hostNetwork: true # this setting could be disabled if controller does not depend on MSI setting - metricsPort: 29634 - livenessProbe: - healthPort: 29632 - replicas: 2 - runOnMaster: false - logLevel: 5 - resources: - csiProvisioner: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 10m - memory: 20Mi - livenessProbe: - limits: - cpu: 100m - memory: 100Mi - requests: - cpu: 10m - memory: 20Mi - blob: - limits: - cpu: 200m - memory: 200Mi - requests: - cpu: 10m - memory: 20Mi - csiResizer: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 10m - memory: 20Mi - affinity: {} - nodeSelector: {} - tolerations: - - key: "node-role.kubernetes.io/master" - operator: "Exists" - effect: "NoSchedule" - - key: "node-role.kubernetes.io/controlplane" - operator: "Exists" - effect: "NoSchedule" - -node: - name: csi-blob-node - cloudConfigSecretName: azure-cloud-provider - cloudConfigSecretNamespace: kube-system - maxUnavailable: 1 - metricsPort: 29635 - livenessProbe: - healthPort: 29633 - logLevel: 5 - enableBlobfuseProxy: false - blobfuseCachePath: /mnt - resources: - livenessProbe: - limits: - cpu: 100m - memory: 100Mi - requests: - cpu: 10m - memory: 20Mi - nodeDriverRegistrar: - limits: - cpu: 100m - memory: 100Mi - requests: - cpu: 10m - memory: 20Mi - blob: - limits: - cpu: "2" - memory: 2100Mi - requests: - cpu: 10m - memory: 20Mi - affinity: {} - nodeSelector: {} - tolerations: - - operator: "Exists" - livenessProbe: - healthPort: 29633 - -feature: - enableFSGroupPolicy: 
false - -driver: - name: blob.csi.azure.com - customUserAgent: "" - userAgentSuffix: "OSS-helm" - -linux: - kubelet: /var/lib/kubelet - distro: debian - -cloud: AzurePublicCloud - -## Collection of annotations to add to all the pods -podAnnotations: {} -## Collection of labels to add to all the pods -podLabels: {} -## Leverage a PriorityClass to ensure your pods survive resource shortages -## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ -priorityClassName: system-cluster-critical -## Security context give the opportunity to run container as nonroot by setting a securityContext -## by example : -## securityContext: { runAsUser: 1001 } -securityContext: {} diff --git a/charts/v1.7.0/blob-csi-driver-v1.7.0.tgz b/charts/v1.7.0/blob-csi-driver-v1.7.0.tgz deleted file mode 100644 index 75e8940cdeea4559b710450a49a2f332d1298464..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 5580 zcmV;-6*KA|iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PH>QbK5ws``Ld5$L(Ev_bS_Qess?>`H&{=US?Y-6Q_N5Z)Yb% zOR&w2Bx*^@jW^Bz{)VJ}Qk3mDuG76U`=PN!0w4*1AOKP$pN<@t6USp@2CRKL#Z0v3 zIPibiSm|^+ouh*T^?#?+G5_!E9qs?Hcd&QZ-9PH=zwQ3ekw4!4fI6Eap}xdiVD>}j z$+*nU{YeUr$Y)@j(6EPQdkq{#g`cgrtxm%O?lKYy_3Y%2m;rj{(-9g9%J2lx>F^wB z=4pgDfF3efsxdPFK6jUQrBF=ypJ6`+zF7sZME>6%bq`GWKj;7KOy@^=*b z{w4K^JMW?M@qmg;1{^}sK*;6flF=EF`T>{)0p+}X8;^j6Ab?905}y<0jS48ct!}Hk z45d$I5CZ3yjE-P6jA@fdHYwpdT;9cydT^#>>k1}>3x*%jPuVz;Ooye-OB5WeG{{%L z$sdmyV5S?C=5v1;s>X{K=nBTbAanrOaQS!yDCXdyFH;B=G!jmb zVni-MFc|mHREUW8+6CHLgtk5E^0vVImbYCRj>#lu;6&7OTp9{YLSStP&r!gfh>;l< zkdEBE-B8ajW8WWYYI^9GUmGuwgY?Q3%#%5q{|s|oWq<={$SMvxjk%x!^6?0K)er1> zh=hWo00j21zztVb`Jig_(EURLp%>`(NABP#sxFKT4s1qTIB{JXheFPd%K!`LA;Drm zFVO$W5t4tuK}lVNb!Cpe5Z{+WshfCcOc|QfxP^uwBz0r#b3kM)AEGZfR1aeg@;5{D zOW2b5Xeb!p=PAt9kRnaUJDWolLvmM?%)OuPo51{-15 z925yYvoacu0{z(=OFtwKpT^#)Vw!wrdk9N|#!)Y|R`Kd<^9Z}pLy{#OKIa0YX2iZv zzx0Dh%un++8Ld;w#Q?+?%5Jks6;r|y2O%WeyEOJa*?W(0Jo3SlGnNyMJPI6zR3Hx` z2t7nY^l^BeHg6!wdyKgJk}}al-G3bIAJ~=L6tFL*c{Qmh8ALvDG4CPuD-JKhkC+R| z42pXsBs%#3Gw^%pP&ME*W-f3I)26;9@_-1Q{zAx&;vU+|eg+UI)pOYCd?d+J2EWI^ ztBpEArCB#^W$EL}xc8ojJ6{f0M%jHLN>ht(X`JzROhO{&i8hn6JOm$HL0R%hP#+kU z+k*tGgKlBoLro?3jQY?jEAc?FSTn&$BiUL)YxeI57d(9d<1x6RhnfRAbf@6OJ~UT? 
zpGZ&S;}F)v(oh<#s!-(PXHz8szWWs78798e6MCjA!hg7$@b6b78$tV#TD@B7h8-D=yX;ap2QSY>e~rfrM?ip_V6=*citTrsSGYZ_Ph9)tL<_PX^5cz<5^0!f?hwdfp6E2xb z3Hu&;XjCgbY}G+#`hNrjk&gxN_Tb{WKWvFR@njs!{J;B0UBmx#cy!d=`G2-iuBWir zsf!3llZ5R(hsh9toDfkgE z-V)IhUBOG_zq@~A`hSlOck!R?l>7Vk-_eW&J-N1_G4VmnBRCG^u((s`p})5u9vTf* zt$!Ec&{Jz>n0RvJ->wQ7s(SbL4)S13LO@MrR>^uz=$yHo=97pqbv z8g{}}l^j{bNGQgr`4{f|g*VF`(0?b}9rRUsRiu_`{!bG%Z<_0Lh6Vu*QL5N@9g%4T zZL(=-IEsw@u(opV?@=n-rMYP#Ya_M43tipNRHpSLYwNO(UcW%6d`UvrkG;Cy)KKQq zAflnN$qT;JYP|?E;n4A;dmi8rPr!3V^K#!)r)5g}IGy>EEM{s4n8iOOv`Uk&==Ye4 z(x^T>G~|||#iwn>hPEyw{Mjm!lSoq039VlvLH~^de?g55acO3?*hjJNXYTtkWw~|C zk$s+{+{0d&?3_Z=4KEQ_T=S|C!{G!LwnLfsUJh?fZ^|8eZk_u*mHPERb-Fkaqj&SC zM1+^cfA_nG<@LYY+3W1@*8go3Lo!3^LCfL`OqP~BR|H29Z_oA`w=nO+Z!y$-9 z8Vaj=Pjg;MURiyhdWp~upz0FgXG_CZ6q3+`P@n@T-8=#nDoWRoUgA>}?({LMbt-Ug{ysTFN0lsXW!*H^RU>ix~+gi8Gpnlk-VPv&a2A zaATE%i3wg{qj^Vb4|nev)`RB`6=Ogbo#PZJ-!2qI&i^=yeCay-Hs zDLuA%O{p}%ha5ry%y|0BaRg=)5U>mZ+|nIi78a2+KX-{$RV0fBJAGc32-ON5R$y)DMtG1UlERkGXar?hmbSR zf;Y!6?^7afKKHMN=NAK$>M?07Ky4T#9*m6IEI`h9Ihm^g3vNBakP}s$`0dD#CnVGc zQYCIsnp3r?xx!E45y-Mu*lJ6QM3|M0+Qa_!&Bv2}-Ml~l(7zdc`gr{EK7Dj?*&k%j zm#~Mm%^Ezy=Uln&zMwI0jfoFVfbWi9-WN%<%(n651!{i5M7bK}KBGJWSq6-tdW4$h z0QvwC+UFIZ0uw(2hBPprPRxqG(U^tUSN(`h`TC(vU5&)!k@t?B>S>vVSU->sCy zf1?7sfn2O@PM)e$r=Z3Q>=7p6fs%ixE`)11dz3?E8Q8UaLV1>}9eny;qgc&@w46{I zsnrNaPV$j%xP}*M6Q&)Q29@S;Z!=B@r1dVpjTke)G9-~@ecUw;P-|Rkj5$x)Y)Tq7Y-gk32 z&HXTDlZxZC>CBG1=Dw-vDB}xz9s|ygU%oy$JG<%+hkvtF^Z|Ro97vKmEj0uQfX3o@ zuiN34*Kj8ocJyf)CtX^Om78akU3Ws7ePh>7PL8!Wb#E7W29}L$e!8qYq^tU*(^H6f zNt_PUy#-NlOQX`35?L0sl8um5G{`M1b$i_+!Rer-dCiVKJh+0Cl`^3>D`X2mSNXkM zc})@96vkBqtSm}(45@3X*~sL@@EOVqI{Dm*C=#+X<2A8E? zI@VzS617XYUmeoh`Ar|WPB^nVmP?N8SR8Oty@y^zNhpqAzCInE-}DD(mlx-QYui$i zoEyr7rPEUXdUmvhQ$}{o+IJwQyhylynr=jFE1yb3a$*k5!4n9DrL}Pm!wx8i;Bb{& zX8~j!-Ke2R9p2LC4;-EHnQ_1rM{<6*5zd?MD!B~)4`m$KWhd+_09H9?o0{x&%np?A-FX3cn|G&$}UvBz-6!Z z%C93}gnIVI^=9DzeY)!3oL=?M`h)B9lMl+{Z!-$tGa6L$Kr~B!#L>?%zk;zXQ6wxL zr)1zcW8&KqQdF{v96ifBstyTZzvMkHcciE!?M7`JlDl5JU2jSxOB?*2vz`4So( zIR=k1yj@ZYz@6yUoJ12rbW7SUb_9#LXsbP@D~uU={J^YXno6EsYLON%H(AZ)k(|2U zxVys&*XnjiYp(C+cp|RV%+^&M8CEyQ66eM%XNp752S@83hlDW)2N^z$582<%R;lA{s%1s@Uh z&ciXi2-T9B8I^`IO0ElOS?&r^#Ep6B_K^2_o0*3W)8+638G}1_efUT{$nG(@lw-z4 zn&If6el&#u820}}ifIM;S9y9*xe#=*{C7Luy^eAI`|V-(U?>0EDCYUCN2mWN?$~7F zj{$HkwGl@i7BCbH3z#IQtfVbW!pSG~Dm8!1OSMco%I{N(v@Lp71h`J-P^G8Ok~3tX zVwWvcJi_=C|=0Z#p_Jpt&myUkICebEU<14PbHZ}^V9l?HMVfjb8!gr}5f9})}#o4PyJtdskad%~B zo&vsZ7KZ&6@fG;Za;4>Fxo#QtjU4j0{i2+F?W?$swEmfn>48$od}ER@ypj| zSLdJmS2tJvpKgX1r$66Zo?QR43Zcwn3u4!Bmm553u^!RZkeA#}zuAtYPy*_~u^p75 z-q(zdAAYkSLgDP9rMq@m1?JP9f&GjUI;|{_Y7DSnr+wGSjm^QiFf?+_0ON)^=}3j zXZ@SO$;bYhiFDeJ@U2qlR!CS=1bmBBwR*q{lGMJxq`y_t=JdvCg@m~>M@~Sb5o<26 z;6;#S=XCk5>ZfJlJ<1YYFaK_r1-HwBtAMJ}*vp|Bm$B3$DC^_ZM|(`b_iBdO_yAtD zf@-Yo79N4Cq)UAjs95R0GZAo|+YZo{5(pPr&sNKn(Fjkl{>^TQkvDNXY2#U5c~`u`!y?e@leiWI zHS+xAfx5{*KDQ28wR`F%cd0xjlWu`lT6s81cl%_Y?J2WIt@_%f^6gUjc4a%IKL3Y} zu zW|ZaeC3aF$X$EYhDoj8$-=L;XxcXmxMn?Xj$4UO>$yFazyEPEHJ|@A+Saoib4OcmL z(}p>z;rf{K75lkjpI`=|SYzap-n>E0s1DuoZn>ODgJYu|dJ;gFQs$AcC=xatDmQK; zgVt~yr%{#GcO>*kIQbSrYuq2>j1pAqr~JmF-@p}#)atGbOLPO;8Iz2|NoRjKuh-jd;14n^Zehz{?Ts#zm2kF|G(9RK;;vDk4wNaDgNSk zd@m}$cvNI7%HF~hJ2k(#Qm_A$iN|{4pW+E#cK*Ba{qOEUXZQWT?UYK)r)=QmD}j+G zV^tsvcW6s&$HyNWeT4EWe%183NBsLLkC*?)TY^jF|IqmUcei_Zu(SWRQl3rzb3Dc zVQyr3R8em|NM&qo0PH>QbKAJF_gQ}hMoF8rFO=k;xoSoq99y}1E|EQ!lh?kxjt3%F z5^9oQ0Z_5d@&En?0Dpp@B+H7Ny9)Ubiv)HTi`~Uyu>eOt9XT#1j>pIhSo?H}nP|;% z;Qz3((&=X zBP~3Q5C_mjhDbGL2Ega;@~#w$ssA(V$G|sh0G8AL(K z#lC+@ed5l$=zKh&;*tS}P&5#7Ik{wXMx=cJWL{#SNI{{03ebrIH;Ir>6;U-nry@z9tuG^cS3 
z4M9kDjIqxFk+FP;zTi+jj5)~P4AC!POX8!UVAS`4b>&DqS{1X04#(&PlEfl9M#F@J zgx-YbDSpZn`xBupfKy_)2A~ETVb^4e1fTf{N;FO-rLJ@^BxoFlRABi_$prR1^`Gw} zF$EM+k2fOfo#eH;=>DM!#>XS@7eV--D(J?@anNbZ1r3aLBoq_{Ah3r8Zn&ySp&>#q z(Cv@h!BJH8dn8RCUg{v1e!=nB|m8tQ6}P0z7!O(I7FG!AZegw3KFQZA)=I#vA2wYrP2G+>T; zsizvRzBZ4r3tc1!#L@g~ASr9~OFxJtce6H`_)jSp0}x**yUpx`m=d1x$EC6FNrmzV z$0Hv+X#{e@kw-!DRUi){2t7nY^l^BeHg6y)v5dIg6eVXKM{vHTYoFo;qjPAolWeRH1R`_ z_LH*Yk)S@X#KbBn4!VVT7d6#%z^D(cvKb1Lm^2fNG?J|)v}XT~aKY0TFdl;|x~Msz zLw5>Z>_c-U_=yHYJ`Q0$EDbfWRgHpt{A}hRz;~ZQJj2A7y52>5HU&OfO@R+8+6keA zDp%~|n8PKbck^74N2SLKA_=*`zMnS8sLU&80fN2OVd8Id5Fhc~MFin{;=>>g3e5uJ zMe}cZg`@{6v2dRv4+}VZm$gfgonm(imsm`@s2wPG7Dv+NU6TNY0teA~sH&Wif_)04 zPjU$?Ae?Itq4W0b+p4MBs`02@2t2dBdK!|o%!pNKSCa0=7_f+$daot>{qQG7<0xH_ zq#~eS{siJP^g#q2f`rE!Ii94AaC(%$_&YF&Y<*UF|>^za}_ z(!d;tC-$b-WV&Xh6?%ARBFFiHg*$z2h5OD*z5d@!!Hr2`hR@1 z+yB{4xxa7!70pP{l|CFA6CcDpf?gom;!dH9{@Q+cXf#x{{#}GaPc8bxBn~A1cGZ@l zs&{|yAP>eQ1k_ZMs;t*UPW}qVzCbO#%cxYd^;z#Y$eI^3V2pTxzDhSRbkXsF`jZ5D zwna@R2PPN3PD)$jR7IFHEt)z>YO?i&FxP>_(LASEM|xTfJK?HIjx1s%6l2u<6LWLiN-q%<@fMaHs8`#AUaDAn!K z*tC$fky-|Xu5M@=(|VG1^sJ6vzd)yaNkZ3;y}I7iQ0CGgqM`E03ct z9^ep9z;j0Pa^KUCXG;4x9r=@N&(sbu+y9i%Dowtk|HM?3xcczWkPc;wPuofiZCyy@ zvsEM~(WGLOQolxm{u2lOf*Kj((#UGHk7D1?qV!|R^6(x<_HmB#Kzd=Ya|+F9yF^@x zz^g_KhZ9)X4rM`l$=;gYlsop^Hurlf_49wa0(T-t@8(b05neq1?{$ujj>_}@erK<< z^Z&O}49%45_m(AOluRvot_Y4I-k$9>Zb|5M(HVw-hC>jIbg!fuWXokKd6k69>RkK= zN@f!s3#iT)el|~hMIi}22n9Nn4S57=*D0M)x=9>LxYG{>h$@s+Ltm-}Nz+27R4k9!aJk^01!oWL=83`vz0F#81^GQgv$Nf8SW0jXvXjzWITmuVu z`L#5diWmL6h(Qt{Gisnb8Y|sF%iCh)bwI^=Xsx5ONo>$Hh8bZZ#=etBN016Z5^ z7@@WZqIUCN1qd@0K>Mh+tji`Ouy_fgqqpl-Z3=c*_GDfdoqh_`GLlgCSs43et(O>>zG!s*( zYPA>LPLM;ioJ82Y#S`F8mM$b^$y0*#=RYGH2^6cqe)7wifJiqOkTcJM{Jod=DM>e< z`&YyBi-FmPF=;Fy#PC%-7#X#LTaPg0M3sPSJM!ZR3AMXZi2;`eMeXLe!cSt$$YShm zwWWU{%*saXVgLH(cAo{rw`l2)SEY=dyy`h_=YxilO%jaFyp!;^O(`Zu30&rYuU zXYYD1@6AU`xL{T{7$9Zw^=l-63*?N^%lj&w4=eHs+$lxP5sWEQL4|Na!igPX*X$=k zE*AE>)$ssNiuxma~~i= z2fPAQVB+T;*1&u^F)RL?#w^6XQl8jE?u>ViO1-0!t?g^F7}FA_nM$tkon$Vt`Q3@M zg|muNp#Yrq-<_WfZr)#A46gfwGh-Z;->NbZLLxm7>vrZ>#VXWi_)Z#2oF>9d;R>NaX5^Lcyk{1kQMzTR#%#CN8`i=A4B6aNdIr{=ekVt|Y^1D$1%ywVGrQccw*xr{0E#lEnS zE8;FXBTV&ne!=wKL&Zj=3!nS@)Y?6Fu*XvBJ{u_K|I1_duW~bo{FI@1G`ojFi&W;gHPXU6srXdmlJ9uwYtNV zlYFcjt`T@#g_czr_hY3=9vjYb=F{yIV^=AEzW`ogH>M1_kkFsEDVzo9TK2JwW?xYe ztKns8X^;=U5HYoJERD_<&}Wg^0`A3VDqOSgS)roGTiEe3dst~*Mqc5oW#+bvN1;mI zu6&whU}}2#bpcuqVI4o9!m^@n=IQ4JK`O&z)Q2w8$;YOwB?DA9x3y?lLgc6QYt4*z1Q=mYkEIglK3veXbH02+(l-hPK$ zUc;SW*wJUPoOEjGskqoGOXh?$`%0;uoE&U%>fSC|)-U60e!8&C(p9%1(o?8;Ng@W+ zy#-O&i9n?#8nR4iB_AQFXpmc&>h|`F1gA_(vzi@!cyt9VE9F6NR>>BCuF8A4@|r5P zDU7QqSXq_o1X9;jqmk*05%7~0bn3YiQ6_r!8MI7oo~phithJ=q>xlmX0hgs-JJt~Y z617XEUmfw>1w9`*PdKwWkxNeYSfX0eyoX*yNho?RU!M-oZ~B9?%Zu~DwQVX%ju&Oh z(rKxGT|3&sVI(_d9XpUiUL;&UO&21znNOvmIWY(3;0c7n(%LwMVF#2$aJb5?v;Z;= zZq!hu&Uonqw2n@BbIfiP7-KyF_BhCSn){02)8(@ThdO5FKoE&}311C(>!~yYvb28{ z3C-L}eKO}M<`HG{v##-*@h)OiP?!2$bbWfc4jE_`E>-0 zP|x1D-VOY}Pgnh$)2seje{g+%@XbY7OHt}_M~X($?$pL1x$Cyu^{zxRwZVUK(UYo#n(7qHP+<2~ z=~mN}y)rZVgU{zz7lV)e!L{P%<;nHmYus!QZsG8MTQP}CJPL8^-<*aO3YYJ-*{80p zy5eSIqoYqyv$Rk4wlJ|>8oWAG@$ z+amr-KY}a)yqi?sOOKGf{REwIdVc0iDPQ3wN z3T%lrU$)(0BpMX=7R;M^sv>JPw(cVpD0uG8(?392VgEl_K(BuQXxaJyqy6LE`JZPh zj^mWJ{JB1}-@5%#pIuB(_?!;U_2ngtlL5*xpQj(;!9rkndX=1p$*=c_sCOQY=|!lf z)Xb?gj8Sq~NXv3lh$3#xOLvF7*UQX&>oA=TKaerFbJvHD)Pw96lS|1nF4FWw2lb;V z1i-NWJ2Ffw=)X#Qd&-5di}ioMb9iLl|MB+d;CQG1+bHJwy+^12DCyW_;*SAvEwiCB z$A^Mp0h8olo%DrCIMK# zp{Ec)@j{+XUT6Aljm+DAOeT*Mfpv3uD#?i+~G-!^;EuwyyWWyo9##nC7>Sk?4S(uzUJ=u(Kp*f zC>%Vqbk`26AbdJ7Z6R$bQyT2yhDkG6$;G(FF4 
zDif`Ia<|wrJL-CQb@7jXE(D_Edj_lu&9-ZetmMnu#Ou9${qv`H{hPtXS^s8m^0B{W zBAt#Se5(|?6%y7I0pB83tsd}#B(?7^>2H;^IlXXNAz`j8krNQ zUIkRmoU$CMalue6g7S>9I=9CJe6NPh#wQx96;xyGS@8&5rCsXtRK-gFovDiJ+f2>nmH`E&i(2x=dXG^~K%14wU7F^1 zo~CK6*cK#cKJreECoPT3Gw+I*c~}&BeG=EAphlmc%&43ED~RimRlDk6a;wTya{U%q zrIp!Hy3Qy2%1~K6YR%U!*>IO^_{S*q`9ExgU1uYg04>S?J3e@OVBY`pw)1wE|Fex^ zXp|iEmcAiCUHP3P|LM>CTu~oN2|}tij>VKR@|&6g3VDMIvxZd5WIlG_P5=wBU%Zb( zegAL${6QA`+M96X@=0a+@hF$q@2s&kucxXQVkHq1#4*THLMqpGd%Na&Gp@-39sxIe}jC8*X< z`He^a23I6ntGhBR25JG=G&Hp-Iq|5hggl~4FRP65wk_>27bUQB-RsK{1~y+tT?W`1*}-u@>8kM+Pm zB@(>s{CDO1@B4?H&e6{PZ=+OpRLTxsJ`)&OC{_cqaD}$Scl@Sbge-p5_PII!eU-=S z|KmNurTTwleE)ra|LADv|81o_oBrpH#7^^fWmk4(SDvB#e*gdg|Nq&NmXZLV000(U BX3zit diff --git a/charts/v1.8.0/blob-csi-driver/Chart.yaml b/charts/v1.8.0/blob-csi-driver/Chart.yaml deleted file mode 100644 index ac625798a..000000000 --- a/charts/v1.8.0/blob-csi-driver/Chart.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: v1 -appVersion: v1.8.0 -description: Azure Blob Storage CSI driver -name: blob-csi-driver -version: v1.8.0 diff --git a/charts/v1.8.0/blob-csi-driver/templates/NOTES.txt b/charts/v1.8.0/blob-csi-driver/templates/NOTES.txt deleted file mode 100644 index 9ad135dd4..000000000 --- a/charts/v1.8.0/blob-csi-driver/templates/NOTES.txt +++ /dev/null @@ -1,5 +0,0 @@ -The Azure Blob Storage CSI driver is getting deployed to your cluster. - -To check Azure Blob Storage CSI driver pods status, please run: - - kubectl --namespace={{ .Release.Namespace }} get pods --selector="release={{ .Release.Name }}" --watch diff --git a/charts/v1.8.0/blob-csi-driver/templates/_helpers.tpl b/charts/v1.8.0/blob-csi-driver/templates/_helpers.tpl deleted file mode 100644 index d99392f32..000000000 --- a/charts/v1.8.0/blob-csi-driver/templates/_helpers.tpl +++ /dev/null @@ -1,49 +0,0 @@ -{{/* vim: set filetype=mustache: */}} - -{{/* Expand the name of the chart.*/}} -{{- define "blob.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "blob.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Common selectors. -*/}} -{{- define "blob.selectorLabels" -}} -app.kubernetes.io/name: {{ template "blob.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end -}} - -{{/* -Common labels. -*/}} -{{- define "blob.labels" -}} -{{- include "blob.selectorLabels" . }} -app.kubernetes.io/component: csi-driver -app.kubernetes.io/part-of: {{ template "blob.name" . }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -helm.sh/chart: {{ template "blob.chart" . }} -{{- if .Values.customLabels }} -{{ toYaml .Values.customLabels }} -{{- end }} -{{- end -}} - - -{{/* pull secrets for containers */}} -{{- define "blob.pullSecrets" -}} -{{- if .Values.imagePullSecrets }} -imagePullSecrets: -{{- range .Values.imagePullSecrets }} - - name: {{ . 
}} -{{- end }} -{{- end }} -{{- end -}} \ No newline at end of file diff --git a/charts/v1.8.0/blob-csi-driver/templates/blobfuse-proxy.yaml b/charts/v1.8.0/blob-csi-driver/templates/blobfuse-proxy.yaml deleted file mode 100644 index c131d8316..000000000 --- a/charts/v1.8.0/blob-csi-driver/templates/blobfuse-proxy.yaml +++ /dev/null @@ -1,126 +0,0 @@ -{{- if .Values.node.enableBlobfuseProxy -}} -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: csi-blobfuse-proxy - namespace: {{ .Release.Namespace }} - labels: - {{- include "blob.labels" . | nindent 4 }} -spec: - selector: - matchLabels: - app: csi-blobfuse-proxy - template: - metadata: - labels: - app: csi-blobfuse-proxy - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: type - operator: NotIn - values: - - virtual-kubelet - initContainers: - - name: prepare-binaries -{{- if hasPrefix "/" .Values.image.blob.repository }} - image: "{{ .Values.image.baseRepo }}{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" -{{- else }} - image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" -{{- end }} - command: ['sh', '-c', "cp /blobfuse-proxy/*.deb /tmp/"] - volumeMounts: - - mountPath: /tmp - name: tmp-dir - containers: - - command: - - nsenter - - '--target' - - '1' - - '--mount' - - '--uts' - - '--ipc' - - '--net' - - '--pid' - - '--' - - sh - - '-c' - - | - set -xe - INSTALL_BLOBFUSE=${INSTALL_BLOBFUSE:-true} - if (( "${INSTALL_BLOBFUSE}" == "true" )) - then - dpkg -i /tmp/packages-microsoft-prod.deb && apt update && apt-get install -y blobfuse=${BLOBFUSE_VERSION} - fi - dpkg -i /tmp/blobfuse-proxy.deb - mkdir -p /var/lib/kubelet/plugins/blob.csi.azure.com - systemctl enable blobfuse-proxy - systemctl start blobfuse-proxy - - SET_MAX_FILE_NUM=${SET_MAX_OPEN_FILE_NUM:-true} - if (( "${SET_MAX_OPEN_FILE_NUM}" == "true" )) - then - sysctl -w fs.file-max=${MAX_FILE_NUM} - fi - - updateDBConfigPath="/etc/updatedb.conf" - DISABLE_UPDATEDB=${DISABLE_UPDATEDB:-true} - if (( "${DISABLE_UPDATEDB}" == "true" )) && (( test -f ${updateDBConfigPath} )) - then - echo "before changing ${updateDBConfigPath}:" - cat ${updateDBConfigPath} - sed -i 's/PRUNEPATHS="\/tmp/PRUNEPATHS="\/mnt \/var\/lib\/kubelet \/tmp/g' ${updateDBConfigPath} - sed -i 's/PRUNEFS="NFS/PRUNEFS="fuse blobfuse NFS/g' ${updateDBConfigPath} - echo "after change:" - cat ${updateDBConfigPath} - fi - - # "waiting for blobfuse-proxy service to start" - sleep 3s - # tail blobfuse proxy logs - journalctl -u blobfuse-proxy -f -{{- if hasPrefix "/" .Values.image.blob.repository }} - image: "{{ .Values.image.baseRepo }}{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" -{{- else }} - image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" -{{- end }} - imagePullPolicy: IfNotPresent - name: sysctl-install-blobfuse-proxy - env: - - name: DEBIAN_FRONTEND - value: "noninteractive" - - name: INSTALL_BLOBFUSE - value: "{{ .Values.node.blobfuseProxy.installBlobfuse }}" - - name: BLOBFUSE_VERSION - value: "{{ .Values.node.blobfuseProxy.blobfuseVersion }}" - - name: SET_MAX_OPEN_FILE_NUM - value: "{{ .Values.node.blobfuseProxy.setMaxOpenFileNum }}" - - name: MAX_FILE_NUM - value: "{{ .Values.node.blobfuseProxy.maxOpenFileNum }}" - - name: DISABLE_UPDATEDB - value: "{{ .Values.node.blobfuseProxy.disableUpdateDB }}" - resources: - limits: - memory: 200Mi - requests: - cpu: 10m - memory: 1Mi - securityContext: - privileged: true - hostNetwork: true - 
hostPID: true - nodeSelector: - kubernetes.io/os: linux - priorityClassName: system-node-critical - restartPolicy: Always - tolerations: - - operator: Exists - volumes: - - hostPath: - path: /tmp - type: DirectoryOrCreate - name: tmp-dir -{{ end }} diff --git a/charts/v1.8.0/blob-csi-driver/templates/csi-blob-controller.yaml b/charts/v1.8.0/blob-csi-driver/templates/csi-blob-controller.yaml deleted file mode 100644 index 406ae9834..000000000 --- a/charts/v1.8.0/blob-csi-driver/templates/csi-blob-controller.yaml +++ /dev/null @@ -1,192 +0,0 @@ -kind: Deployment -apiVersion: apps/v1 -metadata: - name: {{ .Values.controller.name }} - namespace: {{ .Release.Namespace }} - labels: - app: {{ .Values.controller.name }} - {{- include "blob.labels" . | nindent 4 }} -spec: - replicas: {{ .Values.controller.replicas }} - selector: - matchLabels: - app: {{ .Values.controller.name }} - {{- include "blob.selectorLabels" . | nindent 6 }} - template: - metadata: - labels: - app: {{ .Values.controller.name }} - {{- include "blob.labels" . | nindent 8 }} - {{- if .Values.podLabels }} -{{- toYaml .Values.podLabels | nindent 8 }} - {{- end }} -{{- if .Values.podAnnotations }} - annotations: -{{ toYaml .Values.podAnnotations | indent 8 }} -{{- end }} - spec: -{{- with .Values.controller.affinity }} - affinity: -{{ toYaml . | indent 8 }} -{{- end }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: -{{ toYaml .Values.imagePullSecrets | indent 8 }} - {{- end }} - hostNetwork: {{ .Values.controller.hostNetwork }} - serviceAccountName: {{ .Values.serviceAccount.controller }} - nodeSelector: - kubernetes.io/os: linux - {{- if .Values.controller.runOnMaster}} - kubernetes.io/role: master - {{- end}} -{{- with .Values.controller.nodeSelector }} -{{ toYaml . | indent 8 }} -{{- end }} - priorityClassName: {{ .Values.priorityClassName | quote }} -{{- with .Values.controller.tolerations }} - tolerations: -{{ toYaml . 
| indent 8 }} -{{- end }} - containers: - - name: csi-provisioner -{{- if hasPrefix "/" .Values.image.csiProvisioner.repository }} - image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" -{{- else }} - image: "{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}" -{{- end }} - args: - - "-v=2" - - "--csi-address=$(ADDRESS)" - - "--leader-election" - - "--timeout=120s" - - "--extra-create-metadata=true" - env: - - name: ADDRESS - value: /csi/csi.sock - imagePullPolicy: {{ .Values.image.csiProvisioner.pullPolicy }} - volumeMounts: - - mountPath: /csi - name: socket-dir - resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }} - - name: liveness-probe -{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} - image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" -{{- else }} - image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" -{{- end }} - args: - - --csi-address=/csi/csi.sock - - --probe-timeout=3s - - --health-port={{ .Values.controller.livenessProbe.healthPort }} - imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} - volumeMounts: - - name: socket-dir - mountPath: /csi - resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }} - - name: blob -{{- if hasPrefix "/" .Values.image.blob.repository }} - image: "{{ .Values.image.baseRepo }}{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" -{{- else }} - image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" -{{- end }} - args: - - "--v={{ .Values.controller.logLevel }}" - - "--endpoint=$(CSI_ENDPOINT)" - - "--metrics-address=0.0.0.0:{{ .Values.controller.metricsPort }}" - - "--drivername={{ .Values.driver.name }}" - - "--custom-user-agent={{ .Values.driver.customUserAgent }}" - - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" - - "--cloud-config-secret-name={{ .Values.controller.cloudConfigSecretName }}" - - "--cloud-config-secret-namespace={{ .Values.controller.cloudConfigSecretNamespace }}" - - "--allow-empty-cloud-config={{ .Values.controller.allowEmptyCloudConfig }}" - ports: - - containerPort: {{ .Values.controller.livenessProbe.healthPort }} - name: healthz - protocol: TCP - - containerPort: {{ .Values.controller.metricsPort }} - name: metrics - protocol: TCP - livenessProbe: - failureThreshold: 5 - httpGet: - path: /healthz - port: healthz - initialDelaySeconds: 30 - timeoutSeconds: 10 - periodSeconds: 30 - env: - - name: AZURE_CREDENTIAL_FILE - valueFrom: - configMapKeyRef: - name: azure-cred-file - key: path - optional: true - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: AZURE_ENVIRONMENT_FILEPATH - value: /etc/kubernetes/azurestackcloud.json - {{- end }} - imagePullPolicy: {{ .Values.image.blob.pullPolicy }} - volumeMounts: - - mountPath: /csi - name: socket-dir - - mountPath: /etc/kubernetes/ - name: azure-cred - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: ssl - mountPath: /etc/ssl/certs - readOnly: true - {{- end }} - {{- if eq .Values.linux.distro "fedora" }} - - name: ssl - mountPath: /etc/ssl/certs - readOnly: true - - name: ssl-pki - mountPath: /etc/pki/ca-trust/extracted - readOnly: true - {{- end }} - resources: {{- toYaml .Values.controller.resources.blob | nindent 12 }} - - name: csi-resizer -{{- if hasPrefix "/" 
.Values.image.csiResizer.repository }} - image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" -{{- else }} - image: "{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}" -{{- end }} - args: - - "-csi-address=$(ADDRESS)" - - "-v=2" - - "-leader-election" - - '-handle-volume-inuse-error=false' - env: - - name: ADDRESS - value: /csi/csi.sock - imagePullPolicy: {{ .Values.image.csiResizer.pullPolicy }} - volumeMounts: - - name: socket-dir - mountPath: /csi - resources: {{- toYaml .Values.controller.resources.csiResizer | nindent 12 }} - volumes: - - name: socket-dir - emptyDir: {} - - name: azure-cred - hostPath: - path: /etc/kubernetes/ - type: DirectoryOrCreate - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: ssl - hostPath: - path: /etc/ssl/certs - {{- end }} - {{- if eq .Values.linux.distro "fedora" }} - - name: ssl - hostPath: - path: /etc/ssl/certs - - name: ssl-pki - hostPath: - path: /etc/pki/ca-trust/extracted - {{- end }} - {{- if .Values.securityContext }} - securityContext: {{- toYaml .Values.securityContext | nindent 8 }} - {{- end }} diff --git a/charts/v1.8.0/blob-csi-driver/templates/csi-blob-driver.yaml b/charts/v1.8.0/blob-csi-driver/templates/csi-blob-driver.yaml deleted file mode 100644 index 891826a62..000000000 --- a/charts/v1.8.0/blob-csi-driver/templates/csi-blob-driver.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -apiVersion: storage.k8s.io/v1 -kind: CSIDriver -metadata: - name: {{ .Values.driver.name }} - labels: - {{- include "blob.labels" . | nindent 4 }} -spec: - attachRequired: false - podInfoOnMount: true - {{- if .Values.feature.enableFSGroupPolicy}} - fsGroupPolicy: File - {{- end}} - volumeLifecycleModes: - - Persistent - - Ephemeral diff --git a/charts/v1.8.0/blob-csi-driver/templates/csi-blob-node.yaml b/charts/v1.8.0/blob-csi-driver/templates/csi-blob-node.yaml deleted file mode 100644 index d265db038..000000000 --- a/charts/v1.8.0/blob-csi-driver/templates/csi-blob-node.yaml +++ /dev/null @@ -1,222 +0,0 @@ -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: {{ .Values.node.name }} - namespace: {{ .Release.Namespace }} - labels: - app: {{ .Values.node.name }} - {{- include "blob.labels" . | nindent 4 }} -spec: - updateStrategy: - rollingUpdate: - maxUnavailable: {{ .Values.node.maxUnavailable }} - type: RollingUpdate - selector: - matchLabels: - app: {{ .Values.node.name }} - {{- include "blob.selectorLabels" . | nindent 6 }} - template: - metadata: - labels: - app: {{ .Values.node.name }} - {{- include "blob.labels" . | nindent 8 }} - {{- if .Values.podLabels }} -{{- toYaml .Values.podLabels | nindent 8 }} - {{- end }} -{{- if .Values.podAnnotations }} - annotations: -{{ toYaml .Values.podAnnotations | indent 8 }} -{{- end }} - spec: - {{- if .Values.imagePullSecrets }} - imagePullSecrets: -{{ toYaml .Values.imagePullSecrets | indent 8 }} - {{- end }} - hostNetwork: true - dnsPolicy: Default - serviceAccountName: {{ .Values.serviceAccount.node }} - nodeSelector: - kubernetes.io/os: linux -{{- with .Values.node.nodeSelector }} -{{ toYaml . | indent 8 }} -{{- end }} - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: type - operator: NotIn - values: - - virtual-kubelet - {{- if .Values.node.affinity }} -{{- toYaml .Values.node.affinity | nindent 8 }} - {{- end }} - priorityClassName: {{ .Values.priorityClassName | quote }} -{{- with .Values.node.tolerations }} - tolerations: -{{ toYaml . 
| indent 8 }} -{{- end }} - containers: - - name: liveness-probe - imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} - volumeMounts: - - mountPath: /csi - name: socket-dir -{{- if hasPrefix "/" .Values.image.livenessProbe.repository }} - image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" -{{- else }} - image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}" -{{- end }} - args: - - --csi-address=/csi/csi.sock - - --probe-timeout=3s - - --health-port={{ .Values.node.livenessProbe.healthPort }} - - --v=2 - resources: {{- toYaml .Values.node.resources.livenessProbe | nindent 12 }} - - name: node-driver-registrar -{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }} - image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" -{{- else }} - image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}" -{{- end }} - args: - - --csi-address=$(ADDRESS) - - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) - - --v=2 - livenessProbe: - exec: - command: - - /csi-node-driver-registrar - - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) - - --mode=kubelet-registration-probe - initialDelaySeconds: 30 - timeoutSeconds: 15 - env: - - name: ADDRESS - value: /csi/csi.sock - - name: DRIVER_REG_SOCK_PATH - value: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }}/csi.sock - volumeMounts: - - name: socket-dir - mountPath: /csi - - name: registration-dir - mountPath: /registration - resources: {{- toYaml .Values.node.resources.nodeDriverRegistrar | nindent 12 }} - - name: blob -{{- if hasPrefix "/" .Values.image.blob.repository }} - image: "{{ .Values.image.baseRepo }}{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" -{{- else }} - image: "{{ .Values.image.blob.repository }}:{{ .Values.image.blob.tag }}" -{{- end }} - args: - - "--v={{ .Values.node.logLevel }}" - - "--endpoint=$(CSI_ENDPOINT)" - - "--blobfuse-proxy-endpoint=$(BLOBFUSE_PROXY_ENDPOINT)" - - "--enable-blobfuse-proxy={{ .Values.node.enableBlobfuseProxy }}" - - "--nodeid=$(KUBE_NODE_NAME)" - - "--metrics-address=0.0.0.0:{{ .Values.node.metricsPort }}" - - "--drivername={{ .Values.driver.name }}" - - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}" - - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}" - - "--custom-user-agent={{ .Values.driver.customUserAgent }}" - - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}" - - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}" - - "--enable-get-volume-stats={{ .Values.feature.enableGetVolumeStats }}" - - "--append-timestamp-cache-dir={{ .Values.node.appendTimeStampInCacheDir }}" - - "--mount-permissions={{ .Values.node.mountPermissions }}" - ports: - - containerPort: {{ .Values.node.livenessProbe.healthPort }} - name: healthz - protocol: TCP - livenessProbe: - failureThreshold: 5 - httpGet: - path: /healthz - port: healthz - initialDelaySeconds: 30 - timeoutSeconds: 10 - periodSeconds: 30 - env: - - name: AZURE_CREDENTIAL_FILE - valueFrom: - configMapKeyRef: - name: azure-cred-file - key: path - optional: true - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - - name: BLOBFUSE_PROXY_ENDPOINT - value: unix:///csi/blobfuse-proxy.sock - - name: KUBE_NODE_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - {{- if eq .Values.cloud 
"AzureStackCloud" }} - - name: AZURE_ENVIRONMENT_FILEPATH - value: /etc/kubernetes/azurestackcloud.json - {{- end }} - imagePullPolicy: {{ .Values.image.blob.pullPolicy }} - securityContext: - privileged: true - volumeMounts: - - mountPath: /csi - name: socket-dir - - mountPath: {{ .Values.linux.kubelet }}/ - mountPropagation: Bidirectional - name: mountpoint-dir - - mountPath: /etc/kubernetes/ - name: azure-cred - - mountPath: /mnt - name: blob-cache - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: ssl - mountPath: /etc/ssl/certs - readOnly: true - {{- end }} - {{- if eq .Values.linux.distro "fedora" }} - - name: ssl - mountPath: /etc/ssl/certs - readOnly: true - - name: ssl-pki - mountPath: /etc/pki/ca-trust/extracted - readOnly: true - {{- end }} - resources: {{- toYaml .Values.node.resources.blob | nindent 12 }} - volumes: - - hostPath: - path: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }} - type: DirectoryOrCreate - name: socket-dir - - hostPath: - path: {{ .Values.linux.kubelet }}/ - type: DirectoryOrCreate - name: mountpoint-dir - - hostPath: - path: {{ .Values.linux.kubelet }}/plugins_registry/ - type: DirectoryOrCreate - name: registration-dir - - hostPath: - path: /etc/kubernetes/ - type: DirectoryOrCreate - name: azure-cred - - hostPath: - path: {{ .Values.node.blobfuseCachePath }} - name: blob-cache - {{- if eq .Values.cloud "AzureStackCloud" }} - - name: ssl - hostPath: - path: /etc/ssl/certs - {{- end }} - {{- if eq .Values.linux.distro "fedora" }} - - name: ssl - hostPath: - path: /etc/ssl/certs - - name: ssl-pki - hostPath: - path: /etc/pki/ca-trust/extracted - {{- end }} - {{- if .Values.securityContext }} - securityContext: {{- toYaml .Values.securityContext | nindent 8 }} - {{- end }} diff --git a/charts/v1.8.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml b/charts/v1.8.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml deleted file mode 100644 index 39619c932..000000000 --- a/charts/v1.8.0/blob-csi-driver/templates/rbac-csi-blob-controller.yaml +++ /dev/null @@ -1,115 +0,0 @@ -{{- if .Values.rbac.create -}} -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ .Values.rbac.name }}-external-provisioner-role - labels: - {{- include "blob.labels" . | nindent 4 }} -rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "delete"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["get", "list", "watch", "create", "update", "patch"] - - apiGroups: ["storage.k8s.io"] - resources: ["csinodes"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "watch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "list", "watch", "create", "update", "patch"] - ---- - -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ .Values.rbac.name }}-csi-provisioner-binding - labels: - {{- include "blob.labels" . 
| nindent 4 }} -subjects: - - kind: ServiceAccount - name: {{ .Values.serviceAccount.controller }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: {{ .Values.rbac.name }}-external-provisioner-role - apiGroup: rbac.authorization.k8s.io - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ .Values.rbac.name }}-external-resizer-role - labels: - {{- include "blob.labels" . | nindent 4 }} -rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update", "patch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["update", "patch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "list", "watch", "create", "update", "patch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ .Values.rbac.name }}-csi-resizer-role - labels: - {{- include "blob.labels" . | nindent 4 }} -subjects: - - kind: ServiceAccount - name: {{ .Values.serviceAccount.controller }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: {{ .Values.rbac.name }}-external-resizer-role - apiGroup: rbac.authorization.k8s.io - ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-{{ .Values.rbac.name }}-controller-secret-role - labels: - {{- include "blob.labels" . | nindent 4 }} -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "create"] - ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-{{ .Values.rbac.name }}-controller-secret-binding - labels: - {{- include "blob.labels" . | nindent 4 }} -subjects: - - kind: ServiceAccount - name: {{ .Values.serviceAccount.controller }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: csi-{{ .Values.rbac.name }}-controller-secret-role - apiGroup: rbac.authorization.k8s.io -{{ end }} diff --git a/charts/v1.8.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml b/charts/v1.8.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml deleted file mode 100644 index d269aea3d..000000000 --- a/charts/v1.8.0/blob-csi-driver/templates/rbac-csi-blob-node.yaml +++ /dev/null @@ -1,29 +0,0 @@ -{{- if .Values.rbac.create -}} ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-{{ .Values.rbac.name }}-node-secret-role - labels: - {{- include "blob.labels" . | nindent 4 }} -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list"] - ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-{{ .Values.rbac.name }}-node-secret-binding - labels: - {{- include "blob.labels" . 
| nindent 4 }} -subjects: - - kind: ServiceAccount - name: {{ .Values.serviceAccount.node }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: csi-{{ .Values.rbac.name }}-node-secret-role - apiGroup: rbac.authorization.k8s.io -{{ end }} diff --git a/charts/v1.8.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml b/charts/v1.8.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml deleted file mode 100644 index 9c9fb477b..000000000 --- a/charts/v1.8.0/blob-csi-driver/templates/serviceaccount-csi-blob-controller.yaml +++ /dev/null @@ -1,9 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ .Values.serviceAccount.controller }} - namespace: {{ .Release.Namespace }} - labels: - {{- include "blob.labels" . | nindent 4 }} -{{- end -}} diff --git a/charts/v1.8.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml b/charts/v1.8.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml deleted file mode 100644 index e1dc20bd2..000000000 --- a/charts/v1.8.0/blob-csi-driver/templates/serviceaccount-csi-blob-node.yaml +++ /dev/null @@ -1,9 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ .Values.serviceAccount.node }} - namespace: {{ .Release.Namespace }} - labels: - {{- include "blob.labels" . | nindent 4 }} -{{- end -}} diff --git a/charts/v1.8.0/blob-csi-driver/values.yaml b/charts/v1.8.0/blob-csi-driver/values.yaml deleted file mode 100644 index 35ca88248..000000000 --- a/charts/v1.8.0/blob-csi-driver/values.yaml +++ /dev/null @@ -1,160 +0,0 @@ -image: - baseRepo: mcr.microsoft.com - blob: - repository: /k8s/csi/blob-csi - tag: v1.8.0 - pullPolicy: IfNotPresent - csiProvisioner: - repository: /oss/kubernetes-csi/csi-provisioner - tag: v2.2.2 - pullPolicy: IfNotPresent - livenessProbe: - repository: /oss/kubernetes-csi/livenessprobe - tag: v2.5.0 - pullPolicy: IfNotPresent - nodeDriverRegistrar: - repository: /oss/kubernetes-csi/csi-node-driver-registrar - tag: v2.4.0 - pullPolicy: IfNotPresent - csiResizer: - repository: /oss/kubernetes-csi/csi-resizer - tag: v1.3.0 - pullPolicy: IfNotPresent - -cloud: AzurePublicCloud - -## Reference to one or more secrets to be used when pulling images -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ -imagePullSecrets: [] -# - name: myRegistryKeySecretName - -serviceAccount: - create: true # When true, service accounts will be created for you. Set to false if you want to use your own. 
- controller: csi-blob-controller-sa # Name of Service Account to be created or used - node: csi-blob-node-sa # Name of Service Account to be created or used - -rbac: - create: true - name: blob - -## Collection of annotations to add to all the pods -podAnnotations: {} -## Collection of labels to add to all the pods -podLabels: {} -# -- Custom labels to add into metadata -customLabels: {} - # k8s-app: blob-csi-driver - -## Leverage a PriorityClass to ensure your pods survive resource shortages -## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ -priorityClassName: system-cluster-critical -## Security context give the opportunity to run container as nonroot by setting a securityContext -## by example : -## securityContext: { runAsUser: 1001 } -securityContext: {} - -controller: - name: csi-blob-controller - cloudConfigSecretName: azure-cloud-provider - cloudConfigSecretNamespace: kube-system - allowEmptyCloudConfig: true - hostNetwork: true # this setting could be disabled if controller does not depend on MSI setting - metricsPort: 29634 - livenessProbe: - healthPort: 29632 - replicas: 2 - runOnMaster: false - logLevel: 5 - resources: - csiProvisioner: - limits: - memory: 500Mi - requests: - cpu: 10m - memory: 20Mi - livenessProbe: - limits: - memory: 100Mi - requests: - cpu: 10m - memory: 20Mi - blob: - limits: - memory: 200Mi - requests: - cpu: 10m - memory: 20Mi - csiResizer: - limits: - memory: 500Mi - requests: - cpu: 10m - memory: 20Mi - affinity: {} - nodeSelector: {} - tolerations: - - key: "node-role.kubernetes.io/master" - operator: "Exists" - effect: "NoSchedule" - - key: "node-role.kubernetes.io/controlplane" - operator: "Exists" - effect: "NoSchedule" - -node: - name: csi-blob-node - cloudConfigSecretName: azure-cloud-provider - cloudConfigSecretNamespace: kube-system - allowEmptyCloudConfig: true - maxUnavailable: 1 - metricsPort: 29635 - livenessProbe: - healthPort: 29633 - logLevel: 5 - enableBlobfuseProxy: false - blobfuseProxy: - installBlobfuse: true - blobfuseVersion: 1.4.2 - setMaxOpenFileNum: true - maxOpenFileNum: "9000000" - disableUpdateDB: true - blobfuseCachePath: /mnt - appendTimeStampInCacheDir: false - mountPermissions: 0777 - resources: - livenessProbe: - limits: - memory: 100Mi - requests: - cpu: 10m - memory: 20Mi - nodeDriverRegistrar: - limits: - memory: 100Mi - requests: - cpu: 10m - memory: 20Mi - blob: - limits: - memory: 2100Mi - requests: - cpu: 10m - memory: 20Mi - affinity: {} - nodeSelector: {} - tolerations: - - operator: "Exists" - livenessProbe: - healthPort: 29633 - -feature: - enableFSGroupPolicy: false - enableGetVolumeStats: false - -driver: - name: blob.csi.azure.com - customUserAgent: "" - userAgentSuffix: "OSS-helm" - -linux: - kubelet: /var/lib/kubelet - distro: debian diff --git a/charts/v1.9.0/blob-csi-driver-v1.9.0.tgz b/charts/v1.9.0/blob-csi-driver-v1.9.0.tgz deleted file mode 100644 index 780358ce3a034ca59b01695ded728ae9e4ffb85e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 5815 zcmV;o7D(wIiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PH>QbKAJF_gQ}hMoF8r? 
z4*c)7Ryv(d=jh--{omO%kNBiIH9d!4)N1dZ?cmKQ2-of5s_dC?t9trg&<^r?t zI#0%BcJ6mla72Cv#t993Xuj9LQB?TZ`dh2h@PNCFL_$3~`7LIE-uZNlMuIXt1#~(( zM_PCqAr7F243TQg4S+A*4o{ z)hK;3hY&cwWONMcVN9DuvPl77w{^H0Ur0STQ@V8pQ^EzqkLagtoM@)Q(&i-!4%TAw z6>##~V;IbIgVK1eMAdM88hgnkaT$+&;-1PUjmE22=n5vlAanrO zaQS!)DCXdy&oc-W8WK*Cl29%|FqrhvOo)j0+6AIpgtk5E^0vVImbYCRPRKN7;6&7O zTp9{YLSStP&r!gfh>dOIXhIoU(71(0 zAS62`*yn)AL_S2Hai|`~9OQ3?=;yE{@zGE)>ifWYa-8mE#{S2`FHG!8>5u>7TD0(+kN&-am- z0g9-{8xi$R@>)G~|4;?v<1zTlAbe02bYtW==rrbn21Yv)3W@>{*uw%hTvese5TRG- z_6P3ZD60BBlBN$Z2jwMUbjb*1L@Z8y%oThHxpbz~U`nD8kJ+5e0Wsh-W-cH;qfD$T z{Vok%h(xXciaTwo1W|(8i5NhjT)B25D$+AlBf=M4K;XE3%mpyVWke7c`x5&I+*sCD z@&F0(m(o0)Dt_f!-9uO!Fh{-A zQ;k<&n@8A%9+Cs%X#O>jlr{Q!5JZx@S({A!XOxQ}h|iSWW_Ci%2+#QA(%AQ;LV1Ma zu@9a!0y*KxqagVzkOvWj9-<-oI66<8H;|NAMqGYLndqVJ-;VYV>@(jCurFqLwQfUA z4?b}*?;-Uo4llxwSjt0B8zc#dPNiV`J#?rVXmRBlrfp7_$O9sH`b$z6s0rw>)A>k} zrwslR125KcqgW1gP-)gpTUk0)k8JOW$Z{{J9!K|yILz7lYq1VbCPeCNV#lP3AAz)= zlqHV@^?@ZORzY#lEi8Jdsip%)eQ1@_q|Lh~0gMC=qVrHyIU@!86iA=s z5?DYu*B(OW`1rVLs-D$&)Gh>G*j_yi$wp?xs%46_(8ML@JSfoNw8A=eGom=o@3Vb$+(?7X()*ojP{TRV}iq%j)s}>9|1w+V*$K9 zytp2WTH;PT8ON&l@BUHOi2ocO9UkuDKhIIFXRth|iwH;4#4q(A^63K9a!M^*l9fWM z(YU6_oq>D10+?Rs2rOd$J&Jt59HficYa~kJ3LiO6m`%}Oiu@X$n#^BD_w_R)NH;Ka(b2y8lLUIU zMNKCMCKtX=N?YSpMVK@#nmS2pvh{>8*MY_HBBxkKdRh%T;i^iGEMg=S6V&_@cmBki z2s7c($)kH7 z;1Ey2bHdPx z|Ibnk&6Ml+mL+7AOf7k?2#zA&p6@koN$B;^8HRv{BM^;rucR7e%VjBfm4wRbT>J(~ zW)mF?sLmIDHcxy(AqhPQ1v-!oc?4?LDV38zT_lZ4arX-KojgFA3zm6ua!S&qS60}FWh zwKSND7lXTqK@uP{YM?wCE8RiM+hXK(NX2<*t)sI^Y|u4^IbkBkzLQ5skP1N(ahlCB zdF#~BM+_p&z!{SeGXgxFUS^nIGElBnyIGzzT1sN_oo_8)Y0*g#s3}FU+$u?mKo1Yq zcNK^|dmVwNasjGmAfLm!xHp2YC%YWHOM?JQ75(K6pS}4Wy>Z+(-=n4*A;ZA8|Jw3k zjM^fI+Rc9zAk0+&?W5YVE{~*>1X^`;Rs@kT&U&;&5IG)Ujg%hSxTaJZ;6o0f0A@UW z<2VAdDTp`bi@nMd$5A9&p2vb)o|4G5JPj?l948{4<*E6I&nk5)D83kslS?t@&Q!be z;pqD0!-t!9A1>a#|1=u(U*DULdX8W*JQxkh1igKWn$_wLP1NtBrfkzhfBB2iOw6FF z)n0TvMGnz&5@GiiPk}pGx{#D5PYKeW|BP@XP^<#`$uDOLBHds>&LRu)_g~+qB;EWx zxEh^b49z}FNMi{hhOgqm*r*-cdW0b-ssv=)ksnVX87r2|Mh+P=;CrP%$~0pUu&BU2AMCo3dDUz6W*E- zADjT+^c5*#9 zd)I$`Z$4VV1+%)r04a-a-y#8AAZLPJ-&g5;Sd&lS&M0b*VM3V-Duh!KPVEqTWf zv9Q;zjt6*B)Zg&-<<+O*;PT}9@1uV6S0(!8CqXFCuTsW;l|ue&D&kFgYlGH2!xI5avkacVnyS#fLe0;ZsA#oZs+QF*OTiLUtEGvV`v4Kz z=M|s=6F={;2IkYLS@GXAW+C>K^28=`XR>ot>K&DAZC{hcn3gcjRC0yyBy)+)?@pvG zoL8I*1>kJ(?)+qU^Zx2$cs&@N8RMw@R+Whm66t|pSCBb0t&q$~wFXpyC2EmX+Invp zyY7ljHe}{Bxe+pBBfl1zbvoGyV{!7diWBTTl3Hi_@47}_eZ8-)S$gjPHF^u0#0UeItgp*B*hJ6t)* zN4nt#fyZ@dS(kA?RGQ?m;Vfr9-A*xfmGbvX;1zab%AgAg{dt?hS%98pAIoU=1r@Ox zUZ$1?`S3FlGaJX!=xhOf7MU&JUXG^1HT#|wDtf$y9WS$omDXkC6~0<#Zo7CCs^sm; zr&)%krk7tApyd!Y@dGL>E9z#ReqIoyGCW3o=pmhaY|2_PKy^c2B-hv~T{X-W@1?-1 z%3E`vw~nHME0g0Y`8SWC(H?Bgw`GdQ=+;=8mNWaeyGUIIySkh-YHzr3-5ySJF^k!> z5-V>y^M2P{j5HnPd|}UHzlO)4nU-cXJNoeG8d_G$gWj%^EdgDX_j2V8Rcu=r z*HN&tD%A<3uBk>N(-$M)CoAaGb0?xq^zAcfnc6&6eMMMnMXxsz|0M!0OTBh%ApRw4 zS4zJ+;<*cYK60LL=5->Mob0hgwWN6uy@--f^k2U{9i86{hG&--=fi8;RFWJo%9N$k zQvZ5(w1vY+cFa00L~JvkN<(vE4$Q$*2!*A!aSFo@D2L#1m0M{6WE|Y6 zp-7$a(g$cAo$}_G-6}A~dIIcmkn=S6HNmILXA2H>%*=rx5{nYP8t~RrX$E9z|0)ui zxt024&Qr`I%I0TX<2U17#HgSy^?T_0^l}q2%Cc+4oK#s8v-QlFE)z`rn8Edo0iRLd z(+BYKG7}>D5ri4YO~Q;d3{ZWuy^||bg8eh_@nQrn4L#mN`<-&AQcrN%>%H>p2pFNB zy>Y!8_EFbc+Q0Q zwj3mtpCq|wc}LYLckGv<)a8y8jilYFjYD$RYq#rNiDYVn|Ky@4RS7lKDVUMK?yb_T zrYU=6W)6lwpI==JKMsc1ikp`w*MF~Zvq89p!~bo?BrfqN#I1jG8dfM=z1L=+x}Mb) zHyaxrwF1gTWV@rnxxd7Rl9X}ow_RYuv>FVscM<8M9OVTEfA zJES!?4|6;b*LrU2x`7O97-U6o%UL{Rj^h0=u)T2&ykOu${ZK76DeWVe`HN}h3%rYAaR5X~R} zhW+1>VOm4~RpQ%IE`(jK|J}~Pp?Uwu@nQFPr~l7U%=3GXPXAHTvCYIE1K>txLuZbU z1j7QR$-z453zKlFlQmQK$Gm&Xq@(;krAYgtS4DuEWDZq&`XV_)7AkhxLd8>?-&#UX 
zA%NnAJe|DG^xYbnxBZw*9w`Ew=I~UKS+qQDo>;RxBx_)>ACj%erQ&-rhs~mN@dc51yg^kO68t87B#jY}i497M6|rZ&)`io1NYXUIp>^W3&F z(aI-xi!HOGu9sIA|M=%pAUeKhz^c$}yVl4`zN}5W{_D3teR?;z8D5+XZiXix2OB2R z={UmIN}*dLVNDV6HB!~;0WV2X`}UIl)=8Vw3#T;_=E@Q|1(EJpbAbgff-DE8%Qscm z5UOrISFR}+IEWk*HP)ULkHA&hr9Mwptn}ZRs<_F$2k4nn z9GCg`R^OS?2v4#85a&DMsZsM8k_631-pTQ#rEz)YUGp*zi$ZTs;zktI=<|~qb(4PuaTBs?SN$t)Re4IT-x8~| zHakk!`D9-iDvL+0`PwBL?vf4v7^Ob{hmEo8Y~>Q575RTh`^U!j-@ALqoz5=*=Q)a@ zQF72*`i1~?<#&?&r$6&^MSUnG2&vjQ7BkAoZ)yf8&KSl7D$}P6gF&1B9-RNw79no!ex?RnFbCVNPneKIVMQey-Ukm_sNw;9Sz1?@=?V zL$|tHE+=w2-)e`R1kk0Fc_b{VgbjzvjoS*-8jj;Ms@nRFgdPc}Uqfk)`(vDOf@<@W z-+26Qa7CiEx+|kno>C+C$sThn(K;!K(%K~9m#WEoiv3E(VhP{t2Jfqy+T;w}@4?Kj zVIsC{WuJthtv_#PBHFMQcDwpCgZk{&Ny_(g+otUZ_#eP18H`xlAZ^ti$q}g?t&BP8 zZsYSArR1|#D}+3e?SIuUW($)1IR5+>%(WW{%sR6DP2a}*{AJzs|5FYDtyur>?H_c_ z^M42Xd%N}jbCeb9|7V>DR6gOiI0d|r;V<&zTQT{?qax2@>@7mEGxOUk_4Yp*c&rEh zDUsk+=f5l8fA8)eA0O@P|8tbej!N0V%Vz>33&mN!oi;%^y+P*Z$zpe6k z{eQeCxKjTQjqksAyN3rm|L `blobfuse-proxy` parameter is only available for debian based agent nodes, remove it if it's not applicable for your cluster. -> -If you have already installed Helm, you can also use it to install Azure Blob Storage CSI driver. Please see [Installation with Helm](../charts/README.md). - -## Install with kubectl - - remote install -```console -curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/v1.7.0/deploy/install-driver.sh | bash -s v1.7.0 blobfuse-proxy -- -``` - - - local install -```console -git clone https://github.com/kubernetes-sigs/blob-csi-driver.git -cd blob-csi-driver -./deploy/install-driver.sh v1.7.0 local,blobfuse-proxy -``` - -- check pods status: -```console -kubectl -n kube-system get pod -o wide -l app=csi-blob-controller -kubectl -n kube-system get pod -o wide -l app=csi-blob-node -``` - -example output: - -```console -NAME READY STATUS RESTARTS AGE IP NODE -csi-blob-controller-56bfddd689-dh5tk 4/4 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0 -csi-blob-controller-56bfddd689-8pgr4 4/4 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1 -csi-blob-node-cvgbs 3/3 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1 -csi-blob-node-dr4s4 3/3 Running 0 35s 10.240.0.4 k8s-agentpool-22533604-0 -``` - -- clean up Azure Blob Storage CSI driver -```console -curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/v1.7.0/deploy/uninstall-driver.sh | bash -s v1.7.0 blobfuse-proxy -- -``` diff --git a/docs/install-csi-driver-v1.8.0.md b/docs/install-csi-driver-v1.8.0.md deleted file mode 100644 index e1f354da3..000000000 --- a/docs/install-csi-driver-v1.8.0.md +++ /dev/null @@ -1,38 +0,0 @@ -# Install Azure Blob Storage CSI driver v1.8.0 version on a kubernetes cluster -> `blobfuse-proxy` parameter is only available for debian based agent nodes, remove it if it's not applicable for your cluster. -> -If you have already installed Helm, you can also use it to install Azure Blob Storage CSI driver. Please see [Installation with Helm](../charts/README.md). 
- -## Install with kubectl - - remote install -```console -curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/v1.8.0/deploy/install-driver.sh | bash -s v1.8.0 blobfuse-proxy -- -``` - - - local install -```console -git clone https://github.com/kubernetes-sigs/blob-csi-driver.git -cd blob-csi-driver -./deploy/install-driver.sh v1.8.0 local,blobfuse-proxy -``` - -- check pods status: -```console -kubectl -n kube-system get pod -o wide -l app=csi-blob-controller -kubectl -n kube-system get pod -o wide -l app=csi-blob-node -``` - -example output: - -```console -NAME READY STATUS RESTARTS AGE IP NODE -csi-blob-controller-56bfddd689-dh5tk 4/4 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0 -csi-blob-controller-56bfddd689-8pgr4 4/4 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1 -csi-blob-node-cvgbs 3/3 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1 -csi-blob-node-dr4s4 3/3 Running 0 35s 10.240.0.4 k8s-agentpool-22533604-0 -``` - -- clean up Azure Blob Storage CSI driver -```console -curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/v1.8.0/deploy/uninstall-driver.sh | bash -s v1.8.0 blobfuse-proxy -- -``` diff --git a/docs/install-csi-driver-v1.9.0.md b/docs/install-csi-driver-v1.9.0.md deleted file mode 100644 index 806095972..000000000 --- a/docs/install-csi-driver-v1.9.0.md +++ /dev/null @@ -1,47 +0,0 @@ -# Install Azure Blob Storage CSI driver v1.9.0 version on a kubernetes cluster -> `blobfuse-proxy` is only available for debian based agent nodes, remove `blobfuse-proxy` parameter in installation steps if it's not applicable. -> -If you have already installed Helm, you can also use it to install this driver. Please check [Installation with Helm](../charts/README.md). - -## Install with kubectl - - Option#1. remote install -```console -curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/v1.9.0/deploy/install-driver.sh | bash -s v1.9.0 blobfuse-proxy -- -``` - - - Option#2. local install -```console -git clone https://github.com/kubernetes-sigs/blob-csi-driver.git -cd blob-csi-driver -./deploy/install-driver.sh v1.9.0 local,blobfuse-proxy -``` - -- check pods status: -```console -kubectl -n kube-system get pod -o wide -l app=csi-blob-controller -kubectl -n kube-system get pod -o wide -l app=csi-blob-node -``` - -example output: - -```console -NAME READY STATUS RESTARTS AGE IP NODE -csi-blob-controller-56bfddd689-dh5tk 4/4 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0 -csi-blob-controller-56bfddd689-8pgr4 4/4 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1 -csi-blob-node-cvgbs 3/3 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1 -csi-blob-node-dr4s4 3/3 Running 0 35s 10.240.0.4 k8s-agentpool-22533604-0 -``` - -### clean up Blob CSI driver -- Option#1. remote uninstall -```console -curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/blob-csi-driver/v1.9.0/deploy/uninstall-driver.sh | bash -s v1.9.0 blobfuse-proxy -- -``` - - - Option#2. 
local uninstall -```console -git clone https://github.com/kubernetes-sigs/blob-csi-driver.git -cd blob-csi-driver -git checkout v1.9.0 -./deploy/install-driver.sh v1.9.0 local -``` From 6de2f4ddd1a43e762e90a5422ac541316f26ce23 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Thu, 18 Apr 2024 03:12:41 +0000 Subject: [PATCH 132/157] chore: vendor metrics log level change --- go.mod | 16 ++-- go.sum | 8 +- vendor/modules.txt | 16 ++-- .../azureclients/armclient/azure_armclient.go | 1 + .../pkg/azureclients/armclient/util.go | 2 +- .../vmssvmclient/azure_vmssvmclient.go | 78 +++++++++++----- .../cloud-provider-azure/pkg/consts/consts.go | 4 +- .../pkg/metrics/azure_metrics.go | 5 +- .../pkg/provider/azure_loadbalancer.go | 7 ++ .../pkg/provider/azure_zones.go | 3 + .../provider/loadbalancer/accesscontrol.go | 21 +++-- .../provider/loadbalancer/configuration.go | 89 ++++++++++++------- .../provider/loadbalancer/iputil/prefix.go | 18 ++-- .../securitygroup/securitygroup.go | 7 +- .../pkg/retry/azure_error.go | 8 +- 15 files changed, 178 insertions(+), 105 deletions(-) diff --git a/go.mod b/go.mod index 3e0ae7384..68f28ad79 100644 --- a/go.mod +++ b/go.mod @@ -32,12 +32,12 @@ require ( k8s.io/api v0.29.3 k8s.io/apimachinery v0.29.3 k8s.io/client-go v0.29.3 - k8s.io/component-base v0.29.2 + k8s.io/component-base v0.29.3 k8s.io/klog/v2 v2.120.1 k8s.io/kubernetes v1.29.0 k8s.io/mount-utils v0.29.0 k8s.io/utils v0.0.0-20231127182322-b307cd553661 - sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc + sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240418020948-86cfc443b48c sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.4 sigs.k8s.io/yaml v1.4.0 ) @@ -146,14 +146,14 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.0.0 // indirect - k8s.io/apiserver v0.29.2 - k8s.io/cloud-provider v0.29.2 // indirect - k8s.io/component-helpers v0.29.2 // indirect - k8s.io/controller-manager v0.29.2 // indirect - k8s.io/kms v0.29.2 // indirect + k8s.io/apiserver v0.29.3 + k8s.io/cloud-provider v0.29.3 // indirect + k8s.io/component-helpers v0.29.3 // indirect + k8s.io/controller-manager v0.29.3 // indirect + k8s.io/kms v0.29.3 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect k8s.io/kubectl v0.0.0 // indirect - k8s.io/kubelet v0.29.2 // indirect + k8s.io/kubelet v0.29.3 // indirect k8s.io/pod-security-admission v0.29.0 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.13 diff --git a/go.sum b/go.sum index 33af417d8..ca06a1283 100644 --- a/go.sum +++ b/go.sum @@ -448,8 +448,8 @@ k8s.io/csi-translation-lib v0.29.0 h1:we4X1yUlDikvm5Rv0dwMuPHNw6KwjwsQiAuOPWXha8 k8s.io/csi-translation-lib v0.29.0/go.mod h1:Cp6t3CNBSm1dXS17V8IImUjkqfIB6KCj8Fs8wf6uyTA= k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kms v0.29.2 h1:MDsbp98gSlEQs7K7dqLKNNTwKFQRYYvO4UOlBOjNy6Y= -k8s.io/kms v0.29.2/go.mod h1:s/9RC4sYRZ/6Tn6yhNjbfJuZdb8LzlXhdlBnKizeFDo= +k8s.io/kms v0.29.3 h1:ReljsAUhYlm2spdT4yXmY+9a8x8dc/OT4mXvwQPPteQ= +k8s.io/kms v0.29.3/go.mod h1:TBGbJKpRUMk59neTMDMddjIDL+D4HuFUbpuiuzmOPg0= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/kubectl v0.29.0 
h1:Oqi48gXjikDhrBF67AYuZRTcJV4lg2l42GmvsP7FmYI= @@ -466,8 +466,8 @@ k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6R k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= -sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc h1:y/jDBU+4EcPiHGPMQeK6B4IKEhOp3CCts/ZaRH1sa3w= -sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc/go.mod h1:nS6tUQgopFVyqFQW1P/luDXNNQfsQOg3WFs9GhzNjm8= +sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240418020948-86cfc443b48c h1:jiFjp43Ayaz78K+yK0bVrOVMvQ6QrIW/XTi0r3uAA5g= +sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240418020948-86cfc443b48c/go.mod h1:1vNSbzbKZwTZFc/kcVGFpcTjcys3qcRtPT3ZZtmg2WA= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.13 h1:dxpo41/N6m2R//9fmqKgqYZL2k0rQSE1NvQteIQ9pGA= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.13/go.mod h1:tN2BDTM6RDyQsae6JRvaaA14LVxDsRaLU3Ea2MRUBjg= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.4 h1:CFMHYo6/OQpLTycJGQIze2pchNeJQ7L2TQC6fDo4JGY= diff --git a/vendor/modules.txt b/vendor/modules.txt index bf672882e..086d0381b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -887,7 +887,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.29.2 => k8s.io/apiserver v0.29.0 +# k8s.io/apiserver v0.29.3 => k8s.io/apiserver v0.29.0 ## explicit; go 1.21 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/cel @@ -1358,7 +1358,7 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.29.2 => k8s.io/cloud-provider v0.29.0 +# k8s.io/cloud-provider v0.29.3 => k8s.io/cloud-provider v0.29.0 ## explicit; go 1.21 k8s.io/cloud-provider k8s.io/cloud-provider/api @@ -1374,7 +1374,7 @@ k8s.io/cloud-provider/names k8s.io/cloud-provider/node/helpers k8s.io/cloud-provider/options k8s.io/cloud-provider/service/helpers -# k8s.io/component-base v0.29.2 => k8s.io/component-base v0.29.0 +# k8s.io/component-base v0.29.3 => k8s.io/component-base v0.29.0 ## explicit; go 1.21 k8s.io/component-base/cli/flag k8s.io/component-base/config @@ -1396,14 +1396,14 @@ k8s.io/component-base/metrics/testutil k8s.io/component-base/tracing k8s.io/component-base/tracing/api/v1 k8s.io/component-base/version -# k8s.io/component-helpers v0.29.2 => k8s.io/component-helpers v0.29.0 +# k8s.io/component-helpers v0.29.3 => k8s.io/component-helpers v0.29.0 ## explicit; go 1.21 k8s.io/component-helpers/node/util k8s.io/component-helpers/node/util/sysctl k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity k8s.io/component-helpers/storage/volume -# k8s.io/controller-manager v0.29.2 => k8s.io/controller-manager v0.29.0 +# k8s.io/controller-manager v0.29.3 => k8s.io/controller-manager v0.29.0 ## explicit; go 1.21 k8s.io/controller-manager/config k8s.io/controller-manager/config/v1 @@ -1424,7 +1424,7 @@ k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity k8s.io/klog/v2/internal/sloghandler -# k8s.io/kms v0.29.2 +# k8s.io/kms v0.29.3 ## explicit; go 1.21 
k8s.io/kms/apis/v1beta1 k8s.io/kms/apis/v2 @@ -1455,7 +1455,7 @@ k8s.io/kube-openapi/pkg/validation/strfmt/bson ## explicit; go 1.21 k8s.io/kubectl/pkg/scale k8s.io/kubectl/pkg/util/podutils -# k8s.io/kubelet v0.29.2 => k8s.io/kubelet v0.29.0 +# k8s.io/kubelet v0.29.3 => k8s.io/kubelet v0.29.0 ## explicit; go 1.21 k8s.io/kubelet/pkg/apis k8s.io/kubelet/pkg/apis/stats/v1alpha1 @@ -1561,7 +1561,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client -# sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240319093822-58cad130d9dc +# sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240418020948-86cfc443b48c ## explicit; go 1.21 sigs.k8s.io/cloud-provider-azure/pkg/azureclients sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/azure_armclient.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/azure_armclient.go index 2dfb7c9ff..2c6b5c470 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/azure_armclient.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/azure_armclient.go @@ -81,6 +81,7 @@ func sender() autorest.Sender { IdleConnTimeout: 90 * time.Second, // the same as default transport TLSHandshakeTimeout: 10 * time.Second, // the same as default transport ExpectContinueTimeout: 1 * time.Second, // the same as default transport + ResponseHeaderTimeout: 60 * time.Second, TLSClientConfig: &tls.Config{ MinVersion: tls.VersionTLS12, //force to use TLS 1.2 Renegotiation: tls.RenegotiateNever, // the same as default transport https://pkg.go.dev/crypto/tls#RenegotiationSupport diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/util.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/util.go index 073ce78ef..a899d1680 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/util.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/util.go @@ -121,7 +121,7 @@ func DoHackRegionalRetryForGET(c *Client) autorest.SendDecorator { bodyString := string(bodyBytes) trimmed := strings.TrimSpace(bodyString) - klog.V(6).Infof("Send.sendRequest got response with ContentLength %d, StatusCode %d and responseBody length %d", response.ContentLength, response.StatusCode, len(trimmed)) + klog.V(6).Infof("%s %s got response with ContentLength %d, StatusCode %d and responseBody length %d", request.Method, request.URL.Path, response.ContentLength, response.StatusCode, len(trimmed)) // Hack: retry the regional ARM endpoint in case of ARM traffic split and arm resource group replication is too slow // Empty content and 2xx http status code are returned in this case. 
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/azure_vmssvmclient.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/azure_vmssvmclient.go index 4ad702caa..6aba901b1 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/azure_vmssvmclient.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/azure_vmssvmclient.go @@ -509,7 +509,48 @@ func (c *Client) updateVMSSVMs(ctx context.Context, resourceGroupName string, VM } responses := c.armClient.PutResourcesInBatches(ctx, resources, batchSize) - errors := make([]*retry.Error, 0) + errors, retryIDs := c.parseResp(ctx, responses, true) + if len(retryIDs) > 0 { + retryResources := make(map[string]interface{}) + for _, id := range retryIDs { + retryResources[id] = resources[id] + } + resps := c.armClient.PutResourcesInBatches(ctx, retryResources, batchSize) + errs, _ := c.parseResp(ctx, resps, false) + errors = append(errors, errs...) + } + + // Aggregate errors. + if len(errors) > 0 { + rerr := &retry.Error{} + errs := make([]error, 0) + for _, err := range errors { + if !err.Retriable && strings.Contains(err.Error().Error(), consts.ConcurrentRequestConflictMessage) { + err.Retriable = true + err.RetryAfter = time.Now().Add(5 * time.Second) + } + + if err.IsThrottled() && err.RetryAfter.After(rerr.RetryAfter) { + rerr.RetryAfter = err.RetryAfter + } + errs = append(errs, err.Error()) + } + rerr.RawError = utilerrors.Flatten(utilerrors.NewAggregate(errs)) + return rerr + } + + return nil +} + +func (c *Client) parseResp( + ctx context.Context, + responses map[string]*armclient.PutResourcesResponse, + shouldRetry bool, +) ([]*retry.Error, []string) { + var ( + errors []*retry.Error + retryIDs []string + ) for resourceID, resp := range responses { if resp == nil { continue @@ -534,6 +575,19 @@ func (c *Client) updateVMSSVMs(ctx context.Context, resourceGroupName string, VM continue } + if retry.IsSuccessHTTPResponse(resp.Response) && + strings.Contains( + strings.ToLower(errMsg), + strings.ToLower(consts.OperationPreemptedErrorMessage), + ) { + if shouldRetry { + klog.V(2).Infof("The operation on VM %s is preempted, will retry.", resourceID) + retryIDs = append(retryIDs, resourceID) + continue + } + klog.V(2).Infof("The operation on VM %s is preempted, will not retry.", resourceID) + } + errors = append(errors, resp.Error) continue } @@ -546,25 +600,5 @@ func (c *Client) updateVMSSVMs(ctx context.Context, resourceGroupName string, VM } } } - - // Aggregate errors. 
- if len(errors) > 0 { - rerr := &retry.Error{} - errs := make([]error, 0) - for _, err := range errors { - if !err.Retriable && strings.Contains(err.Error().Error(), consts.ConcurrentRequestConflictMessage) { - err.Retriable = true - err.RetryAfter = time.Now().Add(5 * time.Second) - } - - if err.IsThrottled() && err.RetryAfter.After(rerr.RetryAfter) { - rerr.RetryAfter = err.RetryAfter - } - errs = append(errs, err.Error()) - } - rerr.RawError = utilerrors.Flatten(utilerrors.NewAggregate(errs)) - return rerr - } - - return nil + return errors, retryIDs } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go index f9692c201..65cf63110 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go @@ -413,8 +413,8 @@ const ( CannotUpdateVMBeingDeletedMessagePrefix = "'Put on Virtual Machine Scale Set VM Instance' is not allowed on Virtual Machine Scale Set" // CannotUpdateVMBeingDeletedMessageSuffix is the suffix of the error message that the request failed due to delete a VM that is being deleted CannotUpdateVMBeingDeletedMessageSuffix = "since it is marked for deletion" - // OperationPreemptedErrorCode is the error code returned for vm operation preempted errors - OperationPreemptedErrorCode = "OperationPreempted" + // OperationPreemptedErrorMessage is the error message returned for vm operation preempted errors + OperationPreemptedErrorMessage = "Operation execution has been preempted by a more recent operation" ) // node ipam controller diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/metrics/azure_metrics.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/metrics/azure_metrics.go index 70acb3066..b80c29fb7 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/metrics/azure_metrics.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/metrics/azure_metrics.go @@ -60,6 +60,8 @@ type operationCallMetrics struct { type MetricContext struct { start time.Time attributes []string + // log level in ObserveOperationWithResult + LogLevel int32 } // NewMetricContext creates a new MetricContext. @@ -67,6 +69,7 @@ func NewMetricContext(prefix, request, resourceGroup, subscriptionID, source str return &MetricContext{ start: time.Now(), attributes: []string{prefix + "_" + request, strings.ToLower(resourceGroup), subscriptionID, source}, + LogLevel: 3, } } @@ -104,7 +107,7 @@ func (mc *MetricContext) ObserveOperationWithResult(isOperationSucceeded bool, l } mc.CountFailedOperation() } - mc.logLatency(3, latency, append(labelAndValues, "result_code", resultCode)...) + mc.logLatency(mc.LogLevel, latency, append(labelAndValues, "result_code", resultCode)...) 
} func (mc *MetricContext) logLatency(logLevel int32, latency float64, additionalKeysAndValues ...interface{}) { diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go index c4a4d2c50..5d25ce124 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go @@ -2856,6 +2856,13 @@ func (az *Cloud) reconcileSecurityGroup( consts.ServiceAnnotationAllowedIPRanges, consts.ServiceAnnotationAllowedServiceTags, )) } + + if len(accessControl.InvalidRanges) > 0 { + az.Event(service, v1.EventTypeWarning, "InvalidConfiguration", fmt.Sprintf( + "Found invalid LoadBalancerSourceRanges %v, ignoring and adding a default DenyAll rule in security group.", + accessControl.InvalidRanges, + )) + } } var ( diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_zones.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_zones.go index 458e31f07..e15b1c209 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_zones.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_zones.go @@ -179,6 +179,9 @@ func (az *Cloud) GetZone(_ context.Context) (cloudprovider.Zone, error) { if err != nil { return cloudprovider.Zone{}, fmt.Errorf("failure getting hostname from kernel") } + if az.VMSet == nil { + return cloudprovider.Zone{}, fmt.Errorf("VMSet is not initialized") + } return az.VMSet.GetZoneByNodeName(strings.ToLower(hostname)) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/accesscontrol.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/accesscontrol.go index 05e393d6d..fcdb92c92 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/accesscontrol.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/accesscontrol.go @@ -46,6 +46,7 @@ type AccessControl struct { // immutable pre-compute states. 
SourceRanges []netip.Prefix AllowedIPRanges []netip.Prefix + InvalidRanges []string AllowedServiceTags []string securityRuleDestinationPortsByProtocol map[network.SecurityRuleProtocol][]int32 } @@ -82,20 +83,17 @@ func NewAccessControl(svc *v1.Service, sg *network.SecurityGroup, opts ...Access logger.Error(err, "Failed to initialize RuleHelper") return nil, err } - sourceRanges, err := SourceRanges(svc) + sourceRanges, invalidSourceRanges, err := SourceRanges(svc) if err != nil && !options.SkipAnnotationValidation { logger.Error(err, "Failed to parse SourceRange configuration") - return nil, err } - allowedIPRanges, err := AllowedIPRanges(svc) + allowedIPRanges, invalidAllowedIPRanges, err := AllowedIPRanges(svc) if err != nil && !options.SkipAnnotationValidation { logger.Error(err, "Failed to parse AllowedIPRanges configuration") - return nil, err } allowedServiceTags, err := AllowedServiceTags(svc) if err != nil && !options.SkipAnnotationValidation { logger.Error(err, "Failed to parse AllowedServiceTags configuration") - return nil, err } securityRuleDestinationPortsByProtocol, err := securityRuleDestinationPortsByProtocol(svc) if err != nil { @@ -114,13 +112,14 @@ func NewAccessControl(svc *v1.Service, sg *network.SecurityGroup, opts ...Access SourceRanges: sourceRanges, AllowedIPRanges: allowedIPRanges, AllowedServiceTags: allowedServiceTags, + InvalidRanges: append(invalidSourceRanges, invalidAllowedIPRanges...), securityRuleDestinationPortsByProtocol: securityRuleDestinationPortsByProtocol, }, nil } // IsAllowFromInternet returns true if the given service is allowed to be accessed from internet. // To be specific, -// 1. For all types of LB, it returns false if the given service is specified with `service tags` or `not allowed all IP ranges`. +// 1. For all types of LB, it returns false if the given service is specified with `service tags` or `not allowed all IP ranges`, including invalid IP ranges. // 2. For internal LB, it returns true iff the given service is explicitly specified with `allowed all IP ranges`. Refer: https://github.com/kubernetes-sigs/cloud-provider-azure/issues/698 func (ac *AccessControl) IsAllowFromInternet() bool { if len(ac.AllowedServiceTags) > 0 { @@ -132,6 +131,9 @@ func (ac *AccessControl) IsAllowFromInternet() bool { if len(ac.AllowedIPRanges) > 0 && !iputil.IsPrefixesAllowAll(ac.AllowedIPRanges) { return false } + if len(ac.InvalidRanges) > 0 { + return false + } if !IsInternal(ac.svc) { return true } @@ -143,10 +145,11 @@ func (ac *AccessControl) IsAllowFromInternet() bool { // By default, NSG allow traffic from the VNet. func (ac *AccessControl) DenyAllExceptSourceRanges() bool { var ( - annotationEnabled = strings.EqualFold(ac.svc.Annotations[consts.ServiceAnnotationDenyAllExceptLoadBalancerSourceRanges], "true") - sourceRangeSpecified = len(ac.SourceRanges) > 0 || len(ac.AllowedIPRanges) > 0 + annotationEnabled = strings.EqualFold(ac.svc.Annotations[consts.ServiceAnnotationDenyAllExceptLoadBalancerSourceRanges], "true") + sourceRangeSpecified = len(ac.SourceRanges) > 0 || len(ac.AllowedIPRanges) > 0 + invalidRangesSpecified = len(ac.InvalidRanges) > 0 ) - return annotationEnabled && sourceRangeSpecified + return (annotationEnabled && sourceRangeSpecified) || invalidRangesSpecified } // AllowedIPv4Ranges returns the IPv4 ranges that are allowed to access the LoadBalancer. 
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/configuration.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/configuration.go index 436bca4b1..04d04d448 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/configuration.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/configuration.go @@ -17,6 +17,7 @@ limitations under the License. package loadbalancer import ( + "errors" "fmt" "net/netip" "strings" @@ -45,55 +46,75 @@ func AllowedServiceTags(svc *v1.Service) ([]string, error) { return nil, nil } - return strings.Split(strings.TrimSpace(value), Sep), nil + tags := strings.Split(strings.TrimSpace(value), Sep) + for i := range tags { + tags[i] = strings.TrimSpace(tags[i]) + } + return tags, nil } -// AllowedIPRanges returns the allowed IP ranges configured by user through AKS custom annotation. -func AllowedIPRanges(svc *v1.Service) ([]netip.Prefix, error) { +// AllowedIPRanges returns the allowed IP ranges configured by user through AKS custom annotations: +// service.beta.kubernetes.io/azure-allowed-ip-ranges and service.beta.kubernetes.io/load-balancer-source-ranges +func AllowedIPRanges(svc *v1.Service) ([]netip.Prefix, []string, error) { const ( Sep = "," - Key = consts.ServiceAnnotationAllowedIPRanges + ) + var ( + errs []error + validRanges []netip.Prefix + invalidRanges []string ) - value, found := svc.Annotations[Key] - if !found { - return nil, nil - } + for _, key := range []string{consts.ServiceAnnotationAllowedIPRanges, v1.AnnotationLoadBalancerSourceRangesKey} { + value, found := svc.Annotations[key] + if !found { + continue + } - rv, err := iputil.ParsePrefixes(strings.Split(strings.TrimSpace(value), Sep)) - if err != nil { - return nil, NewErrAnnotationValue(Key, value, err) + var errsByKey []error + for _, p := range strings.Split(strings.TrimSpace(value), Sep) { + p = strings.TrimSpace(p) + prefix, err := iputil.ParsePrefix(p) + if err != nil { + errsByKey = append(errsByKey, err) + invalidRanges = append(invalidRanges, p) + } else { + validRanges = append(validRanges, prefix) + } + } + if len(errsByKey) > 0 { + errs = append(errs, NewErrAnnotationValue(key, value, errors.Join(errsByKey...))) + } } - return rv, nil + if len(errs) > 0 { + return validRanges, invalidRanges, errors.Join(errs...) + } + return validRanges, invalidRanges, nil } -// SourceRanges returns the allowed IP ranges configured by user through `spec.LoadBalancerSourceRanges` and standard annotation. -// If `spec.LoadBalancerSourceRanges` is not set, it will try to parse the annotation. -func SourceRanges(svc *v1.Service) ([]netip.Prefix, error) { - // Read from spec - if len(svc.Spec.LoadBalancerSourceRanges) > 0 { - rv, err := iputil.ParsePrefixes(svc.Spec.LoadBalancerSourceRanges) +// SourceRanges returns the allowed IP ranges configured by user through `spec.LoadBalancerSourceRanges`. 
+func SourceRanges(svc *v1.Service) ([]netip.Prefix, []string, error) { + var ( + errs []error + validRanges []netip.Prefix + invalidRanges []string + ) + + for _, p := range svc.Spec.LoadBalancerSourceRanges { + p = strings.TrimSpace(p) + prefix, err := iputil.ParsePrefix(p) if err != nil { - return nil, fmt.Errorf("invalid service.Spec.LoadBalancerSourceRanges [%v]: %w", svc.Spec.LoadBalancerSourceRanges, err) + errs = append(errs, err) + invalidRanges = append(invalidRanges, p) + } else { + validRanges = append(validRanges, prefix) } - return rv, nil } - - // Read from annotation - const ( - Sep = "," - Key = v1.AnnotationLoadBalancerSourceRangesKey - ) - value, found := svc.Annotations[Key] - if !found { - return nil, nil - } - rv, err := iputil.ParsePrefixes(strings.Split(strings.TrimSpace(value), Sep)) - if err != nil { - return nil, NewErrAnnotationValue(Key, value, err) + if len(errs) > 0 { + return validRanges, invalidRanges, fmt.Errorf("invalid service.Spec.LoadBalancerSourceRanges [%v]: %w", svc.Spec.LoadBalancerSourceRanges, errors.Join(errs...)) } - return rv, nil + return validRanges, invalidRanges, nil } func AdditionalPublicIPs(svc *v1.Service) ([]netip.Addr, error) { diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil/prefix.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil/prefix.go index a5644bfbc..fdde2d86e 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil/prefix.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/iputil/prefix.go @@ -30,14 +30,14 @@ func IsPrefixesAllowAll(prefixes []netip.Prefix) bool { return false } -func ParsePrefixes(vs []string) ([]netip.Prefix, error) { - var rv []netip.Prefix - for _, v := range vs { - prefix, err := netip.ParsePrefix(v) - if err != nil { - return nil, fmt.Errorf("invalid CIDR `%s`: %w", v, err) - } - rv = append(rv, prefix) +func ParsePrefix(v string) (netip.Prefix, error) { + prefix, err := netip.ParsePrefix(v) + if err != nil { + return netip.Prefix{}, fmt.Errorf("invalid CIDR `%s`: %w", v, err) + } + masked := prefix.Masked() + if prefix.Addr().Compare(masked.Addr()) != 0 { + return netip.Prefix{}, fmt.Errorf("invalid CIDR `%s`: not a valid network prefix, should be properly masked like %s", v, masked) } - return rv, nil + return prefix, nil } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/securitygroup/securitygroup.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/securitygroup/securitygroup.go index 82b617e25..659944f98 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/securitygroup/securitygroup.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/securitygroup/securitygroup.go @@ -313,10 +313,11 @@ func (helper *RuleHelper) RemoveDestinationPrefixesFromRules(prefixes []string) } for _, rule := range helper.rules { + if rule.DestinationAddressPrefix != nil && index[*rule.DestinationAddressPrefix] { + rule.DestinationAddressPrefix = nil + continue + } if rule.DestinationAddressPrefixes == nil { - if rule.DestinationAddressPrefix != nil && index[*rule.DestinationAddressPrefix] { - rule.DestinationAddressPrefix = nil - } continue } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_error.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_error.go index 81691fc89..923e33de0 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_error.go +++ 
b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/retry/azure_error.go @@ -149,7 +149,7 @@ func GetError(resp *http.Response, err error) *Error { return nil } - if err == nil && resp != nil && isSuccessHTTPResponse(resp) { + if err == nil && resp != nil && IsSuccessHTTPResponse(resp) { // HTTP 2xx suggests a successful response return nil } @@ -166,8 +166,8 @@ func GetError(resp *http.Response, err error) *Error { } } -// isSuccessHTTPResponse determines if the response from an HTTP request suggests success -func isSuccessHTTPResponse(resp *http.Response) bool { +// IsSuccessHTTPResponse determines if the response from an HTTP request suggests success +func IsSuccessHTTPResponse(resp *http.Response) bool { if resp == nil { return false } @@ -219,7 +219,7 @@ func shouldRetryHTTPRequest(resp *http.Response, err error) bool { } // should retry on <200, error>. - if isSuccessHTTPResponse(resp) && err != nil { + if IsSuccessHTTPResponse(resp) && err != nil { return true } From 0fc0ef8265bdcf39e9594ecccfcfbce4a1261408 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Thu, 18 Apr 2024 03:14:04 +0000 Subject: [PATCH 133/157] chore: set node_get_volume_stats log level to 6 --- pkg/blob/nodeserver.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/blob/nodeserver.go b/pkg/blob/nodeserver.go index 61f78bd7d..7b308d2fd 100644 --- a/pkg/blob/nodeserver.go +++ b/pkg/blob/nodeserver.go @@ -543,6 +543,7 @@ func (d *Driver) NodeGetVolumeStats(_ context.Context, req *csi.NodeGetVolumeSta } mc := metrics.NewMetricContext(blobCSIDriverName, "node_get_volume_stats", d.cloud.ResourceGroup, "", d.Name) + mc.LogLevel = 6 // change log level isOperationSucceeded := false defer func() { mc.ObserveOperationWithResult(isOperationSucceeded, VolumeID, req.VolumeId) From f84dbb467d1c4155f0f69e7fe6335d4a43889590 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Thu, 18 Apr 2024 03:43:31 +0000 Subject: [PATCH 134/157] test: ignore azcopy CVE-2023-45288 --- .trivyignore | 1 + 1 file changed, 1 insertion(+) create mode 100644 .trivyignore diff --git a/.trivyignore b/.trivyignore new file mode 100644 index 000000000..6c9632de0 --- /dev/null +++ b/.trivyignore @@ -0,0 +1 @@ +CVE-2023-45288 From 3b101da052171fc8fd4a1d614d506d6056c45bcf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 18 Apr 2024 16:25:47 +0000 Subject: [PATCH 135/157] chore(deps): bump github.com/onsi/gomega from 1.32.0 to 1.33.0 Bumps [github.com/onsi/gomega](https://github.com/onsi/gomega) from 1.32.0 to 1.33.0. - [Release notes](https://github.com/onsi/gomega/releases) - [Changelog](https://github.com/onsi/gomega/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/gomega/compare/v1.32.0...v1.33.0) --- updated-dependencies: - dependency-name: github.com/onsi/gomega dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 +- vendor/github.com/onsi/gomega/CHANGELOG.md | 11 +++ vendor/github.com/onsi/gomega/gomega_dsl.go | 2 +- vendor/github.com/onsi/gomega/matchers.go | 15 ++-- .../onsi/gomega/matchers/receive_matcher.go | 70 ++++++++++++++----- vendor/modules.txt | 2 +- 7 files changed, 77 insertions(+), 29 deletions(-) diff --git a/go.mod b/go.mod index 68f28ad79..a86f42363 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/golang/protobuf v1.5.4 github.com/kubernetes-csi/csi-lib-utils v0.16.0 github.com/onsi/ginkgo/v2 v2.17.1 - github.com/onsi/gomega v1.32.0 + github.com/onsi/gomega v1.33.0 github.com/pborman/uuid v1.2.1 github.com/pelletier/go-toml v1.9.5 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index ca06a1283..d23a31839 100644 --- a/go.sum +++ b/go.sum @@ -213,8 +213,8 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= -github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= -github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= +github.com/onsi/gomega v1.33.0 h1:snPCflnZrpMsy94p4lXVEkHo12lmPnc3vY5XBbreexE= +github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 01ec5245c..89230f199 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,14 @@ +## 1.33.0 + +### Features + +`Receive` not accepts `Receive(, MATCHER>)`, allowing you to pick out a specific value on the channel that satisfies the provided matcher and is stored in the provided pointer. + +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.15.0 to 2.17.1 (#745) [9999deb] +- Bump github-pages from 229 to 230 in /docs (#735) [cb5ff21] +- Bump golang.org/x/net from 0.20.0 to 0.23.0 (#746) [bac6596] + ## 1.32.0 ### Maintenance diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index ffb81b1fe..1980a63c7 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.32.0" +const GOMEGA_VERSION = "1.33.0" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index 8860d677f..7ef27dc9c 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -194,20 +194,21 @@ func BeClosed() types.GomegaMatcher { // // will repeatedly attempt to pull values out of `c` until a value matching "bar" is received. 
// -// Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type: +// Furthermore, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type: // // var myThing thing // Eventually(thingChan).Should(Receive(&myThing)) // Expect(myThing.Sprocket).Should(Equal("foo")) // Expect(myThing.IsValid()).Should(BeTrue()) +// +// Finally, if you want to match the received object as well as get the actual received value into a variable, so you can reason further about the value received, +// you can pass a pointer to a variable of the approriate type first, and second a matcher: +// +// var myThing thing +// Eventually(thingChan).Should(Receive(&myThing, ContainSubstring("bar"))) func Receive(args ...interface{}) types.GomegaMatcher { - var arg interface{} - if len(args) > 0 { - arg = args[0] - } - return &matchers.ReceiveMatcher{ - Arg: arg, + Args: args, } } diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go index 1936a2ba5..948164eaf 100644 --- a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go @@ -3,6 +3,7 @@ package matchers import ( + "errors" "fmt" "reflect" @@ -10,7 +11,7 @@ import ( ) type ReceiveMatcher struct { - Arg interface{} + Args []interface{} receivedValue reflect.Value channelClosed bool } @@ -29,15 +30,38 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro var subMatcher omegaMatcher var hasSubMatcher bool - - if matcher.Arg != nil { - subMatcher, hasSubMatcher = (matcher.Arg).(omegaMatcher) + var resultReference interface{} + + // Valid arg formats are as follows, always with optional POINTER before + // optional MATCHER: + // - Receive() + // - Receive(POINTER) + // - Receive(MATCHER) + // - Receive(POINTER, MATCHER) + args := matcher.Args + if len(args) > 0 { + arg := args[0] + _, isSubMatcher := arg.(omegaMatcher) + if !isSubMatcher && reflect.ValueOf(arg).Kind() == reflect.Ptr { + // Consume optional POINTER arg first, if it ain't no matcher ;) + resultReference = arg + args = args[1:] + } + } + if len(args) > 0 { + arg := args[0] + subMatcher, hasSubMatcher = arg.(omegaMatcher) if !hasSubMatcher { - argType := reflect.TypeOf(matcher.Arg) - if argType.Kind() != reflect.Ptr { - return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(matcher.Arg, 1)) - } + // At this point we assume the dev user wanted to assign a received + // value, so [POINTER,]MATCHER. + return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(arg, 1)) } + // Consume optional MATCHER arg. + args = args[1:] + } + if len(args) > 0 { + // If there are still args present, reject all. 
+ return false, errors.New("Receive matcher expects at most an optional pointer and/or an optional matcher") } winnerIndex, value, open := reflect.Select([]reflect.SelectCase{ @@ -58,16 +82,20 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro } if hasSubMatcher { - if didReceive { - matcher.receivedValue = value - return subMatcher.Match(matcher.receivedValue.Interface()) + if !didReceive { + return false, nil } - return false, nil + matcher.receivedValue = value + if match, err := subMatcher.Match(matcher.receivedValue.Interface()); err != nil || !match { + return match, err + } + // if we received a match, then fall through in order to handle an + // optional assignment of the received value to the specified reference. } if didReceive { - if matcher.Arg != nil { - outValue := reflect.ValueOf(matcher.Arg) + if resultReference != nil { + outValue := reflect.ValueOf(resultReference) if value.Type().AssignableTo(outValue.Elem().Type()) { outValue.Elem().Set(value) @@ -77,7 +105,7 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro outValue.Elem().Set(value.Elem()) return true, nil } else { - return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nType:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(value.Interface(), 1), format.Object(matcher.Arg, 1)) + return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nType:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(value.Interface(), 1), format.Object(resultReference, 1)) } } @@ -88,7 +116,11 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro } func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) { - subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) + var matcherArg interface{} + if len(matcher.Args) > 0 { + matcherArg = matcher.Args[len(matcher.Args)-1] + } + subMatcher, hasSubMatcher := (matcherArg).(omegaMatcher) closedAddendum := "" if matcher.channelClosed { @@ -105,7 +137,11 @@ func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message strin } func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) { - subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) + var matcherArg interface{} + if len(matcher.Args) > 0 { + matcherArg = matcher.Args[len(matcher.Args)-1] + } + subMatcher, hasSubMatcher := (matcherArg).(omegaMatcher) closedAddendum := "" if matcher.channelClosed { diff --git a/vendor/modules.txt b/vendor/modules.txt index 086d0381b..e5374b08c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -372,7 +372,7 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.32.0 +# github.com/onsi/gomega v1.33.0 ## explicit; go 1.20 github.com/onsi/gomega github.com/onsi/gomega/format From d543b7033a1d5f96632b394eee53c04fb2082fab Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 19 Apr 2024 16:37:15 +0000 Subject: [PATCH 136/157] chore(deps): bump sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader Bumps [sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader](https://github.com/kubernetes-sigs/cloud-provider-azure) from 0.0.4 to 0.0.7. 
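The gomega 1.33.0 hunks in patch 135 above document the new `Receive(POINTER, MATCHER)` form and rework `ReceiveMatcher.Match` to accept an optional pointer followed by an optional matcher. A hypothetical test sketch built from that doc comment; the channel contents and names are invented for illustration:

```go
package example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestReceiveIntoPointerWithMatcher(t *testing.T) {
	g := NewWithT(t)

	ch := make(chan string, 2)
	ch <- "foo"
	ch <- "bar-1"

	var got string
	// Keeps pulling values until one satisfies the matcher, then stores that
	// value through the pointer so the test can reason about it further.
	g.Eventually(ch).Should(Receive(&got, ContainSubstring("bar")))
	g.Expect(got).To(Equal("bar-1"))
}
```

Passing more than one pointer and one matcher now fails fast with the `errors.New` message introduced in this hunk.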
- [Release notes](https://github.com/kubernetes-sigs/cloud-provider-azure/releases) - [Changelog](https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/docs/release-versioning.md) - [Commits](https://github.com/kubernetes-sigs/cloud-provider-azure/compare/pkg/azclient/v0.0.4...pkg/azclient/v0.0.7) --- updated-dependencies: - dependency-name: sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 12 ++++++------ go.sum | 12 ++++++------ vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go | 3 +++ vendor/k8s.io/kube-openapi/pkg/common/common.go | 3 +++ vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go | 3 --- vendor/modules.txt | 14 +++++++------- 6 files changed, 25 insertions(+), 22 deletions(-) diff --git a/go.mod b/go.mod index a86f42363..10d98db99 100644 --- a/go.mod +++ b/go.mod @@ -29,16 +29,16 @@ require ( golang.org/x/net v0.24.0 google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.33.0 - k8s.io/api v0.29.3 - k8s.io/apimachinery v0.29.3 - k8s.io/client-go v0.29.3 + k8s.io/api v0.30.0 + k8s.io/apimachinery v0.30.0 + k8s.io/client-go v0.30.0 k8s.io/component-base v0.29.3 k8s.io/klog/v2 v2.120.1 k8s.io/kubernetes v1.29.0 k8s.io/mount-utils v0.29.0 k8s.io/utils v0.0.0-20231127182322-b307cd553661 sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240418020948-86cfc443b48c - sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.4 + sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.7 sigs.k8s.io/yaml v1.4.0 ) @@ -136,7 +136,7 @@ require ( golang.org/x/term v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.17.0 // indirect + golang.org/x/tools v0.18.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect @@ -151,7 +151,7 @@ require ( k8s.io/component-helpers v0.29.3 // indirect k8s.io/controller-manager v0.29.3 // indirect k8s.io/kms v0.29.3 // indirect - k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/kubectl v0.0.0 // indirect k8s.io/kubelet v0.29.3 // indirect k8s.io/pod-security-admission v0.29.0 diff --git a/go.sum b/go.sum index d23a31839..ee088b57a 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -450,8 +450,8 @@ k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kms v0.29.3 h1:ReljsAUhYlm2spdT4yXmY+9a8x8dc/OT4mXvwQPPteQ= k8s.io/kms v0.29.3/go.mod h1:TBGbJKpRUMk59neTMDMddjIDL+D4HuFUbpuiuzmOPg0= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/kubectl v0.29.0 h1:Oqi48gXjikDhrBF67AYuZRTcJV4lg2l42GmvsP7FmYI= k8s.io/kubectl v0.29.0/go.mod h1:0jMjGWIcMIQzmUaMgAzhSELv5WtHo2a8pq67DtviAJs= k8s.io/kubelet v0.29.0 h1:SX5hlznTBcGIrS1scaf8r8p6m3e475KMifwt9i12iOk= @@ -470,8 +470,8 @@ sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240418020948-86cfc443b48c h1:jiFjp4 sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240418020948-86cfc443b48c/go.mod h1:1vNSbzbKZwTZFc/kcVGFpcTjcys3qcRtPT3ZZtmg2WA= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.13 h1:dxpo41/N6m2R//9fmqKgqYZL2k0rQSE1NvQteIQ9pGA= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.13/go.mod h1:tN2BDTM6RDyQsae6JRvaaA14LVxDsRaLU3Ea2MRUBjg= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.4 h1:CFMHYo6/OQpLTycJGQIze2pchNeJQ7L2TQC6fDo4JGY= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.4/go.mod h1:PvXgFxPcfve6yBiWNIO/fqAMvGVC9W7qN6M2vIj4zmY= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.7 h1:1qp8hglOZ2trgdC6ZdxrSsrN3YPy/hFSICDlUUHi+bA= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.7/go.mod h1:MCkyXY+mUB1RGC+fqVQrUKxsLumryR9RKkcJHYBQa+k= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go b/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go index e59844786..081dae306 100644 --- a/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go @@ -326,6 +326,9 @@ func BuildOpenAPISpecFromRoutes(webServices []common.RouteContainer, config *com if err != nil { return nil, err } + if config.PostProcessSpec != nil { + return config.PostProcessSpec(a.spec) + } return a.spec, nil } diff --git a/vendor/k8s.io/kube-openapi/pkg/common/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go index 2e15e163c..e4ce843b0 100644 --- a/vendor/k8s.io/kube-openapi/pkg/common/common.go +++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go @@ -164,6 +164,9 @@ type OpenAPIV3Config struct { // It is an optional function to customize model names. GetDefinitionName func(name string) (string, spec.Extensions) + // PostProcessSpec runs after the spec is ready to serve. It allows a final modification to the spec before serving. + PostProcessSpec func(*spec3.OpenAPI) (*spec3.OpenAPI, error) + // SecuritySchemes is list of all security schemes for OpenAPI service. 
SecuritySchemes spec3.SecuritySchemes diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go index 799d866d5..9887d185b 100644 --- a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go @@ -214,9 +214,6 @@ func makeUnion(extensions map[string]interface{}) (schema.Union, error) { } } - if union.Discriminator != nil && len(union.Fields) == 0 { - return schema.Union{}, fmt.Errorf("discriminator set to %v, but no fields in union", *union.Discriminator) - } return union, nil } diff --git a/vendor/modules.txt b/vendor/modules.txt index e5374b08c..139a05729 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -630,7 +630,7 @@ golang.org/x/text/width # golang.org/x/time v0.5.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.17.0 +# golang.org/x/tools v0.18.0 ## explicit; go 1.18 golang.org/x/tools/cover golang.org/x/tools/go/ast/inspector @@ -763,7 +763,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.29.3 => k8s.io/api v0.29.0 +# k8s.io/api v0.30.0 => k8s.io/api v0.29.0 ## explicit; go 1.21 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -824,7 +824,7 @@ k8s.io/api/storage/v1beta1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/features -# k8s.io/apimachinery v0.29.3 => k8s.io/apimachinery v0.29.0 +# k8s.io/apimachinery v0.30.0 => k8s.io/apimachinery v0.29.0 ## explicit; go 1.21 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -1035,7 +1035,7 @@ k8s.io/apiserver/plugin/pkg/audit/truncate k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/client-go v0.29.3 => k8s.io/client-go v0.29.0 +# k8s.io/client-go v0.30.0 => k8s.io/client-go v0.29.0 ## explicit; go 1.21 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -1430,8 +1430,8 @@ k8s.io/kms/apis/v1beta1 k8s.io/kms/apis/v2 k8s.io/kms/pkg/service k8s.io/kms/pkg/util -# k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 -## explicit; go 1.19 +# k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 +## explicit; go 1.20 k8s.io/kube-openapi/pkg/builder k8s.io/kube-openapi/pkg/builder3 k8s.io/kube-openapi/pkg/builder3/util @@ -1662,7 +1662,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient -# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.4 +# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.7 ## explicit; go 1.21 sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader # sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd From 7019fd9080f34cba7632636e8fa3e3abb5d0ac95 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Apr 2024 16:41:25 +0000 Subject: [PATCH 137/157] chore(deps): bump sigs.k8s.io/cloud-provider-azure/pkg/azclient Bumps [sigs.k8s.io/cloud-provider-azure/pkg/azclient](https://github.com/kubernetes-sigs/cloud-provider-azure) from 0.0.13 to 0.0.14. 
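Besides the configloader version strings, patch 136 above also vendors a newer kube-openapi that adds a `PostProcessSpec` hook to `OpenAPIV3Config` and invokes it at the end of `BuildOpenAPISpecFromRoutes`. A sketch of wiring such a hook, assuming only the field and signature shown in the hunk; the title-stamping mutation is an arbitrary example, and the `Info`/`Title` access is an assumption about the spec3 types:

```go
package example

import (
	"k8s.io/kube-openapi/pkg/common"
	"k8s.io/kube-openapi/pkg/spec3"
)

// withSpecPostProcessing registers a hook that runs after the OpenAPI v3 spec
// is fully built, as a final modification before it is served.
func withSpecPostProcessing(cfg *common.OpenAPIV3Config) {
	cfg.PostProcessSpec = func(s *spec3.OpenAPI) (*spec3.OpenAPI, error) {
		if s.Info != nil {
			s.Info.Title += " (post-processed)" // assumed Info/Title layout
		}
		return s, nil
	}
}
```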
- [Release notes](https://github.com/kubernetes-sigs/cloud-provider-azure/releases) - [Changelog](https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/docs/release-versioning.md) - [Commits](https://github.com/kubernetes-sigs/cloud-provider-azure/compare/pkg/azclient/v0.0.13...pkg/azclient/v0.0.14) --- updated-dependencies: - dependency-name: sigs.k8s.io/cloud-provider-azure/pkg/azclient dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- vendor/modules.txt | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 10d98db99..c7c9a2025 100644 --- a/go.mod +++ b/go.mod @@ -156,7 +156,7 @@ require ( k8s.io/kubelet v0.29.3 // indirect k8s.io/pod-security-admission v0.29.0 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect - sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.13 + sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.14 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index ee088b57a..65569feb1 100644 --- a/go.sum +++ b/go.sum @@ -468,8 +468,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2S sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240418020948-86cfc443b48c h1:jiFjp43Ayaz78K+yK0bVrOVMvQ6QrIW/XTi0r3uAA5g= sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240418020948-86cfc443b48c/go.mod h1:1vNSbzbKZwTZFc/kcVGFpcTjcys3qcRtPT3ZZtmg2WA= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.13 h1:dxpo41/N6m2R//9fmqKgqYZL2k0rQSE1NvQteIQ9pGA= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.13/go.mod h1:tN2BDTM6RDyQsae6JRvaaA14LVxDsRaLU3Ea2MRUBjg= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.14 h1:NHDvAou5ETbwBfelH8bm9kUE+ZAAUJfg0q8Tgg5jiTc= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.14/go.mod h1:bqqPhB7U0S6RXO96JgtbWtFsihcsRts37DTH5TtfdTI= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.7 h1:1qp8hglOZ2trgdC6ZdxrSsrN3YPy/hFSICDlUUHi+bA= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.7/go.mod h1:MCkyXY+mUB1RGC+fqVQrUKxsLumryR9RKkcJHYBQa+k= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/vendor/modules.txt b/vendor/modules.txt index 139a05729..ef2af23bc 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1619,7 +1619,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/util/string sigs.k8s.io/cloud-provider-azure/pkg/util/taints sigs.k8s.io/cloud-provider-azure/pkg/util/vm sigs.k8s.io/cloud-provider-azure/pkg/version -# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.13 +# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.14 ## explicit; go 1.20 sigs.k8s.io/cloud-provider-azure/pkg/azclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient From 18e67e28650ca3c2f04a98cde31f1192207f6cdc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 23 Apr 2024 17:04:21 +0000 Subject: [PATCH 138/157] chore(deps): bump sigs.k8s.io/cloud-provider-azure/pkg/azclient Bumps [sigs.k8s.io/cloud-provider-azure/pkg/azclient](https://github.com/kubernetes-sigs/cloud-provider-azure) from 0.0.14 to 0.0.15. 
- [Release notes](https://github.com/kubernetes-sigs/cloud-provider-azure/releases) - [Changelog](https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/docs/release-versioning.md) - [Commits](https://github.com/kubernetes-sigs/cloud-provider-azure/compare/pkg/azclient/v0.0.14...pkg/azclient/v0.0.15) --- updated-dependencies: - dependency-name: sigs.k8s.io/cloud-provider-azure/pkg/azclient dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 6 +- go.sum | 12 +- .../compute/armcompute/v5/CHANGELOG.md | 21 ++ .../compute/armcompute/v5/assets.json | 2 +- .../compute/armcompute/v5/autorest.md | 8 +- .../armcompute/v5/availabilitysets_client.go | 28 +-- .../v5/capacityreservationgroups_client.go | 27 +-- .../v5/capacityreservations_client.go | 26 +-- .../compute/armcompute/v5/constants.go | 46 ++++- .../v5/dedicatedhostgroups_client.go | 24 +-- .../armcompute/v5/dedicatedhosts_client.go | 42 ++-- .../compute/armcompute/v5/images_client.go | 30 +-- .../armcompute/v5/loganalytics_client.go | 12 +- .../compute/armcompute/v5/models.go | 115 +++++++++-- .../compute/armcompute/v5/models_serde.go | 183 ++++++++++++++++++ .../armcompute/v5/operations_client.go | 4 +- .../compute/armcompute/v5/options.go | 7 + .../v5/proximityplacementgroups_client.go | 24 +-- .../v5/restorepointcollections_client.go | 26 +-- .../armcompute/v5/restorepoints_client.go | 16 +- .../armcompute/v5/sshpublickeys_client.go | 28 +-- .../compute/armcompute/v5/usage_client.go | 4 +- .../virtualmachineextensionimages_client.go | 12 +- .../v5/virtualmachineextensions_client.go | 26 +-- .../v5/virtualmachineimages_client.go | 24 +-- .../v5/virtualmachineimagesedgezone_client.go | 20 +- .../v5/virtualmachineruncommands_client.go | 34 ++-- .../armcompute/v5/virtualmachines_client.go | 138 ++++++------- ...virtualmachinescalesetextensions_client.go | 26 +-- ...almachinescalesetrollingupgrades_client.go | 22 +-- .../v5/virtualmachinescalesets_client.go | 132 ++++++------- ...rtualmachinescalesetvmextensions_client.go | 26 +-- ...tualmachinescalesetvmruncommands_client.go | 26 +-- .../v5/virtualmachinescalesetvms_client.go | 98 +++++----- .../v5/virtualmachinesizes_client.go | 4 +- vendor/modules.txt | 6 +- 36 files changed, 799 insertions(+), 486 deletions(-) diff --git a/go.mod b/go.mod index c7c9a2025..474b0edbe 100644 --- a/go.mod +++ b/go.mod @@ -43,8 +43,8 @@ require ( ) require ( - github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.8.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 // indirect @@ -156,7 +156,7 @@ require ( k8s.io/kubelet v0.29.3 // indirect k8s.io/pod-security-admission v0.29.0 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect - sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.14 + sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.15 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum 
b/go.sum index 65569feb1..cf8636711 100644 --- a/go.sum +++ b/go.sum @@ -9,14 +9,14 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqb github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0= github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0 h1:xnO4sFyG8UH2fElBkcqLTOZsAajvKfnSlgBBW8dXYjw= github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0/go.mod h1:XD3DIOOVgBCO03OleB1fHjgktVRFxlT++KwKgIOewdM= github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 h1:FbH3BbSb4bvGluTesZZ+ttN/MDsnMmQP36OSnDuSXqw= github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1/go.mod h1:9V2j0jn9jDEkCkv8w/bKTNppX/d0FVA1ud77xCIP4KA= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 h1:ui3YNbxfW7J3tTFIZMH6LIGRjCngp+J+nIFlnizfNTE= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0/go.mod h1:gZmgV+qBqygoznvqo2J9oKZAFziqhLZ2xE/WVUmzkHA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0 h1:DWlwvVV5r/Wy1561nZ3wrpI1/vDIBRY/Wd1HWaRBZWA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0/go.mod h1:E7ltexgRDmeJ0fJWv0D/HLwY2xbDdN+uv+X2uZtOx3w= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.8.0 h1:0nGmzwBv5ougvzfGPCO2ljFRHvun57KpNrVCMrlk0ns= @@ -468,8 +468,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2S sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240418020948-86cfc443b48c h1:jiFjp43Ayaz78K+yK0bVrOVMvQ6QrIW/XTi0r3uAA5g= sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240418020948-86cfc443b48c/go.mod h1:1vNSbzbKZwTZFc/kcVGFpcTjcys3qcRtPT3ZZtmg2WA= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.14 h1:NHDvAou5ETbwBfelH8bm9kUE+ZAAUJfg0q8Tgg5jiTc= -sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.14/go.mod h1:bqqPhB7U0S6RXO96JgtbWtFsihcsRts37DTH5TtfdTI= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.15 h1:dAvrFZFLIVoOXTSrU5jTLyUdNlyaExrBqD2YvyRjpI4= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.15/go.mod h1:gd4xk5narc09IQvDR8xyr5FyDmB0Y49+bLOUlMaGecs= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.7 h1:1qp8hglOZ2trgdC6ZdxrSsrN3YPy/hFSICDlUUHi+bA= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.7/go.mod h1:MCkyXY+mUB1RGC+fqVQrUKxsLumryR9RKkcJHYBQa+k= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd 
h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md index 9aeb8a37e..b363a53ef 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md @@ -1,5 +1,26 @@ # Release History +## 5.7.0 (2024-04-26) +### Features Added + +- New value `DiffDiskPlacementNvmeDisk` added to enum type `DiffDiskPlacement` +- New value `DiskCreateOptionTypesCopy`, `DiskCreateOptionTypesRestore` added to enum type `DiskCreateOptionTypes` +- New enum type `ResourceIDOptionsForGetCapacityReservationGroups` with values `ResourceIDOptionsForGetCapacityReservationGroupsAll`, `ResourceIDOptionsForGetCapacityReservationGroupsCreatedInSubscription`, `ResourceIDOptionsForGetCapacityReservationGroupsSharedWithSubscription` +- New struct `EventGridAndResourceGraph` +- New struct `ScheduledEventsAdditionalPublishingTargets` +- New struct `ScheduledEventsPolicy` +- New struct `UserInitiatedReboot` +- New struct `UserInitiatedRedeploy` +- New field `ResourceIDsOnly` in struct `CapacityReservationGroupsClientListBySubscriptionOptions` +- New field `SourceResource` in struct `DataDisk` +- New field `Caching`, `DeleteOption`, `DiskEncryptionSet`, `WriteAcceleratorEnabled` in struct `DataDisksToAttach` +- New field `ScheduledEventsPolicy` in struct `VirtualMachineProperties` +- New field `ScheduledEventsPolicy` in struct `VirtualMachineScaleSetProperties` +- New field `ForceUpdateOSDiskForEphemeral` in struct `VirtualMachineScaleSetReimageParameters` +- New field `DiffDiskSettings` in struct `VirtualMachineScaleSetUpdateOSDisk` +- New field `ForceUpdateOSDiskForEphemeral` in struct `VirtualMachineScaleSetVMReimageParameters` + + ## 5.6.0 (2024-03-22) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json index 9a0140774..db9b10d43 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/resourcemanager/compute/armcompute", - "Tag": "go/resourcemanager/compute/armcompute_79d4095593" + "Tag": "go/resourcemanager/compute/armcompute_6e7bd6d107" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md index 71b871d52..5f836a658 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md @@ -5,9 +5,9 @@ ``` yaml azure-arm: true require: -- https://github.com/Azure/azure-rest-api-specs/blob/e4009d2f8d3bf0271757e522c7d1c1997e193d44/specification/compute/resource-manager/readme.md -- https://github.com/Azure/azure-rest-api-specs/blob/e4009d2f8d3bf0271757e522c7d1c1997e193d44/specification/compute/resource-manager/readme.go.md +- 
https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/compute/resource-manager/readme.md +- https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/compute/resource-manager/readme.go.md license-header: MICROSOFT_MIT_NO_VERSION -module-version: 5.6.0 -tag: package-2023-07-03 +module-version: 5.7.0 +tag: package-2024-03-01 ``` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/availabilitysets_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/availabilitysets_client.go index 55572022a..b9b8536f8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/availabilitysets_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/availabilitysets_client.go @@ -47,7 +47,7 @@ func NewAvailabilitySetsClient(subscriptionID string, credential azcore.TokenCre // CreateOrUpdate - Create or update an availability set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - availabilitySetName - The name of the availability set. // - parameters - Parameters supplied to the Create Availability Set operation. @@ -95,7 +95,7 @@ func (client *AvailabilitySetsClient) createOrUpdateCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -116,7 +116,7 @@ func (client *AvailabilitySetsClient) createOrUpdateHandleResponse(resp *http.Re // Delete - Delete an availability set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - availabilitySetName - The name of the availability set. // - options - AvailabilitySetsClientDeleteOptions contains the optional parameters for the AvailabilitySetsClient.Delete method. @@ -161,7 +161,7 @@ func (client *AvailabilitySetsClient) deleteCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -170,7 +170,7 @@ func (client *AvailabilitySetsClient) deleteCreateRequest(ctx context.Context, r // Get - Retrieves information about an availability set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - availabilitySetName - The name of the availability set. // - options - AvailabilitySetsClientGetOptions contains the optional parameters for the AvailabilitySetsClient.Get method. 
@@ -216,7 +216,7 @@ func (client *AvailabilitySetsClient) getCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -233,7 +233,7 @@ func (client *AvailabilitySetsClient) getHandleResponse(resp *http.Response) (Av // NewListPager - Lists all availability sets in a resource group. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - options - AvailabilitySetsClientListOptions contains the optional parameters for the AvailabilitySetsClient.NewListPager // method. @@ -276,7 +276,7 @@ func (client *AvailabilitySetsClient) listCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -294,7 +294,7 @@ func (client *AvailabilitySetsClient) listHandleResponse(resp *http.Response) (A // NewListAvailableSizesPager - Lists all available virtual machine sizes that can be used to create a new virtual machine // in an existing availability set. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - availabilitySetName - The name of the availability set. // - options - AvailabilitySetsClientListAvailableSizesOptions contains the optional parameters for the AvailabilitySetsClient.NewListAvailableSizesPager @@ -343,7 +343,7 @@ func (client *AvailabilitySetsClient) listAvailableSizesCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -360,7 +360,7 @@ func (client *AvailabilitySetsClient) listAvailableSizesHandleResponse(resp *htt // NewListBySubscriptionPager - Lists all availability sets in a subscription. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - options - AvailabilitySetsClientListBySubscriptionOptions contains the optional parameters for the AvailabilitySetsClient.NewListBySubscriptionPager // method. func (client *AvailabilitySetsClient) NewListBySubscriptionPager(options *AvailabilitySetsClientListBySubscriptionOptions) *runtime.Pager[AvailabilitySetsClientListBySubscriptionResponse] { @@ -401,7 +401,7 @@ func (client *AvailabilitySetsClient) listBySubscriptionCreateRequest(ctx contex if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -419,7 +419,7 @@ func (client *AvailabilitySetsClient) listBySubscriptionHandleResponse(resp *htt // Update - Update an availability set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - availabilitySetName - The name of the availability set. 
// - parameters - Parameters supplied to the Update Availability Set operation. @@ -466,7 +466,7 @@ func (client *AvailabilitySetsClient) updateCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservationgroups_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservationgroups_client.go index cc26cd0d4..51a410658 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservationgroups_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservationgroups_client.go @@ -49,7 +49,7 @@ func NewCapacityReservationGroupsClient(subscriptionID string, credential azcore // https://aka.ms/CapacityReservation for more details. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. // - parameters - Parameters supplied to the Create capacity reservation Group. @@ -97,7 +97,7 @@ func (client *CapacityReservationGroupsClient) createOrUpdateCreateRequest(ctx c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -120,7 +120,7 @@ func (client *CapacityReservationGroupsClient) createOrUpdateHandleResponse(resp // the reservation group have also been deleted. Please refer to https://aka.ms/CapacityReservation for more details. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. // - options - CapacityReservationGroupsClientDeleteOptions contains the optional parameters for the CapacityReservationGroupsClient.Delete @@ -166,7 +166,7 @@ func (client *CapacityReservationGroupsClient) deleteCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -175,7 +175,7 @@ func (client *CapacityReservationGroupsClient) deleteCreateRequest(ctx context.C // Get - The operation that retrieves information about a capacity reservation group. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. 
// - options - CapacityReservationGroupsClientGetOptions contains the optional parameters for the CapacityReservationGroupsClient.Get @@ -225,7 +225,7 @@ func (client *CapacityReservationGroupsClient) getCreateRequest(ctx context.Cont if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -243,7 +243,7 @@ func (client *CapacityReservationGroupsClient) getHandleResponse(resp *http.Resp // NewListByResourceGroupPager - Lists all of the capacity reservation groups in the specified resource group. Use the nextLink // property in the response to get the next page of capacity reservation groups. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - options - CapacityReservationGroupsClientListByResourceGroupOptions contains the optional parameters for the CapacityReservationGroupsClient.NewListByResourceGroupPager // method. @@ -289,7 +289,7 @@ func (client *CapacityReservationGroupsClient) listByResourceGroupCreateRequest( if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -307,7 +307,7 @@ func (client *CapacityReservationGroupsClient) listByResourceGroupHandleResponse // NewListBySubscriptionPager - Lists all of the capacity reservation groups in the subscription. Use the nextLink property // in the response to get the next page of capacity reservation groups. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - options - CapacityReservationGroupsClientListBySubscriptionOptions contains the optional parameters for the CapacityReservationGroupsClient.NewListBySubscriptionPager // method. func (client *CapacityReservationGroupsClient) NewListBySubscriptionPager(options *CapacityReservationGroupsClientListBySubscriptionOptions) *runtime.Pager[CapacityReservationGroupsClientListBySubscriptionResponse] { @@ -348,7 +348,10 @@ func (client *CapacityReservationGroupsClient) listBySubscriptionCreateRequest(c if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") + if options != nil && options.ResourceIDsOnly != nil { + reqQP.Set("resourceIdsOnly", string(*options.ResourceIDsOnly)) + } req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -367,7 +370,7 @@ func (client *CapacityReservationGroupsClient) listBySubscriptionHandleResponse( // sharing profile may be modified. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. // - parameters - Parameters supplied to the Update capacity reservation Group operation. 
@@ -415,7 +418,7 @@ func (client *CapacityReservationGroupsClient) updateCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservations_client.go index 951b0ac91..aaf86c3a2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservations_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservations_client.go @@ -49,7 +49,7 @@ func NewCapacityReservationsClient(subscriptionID string, credential azcore.Toke // details. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. // - capacityReservationName - The name of the capacity reservation. @@ -78,7 +78,7 @@ func (client *CapacityReservationsClient) BeginCreateOrUpdate(ctx context.Contex // details. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *CapacityReservationsClient) createOrUpdate(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters CapacityReservation, options *CapacityReservationsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "CapacityReservationsClient.BeginCreateOrUpdate" @@ -124,7 +124,7 @@ func (client *CapacityReservationsClient) createOrUpdateCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -138,7 +138,7 @@ func (client *CapacityReservationsClient) createOrUpdateCreateRequest(ctx contex // https://aka.ms/CapacityReservation for more details. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. // - capacityReservationName - The name of the capacity reservation. @@ -166,7 +166,7 @@ func (client *CapacityReservationsClient) BeginDelete(ctx context.Context, resou // https://aka.ms/CapacityReservation for more details. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *CapacityReservationsClient) deleteOperation(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, options *CapacityReservationsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "CapacityReservationsClient.BeginDelete" @@ -212,7 +212,7 @@ func (client *CapacityReservationsClient) deleteCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -221,7 +221,7 @@ func (client *CapacityReservationsClient) deleteCreateRequest(ctx context.Contex // Get - The operation that retrieves information about the capacity reservation. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. // - capacityReservationName - The name of the capacity reservation. @@ -276,7 +276,7 @@ func (client *CapacityReservationsClient) getCreateRequest(ctx context.Context, if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -294,7 +294,7 @@ func (client *CapacityReservationsClient) getHandleResponse(resp *http.Response) // NewListByCapacityReservationGroupPager - Lists all of the capacity reservations in the specified capacity reservation group. // Use the nextLink property in the response to get the next page of capacity reservations. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. // - options - CapacityReservationsClientListByCapacityReservationGroupOptions contains the optional parameters for the CapacityReservationsClient.NewListByCapacityReservationGroupPager @@ -342,7 +342,7 @@ func (client *CapacityReservationsClient) listByCapacityReservationGroupCreateRe return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -360,7 +360,7 @@ func (client *CapacityReservationsClient) listByCapacityReservationGroupHandleRe // BeginUpdate - The operation to update a capacity reservation. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - capacityReservationGroupName - The name of the capacity reservation group. // - capacityReservationName - The name of the capacity reservation. @@ -387,7 +387,7 @@ func (client *CapacityReservationsClient) BeginUpdate(ctx context.Context, resou // Update - The operation to update a capacity reservation. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *CapacityReservationsClient) update(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters CapacityReservationUpdate, options *CapacityReservationsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "CapacityReservationsClient.BeginUpdate" @@ -433,7 +433,7 @@ func (client *CapacityReservationsClient) updateCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go index 73f3548e4..ab719dfdf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go @@ -10,7 +10,7 @@ package armcompute const ( moduleName = "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute" - moduleVersion = "v5.6.0" + moduleVersion = "v5.7.0" ) type AccessLevel string @@ -333,15 +333,17 @@ func PossibleDiffDiskOptionsValues() []DiffDiskOptions { } // DiffDiskPlacement - Specifies the ephemeral disk placement for operating system disk. This property can be used by user -// in the request to choose the location i.e, cache disk or resource disk space for Ephemeral OS disk -// provisioning. For more information on Ephemeral OS disk size requirements, please refer Ephemeral OS disk size requirements -// for Windows VM at +// in the request to choose the location i.e, cache disk, resource disk or nvme disk space for +// Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer Ephemeral OS +// disk size requirements for Windows VM at // https://docs.microsoft.com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VM at -// https://docs.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements +// https://docs.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. Minimum api-version for NvmeDisk: +// 2024-03-01. type DiffDiskPlacement string const ( DiffDiskPlacementCacheDisk DiffDiskPlacement = "CacheDisk" + DiffDiskPlacementNvmeDisk DiffDiskPlacement = "NvmeDisk" DiffDiskPlacementResourceDisk DiffDiskPlacement = "ResourceDisk" ) @@ -349,6 +351,7 @@ const ( func PossibleDiffDiskPlacementValues() []DiffDiskPlacement { return []DiffDiskPlacement{ DiffDiskPlacementCacheDisk, + DiffDiskPlacementNvmeDisk, DiffDiskPlacementResourceDisk, } } @@ -425,25 +428,31 @@ func PossibleDiskCreateOptionValues() []DiskCreateOption { } } -// DiskCreateOptionTypes - Specifies how the virtual machine should be created. Possible values are: Attach. This value is -// used when you are using a specialized disk to create the virtual machine. FromImage. This value is used -// when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference -// element described above. 
If you are using a marketplace image, you also -// use the plan element previously described. +// DiskCreateOptionTypes - Specifies how the virtual machine disk should be created. Possible values are Attach: This value +// is used when you are using a specialized disk to create the virtual machine. FromImage: This value is +// used when you are using an image to create the virtual machine. If you are using a platform image, you should also use +// the imageReference element described above. If you are using a marketplace image, +// you should also use the plan element previously described. Empty: This value is used when creating an empty data disk. +// Copy: This value is used to create a data disk from a snapshot or another disk. +// Restore: This value is used to create a data disk from a disk restore point. type DiskCreateOptionTypes string const ( DiskCreateOptionTypesAttach DiskCreateOptionTypes = "Attach" + DiskCreateOptionTypesCopy DiskCreateOptionTypes = "Copy" DiskCreateOptionTypesEmpty DiskCreateOptionTypes = "Empty" DiskCreateOptionTypesFromImage DiskCreateOptionTypes = "FromImage" + DiskCreateOptionTypesRestore DiskCreateOptionTypes = "Restore" ) // PossibleDiskCreateOptionTypesValues returns the possible values for the DiskCreateOptionTypes const type. func PossibleDiskCreateOptionTypesValues() []DiskCreateOptionTypes { return []DiskCreateOptionTypes{ DiskCreateOptionTypesAttach, + DiskCreateOptionTypesCopy, DiskCreateOptionTypesEmpty, DiskCreateOptionTypesFromImage, + DiskCreateOptionTypesRestore, } } @@ -1678,6 +1687,23 @@ func PossibleReplicationStatusTypesValues() []ReplicationStatusTypes { } } +type ResourceIDOptionsForGetCapacityReservationGroups string + +const ( + ResourceIDOptionsForGetCapacityReservationGroupsAll ResourceIDOptionsForGetCapacityReservationGroups = "All" + ResourceIDOptionsForGetCapacityReservationGroupsCreatedInSubscription ResourceIDOptionsForGetCapacityReservationGroups = "CreatedInSubscription" + ResourceIDOptionsForGetCapacityReservationGroupsSharedWithSubscription ResourceIDOptionsForGetCapacityReservationGroups = "SharedWithSubscription" +) + +// PossibleResourceIDOptionsForGetCapacityReservationGroupsValues returns the possible values for the ResourceIDOptionsForGetCapacityReservationGroups const type. +func PossibleResourceIDOptionsForGetCapacityReservationGroupsValues() []ResourceIDOptionsForGetCapacityReservationGroups { + return []ResourceIDOptionsForGetCapacityReservationGroups{ + ResourceIDOptionsForGetCapacityReservationGroupsAll, + ResourceIDOptionsForGetCapacityReservationGroupsCreatedInSubscription, + ResourceIDOptionsForGetCapacityReservationGroupsSharedWithSubscription, + } +} + // ResourceIdentityType - The type of identity used for the virtual machine scale set. The type 'SystemAssigned, UserAssigned' // includes both an implicitly created identity and a set of user assigned identities. The type 'None' // will remove any identities from the virtual machine scale set. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhostgroups_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhostgroups_client.go index 99434bfab..ae2c4f366 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhostgroups_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhostgroups_client.go @@ -48,7 +48,7 @@ func NewDedicatedHostGroupsClient(subscriptionID string, credential azcore.Token // see Dedicated Host Documentation [https://go.microsoft.com/fwlink/?linkid=2082596] // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - parameters - Parameters supplied to the Create Dedicated Host Group. @@ -96,7 +96,7 @@ func (client *DedicatedHostGroupsClient) createOrUpdateCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -117,7 +117,7 @@ func (client *DedicatedHostGroupsClient) createOrUpdateHandleResponse(resp *http // Delete - Delete a dedicated host group. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - options - DedicatedHostGroupsClientDeleteOptions contains the optional parameters for the DedicatedHostGroupsClient.Delete @@ -163,7 +163,7 @@ func (client *DedicatedHostGroupsClient) deleteCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -172,7 +172,7 @@ func (client *DedicatedHostGroupsClient) deleteCreateRequest(ctx context.Context // Get - Retrieves information about a dedicated host group. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - options - DedicatedHostGroupsClientGetOptions contains the optional parameters for the DedicatedHostGroupsClient.Get method. @@ -221,7 +221,7 @@ func (client *DedicatedHostGroupsClient) getCreateRequest(ctx context.Context, r if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -239,7 +239,7 @@ func (client *DedicatedHostGroupsClient) getHandleResponse(resp *http.Response) // NewListByResourceGroupPager - Lists all of the dedicated host groups in the specified resource group. 
Use the nextLink // property in the response to get the next page of dedicated host groups. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - options - DedicatedHostGroupsClientListByResourceGroupOptions contains the optional parameters for the DedicatedHostGroupsClient.NewListByResourceGroupPager // method. @@ -282,7 +282,7 @@ func (client *DedicatedHostGroupsClient) listByResourceGroupCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -300,7 +300,7 @@ func (client *DedicatedHostGroupsClient) listByResourceGroupHandleResponse(resp // NewListBySubscriptionPager - Lists all of the dedicated host groups in the subscription. Use the nextLink property in the // response to get the next page of dedicated host groups. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - options - DedicatedHostGroupsClientListBySubscriptionOptions contains the optional parameters for the DedicatedHostGroupsClient.NewListBySubscriptionPager // method. func (client *DedicatedHostGroupsClient) NewListBySubscriptionPager(options *DedicatedHostGroupsClientListBySubscriptionOptions) *runtime.Pager[DedicatedHostGroupsClientListBySubscriptionResponse] { @@ -338,7 +338,7 @@ func (client *DedicatedHostGroupsClient) listBySubscriptionCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -356,7 +356,7 @@ func (client *DedicatedHostGroupsClient) listBySubscriptionHandleResponse(resp * // Update - Update an dedicated host group. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - parameters - Parameters supplied to the Update Dedicated Host Group operation. @@ -404,7 +404,7 @@ func (client *DedicatedHostGroupsClient) updateCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhosts_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhosts_client.go index b50a3ec65..66ae1f99b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhosts_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhosts_client.go @@ -47,7 +47,7 @@ func NewDedicatedHostsClient(subscriptionID string, credential azcore.TokenCrede // BeginCreateOrUpdate - Create or update a dedicated host . // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - hostName - The name of the dedicated host . @@ -74,7 +74,7 @@ func (client *DedicatedHostsClient) BeginCreateOrUpdate(ctx context.Context, res // CreateOrUpdate - Create or update a dedicated host . // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *DedicatedHostsClient) createOrUpdate(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, parameters DedicatedHost, options *DedicatedHostsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "DedicatedHostsClient.BeginCreateOrUpdate" @@ -120,7 +120,7 @@ func (client *DedicatedHostsClient) createOrUpdateCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -132,7 +132,7 @@ func (client *DedicatedHostsClient) createOrUpdateCreateRequest(ctx context.Cont // BeginDelete - Delete a dedicated host. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - hostName - The name of the dedicated host. @@ -158,7 +158,7 @@ func (client *DedicatedHostsClient) BeginDelete(ctx context.Context, resourceGro // Delete - Delete a dedicated host. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *DedicatedHostsClient) deleteOperation(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *DedicatedHostsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "DedicatedHostsClient.BeginDelete" @@ -204,7 +204,7 @@ func (client *DedicatedHostsClient) deleteCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -213,7 +213,7 @@ func (client *DedicatedHostsClient) deleteCreateRequest(ctx context.Context, res // Get - Retrieves information about a dedicated host. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - hostName - The name of the dedicated host. 
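The dedicated host operations regenerated above (BeginCreateOrUpdate, BeginDelete, Get) keep their long-running-operation shape; only the api-version string changes. A minimal sketch of driving one of these Begin* calls to completion, assuming an already-authenticated client; the deleteDedicatedHost wrapper and its package name are illustrative assumptions, while BeginDelete's parameters and PollUntilDone come from the generated client and the azcore runtime it uses.

package armcomputeexample // hypothetical helper package, for illustration only

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

// deleteDedicatedHost starts the regenerated BeginDelete long-running operation
// and blocks until the service reports a terminal state.
func deleteDedicatedHost(ctx context.Context, client *armcompute.DedicatedHostsClient, resourceGroupName, hostGroupName, hostName string) error {
	poller, err := client.BeginDelete(ctx, resourceGroupName, hostGroupName, hostName, nil)
	if err != nil {
		return err
	}
	// PollUntilDone re-polls the operation until it completes or fails.
	_, err = poller.PollUntilDone(ctx, nil)
	return err
}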
@@ -267,7 +267,7 @@ func (client *DedicatedHostsClient) getCreateRequest(ctx context.Context, resour if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -285,7 +285,7 @@ func (client *DedicatedHostsClient) getHandleResponse(resp *http.Response) (Dedi // NewListAvailableSizesPager - Lists all available dedicated host sizes to which the specified dedicated host can be resized. // NOTE: The dedicated host sizes provided can be used to only scale up the existing dedicated host. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - hostName - The name of the dedicated host. @@ -339,7 +339,7 @@ func (client *DedicatedHostsClient) listAvailableSizesCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -357,7 +357,7 @@ func (client *DedicatedHostsClient) listAvailableSizesHandleResponse(resp *http. // NewListByHostGroupPager - Lists all of the dedicated hosts in the specified dedicated host group. Use the nextLink property // in the response to get the next page of dedicated hosts. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - options - DedicatedHostsClientListByHostGroupOptions contains the optional parameters for the DedicatedHostsClient.NewListByHostGroupPager @@ -405,7 +405,7 @@ func (client *DedicatedHostsClient) listByHostGroupCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -426,7 +426,7 @@ func (client *DedicatedHostsClient) listByHostGroupHandleResponse(resp *http.Res // for more details. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - hostName - The name of the dedicated host. @@ -455,7 +455,7 @@ func (client *DedicatedHostsClient) BeginRedeploy(ctx context.Context, resourceG // for more details. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *DedicatedHostsClient) redeploy(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *DedicatedHostsClientBeginRedeployOptions) (*http.Response, error) { var err error const operationName = "DedicatedHostsClient.BeginRedeploy" @@ -501,7 +501,7 @@ func (client *DedicatedHostsClient) redeployCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -513,7 +513,7 @@ func (client *DedicatedHostsClient) redeployCreateRequest(ctx context.Context, r // for more details. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - hostName - The name of the dedicated host. @@ -542,7 +542,7 @@ func (client *DedicatedHostsClient) BeginRestart(ctx context.Context, resourceGr // for more details. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *DedicatedHostsClient) restart(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *DedicatedHostsClientBeginRestartOptions) (*http.Response, error) { var err error const operationName = "DedicatedHostsClient.BeginRestart" @@ -588,7 +588,7 @@ func (client *DedicatedHostsClient) restartCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -597,7 +597,7 @@ func (client *DedicatedHostsClient) restartCreateRequest(ctx context.Context, re // BeginUpdate - Update a dedicated host . // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - hostGroupName - The name of the dedicated host group. // - hostName - The name of the dedicated host . @@ -624,7 +624,7 @@ func (client *DedicatedHostsClient) BeginUpdate(ctx context.Context, resourceGro // Update - Update a dedicated host . // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *DedicatedHostsClient) update(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, parameters DedicatedHostUpdate, options *DedicatedHostsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "DedicatedHostsClient.BeginUpdate" @@ -670,7 +670,7 @@ func (client *DedicatedHostsClient) updateCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/images_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/images_client.go index 5c5ab8a93..5fecdbf1d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/images_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/images_client.go @@ -47,7 +47,7 @@ func NewImagesClient(subscriptionID string, credential azcore.TokenCredential, o // BeginCreateOrUpdate - Create or update an image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - imageName - The name of the image. // - parameters - Parameters supplied to the Create Image operation. @@ -73,7 +73,7 @@ func (client *ImagesClient) BeginCreateOrUpdate(ctx context.Context, resourceGro // CreateOrUpdate - Create or update an image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *ImagesClient) createOrUpdate(ctx context.Context, resourceGroupName string, imageName string, parameters Image, options *ImagesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "ImagesClient.BeginCreateOrUpdate" @@ -115,7 +115,7 @@ func (client *ImagesClient) createOrUpdateCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -127,7 +127,7 @@ func (client *ImagesClient) createOrUpdateCreateRequest(ctx context.Context, res // BeginDelete - Deletes an Image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - imageName - The name of the image. // - options - ImagesClientBeginDeleteOptions contains the optional parameters for the ImagesClient.BeginDelete method. @@ -151,7 +151,7 @@ func (client *ImagesClient) BeginDelete(ctx context.Context, resourceGroupName s // Delete - Deletes an Image. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *ImagesClient) deleteOperation(ctx context.Context, resourceGroupName string, imageName string, options *ImagesClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "ImagesClient.BeginDelete" @@ -193,7 +193,7 @@ func (client *ImagesClient) deleteCreateRequest(ctx context.Context, resourceGro return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -202,7 +202,7 @@ func (client *ImagesClient) deleteCreateRequest(ctx context.Context, resourceGro // Get - Gets an image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - imageName - The name of the image. // - options - ImagesClientGetOptions contains the optional parameters for the ImagesClient.Get method. @@ -251,7 +251,7 @@ func (client *ImagesClient) getCreateRequest(ctx context.Context, resourceGroupN if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -269,7 +269,7 @@ func (client *ImagesClient) getHandleResponse(resp *http.Response) (ImagesClient // NewListPager - Gets the list of Images in the subscription. Use nextLink property in the response to get the next page // of Images. Do this till nextLink is null to fetch all the Images. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - options - ImagesClientListOptions contains the optional parameters for the ImagesClient.NewListPager method. func (client *ImagesClient) NewListPager(options *ImagesClientListOptions) *runtime.Pager[ImagesClientListResponse] { return runtime.NewPager(runtime.PagingHandler[ImagesClientListResponse]{ @@ -306,7 +306,7 @@ func (client *ImagesClient) listCreateRequest(ctx context.Context, options *Imag return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -324,7 +324,7 @@ func (client *ImagesClient) listHandleResponse(resp *http.Response) (ImagesClien // NewListByResourceGroupPager - Gets the list of images under a resource group. Use nextLink property in the response to // get the next page of Images. Do this till nextLink is null to fetch all the Images. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - options - ImagesClientListByResourceGroupOptions contains the optional parameters for the ImagesClient.NewListByResourceGroupPager // method. 
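The images_client.go hunks only swap the generated api-version string, but the pager comments above describe the nextLink-driven paging contract, so a brief sketch of draining ImagesClient.NewListPager is included for context; the listImagePages wrapper and its package name are illustrative assumptions, while NewListPager, More and NextPage are the generated/azcore surface shown in this file.

package armcomputeexample // hypothetical, for illustration only

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

// listImagePages walks every page returned by the regenerated ImagesClient.NewListPager;
// each underlying request carries the api-version 2024-03-01 string set in listCreateRequest.
func listImagePages(ctx context.Context, client *armcompute.ImagesClient) error {
	pager := client.NewListPager(nil)
	for pager.More() {
		if _, err := pager.NextPage(ctx); err != nil {
			return err
		}
	}
	return nil
}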
@@ -367,7 +367,7 @@ func (client *ImagesClient) listByResourceGroupCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -385,7 +385,7 @@ func (client *ImagesClient) listByResourceGroupHandleResponse(resp *http.Respons // BeginUpdate - Update an image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - imageName - The name of the image. // - parameters - Parameters supplied to the Update Image operation. @@ -410,7 +410,7 @@ func (client *ImagesClient) BeginUpdate(ctx context.Context, resourceGroupName s // Update - Update an image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *ImagesClient) update(ctx context.Context, resourceGroupName string, imageName string, parameters ImageUpdate, options *ImagesClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "ImagesClient.BeginUpdate" @@ -452,7 +452,7 @@ func (client *ImagesClient) updateCreateRequest(ctx context.Context, resourceGro return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/loganalytics_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/loganalytics_client.go index a48766ffd..a3b77f828 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/loganalytics_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/loganalytics_client.go @@ -48,7 +48,7 @@ func NewLogAnalyticsClient(subscriptionID string, credential azcore.TokenCredent // to show throttling activities. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The location upon which virtual-machine-sizes is queried. // - parameters - Parameters supplied to the LogAnalytics getRequestRateByInterval Api. // - options - LogAnalyticsClientBeginExportRequestRateByIntervalOptions contains the optional parameters for the LogAnalyticsClient.BeginExportRequestRateByInterval @@ -75,7 +75,7 @@ func (client *LogAnalyticsClient) BeginExportRequestRateByInterval(ctx context.C // show throttling activities. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *LogAnalyticsClient) exportRequestRateByInterval(ctx context.Context, location string, parameters RequestRateByIntervalInput, options *LogAnalyticsClientBeginExportRequestRateByIntervalOptions) (*http.Response, error) { var err error const operationName = "LogAnalyticsClient.BeginExportRequestRateByInterval" @@ -113,7 +113,7 @@ func (client *LogAnalyticsClient) exportRequestRateByIntervalCreateRequest(ctx c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -126,7 +126,7 @@ func (client *LogAnalyticsClient) exportRequestRateByIntervalCreateRequest(ctx c // window. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The location upon which virtual-machine-sizes is queried. // - parameters - Parameters supplied to the LogAnalytics getThrottledRequests Api. // - options - LogAnalyticsClientBeginExportThrottledRequestsOptions contains the optional parameters for the LogAnalyticsClient.BeginExportThrottledRequests @@ -152,7 +152,7 @@ func (client *LogAnalyticsClient) BeginExportThrottledRequests(ctx context.Conte // ExportThrottledRequests - Export logs that show total throttled Api requests for this subscription in the given time window. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *LogAnalyticsClient) exportThrottledRequests(ctx context.Context, location string, parameters ThrottledRequestsInput, options *LogAnalyticsClientBeginExportThrottledRequestsOptions) (*http.Response, error) { var err error const operationName = "LogAnalyticsClient.BeginExportThrottledRequests" @@ -190,7 +190,7 @@ func (client *LogAnalyticsClient) exportThrottledRequestsCreateRequest(ctx conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go index 6f755a27e..1fad0acd2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go @@ -368,7 +368,7 @@ type CapacityReservationGroupInstanceView struct { // READ-ONLY; List of instance view of the capacity reservations under the capacity reservation group. CapacityReservations []*CapacityReservationInstanceViewWithName - // READ-ONLY; List of the subscriptions that the capacity reservation group is shared with. Note: Minimum api-version: 2023-09-01. + // READ-ONLY; List of the subscriptions that the capacity reservation group is shared with. Note: Minimum api-version: 2024-03-01. // Please refer to https://aka.ms/computereservationsharing for more details. 
SharedSubscriptionIDs []*SubResourceReadOnly } @@ -388,7 +388,7 @@ type CapacityReservationGroupProperties struct { // Specifies the settings to enable sharing across subscriptions for the capacity reservation group resource. Pls. keep in // mind the capacity reservation group resource generally can be shared across // subscriptions belonging to a single azure AAD tenant or cross AAD tenant if there is a trust relationship established between - // the AAD tenants. Note: Minimum api-version: 2023-09-01. Please refer to + // the AAD tenants. Note: Minimum api-version: 2024-03-01. Please refer to // https://aka.ms/computereservationsharing for more details. SharingProfile *ResourceSharingProfile @@ -1064,11 +1064,13 @@ type CreationData struct { // DataDisk - Describes a data disk. type DataDisk struct { - // REQUIRED; Specifies how the virtual machine should be created. Possible values are: Attach. This value is used when you - // are using a specialized disk to create the virtual machine. FromImage. This value is used - // when you are using an image to create the virtual machine. If you are using a platform image, you should also use the imageReference - // element described above. If you are using a marketplace image, you - // should also use the plan element previously described. + // REQUIRED; Specifies how the virtual machine disk should be created. Possible values are Attach: This value is used when + // you are using a specialized disk to create the virtual machine. FromImage: This value is + // used when you are using an image to create the virtual machine data disk. If you are using a platform image, you should + // also use the imageReference element described above. If you are using a + // marketplace image, you should also use the plan element previously described. Empty: This value is used when creating an + // empty data disk. Copy: This value is used to create a data disk from a snapshot + // or another disk. Restore: This value is used to create a data disk from a disk restore point. CreateOption *DiskCreateOptionTypes // REQUIRED; Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and @@ -1110,6 +1112,9 @@ type DataDisk struct { // The disk name. Name *string + // The source resource identifier. It can be a snapshot, or disk restore point from which to create a disk. + SourceResource *APIEntityReference + // Specifies whether the data disk is in process of detachment from the VirtualMachine/VirtualMachineScaleset ToBeDetached *bool @@ -1153,10 +1158,25 @@ type DataDisksToAttach struct { // REQUIRED; ID of the managed data disk. DiskID *string + // Specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite. The defaulting behavior is: None for + // Standard storage. ReadOnly for Premium storage. + Caching *CachingTypes + + // Specifies whether data disk should be deleted or detached upon VM deletion. Possible values are: Delete. If this value + // is used, the data disk is deleted when VM is deleted. Detach. If this value is + // used, the data disk is retained after VM is deleted. The default value is set to Detach. + DeleteOption *DiskDeleteOptionTypes + + // Specifies the customer managed disk encryption set resource id for the managed disk. + DiskEncryptionSet *DiskEncryptionSetParameters + // The logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be // unique for each data disk attached to a VM. 
If not specified, lun would be auto // assigned. Lun *int32 + + // Specifies whether writeAccelerator should be enabled or disabled on the disk. + WriteAcceleratorEnabled *bool } // DataDisksToDetach - Describes the data disk to be detached. @@ -1408,11 +1428,12 @@ type DiffDiskSettings struct { // Specifies the ephemeral disk settings for operating system disk. Option *DiffDiskOptions - // Specifies the ephemeral disk placement for operating system disk. Possible values are: CacheDisk, ResourceDisk. The defaulting - // behavior is: CacheDisk if one is configured for the VM size otherwise - // ResourceDisk is used. Refer to the VM size documentation for Windows VM at https://docs.microsoft.com/azure/virtual-machines/windows/sizes + // Specifies the ephemeral disk placement for operating system disk. Possible values are: CacheDisk, ResourceDisk, NvmeDisk. + // The defaulting behavior is: CacheDisk if one is configured for the VM size + // otherwise ResourceDisk or NvmeDisk is used. Refer to the VM size documentation for Windows VM at https://docs.microsoft.com/azure/virtual-machines/windows/sizes // and Linux VM at - // https://docs.microsoft.com/azure/virtual-machines/linux/sizes to check which VM sizes exposes a cache disk. + // https://docs.microsoft.com/azure/virtual-machines/linux/sizes to check which VM sizes exposes a cache disk. Minimum api-version + // for NvmeDisk: 2024-03-01. Placement *DiffDiskPlacement } @@ -2075,6 +2096,12 @@ type EncryptionSettingsElement struct { KeyEncryptionKey *KeyVaultAndKeyReference } +// EventGridAndResourceGraph - Specifies eventGridAndResourceGraph related Scheduled Event related configurations. +type EventGridAndResourceGraph struct { + // Specifies if event grid and resource graph is enabled for Scheduled event related configurations. + Enable *bool +} + // ExtendedLocation - The complex type of the extended location. type ExtendedLocation struct { // The name of the extended location. @@ -3449,11 +3476,11 @@ type NetworkProfile struct { // disks, see About disks and VHDs for Azure virtual machines // [https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview]. type OSDisk struct { - // REQUIRED; Specifies how the virtual machine should be created. Possible values are: Attach. This value is used when you - // are using a specialized disk to create the virtual machine. FromImage. This value is used - // when you are using an image to create the virtual machine. If you are using a platform image, you should also use the imageReference - // element described above. If you are using a marketplace image, you - // should also use the plan element previously described. + // REQUIRED; Specifies how the virtual machine disk should be created. Possible values are Attach: This value is used when + // you are using a specialized disk to create the virtual machine. FromImage: This value is + // used when you are using an image to create the virtual machine. If you are using a platform image, you should also use + // the imageReference element described above. If you are using a marketplace image, + // you should also use the plan element previously described. CreateOption *DiskCreateOptionTypes // Specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite. The defaulting behavior is: None for @@ -4074,7 +4101,7 @@ type ProximityPlacementGroupUpdate struct { Tags map[string]*string } -// ProxyAgentSettings - Specifies ProxyAgent settings while creating the virtual machine. Minimum api-version: 2023-09-01. 
+// ProxyAgentSettings - Specifies ProxyAgent settings while creating the virtual machine. Minimum api-version: 2024-03-01. type ProxyAgentSettings struct { // Specifies whether ProxyAgent feature should be enabled on the virtual machine or virtual machine scale set. Enabled *bool @@ -4431,7 +4458,7 @@ type ResourceSKUsResult struct { type ResourceSharingProfile struct { // Specifies an array of subscription resource IDs that capacity reservation group is shared with. Note: Minimum api-version: - // 2023-09-01. Please refer to https://aka.ms/computereservationsharing for more + // 2024-03-01. Please refer to https://aka.ms/computereservationsharing for more // details. SubscriptionIDs []*SubResource } @@ -5121,6 +5148,24 @@ type ScaleInPolicy struct { Rules []*VirtualMachineScaleSetScaleInRules } +type ScheduledEventsAdditionalPublishingTargets struct { + // The configuration parameters used while creating eventGridAndResourceGraph Scheduled Event setting. + EventGridAndResourceGraph *EventGridAndResourceGraph +} + +// ScheduledEventsPolicy - Specifies Redeploy, Reboot and ScheduledEventsAdditionalPublishingTargets Scheduled Event related +// configurations. +type ScheduledEventsPolicy struct { + // The configuration parameters used while publishing scheduledEventsAdditionalPublishingTargets. + ScheduledEventsAdditionalPublishingTargets *ScheduledEventsAdditionalPublishingTargets + + // The configuration parameters used while creating userInitiatedReboot scheduled event setting creation. + UserInitiatedReboot *UserInitiatedReboot + + // The configuration parameters used while creating userInitiatedRedeploy scheduled event setting creation. + UserInitiatedRedeploy *UserInitiatedRedeploy +} + type ScheduledEventsProfile struct { // Specifies OS Image Scheduled Event related configurations. OSImageNotificationProfile *OSImageNotificationProfile @@ -5150,7 +5195,7 @@ type SecurityProfile struct { // Specifies the Managed Identity used by ADE to get access token for keyvault operations. EncryptionIdentity *EncryptionIdentity - // Specifies ProxyAgent settings while creating the virtual machine. Minimum api-version: 2023-09-01. + // Specifies ProxyAgent settings while creating the virtual machine. Minimum api-version: 2024-03-01. ProxyAgentSettings *ProxyAgentSettings // Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. The @@ -5942,6 +5987,18 @@ type UserAssignedIdentitiesValue struct { PrincipalID *string } +// UserInitiatedReboot - Specifies Reboot related Scheduled Event related configurations. +type UserInitiatedReboot struct { + // Specifies Reboot Scheduled Event related configurations. + AutomaticallyApprove *bool +} + +// UserInitiatedRedeploy - Specifies Redeploy related Scheduled Event related configurations. +type UserInitiatedRedeploy struct { + // Specifies Redeploy Scheduled Event related configurations. + AutomaticallyApprove *bool +} + // VMDiskSecurityProfile - Specifies the security profile settings for the managed disk. Note: It can only be set for Confidential // VMs. type VMDiskSecurityProfile struct { @@ -6786,6 +6843,10 @@ type VirtualMachineProperties struct { // 2018-04-01. ProximityPlacementGroup *SubResource + // Specifies Redeploy, Reboot and ScheduledEventsAdditionalPublishingTargets Scheduled Event related configurations for the + // virtual machine. + ScheduledEventsPolicy *ScheduledEventsPolicy + // Specifies Scheduled Event related configurations. 
ScheduledEventsProfile *ScheduledEventsProfile @@ -7596,6 +7657,9 @@ type VirtualMachineScaleSetProperties struct { // Specifies the policies applied when scaling in Virtual Machines in the Virtual Machine Scale Set. ScaleInPolicy *ScaleInPolicy + // The ScheduledEventsPolicy. + ScheduledEventsPolicy *ScheduledEventsPolicy + // When true this limits the scale set to a single placement group, of max size 100 virtual machines. NOTE: If singlePlacementGroup // is true, it may be modified to false. However, if singlePlacementGroup // is false, it may not be modified to true. @@ -7680,6 +7744,9 @@ type VirtualMachineScaleSetReimageParameters struct { // is reimaged to the existing version of OS Disk. ExactVersion *string + // Parameter to force update ephemeral OS disk for a virtual machine scale set VM + ForceUpdateOSDiskForEphemeral *bool + // The virtual machine scale set instance ids. Omitting the virtual machine scale set instance ids will result in the operation // being performed on all virtual machines in the virtual machine scale set. InstanceIDs []*string @@ -7873,6 +7940,9 @@ type VirtualMachineScaleSetUpdateOSDisk struct { // delete option for Ephemeral OS Disk. DeleteOption *DiskDeleteOptionTypes + // Specifies the ephemeral disk Settings for the operating system disk used by the virtual machine scale set. + DiffDiskSettings *DiffDiskSettings + // Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a // virtual machine image. // diskSizeGB is the number of bytes x 1024^3 for the disk and the value cannot be larger than 1023 @@ -8281,8 +8351,8 @@ type VirtualMachineScaleSetVMProfile struct { UserData *string // READ-ONLY; Specifies the time in which this VM profile for the Virtual Machine Scale Set was created. Minimum API version - // for this property is 2023-09-01. This value will be added to VMSS Flex VM tags when - // creating/updating the VMSS VM Profile with minimum api-version 2023-09-01. + // for this property is 2024-03-01. This value will be added to VMSS Flex VM tags when + // creating/updating the VMSS VM Profile with minimum api-version 2024-03-01. TimeCreated *time.Time } @@ -8379,6 +8449,9 @@ type VirtualMachineScaleSetVMReimageParameters struct { // is reimaged to the existing version of OS Disk. ExactVersion *string + // Parameter to force update ephemeral OS disk for a virtual machine scale set VM + ForceUpdateOSDiskForEphemeral *bool + // Specifies information required for reimaging the non-ephemeral OS disk. 
OSProfile *OSProfileProvisioningData diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go index 16bc25e59..1bdfcb442 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go @@ -2484,6 +2484,7 @@ func (d DataDisk) MarshalJSON() ([]byte, error) { populate(objectMap, "lun", d.Lun) populate(objectMap, "managedDisk", d.ManagedDisk) populate(objectMap, "name", d.Name) + populate(objectMap, "sourceResource", d.SourceResource) populate(objectMap, "toBeDetached", d.ToBeDetached) populate(objectMap, "vhd", d.Vhd) populate(objectMap, "writeAcceleratorEnabled", d.WriteAcceleratorEnabled) @@ -2532,6 +2533,9 @@ func (d *DataDisk) UnmarshalJSON(data []byte) error { case "name": err = unpopulate(val, "Name", &d.Name) delete(rawMsg, key) + case "sourceResource": + err = unpopulate(val, "SourceResource", &d.SourceResource) + delete(rawMsg, key) case "toBeDetached": err = unpopulate(val, "ToBeDetached", &d.ToBeDetached) delete(rawMsg, key) @@ -2610,8 +2614,12 @@ func (d *DataDiskImageEncryption) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type DataDisksToAttach. func (d DataDisksToAttach) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "caching", d.Caching) + populate(objectMap, "deleteOption", d.DeleteOption) + populate(objectMap, "diskEncryptionSet", d.DiskEncryptionSet) populate(objectMap, "diskId", d.DiskID) populate(objectMap, "lun", d.Lun) + populate(objectMap, "writeAcceleratorEnabled", d.WriteAcceleratorEnabled) return json.Marshal(objectMap) } @@ -2624,12 +2632,24 @@ func (d *DataDisksToAttach) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "caching": + err = unpopulate(val, "Caching", &d.Caching) + delete(rawMsg, key) + case "deleteOption": + err = unpopulate(val, "DeleteOption", &d.DeleteOption) + delete(rawMsg, key) + case "diskEncryptionSet": + err = unpopulate(val, "DiskEncryptionSet", &d.DiskEncryptionSet) + delete(rawMsg, key) case "diskId": err = unpopulate(val, "DiskID", &d.DiskID) delete(rawMsg, key) case "lun": err = unpopulate(val, "Lun", &d.Lun) delete(rawMsg, key) + case "writeAcceleratorEnabled": + err = unpopulate(val, "WriteAcceleratorEnabled", &d.WriteAcceleratorEnabled) + delete(rawMsg, key) } if err != nil { return fmt.Errorf("unmarshalling type %T: %v", d, err) @@ -4749,6 +4769,33 @@ func (e *EncryptionSettingsElement) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type EventGridAndResourceGraph. +func (e EventGridAndResourceGraph) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "enable", e.Enable) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type EventGridAndResourceGraph. 
+func (e *EventGridAndResourceGraph) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "enable": + err = unpopulate(val, "Enable", &e.Enable) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type ExtendedLocation. func (e ExtendedLocation) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -12311,6 +12358,68 @@ func (s *ScaleInPolicy) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type ScheduledEventsAdditionalPublishingTargets. +func (s ScheduledEventsAdditionalPublishingTargets) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "eventGridAndResourceGraph", s.EventGridAndResourceGraph) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ScheduledEventsAdditionalPublishingTargets. +func (s *ScheduledEventsAdditionalPublishingTargets) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "eventGridAndResourceGraph": + err = unpopulate(val, "EventGridAndResourceGraph", &s.EventGridAndResourceGraph) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ScheduledEventsPolicy. +func (s ScheduledEventsPolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "scheduledEventsAdditionalPublishingTargets", s.ScheduledEventsAdditionalPublishingTargets) + populate(objectMap, "userInitiatedReboot", s.UserInitiatedReboot) + populate(objectMap, "userInitiatedRedeploy", s.UserInitiatedRedeploy) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ScheduledEventsPolicy. +func (s *ScheduledEventsPolicy) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "scheduledEventsAdditionalPublishingTargets": + err = unpopulate(val, "ScheduledEventsAdditionalPublishingTargets", &s.ScheduledEventsAdditionalPublishingTargets) + delete(rawMsg, key) + case "userInitiatedReboot": + err = unpopulate(val, "UserInitiatedReboot", &s.UserInitiatedReboot) + delete(rawMsg, key) + case "userInitiatedRedeploy": + err = unpopulate(val, "UserInitiatedRedeploy", &s.UserInitiatedRedeploy) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type ScheduledEventsProfile. 
func (s ScheduledEventsProfile) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -14456,6 +14565,60 @@ func (u *UserAssignedIdentitiesValue) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type UserInitiatedReboot. +func (u UserInitiatedReboot) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "automaticallyApprove", u.AutomaticallyApprove) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type UserInitiatedReboot. +func (u *UserInitiatedReboot) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "automaticallyApprove": + err = unpopulate(val, "AutomaticallyApprove", &u.AutomaticallyApprove) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type UserInitiatedRedeploy. +func (u UserInitiatedRedeploy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "automaticallyApprove", u.AutomaticallyApprove) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type UserInitiatedRedeploy. +func (u *UserInitiatedRedeploy) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "automaticallyApprove": + err = unpopulate(val, "AutomaticallyApprove", &u.AutomaticallyApprove) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type VMDiskSecurityProfile. 
func (v VMDiskSecurityProfile) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -16143,6 +16306,7 @@ func (v VirtualMachineProperties) MarshalJSON() ([]byte, error) { populate(objectMap, "priority", v.Priority) populate(objectMap, "provisioningState", v.ProvisioningState) populate(objectMap, "proximityPlacementGroup", v.ProximityPlacementGroup) + populate(objectMap, "scheduledEventsPolicy", v.ScheduledEventsPolicy) populate(objectMap, "scheduledEventsProfile", v.ScheduledEventsProfile) populate(objectMap, "securityProfile", v.SecurityProfile) populate(objectMap, "storageProfile", v.StorageProfile) @@ -16219,6 +16383,9 @@ func (v *VirtualMachineProperties) UnmarshalJSON(data []byte) error { case "proximityPlacementGroup": err = unpopulate(val, "ProximityPlacementGroup", &v.ProximityPlacementGroup) delete(rawMsg, key) + case "scheduledEventsPolicy": + err = unpopulate(val, "ScheduledEventsPolicy", &v.ScheduledEventsPolicy) + delete(rawMsg, key) case "scheduledEventsProfile": err = unpopulate(val, "ScheduledEventsProfile", &v.ScheduledEventsProfile) delete(rawMsg, key) @@ -17737,6 +17904,7 @@ func (v VirtualMachineScaleSetProperties) MarshalJSON() ([]byte, error) { populate(objectMap, "proximityPlacementGroup", v.ProximityPlacementGroup) populate(objectMap, "resiliencyPolicy", v.ResiliencyPolicy) populate(objectMap, "scaleInPolicy", v.ScaleInPolicy) + populate(objectMap, "scheduledEventsPolicy", v.ScheduledEventsPolicy) populate(objectMap, "singlePlacementGroup", v.SinglePlacementGroup) populate(objectMap, "spotRestorePolicy", v.SpotRestorePolicy) populateDateTimeRFC3339(objectMap, "timeCreated", v.TimeCreated) @@ -17795,6 +17963,9 @@ func (v *VirtualMachineScaleSetProperties) UnmarshalJSON(data []byte) error { case "scaleInPolicy": err = unpopulate(val, "ScaleInPolicy", &v.ScaleInPolicy) delete(rawMsg, key) + case "scheduledEventsPolicy": + err = unpopulate(val, "ScheduledEventsPolicy", &v.ScheduledEventsPolicy) + delete(rawMsg, key) case "singlePlacementGroup": err = unpopulate(val, "SinglePlacementGroup", &v.SinglePlacementGroup) delete(rawMsg, key) @@ -17941,6 +18112,7 @@ func (v *VirtualMachineScaleSetPublicIPAddressConfigurationProperties) Unmarshal func (v VirtualMachineScaleSetReimageParameters) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "exactVersion", v.ExactVersion) + populate(objectMap, "forceUpdateOSDiskForEphemeral", v.ForceUpdateOSDiskForEphemeral) populate(objectMap, "instanceIds", v.InstanceIDs) populate(objectMap, "osProfile", v.OSProfile) populate(objectMap, "tempDisk", v.TempDisk) @@ -17959,6 +18131,9 @@ func (v *VirtualMachineScaleSetReimageParameters) UnmarshalJSON(data []byte) err case "exactVersion": err = unpopulate(val, "ExactVersion", &v.ExactVersion) delete(rawMsg, key) + case "forceUpdateOSDiskForEphemeral": + err = unpopulate(val, "ForceUpdateOSDiskForEphemeral", &v.ForceUpdateOSDiskForEphemeral) + delete(rawMsg, key) case "instanceIds": err = unpopulate(val, "InstanceIDs", &v.InstanceIDs) delete(rawMsg, key) @@ -18356,6 +18531,7 @@ func (v VirtualMachineScaleSetUpdateOSDisk) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "caching", v.Caching) populate(objectMap, "deleteOption", v.DeleteOption) + populate(objectMap, "diffDiskSettings", v.DiffDiskSettings) populate(objectMap, "diskSizeGB", v.DiskSizeGB) populate(objectMap, "image", v.Image) populate(objectMap, "managedDisk", v.ManagedDisk) @@ -18379,6 +18555,9 @@ func (v *VirtualMachineScaleSetUpdateOSDisk) 
UnmarshalJSON(data []byte) error { case "deleteOption": err = unpopulate(val, "DeleteOption", &v.DeleteOption) delete(rawMsg, key) + case "diffDiskSettings": + err = unpopulate(val, "DiffDiskSettings", &v.DiffDiskSettings) + delete(rawMsg, key) case "diskSizeGB": err = unpopulate(val, "DiskSizeGB", &v.DiskSizeGB) delete(rawMsg, key) @@ -19327,6 +19506,7 @@ func (v *VirtualMachineScaleSetVMProtectionPolicy) UnmarshalJSON(data []byte) er func (v VirtualMachineScaleSetVMReimageParameters) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "exactVersion", v.ExactVersion) + populate(objectMap, "forceUpdateOSDiskForEphemeral", v.ForceUpdateOSDiskForEphemeral) populate(objectMap, "osProfile", v.OSProfile) populate(objectMap, "tempDisk", v.TempDisk) return json.Marshal(objectMap) @@ -19344,6 +19524,9 @@ func (v *VirtualMachineScaleSetVMReimageParameters) UnmarshalJSON(data []byte) e case "exactVersion": err = unpopulate(val, "ExactVersion", &v.ExactVersion) delete(rawMsg, key) + case "forceUpdateOSDiskForEphemeral": + err = unpopulate(val, "ForceUpdateOSDiskForEphemeral", &v.ForceUpdateOSDiskForEphemeral) + delete(rawMsg, key) case "osProfile": err = unpopulate(val, "OSProfile", &v.OSProfile) delete(rawMsg, key) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/operations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/operations_client.go index f7fded74a..7c438a561 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/operations_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/operations_client.go @@ -39,7 +39,7 @@ func NewOperationsClient(credential azcore.TokenCredential, options *arm.ClientO // NewListPager - Gets a list of compute operations. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - options - OperationsClientListOptions contains the optional parameters for the OperationsClient.NewListPager method. func (client *OperationsClient) NewListPager(options *OperationsClientListOptions) *runtime.Pager[OperationsClientListResponse] { return runtime.NewPager(runtime.PagingHandler[OperationsClientListResponse]{ @@ -73,7 +73,7 @@ func (client *OperationsClient) listCreateRequest(ctx context.Context, options * return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/options.go index 0c363663a..5ea3446d0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/options.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/options.go @@ -84,6 +84,13 @@ type CapacityReservationGroupsClientListBySubscriptionOptions struct { // VM Instance or both resource Ids which are associated to capacity // reservation group in the response. Expand *ExpandTypesForGetCapacityReservationGroups + + // The query option to fetch Capacity Reservation Group Resource Ids. + // 'CreatedInSubscription' enables fetching Resource Ids for all capacity reservation group resources created in the subscription. 
+ // 'SharedWithSubscription' enables fetching Resource Ids for all capacity reservation group resources shared with the subscription. + // 'All' enables fetching Resource Ids for all capacity reservation group resources shared with the subscription and created + // in the subscription. + ResourceIDsOnly *ResourceIDOptionsForGetCapacityReservationGroups } // CapacityReservationGroupsClientUpdateOptions contains the optional parameters for the CapacityReservationGroupsClient.Update diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/proximityplacementgroups_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/proximityplacementgroups_client.go index c35ed6b0c..c18659203 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/proximityplacementgroups_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/proximityplacementgroups_client.go @@ -47,7 +47,7 @@ func NewProximityPlacementGroupsClient(subscriptionID string, credential azcore. // CreateOrUpdate - Create or update a proximity placement group. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - proximityPlacementGroupName - The name of the proximity placement group. // - parameters - Parameters supplied to the Create Proximity Placement Group operation. @@ -95,7 +95,7 @@ func (client *ProximityPlacementGroupsClient) createOrUpdateCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -116,7 +116,7 @@ func (client *ProximityPlacementGroupsClient) createOrUpdateHandleResponse(resp // Delete - Delete a proximity placement group. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - proximityPlacementGroupName - The name of the proximity placement group. // - options - ProximityPlacementGroupsClientDeleteOptions contains the optional parameters for the ProximityPlacementGroupsClient.Delete @@ -162,7 +162,7 @@ func (client *ProximityPlacementGroupsClient) deleteCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -171,7 +171,7 @@ func (client *ProximityPlacementGroupsClient) deleteCreateRequest(ctx context.Co // Get - Retrieves information about a proximity placement group . // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - proximityPlacementGroupName - The name of the proximity placement group. 
// - options - ProximityPlacementGroupsClientGetOptions contains the optional parameters for the ProximityPlacementGroupsClient.Get @@ -218,7 +218,7 @@ func (client *ProximityPlacementGroupsClient) getCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") if options != nil && options.IncludeColocationStatus != nil { reqQP.Set("includeColocationStatus", *options.IncludeColocationStatus) } @@ -238,7 +238,7 @@ func (client *ProximityPlacementGroupsClient) getHandleResponse(resp *http.Respo // NewListByResourceGroupPager - Lists all proximity placement groups in a resource group. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - options - ProximityPlacementGroupsClientListByResourceGroupOptions contains the optional parameters for the ProximityPlacementGroupsClient.NewListByResourceGroupPager // method. @@ -281,7 +281,7 @@ func (client *ProximityPlacementGroupsClient) listByResourceGroupCreateRequest(c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -298,7 +298,7 @@ func (client *ProximityPlacementGroupsClient) listByResourceGroupHandleResponse( // NewListBySubscriptionPager - Lists all proximity placement groups in a subscription. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - options - ProximityPlacementGroupsClientListBySubscriptionOptions contains the optional parameters for the ProximityPlacementGroupsClient.NewListBySubscriptionPager // method. func (client *ProximityPlacementGroupsClient) NewListBySubscriptionPager(options *ProximityPlacementGroupsClientListBySubscriptionOptions) *runtime.Pager[ProximityPlacementGroupsClientListBySubscriptionResponse] { @@ -336,7 +336,7 @@ func (client *ProximityPlacementGroupsClient) listBySubscriptionCreateRequest(ct return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -354,7 +354,7 @@ func (client *ProximityPlacementGroupsClient) listBySubscriptionHandleResponse(r // Update - Update a proximity placement group. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - proximityPlacementGroupName - The name of the proximity placement group. // - parameters - Parameters supplied to the Update Proximity Placement Group operation. 
@@ -402,7 +402,7 @@ func (client *ProximityPlacementGroupsClient) updateCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepointcollections_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepointcollections_client.go index b07477e6d..f53e3a7d4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepointcollections_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepointcollections_client.go @@ -48,7 +48,7 @@ func NewRestorePointCollectionsClient(subscriptionID string, credential azcore.T // for more details. When updating a restore point collection, only tags may be modified. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection. // - parameters - Parameters supplied to the Create or Update restore point collection operation. @@ -96,7 +96,7 @@ func (client *RestorePointCollectionsClient) createOrUpdateCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -118,7 +118,7 @@ func (client *RestorePointCollectionsClient) createOrUpdateHandleResponse(resp * // points. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the Restore Point Collection. // - options - RestorePointCollectionsClientBeginDeleteOptions contains the optional parameters for the RestorePointCollectionsClient.BeginDelete @@ -144,7 +144,7 @@ func (client *RestorePointCollectionsClient) BeginDelete(ctx context.Context, re // points. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *RestorePointCollectionsClient) deleteOperation(ctx context.Context, resourceGroupName string, restorePointCollectionName string, options *RestorePointCollectionsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "RestorePointCollectionsClient.BeginDelete" @@ -186,7 +186,7 @@ func (client *RestorePointCollectionsClient) deleteCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -195,7 +195,7 @@ func (client *RestorePointCollectionsClient) deleteCreateRequest(ctx context.Con // Get - The operation to get the restore point collection. 
// If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection. // - options - RestorePointCollectionsClientGetOptions contains the optional parameters for the RestorePointCollectionsClient.Get @@ -245,7 +245,7 @@ func (client *RestorePointCollectionsClient) getCreateRequest(ctx context.Contex if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -262,7 +262,7 @@ func (client *RestorePointCollectionsClient) getHandleResponse(resp *http.Respon // NewListPager - Gets the list of restore point collections in a resource group. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - options - RestorePointCollectionsClientListOptions contains the optional parameters for the RestorePointCollectionsClient.NewListPager // method. @@ -305,7 +305,7 @@ func (client *RestorePointCollectionsClient) listCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -324,7 +324,7 @@ func (client *RestorePointCollectionsClient) listHandleResponse(resp *http.Respo // to get the next page of restore point collections. Do this till nextLink is not null to fetch all // the restore point collections. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - options - RestorePointCollectionsClientListAllOptions contains the optional parameters for the RestorePointCollectionsClient.NewListAllPager // method. func (client *RestorePointCollectionsClient) NewListAllPager(options *RestorePointCollectionsClientListAllOptions) *runtime.Pager[RestorePointCollectionsClientListAllResponse] { @@ -362,7 +362,7 @@ func (client *RestorePointCollectionsClient) listAllCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -380,7 +380,7 @@ func (client *RestorePointCollectionsClient) listAllHandleResponse(resp *http.Re // Update - The operation to update the restore point collection. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection. // - parameters - Parameters supplied to the Update restore point collection operation. 
@@ -428,7 +428,7 @@ func (client *RestorePointCollectionsClient) updateCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepoints_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepoints_client.go index 7b6364f92..dffafe2da 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepoints_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepoints_client.go @@ -47,7 +47,7 @@ func NewRestorePointsClient(subscriptionID string, credential azcore.TokenCreden // BeginCreate - The operation to create the restore point. Updating properties of an existing restore point is not allowed // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection. // - restorePointName - The name of the restore point. @@ -74,7 +74,7 @@ func (client *RestorePointsClient) BeginCreate(ctx context.Context, resourceGrou // Create - The operation to create the restore point. Updating properties of an existing restore point is not allowed // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *RestorePointsClient) create(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string, parameters RestorePoint, options *RestorePointsClientBeginCreateOptions) (*http.Response, error) { var err error const operationName = "RestorePointsClient.BeginCreate" @@ -120,7 +120,7 @@ func (client *RestorePointsClient) createCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -132,7 +132,7 @@ func (client *RestorePointsClient) createCreateRequest(ctx context.Context, reso // BeginDelete - The operation to delete the restore point. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the Restore Point Collection. // - restorePointName - The name of the restore point. @@ -158,7 +158,7 @@ func (client *RestorePointsClient) BeginDelete(ctx context.Context, resourceGrou // Delete - The operation to delete the restore point. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *RestorePointsClient) deleteOperation(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string, options *RestorePointsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "RestorePointsClient.BeginDelete" @@ -204,7 +204,7 @@ func (client *RestorePointsClient) deleteCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -213,7 +213,7 @@ func (client *RestorePointsClient) deleteCreateRequest(ctx context.Context, reso // Get - The operation to get the restore point. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - restorePointCollectionName - The name of the restore point collection. // - restorePointName - The name of the restore point. @@ -267,7 +267,7 @@ func (client *RestorePointsClient) getCreateRequest(ctx context.Context, resourc if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sshpublickeys_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sshpublickeys_client.go index a577d55d2..58d8d18ba 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sshpublickeys_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sshpublickeys_client.go @@ -47,7 +47,7 @@ func NewSSHPublicKeysClient(subscriptionID string, credential azcore.TokenCreden // Create - Creates a new SSH public key resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - sshPublicKeyName - The name of the SSH public key. // - parameters - Parameters supplied to create the SSH public key. @@ -94,7 +94,7 @@ func (client *SSHPublicKeysClient) createCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -115,7 +115,7 @@ func (client *SSHPublicKeysClient) createHandleResponse(resp *http.Response) (SS // Delete - Delete an SSH public key. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - sshPublicKeyName - The name of the SSH public key. // - options - SSHPublicKeysClientDeleteOptions contains the optional parameters for the SSHPublicKeysClient.Delete method. 
@@ -160,7 +160,7 @@ func (client *SSHPublicKeysClient) deleteCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -171,7 +171,7 @@ func (client *SSHPublicKeysClient) deleteCreateRequest(ctx context.Context, reso // SSH public key resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - sshPublicKeyName - The name of the SSH public key. // - options - SSHPublicKeysClientGenerateKeyPairOptions contains the optional parameters for the SSHPublicKeysClient.GenerateKeyPair @@ -218,7 +218,7 @@ func (client *SSHPublicKeysClient) generateKeyPairCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.Parameters != nil { @@ -242,7 +242,7 @@ func (client *SSHPublicKeysClient) generateKeyPairHandleResponse(resp *http.Resp // Get - Retrieves information about an SSH public key. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - sshPublicKeyName - The name of the SSH public key. // - options - SSHPublicKeysClientGetOptions contains the optional parameters for the SSHPublicKeysClient.Get method. @@ -288,7 +288,7 @@ func (client *SSHPublicKeysClient) getCreateRequest(ctx context.Context, resourc return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -306,7 +306,7 @@ func (client *SSHPublicKeysClient) getHandleResponse(resp *http.Response) (SSHPu // NewListByResourceGroupPager - Lists all of the SSH public keys in the specified resource group. Use the nextLink property // in the response to get the next page of SSH public keys. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - options - SSHPublicKeysClientListByResourceGroupOptions contains the optional parameters for the SSHPublicKeysClient.NewListByResourceGroupPager // method. @@ -349,7 +349,7 @@ func (client *SSHPublicKeysClient) listByResourceGroupCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -367,7 +367,7 @@ func (client *SSHPublicKeysClient) listByResourceGroupHandleResponse(resp *http. // NewListBySubscriptionPager - Lists all of the SSH public keys in the subscription. Use the nextLink property in the response // to get the next page of SSH public keys. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - options - SSHPublicKeysClientListBySubscriptionOptions contains the optional parameters for the SSHPublicKeysClient.NewListBySubscriptionPager // method. func (client *SSHPublicKeysClient) NewListBySubscriptionPager(options *SSHPublicKeysClientListBySubscriptionOptions) *runtime.Pager[SSHPublicKeysClientListBySubscriptionResponse] { @@ -405,7 +405,7 @@ func (client *SSHPublicKeysClient) listBySubscriptionCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -423,7 +423,7 @@ func (client *SSHPublicKeysClient) listBySubscriptionHandleResponse(resp *http.R // Update - Updates a new SSH public key resource. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - sshPublicKeyName - The name of the SSH public key. // - parameters - Parameters supplied to update the SSH public key. @@ -470,7 +470,7 @@ func (client *SSHPublicKeysClient) updateCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/usage_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/usage_client.go index afb6852b0..e251486e9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/usage_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/usage_client.go @@ -47,7 +47,7 @@ func NewUsageClient(subscriptionID string, credential azcore.TokenCredential, op // NewListPager - Gets, for the specified location, the current compute resource usage information as well as the limits for // compute resources under the subscription. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The location for which resource usage is queried. // - options - UsageClientListOptions contains the optional parameters for the UsageClient.NewListPager method. 
func (client *UsageClient) NewListPager(location string, options *UsageClientListOptions) *runtime.Pager[UsageClientListResponse] { @@ -89,7 +89,7 @@ func (client *UsageClient) listCreateRequest(ctx context.Context, location strin return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensionimages_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensionimages_client.go index e8ff65ad0..a337d2453 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensionimages_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensionimages_client.go @@ -48,7 +48,7 @@ func NewVirtualMachineExtensionImagesClient(subscriptionID string, credential az // Get - Gets a virtual machine extension image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - options - VirtualMachineExtensionImagesClientGetOptions contains the optional parameters for the VirtualMachineExtensionImagesClient.Get // method. @@ -102,7 +102,7 @@ func (client *VirtualMachineExtensionImagesClient) getCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -120,7 +120,7 @@ func (client *VirtualMachineExtensionImagesClient) getHandleResponse(resp *http. // ListTypes - Gets a list of virtual machine extension image types. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - options - VirtualMachineExtensionImagesClientListTypesOptions contains the optional parameters for the VirtualMachineExtensionImagesClient.ListTypes // method. @@ -166,7 +166,7 @@ func (client *VirtualMachineExtensionImagesClient) listTypesCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -184,7 +184,7 @@ func (client *VirtualMachineExtensionImagesClient) listTypesHandleResponse(resp // ListVersions - Gets a list of virtual machine extension image versions. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - options - VirtualMachineExtensionImagesClientListVersionsOptions contains the optional parameters for the VirtualMachineExtensionImagesClient.ListVersions // method. 
@@ -243,7 +243,7 @@ func (client *VirtualMachineExtensionImagesClient) listVersionsCreateRequest(ctx if options != nil && options.Top != nil { reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensions_client.go index 9d658bb4e..bd62e8137 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensions_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensions_client.go @@ -47,7 +47,7 @@ func NewVirtualMachineExtensionsClient(subscriptionID string, credential azcore. // BeginCreateOrUpdate - The operation to create or update the extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine where the extension should be created or updated. // - vmExtensionName - The name of the virtual machine extension. @@ -74,7 +74,7 @@ func (client *VirtualMachineExtensionsClient) BeginCreateOrUpdate(ctx context.Co // CreateOrUpdate - The operation to create or update the extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineExtensionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtension, options *VirtualMachineExtensionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineExtensionsClient.BeginCreateOrUpdate" @@ -120,7 +120,7 @@ func (client *VirtualMachineExtensionsClient) createOrUpdateCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, extensionParameters); err != nil { @@ -132,7 +132,7 @@ func (client *VirtualMachineExtensionsClient) createOrUpdateCreateRequest(ctx co // BeginDelete - The operation to delete the extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine where the extension should be deleted. // - vmExtensionName - The name of the virtual machine extension. @@ -158,7 +158,7 @@ func (client *VirtualMachineExtensionsClient) BeginDelete(ctx context.Context, r // Delete - The operation to delete the extension. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineExtensionsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmName string, vmExtensionName string, options *VirtualMachineExtensionsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineExtensionsClient.BeginDelete" @@ -204,7 +204,7 @@ func (client *VirtualMachineExtensionsClient) deleteCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -213,7 +213,7 @@ func (client *VirtualMachineExtensionsClient) deleteCreateRequest(ctx context.Co // Get - The operation to get the extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine containing the extension. // - vmExtensionName - The name of the virtual machine extension. @@ -268,7 +268,7 @@ func (client *VirtualMachineExtensionsClient) getCreateRequest(ctx context.Conte if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -286,7 +286,7 @@ func (client *VirtualMachineExtensionsClient) getHandleResponse(resp *http.Respo // List - The operation to get all extensions of a Virtual Machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine containing the extension. // - options - VirtualMachineExtensionsClientListOptions contains the optional parameters for the VirtualMachineExtensionsClient.List @@ -336,7 +336,7 @@ func (client *VirtualMachineExtensionsClient) listCreateRequest(ctx context.Cont if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -354,7 +354,7 @@ func (client *VirtualMachineExtensionsClient) listHandleResponse(resp *http.Resp // BeginUpdate - The operation to update the extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine where the extension should be updated. // - vmExtensionName - The name of the virtual machine extension. @@ -381,7 +381,7 @@ func (client *VirtualMachineExtensionsClient) BeginUpdate(ctx context.Context, r // Update - The operation to update the extension. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineExtensionsClient) update(ctx context.Context, resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtensionUpdate, options *VirtualMachineExtensionsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineExtensionsClient.BeginUpdate" @@ -427,7 +427,7 @@ func (client *VirtualMachineExtensionsClient) updateCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, extensionParameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimages_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimages_client.go index 2e0e80d8b..d2557d0d4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimages_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimages_client.go @@ -48,7 +48,7 @@ func NewVirtualMachineImagesClient(subscriptionID string, credential azcore.Toke // Get - Gets a virtual machine image. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - publisherName - A valid image publisher. // - offer - A valid image publisher offer. @@ -110,7 +110,7 @@ func (client *VirtualMachineImagesClient) getCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -128,7 +128,7 @@ func (client *VirtualMachineImagesClient) getHandleResponse(resp *http.Response) // List - Gets a list of all virtual machine image versions for the specified location, publisher, offer, and SKU. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - publisherName - A valid image publisher. // - offer - A valid image publisher offer. @@ -194,7 +194,7 @@ func (client *VirtualMachineImagesClient) listCreateRequest(ctx context.Context, if options != nil && options.Top != nil { reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -212,7 +212,7 @@ func (client *VirtualMachineImagesClient) listHandleResponse(resp *http.Response // ListByEdgeZone - Gets a list of all virtual machine image versions for the specified edge zone // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - edgeZone - The name of the edge zone. 
// - options - VirtualMachineImagesClientListByEdgeZoneOptions contains the optional parameters for the VirtualMachineImagesClient.ListByEdgeZone @@ -259,7 +259,7 @@ func (client *VirtualMachineImagesClient) listByEdgeZoneCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -277,7 +277,7 @@ func (client *VirtualMachineImagesClient) listByEdgeZoneHandleResponse(resp *htt // ListOffers - Gets a list of virtual machine image offers for the specified location and publisher. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - publisherName - A valid image publisher. // - options - VirtualMachineImagesClientListOffersOptions contains the optional parameters for the VirtualMachineImagesClient.ListOffers @@ -324,7 +324,7 @@ func (client *VirtualMachineImagesClient) listOffersCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -342,7 +342,7 @@ func (client *VirtualMachineImagesClient) listOffersHandleResponse(resp *http.Re // ListPublishers - Gets a list of virtual machine image publishers for the specified Azure location. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - options - VirtualMachineImagesClientListPublishersOptions contains the optional parameters for the VirtualMachineImagesClient.ListPublishers // method. @@ -384,7 +384,7 @@ func (client *VirtualMachineImagesClient) listPublishersCreateRequest(ctx contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -402,7 +402,7 @@ func (client *VirtualMachineImagesClient) listPublishersHandleResponse(resp *htt // ListSKUs - Gets a list of virtual machine image SKUs for the specified location, publisher, and offer. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - publisherName - A valid image publisher. // - offer - A valid image publisher offer. 
@@ -454,7 +454,7 @@ func (client *VirtualMachineImagesClient) listSKUsCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimagesedgezone_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimagesedgezone_client.go index 756fe975e..8558798f5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimagesedgezone_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimagesedgezone_client.go @@ -48,7 +48,7 @@ func NewVirtualMachineImagesEdgeZoneClient(subscriptionID string, credential azc // Get - Gets a virtual machine image in an edge zone. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - edgeZone - The name of the edge zone. // - publisherName - A valid image publisher. @@ -115,7 +115,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) getCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -133,7 +133,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) getHandleResponse(resp *http.R // List - Gets a list of all virtual machine image versions for the specified location, edge zone, publisher, offer, and SKU. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - edgeZone - The name of the edge zone. // - publisherName - A valid image publisher. @@ -204,7 +204,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) listCreateRequest(ctx context. if options != nil && options.Top != nil { reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -222,7 +222,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) listHandleResponse(resp *http. // ListOffers - Gets a list of virtual machine image offers for the specified location, edge zone and publisher. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - edgeZone - The name of the edge zone. // - publisherName - A valid image publisher. 
@@ -274,7 +274,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) listOffersCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -292,7 +292,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) listOffersHandleResponse(resp // ListPublishers - Gets a list of virtual machine image publishers for the specified Azure location and edge zone. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - edgeZone - The name of the edge zone. // - options - VirtualMachineImagesEdgeZoneClientListPublishersOptions contains the optional parameters for the VirtualMachineImagesEdgeZoneClient.ListPublishers @@ -339,7 +339,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) listPublishersCreateRequest(ct return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -357,7 +357,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) listPublishersHandleResponse(r // ListSKUs - Gets a list of virtual machine image SKUs for the specified location, edge zone, publisher, and offer. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The name of a supported Azure region. // - edgeZone - The name of the edge zone. // - publisherName - A valid image publisher. @@ -414,7 +414,7 @@ func (client *VirtualMachineImagesEdgeZoneClient) listSKUsCreateRequest(ctx cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineruncommands_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineruncommands_client.go index 348c59ade..c5ef4894c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineruncommands_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineruncommands_client.go @@ -47,7 +47,7 @@ func NewVirtualMachineRunCommandsClient(subscriptionID string, credential azcore // BeginCreateOrUpdate - The operation to create or update the run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine where the run command should be created or updated. // - runCommandName - The name of the virtual machine run command. @@ -74,7 +74,7 @@ func (client *VirtualMachineRunCommandsClient) BeginCreateOrUpdate(ctx context.C // CreateOrUpdate - The operation to create or update the run command. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineRunCommandsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmName string, runCommandName string, runCommand VirtualMachineRunCommand, options *VirtualMachineRunCommandsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineRunCommandsClient.BeginCreateOrUpdate" @@ -120,7 +120,7 @@ func (client *VirtualMachineRunCommandsClient) createOrUpdateCreateRequest(ctx c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} if err := runtime.MarshalAsJSON(req, runCommand); err != nil { @@ -132,7 +132,7 @@ func (client *VirtualMachineRunCommandsClient) createOrUpdateCreateRequest(ctx c // BeginDelete - The operation to delete the run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine where the run command should be deleted. // - runCommandName - The name of the virtual machine run command. @@ -158,7 +158,7 @@ func (client *VirtualMachineRunCommandsClient) BeginDelete(ctx context.Context, // Delete - The operation to delete the run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineRunCommandsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmName string, runCommandName string, options *VirtualMachineRunCommandsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineRunCommandsClient.BeginDelete" @@ -204,7 +204,7 @@ func (client *VirtualMachineRunCommandsClient) deleteCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} return req, nil @@ -213,7 +213,7 @@ func (client *VirtualMachineRunCommandsClient) deleteCreateRequest(ctx context.C // Get - Gets specific run command for a subscription in a location. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The location upon which run commands is queried. // - commandID - The command id. // - options - VirtualMachineRunCommandsClientGetOptions contains the optional parameters for the VirtualMachineRunCommandsClient.Get @@ -260,7 +260,7 @@ func (client *VirtualMachineRunCommandsClient) getCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} return req, nil @@ -278,7 +278,7 @@ func (client *VirtualMachineRunCommandsClient) getHandleResponse(resp *http.Resp // GetByVirtualMachine - The operation to get the run command. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine containing the run command. // - runCommandName - The name of the virtual machine run command. @@ -333,7 +333,7 @@ func (client *VirtualMachineRunCommandsClient) getByVirtualMachineCreateRequest( if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} return req, nil @@ -350,7 +350,7 @@ func (client *VirtualMachineRunCommandsClient) getByVirtualMachineHandleResponse // NewListPager - Lists all available run commands for a subscription in a location. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The location upon which run commands is queried. // - options - VirtualMachineRunCommandsClientListOptions contains the optional parameters for the VirtualMachineRunCommandsClient.NewListPager // method. @@ -393,7 +393,7 @@ func (client *VirtualMachineRunCommandsClient) listCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} return req, nil @@ -410,7 +410,7 @@ func (client *VirtualMachineRunCommandsClient) listHandleResponse(resp *http.Res // NewListByVirtualMachinePager - The operation to get all run commands of a Virtual Machine. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine containing the run command. // - options - VirtualMachineRunCommandsClientListByVirtualMachineOptions contains the optional parameters for the VirtualMachineRunCommandsClient.NewListByVirtualMachinePager @@ -461,7 +461,7 @@ func (client *VirtualMachineRunCommandsClient) listByVirtualMachineCreateRequest if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} return req, nil @@ -479,7 +479,7 @@ func (client *VirtualMachineRunCommandsClient) listByVirtualMachineHandleRespons // BeginUpdate - The operation to update the run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine where the run command should be updated. // - runCommandName - The name of the virtual machine run command. @@ -506,7 +506,7 @@ func (client *VirtualMachineRunCommandsClient) BeginUpdate(ctx context.Context, // Update - The operation to update the run command. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineRunCommandsClient) update(ctx context.Context, resourceGroupName string, vmName string, runCommandName string, runCommand VirtualMachineRunCommandUpdate, options *VirtualMachineRunCommandsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineRunCommandsClient.BeginUpdate" @@ -552,7 +552,7 @@ func (client *VirtualMachineRunCommandsClient) updateCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} if err := runtime.MarshalAsJSON(req, runCommand); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachines_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachines_client.go index a816e9a9d..dd55aabc3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachines_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachines_client.go @@ -48,7 +48,7 @@ func NewVirtualMachinesClient(subscriptionID string, credential azcore.TokenCred // BeginAssessPatches - Assess patches on the VM. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientBeginAssessPatchesOptions contains the optional parameters for the VirtualMachinesClient.BeginAssessPatches @@ -74,7 +74,7 @@ func (client *VirtualMachinesClient) BeginAssessPatches(ctx context.Context, res // AssessPatches - Assess patches on the VM. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachinesClient) assessPatches(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginAssessPatchesOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginAssessPatches" @@ -116,7 +116,7 @@ func (client *VirtualMachinesClient) assessPatchesCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -125,7 +125,7 @@ func (client *VirtualMachinesClient) assessPatchesCreateRequest(ctx context.Cont // BeginAttachDetachDataDisks - Attach and detach data disks to/from the virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - parameters - Parameters supplied to the attach and detach data disks operation on the virtual machine. @@ -152,7 +152,7 @@ func (client *VirtualMachinesClient) BeginAttachDetachDataDisks(ctx context.Cont // AttachDetachDataDisks - Attach and detach data disks to/from the virtual machine. 
// If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachinesClient) attachDetachDataDisks(ctx context.Context, resourceGroupName string, vmName string, parameters AttachDetachDataDisksRequest, options *VirtualMachinesClientBeginAttachDetachDataDisksOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginAttachDetachDataDisks" @@ -194,7 +194,7 @@ func (client *VirtualMachinesClient) attachDetachDataDisksCreateRequest(ctx cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -207,7 +207,7 @@ func (client *VirtualMachinesClient) attachDetachDataDisksCreateRequest(ctx cont // similar VMs. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - parameters - Parameters supplied to the Capture Virtual Machine operation. @@ -235,7 +235,7 @@ func (client *VirtualMachinesClient) BeginCapture(ctx context.Context, resourceG // VMs. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachinesClient) capture(ctx context.Context, resourceGroupName string, vmName string, parameters VirtualMachineCaptureParameters, options *VirtualMachinesClientBeginCaptureOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginCapture" @@ -277,7 +277,7 @@ func (client *VirtualMachinesClient) captureCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -290,7 +290,7 @@ func (client *VirtualMachinesClient) captureCreateRequest(ctx context.Context, r // before invoking this operation. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientBeginConvertToManagedDisksOptions contains the optional parameters for the VirtualMachinesClient.BeginConvertToManagedDisks @@ -316,7 +316,7 @@ func (client *VirtualMachinesClient) BeginConvertToManagedDisks(ctx context.Cont // before invoking this operation. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachinesClient) convertToManagedDisks(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginConvertToManagedDisksOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginConvertToManagedDisks" @@ -358,7 +358,7 @@ func (client *VirtualMachinesClient) convertToManagedDisksCreateRequest(ctx cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -368,7 +368,7 @@ func (client *VirtualMachinesClient) convertToManagedDisksCreateRequest(ctx cont // during virtual machine creation. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - parameters - Parameters supplied to the Create Virtual Machine operation. @@ -395,7 +395,7 @@ func (client *VirtualMachinesClient) BeginCreateOrUpdate(ctx context.Context, re // virtual machine creation. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachinesClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmName string, parameters VirtualMachine, options *VirtualMachinesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginCreateOrUpdate" @@ -437,7 +437,7 @@ func (client *VirtualMachinesClient) createOrUpdateCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.IfMatch != nil { @@ -456,7 +456,7 @@ func (client *VirtualMachinesClient) createOrUpdateCreateRequest(ctx context.Con // resources that this virtual machine uses. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientBeginDeallocateOptions contains the optional parameters for the VirtualMachinesClient.BeginDeallocate @@ -482,7 +482,7 @@ func (client *VirtualMachinesClient) BeginDeallocate(ctx context.Context, resour // that this virtual machine uses. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachinesClient) deallocate(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginDeallocateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginDeallocate" @@ -524,7 +524,7 @@ func (client *VirtualMachinesClient) deallocateCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") if options != nil && options.Hibernate != nil { reqQP.Set("hibernate", strconv.FormatBool(*options.Hibernate)) } @@ -536,7 +536,7 @@ func (client *VirtualMachinesClient) deallocateCreateRequest(ctx context.Context // BeginDelete - The operation to delete a virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientBeginDeleteOptions contains the optional parameters for the VirtualMachinesClient.BeginDelete @@ -561,7 +561,7 @@ func (client *VirtualMachinesClient) BeginDelete(ctx context.Context, resourceGr // Delete - The operation to delete a virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachinesClient) deleteOperation(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginDelete" @@ -603,7 +603,7 @@ func (client *VirtualMachinesClient) deleteCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") if options != nil && options.ForceDeletion != nil { reqQP.Set("forceDeletion", strconv.FormatBool(*options.ForceDeletion)) } @@ -619,7 +619,7 @@ func (client *VirtualMachinesClient) deleteCreateRequest(ctx context.Context, re // [https://docs.microsoft.com/azure/virtual-machines/linux/capture-image]. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientGeneralizeOptions contains the optional parameters for the VirtualMachinesClient.Generalize @@ -665,7 +665,7 @@ func (client *VirtualMachinesClient) generalizeCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -674,7 +674,7 @@ func (client *VirtualMachinesClient) generalizeCreateRequest(ctx context.Context // Get - Retrieves information about the model view or the instance view of a virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. 
// - options - VirtualMachinesClientGetOptions contains the optional parameters for the VirtualMachinesClient.Get method. @@ -723,7 +723,7 @@ func (client *VirtualMachinesClient) getCreateRequest(ctx context.Context, resou if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -741,7 +741,7 @@ func (client *VirtualMachinesClient) getHandleResponse(resp *http.Response) (Vir // BeginInstallPatches - Installs patches on the VM. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - installPatchesInput - Input for InstallPatches as directly received by the API @@ -768,7 +768,7 @@ func (client *VirtualMachinesClient) BeginInstallPatches(ctx context.Context, re // InstallPatches - Installs patches on the VM. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachinesClient) installPatches(ctx context.Context, resourceGroupName string, vmName string, installPatchesInput VirtualMachineInstallPatchesParameters, options *VirtualMachinesClientBeginInstallPatchesOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginInstallPatches" @@ -810,7 +810,7 @@ func (client *VirtualMachinesClient) installPatchesCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, installPatchesInput); err != nil { @@ -822,7 +822,7 @@ func (client *VirtualMachinesClient) installPatchesCreateRequest(ctx context.Con // InstanceView - Retrieves information about the run-time state of a virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientInstanceViewOptions contains the optional parameters for the VirtualMachinesClient.InstanceView @@ -869,7 +869,7 @@ func (client *VirtualMachinesClient) instanceViewCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -887,7 +887,7 @@ func (client *VirtualMachinesClient) instanceViewHandleResponse(resp *http.Respo // NewListPager - Lists all of the virtual machines in the specified resource group. Use the nextLink property in the response // to get the next page of virtual machines. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - options - VirtualMachinesClientListOptions contains the optional parameters for the VirtualMachinesClient.NewListPager // method. 
@@ -936,7 +936,7 @@ func (client *VirtualMachinesClient) listCreateRequest(ctx context.Context, reso if options != nil && options.Filter != nil { reqQP.Set("$filter", *options.Filter) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -954,7 +954,7 @@ func (client *VirtualMachinesClient) listHandleResponse(resp *http.Response) (Vi // NewListAllPager - Lists all of the virtual machines in the specified subscription. Use the nextLink property in the response // to get the next page of virtual machines. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - options - VirtualMachinesClientListAllOptions contains the optional parameters for the VirtualMachinesClient.NewListAllPager // method. func (client *VirtualMachinesClient) NewListAllPager(options *VirtualMachinesClientListAllOptions) *runtime.Pager[VirtualMachinesClientListAllResponse] { @@ -998,7 +998,7 @@ func (client *VirtualMachinesClient) listAllCreateRequest(ctx context.Context, o if options != nil && options.Filter != nil { reqQP.Set("$filter", *options.Filter) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") if options != nil && options.StatusOnly != nil { reqQP.Set("statusOnly", *options.StatusOnly) } @@ -1018,7 +1018,7 @@ func (client *VirtualMachinesClient) listAllHandleResponse(resp *http.Response) // NewListAvailableSizesPager - Lists all available virtual machine sizes to which the specified virtual machine can be resized. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientListAvailableSizesOptions contains the optional parameters for the VirtualMachinesClient.NewListAvailableSizesPager @@ -1067,7 +1067,7 @@ func (client *VirtualMachinesClient) listAvailableSizesCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1084,7 +1084,7 @@ func (client *VirtualMachinesClient) listAvailableSizesHandleResponse(resp *http // NewListByLocationPager - Gets all the virtual machines under the specified subscription for the specified location. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The location for which virtual machines under the subscription are queried. // - options - VirtualMachinesClientListByLocationOptions contains the optional parameters for the VirtualMachinesClient.NewListByLocationPager // method. @@ -1127,7 +1127,7 @@ func (client *VirtualMachinesClient) listByLocationCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1145,7 +1145,7 @@ func (client *VirtualMachinesClient) listByLocationHandleResponse(resp *http.Res // BeginPerformMaintenance - The operation to perform maintenance on a virtual machine. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientBeginPerformMaintenanceOptions contains the optional parameters for the VirtualMachinesClient.BeginPerformMaintenance @@ -1170,7 +1170,7 @@ func (client *VirtualMachinesClient) BeginPerformMaintenance(ctx context.Context // PerformMaintenance - The operation to perform maintenance on a virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachinesClient) performMaintenance(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginPerformMaintenanceOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginPerformMaintenance" @@ -1212,7 +1212,7 @@ func (client *VirtualMachinesClient) performMaintenanceCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1222,7 +1222,7 @@ func (client *VirtualMachinesClient) performMaintenanceCreateRequest(ctx context // provisioned resources. You are still charged for this virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientBeginPowerOffOptions contains the optional parameters for the VirtualMachinesClient.BeginPowerOff @@ -1248,7 +1248,7 @@ func (client *VirtualMachinesClient) BeginPowerOff(ctx context.Context, resource // resources. You are still charged for this virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachinesClient) powerOff(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginPowerOffOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginPowerOff" @@ -1290,7 +1290,7 @@ func (client *VirtualMachinesClient) powerOffCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") if options != nil && options.SkipShutdown != nil { reqQP.Set("skipShutdown", strconv.FormatBool(*options.SkipShutdown)) } @@ -1302,7 +1302,7 @@ func (client *VirtualMachinesClient) powerOffCreateRequest(ctx context.Context, // BeginReapply - The operation to reapply a virtual machine's state. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. 
// - options - VirtualMachinesClientBeginReapplyOptions contains the optional parameters for the VirtualMachinesClient.BeginReapply @@ -1327,7 +1327,7 @@ func (client *VirtualMachinesClient) BeginReapply(ctx context.Context, resourceG // Reapply - The operation to reapply a virtual machine's state. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachinesClient) reapply(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginReapplyOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginReapply" @@ -1369,7 +1369,7 @@ func (client *VirtualMachinesClient) reapplyCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1378,7 +1378,7 @@ func (client *VirtualMachinesClient) reapplyCreateRequest(ctx context.Context, r // BeginRedeploy - Shuts down the virtual machine, moves it to a new node, and powers it back on. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientBeginRedeployOptions contains the optional parameters for the VirtualMachinesClient.BeginRedeploy @@ -1403,7 +1403,7 @@ func (client *VirtualMachinesClient) BeginRedeploy(ctx context.Context, resource // Redeploy - Shuts down the virtual machine, moves it to a new node, and powers it back on. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachinesClient) redeploy(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginRedeployOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginRedeploy" @@ -1445,7 +1445,7 @@ func (client *VirtualMachinesClient) redeployCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1458,7 +1458,7 @@ func (client *VirtualMachinesClient) redeployCreateRequest(ctx context.Context, // will be deleted after reimage. The deleteOption of the OS disk should be updated accordingly before performing the reimage. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientBeginReimageOptions contains the optional parameters for the VirtualMachinesClient.BeginReimage @@ -1487,7 +1487,7 @@ func (client *VirtualMachinesClient) BeginReimage(ctx context.Context, resourceG // will be deleted after reimage. The deleteOption of the OS disk should be updated accordingly before performing the reimage. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachinesClient) reimage(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginReimageOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginReimage" @@ -1529,7 +1529,7 @@ func (client *VirtualMachinesClient) reimageCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.Parameters != nil { @@ -1544,7 +1544,7 @@ func (client *VirtualMachinesClient) reimageCreateRequest(ctx context.Context, r // BeginRestart - The operation to restart a virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientBeginRestartOptions contains the optional parameters for the VirtualMachinesClient.BeginRestart @@ -1569,7 +1569,7 @@ func (client *VirtualMachinesClient) BeginRestart(ctx context.Context, resourceG // Restart - The operation to restart a virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachinesClient) restart(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginRestartOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginRestart" @@ -1611,7 +1611,7 @@ func (client *VirtualMachinesClient) restartCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1620,7 +1620,7 @@ func (client *VirtualMachinesClient) restartCreateRequest(ctx context.Context, r // RetrieveBootDiagnosticsData - The operation to retrieve SAS URIs for a virtual machine's boot diagnostic logs. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientRetrieveBootDiagnosticsDataOptions contains the optional parameters for the VirtualMachinesClient.RetrieveBootDiagnosticsData @@ -1667,7 +1667,7 @@ func (client *VirtualMachinesClient) retrieveBootDiagnosticsDataCreateRequest(ct return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") if options != nil && options.SasURIExpirationTimeInMinutes != nil { reqQP.Set("sasUriExpirationTimeInMinutes", strconv.FormatInt(int64(*options.SasURIExpirationTimeInMinutes), 10)) } @@ -1688,7 +1688,7 @@ func (client *VirtualMachinesClient) retrieveBootDiagnosticsDataHandleResponse(r // BeginRunCommand - Run command on the VM. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - parameters - Parameters supplied to the Run command operation. @@ -1715,7 +1715,7 @@ func (client *VirtualMachinesClient) BeginRunCommand(ctx context.Context, resour // RunCommand - Run command on the VM. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachinesClient) runCommand(ctx context.Context, resourceGroupName string, vmName string, parameters RunCommandInput, options *VirtualMachinesClientBeginRunCommandOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginRunCommand" @@ -1757,7 +1757,7 @@ func (client *VirtualMachinesClient) runCommandCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -1769,7 +1769,7 @@ func (client *VirtualMachinesClient) runCommandCreateRequest(ctx context.Context // SimulateEviction - The operation to simulate the eviction of spot virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientSimulateEvictionOptions contains the optional parameters for the VirtualMachinesClient.SimulateEviction @@ -1815,7 +1815,7 @@ func (client *VirtualMachinesClient) simulateEvictionCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1824,7 +1824,7 @@ func (client *VirtualMachinesClient) simulateEvictionCreateRequest(ctx context.C // BeginStart - The operation to start a virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - options - VirtualMachinesClientBeginStartOptions contains the optional parameters for the VirtualMachinesClient.BeginStart @@ -1849,7 +1849,7 @@ func (client *VirtualMachinesClient) BeginStart(ctx context.Context, resourceGro // Start - The operation to start a virtual machine. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachinesClient) start(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginStartOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginStart" @@ -1891,7 +1891,7 @@ func (client *VirtualMachinesClient) startCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1900,7 +1900,7 @@ func (client *VirtualMachinesClient) startCreateRequest(ctx context.Context, res // BeginUpdate - The operation to update a virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmName - The name of the virtual machine. // - parameters - Parameters supplied to the Update Virtual Machine operation. @@ -1926,7 +1926,7 @@ func (client *VirtualMachinesClient) BeginUpdate(ctx context.Context, resourceGr // Update - The operation to update a virtual machine. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachinesClient) update(ctx context.Context, resourceGroupName string, vmName string, parameters VirtualMachineUpdate, options *VirtualMachinesClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachinesClient.BeginUpdate" @@ -1968,7 +1968,7 @@ func (client *VirtualMachinesClient) updateCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.IfMatch != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetextensions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetextensions_client.go index 585b42657..81ddfad6a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetextensions_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetextensions_client.go @@ -47,7 +47,7 @@ func NewVirtualMachineScaleSetExtensionsClient(subscriptionID string, credential // BeginCreateOrUpdate - The operation to create or update an extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set where the extension should be create or updated. // - vmssExtensionName - The name of the VM scale set extension. @@ -74,7 +74,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) BeginCreateOrUpdate(ctx co // CreateOrUpdate - The operation to create or update an extension. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetExtensionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmssExtensionName string, extensionParameters VirtualMachineScaleSetExtension, options *VirtualMachineScaleSetExtensionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetExtensionsClient.BeginCreateOrUpdate" @@ -120,7 +120,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) createOrUpdateCreateReques return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, extensionParameters); err != nil { @@ -132,7 +132,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) createOrUpdateCreateReques // BeginDelete - The operation to delete the extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set where the extension should be deleted. // - vmssExtensionName - The name of the VM scale set extension. @@ -158,7 +158,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) BeginDelete(ctx context.Co // Delete - The operation to delete the extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetExtensionsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmssExtensionName string, options *VirtualMachineScaleSetExtensionsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetExtensionsClient.BeginDelete" @@ -204,7 +204,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) deleteCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -213,7 +213,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) deleteCreateRequest(ctx co // Get - The operation to get the extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set containing the extension. // - vmssExtensionName - The name of the VM scale set extension. @@ -268,7 +268,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) getCreateRequest(ctx conte if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -285,7 +285,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) getHandleResponse(resp *ht // NewListPager - Gets a list of all extensions in a VM scale set. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set containing the extension. // - options - VirtualMachineScaleSetExtensionsClientListOptions contains the optional parameters for the VirtualMachineScaleSetExtensionsClient.NewListPager @@ -333,7 +333,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) listCreateRequest(ctx cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -351,7 +351,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) listHandleResponse(resp *h // BeginUpdate - The operation to update an extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set where the extension should be updated. // - vmssExtensionName - The name of the VM scale set extension. @@ -378,7 +378,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) BeginUpdate(ctx context.Co // Update - The operation to update an extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetExtensionsClient) update(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmssExtensionName string, extensionParameters VirtualMachineScaleSetExtensionUpdate, options *VirtualMachineScaleSetExtensionsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetExtensionsClient.BeginUpdate" @@ -424,7 +424,7 @@ func (client *VirtualMachineScaleSetExtensionsClient) updateCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, extensionParameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetrollingupgrades_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetrollingupgrades_client.go index 9ef8a1902..41e01d5d7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetrollingupgrades_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetrollingupgrades_client.go @@ -47,7 +47,7 @@ func NewVirtualMachineScaleSetRollingUpgradesClient(subscriptionID string, crede // BeginCancel - Cancels the current virtual machine scale set rolling upgrade. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. 
// - options - VirtualMachineScaleSetRollingUpgradesClientBeginCancelOptions contains the optional parameters for the VirtualMachineScaleSetRollingUpgradesClient.BeginCancel @@ -72,7 +72,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) BeginCancel(ctx conte // Cancel - Cancels the current virtual machine scale set rolling upgrade. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetRollingUpgradesClient) cancel(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetRollingUpgradesClientBeginCancelOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetRollingUpgradesClient.BeginCancel" @@ -114,7 +114,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) cancelCreateRequest(c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -123,7 +123,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) cancelCreateRequest(c // GetLatest - Gets the status of the latest virtual machine scale set rolling upgrade. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetRollingUpgradesClientGetLatestOptions contains the optional parameters for the VirtualMachineScaleSetRollingUpgradesClient.GetLatest @@ -170,7 +170,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) getLatestCreateReques return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -190,7 +190,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) getLatestHandleRespon // are not affected. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetRollingUpgradesClientBeginStartExtensionUpgradeOptions contains the optional parameters @@ -217,7 +217,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) BeginStartExtensionUp // are not affected. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetRollingUpgradesClient) startExtensionUpgrade(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetRollingUpgradesClientBeginStartExtensionUpgradeOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetRollingUpgradesClient.BeginStartExtensionUpgrade" @@ -259,7 +259,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) startExtensionUpgrade return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -270,7 +270,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) startExtensionUpgrade // affected. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetRollingUpgradesClientBeginStartOSUpgradeOptions contains the optional parameters for the @@ -297,7 +297,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) BeginStartOSUpgrade(c // affected. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetRollingUpgradesClient) startOSUpgrade(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetRollingUpgradesClientBeginStartOSUpgradeOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetRollingUpgradesClient.BeginStartOSUpgrade" @@ -339,7 +339,7 @@ func (client *VirtualMachineScaleSetRollingUpgradesClient) startOSUpgradeCreateR return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesets_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesets_client.go index 81f13d504..a03ff0c50 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesets_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesets_client.go @@ -49,7 +49,7 @@ func NewVirtualMachineScaleSetsClient(subscriptionID string, credential azcore.T // scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginApproveRollingUpgradeOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginApproveRollingUpgrade @@ -75,7 +75,7 @@ func (client *VirtualMachineScaleSetsClient) BeginApproveRollingUpgrade(ctx cont // set. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) approveRollingUpgrade(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginApproveRollingUpgradeOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginApproveRollingUpgrade" @@ -117,7 +117,7 @@ func (client *VirtualMachineScaleSetsClient) approveRollingUpgradeCreateRequest( return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMInstanceIDs != nil { @@ -132,7 +132,7 @@ func (client *VirtualMachineScaleSetsClient) approveRollingUpgradeCreateRequest( // ConvertToSinglePlacementGroup - Converts SinglePlacementGroup property to false for a existing virtual machine scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the virtual machine scale set to create or update. // - parameters - The input object for ConvertToSinglePlacementGroup API. @@ -179,7 +179,7 @@ func (client *VirtualMachineScaleSetsClient) convertToSinglePlacementGroupCreate return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -191,7 +191,7 @@ func (client *VirtualMachineScaleSetsClient) convertToSinglePlacementGroupCreate // BeginCreateOrUpdate - Create or update a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set to create or update. // - parameters - The scale set object. @@ -217,7 +217,7 @@ func (client *VirtualMachineScaleSetsClient) BeginCreateOrUpdate(ctx context.Con // CreateOrUpdate - Create or update a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters VirtualMachineScaleSet, options *VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginCreateOrUpdate" @@ -259,7 +259,7 @@ func (client *VirtualMachineScaleSetsClient) createOrUpdateCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.IfMatch != nil { @@ -279,7 +279,7 @@ func (client *VirtualMachineScaleSetsClient) createOrUpdateCreateRequest(ctx con // scale set deallocates. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginDeallocateOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginDeallocate @@ -306,7 +306,7 @@ func (client *VirtualMachineScaleSetsClient) BeginDeallocate(ctx context.Context // scale set deallocates. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) deallocate(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginDeallocateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginDeallocate" @@ -348,7 +348,7 @@ func (client *VirtualMachineScaleSetsClient) deallocateCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") if options != nil && options.Hibernate != nil { reqQP.Set("hibernate", strconv.FormatBool(*options.Hibernate)) } @@ -366,7 +366,7 @@ func (client *VirtualMachineScaleSetsClient) deallocateCreateRequest(ctx context // BeginDelete - Deletes a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginDeleteOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginDelete @@ -391,7 +391,7 @@ func (client *VirtualMachineScaleSetsClient) BeginDelete(ctx context.Context, re // Delete - Deletes a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginDelete" @@ -433,7 +433,7 @@ func (client *VirtualMachineScaleSetsClient) deleteCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") if options != nil && options.ForceDeletion != nil { reqQP.Set("forceDeletion", strconv.FormatBool(*options.ForceDeletion)) } @@ -445,7 +445,7 @@ func (client *VirtualMachineScaleSetsClient) deleteCreateRequest(ctx context.Con // BeginDeleteInstances - Deletes virtual machines in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - vmInstanceIDs - A list of virtual machine instance IDs from the VM scale set. @@ -471,7 +471,7 @@ func (client *VirtualMachineScaleSetsClient) BeginDeleteInstances(ctx context.Co // DeleteInstances - Deletes virtual machines in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) deleteInstances(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, options *VirtualMachineScaleSetsClientBeginDeleteInstancesOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginDeleteInstances" @@ -513,7 +513,7 @@ func (client *VirtualMachineScaleSetsClient) deleteInstancesCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") if options != nil && options.ForceDeletion != nil { reqQP.Set("forceDeletion", strconv.FormatBool(*options.ForceDeletion)) } @@ -529,7 +529,7 @@ func (client *VirtualMachineScaleSetsClient) deleteInstancesCreateRequest(ctx co // service fabric virtual machine scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - platformUpdateDomain - The platform update domain for which a manual recovery walk is requested @@ -577,7 +577,7 @@ func (client *VirtualMachineScaleSetsClient) forceRecoveryServiceFabricPlatformU return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") if options != nil && options.PlacementGroupID != nil { reqQP.Set("placementGroupId", *options.PlacementGroupID) } @@ -602,7 +602,7 @@ func (client *VirtualMachineScaleSetsClient) forceRecoveryServiceFabricPlatformU // Get - Display information about a virtual machine scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientGetOptions contains the optional parameters for the VirtualMachineScaleSetsClient.Get @@ -652,7 +652,7 @@ func (client *VirtualMachineScaleSetsClient) getCreateRequest(ctx context.Contex if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -670,7 +670,7 @@ func (client *VirtualMachineScaleSetsClient) getHandleResponse(resp *http.Respon // GetInstanceView - Gets the status of a VM scale set instance. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. 
// - options - VirtualMachineScaleSetsClientGetInstanceViewOptions contains the optional parameters for the VirtualMachineScaleSetsClient.GetInstanceView @@ -717,7 +717,7 @@ func (client *VirtualMachineScaleSetsClient) getInstanceViewCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -734,7 +734,7 @@ func (client *VirtualMachineScaleSetsClient) getInstanceViewHandleResponse(resp // NewGetOSUpgradeHistoryPager - Gets list of OS upgrades on a VM scale set instance. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientGetOSUpgradeHistoryOptions contains the optional parameters for the VirtualMachineScaleSetsClient.NewGetOSUpgradeHistoryPager @@ -782,7 +782,7 @@ func (client *VirtualMachineScaleSetsClient) getOSUpgradeHistoryCreateRequest(ct return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -799,7 +799,7 @@ func (client *VirtualMachineScaleSetsClient) getOSUpgradeHistoryHandleResponse(r // NewListPager - Gets a list of all VM scale sets under a resource group. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - options - VirtualMachineScaleSetsClientListOptions contains the optional parameters for the VirtualMachineScaleSetsClient.NewListPager // method. @@ -842,7 +842,7 @@ func (client *VirtualMachineScaleSetsClient) listCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -861,7 +861,7 @@ func (client *VirtualMachineScaleSetsClient) listHandleResponse(resp *http.Respo // nextLink property in the response to get the next page of VM Scale Sets. Do this till nextLink is // null to fetch all the VM Scale Sets. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - options - VirtualMachineScaleSetsClientListAllOptions contains the optional parameters for the VirtualMachineScaleSetsClient.NewListAllPager // method. func (client *VirtualMachineScaleSetsClient) NewListAllPager(options *VirtualMachineScaleSetsClientListAllOptions) *runtime.Pager[VirtualMachineScaleSetsClientListAllResponse] { @@ -899,7 +899,7 @@ func (client *VirtualMachineScaleSetsClient) listAllCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -916,7 +916,7 @@ func (client *VirtualMachineScaleSetsClient) listAllHandleResponse(resp *http.Re // NewListByLocationPager - Gets all the VM scale sets under the specified subscription for the specified location. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The location for which VM scale sets under the subscription are queried. // - options - VirtualMachineScaleSetsClientListByLocationOptions contains the optional parameters for the VirtualMachineScaleSetsClient.NewListByLocationPager // method. @@ -959,7 +959,7 @@ func (client *VirtualMachineScaleSetsClient) listByLocationCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -977,7 +977,7 @@ func (client *VirtualMachineScaleSetsClient) listByLocationHandleResponse(resp * // NewListSKUsPager - Gets a list of SKUs available for your VM scale set, including the minimum and maximum VM instances // allowed for each SKU. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientListSKUsOptions contains the optional parameters for the VirtualMachineScaleSetsClient.NewListSKUsPager @@ -1025,7 +1025,7 @@ func (client *VirtualMachineScaleSetsClient) listSKUsCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1045,7 +1045,7 @@ func (client *VirtualMachineScaleSetsClient) listSKUsHandleResponse(resp *http.R // details: https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-maintenance-notifications // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginPerformMaintenanceOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginPerformMaintenance @@ -1072,7 +1072,7 @@ func (client *VirtualMachineScaleSetsClient) BeginPerformMaintenance(ctx context // details: https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-maintenance-notifications // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) performMaintenance(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginPerformMaintenanceOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginPerformMaintenance" @@ -1114,7 +1114,7 @@ func (client *VirtualMachineScaleSetsClient) performMaintenanceCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMInstanceIDs != nil { @@ -1131,7 +1131,7 @@ func (client *VirtualMachineScaleSetsClient) performMaintenanceCreateRequest(ctx // avoid charges. 
// If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginPowerOffOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginPowerOff @@ -1158,7 +1158,7 @@ func (client *VirtualMachineScaleSetsClient) BeginPowerOff(ctx context.Context, // avoid charges. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) powerOff(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginPowerOffOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginPowerOff" @@ -1200,7 +1200,7 @@ func (client *VirtualMachineScaleSetsClient) powerOffCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") if options != nil && options.SkipShutdown != nil { reqQP.Set("skipShutdown", strconv.FormatBool(*options.SkipShutdown)) } @@ -1218,7 +1218,7 @@ func (client *VirtualMachineScaleSetsClient) powerOffCreateRequest(ctx context.C // BeginReapply - Reapplies the Virtual Machine Scale Set Virtual Machine Profile to the Virtual Machine Instances // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginReapplyOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginReapply @@ -1244,7 +1244,7 @@ func (client *VirtualMachineScaleSetsClient) BeginReapply(ctx context.Context, r // Reapply - Reapplies the Virtual Machine Scale Set Virtual Machine Profile to the Virtual Machine Instances // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) reapply(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginReapplyOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginReapply" @@ -1286,7 +1286,7 @@ func (client *VirtualMachineScaleSetsClient) reapplyCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1296,7 +1296,7 @@ func (client *VirtualMachineScaleSetsClient) reapplyCreateRequest(ctx context.Co // them back on. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. 
// - options - VirtualMachineScaleSetsClientBeginRedeployOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginRedeploy @@ -1322,7 +1322,7 @@ func (client *VirtualMachineScaleSetsClient) BeginRedeploy(ctx context.Context, // back on. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) redeploy(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginRedeployOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginRedeploy" @@ -1364,7 +1364,7 @@ func (client *VirtualMachineScaleSetsClient) redeployCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMInstanceIDs != nil { @@ -1381,7 +1381,7 @@ func (client *VirtualMachineScaleSetsClient) redeployCreateRequest(ctx context.C // reset to initial state. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginReimageOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginReimage @@ -1408,7 +1408,7 @@ func (client *VirtualMachineScaleSetsClient) BeginReimage(ctx context.Context, r // reset to initial state. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) reimage(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginReimageOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginReimage" @@ -1450,7 +1450,7 @@ func (client *VirtualMachineScaleSetsClient) reimageCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMScaleSetReimageInput != nil { @@ -1466,7 +1466,7 @@ func (client *VirtualMachineScaleSetsClient) reimageCreateRequest(ctx context.Co // is only supported for managed disks. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginReimageAllOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginReimageAll @@ -1492,7 +1492,7 @@ func (client *VirtualMachineScaleSetsClient) BeginReimageAll(ctx context.Context // is only supported for managed disks. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) reimageAll(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginReimageAllOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginReimageAll" @@ -1534,7 +1534,7 @@ func (client *VirtualMachineScaleSetsClient) reimageAllCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMInstanceIDs != nil { @@ -1549,7 +1549,7 @@ func (client *VirtualMachineScaleSetsClient) reimageAllCreateRequest(ctx context // BeginRestart - Restarts one or more virtual machines in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginRestartOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginRestart @@ -1574,7 +1574,7 @@ func (client *VirtualMachineScaleSetsClient) BeginRestart(ctx context.Context, r // Restart - Restarts one or more virtual machines in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) restart(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginRestartOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginRestart" @@ -1616,7 +1616,7 @@ func (client *VirtualMachineScaleSetsClient) restartCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMInstanceIDs != nil { @@ -1631,7 +1631,7 @@ func (client *VirtualMachineScaleSetsClient) restartCreateRequest(ctx context.Co // BeginSetOrchestrationServiceState - Changes ServiceState property for a given service // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the virtual machine scale set to create or update. // - parameters - The input object for SetOrchestrationServiceState API. @@ -1657,7 +1657,7 @@ func (client *VirtualMachineScaleSetsClient) BeginSetOrchestrationServiceState(c // SetOrchestrationServiceState - Changes ServiceState property for a given service // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) setOrchestrationServiceState(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters OrchestrationServiceStateInput, options *VirtualMachineScaleSetsClientBeginSetOrchestrationServiceStateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginSetOrchestrationServiceState" @@ -1699,7 +1699,7 @@ func (client *VirtualMachineScaleSetsClient) setOrchestrationServiceStateCreateR return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -1711,7 +1711,7 @@ func (client *VirtualMachineScaleSetsClient) setOrchestrationServiceStateCreateR // BeginStart - Starts one or more virtual machines in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - options - VirtualMachineScaleSetsClientBeginStartOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginStart @@ -1736,7 +1736,7 @@ func (client *VirtualMachineScaleSetsClient) BeginStart(ctx context.Context, res // Start - Starts one or more virtual machines in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) start(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginStartOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginStart" @@ -1778,7 +1778,7 @@ func (client *VirtualMachineScaleSetsClient) startCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMInstanceIDs != nil { @@ -1793,7 +1793,7 @@ func (client *VirtualMachineScaleSetsClient) startCreateRequest(ctx context.Cont // BeginUpdate - Update a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set to create or update. // - parameters - The scale set object. @@ -1819,7 +1819,7 @@ func (client *VirtualMachineScaleSetsClient) BeginUpdate(ctx context.Context, re // Update - Update a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) update(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters VirtualMachineScaleSetUpdate, options *VirtualMachineScaleSetsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginUpdate" @@ -1861,7 +1861,7 @@ func (client *VirtualMachineScaleSetsClient) updateCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.IfMatch != nil { @@ -1879,7 +1879,7 @@ func (client *VirtualMachineScaleSetsClient) updateCreateRequest(ctx context.Con // BeginUpdateInstances - Upgrades one or more virtual machines to the latest SKU set in the VM scale set model. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - vmInstanceIDs - A list of virtual machine instance IDs from the VM scale set. @@ -1905,7 +1905,7 @@ func (client *VirtualMachineScaleSetsClient) BeginUpdateInstances(ctx context.Co // UpdateInstances - Upgrades one or more virtual machines to the latest SKU set in the VM scale set model. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetsClient) updateInstances(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, options *VirtualMachineScaleSetsClientBeginUpdateInstancesOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetsClient.BeginUpdateInstances" @@ -1947,7 +1947,7 @@ func (client *VirtualMachineScaleSetsClient) updateInstancesCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, vmInstanceIDs); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmextensions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmextensions_client.go index ff2347623..c16821d83 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmextensions_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmextensions_client.go @@ -47,7 +47,7 @@ func NewVirtualMachineScaleSetVMExtensionsClient(subscriptionID string, credenti // BeginCreateOrUpdate - The operation to create or update the VMSS VM extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. 
// - instanceID - The instance ID of the virtual machine. @@ -75,7 +75,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) BeginCreateOrUpdate(ctx // CreateOrUpdate - The operation to create or update the VMSS VM extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMExtensionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, vmExtensionName string, extensionParameters VirtualMachineScaleSetVMExtension, options *VirtualMachineScaleSetVMExtensionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMExtensionsClient.BeginCreateOrUpdate" @@ -125,7 +125,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) createOrUpdateCreateRequ return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, extensionParameters); err != nil { @@ -137,7 +137,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) createOrUpdateCreateRequ // BeginDelete - The operation to delete the VMSS VM extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -164,7 +164,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) BeginDelete(ctx context. // Delete - The operation to delete the VMSS VM extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMExtensionsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, vmExtensionName string, options *VirtualMachineScaleSetVMExtensionsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMExtensionsClient.BeginDelete" @@ -214,7 +214,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) deleteCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -223,7 +223,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) deleteCreateRequest(ctx // Get - The operation to get the VMSS VM extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. 
@@ -283,7 +283,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) getCreateRequest(ctx con if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -301,7 +301,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) getHandleResponse(resp * // List - The operation to get all extensions of an instance in Virtual Machine Scaleset. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -356,7 +356,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) listCreateRequest(ctx co if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -374,7 +374,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) listHandleResponse(resp // BeginUpdate - The operation to update the VMSS VM extension. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -402,7 +402,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) BeginUpdate(ctx context. // Update - The operation to update the VMSS VM extension. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMExtensionsClient) update(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, vmExtensionName string, extensionParameters VirtualMachineScaleSetVMExtensionUpdate, options *VirtualMachineScaleSetVMExtensionsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMExtensionsClient.BeginUpdate" @@ -452,7 +452,7 @@ func (client *VirtualMachineScaleSetVMExtensionsClient) updateCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, extensionParameters); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmruncommands_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmruncommands_client.go index 9736973fc..85e67fc55 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmruncommands_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmruncommands_client.go @@ -47,7 +47,7 @@ func NewVirtualMachineScaleSetVMRunCommandsClient(subscriptionID string, credent // BeginCreateOrUpdate - The operation to create or update the VMSS VM run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -75,7 +75,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) BeginCreateOrUpdate(ctx // CreateOrUpdate - The operation to create or update the VMSS VM run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMRunCommandsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, runCommand VirtualMachineRunCommand, options *VirtualMachineScaleSetVMRunCommandsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMRunCommandsClient.BeginCreateOrUpdate" @@ -125,7 +125,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) createOrUpdateCreateReq return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} if err := runtime.MarshalAsJSON(req, runCommand); err != nil { @@ -137,7 +137,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) createOrUpdateCreateReq // BeginDelete - The operation to delete the VMSS VM run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. 
// - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -164,7 +164,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) BeginDelete(ctx context // Delete - The operation to delete the VMSS VM run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMRunCommandsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, options *VirtualMachineScaleSetVMRunCommandsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMRunCommandsClient.BeginDelete" @@ -214,7 +214,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) deleteCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} return req, nil @@ -223,7 +223,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) deleteCreateRequest(ctx // Get - The operation to get the VMSS VM run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -283,7 +283,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) getCreateRequest(ctx co if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} return req, nil @@ -300,7 +300,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) getHandleResponse(resp // NewListPager - The operation to get all run commands of an instance in Virtual Machine Scaleset. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -356,7 +356,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) listCreateRequest(ctx c if options != nil && options.Expand != nil { reqQP.Set("$expand", *options.Expand) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} return req, nil @@ -374,7 +374,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) listHandleResponse(resp // BeginUpdate - The operation to update the VMSS VM run command. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -402,7 +402,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) BeginUpdate(ctx context // Update - The operation to update the VMSS VM run command. 
// If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMRunCommandsClient) update(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, runCommand VirtualMachineRunCommandUpdate, options *VirtualMachineScaleSetVMRunCommandsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMRunCommandsClient.BeginUpdate" @@ -452,7 +452,7 @@ func (client *VirtualMachineScaleSetVMRunCommandsClient) updateCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} if err := runtime.MarshalAsJSON(req, runCommand); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvms_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvms_client.go index 0bd6fc83a..379ae96b2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvms_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvms_client.go @@ -48,7 +48,7 @@ func NewVirtualMachineScaleSetVMsClient(subscriptionID string, credential azcore // BeginApproveRollingUpgrade - Approve upgrade on deferred rolling upgrade for OS disk on a VM scale set instance. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -74,7 +74,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginApproveRollingUpgrade(ctx co // ApproveRollingUpgrade - Approve upgrade on deferred rolling upgrade for OS disk on a VM scale set instance. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) approveRollingUpgrade(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginApproveRollingUpgradeOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginApproveRollingUpgrade" @@ -120,7 +120,7 @@ func (client *VirtualMachineScaleSetVMsClient) approveRollingUpgradeCreateReques return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -129,7 +129,7 @@ func (client *VirtualMachineScaleSetVMsClient) approveRollingUpgradeCreateReques // BeginAttachDetachDataDisks - Attach and detach data disks to/from a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. 
// - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -157,7 +157,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginAttachDetachDataDisks(ctx co // AttachDetachDataDisks - Attach and detach data disks to/from a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) attachDetachDataDisks(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, parameters AttachDetachDataDisksRequest, options *VirtualMachineScaleSetVMsClientBeginAttachDetachDataDisksOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginAttachDetachDataDisks" @@ -203,7 +203,7 @@ func (client *VirtualMachineScaleSetVMsClient) attachDetachDataDisksCreateReques return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -217,7 +217,7 @@ func (client *VirtualMachineScaleSetVMsClient) attachDetachDataDisksCreateReques // machine once it is deallocated. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -245,7 +245,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginDeallocate(ctx context.Conte // machine once it is deallocated. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) deallocate(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginDeallocateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginDeallocate" @@ -291,7 +291,7 @@ func (client *VirtualMachineScaleSetVMsClient) deallocateCreateRequest(ctx conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -300,7 +300,7 @@ func (client *VirtualMachineScaleSetVMsClient) deallocateCreateRequest(ctx conte // BeginDelete - Deletes a virtual machine from a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -326,7 +326,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginDelete(ctx context.Context, // Delete - Deletes a virtual machine from a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginDelete" @@ -372,7 +372,7 @@ func (client *VirtualMachineScaleSetVMsClient) deleteCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") if options != nil && options.ForceDeletion != nil { reqQP.Set("forceDeletion", strconv.FormatBool(*options.ForceDeletion)) } @@ -384,7 +384,7 @@ func (client *VirtualMachineScaleSetVMsClient) deleteCreateRequest(ctx context.C // Get - Gets a virtual machine from a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -439,7 +439,7 @@ func (client *VirtualMachineScaleSetVMsClient) getCreateRequest(ctx context.Cont if options != nil && options.Expand != nil { reqQP.Set("$expand", string(*options.Expand)) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -457,7 +457,7 @@ func (client *VirtualMachineScaleSetVMsClient) getHandleResponse(resp *http.Resp // GetInstanceView - Gets the status of a virtual machine from a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -509,7 +509,7 @@ func (client *VirtualMachineScaleSetVMsClient) getInstanceViewCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -526,7 +526,7 @@ func (client *VirtualMachineScaleSetVMsClient) getInstanceViewHandleResponse(res // NewListPager - Gets a list of all virtual machines in a VM scale sets. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - virtualMachineScaleSetName - The name of the VM scale set. 
// - options - VirtualMachineScaleSetVMsClientListOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.NewListPager @@ -583,7 +583,7 @@ func (client *VirtualMachineScaleSetVMsClient) listCreateRequest(ctx context.Con if options != nil && options.Select != nil { reqQP.Set("$select", *options.Select) } - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -601,7 +601,7 @@ func (client *VirtualMachineScaleSetVMsClient) listHandleResponse(resp *http.Res // BeginPerformMaintenance - Performs maintenance on a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -627,7 +627,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginPerformMaintenance(ctx conte // PerformMaintenance - Performs maintenance on a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) performMaintenance(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginPerformMaintenanceOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginPerformMaintenance" @@ -673,7 +673,7 @@ func (client *VirtualMachineScaleSetVMsClient) performMaintenanceCreateRequest(c return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -684,7 +684,7 @@ func (client *VirtualMachineScaleSetVMsClient) performMaintenanceCreateRequest(c // charges. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -712,7 +712,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginPowerOff(ctx context.Context // charges. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) powerOff(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginPowerOffOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginPowerOff" @@ -758,7 +758,7 @@ func (client *VirtualMachineScaleSetVMsClient) powerOffCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") if options != nil && options.SkipShutdown != nil { reqQP.Set("skipShutdown", strconv.FormatBool(*options.SkipShutdown)) } @@ -771,7 +771,7 @@ func (client *VirtualMachineScaleSetVMsClient) powerOffCreateRequest(ctx context // back on. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -798,7 +798,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginRedeploy(ctx context.Context // on. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) redeploy(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginRedeployOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginRedeploy" @@ -844,7 +844,7 @@ func (client *VirtualMachineScaleSetVMsClient) redeployCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -853,7 +853,7 @@ func (client *VirtualMachineScaleSetVMsClient) redeployCreateRequest(ctx context // BeginReimage - Reimages (upgrade the operating system) a specific virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -879,7 +879,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginReimage(ctx context.Context, // Reimage - Reimages (upgrade the operating system) a specific virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) reimage(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginReimageOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginReimage" @@ -925,7 +925,7 @@ func (client *VirtualMachineScaleSetVMsClient) reimageCreateRequest(ctx context. 
return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.VMScaleSetVMReimageInput != nil { @@ -941,7 +941,7 @@ func (client *VirtualMachineScaleSetVMsClient) reimageCreateRequest(ctx context. // is only supported for managed disks. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -968,7 +968,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginReimageAll(ctx context.Conte // is only supported for managed disks. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) reimageAll(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginReimageAllOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginReimageAll" @@ -1014,7 +1014,7 @@ func (client *VirtualMachineScaleSetVMsClient) reimageAllCreateRequest(ctx conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1023,7 +1023,7 @@ func (client *VirtualMachineScaleSetVMsClient) reimageAllCreateRequest(ctx conte // BeginRestart - Restarts a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -1049,7 +1049,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginRestart(ctx context.Context, // Restart - Restarts a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) restart(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginRestartOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginRestart" @@ -1095,7 +1095,7 @@ func (client *VirtualMachineScaleSetVMsClient) restartCreateRequest(ctx context. return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1105,7 +1105,7 @@ func (client *VirtualMachineScaleSetVMsClient) restartCreateRequest(ctx context. // scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. 
// - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -1157,7 +1157,7 @@ func (client *VirtualMachineScaleSetVMsClient) retrieveBootDiagnosticsDataCreate return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") if options != nil && options.SasURIExpirationTimeInMinutes != nil { reqQP.Set("sasUriExpirationTimeInMinutes", strconv.FormatInt(int64(*options.SasURIExpirationTimeInMinutes), 10)) } @@ -1178,7 +1178,7 @@ func (client *VirtualMachineScaleSetVMsClient) retrieveBootDiagnosticsDataHandle // BeginRunCommand - Run command on a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -1206,7 +1206,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginRunCommand(ctx context.Conte // RunCommand - Run command on a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) runCommand(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, parameters RunCommandInput, options *VirtualMachineScaleSetVMsClientBeginRunCommandOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginRunCommand" @@ -1252,7 +1252,7 @@ func (client *VirtualMachineScaleSetVMsClient) runCommandCreateRequest(ctx conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json, text/json"} if err := runtime.MarshalAsJSON(req, parameters); err != nil { @@ -1264,7 +1264,7 @@ func (client *VirtualMachineScaleSetVMsClient) runCommandCreateRequest(ctx conte // SimulateEviction - The operation to simulate the eviction of spot virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. @@ -1315,7 +1315,7 @@ func (client *VirtualMachineScaleSetVMsClient) simulateEvictionCreateRequest(ctx return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1324,7 +1324,7 @@ func (client *VirtualMachineScaleSetVMsClient) simulateEvictionCreateRequest(ctx // BeginStart - Starts a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set. // - instanceID - The instance ID of the virtual machine. 
@@ -1350,7 +1350,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginStart(ctx context.Context, r // Start - Starts a virtual machine in a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) start(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginStartOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginStart" @@ -1396,7 +1396,7 @@ func (client *VirtualMachineScaleSetVMsClient) startCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1405,7 +1405,7 @@ func (client *VirtualMachineScaleSetVMsClient) startCreateRequest(ctx context.Co // BeginUpdate - Updates a virtual machine of a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - resourceGroupName - The name of the resource group. // - vmScaleSetName - The name of the VM scale set where the extension should be create or updated. // - instanceID - The instance ID of the virtual machine. @@ -1432,7 +1432,7 @@ func (client *VirtualMachineScaleSetVMsClient) BeginUpdate(ctx context.Context, // Update - Updates a virtual machine of a VM scale set. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 func (client *VirtualMachineScaleSetVMsClient) update(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, parameters VirtualMachineScaleSetVM, options *VirtualMachineScaleSetVMsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "VirtualMachineScaleSetVMsClient.BeginUpdate" @@ -1478,7 +1478,7 @@ func (client *VirtualMachineScaleSetVMsClient) updateCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.IfMatch != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinesizes_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinesizes_client.go index 02aadc8c2..80a7316ac 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinesizes_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinesizes_client.go @@ -46,7 +46,7 @@ func NewVirtualMachineSizesClient(subscriptionID string, credential azcore.Token // NewListPager - This API is deprecated. Use Resources Skus [https://docs.microsoft.com/rest/api/compute/resourceskus/list] // -// Generated from API version 2023-09-01 +// Generated from API version 2024-03-01 // - location - The location upon which virtual-machine-sizes is queried. 
// - options - VirtualMachineSizesClientListOptions contains the optional parameters for the VirtualMachineSizesClient.NewListPager // method. @@ -90,7 +90,7 @@ func (client *VirtualMachineSizesClient) listCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01") + reqQP.Set("api-version", "2024-03-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/vendor/modules.txt b/vendor/modules.txt index ef2af23bc..94c8f8536 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -38,7 +38,7 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azidentity github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal -# github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 +# github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/internal/diag github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo @@ -53,7 +53,7 @@ github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets # github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal -# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 +# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 # github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerregistry/armcontainerregistry v1.2.0 @@ -1619,7 +1619,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/util/string sigs.k8s.io/cloud-provider-azure/pkg/util/taints sigs.k8s.io/cloud-provider-azure/pkg/util/vm sigs.k8s.io/cloud-provider-azure/pkg/version -# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.14 +# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.15 ## explicit; go 1.20 sigs.k8s.io/cloud-provider-azure/pkg/azclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient From e048007c2468d3fb834c93f40ef32be53dc6214f Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Wed, 24 Apr 2024 21:14:03 +0800 Subject: [PATCH 139/157] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d8020729f..817b6b631 100644 --- a/README.md +++ b/README.md @@ -43,7 +43,7 @@ This option does not depend on cloud provider config file, supports cross subscr > > Execute following command to install a specific version of blobfuse v2 once driver is running on the agent node: > ```console -> kubectl patch daemonset csi-blob-node -n kube-system -p '{"spec":{"template":{"spec":{"initContainers":[{"env":[{"name":"INSTALL_BLOBFUSE2","value":"true"},{"name":"BLOBFUSE2_VERSION","value":"2.2.0"}],"name":"install-blobfuse-proxy"}]}}}}' +> kubectl patch daemonset csi-blob-node -n kube-system -p '{"spec":{"template":{"spec":{"initContainers":[{"env":[{"name":"INSTALL_BLOBFUSE2","value":"true"},{"name":"BLOBFUSE2_VERSION","value":"2.2.1"}],"name":"install-blobfuse-proxy"}]}}}}' > ``` > > Execute following command to install a specific version of blobfuse v1 once driver is running on the agent node: From 2990452826dd2ee71a70c0807496df142c2458f9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 Apr 2024 16:09:35 +0000 Subject: [PATCH 140/157] chore(deps): bump golangci/golangci-lint-action from 4 to 5 
Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 4 to 5. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/v4...v5) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/static.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/static.yaml b/.github/workflows/static.yaml index 20a0524c4..f82e447c6 100644 --- a/.github/workflows/static.yaml +++ b/.github/workflows/static.yaml @@ -13,7 +13,7 @@ jobs: go-version: ^1.19 - uses: actions/checkout@master - name: Run linter - uses: golangci/golangci-lint-action@v4 + uses: golangci/golangci-lint-action@v5 with: version: v1.54 args: -E=gofmt,unused,ineffassign,revive,misspell,exportloopref,asciicheck,bodyclose,depguard,dogsled,durationcheck,errname,forbidigo -D=staticcheck --timeout=30m0s From d1cc4a4beaf0bdbebd5ed09aa9613fa0c5406a30 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Mon, 29 Apr 2024 06:33:42 +0000 Subject: [PATCH 141/157] fix: CVE-2024-3177 --- go.mod | 74 ++++++++-------- go.sum | 64 +++++++------- vendor/k8s.io/api/core/v1/generated.proto | 2 +- vendor/k8s.io/api/core/v1/types.go | 2 +- .../core/v1/types_swagger_doc_generated.go | 2 +- .../pkg/util/httpstream/wsstream/conn.go | 2 +- .../pkg/admission/plugin/cel/composition.go | 2 +- .../controller_reconcile.go | 5 +- .../apiserver/pkg/cel/environment/base.go | 16 ++-- .../apiserver/pkg/features/kube_features.go | 17 ++++ .../apiserver/pkg/storage/cacher/cacher.go | 15 +++- .../pkg/storage/cacher/lister_watcher.go | 30 +++++-- .../storage/cacher/watch_cache_interval.go | 17 ++++ .../pkg/storage/cacher/watch_progress.go | 9 +- .../tools/remotecommand/websocket.go | 23 ++++- .../metrics/prometheus/slis/metrics.go | 1 + .../storage/volume/helpers.go | 14 +++ .../k8s.io/kubernetes/pkg/apis/core/types.go | 2 +- .../pkg/apis/core/validation/validation.go | 42 ++++++++- .../kubernetes/pkg/features/kube_features.go | 8 +- .../k8s.io/kubernetes/pkg/volume/plugins.go | 2 +- .../kubernetes/pkg/volume/util/types/types.go | 17 ++++ .../kubernetes/test/utils/image/manifest.go | 6 +- vendor/modules.txt | 88 +++++++++---------- 24 files changed, 308 insertions(+), 152 deletions(-) diff --git a/go.mod b/go.mod index 474b0edbe..7866eab26 100644 --- a/go.mod +++ b/go.mod @@ -32,10 +32,10 @@ require ( k8s.io/api v0.30.0 k8s.io/apimachinery v0.30.0 k8s.io/client-go v0.30.0 - k8s.io/component-base v0.29.3 + k8s.io/component-base v0.29.4 k8s.io/klog/v2 v2.120.1 - k8s.io/kubernetes v1.29.0 - k8s.io/mount-utils v0.29.0 + k8s.io/kubernetes v1.29.4 + k8s.io/mount-utils v0.29.4 k8s.io/utils v0.0.0-20231127182322-b307cd553661 sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240418020948-86cfc443b48c sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.7 @@ -146,15 +146,15 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.0.0 // indirect - k8s.io/apiserver v0.29.3 + k8s.io/apiserver v0.29.4 k8s.io/cloud-provider v0.29.3 // indirect - k8s.io/component-helpers v0.29.3 // indirect - k8s.io/controller-manager v0.29.3 // indirect - k8s.io/kms v0.29.3 // indirect + k8s.io/component-helpers v0.29.4 // indirect + k8s.io/controller-manager v0.29.4 // indirect + k8s.io/kms v0.29.4 // indirect k8s.io/kube-openapi 
v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/kubectl v0.0.0 // indirect k8s.io/kubelet v0.29.3 // indirect - k8s.io/pod-security-admission v0.29.0 + k8s.io/pod-security-admission v0.29.4 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.15 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect @@ -170,33 +170,33 @@ require ( ) replace ( - k8s.io/api => k8s.io/api v0.29.0 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.29.0 - k8s.io/apimachinery => k8s.io/apimachinery v0.29.0 - k8s.io/apiserver => k8s.io/apiserver v0.29.0 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.29.0 - k8s.io/client-go => k8s.io/client-go v0.29.0 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.29.0 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.29.0 - k8s.io/code-generator => k8s.io/code-generator v0.29.0 - k8s.io/component-base => k8s.io/component-base v0.29.0 - k8s.io/component-helpers => k8s.io/component-helpers v0.29.0 - k8s.io/controller-manager => k8s.io/controller-manager v0.29.0 - k8s.io/cri-api => k8s.io/cri-api v0.29.0 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.29.0 - k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.29.0 - k8s.io/endpointslice => k8s.io/endpointslice v0.29.0 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.29.0 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.29.0 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.29.0 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.29.0 - k8s.io/kubectl => k8s.io/kubectl v0.29.0 - k8s.io/kubelet => k8s.io/kubelet v0.29.0 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.29.0 - k8s.io/metrics => k8s.io/metrics v0.29.0 - k8s.io/mount-utils => k8s.io/mount-utils v0.29.0 - k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.29.0 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.29.0 - k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.29.0 - k8s.io/sample-controller => k8s.io/sample-controller v0.29.0 + k8s.io/api => k8s.io/api v0.29.4 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.29.4 + k8s.io/apimachinery => k8s.io/apimachinery v0.29.4 + k8s.io/apiserver => k8s.io/apiserver v0.29.4 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.29.4 + k8s.io/client-go => k8s.io/client-go v0.29.4 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.29.4 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.29.4 + k8s.io/code-generator => k8s.io/code-generator v0.29.4 + k8s.io/component-base => k8s.io/component-base v0.29.4 + k8s.io/component-helpers => k8s.io/component-helpers v0.29.4 + k8s.io/controller-manager => k8s.io/controller-manager v0.29.4 + k8s.io/cri-api => k8s.io/cri-api v0.29.4 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.29.4 + k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.29.4 + k8s.io/endpointslice => k8s.io/endpointslice v0.29.4 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.29.4 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.29.4 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.29.4 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.29.4 + k8s.io/kubectl => k8s.io/kubectl v0.29.4 + k8s.io/kubelet => k8s.io/kubelet v0.29.4 + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.29.4 + k8s.io/metrics => k8s.io/metrics v0.29.4 + k8s.io/mount-utils => k8s.io/mount-utils v0.29.4 + k8s.io/pod-security-admission => 
k8s.io/pod-security-admission v0.29.4 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.29.4 + k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.29.4 + k8s.io/sample-controller => k8s.io/sample-controller v0.29.4 ) diff --git a/go.sum b/go.sum index cf8636711..a23546a05 100644 --- a/go.sum +++ b/go.sum @@ -426,42 +426,42 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= -k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA= -k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3Q4lV0= -k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc= -k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o= -k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= -k8s.io/apiserver v0.29.0 h1:Y1xEMjJkP+BIi0GSEv1BBrf1jLU9UPfAnnGGbbDdp7o= -k8s.io/apiserver v0.29.0/go.mod h1:31n78PsRKPmfpee7/l9NYEv67u6hOL6AfcE761HapDM= -k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= -k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= -k8s.io/cloud-provider v0.29.0 h1:Qgk/jHsSKGRk/ltTlN6e7eaNuuamLROOzVBd0RPp94M= -k8s.io/cloud-provider v0.29.0/go.mod h1:gBCt7YYKFV4oUcJ/0xF9lS/9il4MxKunJ+ZKvh39WGo= -k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s= -k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M= -k8s.io/component-helpers v0.29.0 h1:Y8W70NGeitKxWwhsPo/vEQbQx5VqJV+3xfLpP3V1VxU= -k8s.io/component-helpers v0.29.0/go.mod h1:j2coxVfmzTOXWSE6sta0MTgNSr572Dcx68F6DD+8fWc= -k8s.io/controller-manager v0.29.0 h1:kEv9sKLnjDkoSqeouWp2lZ8P33an5wrDJpOMqoyD7pc= -k8s.io/controller-manager v0.29.0/go.mod h1:UKtadWkULF5bfX7vu3hHppzY/hz88C03t70GItg/x08= -k8s.io/csi-translation-lib v0.29.0 h1:we4X1yUlDikvm5Rv0dwMuPHNw6KwjwsQiAuOPWXha8M= -k8s.io/csi-translation-lib v0.29.0/go.mod h1:Cp6t3CNBSm1dXS17V8IImUjkqfIB6KCj8Fs8wf6uyTA= +k8s.io/api v0.29.4 h1:WEnF/XdxuCxdG3ayHNRR8yH3cI1B/llkWBma6bq4R3w= +k8s.io/api v0.29.4/go.mod h1:DetSv0t4FBTcEpfA84NJV3g9a7+rSzlUHk5ADAYHUv0= +k8s.io/apiextensions-apiserver v0.29.4 h1:M7hbuHU/ckbibR7yPbe6DyNWgTFKNmZDbdZKD8q1Smk= +k8s.io/apiextensions-apiserver v0.29.4/go.mod h1:TTDC9fB+0kHY2rogf5hgBR03KBKCwED+GHUsXGpR7SM= +k8s.io/apimachinery v0.29.4 h1:RaFdJiDmuKs/8cm1M6Dh1Kvyh59YQFDcFuFTSmXes6Q= +k8s.io/apimachinery v0.29.4/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= +k8s.io/apiserver v0.29.4 h1:wPwGOO58GQOpRiZu59P5eRoDcB7QtV+QBglkRiXwCiM= +k8s.io/apiserver v0.29.4/go.mod h1:VqTF9t98HVfhKZVRohCPezsdUt9u2g3bHKftxGcXoRo= +k8s.io/client-go v0.29.4 h1:79ytIedxVfyXV8rpH3jCBW0u+un0fxHDwX5F9K8dPR8= +k8s.io/client-go v0.29.4/go.mod h1:kC1thZQ4zQWYwldsfI088BbK6RkxK+aF5ebV8y9Q4tk= +k8s.io/cloud-provider v0.29.4 h1:XRKl818NKQWan4UZ7fXFAkEJLJkBFq5sQROrbLbryM4= +k8s.io/cloud-provider v0.29.4/go.mod h1:sC7wyt5z5IRoNuU9JhEuMdRnhDEz6cWAJCyMcNUH9X8= +k8s.io/component-base v0.29.4 h1:xeKzuuHI/1tjleu5jycDAcYbhAxeGHCQBZUY2eRIkOo= +k8s.io/component-base v0.29.4/go.mod h1:pYjt+oEZP9gtmwSikwAJgfSBikqKX2gOqRat0QjmQt0= +k8s.io/component-helpers v0.29.4 h1:lbVFhywtv64KlaIYTKszkHaFAqwCjNn7xyRTeWorzfI= 
+k8s.io/component-helpers v0.29.4/go.mod h1:rMOVMGYEju7/GKMV0USfYAYJBIQdxlMMN1VFl/Mf2so= +k8s.io/controller-manager v0.29.4 h1:rzEwLboRTXBZhYUY02nNhORHQlcXGDE3EPS2IZRd0cg= +k8s.io/controller-manager v0.29.4/go.mod h1:XG6oraSxieDl6XBdO2HnkA6DwEfoCKS3OCpqO4Xb0zU= +k8s.io/csi-translation-lib v0.29.4 h1:ad0SlFsd0iB3PyXiAVVN4KxsTqgmPEHQyYGbJVQP1rA= +k8s.io/csi-translation-lib v0.29.4/go.mod h1:xdLMENgzc213O3qba2fWYPgBv3JiPqRfjsRanu2Te64= k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kms v0.29.3 h1:ReljsAUhYlm2spdT4yXmY+9a8x8dc/OT4mXvwQPPteQ= -k8s.io/kms v0.29.3/go.mod h1:TBGbJKpRUMk59neTMDMddjIDL+D4HuFUbpuiuzmOPg0= +k8s.io/kms v0.29.4 h1:cFGEoCLwoXk/eqYZppLZxybCdmEWeRKMCbm9f13IdRQ= +k8s.io/kms v0.29.4/go.mod h1:vWVImKkJd+1BQY4tBwdfSwjQBiLrnbNtHADcDEDQFtk= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/kubectl v0.29.0 h1:Oqi48gXjikDhrBF67AYuZRTcJV4lg2l42GmvsP7FmYI= -k8s.io/kubectl v0.29.0/go.mod h1:0jMjGWIcMIQzmUaMgAzhSELv5WtHo2a8pq67DtviAJs= -k8s.io/kubelet v0.29.0 h1:SX5hlznTBcGIrS1scaf8r8p6m3e475KMifwt9i12iOk= -k8s.io/kubelet v0.29.0/go.mod h1:kvKS2+Bz2tgDOG1S1q0TH2z1DasNuVF+8p6Aw7xvKkI= -k8s.io/kubernetes v1.29.0 h1:DOLN7g8+nnAYBi8JHoW0+/MCrZKDPIqAxzLCXDXd0cg= -k8s.io/kubernetes v1.29.0/go.mod h1:9kztbUQf9stVDcIYXx+BX3nuGCsAQDsuClkGMpPs3pA= -k8s.io/mount-utils v0.29.0 h1:KcUE0bFHONQC10V3SuLWQ6+l8nmJggw9lKLpDftIshI= -k8s.io/mount-utils v0.29.0/go.mod h1:N3lDK/G1B8R/IkAt4NhHyqB07OqEr7P763z3TNge94U= -k8s.io/pod-security-admission v0.29.0 h1:tY/ldtkbBCulMYVSWg6ZDLlgDYDWy6rLj8e/AgmwSj4= -k8s.io/pod-security-admission v0.29.0/go.mod h1:bGIeKCzU0Q0Nl185NHmqcMCiOjTcqTrBfAQaeupwq0E= +k8s.io/kubectl v0.29.4 h1:2LFrAznoDZjN8JFMSUcuhER5o+yjTLzWWbOiDzVjmd8= +k8s.io/kubectl v0.29.4/go.mod h1:YTKRF9y1/ccqZ2bnpOWaJD8V7johKqZR/qOMq+0pfxU= +k8s.io/kubelet v0.29.4 h1:6fTt4sTd5xqTtIhVoS7PkiFUBevQsyu3ZmENVvwY62M= +k8s.io/kubelet v0.29.4/go.mod h1:lAu6Z17pxKwgM+9hsgGkqFjYTOhbc0dnZ6GNnlbjYW0= +k8s.io/kubernetes v1.29.4 h1:n4VCbX9cUhxHI+zw+m2iZlzT73/mrEJBHIMeauh9g4U= +k8s.io/kubernetes v1.29.4/go.mod h1:28sDhcb87LX5z3GWAKYmLrhrifxi4W9bEWua4DRTIvk= +k8s.io/mount-utils v0.29.4 h1:tW/URea4gtXlaVW7VObr52NQhS+z3SXTg1GUaFZjRL4= +k8s.io/mount-utils v0.29.4/go.mod h1:SHUMR9n3b6tLgEmlyT36cL6fV6Sjwa5CJhc0guCXvb0= +k8s.io/pod-security-admission v0.29.4 h1:XatfG2zbye9SRaHQhE7EdiIu462ak3TctnkvdrUVk7I= +k8s.io/pod-security-admission v0.29.4/go.mod h1:PNErt3eRnzVx2zxIdYmgk7vBos5Qm4c8U5QXKvXFfxQ= k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI= k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index cf9b6e6eb..d099238cd 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -3286,7 +3286,7 @@ message PersistentVolumeStatus { // lastPhaseTransitionTime is the time the phase transitioned from one to another // and automatically resets to current time everytime a volume phase transitions. - // This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature. 
+ // This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default). // +featureGate=PersistentVolumeLastPhaseTransitionTime // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastPhaseTransitionTime = 4; diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index 1aade3806..61ba21bca 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -423,7 +423,7 @@ type PersistentVolumeStatus struct { Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` // lastPhaseTransitionTime is the time the phase transitioned from one to another // and automatically resets to current time everytime a volume phase transitions. - // This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature. + // This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default). // +featureGate=PersistentVolumeLastPhaseTransitionTime // +optional LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastPhaseTransitionTime"` diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 01152a096..fd6f7dc61 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -1478,7 +1478,7 @@ var map_PersistentVolumeStatus = map[string]string{ "phase": "phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase", "message": "message is a human-readable message indicating details about why the volume is in this state.", "reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", - "lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature.", + "lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. 
This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default).", } func (PersistentVolumeStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn.go index 7cfdd0632..8a741936a 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn.go +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn.go @@ -344,7 +344,7 @@ func (conn *Conn) handle(ws *websocket.Conn) { continue } if _, err := conn.channels[channel].DataFromSocket(data); err != nil { - klog.Errorf("Unable to write frame to %d: %v\n%s", channel, err, string(data)) + klog.Errorf("Unable to write frame (%d bytes) to %d: %v", len(data), channel, err) continue } } diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go index 646c640fc..2dbfa0991 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go @@ -178,7 +178,7 @@ func (a *variableAccessor) Callback(_ *lazy.MapValue) ref.Val { return types.NewErr("composited variable %q fails to compile: %v", a.name, a.result.Error) } - v, details, err := a.result.Program.Eval(a.activation) + v, details, err := a.result.Program.ContextEval(a.context, a.activation) if details == nil { return types.NewErr("unable to get evaluation details of variable %q", a.name) } diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go index b2624694c..9cd3c01ae 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go @@ -180,8 +180,9 @@ func (c *policyController) reconcilePolicyDefinitionSpec(namespace, name string, celmetrics.Metrics.ObserveDefinition(context.TODO(), "active", "deny") } - // Skip reconcile if the spec of the definition is unchanged - if info.lastReconciledValue != nil && definition != nil && + // Skip reconcile if the spec of the definition is unchanged and had a + // successful previous sync + if info.configurationError == nil && info.lastReconciledValue != nil && definition != nil && apiequality.Semantic.DeepEqual(info.lastReconciledValue.Spec, definition.Spec) { return nil } diff --git a/vendor/k8s.io/apiserver/pkg/cel/environment/base.go b/vendor/k8s.io/apiserver/pkg/cel/environment/base.go index 76a0bccee..0c1dee82d 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/environment/base.go +++ b/vendor/k8s.io/apiserver/pkg/cel/environment/base.go @@ -62,10 +62,18 @@ var baseOpts = []VersionedOptions{ library.URLs(), library.Regex(), library.Lists(), + + // cel-go v0.17.7 change the cost of has() from 0 to 1, but also provided the CostEstimatorOptions option to preserve the old behavior, so we enabled it at the same time we bumped our cel version to v0.17.7. + // Since it is a regression fix, we apply it uniformly to all code use v0.17.7. 
+ cel.CostEstimatorOptions(checker.PresenceTestHasCost(false)), }, ProgramOptions: []cel.ProgramOption{ cel.EvalOptions(cel.OptOptimize, cel.OptTrackCost), cel.CostLimit(celconfig.PerCallLimit), + + // cel-go v0.17.7 change the cost of has() from 0 to 1, but also provided the CostEstimatorOptions option to preserve the old behavior, so we enabled it at the same time we bumped our cel version to v0.17.7. + // Since it is a regression fix, we apply it uniformly to all code use v0.17.7. + cel.CostTrackerOptions(interpreter.PresenceTestHasCost(false)), }, }, { @@ -113,14 +121,6 @@ var baseOpts = []VersionedOptions{ IntroducedVersion: version.MajorMinor(1, 29), EnvOptions: []cel.EnvOption{ ext.Sets(), - // cel-go v0.17.7 introduced CostEstimatorOptions. - // Previous the presence has a cost of 0 but cel fixed it to 1. We still set to 0 here to avoid breaking changes. - cel.CostEstimatorOptions(checker.PresenceTestHasCost(false)), - }, - ProgramOptions: []cel.ProgramOption{ - // cel-go v0.17.7 introduced CostTrackerOptions. - // Previous the presence has a cost of 0 but cel fixed it to 1. We still set to 0 here to avoid breaking changes. - cel.CostTrackerOptions(interpreter.PresenceTestHasCost(false)), }, }, } diff --git a/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiserver/pkg/features/kube_features.go index f059cef9b..e524e0c64 100644 --- a/vendor/k8s.io/apiserver/pkg/features/kube_features.go +++ b/vendor/k8s.io/apiserver/pkg/features/kube_features.go @@ -163,6 +163,13 @@ const ( // Deprecates and removes SelfLink from ObjectMeta and ListMeta. RemoveSelfLink featuregate.Feature = "RemoveSelfLink" + // owner: @serathius + // beta: v1.30 + // + // Allow watch cache to create a watch on a dedicated RPC. + // This prevents watch cache from being starved by other watches. + SeparateCacheWatchRPC featuregate.Feature = "SeparateCacheWatchRPC" + // owner: @apelisse, @lavalamp // alpha: v1.14 // beta: v1.16 @@ -233,6 +240,12 @@ const ( // Enables support for watch bookmark events. WatchBookmark featuregate.Feature = "WatchBookmark" + // owner: @serathius + // beta: 1.30 + // Enables watches without resourceVersion to be served from storage. + // Used to prevent https://github.com/kubernetes/kubernetes/issues/123072 until etcd fixes the issue. 
+ WatchFromStorageWithoutResourceVersion featuregate.Feature = "WatchFromStorageWithoutResourceVersion" + // owner: @vinaykul // kep: http://kep.k8s.io/1287 // alpha: v1.27 @@ -303,6 +316,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS RemoveSelfLink: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + SeparateCacheWatchRPC: {Default: true, PreRelease: featuregate.Beta}, + ServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 ServerSideFieldValidation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 @@ -319,6 +334,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + WatchFromStorageWithoutResourceVersion: {Default: false, PreRelease: featuregate.Beta}, + InPlacePodVerticalScaling: {Default: false, PreRelease: featuregate.Alpha}, WatchList: {Default: false, PreRelease: featuregate.Alpha}, diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go index 4f4080441..900f300cd 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go @@ -25,6 +25,7 @@ import ( "time" "go.opentelemetry.io/otel/attribute" + "google.golang.org/grpc/metadata" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -397,10 +398,18 @@ func NewCacherFromConfig(config Config) (*Cacher, error) { // so that future reuse does not get a spurious timeout. <-cacher.timer.C } - progressRequester := newConditionalProgressRequester(config.Storage.RequestWatchProgress, config.Clock) + var contextMetadata metadata.MD + if utilfeature.DefaultFeatureGate.Enabled(features.SeparateCacheWatchRPC) { + // Add grpc context metadata to watch and progress notify requests done by cacher to: + // * Prevent starvation of watch opened by cacher, by moving it to separate Watch RPC than watch request that bypass cacher. + // * Ensure that progress notification requests are executed on the same Watch RPC as their watch, which is required for it to work. 
+ contextMetadata = metadata.New(map[string]string{"source": "cache"}) + } + + progressRequester := newConditionalProgressRequester(config.Storage.RequestWatchProgress, config.Clock, contextMetadata) watchCache := newWatchCache( config.KeyFunc, cacher.processEvent, config.GetAttrsFunc, config.Versioner, config.Indexers, config.Clock, config.GroupResource, progressRequester) - listerWatcher := NewListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc) + listerWatcher := NewListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc, contextMetadata) reflectorName := "storage/cacher.go:" + config.ResourcePrefix reflector := cache.NewNamedReflector(reflectorName, listerWatcher, obj, watchCache, 0) @@ -513,7 +522,7 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions if !utilfeature.DefaultFeatureGate.Enabled(features.WatchList) && opts.SendInitialEvents != nil { opts.SendInitialEvents = nil } - if opts.SendInitialEvents == nil && opts.ResourceVersion == "" { + if utilfeature.DefaultFeatureGate.Enabled(features.WatchFromStorageWithoutResourceVersion) && opts.SendInitialEvents == nil && opts.ResourceVersion == "" { return c.storage.Watch(ctx, key, opts) } requestedWatchRV, err := c.versioner.ParseResourceVersion(opts.ResourceVersion) diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/lister_watcher.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/lister_watcher.go index 1252e5e34..2817a93dd 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/lister_watcher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/lister_watcher.go @@ -19,6 +19,8 @@ package cacher import ( "context" + "google.golang.org/grpc/metadata" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" @@ -30,17 +32,19 @@ import ( // listerWatcher opaques storage.Interface to expose cache.ListerWatcher. type listerWatcher struct { - storage storage.Interface - resourcePrefix string - newListFunc func() runtime.Object + storage storage.Interface + resourcePrefix string + newListFunc func() runtime.Object + contextMetadata metadata.MD } // NewListerWatcher returns a storage.Interface backed ListerWatcher. 
-func NewListerWatcher(storage storage.Interface, resourcePrefix string, newListFunc func() runtime.Object) cache.ListerWatcher { +func NewListerWatcher(storage storage.Interface, resourcePrefix string, newListFunc func() runtime.Object, contextMetadata metadata.MD) cache.ListerWatcher { return &listerWatcher{ - storage: storage, - resourcePrefix: resourcePrefix, - newListFunc: newListFunc, + storage: storage, + resourcePrefix: resourcePrefix, + newListFunc: newListFunc, + contextMetadata: contextMetadata, } } @@ -59,7 +63,11 @@ func (lw *listerWatcher) List(options metav1.ListOptions) (runtime.Object, error Predicate: pred, Recursive: true, } - if err := lw.storage.GetList(context.TODO(), lw.resourcePrefix, storageOpts, list); err != nil { + ctx := context.Background() + if lw.contextMetadata != nil { + ctx = metadata.NewOutgoingContext(ctx, lw.contextMetadata) + } + if err := lw.storage.GetList(ctx, lw.resourcePrefix, storageOpts, list); err != nil { return nil, err } return list, nil @@ -73,5 +81,9 @@ func (lw *listerWatcher) Watch(options metav1.ListOptions) (watch.Interface, err Recursive: true, ProgressNotify: true, } - return lw.storage.Watch(context.TODO(), lw.resourcePrefix, opts) + ctx := context.Background() + if lw.contextMetadata != nil { + ctx = metadata.NewOutgoingContext(ctx, lw.contextMetadata) + } + return lw.storage.Watch(ctx, lw.resourcePrefix, opts) } diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go index c455357e0..2b57dd165 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go @@ -18,6 +18,7 @@ package cacher import ( "fmt" + "sort" "sync" "k8s.io/apimachinery/pkg/fields" @@ -114,9 +115,24 @@ func newCacheInterval(startIndex, endIndex int, indexer indexerFunc, indexValida } } +type sortableWatchCacheEvents []*watchCacheEvent + +func (s sortableWatchCacheEvents) Len() int { + return len(s) +} + +func (s sortableWatchCacheEvents) Less(i, j int) bool { + return s[i].Key < s[j].Key +} + +func (s sortableWatchCacheEvents) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + // newCacheIntervalFromStore is meant to handle the case of rv=0, such that the events // returned by Next() need to be events from a List() done on the underlying store of // the watch cache. +// The items returned in the interval will be sorted by Key. func newCacheIntervalFromStore(resourceVersion uint64, store cache.Indexer, getAttrsFunc attrFunc) (*watchCacheInterval, error) { buffer := &watchCacheIntervalBuffer{} allItems := store.List() @@ -140,6 +156,7 @@ func newCacheIntervalFromStore(resourceVersion uint64, store cache.Indexer, getA } buffer.endIndex++ } + sort.Sort(sortableWatchCacheEvents(buffer.buffer)) ci := &watchCacheInterval{ startIndex: 0, // Simulate that we already have all the events we're looking for. 
diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_progress.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_progress.go index f44ca9325..13f50bc18 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_progress.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_progress.go @@ -21,6 +21,8 @@ import ( "sync" "time" + "google.golang.org/grpc/metadata" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" @@ -34,10 +36,11 @@ const ( progressRequestPeriod = 100 * time.Millisecond ) -func newConditionalProgressRequester(requestWatchProgress WatchProgressRequester, clock TickerFactory) *conditionalProgressRequester { +func newConditionalProgressRequester(requestWatchProgress WatchProgressRequester, clock TickerFactory, contextMetadata metadata.MD) *conditionalProgressRequester { pr := &conditionalProgressRequester{ clock: clock, requestWatchProgress: requestWatchProgress, + contextMetadata: contextMetadata, } pr.cond = sync.NewCond(pr.mux.RLocker()) return pr @@ -54,6 +57,7 @@ type TickerFactory interface { type conditionalProgressRequester struct { clock TickerFactory requestWatchProgress WatchProgressRequester + contextMetadata metadata.MD mux sync.RWMutex cond *sync.Cond @@ -63,6 +67,9 @@ type conditionalProgressRequester struct { func (pr *conditionalProgressRequester) Run(stopCh <-chan struct{}) { ctx := wait.ContextForChannel(stopCh) + if pr.contextMetadata != nil { + ctx = metadata.NewOutgoingContext(ctx, pr.contextMetadata) + } go func() { defer utilruntime.HandleCrash() <-stopCh diff --git a/vendor/k8s.io/client-go/tools/remotecommand/websocket.go b/vendor/k8s.io/client-go/tools/remotecommand/websocket.go index a60986dec..49ef4717c 100644 --- a/vendor/k8s.io/client-go/tools/remotecommand/websocket.go +++ b/vendor/k8s.io/client-go/tools/remotecommand/websocket.go @@ -187,6 +187,9 @@ type wsStreamCreator struct { // map of stream id to stream; multiple streams read/write the connection streams map[byte]*stream streamsMu sync.Mutex + // setStreamErr holds the error to return to anyone calling setStreams. + // this is populated in closeAllStreamReaders + setStreamErr error } func newWSStreamCreator(conn *gwebsocket.Conn) *wsStreamCreator { @@ -202,10 +205,14 @@ func (c *wsStreamCreator) getStream(id byte) *stream { return c.streams[id] } -func (c *wsStreamCreator) setStream(id byte, s *stream) { +func (c *wsStreamCreator) setStream(id byte, s *stream) error { c.streamsMu.Lock() defer c.streamsMu.Unlock() + if c.setStreamErr != nil { + return c.setStreamErr + } c.streams[id] = s + return nil } // CreateStream uses id from passed headers to create a stream over "c.conn" connection. @@ -228,7 +235,11 @@ func (c *wsStreamCreator) CreateStream(headers http.Header) (httpstream.Stream, connWriteLock: &c.connWriteLock, id: id, } - c.setStream(id, s) + if err := c.setStream(id, s); err != nil { + _ = s.writePipe.Close() + _ = s.readPipe.Close() + return nil, err + } return s, nil } @@ -312,7 +323,7 @@ func (c *wsStreamCreator) readDemuxLoop(bufferSize int, period time.Duration, de } // closeAllStreamReaders closes readers in all streams. -// This unblocks all stream.Read() calls. +// This unblocks all stream.Read() calls, and keeps any future streams from being created. 
func (c *wsStreamCreator) closeAllStreamReaders(err error) { c.streamsMu.Lock() defer c.streamsMu.Unlock() @@ -320,6 +331,12 @@ func (c *wsStreamCreator) closeAllStreamReaders(err error) { // Closing writePipe unblocks all readPipe.Read() callers and prevents any future writes. _ = s.writePipe.CloseWithError(err) } + // ensure callers to setStreams receive an error after this point + if err != nil { + c.setStreamErr = err + } else { + c.setStreamErr = fmt.Errorf("closed all streams") + } } type stream struct { diff --git a/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go b/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go index 3d464d12d..39cd2ba28 100644 --- a/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go +++ b/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go @@ -57,6 +57,7 @@ var ( func Register(registry k8smetrics.KubeRegistry) { registry.Register(healthcheck) registry.Register(healthchecksTotal) + _ = k8smetrics.RegisterProcessStartTime(registry.Register) } func ResetHealthMetrics() { diff --git a/vendor/k8s.io/component-helpers/storage/volume/helpers.go b/vendor/k8s.io/component-helpers/storage/volume/helpers.go index 7ec376f34..5068c6546 100644 --- a/vendor/k8s.io/component-helpers/storage/volume/helpers.go +++ b/vendor/k8s.io/component-helpers/storage/volume/helpers.go @@ -24,6 +24,20 @@ import ( "k8s.io/component-helpers/scheduling/corev1" ) +// PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field. +func PersistentVolumeClaimHasClass(claim *v1.PersistentVolumeClaim) bool { + // Use beta annotation first + if _, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found { + return true + } + + if claim.Spec.StorageClassName != nil { + return true + } + + return false +} + // GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was // requested, it returns "". func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string { diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/types.go b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go index fa1242b8a..6a3f888ab 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go @@ -392,7 +392,7 @@ type PersistentVolumeStatus struct { Reason string // LastPhaseTransitionTime is the time the phase transitioned from one to another // and automatically resets to current time everytime a volume phase transitions. - // This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature. + // This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default). 
// +featureGate=PersistentVolumeLastPhaseTransitionTime // +optional LastPhaseTransitionTime *metav1.Time diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go index a6f7fef30..1885531f8 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go @@ -5141,6 +5141,46 @@ func ValidateContainerStateTransition(newStatuses, oldStatuses []core.ContainerS return allErrs } +// ValidateInitContainerStateTransition test to if any illegal init container state transitions are being attempted +func ValidateInitContainerStateTransition(newStatuses, oldStatuses []core.ContainerStatus, fldpath *field.Path, podSpec *core.PodSpec) field.ErrorList { + allErrs := field.ErrorList{} + // If we should always restart, containers are allowed to leave the terminated state + if podSpec.RestartPolicy == core.RestartPolicyAlways { + return allErrs + } + for i, oldStatus := range oldStatuses { + // Skip any container that is not terminated + if oldStatus.State.Terminated == nil { + continue + } + // Skip any container that failed but is allowed to restart + if oldStatus.State.Terminated.ExitCode != 0 && podSpec.RestartPolicy == core.RestartPolicyOnFailure { + continue + } + + // Skip any restartable init container that is allowed to restart + isRestartableInitContainer := false + for _, c := range podSpec.InitContainers { + if oldStatus.Name == c.Name { + if c.RestartPolicy != nil && *c.RestartPolicy == core.ContainerRestartPolicyAlways { + isRestartableInitContainer = true + } + break + } + } + if isRestartableInitContainer { + continue + } + + for _, newStatus := range newStatuses { + if oldStatus.Name == newStatus.Name && newStatus.State.Terminated == nil { + allErrs = append(allErrs, field.Forbidden(fldpath.Index(i).Child("state"), "may not be transitioned to non-terminated state")) + } + } + } + return allErrs +} + // ValidatePodStatusUpdate checks for changes to status that shouldn't occur in normal operation. func ValidatePodStatusUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) field.ErrorList { fldPath := field.NewPath("metadata") @@ -5162,7 +5202,7 @@ func ValidatePodStatusUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions // If pod should not restart, make sure the status update does not transition // any terminated containers to a non-terminated state. allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.ContainerStatuses, oldPod.Status.ContainerStatuses, fldPath.Child("containerStatuses"), oldPod.Spec.RestartPolicy)...) - allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.InitContainerStatuses, oldPod.Status.InitContainerStatuses, fldPath.Child("initContainerStatuses"), oldPod.Spec.RestartPolicy)...) + allErrs = append(allErrs, ValidateInitContainerStateTransition(newPod.Status.InitContainerStatuses, oldPod.Status.InitContainerStatuses, fldPath.Child("initContainerStatuses"), &oldPod.Spec)...) // The kubelet will never restart ephemeral containers, so treat them like they have an implicit RestartPolicyNever. allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.EphemeralContainerStatuses, oldPod.Status.EphemeralContainerStatuses, fldPath.Child("ephemeralContainerStatuses"), core.RestartPolicyNever)...) 
allErrs = append(allErrs, validatePodResourceClaimStatuses(newPod.Status.ResourceClaimStatuses, newPod.Spec.ResourceClaims, fldPath.Child("resourceClaimStatuses"))...) diff --git a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go index edd33feb7..b0b12d6eb 100644 --- a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go +++ b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go @@ -259,7 +259,6 @@ const ( // owner: @harche // kep: http://kep.k8s.io/3386 // alpha: v1.25 - // beta: v1.27 // // Allows using event-driven PLEG (pod lifecycle event generator) through kubelet // which avoids frequent relisting of containers which helps optimize performance. @@ -617,6 +616,7 @@ const ( // owner: @RomanBednar // kep: https://kep.k8s.io/3762 // alpha: v1.28 + // beta: v1.29 // // Adds a new field to persistent volumes which holds a timestamp of when the volume last transitioned its phase. PersistentVolumeLastPhaseTransitionTime featuregate.Feature = "PersistentVolumeLastPhaseTransitionTime" @@ -1048,7 +1048,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS DynamicResourceAllocation: {Default: false, PreRelease: featuregate.Alpha}, - EventedPLEG: {Default: false, PreRelease: featuregate.Beta}, // off by default, requires CRI Runtime support + EventedPLEG: {Default: false, PreRelease: featuregate.Alpha}, ExecProbeTimeout: {Default: true, PreRelease: featuregate.GA}, // lock to default and remove after v1.22 based on KEP #1972 update @@ -1263,6 +1263,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS genericfeatures.OpenAPIEnums: {Default: true, PreRelease: featuregate.Beta}, + genericfeatures.SeparateCacheWatchRPC: {Default: true, PreRelease: featuregate.Beta}, + genericfeatures.ServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 genericfeatures.ServerSideFieldValidation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 @@ -1273,6 +1275,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS genericfeatures.ZeroLimitedNominalConcurrencyShares: {Default: false, PreRelease: featuregate.Beta}, + genericfeatures.WatchFromStorageWithoutResourceVersion: {Default: false, PreRelease: featuregate.Beta}, + // inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: diff --git a/vendor/k8s.io/kubernetes/pkg/volume/plugins.go b/vendor/k8s.io/kubernetes/pkg/volume/plugins.go index 94c2330af..6ce01755f 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/plugins.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/plugins.go @@ -1064,7 +1064,7 @@ func NewPersistentVolumeRecyclerPodTemplate() *v1.Pod { Containers: []v1.Container{ { Name: "pv-recycler", - Image: "registry.k8s.io/build-image/debian-base:bookworm-v1.0.0", + Image: "registry.k8s.io/build-image/debian-base:bookworm-v1.0.2", Command: []string{"/bin/sh"}, Args: []string{"-c", "test -e /scrub && find /scrub -mindepth 1 -delete && test -z \"$(ls -A /scrub)\" || exit 1"}, VolumeMounts: []v1.VolumeMount{ diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/types/types.go b/vendor/k8s.io/kubernetes/pkg/volume/util/types/types.go index af309353b..238e919b3 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/types/types.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/types/types.go @@ -102,6 +102,23 @@ func IsFailedPreconditionError(err error) 
bool { return errors.As(err, &failedPreconditionError) } +type OperationNotSupported struct { + msg string +} + +func (err *OperationNotSupported) Error() string { + return err.msg +} + +func NewOperationNotSupportedError(msg string) *OperationNotSupported { + return &OperationNotSupported{msg: msg} +} + +func IsOperationNotSupportedError(err error) bool { + var operationNotSupportedError *OperationNotSupported + return errors.As(err, &operationNotSupportedError) +} + // TransientOperationFailure indicates operation failed with a transient error // and may fix itself when retried. type TransientOperationFailure struct { diff --git a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go index 72f2394e9..0d83a7cc8 100644 --- a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go +++ b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go @@ -232,7 +232,7 @@ const ( func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config) { configs := map[ImageID]Config{} - configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.45"} + configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.47"} configs[AgnhostPrivate] = Config{list.PrivateRegistry, "agnhost", "2.6"} configs[AuthenticatedAlpine] = Config{list.GcAuthenticatedRegistry, "alpine", "3.7"} configs[AuthenticatedWindowsNanoServer] = Config{list.GcAuthenticatedRegistry, "windows-nanoserver", "v1"} @@ -241,8 +241,8 @@ func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.36.1-1"} configs[CudaVectorAdd] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "1.0"} configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.3"} - configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.4.3"} - configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.10-0"} + configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.4.7"} + configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.12-0"} configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-4"} configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-4"} configs[InvalidRegistryImage] = Config{list.InvalidRegistry, "alpine", "3.1"} diff --git a/vendor/modules.txt b/vendor/modules.txt index 94c8f8536..d8875935a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -763,7 +763,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.30.0 => k8s.io/api v0.29.0 +# k8s.io/api v0.30.0 => k8s.io/api v0.29.4 ## explicit; go 1.21 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -819,12 +819,12 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.0.0 => k8s.io/apiextensions-apiserver v0.29.0 +# k8s.io/apiextensions-apiserver v0.0.0 => k8s.io/apiextensions-apiserver v0.29.4 ## explicit; go 1.21 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/features -# k8s.io/apimachinery v0.30.0 => k8s.io/apimachinery v0.29.0 +# k8s.io/apimachinery v0.30.0 => k8s.io/apimachinery v0.29.4 ## explicit; go 1.21 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -887,7 +887,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json 
k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.29.3 => k8s.io/apiserver v0.29.0 +# k8s.io/apiserver v0.29.4 => k8s.io/apiserver v0.29.4 ## explicit; go 1.21 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/cel @@ -1035,7 +1035,7 @@ k8s.io/apiserver/plugin/pkg/audit/truncate k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/client-go v0.30.0 => k8s.io/client-go v0.29.0 +# k8s.io/client-go v0.30.0 => k8s.io/client-go v0.29.4 ## explicit; go 1.21 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -1358,7 +1358,7 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.29.3 => k8s.io/cloud-provider v0.29.0 +# k8s.io/cloud-provider v0.29.3 => k8s.io/cloud-provider v0.29.4 ## explicit; go 1.21 k8s.io/cloud-provider k8s.io/cloud-provider/api @@ -1374,7 +1374,7 @@ k8s.io/cloud-provider/names k8s.io/cloud-provider/node/helpers k8s.io/cloud-provider/options k8s.io/cloud-provider/service/helpers -# k8s.io/component-base v0.29.3 => k8s.io/component-base v0.29.0 +# k8s.io/component-base v0.29.4 => k8s.io/component-base v0.29.4 ## explicit; go 1.21 k8s.io/component-base/cli/flag k8s.io/component-base/config @@ -1396,14 +1396,14 @@ k8s.io/component-base/metrics/testutil k8s.io/component-base/tracing k8s.io/component-base/tracing/api/v1 k8s.io/component-base/version -# k8s.io/component-helpers v0.29.3 => k8s.io/component-helpers v0.29.0 +# k8s.io/component-helpers v0.29.4 => k8s.io/component-helpers v0.29.4 ## explicit; go 1.21 k8s.io/component-helpers/node/util k8s.io/component-helpers/node/util/sysctl k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity k8s.io/component-helpers/storage/volume -# k8s.io/controller-manager v0.29.3 => k8s.io/controller-manager v0.29.0 +# k8s.io/controller-manager v0.29.4 => k8s.io/controller-manager v0.29.4 ## explicit; go 1.21 k8s.io/controller-manager/config k8s.io/controller-manager/config/v1 @@ -1424,7 +1424,7 @@ k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity k8s.io/klog/v2/internal/sloghandler -# k8s.io/kms v0.29.3 +# k8s.io/kms v0.29.4 ## explicit; go 1.21 k8s.io/kms/apis/v1beta1 k8s.io/kms/apis/v2 @@ -1451,15 +1451,15 @@ k8s.io/kube-openapi/pkg/validation/errors k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson -# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.29.0 +# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.29.4 ## explicit; go 1.21 k8s.io/kubectl/pkg/scale k8s.io/kubectl/pkg/util/podutils -# k8s.io/kubelet v0.29.3 => k8s.io/kubelet v0.29.0 +# k8s.io/kubelet v0.29.3 => k8s.io/kubelet v0.29.4 ## explicit; go 1.21 k8s.io/kubelet/pkg/apis k8s.io/kubelet/pkg/apis/stats/v1alpha1 -# k8s.io/kubernetes v1.29.0 +# k8s.io/kubernetes v1.29.4 ## explicit; go 1.21 k8s.io/kubernetes/pkg/api/legacyscheme k8s.io/kubernetes/pkg/api/service @@ -1527,10 +1527,10 @@ k8s.io/kubernetes/test/utils k8s.io/kubernetes/test/utils/format k8s.io/kubernetes/test/utils/image k8s.io/kubernetes/test/utils/kubeconfig -# k8s.io/mount-utils v0.29.0 => k8s.io/mount-utils v0.29.0 +# k8s.io/mount-utils v0.29.4 => k8s.io/mount-utils v0.29.4 ## explicit; go 1.21 
k8s.io/mount-utils -# k8s.io/pod-security-admission v0.29.0 => k8s.io/pod-security-admission v0.29.0 +# k8s.io/pod-security-admission v0.29.4 => k8s.io/pod-security-admission v0.29.4 ## explicit; go 1.21 k8s.io/pod-security-admission/api k8s.io/pod-security-admission/policy @@ -1680,32 +1680,32 @@ sigs.k8s.io/structured-merge-diff/v4/value ## explicit; go 1.12 sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 -# k8s.io/api => k8s.io/api v0.29.0 -# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.29.0 -# k8s.io/apimachinery => k8s.io/apimachinery v0.29.0 -# k8s.io/apiserver => k8s.io/apiserver v0.29.0 -# k8s.io/cli-runtime => k8s.io/cli-runtime v0.29.0 -# k8s.io/client-go => k8s.io/client-go v0.29.0 -# k8s.io/cloud-provider => k8s.io/cloud-provider v0.29.0 -# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.29.0 -# k8s.io/code-generator => k8s.io/code-generator v0.29.0 -# k8s.io/component-base => k8s.io/component-base v0.29.0 -# k8s.io/component-helpers => k8s.io/component-helpers v0.29.0 -# k8s.io/controller-manager => k8s.io/controller-manager v0.29.0 -# k8s.io/cri-api => k8s.io/cri-api v0.29.0 -# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.29.0 -# k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.29.0 -# k8s.io/endpointslice => k8s.io/endpointslice v0.29.0 -# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.29.0 -# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.29.0 -# k8s.io/kube-proxy => k8s.io/kube-proxy v0.29.0 -# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.29.0 -# k8s.io/kubectl => k8s.io/kubectl v0.29.0 -# k8s.io/kubelet => k8s.io/kubelet v0.29.0 -# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.29.0 -# k8s.io/metrics => k8s.io/metrics v0.29.0 -# k8s.io/mount-utils => k8s.io/mount-utils v0.29.0 -# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.29.0 -# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.29.0 -# k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.29.0 -# k8s.io/sample-controller => k8s.io/sample-controller v0.29.0 +# k8s.io/api => k8s.io/api v0.29.4 +# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.29.4 +# k8s.io/apimachinery => k8s.io/apimachinery v0.29.4 +# k8s.io/apiserver => k8s.io/apiserver v0.29.4 +# k8s.io/cli-runtime => k8s.io/cli-runtime v0.29.4 +# k8s.io/client-go => k8s.io/client-go v0.29.4 +# k8s.io/cloud-provider => k8s.io/cloud-provider v0.29.4 +# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.29.4 +# k8s.io/code-generator => k8s.io/code-generator v0.29.4 +# k8s.io/component-base => k8s.io/component-base v0.29.4 +# k8s.io/component-helpers => k8s.io/component-helpers v0.29.4 +# k8s.io/controller-manager => k8s.io/controller-manager v0.29.4 +# k8s.io/cri-api => k8s.io/cri-api v0.29.4 +# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.29.4 +# k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.29.4 +# k8s.io/endpointslice => k8s.io/endpointslice v0.29.4 +# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.29.4 +# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.29.4 +# k8s.io/kube-proxy => k8s.io/kube-proxy v0.29.4 +# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.29.4 +# k8s.io/kubectl => k8s.io/kubectl v0.29.4 +# k8s.io/kubelet => k8s.io/kubelet v0.29.4 +# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.29.4 +# k8s.io/metrics => k8s.io/metrics v0.29.4 +# k8s.io/mount-utils => k8s.io/mount-utils v0.29.4 +# 
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.29.4 +# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.29.4 +# k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.29.4 +# k8s.io/sample-controller => k8s.io/sample-controller v0.29.4 From 3f3e98ebada0aa62c4177beaa59951f8ba3acc56 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Apr 2024 16:31:41 +0000 Subject: [PATCH 142/157] chore(deps): bump github.com/onsi/ginkgo/v2 from 2.17.1 to 2.17.2 Bumps [github.com/onsi/ginkgo/v2](https://github.com/onsi/ginkgo) from 2.17.1 to 2.17.2. - [Release notes](https://github.com/onsi/ginkgo/releases) - [Changelog](https://github.com/onsi/ginkgo/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/ginkgo/compare/v2.17.1...v2.17.2) --- updated-dependencies: - dependency-name: github.com/onsi/ginkgo/v2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 8 ++++---- go.sum | 17 ++++++++-------- .../go-task/slim-sprig/{ => v3}/.editorconfig | 0 .../slim-sprig/{ => v3}/.gitattributes | 0 .../go-task/slim-sprig/{ => v3}/.gitignore | 0 .../go-task/slim-sprig/{ => v3}/CHANGELOG.md | 19 ++++++++++++++++++ .../go-task/slim-sprig/{ => v3}/LICENSE.txt | 0 .../go-task/slim-sprig/{ => v3}/README.md | 2 +- .../go-task/slim-sprig/{ => v3}/Taskfile.yml | 2 +- .../go-task/slim-sprig/{ => v3}/crypto.go | 0 .../go-task/slim-sprig/{ => v3}/date.go | 0 .../go-task/slim-sprig/{ => v3}/defaults.go | 0 .../go-task/slim-sprig/{ => v3}/dict.go | 0 .../go-task/slim-sprig/{ => v3}/doc.go | 0 .../go-task/slim-sprig/{ => v3}/functions.go | 0 .../go-task/slim-sprig/{ => v3}/list.go | 0 .../go-task/slim-sprig/{ => v3}/network.go | 0 .../go-task/slim-sprig/{ => v3}/numeric.go | 0 .../go-task/slim-sprig/{ => v3}/reflect.go | 0 .../go-task/slim-sprig/{ => v3}/regex.go | 0 .../go-task/slim-sprig/{ => v3}/strings.go | 0 .../go-task/slim-sprig/{ => v3}/url.go | 0 .../github.com/google/pprof/profile/encode.go | 3 +++ .../pprof/profile/legacy_java_profile.go | 4 ++-- .../github.com/google/pprof/profile/merge.go | 4 +++- .../google/pprof/profile/profile.go | 14 ++++++++++--- vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 20 +++++++++++++++++++ .../v2/ginkgo/generators/bootstrap_command.go | 2 +- .../v2/ginkgo/generators/generate_command.go | 3 ++- .../ginkgo/internal/profiles_and_reports.go | 2 ++ .../ginkgo/v2/reporters/default_reporter.go | 6 +++++- .../onsi/ginkgo/v2/reporters/junit_report.go | 1 + .../onsi/ginkgo/v2/types/version.go | 2 +- vendor/modules.txt | 14 ++++++------- 34 files changed, 91 insertions(+), 32 deletions(-) rename vendor/github.com/go-task/slim-sprig/{ => v3}/.editorconfig (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/.gitattributes (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/.gitignore (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/CHANGELOG.md (95%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/LICENSE.txt (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/README.md (88%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/Taskfile.yml (89%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/crypto.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/date.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/defaults.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/dict.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/doc.go (100%) rename 
vendor/github.com/go-task/slim-sprig/{ => v3}/functions.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/list.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/network.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/numeric.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/reflect.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/regex.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/strings.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/url.go (100%) diff --git a/go.mod b/go.mod index 7866eab26..2c7f3222b 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/golang/protobuf v1.5.4 github.com/kubernetes-csi/csi-lib-utils v0.16.0 - github.com/onsi/ginkgo/v2 v2.17.1 + github.com/onsi/ginkgo/v2 v2.17.2 github.com/onsi/gomega v1.33.0 github.com/pborman/uuid v1.2.1 github.com/pelletier/go-toml v1.9.5 @@ -77,7 +77,6 @@ require ( github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.4 // indirect - github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect @@ -86,7 +85,7 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20230602010524-ada837c32108 // indirect + github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect github.com/google/uuid v1.6.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect @@ -136,7 +135,7 @@ require ( golang.org/x/term v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.18.0 // indirect + golang.org/x/tools v0.20.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect @@ -165,6 +164,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 // indirect github.com/distribution/reference v0.5.0 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect ) diff --git a/go.sum b/go.sum index a23546a05..85a181ec4 100644 --- a/go.sum +++ b/go.sum @@ -122,8 +122,8 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod 
h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= @@ -154,8 +154,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20230602010524-ada837c32108 h1:y+JfwMOPwQwIrnh3TUPwwtOAhONoppkHiSa4sQBoK2k= -github.com/google/pprof v0.0.0-20230602010524-ada837c32108/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -211,8 +211,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= -github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g= +github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= github.com/onsi/gomega v1.33.0 h1:snPCflnZrpMsy94p4lXVEkHo12lmPnc3vY5XBbreexE= github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -259,7 +259,6 @@ github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpE github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= @@ -390,8 +389,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= 
-golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= +golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/go-task/slim-sprig/.editorconfig b/vendor/github.com/go-task/slim-sprig/v3/.editorconfig similarity index 100% rename from vendor/github.com/go-task/slim-sprig/.editorconfig rename to vendor/github.com/go-task/slim-sprig/v3/.editorconfig diff --git a/vendor/github.com/go-task/slim-sprig/.gitattributes b/vendor/github.com/go-task/slim-sprig/v3/.gitattributes similarity index 100% rename from vendor/github.com/go-task/slim-sprig/.gitattributes rename to vendor/github.com/go-task/slim-sprig/v3/.gitattributes diff --git a/vendor/github.com/go-task/slim-sprig/.gitignore b/vendor/github.com/go-task/slim-sprig/v3/.gitignore similarity index 100% rename from vendor/github.com/go-task/slim-sprig/.gitignore rename to vendor/github.com/go-task/slim-sprig/v3/.gitignore diff --git a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md similarity index 95% rename from vendor/github.com/go-task/slim-sprig/CHANGELOG.md rename to vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md index 61d8ebffc..2ce45dd4e 100644 --- a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md +++ b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## Release 3.2.3 (2022-11-29) + +### Changed + +- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi) +- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero) +- #353: Updated masterminds/semver which included bug fixes +- #354: Updated golang.org/x/crypto which included bug fixes + +## Release 3.2.2 (2021-02-04) + +This is a re-release of 3.2.1 to satisfy something with the Go module system. + +## Release 3.2.1 (2021-02-04) + +### Changed + +- Upgraded `Masterminds/goutils` to `v1.1.1`. 
see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) + ## Release 3.2.0 (2020-12-14) ### Added diff --git a/vendor/github.com/go-task/slim-sprig/LICENSE.txt b/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt similarity index 100% rename from vendor/github.com/go-task/slim-sprig/LICENSE.txt rename to vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt diff --git a/vendor/github.com/go-task/slim-sprig/README.md b/vendor/github.com/go-task/slim-sprig/v3/README.md similarity index 88% rename from vendor/github.com/go-task/slim-sprig/README.md rename to vendor/github.com/go-task/slim-sprig/v3/README.md index 72579471f..b5ab56425 100644 --- a/vendor/github.com/go-task/slim-sprig/README.md +++ b/vendor/github.com/go-task/slim-sprig/v3/README.md @@ -1,4 +1,4 @@ -# Slim-Sprig: Template functions for Go templates [![GoDoc](https://godoc.org/github.com/go-task/slim-sprig?status.svg)](https://godoc.org/github.com/go-task/slim-sprig) [![Go Report Card](https://goreportcard.com/badge/github.com/go-task/slim-sprig)](https://goreportcard.com/report/github.com/go-task/slim-sprig) +# Slim-Sprig: Template functions for Go templates [![Go Reference](https://pkg.go.dev/badge/github.com/go-task/slim-sprig/v3.svg)](https://pkg.go.dev/github.com/go-task/slim-sprig/v3) Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with all functions that depend on external (non standard library) or crypto packages diff --git a/vendor/github.com/go-task/slim-sprig/Taskfile.yml b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml similarity index 89% rename from vendor/github.com/go-task/slim-sprig/Taskfile.yml rename to vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml index cdcfd223b..8e6346bb1 100644 --- a/vendor/github.com/go-task/slim-sprig/Taskfile.yml +++ b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml @@ -1,6 +1,6 @@ # https://taskfile.dev -version: '2' +version: '3' tasks: default: diff --git a/vendor/github.com/go-task/slim-sprig/crypto.go b/vendor/github.com/go-task/slim-sprig/v3/crypto.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/crypto.go rename to vendor/github.com/go-task/slim-sprig/v3/crypto.go diff --git a/vendor/github.com/go-task/slim-sprig/date.go b/vendor/github.com/go-task/slim-sprig/v3/date.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/date.go rename to vendor/github.com/go-task/slim-sprig/v3/date.go diff --git a/vendor/github.com/go-task/slim-sprig/defaults.go b/vendor/github.com/go-task/slim-sprig/v3/defaults.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/defaults.go rename to vendor/github.com/go-task/slim-sprig/v3/defaults.go diff --git a/vendor/github.com/go-task/slim-sprig/dict.go b/vendor/github.com/go-task/slim-sprig/v3/dict.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/dict.go rename to vendor/github.com/go-task/slim-sprig/v3/dict.go diff --git a/vendor/github.com/go-task/slim-sprig/doc.go b/vendor/github.com/go-task/slim-sprig/v3/doc.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/doc.go rename to vendor/github.com/go-task/slim-sprig/v3/doc.go diff --git a/vendor/github.com/go-task/slim-sprig/functions.go b/vendor/github.com/go-task/slim-sprig/v3/functions.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/functions.go rename to vendor/github.com/go-task/slim-sprig/v3/functions.go diff --git a/vendor/github.com/go-task/slim-sprig/list.go 
b/vendor/github.com/go-task/slim-sprig/v3/list.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/list.go rename to vendor/github.com/go-task/slim-sprig/v3/list.go diff --git a/vendor/github.com/go-task/slim-sprig/network.go b/vendor/github.com/go-task/slim-sprig/v3/network.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/network.go rename to vendor/github.com/go-task/slim-sprig/v3/network.go diff --git a/vendor/github.com/go-task/slim-sprig/numeric.go b/vendor/github.com/go-task/slim-sprig/v3/numeric.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/numeric.go rename to vendor/github.com/go-task/slim-sprig/v3/numeric.go diff --git a/vendor/github.com/go-task/slim-sprig/reflect.go b/vendor/github.com/go-task/slim-sprig/v3/reflect.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/reflect.go rename to vendor/github.com/go-task/slim-sprig/v3/reflect.go diff --git a/vendor/github.com/go-task/slim-sprig/regex.go b/vendor/github.com/go-task/slim-sprig/v3/regex.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/regex.go rename to vendor/github.com/go-task/slim-sprig/v3/regex.go diff --git a/vendor/github.com/go-task/slim-sprig/strings.go b/vendor/github.com/go-task/slim-sprig/v3/strings.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/strings.go rename to vendor/github.com/go-task/slim-sprig/v3/strings.go diff --git a/vendor/github.com/go-task/slim-sprig/url.go b/vendor/github.com/go-task/slim-sprig/v3/url.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/url.go rename to vendor/github.com/go-task/slim-sprig/v3/url.go diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go index 182c926b9..860bb304c 100644 --- a/vendor/github.com/google/pprof/profile/encode.go +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -530,6 +530,7 @@ func (p *Line) decoder() []decoder { func (p *Line) encode(b *buffer) { encodeUint64Opt(b, 1, p.functionIDX) encodeInt64Opt(b, 2, p.Line) + encodeInt64Opt(b, 3, p.Column) } var lineDecoder = []decoder{ @@ -538,6 +539,8 @@ var lineDecoder = []decoder{ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) }, // optional int64 line = 2 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) }, + // optional int64 column = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Column) }, } func (p *Function) decoder() []decoder { diff --git a/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/vendor/github.com/google/pprof/profile/legacy_java_profile.go index 91f45e53c..4580bab18 100644 --- a/vendor/github.com/google/pprof/profile/legacy_java_profile.go +++ b/vendor/github.com/google/pprof/profile/legacy_java_profile.go @@ -56,7 +56,7 @@ func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte } // Strip out addresses for better merge. - if err = p.Aggregate(true, true, true, true, false); err != nil { + if err = p.Aggregate(true, true, true, true, false, false); err != nil { return nil, err } @@ -99,7 +99,7 @@ func parseJavaProfile(b []byte) (*Profile, error) { } // Strip out addresses for better merge. 
- if err = p.Aggregate(true, true, true, true, false); err != nil { + if err = p.Aggregate(true, true, true, true, false, false); err != nil { return nil, err } diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go index 4b66282cb..eee0132e7 100644 --- a/vendor/github.com/google/pprof/profile/merge.go +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -326,12 +326,13 @@ func (l *Location) key() locationKey { key.addr -= l.Mapping.Start key.mappingID = l.Mapping.ID } - lines := make([]string, len(l.Line)*2) + lines := make([]string, len(l.Line)*3) for i, line := range l.Line { if line.Function != nil { lines[i*2] = strconv.FormatUint(line.Function.ID, 16) } lines[i*2+1] = strconv.FormatInt(line.Line, 16) + lines[i*2+2] = strconv.FormatInt(line.Column, 16) } key.lines = strings.Join(lines, "|") return key @@ -418,6 +419,7 @@ func (pm *profileMerger) mapLine(src Line) Line { ln := Line{ Function: pm.mapFunction(src.Function), Line: src.Line, + Column: src.Column, } return ln } diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go index 60ef7e926..62df80a55 100644 --- a/vendor/github.com/google/pprof/profile/profile.go +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -145,6 +145,7 @@ type Location struct { type Line struct { Function *Function Line int64 + Column int64 functionIDX uint64 } @@ -436,7 +437,7 @@ func (p *Profile) CheckValid() error { // Aggregate merges the locations in the profile into equivalence // classes preserving the request attributes. It also updates the // samples to point to the merged locations. -func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error { +func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnnumber, address bool) error { for _, m := range p.Mapping { m.HasInlineFrames = m.HasInlineFrames && inlineFrame m.HasFunctions = m.HasFunctions && function @@ -458,7 +459,7 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address } // Aggregate locations - if !inlineFrame || !address || !linenumber { + if !inlineFrame || !address || !linenumber || !columnnumber { for _, l := range p.Location { if !inlineFrame && len(l.Line) > 1 { l.Line = l.Line[len(l.Line)-1:] @@ -466,6 +467,12 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address if !linenumber { for i := range l.Line { l.Line[i].Line = 0 + l.Line[i].Column = 0 + } + } + if !columnnumber { + for i := range l.Line { + l.Line[i].Column = 0 } } if !address { @@ -627,10 +634,11 @@ func (l *Location) string() string { for li := range l.Line { lnStr := "??" 
if fn := l.Line[li].Function; fn != nil { - lnStr = fmt.Sprintf("%s %s:%d s=%d", + lnStr = fmt.Sprintf("%s %s:%d:%d s=%d", fn.Name, fn.Filename, l.Line[li].Line, + l.Line[li].Column, fn.StartLine) if fn.Name != fn.SystemName { lnStr = lnStr + "(" + fn.SystemName + ")" diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index 44222220a..0a8949799 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,23 @@ +## 2.17.2 + +### Fixes +- fix: close files [32259c8] +- fix github output log level for skipped specs [780e7a3] + +### Maintenance +- Bump github.com/google/pprof [d91fe4e] +- Bump github.com/go-task/slim-sprig to v3 [8cb662e] +- Bump golang.org/x/net in /integration/_fixtures/version_mismatch_fixture (#1391) [3134422] +- Bump github-pages from 230 to 231 in /docs (#1384) [eca81b4] +- Bump golang.org/x/tools from 0.19.0 to 0.20.0 (#1383) [760def8] +- Bump golang.org/x/net from 0.23.0 to 0.24.0 (#1381) [4ce33f4] +- Fix test for gomega version bump [f2fcd97] +- Bump github.com/onsi/gomega from 1.30.0 to 1.33.0 (#1390) [fd622d2] +- Bump golang.org/x/tools from 0.17.0 to 0.19.0 (#1368) [5474a26] +- Bump github-pages from 229 to 230 in /docs (#1359) [e6d1170] +- Bump google.golang.org/protobuf from 1.28.0 to 1.33.0 (#1374) [7f447b2] +- Bump golang.org/x/net from 0.20.0 to 0.23.0 (#1380) [f15239a] + ## 2.17.1 ### Fixes diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go index 73aff0b7a..b2dc59be6 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go @@ -7,7 +7,7 @@ import ( "os" "text/template" - sprig "github.com/go-task/slim-sprig" + sprig "github.com/go-task/slim-sprig/v3" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/internal" "github.com/onsi/ginkgo/v2/types" diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go index be01dec97..cf3b7cb6d 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go @@ -10,7 +10,7 @@ import ( "strings" "text/template" - sprig "github.com/go-task/slim-sprig" + sprig "github.com/go-task/slim-sprig/v3" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/internal" "github.com/onsi/ginkgo/v2/types" @@ -174,6 +174,7 @@ func moduleName(modRoot string) string { if err != nil { return "" } + defer modFile.Close() mod := make([]byte, 128) _, err = modFile.Read(mod) diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go index 5f35864dd..8e16d2bb0 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ -161,6 +161,7 @@ func MergeAndCleanupCoverProfiles(profiles []string, destination string) error { if err != nil { return err } + defer dst.Close() err = DumpCoverProfiles(merged, dst) if err != nil { return err @@ -196,6 +197,7 @@ func MergeProfiles(profilePaths []string, destination string) error { return fmt.Errorf("Could not open profile: %s\n%s", profilePath, 
err.Error()) } prof, err := profile.Parse(proFile) + _ = proFile.Close() if err != nil { return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error()) } diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 4026859ec..980973370 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -419,7 +419,11 @@ func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failur highlightColor := r.highlightColorForState(state) r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message)) if r.conf.GithubOutput { - r.emitBlock(r.fi(indent, "::error file=%s,line=%d::%s %s", failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + level := "error" + if state.Is(types.SpecStateSkipped) { + level = "notice" + } + r.emitBlock(r.fi(indent, "::%s file=%s,line=%d::%s %s", level, failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) } else { r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) } diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index 43244a9bd..2a3215b51 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -324,6 +324,7 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) continue } err = xml.NewDecoder(f).Decode(&report) + _ = f.Close() if err != nil { messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error())) continue diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 851d42b45..5dd0140cd 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.17.1" +const VERSION = "2.17.2" diff --git a/vendor/modules.txt b/vendor/modules.txt index d8875935a..101b2276a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -213,9 +213,9 @@ github.com/go-openapi/jsonreference/internal # github.com/go-openapi/swag v0.22.4 ## explicit; go 1.18 github.com/go-openapi/swag -# github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 -## explicit; go 1.13 -github.com/go-task/slim-sprig +# github.com/go-task/slim-sprig/v3 v3.0.0 +## explicit; go 1.20 +github.com/go-task/slim-sprig/v3 # github.com/gofrs/uuid v4.4.0+incompatible ## explicit github.com/gofrs/uuid @@ -287,7 +287,7 @@ github.com/google/go-cmp/cmp/internal/value ## explicit; go 1.12 github.com/google/gofuzz github.com/google/gofuzz/bytesource -# github.com/google/pprof v0.0.0-20230602010524-ada837c32108 +# github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 ## explicit; go 1.19 github.com/google/pprof/profile # github.com/google/uuid v1.6.0 @@ -350,7 +350,7 @@ github.com/munnerz/goautoneg # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f ## explicit github.com/mxk/go-flowrate/flowrate -# 
github.com/onsi/ginkgo/v2 v2.17.1 +# github.com/onsi/ginkgo/v2 v2.17.2 ## explicit; go 1.20 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config @@ -630,8 +630,8 @@ golang.org/x/text/width # golang.org/x/time v0.5.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.18.0 -## explicit; go 1.18 +# golang.org/x/tools v0.20.0 +## explicit; go 1.19 golang.org/x/tools/cover golang.org/x/tools/go/ast/inspector # google.golang.org/appengine v1.6.7 From 11a4aa7e710a6bc530c65b6bf40a1bd7615a6a27 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Apr 2024 16:50:18 +0000 Subject: [PATCH 143/157] chore(deps): bump sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader Bumps [sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader](https://github.com/kubernetes-sigs/cloud-provider-azure) from 0.0.7 to 0.0.8. - [Release notes](https://github.com/kubernetes-sigs/cloud-provider-azure/releases) - [Changelog](https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/docs/release-versioning.md) - [Commits](https://github.com/kubernetes-sigs/cloud-provider-azure/compare/pkg/azclient/v0.0.7...pkg/azclient/v0.0.8) --- updated-dependencies: - dependency-name: sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- vendor/modules.txt | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 2c7f3222b..86fc4e2b8 100644 --- a/go.mod +++ b/go.mod @@ -38,7 +38,7 @@ require ( k8s.io/mount-utils v0.29.4 k8s.io/utils v0.0.0-20231127182322-b307cd553661 sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240418020948-86cfc443b48c - sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.7 + sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.8 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index 85a181ec4..dcee6ecc4 100644 --- a/go.sum +++ b/go.sum @@ -469,8 +469,8 @@ sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240418020948-86cfc443b48c h1:jiFjp4 sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240418020948-86cfc443b48c/go.mod h1:1vNSbzbKZwTZFc/kcVGFpcTjcys3qcRtPT3ZZtmg2WA= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.15 h1:dAvrFZFLIVoOXTSrU5jTLyUdNlyaExrBqD2YvyRjpI4= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.15/go.mod h1:gd4xk5narc09IQvDR8xyr5FyDmB0Y49+bLOUlMaGecs= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.7 h1:1qp8hglOZ2trgdC6ZdxrSsrN3YPy/hFSICDlUUHi+bA= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.7/go.mod h1:MCkyXY+mUB1RGC+fqVQrUKxsLumryR9RKkcJHYBQa+k= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.8 h1:Ix/gpEadY+rOsC2lEEP60g7WpdTOxzdOEpS3+xMkopE= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.8/go.mod h1:b0gqf/Z49wro1afaTPgXZkVwJGTihnDSe0c80LYQ5JE= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/vendor/modules.txt b/vendor/modules.txt index 101b2276a..4cb6178f8 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1662,7 +1662,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient 
sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient -# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.7 +# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.8 ## explicit; go 1.21 sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader # sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd From 98015259b440508efd2960d0fcbdc32b883c5002 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 May 2024 16:53:19 +0000 Subject: [PATCH 144/157] chore(deps): bump google.golang.org/protobuf from 1.33.0 to 1.34.0 Bumps google.golang.org/protobuf from 1.33.0 to 1.34.0. --- updated-dependencies: - dependency-name: google.golang.org/protobuf dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 +- .../protobuf/encoding/protojson/encode.go | 20 ++- .../protobuf/encoding/prototext/encode.go | 20 ++- .../protobuf/internal/descfmt/stringer.go | 1 + .../editiondefaults/editions_defaults.binpb | Bin 63 -> 78 bytes .../internal/editionssupport/editions.go | 13 ++ .../protobuf/internal/encoding/tag/tag.go | 4 +- .../protobuf/internal/errors/errors.go | 15 ++ .../protobuf/internal/filedesc/desc.go | 84 +++++++---- .../protobuf/internal/filedesc/desc_init.go | 41 ++++- .../protobuf/internal/filedesc/desc_lazy.go | 44 +----- .../protobuf/internal/filedesc/editions.go | 18 ++- .../protobuf/internal/filedesc/placeholder.go | 1 + .../internal/genid/go_features_gen.go | 2 +- .../protobuf/internal/impl/codec_field.go | 64 +++++--- .../protobuf/internal/impl/codec_map.go | 15 +- .../protobuf/internal/impl/legacy_enum.go | 1 + .../internal/impl/legacy_extension.go | 2 +- .../protobuf/internal/impl/legacy_file.go | 4 +- .../protobuf/internal/impl/legacy_message.go | 10 +- .../protobuf/internal/impl/message_reflect.go | 31 ++-- .../internal/impl/message_reflect_gen.go | 142 ++++++++++-------- .../protobuf/internal/version/version.go | 2 +- .../protobuf/proto/decode.go | 2 + .../protobuf/proto/encode.go | 44 +++++- .../protobuf/proto/extension.go | 11 +- .../protobuf/proto/messageset.go | 7 +- .../google.golang.org/protobuf/proto/size.go | 2 + .../protobuf/reflect/protodesc/desc.go | 13 +- .../protobuf/reflect/protodesc/desc_init.go | 49 ++---- .../reflect/protodesc/desc_validate.go | 61 ++++---- .../protobuf/reflect/protodesc/editions.go | 5 - .../protobuf/reflect/protodesc/proto.go | 22 +++ .../protobuf/reflect/protoreflect/proto.go | 2 +- .../protobuf/reflect/protoreflect/type.go | 6 + .../types/gofeaturespb/go_features.pb.go | 116 +++++++------- .../types/gofeaturespb/go_features.proto | 28 ---- vendor/modules.txt | 3 +- 39 files changed, 539 insertions(+), 372 deletions(-) create mode 100644 vendor/google.golang.org/protobuf/internal/editionssupport/editions.go delete mode 100644 vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto diff --git a/go.mod b/go.mod index 86fc4e2b8..2bb0abc0f 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( go.uber.org/mock v0.4.0 golang.org/x/net v0.24.0 google.golang.org/grpc v1.59.0 - google.golang.org/protobuf v1.33.0 + google.golang.org/protobuf v1.34.0 k8s.io/api v0.30.0 k8s.io/apimachinery v0.30.0 k8s.io/client-go v0.30.0 diff --git a/go.sum b/go.sum index dcee6ecc4..e52b7e6a2 100644 --- a/go.sum +++ b/go.sum @@ -405,8 
+405,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH4= +google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index 3f75098b6..29846df22 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -25,15 +25,17 @@ const defaultIndent = " " // Format formats the message as a multiline string. // This function is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } // Marshal writes the given [proto.Message] in JSON format using default options. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func Marshal(m proto.Message) ([]byte, error) { return MarshalOptions{}.Marshal(m) } @@ -110,8 +112,9 @@ type MarshalOptions struct { // Format formats the message as a string. // This method is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func (o MarshalOptions) Format(m proto.Message) string { if m == nil || !m.ProtoReflect().IsValid() { return "" // invalid syntax, but okay since this is for debugging @@ -122,8 +125,9 @@ func (o MarshalOptions) Format(m proto.Message) string { } // Marshal marshals the given [proto.Message] in the JSON format using options in -// MarshalOptions. Do not depend on the output being stable. It may change over -// time across different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. 
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { return o.marshal(nil, m) } diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 95967e811..1f57e6610 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -27,15 +27,17 @@ const defaultIndent = " " // Format formats the message as a multiline string. // This function is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } // Marshal writes the given [proto.Message] in textproto format using default -// options. Do not depend on the output being stable. It may change over time -// across different versions of the program. +// options. Do not depend on the output being stable. Its output will change +// across different builds of your program, even when using the same version of +// the protobuf module. func Marshal(m proto.Message) ([]byte, error) { return MarshalOptions{}.Marshal(m) } @@ -84,8 +86,9 @@ type MarshalOptions struct { // Format formats the message as a string. // This method is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func (o MarshalOptions) Format(m proto.Message) string { if m == nil || !m.ProtoReflect().IsValid() { return "" // invalid syntax, but okay since this is for debugging @@ -98,8 +101,9 @@ func (o MarshalOptions) Format(m proto.Message) string { } // Marshal writes the given [proto.Message] in textproto format using options in -// MarshalOptions object. Do not depend on the output being stable. It may -// change over time across different versions of the program. +// MarshalOptions object. Do not depend on the output being stable. Its output +// will change across different builds of your program, even when using the +// same version of the protobuf module. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { return o.marshal(nil, m) } diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index a45625c8d..87e46bd4d 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -252,6 +252,7 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record fu {rv.MethodByName("Values"), "Values"}, {rv.MethodByName("ReservedNames"), "ReservedNames"}, {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + {rv.MethodByName("IsClosed"), "IsClosed"}, }...) 
case protoreflect.EnumValueDescriptor: diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index 18f0756874367adcdb790ffde125b6a7388b4eaa..f691305eb4f73440cd48caba949023eab52ef304 100644 GIT binary patch literal 78 zcmd-Q6B6WL6kw8IQef6#G+?@9$Hc)X@r<1dB+ewjD8Z<}1Qcfki8Dw%hln$xi@#u3 Kc*d^rf*k;APzzxI literal 63 zcmd-Q6yo7v6kw8IQef6#G+>f=#?A#2ViI7KU{qiN3NcDNhX^qu3B6!fc*d^rf*k<7 Cln3+x diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go new file mode 100644 index 000000000..029a6a12d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package editionssupport defines constants for editions that are supported. +package editionssupport + +import descriptorpb "google.golang.org/protobuf/types/descriptorpb" + +const ( + Minimum = descriptorpb.Edition_EDITION_PROTO2 + Maximum = descriptorpb.Edition_EDITION_2023 +) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 373d20837..7e87c7604 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -32,6 +32,7 @@ var byteType = reflect.TypeOf(byte(0)) func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor { f := new(filedesc.Field) f.L0.ParentFile = filedesc.SurrogateProto2 + f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures for len(tag) > 0 { i := strings.IndexByte(tag, ',') if i < 0 { @@ -107,8 +108,7 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri f.L1.StringName.InitJSON(jsonName) } case s == "packed": - f.L1.HasPacked = true - f.L1.IsPacked = true + f.L1.EditionFeatures.IsPacked = true case strings.HasPrefix(s, "weak="): f.L1.IsWeak = true f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):])) diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go index 20c17b35e..d96719829 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/errors.go +++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go @@ -87,3 +87,18 @@ func InvalidUTF8(name string) error { func RequiredNotSet(name string) error { return New("required field %v not set", name) } + +type SizeMismatchError struct { + Calculated, Measured int +} + +func (e *SizeMismatchError) Error() string { + return fmt.Sprintf("size mismatch (see https://github.com/golang/protobuf/issues/1609): calculated=%d, measured=%d", e.Calculated, e.Measured) +} + +func MismatchedSizeCalculation(calculated, measured int) error { + return &SizeMismatchError{ + Calculated: calculated, + Measured: measured, + } +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 8826bcf40..ece53bea3 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -7,6 +7,7 @@ package filedesc import ( "bytes" "fmt" + 
"strings" "sync" "sync/atomic" @@ -108,9 +109,12 @@ func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd } func (fd *File) Parent() protoreflect.Descriptor { return nil } func (fd *File) Index() int { return 0 } func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax } -func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } -func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } -func (fd *File) IsPlaceholder() bool { return false } + +// Not exported and just used to reconstruct the original FileDescriptor proto +func (fd *File) Edition() int32 { return int32(fd.L1.Edition) } +func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } +func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } +func (fd *File) IsPlaceholder() bool { return false } func (fd *File) Options() protoreflect.ProtoMessage { if f := fd.lazyInit().Options; f != nil { return f() @@ -202,6 +206,9 @@ func (ed *Enum) lazyInit() *EnumL2 { ed.L0.ParentFile.lazyInit() // implicitly initializes L2 return ed.L2 } +func (ed *Enum) IsClosed() bool { + return !ed.L1.EditionFeatures.IsOpenEnum +} func (ed *EnumValue) Options() protoreflect.ProtoMessage { if f := ed.L1.Options; f != nil { @@ -251,10 +258,6 @@ type ( StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions - HasPacked bool // promoted from google.protobuf.FieldOptions - IsPacked bool // promoted from google.protobuf.FieldOptions - HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions - EnforceUTF8 bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor @@ -331,8 +334,7 @@ func (fd *Field) HasPresence() bool { if fd.L1.Cardinality == protoreflect.Repeated { return false } - explicitFieldPresence := fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsFieldPresence - return fd.Syntax() == protoreflect.Proto2 || explicitFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil + return fd.IsExtension() || fd.L1.EditionFeatures.IsFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil } func (fd *Field) HasOptionalKeyword() bool { return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional @@ -345,14 +347,7 @@ func (fd *Field) IsPacked() bool { case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: return false } - if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { - return fd.L1.EditionFeatures.IsPacked - } - if fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 { - // proto3 repeated fields are packed by default. - return !fd.L1.HasPacked || fd.L1.IsPacked - } - return fd.L1.IsPacked + return fd.L1.EditionFeatures.IsPacked } func (fd *Field) IsExtension() bool { return false } func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } @@ -399,13 +394,7 @@ func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} // WARNING: This method is exempt from the compatibility promise and may be // removed in the future without warning. 
func (fd *Field) EnforceUTF8() bool { - if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { - return fd.L1.EditionFeatures.IsUTF8Validated - } - if fd.L1.HasEnforceUTF8 { - return fd.L1.EnforceUTF8 - } - return fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 + return fd.L1.EditionFeatures.IsUTF8Validated } func (od *Oneof) IsSynthetic() bool { @@ -438,7 +427,6 @@ type ( Options func() protoreflect.ProtoMessage StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto - IsPacked bool // promoted from google.protobuf.FieldOptions Default defaultValue Enum protoreflect.EnumDescriptor Message protoreflect.MessageDescriptor @@ -461,7 +449,16 @@ func (xd *Extension) HasPresence() bool { return xd.L1.Cardi func (xd *Extension) HasOptionalKeyword() bool { return (xd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && xd.L1.Cardinality == protoreflect.Optional) || xd.lazyInit().IsProto3Optional } -func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } +func (xd *Extension) IsPacked() bool { + if xd.L1.Cardinality != protoreflect.Repeated { + return false + } + switch xd.L1.Kind { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + return xd.L1.EditionFeatures.IsPacked +} func (xd *Extension) IsExtension() bool { return true } func (xd *Extension) IsWeak() bool { return false } func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } @@ -542,8 +539,9 @@ func (md *Method) ProtoInternal(pragma.DoNotImplement) {} // Surrogate files are can be used to create standalone descriptors // where the syntax is only information derived from the parent file. var ( - SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}} - SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}} + SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}} + SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}} + SurrogateEdition2023 = &File{L1: FileL1{Syntax: protoreflect.Editions, Edition: Edition2023}, L2: &FileL2{}} ) type ( @@ -585,6 +583,34 @@ func (s *stringName) InitJSON(name string) { s.nameJSON = name } +// Returns true if this field is structured like the synthetic field of a proto2 +// group. This allows us to expand our treatment of delimited fields without +// breaking proto2 files that have been upgraded to editions. +func isGroupLike(fd protoreflect.FieldDescriptor) bool { + // Groups are always group types. + if fd.Kind() != protoreflect.GroupKind { + return false + } + + // Group fields are always the lowercase type name. + if strings.ToLower(string(fd.Message().Name())) != string(fd.Name()) { + return false + } + + // Groups could only be defined in the same file they're used. + if fd.Message().ParentFile() != fd.ParentFile() { + return false + } + + // Group messages are always defined in the same scope as the field. File + // level extensions will compare NULL == NULL here, which is why the file + // comparison above is necessary to ensure both come from the same file. + if fd.IsExtension() { + return fd.Parent() == fd.Message().Parent() + } + return fd.ContainingMessage() == fd.Message().Parent() +} + func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName { s.once.Do(func() { if fd.IsExtension() { @@ -605,7 +631,7 @@ func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName { // Format the text name. 
s.nameText = string(fd.Name()) - if fd.Kind() == protoreflect.GroupKind { + if isGroupLike(fd) { s.nameText = string(fd.Message().Name()) } } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 237e64fd2..3bc3b1cdf 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -113,8 +113,10 @@ func (fd *File) unmarshalSeed(b []byte) { switch string(v) { case "proto2": fd.L1.Syntax = protoreflect.Proto2 + fd.L1.Edition = EditionProto2 case "proto3": fd.L1.Syntax = protoreflect.Proto3 + fd.L1.Edition = EditionProto3 case "editions": fd.L1.Syntax = protoreflect.Editions default: @@ -177,11 +179,10 @@ func (fd *File) unmarshalSeed(b []byte) { // If syntax is missing, it is assumed to be proto2. if fd.L1.Syntax == 0 { fd.L1.Syntax = protoreflect.Proto2 + fd.L1.Edition = EditionProto2 } - if fd.L1.Syntax == protoreflect.Editions { - fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition) - } + fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition) // Parse editions features from options if any if options != nil { @@ -267,6 +268,7 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protorefl ed.L0.ParentFile = pf ed.L0.Parent = pd ed.L0.Index = i + ed.L1.EditionFeatures = featuresFromParentDesc(ed.Parent()) var numValues int for b := b; len(b) > 0; { @@ -443,6 +445,7 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot xd.L0.ParentFile = pf xd.L0.Parent = pd xd.L0.Index = i + xd.L1.EditionFeatures = featuresFromParentDesc(pd) for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -467,6 +470,38 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot xd.L0.FullName = appendFullName(sb, pd.FullName(), v) case genid.FieldDescriptorProto_Extendee_field_number: xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v)) + case genid.FieldDescriptorProto_Options_field_number: + xd.unmarshalOptions(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + + if xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded { + xd.L1.Kind = protoreflect.GroupKind + } +} + +func (xd *Extension) unmarshalOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldOptions_Packed_field_number: + xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldOptions_Features_field_number: + xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures) } default: m := protowire.ConsumeFieldValue(num, typ, b) diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 482a61cc1..570181eb4 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -466,10 +466,10 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref b = b[m:] } } - if fd.Syntax() == protoreflect.Editions && fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded { + if fd.L1.Kind == protoreflect.MessageKind && 
fd.L1.EditionFeatures.IsDelimitedEncoded { fd.L1.Kind = protoreflect.GroupKind } - if fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsLegacyRequired { + if fd.L1.EditionFeatures.IsLegacyRequired { fd.L1.Cardinality = protoreflect.Required } if rawTypeName != nil { @@ -496,13 +496,11 @@ func (fd *Field) unmarshalOptions(b []byte) { b = b[m:] switch num { case genid.FieldOptions_Packed_field_number: - fd.L1.HasPacked = true - fd.L1.IsPacked = protowire.DecodeBool(v) + fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) case genid.FieldOptions_Weak_field_number: fd.L1.IsWeak = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: - fd.L1.HasEnforceUTF8 = true - fd.L1.EnforceUTF8 = protowire.DecodeBool(v) + fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -548,7 +546,6 @@ func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { var rawTypeName []byte var rawOptions []byte - xd.L1.EditionFeatures = featuresFromParentDesc(xd.L1.Extendee) xd.L2 = new(ExtensionL2) for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -572,7 +569,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v case genid.FieldDescriptorProto_Options_field_number: - xd.unmarshalOptions(v) rawOptions = appendOptions(rawOptions, v) } default: @@ -580,12 +576,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { b = b[m:] } } - if xd.Syntax() == protoreflect.Editions && xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded { - xd.L1.Kind = protoreflect.GroupKind - } - if xd.Syntax() == protoreflect.Editions && xd.L1.EditionFeatures.IsLegacyRequired { - xd.L1.Cardinality = protoreflect.Required - } if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch xd.L1.Kind { @@ -598,32 +588,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions) } -func (xd *Extension) unmarshalOptions(b []byte) { - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.FieldOptions_Packed_field_number: - xd.L2.IsPacked = protowire.DecodeBool(v) - } - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.FieldOptions_Features_field_number: - xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } -} - func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { var rawMethods [][]byte var rawOptions []byte diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index 0375a49d4..d1e16a26d 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -14,9 +14,13 @@ import ( ) var defaultsCache = make(map[Edition]EditionFeatures) +var defaultsKeys = []Edition{} func init() { unmarshalEditionDefaults(editiondefaults.Defaults) + SurrogateProto2.L1.EditionFeatures = getFeaturesFor(EditionProto2) + SurrogateProto3.L1.EditionFeatures = getFeaturesFor(EditionProto3) + 
SurrogateEdition2023.L1.EditionFeatures = getFeaturesFor(Edition2023) } func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { @@ -110,6 +114,7 @@ func unmarshalEditionDefault(b []byte) { } } defaultsCache[ed] = fs + defaultsKeys = append(defaultsKeys, ed) } func unmarshalEditionDefaults(b []byte) { @@ -135,8 +140,15 @@ func unmarshalEditionDefaults(b []byte) { } func getFeaturesFor(ed Edition) EditionFeatures { - if def, ok := defaultsCache[ed]; ok { - return def + match := EditionUnknown + for _, key := range defaultsKeys { + if key > ed { + break + } + match = key + } + if match == EditionUnknown { + panic(fmt.Sprintf("unsupported edition: %v", ed)) } - panic(fmt.Sprintf("unsupported edition: %v", ed)) + return defaultsCache[match] } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go index 28240ebc5..bfb3b8417 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go @@ -63,6 +63,7 @@ func (e PlaceholderEnum) Options() protoreflect.ProtoMessage { return des func (e PlaceholderEnum) Values() protoreflect.EnumValueDescriptors { return emptyEnumValues } func (e PlaceholderEnum) ReservedNames() protoreflect.Names { return emptyNames } func (e PlaceholderEnum) ReservedRanges() protoreflect.EnumRanges { return emptyEnumRanges } +func (e PlaceholderEnum) IsClosed() bool { return false } func (e PlaceholderEnum) ProtoType(protoreflect.EnumDescriptor) { return } func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go index fd9015e8e..9a652a2b4 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -10,7 +10,7 @@ import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" ) -const File_reflect_protodesc_proto_go_features_proto = "reflect/protodesc/proto/go_features.proto" +const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto" // Names for google.protobuf.GoFeatures. 
const ( diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 3fadd241e..78ee47e44 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -233,9 +233,15 @@ func sizeMessageInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { } func appendMessageInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + calculatedSize := f.mi.sizePointer(p.Elem(), opts) b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(f.mi.sizePointer(p.Elem(), opts))) - return f.mi.marshalAppendPointer(b, p.Elem(), opts) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) + b, err := f.mi.marshalAppendPointer(b, p.Elem(), opts) + if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } + return b, err } func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { @@ -262,14 +268,21 @@ func isInitMessageInfo(p pointer, f *coderFieldInfo) error { return f.mi.checkInitializedPointer(p.Elem()) } -func sizeMessage(m proto.Message, tagsize int, _ marshalOptions) int { - return protowire.SizeBytes(proto.Size(m)) + tagsize +func sizeMessage(m proto.Message, tagsize int, opts marshalOptions) int { + return protowire.SizeBytes(opts.Options().Size(m)) + tagsize } func appendMessage(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { + mopts := opts.Options() + calculatedSize := mopts.Size(m) b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, uint64(proto.Size(m))) - return opts.Options().MarshalAppend(b, m) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) + b, err := mopts.MarshalAppend(b, m) + if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } + return b, err } func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { @@ -405,8 +418,8 @@ func consumeGroupType(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf return f.mi.unmarshalPointer(b, p.Elem(), f.num, opts) } -func sizeGroup(m proto.Message, tagsize int, _ marshalOptions) int { - return 2*tagsize + proto.Size(m) +func sizeGroup(m proto.Message, tagsize int, opts marshalOptions) int { + return 2*tagsize + opts.Options().Size(m) } func appendGroup(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { @@ -482,10 +495,14 @@ func appendMessageSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshal b = protowire.AppendVarint(b, f.wiretag) siz := f.mi.sizePointer(v, opts) b = protowire.AppendVarint(b, uint64(siz)) + before := len(b) b, err = f.mi.marshalAppendPointer(b, v, opts) if err != nil { return b, err } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } } return b, nil } @@ -520,28 +537,34 @@ func isInitMessageSliceInfo(p pointer, f *coderFieldInfo) error { return nil } -func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, _ marshalOptions) int { +func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, opts marshalOptions) 
int { + mopts := opts.Options() s := p.PointerSlice() n := 0 for _, v := range s { m := asMessage(v.AsValueOf(goType.Elem())) - n += protowire.SizeBytes(proto.Size(m)) + tagsize + n += protowire.SizeBytes(mopts.Size(m)) + tagsize } return n } func appendMessageSlice(b []byte, p pointer, wiretag uint64, goType reflect.Type, opts marshalOptions) ([]byte, error) { + mopts := opts.Options() s := p.PointerSlice() var err error for _, v := range s { m := asMessage(v.AsValueOf(goType.Elem())) b = protowire.AppendVarint(b, wiretag) - siz := proto.Size(m) + siz := mopts.Size(m) b = protowire.AppendVarint(b, uint64(siz)) - b, err = opts.Options().MarshalAppend(b, m) + before := len(b) + b, err = mopts.MarshalAppend(b, m) if err != nil { return b, err } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } } return b, nil } @@ -582,11 +605,12 @@ func isInitMessageSlice(p pointer, goType reflect.Type) error { // Slices of messages func sizeMessageSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { + mopts := opts.Options() list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() - n += protowire.SizeBytes(proto.Size(m)) + tagsize + n += protowire.SizeBytes(mopts.Size(m)) + tagsize } return n } @@ -597,13 +621,17 @@ func appendMessageSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() b = protowire.AppendVarint(b, wiretag) - siz := proto.Size(m) + siz := mopts.Size(m) b = protowire.AppendVarint(b, uint64(siz)) + before := len(b) var err error b, err = mopts.MarshalAppend(b, m) if err != nil { return b, err } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } } return b, nil } @@ -651,11 +679,12 @@ var coderMessageSliceValue = valueCoderFuncs{ } func sizeGroupSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { + mopts := opts.Options() list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() - n += 2*tagsize + proto.Size(m) + n += 2*tagsize + mopts.Size(m) } return n } @@ -738,12 +767,13 @@ func makeGroupSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) } } -func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, _ marshalOptions) int { +func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, opts marshalOptions) int { + mopts := opts.Options() s := p.PointerSlice() n := 0 for _, v := range s { m := asMessage(v.AsValueOf(messageType.Elem())) - n += 2*tagsize + proto.Size(m) + n += 2*tagsize + mopts.Size(m) } return n } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index 111b9d16f..fb35f0bae 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -9,6 +9,7 @@ import ( "sort" "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" ) @@ -240,11 +241,16 @@ func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coder size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) size += mapi.valFuncs.size(val, mapValTagSize, opts) b = 
protowire.AppendVarint(b, uint64(size)) + before := len(b) b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts) if err != nil { return nil, err } - return mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts) + b, err = mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts) + if measuredSize := len(b) - before; size != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(size, measuredSize) + } + return b, err } else { key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey() val := pointerOfValue(valrv) @@ -259,7 +265,12 @@ func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coder } b = protowire.AppendVarint(b, mapi.valWiretag) b = protowire.AppendVarint(b, uint64(valSize)) - return f.mi.marshalAppendPointer(b, val, opts) + before := len(b) + b, err = f.mi.marshalAppendPointer(b, val, opts) + if measuredSize := len(b) - before; valSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(valSize, measuredSize) + } + return b, err } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go index c2a803bb2..c1c33d005 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -167,6 +167,7 @@ func aberrantLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor { ed := &filedesc.Enum{L2: new(filedesc.EnumL2)} ed.L0.FullName = AberrantDeriveFullName(t) // e.g., github_com.user.repo.MyEnum ed.L0.ParentFile = filedesc.SurrogateProto3 + ed.L1.EditionFeatures = ed.L0.ParentFile.L1.EditionFeatures ed.L2.Values.List = append(ed.L2.Values.List, filedesc.EnumValue{}) // TODO: Use the presence of a UnmarshalJSON method to determine proto2? 
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 87b30d050..6e8677ee6 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -118,7 +118,7 @@ func (xi *ExtensionInfo) initFromLegacy() { xd.L1.Number = protoreflect.FieldNumber(xi.Field) xd.L1.Cardinality = fd.L1.Cardinality xd.L1.Kind = fd.L1.Kind - xd.L2.IsPacked = fd.L1.IsPacked + xd.L1.EditionFeatures = fd.L1.EditionFeatures xd.L2.Default = fd.L1.Default xd.L1.Extendee = Export{}.MessageDescriptorOf(xi.ExtendedType) xd.L2.Enum = ed diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go index 9ab091086..b649f1124 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go @@ -7,7 +7,7 @@ package impl import ( "bytes" "compress/gzip" - "io/ioutil" + "io" "sync" "google.golang.org/protobuf/internal/filedesc" @@ -51,7 +51,7 @@ func legacyLoadFileDesc(b []byte) protoreflect.FileDescriptor { if err != nil { panic(err) } - b2, err := ioutil.ReadAll(zr) + b2, err := io.ReadAll(zr) if err != nil { panic(err) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 2ab2c6297..950e9a1fe 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -204,6 +204,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName } } + md.L1.EditionFeatures = md.L0.ParentFile.L1.EditionFeatures // Obtain a list of oneof wrapper types. 
var oneofWrappers []reflect.Type methods := make([]reflect.Method, 0, 2) @@ -250,6 +251,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName od := &md.L2.Oneofs.List[n] od.L0.FullName = md.FullName().Append(protoreflect.Name(tag)) od.L0.ParentFile = md.L0.ParentFile + od.L1.EditionFeatures = md.L1.EditionFeatures od.L0.Parent = md od.L0.Index = n @@ -260,6 +262,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName aberrantAppendField(md, f.Type, tag, "", "") fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1] fd.L1.ContainingOneof = od + fd.L1.EditionFeatures = od.L1.EditionFeatures od.L1.Fields.List = append(od.L1.Fields.List, fd) } } @@ -307,14 +310,14 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, fd.L0.Parent = md fd.L0.Index = n - if fd.L1.IsWeak || fd.L1.HasPacked { + if fd.L1.IsWeak || fd.L1.EditionFeatures.IsPacked { fd.L1.Options = func() protoreflect.ProtoMessage { opts := descopts.Field.ProtoReflect().New() if fd.L1.IsWeak { opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) } - if fd.L1.HasPacked { - opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked)) + if fd.L1.EditionFeatures.IsPacked { + opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked)) } return opts.Interface() } @@ -344,6 +347,7 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, md2.L0.ParentFile = md.L0.ParentFile md2.L0.Parent = md md2.L0.Index = n + md2.L1.EditionFeatures = md.L1.EditionFeatures md2.L1.IsMapEntry = true md2.L2.Options = func() protoreflect.ProtoMessage { diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index d9ea010be..a6f0dbdad 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -247,11 +247,10 @@ func (m *extensionMap) Range(f func(protoreflect.FieldDescriptor, protoreflect.V } } } -func (m *extensionMap) Has(xt protoreflect.ExtensionType) (ok bool) { +func (m *extensionMap) Has(xd protoreflect.ExtensionTypeDescriptor) (ok bool) { if m == nil { return false } - xd := xt.TypeDescriptor() x, ok := (*m)[int32(xd.Number())] if !ok { return false @@ -261,25 +260,22 @@ func (m *extensionMap) Has(xt protoreflect.ExtensionType) (ok bool) { return x.Value().List().Len() > 0 case xd.IsMap(): return x.Value().Map().Len() > 0 - case xd.Message() != nil: - return x.Value().Message().IsValid() } return true } -func (m *extensionMap) Clear(xt protoreflect.ExtensionType) { - delete(*m, int32(xt.TypeDescriptor().Number())) +func (m *extensionMap) Clear(xd protoreflect.ExtensionTypeDescriptor) { + delete(*m, int32(xd.Number())) } -func (m *extensionMap) Get(xt protoreflect.ExtensionType) protoreflect.Value { - xd := xt.TypeDescriptor() +func (m *extensionMap) Get(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value { if m != nil { if x, ok := (*m)[int32(xd.Number())]; ok { return x.Value() } } - return xt.Zero() + return xd.Type().Zero() } -func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) { - xd := xt.TypeDescriptor() +func (m *extensionMap) Set(xd protoreflect.ExtensionTypeDescriptor, v protoreflect.Value) { + xt := xd.Type() isValid := true switch { case !xt.IsValidValue(v): @@ -292,7 +288,7 @@ func (m 
*extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) isValid = v.Message().IsValid() } if !isValid { - panic(fmt.Sprintf("%v: assigning invalid value", xt.TypeDescriptor().FullName())) + panic(fmt.Sprintf("%v: assigning invalid value", xd.FullName())) } if *m == nil { @@ -302,16 +298,15 @@ func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) x.Set(xt, v) (*m)[int32(xd.Number())] = x } -func (m *extensionMap) Mutable(xt protoreflect.ExtensionType) protoreflect.Value { - xd := xt.TypeDescriptor() +func (m *extensionMap) Mutable(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value { if xd.Kind() != protoreflect.MessageKind && xd.Kind() != protoreflect.GroupKind && !xd.IsList() && !xd.IsMap() { panic("invalid Mutable on field with non-composite type") } if x, ok := (*m)[int32(xd.Number())]; ok { return x.Value() } - v := xt.New() - m.Set(xt, v) + v := xd.Type().New() + m.Set(xd, v) return v } @@ -428,7 +423,7 @@ func (m *messageIfaceWrapper) protoUnwrap() interface{} { // checkField verifies that the provided field descriptor is valid. // Exactly one of the returned values is populated. -func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionType) { +func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionTypeDescriptor) { var fi *fieldInfo if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) { fi = mi.denseFields[n] @@ -457,7 +452,7 @@ func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, if !ok { panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName())) } - return nil, xtd.Type() + return nil, xtd } panic(fmt.Sprintf("field %v is invalid", fd.FullName())) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go index 741d6e5b6..29ba6bd35 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go @@ -27,8 +27,9 @@ func (m *messageState) protoUnwrap() interface{} { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageState) ProtoMethods() *protoiface.Methods { - m.messageInfo().init() - return &m.messageInfo().methods + mi := m.messageInfo() + mi.init() + return &mi.methods } // ProtoMessageInfo is a pseudo-internal API for allowing the v1 code @@ -41,8 +42,9 @@ func (m *messageState) ProtoMessageInfo() *MessageInfo { } func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - m.messageInfo().init() - for _, ri := range m.messageInfo().rangeInfos { + mi := m.messageInfo() + mi.init() + for _, ri := range mi.rangeInfos { switch ri := ri.(type) { case *fieldInfo: if ri.has(m.pointer()) { @@ -52,77 +54,86 @@ func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.V } case *oneofInfo: if n := ri.which(m.pointer()); n > 0 { - fi := m.messageInfo().fields[n] + fi := mi.fields[n] if !f(fi.fieldDesc, fi.get(m.pointer())) { return } } } } - m.messageInfo().extensionMap(m.pointer()).Range(f) + mi.extensionMap(m.pointer()).Range(f) } func (m *messageState) Has(fd protoreflect.FieldDescriptor) bool { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.has(m.pointer()) } 
else { - return m.messageInfo().extensionMap(m.pointer()).Has(xt) + return mi.extensionMap(m.pointer()).Has(xd) } } func (m *messageState) Clear(fd protoreflect.FieldDescriptor) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.clear(m.pointer()) } else { - m.messageInfo().extensionMap(m.pointer()).Clear(xt) + mi.extensionMap(m.pointer()).Clear(xd) } } func (m *messageState) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.get(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Get(xt) + return mi.extensionMap(m.pointer()).Get(xd) } } func (m *messageState) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.set(m.pointer(), v) } else { - m.messageInfo().extensionMap(m.pointer()).Set(xt, v) + mi.extensionMap(m.pointer()).Set(xd, v) } } func (m *messageState) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.mutable(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) + return mi.extensionMap(m.pointer()).Mutable(xd) } } func (m *messageState) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.newField() } else { - return xt.New() + return xd.Type().New() } } func (m *messageState) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { - m.messageInfo().init() - if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + mi := m.messageInfo() + mi.init() + if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { return od.Fields().ByNumber(oi.which(m.pointer())) } panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) } func (m *messageState) GetUnknown() protoreflect.RawFields { - m.messageInfo().init() - return m.messageInfo().getUnknown(m.pointer()) + mi := m.messageInfo() + mi.init() + return mi.getUnknown(m.pointer()) } func (m *messageState) SetUnknown(b protoreflect.RawFields) { - m.messageInfo().init() - m.messageInfo().setUnknown(m.pointer(), b) + mi := m.messageInfo() + mi.init() + mi.setUnknown(m.pointer(), b) } func (m *messageState) IsValid() bool { return !m.pointer().IsNil() @@ -147,8 +158,9 @@ func (m *messageReflectWrapper) protoUnwrap() interface{} { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods { - m.messageInfo().init() - return &m.messageInfo().methods + mi := m.messageInfo() + mi.init() + return &mi.methods } // ProtoMessageInfo is a pseudo-internal API for allowing the v1 code @@ -161,8 +173,9 @@ func (m *messageReflectWrapper) ProtoMessageInfo() *MessageInfo { } func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - 
m.messageInfo().init() - for _, ri := range m.messageInfo().rangeInfos { + mi := m.messageInfo() + mi.init() + for _, ri := range mi.rangeInfos { switch ri := ri.(type) { case *fieldInfo: if ri.has(m.pointer()) { @@ -172,77 +185,86 @@ func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, proto } case *oneofInfo: if n := ri.which(m.pointer()); n > 0 { - fi := m.messageInfo().fields[n] + fi := mi.fields[n] if !f(fi.fieldDesc, fi.get(m.pointer())) { return } } } } - m.messageInfo().extensionMap(m.pointer()).Range(f) + mi.extensionMap(m.pointer()).Range(f) } func (m *messageReflectWrapper) Has(fd protoreflect.FieldDescriptor) bool { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.has(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Has(xt) + return mi.extensionMap(m.pointer()).Has(xd) } } func (m *messageReflectWrapper) Clear(fd protoreflect.FieldDescriptor) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.clear(m.pointer()) } else { - m.messageInfo().extensionMap(m.pointer()).Clear(xt) + mi.extensionMap(m.pointer()).Clear(xd) } } func (m *messageReflectWrapper) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.get(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Get(xt) + return mi.extensionMap(m.pointer()).Get(xd) } } func (m *messageReflectWrapper) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.set(m.pointer(), v) } else { - m.messageInfo().extensionMap(m.pointer()).Set(xt, v) + mi.extensionMap(m.pointer()).Set(xd, v) } } func (m *messageReflectWrapper) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.mutable(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) + return mi.extensionMap(m.pointer()).Mutable(xd) } } func (m *messageReflectWrapper) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.newField() } else { - return xt.New() + return xd.Type().New() } } func (m *messageReflectWrapper) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { - m.messageInfo().init() - if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + mi := m.messageInfo() + mi.init() + if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { return od.Fields().ByNumber(oi.which(m.pointer())) } panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) } func (m *messageReflectWrapper) GetUnknown() protoreflect.RawFields { - m.messageInfo().init() - return m.messageInfo().getUnknown(m.pointer()) + mi := m.messageInfo() + mi.init() + 
return mi.getUnknown(m.pointer()) } func (m *messageReflectWrapper) SetUnknown(b protoreflect.RawFields) { - m.messageInfo().init() - m.messageInfo().setUnknown(m.pointer(), b) + mi := m.messageInfo() + mi.init() + mi.setUnknown(m.pointer(), b) } func (m *messageReflectWrapper) IsValid() bool { return !m.pointer().IsNil() diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index a50fcfb49..fc6bfc396 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,7 +51,7 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 33 + Minor = 34 Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index e5b03b567..d75a6534c 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -51,6 +51,8 @@ type UnmarshalOptions struct { // Unmarshal parses the wire-format message in b and places the result in m. // The provided message must be mutable (e.g., a non-nil pointer to a message). +// +// See the [UnmarshalOptions] type if you need more control. func Unmarshal(b []byte, m Message) error { _, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect()) return err diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index 4fed202f9..1f847bcc3 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -5,12 +5,17 @@ package proto import ( + "errors" + "fmt" + "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/runtime/protoiface" + + protoerrors "google.golang.org/protobuf/internal/errors" ) // MarshalOptions configures the marshaler. @@ -70,7 +75,32 @@ type MarshalOptions struct { UseCachedSize bool } +// flags turns the specified MarshalOptions (user-facing) into +// protoiface.MarshalInputFlags (used internally by the marshaler). +// +// See impl.marshalOptions.Options for the inverse operation. +func (o MarshalOptions) flags() protoiface.MarshalInputFlags { + var flags protoiface.MarshalInputFlags + + // Note: o.AllowPartial is always forced to true by MarshalOptions.marshal, + // which is why it is not a part of MarshalInputFlags. + + if o.Deterministic { + flags |= protoiface.MarshalDeterministic + } + + if o.UseCachedSize { + flags |= protoiface.MarshalUseCachedSize + } + + return flags +} + // Marshal returns the wire-format encoding of m. +// +// This is the most common entry point for encoding a Protobuf message. +// +// See the [MarshalOptions] type if you need more control. func Marshal(m Message) ([]byte, error) { // Treat nil message interface as an empty message; nothing to output. if m == nil { @@ -116,6 +146,9 @@ func emptyBytesForMessage(m Message) []byte { // MarshalAppend appends the wire-format encoding of m to b, // returning the result. +// +// This is a less common entry point than [Marshal], which is only needed if you +// need to supply your own buffers for performance reasons. 
func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { // Treat nil message interface as an empty message; nothing to append. if m == nil { @@ -145,12 +178,7 @@ func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoifac in := protoiface.MarshalInput{ Message: m, Buf: b, - } - if o.Deterministic { - in.Flags |= protoiface.MarshalDeterministic - } - if o.UseCachedSize { - in.Flags |= protoiface.MarshalUseCachedSize + Flags: o.flags(), } if methods.Size != nil { sout := methods.Size(protoiface.SizeInput{ @@ -168,6 +196,10 @@ func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoifac out.Buf, err = o.marshalMessageSlow(b, m) } if err != nil { + var mismatch *protoerrors.SizeMismatchError + if errors.As(err, &mismatch) { + return out, fmt.Errorf("marshaling %s: %v", string(m.Descriptor().FullName()), err) + } return out, err } if allowPartial { diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index 17899a3a7..c9c8721a6 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -11,18 +11,21 @@ import ( // HasExtension reports whether an extension field is populated. // It returns false if m is invalid or if xt does not extend m. func HasExtension(m Message, xt protoreflect.ExtensionType) bool { - // Treat nil message interface as an empty message; no populated fields. - if m == nil { + // Treat nil message interface or descriptor as an empty message; no populated + // fields. + if m == nil || xt == nil { return false } // As a special-case, we reports invalid or mismatching descriptors // as always not being populated (since they aren't). - if xt == nil || m.ProtoReflect().Descriptor() != xt.TypeDescriptor().ContainingMessage() { + mr := m.ProtoReflect() + xd := xt.TypeDescriptor() + if mr.Descriptor() != xd.ContainingMessage() { return false } - return m.ProtoReflect().Has(xt.TypeDescriptor()) + return mr.Has(xd) } // ClearExtension clears an extension field such that subsequent diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go index 312d5d45c..575d14831 100644 --- a/vendor/google.golang.org/protobuf/proto/messageset.go +++ b/vendor/google.golang.org/protobuf/proto/messageset.go @@ -47,11 +47,16 @@ func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]b func (o MarshalOptions) marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { b = messageset.AppendFieldStart(b, fd.Number()) b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType) - b = protowire.AppendVarint(b, uint64(o.Size(value.Message().Interface()))) + calculatedSize := o.Size(value.Message().Interface()) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) b, err := o.marshalMessage(b, value.Message()) if err != nil { return b, err } + if measuredSize := len(b) - before; calculatedSize != measuredSize { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } b = messageset.AppendFieldEnd(b) return b, nil } diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go index f1692b49b..052fb5ae3 100644 --- a/vendor/google.golang.org/protobuf/proto/size.go +++ b/vendor/google.golang.org/protobuf/proto/size.go @@ -34,6 +34,7 @@ func (o MarshalOptions) size(m 
protoreflect.Message) (size int) { if methods != nil && methods.Size != nil { out := methods.Size(protoiface.SizeInput{ Message: m, + Flags: o.flags(), }) return out.Size } @@ -42,6 +43,7 @@ func (o MarshalOptions) size(m protoreflect.Message) (size int) { // This case is mainly used for legacy types with a Marshal method. out, _ := methods.Marshal(protoiface.MarshalInput{ Message: m, + Flags: o.flags(), }) return len(out.Buf) } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index baa0cc621..8fbecb4f5 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -13,6 +13,7 @@ package protodesc import ( + "google.golang.org/protobuf/internal/editionssupport" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/pragma" @@ -91,15 +92,17 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot switch fd.GetSyntax() { case "proto2", "": f.L1.Syntax = protoreflect.Proto2 + f.L1.Edition = filedesc.EditionProto2 case "proto3": f.L1.Syntax = protoreflect.Proto3 + f.L1.Edition = filedesc.EditionProto3 case "editions": f.L1.Syntax = protoreflect.Editions f.L1.Edition = fromEditionProto(fd.GetEdition()) default: return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) } - if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < SupportedEditionsMinimum || fd.GetEdition() > SupportedEditionsMaximum) { + if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) { return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) } f.L1.Path = fd.GetName() @@ -114,9 +117,7 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot opts = proto.Clone(opts).(*descriptorpb.FileOptions) f.L2.Options = func() protoreflect.ProtoMessage { return opts } } - if f.L1.Syntax == protoreflect.Editions { - initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures()) - } + initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures()) f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) for _, i := range fd.GetPublicDependency() { @@ -219,10 +220,10 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil { return nil, err } - if err := validateMessageDeclarations(f.L1.Messages.List, fd.GetMessageType()); err != nil { + if err := validateMessageDeclarations(f, f.L1.Messages.List, fd.GetMessageType()); err != nil { return nil, err } - if err := validateExtensionDeclarations(f.L1.Extensions.List, fd.GetExtension()); err != nil { + if err := validateExtensionDeclarations(f, f.L1.Extensions.List, fd.GetExtension()); err != nil { return nil, err } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index b3278163c..856175542 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -69,9 +69,7 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { return nil, err } - if m.Base.L0.ParentFile.Syntax() == 
protoreflect.Editions { - m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures()) - } + m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures()) if opts := md.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.MessageOptions) m.L2.Options = func() protoreflect.ProtoMessage { return opts } @@ -146,13 +144,15 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil { return nil, err } + f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures()) f.L1.IsProto3Optional = fd.GetProto3Optional() if opts := fd.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.FieldOptions) f.L1.Options = func() protoreflect.ProtoMessage { return opts } f.L1.IsWeak = opts.GetWeak() - f.L1.HasPacked = opts.Packed != nil - f.L1.IsPacked = opts.GetPacked() + if opts.Packed != nil { + f.L1.EditionFeatures.IsPacked = opts.GetPacked() + } } f.L1.Number = protoreflect.FieldNumber(fd.GetNumber()) f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel()) @@ -163,32 +163,12 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc f.L1.StringName.InitJSON(fd.GetJsonName()) } - if f.Base.L0.ParentFile.Syntax() == protoreflect.Editions { - f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures()) - - if f.L1.EditionFeatures.IsLegacyRequired { - f.L1.Cardinality = protoreflect.Required - } - // We reuse the existing field because the old option `[packed = - // true]` is mutually exclusive with the editions feature. - if canBePacked(fd) { - f.L1.HasPacked = true - f.L1.IsPacked = f.L1.EditionFeatures.IsPacked - } - - // We pretend this option is always explicitly set because the only - // use of HasEnforceUTF8 is to determine whether to use EnforceUTF8 - // or to return the appropriate default. - // When using editions we either parse the option or resolve the - // appropriate default here (instead of later when this option is - // requested from the descriptor). - // In proto2/proto3 syntax HasEnforceUTF8 might be false. 
- f.L1.HasEnforceUTF8 = true - f.L1.EnforceUTF8 = f.L1.EditionFeatures.IsUTF8Validated + if f.L1.EditionFeatures.IsLegacyRequired { + f.L1.Cardinality = protoreflect.Required + } - if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded { - f.L1.Kind = protoreflect.GroupKind - } + if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded { + f.L1.Kind = protoreflect.GroupKind } } return fs, nil @@ -201,12 +181,10 @@ func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDesc if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil { return nil, err } + o.L1.EditionFeatures = mergeEditionFeatures(parent, od.GetOptions().GetFeatures()) if opts := od.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.OneofOptions) o.L1.Options = func() protoreflect.ProtoMessage { return opts } - if parent.Syntax() == protoreflect.Editions { - o.L1.EditionFeatures = mergeEditionFeatures(parent, opts.GetFeatures()) - } } } return os, nil @@ -220,10 +198,13 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil { return nil, err } + x.L1.EditionFeatures = mergeEditionFeatures(parent, xd.GetOptions().GetFeatures()) if opts := xd.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.FieldOptions) x.L2.Options = func() protoreflect.ProtoMessage { return opts } - x.L2.IsPacked = opts.GetPacked() + if opts.Packed != nil { + x.L1.EditionFeatures.IsPacked = opts.GetPacked() + } } x.L1.Number = protoreflect.FieldNumber(xd.GetNumber()) x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel()) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index e4dcaf876..c62930867 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -45,11 +45,11 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri if allowAlias && !foundAlias { return errors.New("enum %q allows aliases, but none were found", e.FullName()) } - if e.Syntax() == protoreflect.Proto3 { + if !e.IsClosed() { if v := e.Values().Get(0); v.Number() != 0 { - return errors.New("enum %q using proto3 semantics must have zero number for the first value", v.FullName()) + return errors.New("enum %q using open semantics must have zero number for the first value", v.FullName()) } - // Verify that value names in proto3 do not conflict if the + // Verify that value names in open enums do not conflict if the // case-insensitive prefix is removed. 
// See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055 names := map[string]protoreflect.EnumValueDescriptor{} @@ -58,7 +58,7 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri v1 := e.Values().Get(i) s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix)) if v2, ok := names[s]; ok && v1.Number() != v2.Number() { - return errors.New("enum %q using proto3 semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) + return errors.New("enum %q using open semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) } names[s] = v1 } @@ -80,7 +80,9 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri return nil } -func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { +func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { + // There are a few limited exceptions only for proto3 + isProto3 := file.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3) for i, md := range mds { m := &ms[i] @@ -107,10 +109,10 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if isMessageSet && !flags.ProtoLegacy { return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName()) } - if isMessageSet && (m.Syntax() == protoreflect.Proto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { + if isMessageSet && (isProto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { return errors.New("message %q is an invalid proto1 MessageSet", m.FullName()) } - if m.Syntax() == protoreflect.Proto3 { + if isProto3 { if m.ExtensionRanges().Len() > 0 { return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName()) } @@ -149,7 +151,7 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee()) } if f.L1.IsProto3Optional { - if f.Syntax() != protoreflect.Proto3 { + if !isProto3 { return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName()) } if f.Cardinality() != protoreflect.Optional { @@ -162,26 +164,29 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if f.IsWeak() && !flags.ProtoLegacy { return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) } - if f.IsWeak() && (f.Syntax() != protoreflect.Proto2 || !isOptionalMessage(f) || f.ContainingOneof() != nil) { + if f.IsWeak() && (!f.HasPresence() || !isOptionalMessage(f) || f.ContainingOneof() != nil) { return errors.New("message field %q may only be weak for an optional message", f.FullName()) } if f.IsPacked() && !isPackable(f) { return errors.New("message field %q is not packable", f.FullName()) } - if err := checkValidGroup(f); err != nil { + if err := checkValidGroup(file, f); err != nil { return errors.New("message field %q is an invalid group: %v", f.FullName(), err) } if err := checkValidMap(f); err != nil { return errors.New("message field %q is an invalid map: %v", f.FullName(), err) } - if f.Syntax() == protoreflect.Proto3 { + if isProto3 { if f.Cardinality() == protoreflect.Required { return errors.New("message field %q using proto3 semantics cannot be required", f.FullName()) } - if f.Enum() != nil && 
!f.Enum().IsPlaceholder() && f.Enum().Syntax() != protoreflect.Proto3 { - return errors.New("message field %q using proto3 semantics may only depend on a proto3 enum", f.FullName()) + if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() { + return errors.New("message field %q using proto3 semantics may only depend on open enums", f.FullName()) } } + if f.Cardinality() == protoreflect.Optional && !f.HasPresence() && f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() { + return errors.New("message field %q with implicit presence may only use open enums", f.FullName()) + } } seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs for j := range md.GetOneofDecl() { @@ -215,17 +220,17 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil { return err } - if err := validateMessageDeclarations(m.L1.Messages.List, md.GetNestedType()); err != nil { + if err := validateMessageDeclarations(file, m.L1.Messages.List, md.GetNestedType()); err != nil { return err } - if err := validateExtensionDeclarations(m.L1.Extensions.List, md.GetExtension()); err != nil { + if err := validateExtensionDeclarations(file, m.L1.Extensions.List, md.GetExtension()); err != nil { return err } } return nil } -func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { +func validateExtensionDeclarations(f *filedesc.File, xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { for i, xd := range xds { x := &xs[i] // NOTE: Avoid using the IsValid method since extensions to MessageSet @@ -267,13 +272,13 @@ func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb. if x.IsPacked() && !isPackable(x) { return errors.New("extension field %q is not packable", x.FullName()) } - if err := checkValidGroup(x); err != nil { + if err := checkValidGroup(f, x); err != nil { return errors.New("extension field %q is an invalid group: %v", x.FullName(), err) } if md := x.Message(); md != nil && md.IsMapEntry() { return errors.New("extension field %q cannot be a map entry", x.FullName()) } - if x.Syntax() == protoreflect.Proto3 { + if f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3) { switch x.ContainingMessage().FullName() { case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName(): case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName(): @@ -309,21 +314,25 @@ func isPackable(fd protoreflect.FieldDescriptor) bool { // checkValidGroup reports whether fd is a valid group according to the same // rules that protoc imposes. 
-func checkValidGroup(fd protoreflect.FieldDescriptor) error { +func checkValidGroup(f *filedesc.File, fd protoreflect.FieldDescriptor) error { md := fd.Message() switch { case fd.Kind() != protoreflect.GroupKind: return nil - case fd.Syntax() == protoreflect.Proto3: + case f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3): return errors.New("invalid under proto3 semantics") case md == nil || md.IsPlaceholder(): return errors.New("message must be resolvable") - case fd.FullName().Parent() != md.FullName().Parent(): - return errors.New("message and field must be declared in the same scope") - case !unicode.IsUpper(rune(md.Name()[0])): - return errors.New("message name must start with an uppercase") - case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): - return errors.New("field name must be lowercased form of the message name") + } + if f.L1.Edition < fromEditionProto(descriptorpb.Edition_EDITION_2023) { + switch { + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case !unicode.IsUpper(rune(md.Name()[0])): + return errors.New("message name must start with an uppercase") + case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): + return errors.New("field name must be lowercased form of the message name") + } } return nil } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index 2a6b29d17..f6a1fec6e 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -17,11 +17,6 @@ import ( gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" ) -const ( - SupportedEditionsMinimum = descriptorpb.Edition_EDITION_PROTO2 - SupportedEditionsMaximum = descriptorpb.Edition_EDITION_2023 -) - var defaults = &descriptorpb.FeatureSetDefaults{} var defaultsCacheMu sync.Mutex var defaultsCache = make(map[filedesc.Edition]*descriptorpb.FeatureSet) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go index 9d6e05420..a5de8d400 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -73,6 +73,16 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() { p.Syntax = proto.String(file.Syntax().String()) } + if file.Syntax() == protoreflect.Editions { + desc := file + if fileImportDesc, ok := file.(protoreflect.FileImport); ok { + desc = fileImportDesc.FileDescriptor + } + + if editionsInterface, ok := desc.(interface{ Edition() int32 }); ok { + p.Edition = descriptorpb.Edition(editionsInterface.Edition()).Enum() + } + } return p } @@ -153,6 +163,18 @@ func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.Fi if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() { p.Proto3Optional = proto.Bool(true) } + if field.Syntax() == protoreflect.Editions { + // Editions have no group keyword, this type is only set so that downstream users continue + // treating this as delimited encoding. 
+ if p.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP { + p.Type = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE.Enum() + } + // Editions have no required keyword, this label is only set so that downstream users continue + // treating it as required. + if p.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED { + p.Label = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum() + } + } if field.HasDefault() { def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor) if err != nil && field.DefaultEnumValue() != nil { diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index 00b01fbd8..c85bfaa5b 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -161,7 +161,7 @@ const ( // IsValid reports whether the syntax is valid. func (s Syntax) IsValid() bool { switch s { - case Proto2, Proto3: + case Proto2, Proto3, Editions: return true default: return false diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 60ff62b4c..5b80afe52 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -544,6 +544,12 @@ type EnumDescriptor interface { // ReservedRanges is a list of reserved ranges of enum numbers. ReservedRanges() EnumRanges + // IsClosed reports whether this enum uses closed semantics. + // See https://protobuf.dev/programming-guides/enum/#definitions. + // Note: the Go protobuf implementation is not spec compliant and treats + // all enums as open enums. + IsClosed() bool + isEnumDescriptor } type isEnumDescriptor interface{ ProtoType(EnumDescriptor) } diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index 25de5ae00..9f046f979 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -6,9 +6,9 @@ // https://developers.google.com/open-source/licenses/bsd // Code generated by protoc-gen-go. DO NOT EDIT. -// source: reflect/protodesc/proto/go_features.proto +// source: google/protobuf/go_features.proto -package proto +package gofeaturespb import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -30,7 +30,7 @@ type GoFeatures struct { func (x *GoFeatures) Reset() { *x = GoFeatures{} if protoimpl.UnsafeEnabled { - mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0] + mi := &file_google_protobuf_go_features_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -43,7 +43,7 @@ func (x *GoFeatures) String() string { func (*GoFeatures) ProtoMessage() {} func (x *GoFeatures) ProtoReflect() protoreflect.Message { - mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0] + mi := &file_google_protobuf_go_features_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -56,7 +56,7 @@ func (x *GoFeatures) ProtoReflect() protoreflect.Message { // Deprecated: Use GoFeatures.ProtoReflect.Descriptor instead. 
func (*GoFeatures) Descriptor() ([]byte, []int) { - return file_reflect_protodesc_proto_go_features_proto_rawDescGZIP(), []int{0} + return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0} } func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { @@ -66,69 +66,67 @@ func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { return false } -var file_reflect_protodesc_proto_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ +var file_google_protobuf_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.FeatureSet)(nil), ExtensionType: (*GoFeatures)(nil), Field: 1002, - Name: "google.protobuf.go", + Name: "pb.go", Tag: "bytes,1002,opt,name=go", - Filename: "reflect/protodesc/proto/go_features.proto", + Filename: "google/protobuf/go_features.proto", }, } // Extension fields to descriptorpb.FeatureSet. var ( - // optional google.protobuf.GoFeatures go = 1002; - E_Go = &file_reflect_protodesc_proto_go_features_proto_extTypes[0] + // optional pb.GoFeatures go = 1002; + E_Go = &file_google_protobuf_go_features_proto_extTypes[0] ) -var File_reflect_protodesc_proto_go_features_proto protoreflect.FileDescriptor - -var file_reflect_protodesc_proto_go_features_proto_rawDesc = []byte{ - 0x0a, 0x29, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x64, - 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x1a, 0x20, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, - 0x0a, 0x0a, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5c, 0x0a, 0x1a, - 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, - 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x42, 0x1f, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, 0x75, - 0x65, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, 0xe7, - 0x07, 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, - 0x61, 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x49, 0x0a, 0x02, 0x67, 0x6f, - 0x12, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x64, 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, +var File_google_protobuf_go_features_proto protoreflect.FileDescriptor + +var file_google_protobuf_go_features_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 
0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x0a, 0x47, 0x6f, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5c, 0x0a, 0x1a, 0x6c, 0x65, 0x67, 0x61, 0x63, + 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, + 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x1f, 0x88, 0x01, 0x01, + 0x98, 0x01, 0x06, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, 0x75, 0x65, 0x18, 0xe6, 0x07, 0xa2, + 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, 0xe7, 0x07, 0x52, 0x17, 0x6c, 0x65, + 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x4a, 0x73, 0x6f, + 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, + 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, + 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x70, 0x62, } var ( - file_reflect_protodesc_proto_go_features_proto_rawDescOnce sync.Once - file_reflect_protodesc_proto_go_features_proto_rawDescData = file_reflect_protodesc_proto_go_features_proto_rawDesc + file_google_protobuf_go_features_proto_rawDescOnce sync.Once + file_google_protobuf_go_features_proto_rawDescData = file_google_protobuf_go_features_proto_rawDesc ) -func file_reflect_protodesc_proto_go_features_proto_rawDescGZIP() []byte { - file_reflect_protodesc_proto_go_features_proto_rawDescOnce.Do(func() { - file_reflect_protodesc_proto_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_reflect_protodesc_proto_go_features_proto_rawDescData) +func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { + file_google_protobuf_go_features_proto_rawDescOnce.Do(func() { + file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_go_features_proto_rawDescData) }) - return file_reflect_protodesc_proto_go_features_proto_rawDescData + return file_google_protobuf_go_features_proto_rawDescData } -var file_reflect_protodesc_proto_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_reflect_protodesc_proto_go_features_proto_goTypes = []interface{}{ - (*GoFeatures)(nil), // 0: google.protobuf.GoFeatures +var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_go_features_proto_goTypes = []interface{}{ + (*GoFeatures)(nil), // 0: pb.GoFeatures (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet } -var file_reflect_protodesc_proto_go_features_proto_depIdxs = []int32{ - 1, // 0: google.protobuf.go:extendee -> google.protobuf.FeatureSet - 0, // 1: google.protobuf.go:type_name -> google.protobuf.GoFeatures +var file_google_protobuf_go_features_proto_depIdxs = []int32{ + 1, // 0: pb.go:extendee -> google.protobuf.FeatureSet + 0, // 1: pb.go:type_name -> 
pb.GoFeatures 2, // [2:2] is the sub-list for method output_type 2, // [2:2] is the sub-list for method input_type 1, // [1:2] is the sub-list for extension type_name @@ -136,13 +134,13 @@ var file_reflect_protodesc_proto_go_features_proto_depIdxs = []int32{ 0, // [0:0] is the sub-list for field type_name } -func init() { file_reflect_protodesc_proto_go_features_proto_init() } -func file_reflect_protodesc_proto_go_features_proto_init() { - if File_reflect_protodesc_proto_go_features_proto != nil { +func init() { file_google_protobuf_go_features_proto_init() } +func file_google_protobuf_go_features_proto_init() { + if File_google_protobuf_go_features_proto != nil { return } if !protoimpl.UnsafeEnabled { - file_reflect_protodesc_proto_go_features_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GoFeatures); i { case 0: return &v.state @@ -159,19 +157,19 @@ func file_reflect_protodesc_proto_go_features_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_reflect_protodesc_proto_go_features_proto_rawDesc, + RawDescriptor: file_google_protobuf_go_features_proto_rawDesc, NumEnums: 0, NumMessages: 1, NumExtensions: 1, NumServices: 0, }, - GoTypes: file_reflect_protodesc_proto_go_features_proto_goTypes, - DependencyIndexes: file_reflect_protodesc_proto_go_features_proto_depIdxs, - MessageInfos: file_reflect_protodesc_proto_go_features_proto_msgTypes, - ExtensionInfos: file_reflect_protodesc_proto_go_features_proto_extTypes, + GoTypes: file_google_protobuf_go_features_proto_goTypes, + DependencyIndexes: file_google_protobuf_go_features_proto_depIdxs, + MessageInfos: file_google_protobuf_go_features_proto_msgTypes, + ExtensionInfos: file_google_protobuf_go_features_proto_extTypes, }.Build() - File_reflect_protodesc_proto_go_features_proto = out.File - file_reflect_protodesc_proto_go_features_proto_rawDesc = nil - file_reflect_protodesc_proto_go_features_proto_goTypes = nil - file_reflect_protodesc_proto_go_features_proto_depIdxs = nil + File_google_protobuf_go_features_proto = out.File + file_google_protobuf_go_features_proto_rawDesc = nil + file_google_protobuf_go_features_proto_goTypes = nil + file_google_protobuf_go_features_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto deleted file mode 100644 index d24657129..000000000 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto +++ /dev/null @@ -1,28 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2023 Google Inc. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file or at -// https://developers.google.com/open-source/licenses/bsd - -syntax = "proto2"; - -package google.protobuf; - -import "google/protobuf/descriptor.proto"; - -option go_package = "google.golang.org/protobuf/types/gofeaturespb"; - -extend google.protobuf.FeatureSet { - optional GoFeatures go = 1002; -} - -message GoFeatures { - // Whether or not to generate the deprecated UnmarshalJSON method for enums. 
- optional bool legacy_unmarshal_json_enum = 1 [ - retention = RETENTION_RUNTIME, - targets = TARGET_TYPE_ENUM, - edition_defaults = { edition: EDITION_PROTO2, value: "true" }, - edition_defaults = { edition: EDITION_PROTO3, value: "false" } - ]; -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 4cb6178f8..22e517379 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -710,7 +710,7 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.33.0 +# google.golang.org/protobuf v1.34.0 ## explicit; go 1.17 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext @@ -719,6 +719,7 @@ google.golang.org/protobuf/internal/descfmt google.golang.org/protobuf/internal/descopts google.golang.org/protobuf/internal/detrand google.golang.org/protobuf/internal/editiondefaults +google.golang.org/protobuf/internal/editionssupport google.golang.org/protobuf/internal/encoding/defval google.golang.org/protobuf/internal/encoding/json google.golang.org/protobuf/internal/encoding/messageset From 2066021ba1a2299b791df487f5280abc6d7df3db Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 2 May 2024 16:17:37 +0000 Subject: [PATCH 145/157] chore(deps): bump github.com/onsi/gomega from 1.33.0 to 1.33.1 Bumps [github.com/onsi/gomega](https://github.com/onsi/gomega) from 1.33.0 to 1.33.1. - [Release notes](https://github.com/onsi/gomega/releases) - [Changelog](https://github.com/onsi/gomega/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/gomega/compare/v1.33.0...v1.33.1) --- updated-dependencies: - dependency-name: github.com/onsi/gomega dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- vendor/github.com/onsi/gomega/CHANGELOG.md | 8 ++++++++ vendor/github.com/onsi/gomega/gomega_dsl.go | 6 +++--- vendor/modules.txt | 2 +- 5 files changed, 15 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index 2bb0abc0f..40434c1cb 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/golang/protobuf v1.5.4 github.com/kubernetes-csi/csi-lib-utils v0.16.0 github.com/onsi/ginkgo/v2 v2.17.2 - github.com/onsi/gomega v1.33.0 + github.com/onsi/gomega v1.33.1 github.com/pborman/uuid v1.2.1 github.com/pelletier/go-toml v1.9.5 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index e52b7e6a2..3c8f03294 100644 --- a/go.sum +++ b/go.sum @@ -213,8 +213,8 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g= github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= -github.com/onsi/gomega v1.33.0 h1:snPCflnZrpMsy94p4lXVEkHo12lmPnc3vY5XBbreexE= -github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 89230f199..62af14ad2 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,11 @@ +## 1.33.1 + +### Fixes +- fix confusing eventually docs [3a66379] + +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.17.1 to 2.17.2 [e9bc35a] + ## 1.33.0 ### Features diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 1980a63c7..9697d5134 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.33.0" +const GOMEGA_VERSION = "1.33.1" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). @@ -372,11 +372,11 @@ You can ensure that you get a number of consecutive successful tries before succ Finally, in addition to passing timeouts and a context to Eventually you can be more explicit with Eventually's chaining configuration methods: - Eventually(..., "1s", "2s", ctx).Should(...) + Eventually(..., "10s", "2s", ctx).Should(...) is equivalent to - Eventually(...).WithTimeout(time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...) + Eventually(...).WithTimeout(10*time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...) 
*/ func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() diff --git a/vendor/modules.txt b/vendor/modules.txt index 22e517379..ed9add388 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -372,7 +372,7 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.33.0 +# github.com/onsi/gomega v1.33.1 ## explicit; go 1.20 github.com/onsi/gomega github.com/onsi/gomega/format From 191dc31b2e1bf800c6a4ccd6f79c752e90a1ea0c Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Sun, 5 May 2024 03:32:02 +0000 Subject: [PATCH 146/157] fix: liveness probe failure when hostNetwork not enabled in controller --- charts/latest/blob-csi-driver-v0.0.0.tgz | Bin 5980 -> 6050 bytes .../templates/csi-blob-controller.yaml | 15 ++++++++++++++- charts/v1.24.0/blob-csi-driver-v1.24.0.tgz | Bin 5965 -> 6033 bytes .../templates/csi-blob-controller.yaml | 15 ++++++++++++++- charts/v1.24.1/blob-csi-driver-v1.24.1.tgz | Bin 5981 -> 6041 bytes .../templates/csi-blob-controller.yaml | 15 ++++++++++++++- 6 files changed, 42 insertions(+), 3 deletions(-) diff --git a/charts/latest/blob-csi-driver-v0.0.0.tgz b/charts/latest/blob-csi-driver-v0.0.0.tgz index c4864270d7048b336bb9ebe4033026d3ae8ce1a2..37ec283c098df13f5fa45ce1565c2bf195781726 100644 GIT binary patch delta 6003 zcmV-(7mVoKE}}1xPk&Z3e&JY3s)-|eBqzQ5cRC)3TuC^S01JSMRoCDBHvmZSS)ydc zPVa;_iOI$80^D6Jb{EjZWfRL5#Bw-U0B>H*F_(=c_T2CG)>^Gr>-6|o|J!P{vj4W) zr$^tlk5AhtCvQ$qk50a8wU66xPQOE~{jpHq0wFQ~uJvSE*<-=|MH=?WXW)V`+C__Y z4g3D;PZvuNvgUxWIq{`_cK%1e0ljtE1dS!*cn0WVJU~WxY7~3WMH!K*EmrtjjaH*o zd+8w`TB2q3e}UZq#C{FHdi`&=+wD$P|BsJa2mODJMw2lC8h_Sh69t732cHQd8DDl$ z^X^B{v;}EKQludyo~?|B`r8j&_nNuHUUt!7I%M*ig8-^t+Janjwjjzr052fGglOIc z6W|mi2t`8Cv;4SIOmcCo&C3RoE?EEtA+9-_KsAa{7hiQr(CakXoy`O(bD*b{YXmbQ zB*$9}RCNwj(|_Vo_cRL?#%v2R0zv-RW{SD#kf7CW{M0yFPpW3SEO5eQ;yReP#J*5Z zYPDCd&Skqy)Xi6;ZMAK$;N@f8E%V&;dGb%Bmz?%x6rGJrGJ|_z-A)3==v!f`T`$8Lv9;1>o z*9G2HUw_&%%D8%H35;GL#Vle|G!9t^`3>hhqR+UV{!nTY=(HT}fT+eN*v?LhLcj79 zv}jx?PHp94DAbrzCb9ZU%LH~D{g3M+IS1r3N7Q`goX1dI^zfKRo8nu0Mi*+ z7P<(8WL~Bpp^_mFBzCaGHCuxuIwJH6-Tf#m?0@@t&qwie2@BAE5=PgYFizz1!o@=4 z2P%|1r9Y-(3Wuw`@)%v9YVDkHHQUw3auxmuRLqRrASwpK%#b^-?m#1BzBkhp<4D zQGd>uM3W^FAf-}itdw4(yD-=kg~<}ZJ@$MTP*?q(0qLR#h5KB55=zwDtyUX7*787) zwOT?YR>D@G8cHsedAiW^6>D`DVP(K9{n9KpPC2yju?=0MK8R)JuZEPYu`hkkSF^k7 zvl9O~6LJXh3*&byJ0a&ptmemNf$J!Ra({@x6Bit11Y*XK!$8feL=N~s9mFX5I37gZ zYe-2fC$_j|Ty|0Cr_-b3;4btW(p=gG+2Ix=8;O&@1{8D|zS|gA_fyF#KG* zm_q!@Wiuu7ZWo5EE51A;Mf8_q-qUN-NvrjdgiksA9)L(f*?yp;z<qNZ@##>IT{X}B1i&Rdd^F$iih5Tzx9iC2!Qs>abDeFH5Wm*{zA4%o{53R4pVxc=& zc2Qlg6r8!xNLwgROHe(e$b8jXVXOD=iI5_C0n;hivWx0NHn!*B1TNGo;SZI=SHk$! 
zsaQ`SQ`1Xp-YludFJ_SR+W~b6g@4b?4LtY&%X8ZXA$}opK5&3aEe?^Ad-(nn#S2Va zWu3aHU1V}js+pXloY4TPu)cNKWEu##=InmC;&hUFsu2<@BzD~hegzr>uVNbYcH_8l zQZMS(Dd^XUJ9ZkKM%#20AV1>!D<9}P;=(ZS51o#;`^vuG5D&%@d;WlGkRD0cVo&*B*T6j@gz<)2Z{EDg+umojhNZin z7dF68C4&E;Q}G2$d+xB=*B#MLM!3)x8GSletbf$I6eD-FDPS3JEeliep0mJ@c6B4b z&M%xuxS}>F6Yy?)v-Dww>wi5+V<*NEtG$wu#hUSZyD101VGGjG<|~XP7#~_WapyBo z%+wW+LG<@Hm`=%k7uB!EV`~nsr{3!R@7cKb;c{gu>UH!At!xl7kt_B12g;N|5&w9N zdi}Sb-oHkJ;k&EX=$G@+a4>xT8udq`tC4cUlrJ5c7tMrv5dU3s34aal)o7F{bwPGf zbAfr&C6i|Oq^5ORa@Iu-ObDj6nsVw~hMkqs)MlChm4jYL<`w#tX&)TBzoEfYJJILo zJ*Di*@iH42y@TQw7J|$uILN~k&y*t=;zh)GfQ0J)LhZ{WP_;?KLO%+78wMCDQihm`8%(6tdD4x}^?`QmV zg(-&wJwDcvW$9J?f8A`A>;LT>Tpze-NdJkcpbh%pIXXSg=>O@-A^-b4?ct&MPqZLj zS33t&;(}cIaOSCLvFFf5|7<=!)@mB8fA3@JXrFd^Qi=zrk>MY^5(HVstRNpFs+Z8~PH zXF1Czc3#G>o8VKuB-C~Tr))GOoY~Ct8PywyM5sLv=VLA{Hr;yUp~q`Ymg%ua36F^P zY4qjKS2YvG2v_wF3R|wrpXm31$<(YqKGxK}ry=G|Ervxy2<5YpWGB?5WRAkTMw0y- zd+r*D6@TH>mo;kd2d=xy(M=hTgZly%e`h~Vqa{9e%waayrU>WqZ@Lr5bOvktp;d}b zO>fC)(gS;GoBJ)b^7X$UU$SFa$oloa({8L&~>L#=Z<(Cb2wOss( zD4`BeiH;Sx@Ij}Z<8)Rr`slqO5v@YqFq|vx`DKF5%8VAVnqT2!mQPK`NdH3eiHX;N z#uO)8@KSZ+e#~`=t#&rB)rg{6$v!5~7lAgSuu)UTt+7(*&9zWVTK7mSH6(!ty^ zK}(v0INzNRr1fddFs_@Ln{uhn5=-XMD}Rto}N(9zkmKb0^7yJGg-eJ13(wqFJJ^eoZ-|vWD;;))C?;~ z8-m$)SsA)cRE};EkGRS-Eg1`)c+`o^oP?NMxJjT#^~iBE+$+97S`Oif*Kw{K75Et` zZ!rYSu$%Be+MOiB(WFJ`cgvI!S1=Z-g#CVP#TwLkwM_%>XestZv5K8YTYrQyfi#f% zYpgW(NJJk7e&6K7=(@cWF9zdWO5XzN_>53_<}w?*s#~|%D@qSFu8mo(M!6{tsc1;c zXI!4`h&elzn>93zau)?%qk^iH>+cQWo0XM)`!R%trB%g6rmMUHgT?1 zUNFR9o<^JIA{Rr4Of6F!F#{?KdS?n-iV?*aETQuN1&A`WFeM!6T0m3kZJF?_l^s@M zVhSq-x5FI!K^4)jaglVvp#`2VmnnKB=nJbCGnkbr%t`3Ve>e4pReyx0&}j%kGVgrO zB(s^@MK>4Mk@29g+saXi)pbU_h;IY&$40($>RwnaXxbDLH{ftH=OE_HbxhG_+|&a2 z9%MI5)R^^%qbIGiCg9LTtJO72YsV=pTX7?dN#=7?h7h4d672TC#mh0+j5?x=j#??0 z(A8MIZl_=SzzK7TJAaoui7D?o5A_z?YqHczsNm*$C;P?E_#zYC9{v% z{{YJoO!EMdG=){j92`q+(+l8q`Lz1d1@EGow?w&Yb|A^cpm=)`P;%dGHp`v5ycUL5 zHLmtU^?F|Y3OGVSjLY{!RTuEu|K@gVt;*p z^K;406r`OvH`UZE3Pnc~Zv00ux_N>&d&#RD=(Fu!vz;>oEkqRS@_xUH5bheMUU4Rb z+q8kp>}tvZJAYTyT^87f^_rAmR!4aorY0BFr{FM-pO@7Ha?8KVe#9&R|Bf_mY)Kvn z+0=0%TV}q<_7k(iUP;O9#w}9vTyV8uXLwp^?}5%jyoCYB2y*uT=M!mF+ecLmVHO5J zpChdKcWaKR|mHmk$l*ePEH zHX_@wBy7t5W>ReBxwj&b&2}zKt0y>k>GG9Bdt$3l{}np)yI%>k>HNpZ>1n3^tJUrt z>c5_)S$~$5D)|!T%tqtxM^n4nu9*t4i}Ao5=UiV5mX4!lO1yTc-NI60dp?5S1H!?H z&%FA~8PF-aqRI1&xl@b@#=`VxBL!htGz;z}nS#BvUHHfxSRJRlR`VjHxkg7l==*c< zfMZu5zq|uFN}cPp5Dpvyx^No%q}y!j#qYd#qkmxg_x2pq8GJ)#vx5Fp=D?FK1Y58F zN3A#Q?EQalPEL*w`u`j)tpCK|F*=4F%0IPq?6dGQgK#Ia5$FTIIz8S$LNWp|in^veo1MJ=lT1rZu&*Ofq-_+!NXKj%93B|K8^i-XKzt zya{i{k>(bNe2Xuh7&1Aag)re24TbL%g?|O%GQh4CU#<{KNkSd;yeCVi3v@jVvsvHG zgKxLKRgY({m91)KF4k1HhS%d*(cLtbuax~zAyXdN&BZA**KkJ7W}b(slM4bj(^K74 zU#KDG$w+Lee1+1xATG?*Mk$jhC=JN=`fms4!`pYGtKm(5*eedq)+wWeO5oU*WPgz> zQZ<+c!|~1e<>l?$%d5BVK8*|0&*{0>GM&25sgb#psUQw*h($zhKlexD!PRglVwu~a zN{MAh*4f9vnH16Nremwuz?bUUEs57;23eAe{)-&=}rBbCmS!F>g$ty;2}=yJ?({~c9y`YO9A&UuX8I4s z;@@bocov{S?YCC^s%3l%=i;nWIpwI4?^*#}eA##f*(v(j{)gUZ@VP&_9rfSej;}60=tU!sAuVHTYCqvVj#5Oe z(4RG!H(X-B-+@F}Jm$bzF{+HI-?3uzq>5*~Q>jD3qbL||F|IXhebaY?8KI(CnxZLU z;;WQO#GO-LWIc)`=&m-fKz|hvXF;CfEQnd>_H_y*y){nuOz&IPA~Cy_46Yo{)%G-X zOWQT#oPGa;a>#FoSH1r2@cd)Hi1MzdPAZ)7&^&*Q(;elA*PQG4_Rh&vIcZ{|g%!@2 zqy=dPGAf4?5=(JbkZ>-#FSmqIebuw}E``LNZ`s(MgR+o$R?BXME`P?XO{(R`S48I! 
zX77+*H)V&ja7xrRm3Y|gG?|%@ z(sN|{KNz#&C`+@^bbok%!YtFMe97IKN+BM-nlfWRCf= zx%$n@rgjsX`Ul(6tVM#TyvW?^G)9+9njUik7qYV%r4J@n@qhdMp4Q4lGYZdXZ99Kf zaw4K1GdaXN%Hthn8T-@()?a?f*-DgB%$J75s;^^v*TLsq+;Shj^dMG2PIT>|o+RkT zyyzxOnf&OQ+~}r*MhP72K%W#|PH!bQ93Wfc`0UQ&P8^i@Jx`vf+xYipcVVkEa=xCi zC!OkByIDjzgs@#KM8$Dd91|>n${l7d9L;M~_shs_9+oYLTAcPepeG@; z8RHJ2Nq?0nqM^^m?PbzxmSxrQ+WMAIhtS#AP}Nl>cD^uxp9&Z2@=b^rgAOF%a4|Jz4Lo$UQT$A|kro~Lct z|3B+Opw9_>OH05D8UExvd@CkDd2zzC82bWYIGFkUw{rU*3IH|&@RUgKCi~wyJ;~Vr zt@hf#7Rxa zS}rzmEP>)G92>Zeq?70#HvN9`!LT4(-qm?U~#E1pom5|J=S#zytuQ004%3{E`3w delta 5932 zcmV+{7t`pXFWfGWPk)|f{KB!6oF-6|o|KDo0vj4Z* zr$^tlk5602$E|j&efnLiecV22eTQ26Bcc2Xgv9*2){}8%2V?gKxv)<@0~dtRE?Tr} z*!NdIT`WP!nghb-#FzTs`ELOS^wwn)G?t9x8K8^t02$_~QS3n%Wmu}VSix^KT8&oi zrH6dD5?6NpFR&Yc*slOszy90pcDs{Z|HrM)Vf{bHMUycB8h_Mf6V(bK4n7k^GQRAh z=G~8?X$#VfxJb8bD=b?lp6Xz3igFbjaj22LV()v<125Y(bQK0A7HA3DLX@ zCcr635DJCDXZdlb80F$vi3U9tcQLR@n;fod3|F23rLz}IQCJDU+w=0Hy`t`W?L zkQ{F@P}MnHntv9Lx~EB~FlJkj5eV|zHdD+^hXk&6<6n)V^{8sL%K|4<6W77SCH957 zQ>(ptg+?$14%7xD8DbO=>a6!kP}VMN?vNCz>{+Q!)!USUz(sn^B1g1>RKfEPoBm@;O;x3DKM`n>Bs^I&j^w zp{9#|{-yQ`S;#0gZyCP5pXb{MsC_gf5ai0Q3s|q=usY zevQJm2%E+NeIc%^#%#JcXv#QRvY>&+Ak}nCu`2+Psk({2V5)Bh0@QDg*caMR_!yO( zxi0XodVkZFS&XZjmcZy0Qp6%QMdOf!klt|4Bm9i(=?}R!0Z#Mb4uEQWg6-_2DDW#w zL9@n%qSRIJ?7$vGgOIilt>=R9uJMGud8FfN{eyAHyoZeThi z%R(1{kj%^UBUCcvfy54$xMu4niH-=pLU%t33xE55-ttj2UBUviorKXfCyW!hyl}D5 z@PP_tPU)Aah(aRZ3$g&jL9l?^fW(|}S(W>3Ms4tAya4pY0=H~VB(brpu#dqGRBO#|a4*qJQL5&X`99+^pz5JyhzAs{79GL@ zQGZ4`V-ii4NPv`TNn^F>HMk1{O<|ZU5!_?XcL8#V4Uyz1?cH(PJ&& z=&@EyEQuAf6)X+Kmr6Wc==q9Qbr)eJz%2dHC>y7|weYbGU8G)!W#+Gj6tA%_ea~03 zyXv!I|2Y$K2=WW#cPlv|=R~aL$7X@+sDB0J5P>HyI7$e_gd>N6npcS&@PRsrQS@;< zh`QI1;#f{>am~2wqRzigkB*CV-yEmZaQhTK9cY*hhGB_$yT->D1R>S za@{&nuY&QGBWphqS!^PeCdG&+s8=_o*c#P?S|(09azVc;ctdYK3Hf0_ND=^0A%Pk+9HCG_6r z26ch$Ib35o@1mxsO?T`o`F=w@7)$K=1FD(SmI2qiFct4P3;bwT zHv;VZ#EFC}YJ(C1@5VPvAAeS`-h(u9Vl1)RD;Zv_8N0WeGVmL=APp_PLSKThp_LJL zJ_AKeUC|gszsJFJO76R;el;Fjb8tQNRQG?+#=Q@hD@jqWqgQApgOKrDsoOtLrUZ)k z$7|HosWit;rrL9KN?+)lo_UM=}^3ABGirecYn<#G`LrzQKHlZ z*+tC-=1rGOn&F+AUel7ZE^=T(Fs;>;QRg!3tc0c((*&p-^g=YR(9cZU;Mn~I4W`&;6-fq|WrG*-Bqv-+$ry!s zj3oOn_J7e=^b%~{R*0I%yyjsaV#?cowZA5OPruNI%B@b3n69Ye#&Lt>ULHbC7 zXMYq~{Cv$25(*#Wj0J z*18sxfD3unZWT2OV|~_48TV)ZW`C6N>(3*wT}(U^^~(_ebdh}nhVjE0PCY{=0M|v$ zFoU!qn0=S!q3cBE=qC1vvrN;RvCx4>9mvdah>3-p0D6><9AAcdMHfiRAw2Ooj+LVV zJ0oQ+h7B|1CM=M4CrNNLX;JvyGHJvWgheV~zh7Cg26TSerW@}lDfWf2ihrC)ON26p zG%fYpSgqJ2k!vuQ(zk#*J|k3~xy;6{vShl&MoyaYIMta!s&^0uVT!`qqK^^g%Hf$XnR_u%(ZjUGHD1ipt7KM zCWWLJPz=Em+T&gTD3f(kLXpn8GfCB!afDjgVI?M}Feh(2$g#yy5&RlsM5p#y;Q4Zy z!dC*mFi$W8*=Ia2Z&sGF$?U7bG==IpY?68Ab0(S1+%CGgxK2DPOn+)K*Ax>s;BYhN zAm+?Dz|9G*i95 zx*hjE++JS2zrE~#?q62Lnos&GnFjbZUU5ojp}NjO8B1*6X@4nJIu(Y%{_yi)bT$0g zAKqxTzdpbDcgf}kq@5_eX=)OM!lMy4{wEmSJVBeyzf~CR*)~Ag&Y6K00*cr2e&31^ z?iy=VQ6_}jw1La4n&5z)E9x!_)C6$LzstVFECTN~s5gG`vfOE#rbOQO`U)K2)&HcB&+m1;sS#E~V0*`ee zeR^x4D@q>n>$OnCE4x?^CT(_ZRvgBc-y>WHAC_0#qW3@3i&^wHNN|iFq`uKPfky> z`9G~UZw~oC&v99nl}h;$CTG8K_oK;OY}ZVN(8YLQ%K6qO1EsxtnG~NLa(A$l*q)Ez z*MM+v;(wY)pE(0MWmhy=&z7rVOAr=@zZ$6)hD4(+FUb__rR~B;=D@0)?pn=@kmeW_ zb))ak!2^z6U4C~5bd;*=vk(p(0y=RP`=r}!YRz@tyHRcX_x2pq8GJ)BufqDL%z-Bz z2(^CwAGO}Jv**9PIq94p*8g){Vg4TmkI^ygkbnNCxnrM+e;I%~iH$%PSdS&g5@unQ zxH6*&otfI}$a2cov1jQ>KaVI1Q&N%ucgY;gjr2uw1`DV-WDCAnwqW7FULv6{B!TfX zqZAy?P}Cr@*IXplaK)rwyIt$MdA#^sFQMS6>zXvfieu z!V@G4A}`^|DAJq)kiX)C2Zl@zXknYMM8EKv!muD*2H3UY;`*3N63ao)dop#lV6CUy zY+mo?#L{%tkdc7i+Q`!{bq`=znYy%U6s2khf9pmd!aCGsiGSm(4s6Qzz%a zZKkKXsoqdS%#&`^QvM3LcR^g3sf|*`%U|kE?e*Ud&WE?}Mpwg|{;*f;Nnt+SWGb+#Sdy2x-2XOTHjhG^Wsx&3(lpWCbJ{!oWmZ-<{g 
z?liXC0ZgSh6}~5-@~YwK4aVnhFZ;Kju6yS<{odQ1aAi)1Dn<6;)7$=K|K_$lOr1nz z9M+*q&A{9o(NOl76^M254ArEWZGV#OZ|Q`Z3Q(H)hACJe?M+bPChM~IGx@kNyyr;H z+%immNEZI{Bn#_>Uo{&{!CahmD#r^o@LltAiw~Qwzx$$dY z-<~t$kbbUNDBM>sO~>bbEazw6|Iix^KKDnrqyGEb@zuo#tu*ow(mb{%secmgV=qN? z722}~@`gjq_dAdXipLx{D+ZMj^*g3po>Z}{cgj;pSQG`rErzv5*EfAPm=P+Pr6HOk zBECwVL)g~BsbC>TAruiAw4);QTSy>D3y#OziwuyQn4+tbu7 zE!T*1_WcjaAio`6_4>EN^M8;1BFwwiLR6?c&@_LInuBt{YpM;tz3PoB^$*5dSfTPE z%}6tlQ7)U1ScI@>XyA9Sw6H!Y+WJD|+#(Jrl`8Y=s@fUTYbRWvzH5e4vVUw-frs5{oXmuj zRyTdy6(?KmE)KOOFJEht^Ju2k*k-6{ZFVSTsYKr{#VlzpU!7g21YDR@=^>m7Iv^V-^R0N4dYFEMlLU z!1~LpoUK?XMSQ7CthyfC`x<;+<(7N#rBze~G10XZImxCQSdpW(8+)(|s#_-vl#GNQ8v3s68QMd8;Mt32r)N{U`uqTz|t+C!JPe&@(VfBrx zG<%eqd54<7m#GQd-Yna|g_ZO}b#~0@4{NPuwwAOqW3NiR>L0t1vb_F>Pq1z6FrA4Wa?UvUO>3n@ z-Qe1)A>A{y5iGcuz$tc3Puoc7H&q&)}?#)zmf>;Y!zT7GVxs z*e({L;y5dg2^K)*4l@^y<~6GOW#~2!%N9f_r@ap7NdRrexI<`iNfhDGwQ+lyw3=mE zwft&*OQ=KW>}xFA%;Cq@Bt*4)$!{?EAJ{TnwPq;eRP|0RR{z@^5niPvQAwN7MV}XW zPmx~{TYs#X_p+DwRYh%h2G1Ws%vKN)dzP|K!q7CIx62~haTLXJ^=Sh2*`<@>@8_0H z`(f}ufl$g!L}h`rS9zq~Na@>JO-_`K^n5}o{H#?8lZQP0OUZ=nfrGc3m%o6H-HB1E z@btsO;lQDStLpy$DTjb;*#EbWj&k4sIzF8L@qawmhW-DuDgs?6@GYr;7ZUu*dH7aD ze)8aiXA$-V%y1C%`@hQNf5-sXFu+qh!JFiN>+~ce|Bv1r{J+n08^zWy%& O0RR6Fb&R+Gr~m+=f47nV diff --git a/charts/latest/blob-csi-driver/templates/csi-blob-controller.yaml b/charts/latest/blob-csi-driver/templates/csi-blob-controller.yaml index 38ef78cfe..3f0ffe6a9 100644 --- a/charts/latest/blob-csi-driver/templates/csi-blob-controller.yaml +++ b/charts/latest/blob-csi-driver/templates/csi-blob-controller.yaml @@ -91,7 +91,11 @@ spec: args: - --csi-address=/csi/csi.sock - --probe-timeout=3s +{{- if eq .Values.controller.hostNetwork true }} - --http-endpoint=localhost:{{ .Values.controller.livenessProbe.healthPort }} +{{- else }} + - --health-port={{ .Values.controller.livenessProbe.healthPort }} +{{- end }} imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} volumeMounts: - name: socket-dir @@ -117,12 +121,21 @@ spec: - containerPort: {{ .Values.controller.metricsPort }} name: metrics protocol: TCP +{{- if ne .Values.controller.hostNetwork true }} + - containerPort: {{ .Values.controller.livenessProbe.healthPort }} + name: healthz + protocol: TCP +{{- end }} livenessProbe: failureThreshold: 5 httpGet: - host: localhost path: /healthz +{{- if eq .Values.controller.hostNetwork true }} + host: localhost port: {{ .Values.controller.livenessProbe.healthPort }} +{{- else }} + port: healthz +{{- end }} initialDelaySeconds: 30 timeoutSeconds: 10 periodSeconds: 30 diff --git a/charts/v1.24.0/blob-csi-driver-v1.24.0.tgz b/charts/v1.24.0/blob-csi-driver-v1.24.0.tgz index 205d4ca26f9b052a773283c31166d2dec12212dc..ac645c6afacd28a9698b9dfbb7c916c478502e0d 100644 GIT binary patch delta 5963 zcmV-R7qsZjE|D*gJAXZGbK5r3{j6VsQF?Q|dqTbWC9P)s!LgK76G!$)PI~urIv$8z zNjQ@L3xJAM*Z=(n04cspl&sjfJE0#MlZ)L2xVuPCYCLT<#4h9-n^J&E*nei zx!>)rwOXy#>G84tf2-BX{=d~eJ^8MEeA+%ad2@PteDqzbeSh43^X5C$+8+z$Ef5m( z?^;i$l^xuFNW(t)3|tUKyJ*p_Vc$>u+2|ZMS~UlR&51Ad!}C7^4(P4RCTJ`f$1^|| z;{h_#Q=`~}F3QMMZIQ+J(t|#=>Kyx;^7%| zYY0uSfD!nti+?Er>D|z^f5cg6qef5;Tt+t<5A| zvH%J~Tyr*oY80a`zUq>o*J-pnn+a0pKu;^$2xde`j(@ipsOlW5sl}u2X%;Gs*%o93 zg8Z?~6m!!d^+|tf9IYo+vt1TA;fiq`Ok84Ls3*1Bt5;|QQ{X^tK$0Ow0Wpp|#sLYi zIY^-%PXGl19Q0)lRFffehP1p2g@VJhi{?`LqT5U`-5{*#Fk3Vw7I&g)Gdd-+fP>{T z$Fdofn14{9SeV&#wd59UE@C=-1zBuaJez`sOXeuebaFO9L{* z9@J_A_=4DQZrd!NQq8Z;0ZZs2$pb*I&@XBz>ff(X2#c@*7U&CcT{ULY#X(cX(UJuX zGzO`rV~Sk?h)mT(^aWG>Fc6^r=7@cv4MmSp$$y#a0`ICXZ5eG`J+uTyuaIIEu_+pd zEQI`qa~{!WTu*-}wFz`u4tGFQ;}dLWCq@-`G|OevFC{iS6BJC6RJ z>moS^D@G8cHse 
zdAiW^6>D`DVP(K9{n9KpPC2yju?=0MK8R)JuZEPYu`hkkSF^k7vl9O~6LJXh3*&by zJ0a&ptmemmW`XM{g>s0%6Bit11Y*XK!$8feL=N~s9mFX5I37gZYe-2fC$_j|Ty|0C zr_-b3;4btW(p=gG+2Ix=8;O&@1{8D|zS|gA_fyF#KG*m_q!@Wiuu7ZWo5EE51A;Mf8_q-qUN-NvrjdgiksA9)L)HLfL+xq`=E{>qNZ@##>IT{X}B1 zyHrl2^F$iih5Tzx9iC2!Qs>abDeFH5Wm*{zA4%o{53R4pVxc=&c2Qlg6r8!xNLwgR zOHe(e$b8jXVXOD=iI5_C0n;hivWx0NHn!*B1TNGo;SZI=SHk$!saQ`SQ`1Xp-Ylud zFJ_Q`^xFY-35Czh4LtY&%X8ZXA$}opK5&3aEe?^Ad-(nn#S2VaWu3aHU1V}js+pXl zoY4TPu)cNKWEu##=InmC;&hUFsu2<@BzD~hegzr>ug*8>?Z$EAq+ZmmQ_!yyckDDe zjkf71Kz_vcS3b~p#D!ttC3#h&uNu7P_*2;&X4-n@B}x4qA54NG@DFKmFHN(BEwr{W8i z_S|8!uREfhjBueXGWv9`SpTSZDMs#UQ@}FdS{A0_J!gR*?dnE=onJVSa7ArUCg9zF z_-5(D3fFs(#!ieSR(mBQi#6l-c2f?1!xp5W%~yD?V0>uh#GTJTF;iDO2GQT+U^*rD zT~xmskF7blo_eeMzh~p#hs%|vsMpaew6a0SoMNfRKTxI&iul`W)a$?f^!_y(4BuV7 zM!%eohJ)ez*Qh@lU5%6*rhMtpyl5tW)Pwl%noDSKuSTOxsSC1;nhVUEE}1mLCpE3p zlCv&yU_vmh)s$1`GVH93rZ&?As2ub{GOy6DO#9&2{S6JK+KE0t?#cP66?LvjxRceqy4}Bqja<0L) zqZ*PJ^pyOCxb(1I`c&)Cs{c^UfuM^4S6r0Ok-30=eX6*od2GjzUds! zf1jh>D08zerlU;DEZm7Y;Ja)I`T#{ARD@QgQLEiBWY59A+kniRY)C8v@f!IqU;%3X z*sXf6kHuy@hcp- z5;aVgf!4*wXA^N$kXH-foH&5~R8A?i(dm)?C-ICUi0W3145*|3t$C|m=rd)*i{VbO z8b+R#6Qn~4lV!}YIooa2iW%1+F)^PLDyOLa7h(NH)YAhn{|QVlJ3^f<>S@Q`11f9DgfQ&;qPb|- zqL62Nn4@$pGA6Nqc+hZpFi_#qbD0ZYL*Z&+j;nOgbQ4;H^2>(1S}y)Xlu!q#M8^tT z_@KkhaR{pzee~Xth*qI)c!VqM`DKF5%8VAVnqT2!mQPK`NdH3eiHX;Tiz!aF;HB!s z{g~?#TkY&&RwFveO7<~_dl6_OIx1@Fv-rB?!75T>@Q2oaxrF2@Fdtd)%t98wUo(Wn z0+2?X$Qp?IBoYp~1xsh9+n*>&H8UV1fa;+>Un}@X(XnoVe00#8q`jU@Zq|yIN!5xL z+3W~&z0<@?K3yt`T`Z9akhotuWoDQ)F6q|^Y%zw^S@`P1r(G~6x=0`3mI+$Y9K1ozBo$ zu`a+?j{P-fY7ZJeM@a2QyJ!?pNj&J89pu{aB_bg);Z%5JixJ1ZZp@RF2-haF55vzq z$KslU-r+UtX|-yka~>LT@?Fwsg?idW^~B_w{!x~HCVG6#y<3O7y9rw28G3xIn+b3Q zRODxWG}G$Eapm*T}>oJ;9jKpmeED$iVIV^?+S7JEhMp~m?ztJNqs#UT|9Y59!HvmG&K zr*gA~rcv&qplei6wQ~L4K|xd8Zq!emfLuW<_1qw4L3zuJOqEy|>S zFU-v-ULjZN@%;}M@byVrOvH}{;x!x5{YNUl+8KUDlQmeFi)dR zbCHXok4!C-5HSNP3wmdgS&9+G7%ZXV00oFL`7b3L>HI&F-ffxVSt~oN#KaV42X2Qs z_Jb;-U*jU_Btr{4UoKPhO3)W(EM_o&D^r-0(3Ssgatx~oO`+2ef@I$LoJnRgw~KBr zt|Q|?VYii|607TsdJ*3S;*X7d=hVG0L(sG-CT_stX3jy(nd_KD&A6!r@IAThc|=s%iDK@%YKo2{*E&*`(5-VcS>d-vHt;6B^pr3>Cgd2Wdm+3Y})i$U@BBB12H+iaFQb$Klet!kX%hwAmb z`WXg(zPY&`-(HWd{_)RZ`gv=AoxqzxS|TNDIjq9q_WW(bURtQ$U)_#-A8s$N-rrvK zKld-IQq5;+mP|v^04Upz>Np&x6s`@MC{?qs9LE{O0G9ohe8= zQ7hHdEDA+O6K?!VFuHkyHhamd80fR@UbCGu11&@p>+*iTiV*G^r(SU!CWPCxfy?Y_ z$^ko9)LjPw@Ara;A)4RVTID( z4>}9+79Kc8kh>pnK9N?neN@#W%)$fE=Q!5+iHmk$l*ePEHHX_@wBy5WQWk>GYLD zdt$4Q{}np)yI%>kss7{SG;{w?r`7Hp^1q&?S(cSb`4XmPqjC46$z5&NOorIScwkC6 z*C&Ie&rvfeUOVJ&VJWdaAHnYdf8pT7b*?^h26W1)j8@x-=BjA9J{*Y@($=IRm*8195@7Y;xzV2x7pN--+Avw z!S?U%Ii@rChRkLK{in=(?k8&v$Qb(6NAU-e;9U1|J2g4 z&&1CR!kx@Upo{9ql4A+8@MZwzi4!_A_4bkTmA$(l%SZZs#8H@%ltj2o=4fuDFOoA_ zz{Me3^u@A83kUWR37sJejHd;q;5v>XfcSoabc*QppqDztTAxRn0CcE`R`nm4SBCU{}=boAiBa{Qw3pk;(@ctM=XM-7KtH=F&umgQd zYiehiWbg*KC$ixk%h;;^z0V=MHKZVT6W)d+&CL(_7GE|oWO6_YVZv(}3g0OT3&Lf9 zT`Rs&A(oPaI_P;%rcM{=dKzZ4zMBW%ZhosC&t5ZI)y!P1$!-m=f5fq(`)Dj*Df=Nq zru<|#cTSnPdNXP^^E^zQ+#zr?J=IP1g&Ja>Jc%upuTXjy#D$sKC}jc#r3bRT{@cO% z@b=y4YIxHh_KFW?>y%MKC2(v@vdAT=8cc)X_~!ib^7ifJ)!TQU#)aYM^xSKiPTk?u z$lSwJa1L#VMMQ2tfA>e@!PRglVwoGEN{MAh*4f9vn zH16Nremwuj?bUUEs3Wzv!%rV~8e8tttx}qb-jhgq)#&sFs zGB<){h;0sFDBsBn#Jcc>0%>MJvi&WcRZ{^=GY^=;1=8Mxe&K=YRa9$xj5@o4i9SNyJjmFUj|-5_6jcJ+ph#}B#@ViqAoj1 zwqEIJ%I zdVFm9ZZIQMG)u!NMNE8^w1&8I>Wi#Lkp$h<1{SE|p>E?D>NdY9*PinD^?aM69XC5-B;n6-B)B=&sE#`YYPh0L>B zc58DnW^Ga}Kfb~_hcJ7`^SY@d`Wo+O;CHaJu)fLL`a!f3~S0!ETjbW)f|uK60(HM_<~o}(&f z-QKToU&Wb^XW0Di=3CX@o2%cKY-%^Lsee!{%~~Xg%8SgsPGfY*r0Fpya3MRJQTSkD6~Eta zWmV=hqp+rG+xfGS6A}H`$l<)B{Jf(qx}KWA`pYjlTZvMN`O+h?>g(9vb?|u?x7>#> 
ze=V9Sh>5N(z)6B`jEio z-NwHMy9-;TC+F)Kdr}478t<+0bfoeUR=+`(7LQUl?@$8xG9`f9yJZ`su+sWaT^w`k zhqcx-TgzIRvsa}&^-wpNZRPbpe1dIje=nE7Z>aw}Jvu$g-v9olbEyA$mX=Wh_0b#V zVp9FX4Po)0`I)B?b0JkAq@i&j=Zup-^h%eg8(dp8q{Wpy%;d}pD_g6K3p%)nfEr3rnzzk&lHLANr=>Kase_Z{; z%#-?;qkIR@{dORz?_jM=)zmf>(Ms2D7Eul%Y!?erahw&$1Ph>YhnWjU^BUFtGIE=T zWecJfyuA+SNeFGmxI<`CC5mY1+PJ+;TFtVoT3%b<66z2-`x;7{x%}Figrs&a`3)xj z0$YY!YlbpTRqxbdmtvbM;;WP-e=2Je9{cA_-c#&XBo=Gtz3k(CRZ|;YG_OO__JmuLLSQWZzU752MOM8Uj7Hx*qsEWicCK|d~-@GUI?FJ$h{N%+6&tmKggyCT3_utCxe<%Rh z2*6WLf;ZX!*6B&c{vWpv=fBU>azT)^qZh9Pne4|xL$Y>BHJAZv`b0RnLcwX}woCv=&bb}YVD7O@)ih* z`FE`+)5;F+f23ibd_@gAU5bRBe&P_|k(uv{lQl{{?mf5c^dC>(~F$(NX&>yZ%p(P7mwf);|33~0uQF}8%${gtGg*Jj25r2~7Ee5JOhs)IBQTH?p6~=4} zG6F$<-)4%r>5%%QKQ@lnld9P+3!HGr7zPuU*q7=_t@i2_8o?AeP#chBh*3a{Bad-F z0&EUasK*mPfdB`6nFG~i2%RAVt z`OL9wMt>zH6nInNvotcx=VXB;M02`q*7WmX;JRbOO$Yt*YwZ=XklEk7W%%`$A7E)f z2H1mIO#ojI8!l{{1yri}wK-r39VB@G=oR`|4MqL?H40%7HoyXXA+D>&Y`QpT$~aoG zpn=99)pSg;D*%zHdWgPYsviad)ZZMjFSMcPF@GvKb6wya^`$Md8dnc3fzd0Zm_=-g z#vuzKzu}xm^cmOFA4+WkotDEL5Y_ku+u2D`=vPLA7L7~AsjaLHg&I@JBvyZEnZS;t z|L3|$&H?$%5jCGV7co=^Jv`>oxOf8YIt*7Dz;s5Ig)RdjnV0EDsAR|ki5)C)&DJ1^ zj#CJ|LU;cX7WVzT5u|v!f(2-k38Nt=j1#%Mbg|I*feK|=>5r+HLL%S`vH-+Euz=fu z#GG+iRr+m4ZSZBh0QBrM)gtgi(w5HwJZ;xCYkrcSfs=*hBCl2R$g<7vhspqJGqB9ihis9_X=FODu_%uoWx~ zC6`J)UF!LYS9J$rrNAux(rh+PIkfPx4IQLDh-K!lhLo(aFFns!v%BiE68||9ascuR z<991PA?HM_=6}a#f$OLRg4~kdjzVY%ye9 zc2N7rv*VNEy>AZKmGc;^UDMlxOKdDUNdFbkYx)r@dFU906g|8&{0v=8A%5kunUZ<8 zgHAOrBgw*$E86fCUmlSn`b#nI>AmT+)%r-nryPC@Kz}5mY(G#^;N`ltqh1B$Ehl#L zL}IbIR8FJ)L>k$R{A*1eo=%Bc&Y_A^+J6j6wK5()lFS7jYF~}TLU*w2pt{~EICG(q z)=-|7pn6D=`Kq_VR`1;tAw~28rceDv zwWJ=un14djZ~N3G6h1RI@ZbY1FKiox_?gIg-vKJMI7Exw!}p&kUSQ%X?bJa>MJngC zn#wuODGi_s>syyirh$MVXZOn$r<2rEjgU|wvFk?gE6^Bt<=?0uHBK6*^`dU=f`09| zW4qCA9GQ*+5_yl_LTiK1nv+U%0+s>SvM?3zISc&gR5t?b{LG1jYifg1 z0e|ntH%lK@xZZ;_dSWcGIx87jtQotvlQQrdwjd3yzCyo(v7wa_cQFISOkMF9M1PNi z>6F}eQ2ly5w&vh^>aFhoo{hU7u2!0&UPrIcN(Uk1#Zr%dpiC(g@sHQ2+k5-z{cF@8 zyt{sle!dtD`h)kcQExQ59w{?S+0vnU(SJ;+2l3xEm(bu|jYg?b7i0%D7nnC)GHHfS zYI;pe&N|3}3Bj~hQ%0T3u(MK{T1^w6vfmBKyh6V)ZG&U?SJaqr(2qhZUK5n+6e`rIQiJrn=zj~5 za}Bl~)sRHLtK=`lrHA#>Uado`+Oqy%37+p_38Fc;zUhq{@?Jif#-{ksN$V)%|2;iB zJwEt4c8LkIn{`S@6?X|UeCkEx^W1R8pNwP~SK1~lBmgM}QJ5(=oU?SDN5S4UR- z3J0!44U=V{*J9(di8w0A>jiL596*04qm{$Pkc*YP!bt^^&)Y1ReywxtXPucKd zxKpf#k!NKD=}^LC8FOrWyNy~g;~FF;=5s>j6xIJGtiOqRdI09%;pq_lp+o^BUK!u(3ftcE+V^({Xvuyn3 zW&FAcKGjP?Z8vbrMpMF>%{-q`eOO4i*Yj{b=F(!*tw$bOxz=Qv9(xqzh#pK*Q^sS5zCgv_*^fhKiH{v~n2oO~!nv56?!+;j!PqumkebWm3t_8w4KOD2S2-xtlr zQ7sC2#?BnI)*@vRi+=|Vw+90ix}M8S_!=jt zD)O^Gn(2K2A(=7DUC%w#_A7QA4nmxN|HDPMJL-+c|0n=-0Xx7g2s6Vr zLsW1@79dfQ$^v>=WxZU!0$|)6mqvr>Tm^RkqXl9S68e-onN+T;C~%8#l9$3(GzKnP$rNDQh$xriai!N zfVq^u1=R5wq4L~iHg=UI(XWM*?5#z5x5Y8c^Vy0i#BuZoU}~p!3?M@=$%O$DMl1yu!Q!w z7a+=Hnv`&)GwnQ2{`z*@{cwAA{r>i< z_qlgfm1;iGuVfnFw|K=VrG@G`3ut95v3;ktSm{(42780g{n7Q{V{dSy#eR5k^Ha&; z2Be)Rw`pn?g`%SgH~uRa-8?~?&A(L`?b!}M+0L1P79xt*@_xUH5bhdlRdFVS+q8kp z>^8vxJJ-}*7U+ldD=EROb`&;DDkqb=4jX@63%1OBk?kjDheLpp*^Rb;vYfPbPJT1!fT%2(W;2#?N)q_}^dF z`4r9lw|?4=SuI&^hS36#bs>NHXrQY|9`ft8P{k{|SPv#`c5dD{j4!`SxDGxnuepCo z+gicwv~v`z;*C7!^>TSUa+}>@Z|s!s0vnNS*b+7c5HnGl^6*WO$YwhiMrIQnymSV{ zU%OSv{|T+}-LHh%l>dKvc9zZmIcgp5e|eT>Syn3LN0^-b#@)Y6?&49+WC&f3`=*3% zeKJtmyO&Av*&%laONs6I2!0C)2Pc27bM%?hr&D%KlQnF)%C!VzVfd?&f-o$a-SUb| z!Cu-fd}I!+O6Z1aUW7DPr>F-#e-0jS?CO%cJD{UfEuV#O;1JM>v)Ct{W>fE67u_2L z+q<{tn9krE(s>ouKV=R)=|ZUW>;JfQdiExh|NrLn|Fm@M zGx0Bja3{49=%VVellgv;`;J*ir}!{O?jZ!=Mg3V9V((-J<0)#jwIV8 z5GodTO&!eToSBugdp2F1>luHMR>&@LPtAn^$^hyHoX}Z#e+Z$o{*1EKyCFq_xAdGPJ#H|p{1HM3F8%;lQw#_&oME4qJ=#PZc*Kjdwc 
zyJd4u#>~}=QL~xnVd~^OxXtubH`Nzvh9+- z-ROF7(;IY)y{dJ}D4`NKwk28Qk`xW5{$PA_admb3_UiiWyHDf7*l~L9wM?h(@M&c3 z;VEz;8)6ZW+t0nxxPN~=*oj!?Mxat+*^#yPF|_u^v9*;-<4$ACUAk0CQ_*`8DX$uxZhw67_NsULY1qBE>2=@kL@RS6 zREF5*=!3GItU#=bJt&Z779`u>(#bCsur%|4DO@1!O;}=(b(w$mncUeJ-*Y5VZW&TP zB=-J!5_|O`t(v)|a4yckiCM-*!C-d8wup4qNvM`()cx~Z!ei~ z_&o;}o^4hiO~=xGEa&In|Ii)vKletrqu%@5@%810+u_B{Pk9V!8C#P83HPx_A!>!T zr@_48lJWfxB*K5w#FAFM;#h2%NM2shyW4>&zeqXSu z&BUhuL9H}vksvB7GWR--(Iu0n$DF{0>}*EigNarAe!pc=8E-~mLDaVMXC)^h`mu?F zzoXpWQ5IcKOR?j3sH^xObVamiu z*TjEDH&yf`aI6D;l6yIQl-y9MwZ`z-oyDCvD6xBT4rlmD>L@0)RrEK9K@ug!4v4>M2dUyiaJK=<2$puU5%GF5+5 z+f+m=UAtLCIfSrXEJVd|RvZ&7fXW?aE*#BkRQJosZ61~_h}!V>I-n;Zv>D?Lp~)pt zL_^oc?PbzxmSxrQtMx6R4xzKJv1l`wA6t`<)b1s}!Q{VS%W&12p^Q`2JGI!P*yf7x zDkX_Z+k|fayvlou{))t6&AgX=ysv+%YQsBt{uk721r@PpE&C)2P4n}1SwcIGqFAp! zO`$%!c2e^F+`4H$0{$mZN|}kMtdRDqkJJ|_{aUNZiBg=NPbq~zYgHoTp-lf;G9i1A z;O*w+FR;h%Bq&v6`r+Z5J9~c%t?K#zDVKn3IR76VALoAm>*V<4aQ=Uew&8#L|EwE< zu08mcwtyE>{K`oDE{n$iC!t%Lvf zd0H+Ak~Z|>ogkC_Sg1(Wp3pYfklSAzwS^Mg_HLqT_Y$Qp|5?|~ptp#Vnvk_zY~ok~ z#Z@>qa2rV{(LHSX{oI3U{qs7s?e+imCxMOn|1@*|U%P#Ja`^rKXK62{|6_ylp!pA) c|IiNY&<^d;p1J*B00030{{?+J?*OO(0LgB{wg3PC diff --git a/charts/v1.24.0/blob-csi-driver/templates/csi-blob-controller.yaml b/charts/v1.24.0/blob-csi-driver/templates/csi-blob-controller.yaml index 38ef78cfe..3f0ffe6a9 100644 --- a/charts/v1.24.0/blob-csi-driver/templates/csi-blob-controller.yaml +++ b/charts/v1.24.0/blob-csi-driver/templates/csi-blob-controller.yaml @@ -91,7 +91,11 @@ spec: args: - --csi-address=/csi/csi.sock - --probe-timeout=3s +{{- if eq .Values.controller.hostNetwork true }} - --http-endpoint=localhost:{{ .Values.controller.livenessProbe.healthPort }} +{{- else }} + - --health-port={{ .Values.controller.livenessProbe.healthPort }} +{{- end }} imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }} volumeMounts: - name: socket-dir @@ -117,12 +121,21 @@ spec: - containerPort: {{ .Values.controller.metricsPort }} name: metrics protocol: TCP +{{- if ne .Values.controller.hostNetwork true }} + - containerPort: {{ .Values.controller.livenessProbe.healthPort }} + name: healthz + protocol: TCP +{{- end }} livenessProbe: failureThreshold: 5 httpGet: - host: localhost path: /healthz +{{- if eq .Values.controller.hostNetwork true }} + host: localhost port: {{ .Values.controller.livenessProbe.healthPort }} +{{- else }} + port: healthz +{{- end }} initialDelaySeconds: 30 timeoutSeconds: 10 periodSeconds: 30 diff --git a/charts/v1.24.1/blob-csi-driver-v1.24.1.tgz b/charts/v1.24.1/blob-csi-driver-v1.24.1.tgz index cbf94c473dcf302fc9f2679f815d76fcbcc3a9bd..e478c07172625195e13036978e0720d88ea4b567 100644 GIT binary patch delta 6016 zcmV-`7k}v8E}1WoPJe1O;}?#lq?$OgM{;uQ|K`R6kt+!`3Ge_=vCil3{u=KuwMV$?RMuhtN+Kxt%Lr*L?a%aLAQp`1Pd5}&$`I7dBY<%XM#;- z!)Bg(>#~W0LVt*Z&jgW-FT1G8glOIe6W|mi2qj0&NT4*Z#IsoI)dN3p-5cf-d)Y;U z>5$1A4g#pGA!G}3!`Xr;QvtjhF(tTu+$llxxY25~Hj#A60w@S^!`TF?QH;9ys!M`i zr_t_gCPs4!+*kbe;f^7}SZ%uR>XC;fZlXg#T# z?XtiLSB#rr;u8B(J*m~+yg?(F0tadXk_<5lh;igG4oHB_K??PF0w@sRps#bFnhc>c zq~%p86da~qG?&sB-DZO624PKy*`g`2xED>E(J7e)94wzXmd&WdgaU6We3nLL`J61U zglJBe&3~GHeiOLv*l^QDzx-NzgDhm$H*Xn!z2!$(8jvCOpjH#W7sQ4O+hzfkYJP1F zSV9*`9sqiSepW+K|9*=?ScDC*KwpXLsxg}`4w^EKmMmzXF-SEXQ|t;rWU3ybubAqG zfdKV4N9-$YD0+-a&RiFGSAA*AXyfXkB`|t}6o0dbP0=`HA>=ol^N2pY~S| zJQ^2Iz+H#oN&}eA$g zGJj!o!wKU=E-zgyG=88$c~<&kDyEPK_<}3|aS$xvHXt!)TvnBSn^7Bl87qLEou*O( zKO}AW9Kh3lU9;vV`5EdjB9=meXW4EbByh{-L=qdjiuxGrKtXGHg9nLbic>Y8%=a0W z0aY(0Lp-2prE~}jL>c9bNigl%>lb|9)opidU^`RBfF4)tEt1&DN*VinmA?s$Dm9r zy?5t7aC~`QOAb-TqM9v2e 
zP^rZsQgRPJe5QDTiL0zr7qyE_&Pg?sbCfe0Ko!<^E}KjP0XLjIELWUPQcpEPLWRVx z8^NzYW8hVMquy>DH%{tB-8u#RI&sHNqtj@ajsoN-{BZ39eNS8%24144mw8eDyV)C> zo}o4W>^oRO?_F+Cm)M@e4S$yNE^2z(b;rK4@3+K*vBaJ~pcE=(s5LCz^}MnHel8LGJDrNJSlV-k&A#o3b~3_+w#ewyg<}1a-lZ71t4#sR zfNNQpiVvIxezdC_0d{`oM8Y+-L79N}W2e(7b3S)Pwl% znoDT#phlxisSC1;ntuz-n=Y9&!zVSZ(~`3;a$rI*t<{uM=Q8Z9jHWiz1gISJLNagA zFHHO3*!>j^rrL?VxacWmSB{t2z~~(mx3CaoM!`WIrg)|t!4NMZ#sef&_ZMnkCV{F= zA{P2l=*4S-Qtd*8+Er?hK@WW;a<0L)qZ*PJ^pyOCxb(1IJAbNmXjNM_{wu-rT`WN~ zhu63LaYH`HXVch}|2b~8Gx6V()3eh<{P!i=tui<3ayrVi%)*_h1HQ|apaUp6P!U>{ zMy+4*Z!V@t0OCZg#%ZjhN&{py4d()GJlQ=@_GTB69>>A$|0d@4hHE*>`9aA>E8159SVdhymK{}N%S;icj*lwd%%(w=LiTRvRIYsq9 z3F}Xyo*sbtcep!5f9Q~zvQqW`S4Z```YwZEp2w7nW(=DsV=@9TF=&IW*amjd<0Fc6 zJN0cEsDH4N-VjsUbj(`Ma+XcJyo_Hr!KZpjsO<($*=R~Qvzg~Jsy7RX@OmE3$6Q)$ zy7kCIFV~tZ(_@d491-u+=*wTMY9@*iuF4-2wp^D#&~E{gsabt`s;RwBL(H3642y;k z%4Z|VPN+%Ayo7m;B>NZk+%*s@!l^H7)ZPzVcYl?an=&2;^aU#Z&VHOaOML8@!)$&{ z5zgh@bSIAK4A%BTtF)Y&-jdOz2lm=F_j_vP>wiIxWXH0Q_3MA9-8wrcssm*Cn>v*l2kJTG6JX`>iAm0 zM~cF_3Gz{(H%WUvncS=uFO#YjEwb4W=6a`zmwdWZ7Q0v?6CiQFbji#xYh2Q=6WC%5 zsaW{x!>3&^Cb~!maLWWOX%6BXcS4ZXr!vD>Zfb7Ir8-M2nLDpQGL?5Mp}bD$l7ADH z@eel%T0xaa;$!9BCY{^pbEhjbR;&xKm1BRync9QK&k<7l(JmSVR1yz*W(T=;e2Gj* zOgNPu*cj9e&#}1Spm%u9dRna->6(W|Tzr=_TA`kHQ9UuarhinW ziJqQv@7Ce&Zi1G0hMu14W&&IR6@U5JAI_plklnJDP)L&zzv42M*`Y`a@CLc!E>ZN!w80S*@7Es4$gvxW5+1ORxy2V~m zdZ=+d%xX2tO>sy?Ls~xL@_a|k*{R&Dp=p%6DCim$RIOZpcTmt2w;T0S7a&*AO1*d5 zt1{*><~2gY?5KLX;jgwJTZ=O33u`lqSICum{P@%5cyQMr_HM2R!++bNH9IUNTd8xa zq5msUr$pgdF=Z3yTIB^p4CZOHX)baxbjZ{)1ral#vY>aSn57s|jKLB*4^V(8Q~y%J zk*@zU<=vJE&sy1GB_^h@I&eG8u^&_s{TdfZ7a3aM`Er?}SAxEwP9 zw{s3+&RoY7YQ{}1fFD41vqX(qk2rePI%@(BU9?(Vv$S@c!m<@N!kA<}H)RMBN+iK< z4_v$)gUzTTy6C8tf(c!X)$4ZpwGW&yr?_*uL;n9ikNS6)qkn#{KfE1WT;07NT=k3G z^Y@&2+3%t^xl=Oxg#C}O9KkdX5J^*5dCbAF)Hb~UPM1upFJ161s&h+}$YuwUTnvi0 z7Xc*?-Db1gsmp6&XjS71KUA;h)z2{SkK5au@!ien`k((Qrk}Ug3A`DkB~r4M!zv7J z&)+uerG@Il^?%*C_wnxP`orB-|4aX>D%E_IX2~=p4S=%ksIE(h#uD52+Sr#)g<-Hi z{4y9_4?p#Xw_5CPE^hx(vNHu~C(cSWHH$*g(S#fS7L0D5pv_+LDhK*vyVq>z%s>kf z#k#!TuOfuI#;I4F3E?(v;4-_Ka=^|tb(aP9VZA0Ln19t#-iE2eMfE8-jN_MOHG$mn z@3S8kg#s8mQH*y%w{r4V}h*>RB zH^XQ_x__aNKfQy}M}Qvl`dVn>mA9-1lXg2dFB~Qj(<5AGj+WQl)ZDFLc3L@#P4Pw^ z^LDvBnY_*Fus3$fSAmVlHY^F7vcH)WTY2uSNMy5}3)AWe4qm%_<CZ+A!mwx-+$%B#duhAyi8-)3 zLV2U+MM!gXj(X7d=imXyu0C>k4|J3|%V{ATI0SUzH1W)SXVHUfR9 zek?hbFbi)6P@XuUGjrZPa=x;67i9TJzmGTyOOlcZcc~oBjr3J&Mhmz&RExe^wP@kM zUZbEhWP$OtpcGukQ3Md*FOV(~y&3dUL4U0Eb)*SEhl*%bk7@v;UClNHq>9B|OGk4x zhemUDucnM^eIwEe*+uTDxiCUGK)rwyIt%ZQA#^sFQMP*Ae*insx3s2qmPrP0fqNpG z-m#3W>Oc4#!dpWMk~iUPIMUqwkZOkE4q)y@|Ch5DrCwdySX@J=IYI; z+064Wb#g)AW_qfd>I*f*JQ;~Cm9J2G7sQ2`+9+ib1*HMmUjNl{pWFE`Z+!KTBcKXI5jf&Fcrk14Y7#G z-IxAoJh&e2L@aY7R4K9S$U6HNT4&?fI(r#hXWP-OOD^YV7P$jugvR~byH6MYyt}^X z4|S&YZut4rPGiemx>ZV3(R&stuNs}+V0`iJs(<(Srgw4M@4efJR^~>qQh#C}Kfmi= z^>6RWGu=r-CgvS3sTrJ`lOoC=vjVX$C89u@S&(dhOIPYtz|zbErf`9@H(`lE)}7wS z)MLl^UZMbkQ3Vm3EdBbJ(`yEJx#bXYf7o*CU_Z_P=PpWv+JC!#iyoiF~7UNp8 zzBhe0m=P+PrD>TWCca5|MBF*`Mb@K8g6?Vq3smuN{^JGCf0%V{Uw7TITx}myx3pa&&iRi&DJT4Hc-`yY4KF_Rizx4U(xk$<4$bqoIMGp#c+FXk z@9%6(mD43ALRjHkNm`I*AfpmEA+Z$a1xePv!?_p_SeIvN_mB_h898gQD%X@o2%ccY-%^LsehO)%~~Xg%8SgsPGfY*r0Fpya3MRJQOaOq6~Eu_ z39U>tqws9jwtw?yB_|^K(UL>FqdeYGmZeWkVEy%%oUKGD#e8W( zTR*I|p4nQ~%ACC_$6Wt=&R>?F|KSsCTYI?#e#7~{)1%X)?EUX&ox|@xy-3Rlulnc> zb1|v@;fC<=pZS@m5pyARKuAO5K+YK_zv~q$ac*$!sUh7n^cO66kiaQ+lb7l0i;s6d zKgg9@cYpPdLtvliBY&cceyP`g)tC}K^DlLOb;d9B{z9My@QDVPfvmqpb(aYJf9*c2 zf0%hv|8mrt54ztD1oa)Pm8qJuO+~cQXE%!|hY+@lg{U~rierKWP`Sg*g`;_k>V6ry z&BL+JU2n7D}7B{Mwp?q;@~@ z8%+KUwhXn_3}u`;y;F-_fo-mcuW}?&S(`9ipEr5Wv0sr`teN++kM~VYZFr5(e}b8< 
zU?TQxWuHZ%X@1@?OK8VY6x-G38PpfIPD;LC+BWS+!2b)3Qf4A58>GG3BlSf}zt(DU zqJPq)morM?&zhA8c_`DrmQ2VVBzU`d`5#zgcM_B;GX3~?xU;CBRo(wT=Ms<&`~UXQ zQ73!<&+*CGVgLUUZNvWmMHd2nX5f2T0$$1RC+FdNG5N`h6JEsF7YM__%v58{|6j$Ncz-=U*MEAJq_mdB%^}nZWZ~wR732e0gC)wYB u?wp*p5BC2h+N;_B*r7aF{)6Q|v_m_zLp!t=ZvPhm0RR7kdsZO;ssI3OWYZD= delta 5956 zcmV-K7rW@0FWoMXPJc+N8NaYCCDp`{J(81a|2H=th+IjiNq`4{igiAJ_ul{@#b=3< z6+3Okyog0Eb{F7cvDgQ2;dE~TJ7WZY3B#j+8+z$S0E(jKeV1rD}UR$|Bws&YL}k2OQ9a%O+?n8OJj~m*W94(o>_@gD%R*RBe&P_}ZO5 zT&pWP{}Yl3ZJEsSw1HV zEFl`xWq-4#@81NjJ2u>O(J#N&-XIH^_03y`Z*Tb#mIh>qJ*d?L@CC8q+_qUjr5azG z1D4Q5k_Uj^pr2J&)ZcGW2#c@*7U(N+UDapP#6eTW(UJuXGzO`LV~Sk?h)mT@^c7Qm zGZ3JDbHu*VhN8!)6-R2Mxy z<u4~r(BtJv_M#NG`@GRR6gamHcoJeA0S5Y5>9VlomZ}1?|OmV8lllea5 zGN9_AWQYe8ttK790#QaeV-ii4NPv`TN`GTD={34bgH2JGED=0l&vyZJ)$a^Q7dsL&Yz0$8$)!?HmwLS7S=~igDKJYvG>eT>4lR6aLl>zR zVwv%)Ath_+E-(-&^;`>sIFHE&Rl4uHI%0%s2);ezG|(o)%y=bND)1N=@e|)MfD*Y z+jDRN7wVPphm*rs!uZXpSkE9+(@Se!EvehDrjYd00d)z5FU$=*_z25$+kXZjekO80 zaDYlJ4$&m{@WW?{7nrz8J9SaJNadVVQ#nUDr2$l7y>QuN8VI=I>|wd$bdtKO5fUmS zcHIbm1sVgd{2TRlExpWx`rpmo z(6kJ-`Dfq35_%W8PF-Sq4u3aT&bz4TY11A1O26L{55^LE{(x$b9!cn8PuX8Lz&#>_ zv4&dj-o4A~-WSz|rMsS2R>03Cf`6w|@fAyZ?y%Xn?a@w3xKJ0F`E;&W|D<;*M(%1; zz%t-k7N+6@XMrE>>PCQ_pE;3mO>Iyr;Qjb^>B9=w2arZjj3rikC4VD}HDmX7QwDy+ z7NnuoSLjzTHncM0&S#*QsVg3X==V67PRTg9p_crAl3pU4PVEVBU1eq#53+={YSq>mmmx1k+kg8Fenh&Pr)&HBErZK`$ip z2K~ac4UXMk(O{~L==1ZQn(WH(G8-7ZgW?tzg3Ks5$io!Rlpz@6MZ{QugzEM}?aL%k zwMoQ6-wLgGO;D;`s8G8~4KnDVuSCu@*mhJy5`&(SzYv%1)_-ezwGLO+E9?K2;Q20= zAezJL+y1yAALO%XY>NLJx7r#1@5$-O$-)2o64$L#H|t_LO0~?wou~u8%a))WDB4jG zYL!N zV#y%0EDUDq)7D}Bzr^+U*!(M65U;BZf+=x9E`2!j)Ueod=%T+ipPp(p4c33~F?F<^Kts>3 zHZ63@fQEZ~w2%W+LIKsay{F*n$ci7~z?GK9sRfFt#+w>%7z!ionkeNJS!tehY}{sm}BGHZPbbx*B~)5pA#yl zsQxEm{Yli*9WZ}~yF>Jcc9|(HRsVl=RKKh5(i!G?Oqpm#v57J!BLL%rHrR@FU>7|; zqG)cX-hWL46*kfvVrrX)S?yWQvhkOf@#`k|R1XQY-M}g9O$lc<^L$42W+CBT&%^nc zON&jn?s;hCT9akE?@^E=;(Z#u`SVrIL@~lu`h&ujYw`#BEnqS=s!vZfwbyBgc~gsF zQ5Qn_Y$VwUXHqgQVICvN{)Ihv4aACY>dl&I?|%obyNb(A8IK+M0u{e!KMtKGUUtl3 zHom3^=VETU5yx}}Yulk!SWXRZNpI2}du^TjJzeGNe?g37$Fz|3>wic6ORxW(*1OZ= z!}|Xc*FB+57xlDZ?*WyyWI`DBebHRBYf;EEcIK$L7AcchJZQK)7^u+oTxP=8P`Fwc zcYl=*nr=diP=48PSIfm8h!W}mmFQT33mo?j){tjuT; ztN9f!X8F`KjOJfRJ~8mxxtQW)3tp-Y+~0CdVy&HZW;G&DR@Q2#DgybqPA8GK+LKZ(?(}lzWkbg#<$Qp>-BoYp~1xsh9*&irLHPay@fa<39 zuNAzc$gP_oA31uHwAYi#jauooC_SC@)n7fWOUByN{ZnHgr4OZssF zTZ|#)3tzqXvnV==rK^)^w2-5mcX6VaJjZL{!XNe_a=M_k%;*KSh*MA9J za=~Au`5BK}$SCPfv9-0DrE4iu~-4W_le!2=kdpih&yTy0tj#)N?nr{fZrj zgAivw{&e2!jr!y9UkU(Szz%Q=!pyME5EWdJ1xS>nvOu1-JFS)|cn%MeW6Pw$S-)Cj$CXp?=Nj&1r(zIkObg)qeFLM%N;@>7{9wi~i zm*HOV1=4Z|Pdtv}leSh9r`6E^N+dmz;#Ex9cz={u5x5Y8xf^Xy zixzY3oU}~p!3?M@Xq`zLDMl1yu!Q!w7a+=Hnv`&)GwnJ-fho+x+YWPV zaa2UV#u(A*d=_}VT&C!ipfAh~%wYBz&+D6&rEIeKst8Tt@*IL>p81?fW;3^oZZB^V z&k7US%r(Wt4LID+Ie&;ba~+dU7+*@Ue*oFdP}0gBt@M1B52G=SV*fq+a81vvEer^$ z-9N!@4_v$)gUzTTy6C8tf(civ)#G;hu@9Uur?_#sP51vlkNS6)qkgYHyd9ih-Mt@N z^@}XL_ndjzG1L2`DVcr3{zq7jV44SrUT>JZ%E7VJPOkt?XMf$QaV~fkrAj5TEVCU+ zYPn(yHK637+iaHG1>xF6A*(LpBr#O4XWv}Jz&~zpZ^m~wqw9bEtC)UX4JGhqkQOcw z8#t`O;P(7&!(LjbK3w08dmrzvu0Pye^}qD5s#47-`jrd={1(qRrL<68XQ7NGw(qqT zD;)~MV1M{!Fn_upe(DczwbH1!fo2XWp_*!^Sx#EbUlta*?A=#Z zh_}#Z6+!OqBYP&TY8!;AUa&$R+DmxG{%p-LbzHEF0<#DW1X#d1<7YZ){LioJe2T{Y zd*5xxtd>kS!)SrWx{yD;HPBTgkNNppsN$7ftUHr7J2x*J#+TnCTn8VP*W9FStzdRq zIf_;BMt>gjdbvCvxy|aZH+ITbfsM#EED4(eh?yu&dHAMCWV4M6BeMw(UONNgfBUMC z{}Wo{yB`U&DgXcE^fa6Q({3Hke|eG1vaD3fk1#p=jr+fu+{JdyWC&f32d0E?eKJtm zyO&Av*&%laONs6I2!0C)2Pdv`^qDiDQ+7?0HGgcm%C!VzVfd?&f-o$a)$)o=!Cu-f zd}0o)O6YFXxCm*EPEj}d{v15u*wrO>_drLfT0RTmz#*U$XR%MZ&8A+t&U?2Cw*O$y 
zF`dD8r1L7wf65$q)`3v#=l@ab^xeBm{{Op^Cbu*UUyWGM8(z8^a?}tmr%v%U6^Akhf9pmVeDT z88b&OMwiVz4^t=S!EL68x~bk!L(G$I)Kd8hrFTJGn5m6Y#>-#oP3`qB2Is@O_oM6K zZGYG+_Nvw?ql8M}*p_6GOHwqL2E*~~`PJ3k#ntu2`_JRT*l~L7wM?hZ@M&bu;VEz; z8)6ZWyD$CGcyK-3iCE@Dpi*Mlo_}@r(Y4OTzIFD}xz4t|TNfG5(JV3t$_S17w|Ae; z|9N+P(;w)r74r=9wiJ9Md(rlR*OQeHJWy}|hW;;Mi5`KEV%+wWcML@RS5R4K8K zpD+4X{oA|pFm;lUaae~-Y6j=#h=#JqtU#=bXDE zkagMnnS9(B-%BKCZW*ROBn$tiBn#`jF3V>;mA0rSd({jug=cZrsT?EJ$al@fEk0Db zg1i+R>b760+ejcU2}MnIl)!I6eS2w)!}vL{P@%70nvTi)SkBLW{HZq@eCdzwM*Rlw;0!&^}Xr4!HiJREDg&PG4V}u8{)>PH?kf@5_DG`STGe2RS7Rpm0;GnebpnR zx5LSn>AlKYBxZ+_!Ik5=+CHXkX}w0Avmbv_M)=+Ey4SxOo`32WQGedG{-Hv(f#&&J z)ESf`UQ=Q4{Z(vKsd+FS!V1+6X+fHSjPlon#8R9U1euG@MJ-`eAL*+tN+Gf5TQ;`m zpfqHj)w0uni!p1JYWeX|z&V83vx3(RCDF%0M;*V1rG@p0(AHNX=N555EvYg;ud1CT zy>`I$DZ8dSCDS$)Y=79TuE`8YX+_ibU1hS>?&45i^7{29IWJ~<8QY9Cy_y{gSSs^c0Y`))kK^&cZt+iYs3%ZRqw7*@2P#Td(v+G~-JCe%E*?+G@9?FYey}YP6reB^r z5;2}+j`^~=`c7d}n~6>RVXic5ksvB7GWR%*(Iu0n$DF{0>}W~|YRa!hs&AdY$;Oo=@Zf}-tkitsqLv?Y?tsmA}%WN%c zWyW5W8rA=v;>q&*A3njhwUi^pBT7QS{|GdbR5nlDu8|Gk2eQKevqqa-PIol zz&_C>f1-tim=ZSgFLi%a;}=?gAADNuts=p}Rh>@}8r=BC%LA?tf)3@0+UH@C=^+1T|YhMeJG2K8r%r zeBLfgXvba@>(%Ee)EC!IO1@uOH|0e6*WDgR&-MIV@tg$-@N)?%Yd^{XDRB%<@|3BvtkPZ9)_R&%9`(MXLr-%Lj zOMhG&_Wv)s5a^nL?`a8mCB>f{hwnw@Cl5||5oKQ>3 Date: Mon, 6 May 2024 16:41:30 +0000 Subject: [PATCH 147/157] chore(deps): bump sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader Bumps [sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader](https://github.com/kubernetes-sigs/cloud-provider-azure) from 0.0.8 to 0.0.9. - [Release notes](https://github.com/kubernetes-sigs/cloud-provider-azure/releases) - [Changelog](https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/docs/release-versioning.md) - [Commits](https://github.com/kubernetes-sigs/cloud-provider-azure/compare/pkg/azclient/v0.0.8...pkg/azclient/v0.0.9) --- updated-dependencies: - dependency-name: sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- vendor/modules.txt | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 40434c1cb..cb4e2caa9 100644 --- a/go.mod +++ b/go.mod @@ -38,7 +38,7 @@ require ( k8s.io/mount-utils v0.29.4 k8s.io/utils v0.0.0-20231127182322-b307cd553661 sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240418020948-86cfc443b48c - sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.8 + sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.9 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index 3c8f03294..796026b3c 100644 --- a/go.sum +++ b/go.sum @@ -469,8 +469,8 @@ sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240418020948-86cfc443b48c h1:jiFjp4 sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240418020948-86cfc443b48c/go.mod h1:1vNSbzbKZwTZFc/kcVGFpcTjcys3qcRtPT3ZZtmg2WA= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.15 h1:dAvrFZFLIVoOXTSrU5jTLyUdNlyaExrBqD2YvyRjpI4= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.15/go.mod h1:gd4xk5narc09IQvDR8xyr5FyDmB0Y49+bLOUlMaGecs= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.8 h1:Ix/gpEadY+rOsC2lEEP60g7WpdTOxzdOEpS3+xMkopE= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.8/go.mod h1:b0gqf/Z49wro1afaTPgXZkVwJGTihnDSe0c80LYQ5JE= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.9 h1:dqNj+AISy41WMcXy1tNsq17+be/HZfqMKu3DLKB43Jo= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.9/go.mod h1:QMXIQlVLjr0r894neYyP/PV0kdkAI7eUJ3Gwrk8mKFs= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/vendor/modules.txt b/vendor/modules.txt index ed9add388..5f7afa207 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1663,7 +1663,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient -# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.8 +# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.9 ## explicit; go 1.21 sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader # sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd From 9f5f8f80ea6c4ccce70af3dd777ab68cc813027e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 May 2024 16:22:01 +0000 Subject: [PATCH 148/157] chore(deps): bump google.golang.org/protobuf from 1.34.0 to 1.34.1 Bumps google.golang.org/protobuf from 1.34.0 to 1.34.1. --- updated-dependencies: - dependency-name: google.golang.org/protobuf dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 +- .../editiondefaults/editions_defaults.binpb | Bin 78 -> 93 bytes .../internal/filedesc/desc_list_gen.go | 11 + .../protobuf/internal/filedesc/editions.go | 4 +- .../protobuf/internal/genid/descriptor_gen.go | 46 +- .../protobuf/internal/version/version.go | 2 +- .../protobuf/reflect/protodesc/editions.go | 6 +- .../reflect/protoreflect/source_gen.go | 19 + .../types/descriptorpb/descriptor.pb.go | 636 +++++++++++------- .../types/gofeaturespb/go_features.pb.go | 18 +- vendor/modules.txt | 2 +- 12 files changed, 492 insertions(+), 258 deletions(-) diff --git a/go.mod b/go.mod index cb4e2caa9..d543eced9 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( go.uber.org/mock v0.4.0 golang.org/x/net v0.24.0 google.golang.org/grpc v1.59.0 - google.golang.org/protobuf v1.34.0 + google.golang.org/protobuf v1.34.1 k8s.io/api v0.30.0 k8s.io/apimachinery v0.30.0 k8s.io/client-go v0.30.0 diff --git a/go.sum b/go.sum index 796026b3c..87d8a00a7 100644 --- a/go.sum +++ b/go.sum @@ -405,8 +405,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= -google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH4= -google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index f691305eb4f73440cd48caba949023eab52ef304..ff6a38360add36f53d48bb0863b701696e0d7b2d 100644 GIT binary patch literal 93 zcmd;*mUzal#C*w)K}(Q>QGiK;Nr72|(SYfa9TNv5m$bxlxFnMRqXeS@6Ht;7B*_4j Ve8H{+(u69m1u{(G8N0>{b^xZ!4_5#H literal 78 zcmd-Q6B6WL6kw8IQef6#G+?@9$Hc)X@r<1dB+ewjD8Z<}1Qcfki8Dw%hln$xi@#u3 Kc*d^rf*k;APzzxI diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go index 30db19fdc..f4107c05f 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go @@ -8,6 +8,7 @@ package filedesc import ( "fmt" + "strings" "sync" "google.golang.org/protobuf/internal/descfmt" @@ -198,6 +199,16 @@ func (p *Fields) lazyInit() *Fields { if _, ok := p.byText[d.TextName()]; !ok { p.byText[d.TextName()] = d } + if isGroupLike(d) { + lowerJSONName := strings.ToLower(d.JSONName()) + if _, ok := p.byJSON[lowerJSONName]; !ok { + p.byJSON[lowerJSONName] = d + } + lowerTextName := strings.ToLower(d.TextName()) + if _, ok := p.byText[lowerTextName]; !ok { + p.byText[lowerTextName] = d + } + } if _, ok := p.byNum[d.Number()]; !ok { p.byNum[d.Number()] = d } diff --git 
a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index d1e16a26d..11f5f356b 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -108,7 +108,9 @@ func unmarshalEditionDefault(b []byte) { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case genid.FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number: + case genid.FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number: + fs = unmarshalFeatureSet(v, fs) + case genid.FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number: fs = unmarshalFeatureSet(v, fs) } } diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 40272c893..1447a1198 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -21,6 +21,7 @@ const ( // Enum values for google.protobuf.Edition. const ( Edition_EDITION_UNKNOWN_enum_value = 0 + Edition_EDITION_LEGACY_enum_value = 900 Edition_EDITION_PROTO2_enum_value = 998 Edition_EDITION_PROTO3_enum_value = 999 Edition_EDITION_2023_enum_value = 1000 @@ -653,6 +654,7 @@ const ( FieldOptions_Targets_field_name protoreflect.Name = "targets" FieldOptions_EditionDefaults_field_name protoreflect.Name = "edition_defaults" FieldOptions_Features_field_name protoreflect.Name = "features" + FieldOptions_FeatureSupport_field_name protoreflect.Name = "feature_support" FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" @@ -667,6 +669,7 @@ const ( FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets" FieldOptions_EditionDefaults_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults" FieldOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.features" + FieldOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.feature_support" FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" ) @@ -684,6 +687,7 @@ const ( FieldOptions_Targets_field_number protoreflect.FieldNumber = 19 FieldOptions_EditionDefaults_field_number protoreflect.FieldNumber = 20 FieldOptions_Features_field_number protoreflect.FieldNumber = 21 + FieldOptions_FeatureSupport_field_number protoreflect.FieldNumber = 22 FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -767,6 +771,33 @@ const ( FieldOptions_EditionDefault_Value_field_number protoreflect.FieldNumber = 2 ) +// Names for google.protobuf.FieldOptions.FeatureSupport. +const ( + FieldOptions_FeatureSupport_message_name protoreflect.Name = "FeatureSupport" + FieldOptions_FeatureSupport_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport" +) + +// Field names for google.protobuf.FieldOptions.FeatureSupport. 
+const ( + FieldOptions_FeatureSupport_EditionIntroduced_field_name protoreflect.Name = "edition_introduced" + FieldOptions_FeatureSupport_EditionDeprecated_field_name protoreflect.Name = "edition_deprecated" + FieldOptions_FeatureSupport_DeprecationWarning_field_name protoreflect.Name = "deprecation_warning" + FieldOptions_FeatureSupport_EditionRemoved_field_name protoreflect.Name = "edition_removed" + + FieldOptions_FeatureSupport_EditionIntroduced_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_introduced" + FieldOptions_FeatureSupport_EditionDeprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_deprecated" + FieldOptions_FeatureSupport_DeprecationWarning_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.deprecation_warning" + FieldOptions_FeatureSupport_EditionRemoved_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_removed" +) + +// Field numbers for google.protobuf.FieldOptions.FeatureSupport. +const ( + FieldOptions_FeatureSupport_EditionIntroduced_field_number protoreflect.FieldNumber = 1 + FieldOptions_FeatureSupport_EditionDeprecated_field_number protoreflect.FieldNumber = 2 + FieldOptions_FeatureSupport_DeprecationWarning_field_number protoreflect.FieldNumber = 3 + FieldOptions_FeatureSupport_EditionRemoved_field_number protoreflect.FieldNumber = 4 +) + // Names for google.protobuf.OneofOptions. const ( OneofOptions_message_name protoreflect.Name = "OneofOptions" @@ -1110,17 +1141,20 @@ const ( // Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. const ( - FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition" - FeatureSetDefaults_FeatureSetEditionDefault_Features_field_name protoreflect.Name = "features" + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition" + FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_name protoreflect.Name = "overridable_features" + FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_name protoreflect.Name = "fixed_features" - FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition" - FeatureSetDefaults_FeatureSetEditionDefault_Features_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features" + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition" + FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features" + FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features" ) // Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. 
const ( - FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3 - FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number protoreflect.FieldNumber = 2 + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number protoreflect.FieldNumber = 4 + FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number protoreflect.FieldNumber = 5 ) // Names for google.protobuf.SourceCodeInfo. diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index fc6bfc396..a3cba5080 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 34 - Patch = 0 + Patch = 1 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index f6a1fec6e..804830eda 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -62,18 +62,20 @@ func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet { fmt.Fprintf(os.Stderr, "internal error: unsupported edition %v (did you forget to update the embedded defaults (i.e. the bootstrap descriptor proto)?)\n", edpb) os.Exit(1) } - fs := defaults.GetDefaults()[0].GetFeatures() + fsed := defaults.GetDefaults()[0] // Using a linear search for now. // Editions are guaranteed to be sorted and thus we could use a binary search. // Given that there are only a handful of editions (with one more per year) // there is not much reason to use a binary search. 
for _, def := range defaults.GetDefaults() { if def.GetEdition() <= edpb { - fs = def.GetFeatures() + fsed = def } else { break } } + fs := proto.Clone(fsed.GetFixedFeatures()).(*descriptorpb.FeatureSet) + proto.Merge(fs, fsed.GetOverridableFeatures()) defaultsCache[ed] = fs return fs } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 7dcc2ff09..00102d311 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -373,6 +373,8 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { b = p.appendRepeatedField(b, "edition_defaults", (*SourcePath).appendFieldOptions_EditionDefault) case 21: b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 22: + b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -519,6 +521,23 @@ func (p *SourcePath) appendFieldOptions_EditionDefault(b []byte) []byte { return b } +func (p *SourcePath) appendFieldOptions_FeatureSupport(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "edition_introduced", nil) + case 2: + b = p.appendSingularField(b, "edition_deprecated", nil) + case 3: + b = p.appendSingularField(b, "deprecation_warning", nil) + case 4: + b = p.appendSingularField(b, "edition_removed", nil) + } + return b +} + func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { if len(*p) == 0 { return b diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 78624cf60..10c9030eb 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -54,6 +54,9 @@ type Edition int32 const ( // A placeholder for an unknown edition value. Edition_EDITION_UNKNOWN Edition = 0 + // A placeholder edition for specifying default behaviors *before* a feature + // was first introduced. This is effectively an "infinite past". + Edition_EDITION_LEGACY Edition = 900 // Legacy syntax "editions". These pre-date editions, but behave much like // distinct editions. These can't be used to specify the edition of proto // files, but feature definitions must supply proto2/proto3 defaults for @@ -82,6 +85,7 @@ const ( var ( Edition_name = map[int32]string{ 0: "EDITION_UNKNOWN", + 900: "EDITION_LEGACY", 998: "EDITION_PROTO2", 999: "EDITION_PROTO3", 1000: "EDITION_2023", @@ -95,6 +99,7 @@ var ( } Edition_value = map[string]int32{ "EDITION_UNKNOWN": 0, + "EDITION_LEGACY": 900, "EDITION_PROTO2": 998, "EDITION_PROTO3": 999, "EDITION_2023": 1000, @@ -2177,12 +2182,16 @@ type FileOptions struct { // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. 
- // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. + // A proto2 file can set this to true to opt in to UTF-8 checking for Java, + // which will throw an exception if invalid UTF-8 is parsed from the wire or + // assigned to a string field. + // + // TODO: clarify exactly what kinds of field types this option + // applies to, and update these docs accordingly. + // + // Proto3 files already perform these checks. Setting the option explicitly to + // false has no effect: it cannot be used to opt proto3 files out of UTF-8 + // checks. JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` // Sets the Go package where structs generated from this .proto will be @@ -2679,7 +2688,8 @@ type FieldOptions struct { Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"` // Any features defined in the specific edition. - Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` + Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` + FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2811,6 +2821,13 @@ func (x *FieldOptions) GetFeatures() *FeatureSet { return nil } +func (x *FieldOptions) GetFeatureSupport() *FieldOptions_FeatureSupport { + if x != nil { + return x.FeatureSupport + } + return nil +} + func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -3968,6 +3985,88 @@ func (x *FieldOptions_EditionDefault) GetValue() string { return "" } +// Information about the support window of a feature. +type FieldOptions_FeatureSupport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The edition that this feature was first available in. In editions + // earlier than this one, the default assigned to EDITION_LEGACY will be + // used, and proto files will not be able to override it. + EditionIntroduced *Edition `protobuf:"varint,1,opt,name=edition_introduced,json=editionIntroduced,enum=google.protobuf.Edition" json:"edition_introduced,omitempty"` + // The edition this feature becomes deprecated in. Using this after this + // edition may trigger warnings. + EditionDeprecated *Edition `protobuf:"varint,2,opt,name=edition_deprecated,json=editionDeprecated,enum=google.protobuf.Edition" json:"edition_deprecated,omitempty"` + // The deprecation warning text if this feature is used after the edition it + // was marked deprecated in. 
+ DeprecationWarning *string `protobuf:"bytes,3,opt,name=deprecation_warning,json=deprecationWarning" json:"deprecation_warning,omitempty"` + // The edition this feature is no longer available in. In editions after + // this one, the last default assigned will be used, and proto files will + // not be able to override it. + EditionRemoved *Edition `protobuf:"varint,4,opt,name=edition_removed,json=editionRemoved,enum=google.protobuf.Edition" json:"edition_removed,omitempty"` +} + +func (x *FieldOptions_FeatureSupport) Reset() { + *x = FieldOptions_FeatureSupport{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOptions_FeatureSupport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions_FeatureSupport) ProtoMessage() {} + +func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions_FeatureSupport.ProtoReflect.Descriptor instead. +func (*FieldOptions_FeatureSupport) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} +} + +func (x *FieldOptions_FeatureSupport) GetEditionIntroduced() Edition { + if x != nil && x.EditionIntroduced != nil { + return *x.EditionIntroduced + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_FeatureSupport) GetEditionDeprecated() Edition { + if x != nil && x.EditionDeprecated != nil { + return *x.EditionDeprecated + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_FeatureSupport) GetDeprecationWarning() string { + if x != nil && x.DeprecationWarning != nil { + return *x.DeprecationWarning + } + return "" +} + +func (x *FieldOptions_FeatureSupport) GetEditionRemoved() Edition { + if x != nil && x.EditionRemoved != nil { + return *x.EditionRemoved + } + return Edition_EDITION_UNKNOWN +} + // The name of the uninterpreted option. Each string represents a segment in // a dot-separated name. is_extension is true iff a segment represents an // extension (denoted with parentheses in options specs in .proto files). 
@@ -3985,7 +4084,7 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3998,7 +4097,7 @@ func (x *UninterpretedOption_NamePart) String() string { func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4037,14 +4136,17 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` - Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + // Defaults of features that can be overridden in this edition. + OverridableFeatures *FeatureSet `protobuf:"bytes,4,opt,name=overridable_features,json=overridableFeatures" json:"overridable_features,omitempty"` + // Defaults of features that can't be overridden in this edition. + FixedFeatures *FeatureSet `protobuf:"bytes,5,opt,name=fixed_features,json=fixedFeatures" json:"fixed_features,omitempty"` } func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { *x = FeatureSetDefaults_FeatureSetEditionDefault{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4057,7 +4159,7 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4080,9 +4182,16 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetEdition() Edition { return Edition_EDITION_UNKNOWN } -func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFeatures() *FeatureSet { +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetOverridableFeatures() *FeatureSet { if x != nil { - return x.Features + return x.OverridableFeatures + } + return nil +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFixedFeatures() *FeatureSet { + if x != nil { + return x.FixedFeatures } return nil } @@ -4188,7 +4297,7 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4201,7 +4310,7 @@ func (x *SourceCodeInfo_Location) String() string { func 
(*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4275,7 +4384,7 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4288,7 +4397,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string { func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4702,7 +4811,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, - 0x08, 0x09, 0x10, 0x0a, 0x22, 0xad, 0x0a, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, @@ -4743,18 +4852,41 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, - 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, + 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, + 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, + 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 
0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, @@ -4898,177 +5030,187 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, - 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x0a, 0x0a, - 0x0a, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x8b, 0x01, 0x0a, 0x0e, + 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x0a, 0x0a, + 0x0a, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, - 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, + 0x3f, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, - 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x66, 0x0a, 0x09, 0x65, 0x6e, 0x75, - 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, - 0x70, 0x65, 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, - 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, - 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x92, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, - 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x27, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, - 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, - 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0x52, - 0x15, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, - 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x78, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, - 0x61, 0x6c, 0x69, 0x64, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, - 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x23, 0x88, 0x01, 0x01, - 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, - 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, - 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x78, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, - 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x20, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, - 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, - 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7c, 0x0a, 0x0b, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, - 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x33, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, - 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, - 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, - 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0x52, 0x0a, 0x6a, 0x73, - 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, - 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, - 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, - 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, - 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, - 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, - 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, - 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, - 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, - 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 
0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, - 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, - 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, - 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x53, 0x0a, 0x0f, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, - 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, - 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, - 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, - 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, - 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, - 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, - 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, - 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, - 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, - 0x10, 0xe9, 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, 0x2a, 0x06, 0x08, 0xea, 0x07, - 0x10, 0xeb, 0x07, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, - 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xfe, 0x02, 0x0a, 0x12, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, - 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, - 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, + 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, + 0x6c, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, + 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, + 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, + 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, + 0x08, 0xe8, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 
0x65, 0x12, 0x98, 0x01, + 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, + 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, + 0x6e, 0x67, 0x42, 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, + 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, + 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, + 0x07, 0x52, 0x15, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, + 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, + 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, + 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, + 0x45, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, + 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, + 0x26, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, + 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, + 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, + 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, + 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, + 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, + 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, + 0x0a, 0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, + 0x07, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, + 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, + 0x0a, 0x16, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, + 
0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, + 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, + 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, + 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, + 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, + 0x45, 0x44, 0x10, 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, + 0x1f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, + 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, + 0x0a, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, + 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, + 0x0a, 0x17, 0x55, 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, + 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, + 0x03, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, + 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, + 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, + 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, + 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, + 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, + 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, + 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9, 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, + 0x2a, 0x06, 0x08, 0xea, 0x07, 0x10, 0xeb, 0x07, 0x2a, 0x06, 0x08, 0x86, 0x4e, 0x10, 0x87, 0x4e, + 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, + 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xd9, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, + 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, + 0x74, 0x45, 0x64, 0x69, 
0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, + 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, + 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, + 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, + 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x1a, 0x87, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, - 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, - 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, - 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, - 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, - 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, - 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, - 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, - 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, - 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, - 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, - 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, - 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, - 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, - 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, - 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, - 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, - 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, - 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, - 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, + 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, + 0xe2, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, - 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, - 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, - 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, - 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, - 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0x92, 0x02, 0x0a, 0x07, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, - 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, - 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 
0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, - 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, - 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, - 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, - 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, - 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, - 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, - 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, - 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, - 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, + 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, + 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, + 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, + 0x74, 0x68, 
0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, + 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, + 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, + 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, + 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, + 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, + 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, + 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, + 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, + 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, + 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, + 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, + 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, + 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, + 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, + 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, + 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 
0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, + 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, + 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, + 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, + 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, + 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, + 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, + 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, + 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, + 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, + 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, } var ( @@ -5084,7 +5226,7 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { } var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 32) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33) var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ (Edition)(0), // 0: google.protobuf.Edition (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState @@ -5131,10 +5273,11 @@ var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault - (*UninterpretedOption_NamePart)(nil), // 45: google.protobuf.UninterpretedOption.NamePart - (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 46: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - (*SourceCodeInfo_Location)(nil), // 47: google.protobuf.SourceCodeInfo.Location - 
(*GeneratedCodeInfo_Annotation)(nil), // 48: google.protobuf.GeneratedCodeInfo.Annotation + (*FieldOptions_FeatureSupport)(nil), // 45: google.protobuf.FieldOptions.FeatureSupport + (*UninterpretedOption_NamePart)(nil), // 46: google.protobuf.UninterpretedOption.NamePart + (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 47: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + (*SourceCodeInfo_Location)(nil), // 48: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 49: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto @@ -5179,40 +5322,45 @@ var file_google_protobuf_descriptor_proto_depIdxs = []int32{ 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 42: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 43: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 44: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 45: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 46: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 47: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 48: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 49: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 50: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 9, // 51: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 36, // 52: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 53: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 45, // 54: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 10, // 55: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence - 11, // 56: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType - 12, // 57: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding - 13, // 58: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation - 14, // 59: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding - 15, // 60: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat - 46, // 61: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - 0, // 62: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition - 0, // 63: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition - 47, // 64: google.protobuf.SourceCodeInfo.location:type_name -> 
google.protobuf.SourceCodeInfo.Location - 48, // 65: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 20, // 66: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 0, // 67: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition - 0, // 68: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition - 36, // 69: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features:type_name -> google.protobuf.FeatureSet - 16, // 70: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 71, // [71:71] is the sub-list for method output_type - 71, // [71:71] is the sub-list for method input_type - 71, // [71:71] is the sub-list for extension type_name - 71, // [71:71] is the sub-list for extension extendee - 0, // [0:71] is the sub-list for field type_name + 45, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 35, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 49: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 50: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 51: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 9, // 52: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 36, // 53: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 54: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 46, // 55: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 10, // 56: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 11, // 57: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 12, // 58: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 13, // 59: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation + 14, // 60: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 15, // 61: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 47, // 62: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 63: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 64: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 48, // 65: 
google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 49, // 66: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 20, // 67: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 68: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 69: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition + 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition + 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition + 0, // 72: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 36, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet + 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet + 16, // 75: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 76, // [76:76] is the sub-list for method output_type + 76, // [76:76] is the sub-list for method input_type + 76, // [76:76] is the sub-list for extension type_name + 76, // [76:76] is the sub-list for extension extendee + 0, // [0:76] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -5578,7 +5726,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UninterpretedOption_NamePart); i { + switch v := v.(*FieldOptions_FeatureSupport); i { case 0: return &v.state case 1: @@ -5590,7 +5738,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { + switch v := v.(*UninterpretedOption_NamePart); i { case 0: return &v.state case 1: @@ -5602,7 +5750,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceCodeInfo_Location); i { + switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { case 0: return &v.state case 1: @@ -5614,6 +5762,18 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceCodeInfo_Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GeneratedCodeInfo_Annotation); i { case 0: return &v.state @@ -5632,7 +5792,7 @@ func file_google_protobuf_descriptor_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, NumEnums: 17, - NumMessages: 32, + NumMessages: 33, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go 
b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index 9f046f979..b0df3fb33 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -90,12 +90,18 @@ var file_google_protobuf_go_features_proto_rawDesc = []byte{ 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x0a, 0x47, 0x6f, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5c, 0x0a, 0x1a, 0x6c, 0x65, 0x67, 0x61, 0x63, - 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, - 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x1f, 0x88, 0x01, 0x01, - 0x98, 0x01, 0x06, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, 0x75, 0x65, 0x18, 0xe6, 0x07, 0xa2, - 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, 0xe7, 0x07, 0x52, 0x17, 0x6c, 0x65, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc9, 0x01, 0x0a, 0x0a, 0x47, 0x6f, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xba, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, + 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x7d, 0x88, + 0x01, 0x01, 0x98, 0x01, 0x06, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, 0x75, 0x65, 0x18, 0x84, + 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, 0xe7, 0x07, 0xb2, 0x01, + 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, 0x20, 0x6c, 0x65, 0x67, + 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x4a, 0x53, 0x4f, + 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x65, 0x20, + 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, 0x20, 0x66, 0x75, 0x74, + 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, diff --git a/vendor/modules.txt b/vendor/modules.txt index 5f7afa207..36b29750d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -710,7 +710,7 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.34.0 +# google.golang.org/protobuf v1.34.1 ## explicit; go 1.17 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext From 8c101a156e96ec5816277ef742815678bc79ac71 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 May 2024 16:30:28 +0000 Subject: [PATCH 149/157] chore(deps): bump golangci/golangci-lint-action from 5 to 6 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 5 to 6. 
- [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/v5...v6) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/static.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/static.yaml b/.github/workflows/static.yaml index f82e447c6..e3e01eb6b 100644 --- a/.github/workflows/static.yaml +++ b/.github/workflows/static.yaml @@ -13,7 +13,7 @@ jobs: go-version: ^1.19 - uses: actions/checkout@master - name: Run linter - uses: golangci/golangci-lint-action@v5 + uses: golangci/golangci-lint-action@v6 with: version: v1.54 args: -E=gofmt,unused,ineffassign,revive,misspell,exportloopref,asciicheck,bodyclose,depguard,dogsled,durationcheck,errname,forbidigo -D=staticcheck --timeout=30m0s From 08035524e427a47651fb733b941048e5e63a59da Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Wed, 8 May 2024 03:25:57 +0000 Subject: [PATCH 150/157] fix: support storeAccountKey as true in cross subscription scenario fix --- docs/driver-parameters.md | 1 + pkg/blob/controllerserver.go | 9 ----- pkg/blob/controllerserver_test.go | 65 ------------------------------- 3 files changed, 1 insertion(+), 74 deletions(-) diff --git a/docs/driver-parameters.md b/docs/driver-parameters.md index 5525f2263..74f58ff6f 100644 --- a/docs/driver-parameters.md +++ b/docs/driver-parameters.md @@ -98,6 +98,7 @@ pvc-92a4d7f2-f23b-4904-bad4-2cbfcff6e388 Name | Meaning | Available Value | Mandatory | Default value --- | --- | --- | --- | --- volumeHandle | Specify a value the driver can use to uniquely identify the storage blob container in the cluster. | A recommended way to produce a unique value is to combine the globally unique storage account name and container name: {account-name}_{container-name}. Note: the # character is reserved for internal use, the / character is not allowed. 
| Yes | +volumeAttributes.subscriptionID | specify Azure subscription ID where blob storage directory is located | Azure subscription ID | No | if not empty, `resourceGroup` must be provided volumeAttributes.resourceGroup | Azure resource group name | existing resource group name | No | if empty, driver will use the same resource group name as current k8s cluster volumeAttributes.storageAccount | existing storage account name | existing storage account name | Yes | volumeAttributes.containerName | existing container name | existing container name | Yes | diff --git a/pkg/blob/controllerserver.go b/pkg/blob/controllerserver.go index d976a3920..17fc6f489 100644 --- a/pkg/blob/controllerserver.go +++ b/pkg/blob/controllerserver.go @@ -232,15 +232,6 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("matchTags must set as false when storageAccount(%s) is provided", account)) } - if subsID != "" && subsID != d.cloud.SubscriptionID { - if isNFSProtocol(protocol) { - return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("NFS protocol is not supported in cross subscription(%s)", subsID)) - } - if !storeAccountKey { - return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("storeAccountKey must set as true in cross subscription(%s)", subsID)) - } - } - if resourceGroup == "" { resourceGroup = d.cloud.ResourceGroup } diff --git a/pkg/blob/controllerserver_test.go b/pkg/blob/controllerserver_test.go index 13cdb1dd2..1c9c57d95 100644 --- a/pkg/blob/controllerserver_test.go +++ b/pkg/blob/controllerserver_test.go @@ -431,71 +431,6 @@ func TestCreateVolume(t *testing.T) { } }, }, - { - name: "NFS not supported by cross subscription", - testFunc: func(t *testing.T) { - d := NewFakeDriver() - d.cloud = &azure.Cloud{} - d.cloud.SubscriptionID = "bar" - mp := make(map[string]string) - mp[subscriptionIDField] = "foo" - mp[protocolField] = "nfs" - mp[skuNameField] = "unit-test" - mp[storageAccountTypeField] = "unit-test" - mp[locationField] = "unit-test" - mp[storageAccountField] = "unit-test" - mp[resourceGroupField] = "unit-test" - mp[containerNameField] = "unit-test" - mp[mountPermissionsField] = "0750" - req := &csi.CreateVolumeRequest{ - Name: "unit-test", - VolumeCapabilities: stdVolumeCapabilities, - Parameters: mp, - } - d.Cap = []*csi.ControllerServiceCapability{ - controllerServiceCapability, - } - - expectedErr := status.Errorf(codes.InvalidArgument, fmt.Sprintf("NFS protocol is not supported in cross subscription(%s)", "foo")) - _, err := d.CreateVolume(context.Background(), req) - if !reflect.DeepEqual(err, expectedErr) { - t.Errorf("Unexpected error: %v", err) - } - }, - }, - { - name: "storeAccountKey must be set as true in cross subscription", - testFunc: func(t *testing.T) { - d := NewFakeDriver() - d.cloud = &azure.Cloud{} - d.cloud.SubscriptionID = "bar" - mp := make(map[string]string) - mp[subscriptionIDField] = "foo" - mp[storeAccountKeyField] = falseValue - mp[protocolField] = "unit-test" - mp[skuNameField] = "unit-test" - mp[storageAccountTypeField] = "unit-test" - mp[locationField] = "unit-test" - mp[storageAccountField] = "unit-test" - mp[resourceGroupField] = "unit-test" - mp[containerNameField] = "unit-test" - mp[mountPermissionsField] = "0750" - req := &csi.CreateVolumeRequest{ - Name: "unit-test", - VolumeCapabilities: stdVolumeCapabilities, - Parameters: mp, - } - d.Cap = []*csi.ControllerServiceCapability{ - controllerServiceCapability, - } - - expectedErr := 
status.Errorf(codes.InvalidArgument, fmt.Sprintf("storeAccountKey must set as true in cross subscription(%s)", "foo")) - _, err := d.CreateVolume(context.Background(), req) - if !reflect.DeepEqual(err, expectedErr) { - t.Errorf("Unexpected error: %v", err) - } - }, - }, { name: "Update service endpoints failed (protocol = nfs)", testFunc: func(t *testing.T) { From deab781132e71d91df2b827a5984ac2904c038da Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 May 2024 16:56:56 +0000 Subject: [PATCH 151/157] chore(deps): bump github.com/onsi/ginkgo/v2 from 2.17.2 to 2.17.3 Bumps [github.com/onsi/ginkgo/v2](https://github.com/onsi/ginkgo) from 2.17.2 to 2.17.3. - [Release notes](https://github.com/onsi/ginkgo/releases) - [Changelog](https://github.com/onsi/ginkgo/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/ginkgo/compare/v2.17.2...v2.17.3) --- updated-dependencies: - dependency-name: github.com/onsi/ginkgo/v2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 5 +++++ .../onsi/ginkgo/v2/ginkgo/watch/package_hash.go | 9 +++++++++ vendor/github.com/onsi/ginkgo/v2/types/version.go | 2 +- vendor/modules.txt | 2 +- 6 files changed, 19 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index d543eced9..c5701d4bd 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/golang/protobuf v1.5.4 github.com/kubernetes-csi/csi-lib-utils v0.16.0 - github.com/onsi/ginkgo/v2 v2.17.2 + github.com/onsi/ginkgo/v2 v2.17.3 github.com/onsi/gomega v1.33.1 github.com/pborman/uuid v1.2.1 github.com/pelletier/go-toml v1.9.5 diff --git a/go.sum b/go.sum index 87d8a00a7..1f325d51c 100644 --- a/go.sum +++ b/go.sum @@ -211,8 +211,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g= -github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= +github.com/onsi/ginkgo/v2 v2.17.3 h1:oJcvKpIb7/8uLpDDtnQuf18xVnwKp8DTD7DQ6gTd/MU= +github.com/onsi/ginkgo/v2 v2.17.3/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index 0a8949799..13be7ec6a 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,8 @@ +## 2.17.3 + +### Fixes +`ginkgo watch` now ignores hidden files [bde6e00] + ## 2.17.2 ### Fixes diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go index 17d052bdc..0e6ae1f29 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go 
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "regexp" + "strings" "time" ) @@ -79,6 +80,10 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti continue } + if isHiddenFile(info) { + continue + } + if goTestRegExp.MatchString(info.Name()) { testHash += p.hashForFileInfo(info) if info.ModTime().After(testModifiedTime) { @@ -103,6 +108,10 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti return } +func isHiddenFile(info os.FileInfo) bool { + return strings.HasPrefix(info.Name(), ".") || strings.HasPrefix(info.Name(), "_") +} + func (p *PackageHash) hashForFileInfo(info os.FileInfo) string { return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano()) } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 5dd0140cd..52cc3abc8 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.17.2" +const VERSION = "2.17.3" diff --git a/vendor/modules.txt b/vendor/modules.txt index 36b29750d..7fef1da42 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -350,7 +350,7 @@ github.com/munnerz/goautoneg # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f ## explicit github.com/mxk/go-flowrate/flowrate -# github.com/onsi/ginkgo/v2 v2.17.2 +# github.com/onsi/ginkgo/v2 v2.17.3 ## explicit; go 1.20 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config From 351506d02609d3ba4faef106ad437bb34362f4ee Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Thu, 9 May 2024 03:43:07 +0000 Subject: [PATCH 152/157] test: fix stdlib CVE due to golang v1.22.2 --- .github/workflows/trivy.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/trivy.yaml b/.github/workflows/trivy.yaml index 9690bbb29..5319c34fc 100644 --- a/.github/workflows/trivy.yaml +++ b/.github/workflows/trivy.yaml @@ -12,7 +12,7 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v5 with: - go-version: ^1.16 + go-version: 1.22.3 id: go - name: Checkout code From 178ec4b347d224f032a74271849d4551dc7c9840 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 9 May 2024 16:23:25 +0000 Subject: [PATCH 153/157] chore(deps): bump sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader Bumps [sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader](https://github.com/kubernetes-sigs/cloud-provider-azure) from 0.0.9 to 0.0.10. - [Release notes](https://github.com/kubernetes-sigs/cloud-provider-azure/releases) - [Changelog](https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/docs/release-versioning.md) - [Commits](https://github.com/kubernetes-sigs/cloud-provider-azure/compare/pkg/azclient/v0.0.9...pkg/azclient/v0.0.10) --- updated-dependencies: - dependency-name: sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- vendor/modules.txt | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index c5701d4bd..39d23c54c 100644 --- a/go.mod +++ b/go.mod @@ -38,7 +38,7 @@ require ( k8s.io/mount-utils v0.29.4 k8s.io/utils v0.0.0-20231127182322-b307cd553661 sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240418020948-86cfc443b48c - sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.9 + sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.10 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index 1f325d51c..f1a8ed222 100644 --- a/go.sum +++ b/go.sum @@ -469,8 +469,8 @@ sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240418020948-86cfc443b48c h1:jiFjp4 sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240418020948-86cfc443b48c/go.mod h1:1vNSbzbKZwTZFc/kcVGFpcTjcys3qcRtPT3ZZtmg2WA= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.15 h1:dAvrFZFLIVoOXTSrU5jTLyUdNlyaExrBqD2YvyRjpI4= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.15/go.mod h1:gd4xk5narc09IQvDR8xyr5FyDmB0Y49+bLOUlMaGecs= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.9 h1:dqNj+AISy41WMcXy1tNsq17+be/HZfqMKu3DLKB43Jo= -sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.9/go.mod h1:QMXIQlVLjr0r894neYyP/PV0kdkAI7eUJ3Gwrk8mKFs= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.10 h1:xRjihqpmd3rFPWcNlaQ7anxNpOyf5PPQPbwKNzAfaTM= +sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.10/go.mod h1:/0IfKCHKIGzBFDwtMCZ/Jl2DrCJjWqqfN0BvN3+9LcM= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/vendor/modules.txt b/vendor/modules.txt index 7fef1da42..09134d264 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1663,7 +1663,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient -# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.9 +# sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.10 ## explicit; go 1.21 sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader # sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd From fc4669bcd7011ff87d094996356449013b9dc2f7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 May 2024 16:19:19 +0000 Subject: [PATCH 154/157] chore(deps): bump golang.org/x/net from 0.24.0 to 0.25.0 Bumps [golang.org/x/net](https://github.com/golang/net) from 0.24.0 to 0.25.0. - [Commits](https://github.com/golang/net/compare/v0.24.0...v0.25.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 10 +- go.sum | 20 +- vendor/golang.org/x/crypto/ssh/client_auth.go | 14 +- vendor/golang.org/x/net/html/doc.go | 2 +- .../golang.org/x/net/http/httpguts/httplex.go | 13 +- vendor/golang.org/x/net/http2/frame.go | 13 +- vendor/golang.org/x/net/http2/server.go | 11 +- vendor/golang.org/x/net/http2/transport.go | 15 +- vendor/golang.org/x/sys/cpu/cpu.go | 1 + vendor/golang.org/x/sys/cpu/cpu_arm64.go | 10 + vendor/golang.org/x/sys/cpu/cpu_arm64.s | 8 + vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go | 1 + .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 5 + vendor/golang.org/x/sys/unix/asm_zos_s390x.s | 665 +- vendor/golang.org/x/sys/unix/bpxsvc_zos.go | 657 ++ vendor/golang.org/x/sys/unix/bpxsvc_zos.s | 192 + vendor/golang.org/x/sys/unix/epoll_zos.go | 220 - vendor/golang.org/x/sys/unix/fstatfs_zos.go | 163 - vendor/golang.org/x/sys/unix/pagesize_unix.go | 2 +- .../x/sys/unix/readdirent_getdirentries.go | 2 +- vendor/golang.org/x/sys/unix/sockcmsg_zos.go | 58 + .../golang.org/x/sys/unix/symaddr_zos_s390x.s | 75 + .../x/sys/unix/syscall_zos_s390x.go | 1507 ++++- vendor/golang.org/x/sys/unix/sysvshm_unix.go | 2 +- .../x/sys/unix/sysvshm_unix_other.go | 2 +- vendor/golang.org/x/sys/unix/zerrors_linux.go | 9 + .../x/sys/unix/zerrors_zos_s390x.go | 233 +- .../x/sys/unix/zsymaddr_zos_s390x.s | 364 ++ .../x/sys/unix/zsyscall_zos_s390x.go | 3113 ++++++++-- .../x/sys/unix/zsysnum_linux_386.go | 5 + .../x/sys/unix/zsysnum_linux_amd64.go | 5 + .../x/sys/unix/zsysnum_linux_arm.go | 5 + .../x/sys/unix/zsysnum_linux_arm64.go | 5 + .../x/sys/unix/zsysnum_linux_loong64.go | 5 + .../x/sys/unix/zsysnum_linux_mips.go | 5 + .../x/sys/unix/zsysnum_linux_mips64.go | 5 + .../x/sys/unix/zsysnum_linux_mips64le.go | 5 + .../x/sys/unix/zsysnum_linux_mipsle.go | 5 + .../x/sys/unix/zsysnum_linux_ppc.go | 5 + .../x/sys/unix/zsysnum_linux_ppc64.go | 5 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 5 + .../x/sys/unix/zsysnum_linux_riscv64.go | 5 + .../x/sys/unix/zsysnum_linux_s390x.go | 5 + .../x/sys/unix/zsysnum_linux_sparc64.go | 5 + .../x/sys/unix/zsysnum_zos_s390x.go | 5507 +++++++++-------- vendor/golang.org/x/sys/unix/ztypes_linux.go | 26 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 8 - .../x/sys/unix/ztypes_linux_amd64.go | 9 - .../golang.org/x/sys/unix/ztypes_linux_arm.go | 9 - .../x/sys/unix/ztypes_linux_arm64.go | 9 - .../x/sys/unix/ztypes_linux_loong64.go | 9 - .../x/sys/unix/ztypes_linux_mips.go | 9 - .../x/sys/unix/ztypes_linux_mips64.go | 9 - .../x/sys/unix/ztypes_linux_mips64le.go | 9 - .../x/sys/unix/ztypes_linux_mipsle.go | 9 - .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 9 - .../x/sys/unix/ztypes_linux_ppc64.go | 9 - .../x/sys/unix/ztypes_linux_ppc64le.go | 9 - .../x/sys/unix/ztypes_linux_riscv64.go | 9 - .../x/sys/unix/ztypes_linux_s390x.go | 9 - .../x/sys/unix/ztypes_linux_sparc64.go | 9 - .../golang.org/x/sys/unix/ztypes_zos_s390x.go | 146 +- vendor/golang.org/x/sys/windows/aliases.go | 2 +- vendor/golang.org/x/sys/windows/empty.s | 8 - vendor/modules.txt | 10 +- 65 files changed, 8997 insertions(+), 4298 deletions(-) create mode 100644 vendor/golang.org/x/sys/unix/bpxsvc_zos.go create mode 100644 vendor/golang.org/x/sys/unix/bpxsvc_zos.s delete mode 100644 vendor/golang.org/x/sys/unix/epoll_zos.go delete mode 100644 vendor/golang.org/x/sys/unix/fstatfs_zos.go create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_zos.go create mode 100644 vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s create mode 100644 vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s 
delete mode 100644 vendor/golang.org/x/sys/windows/empty.s diff --git a/go.mod b/go.mod index 39d23c54c..325b63a97 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/satori/go.uuid v1.2.0 github.com/stretchr/testify v1.9.0 go.uber.org/mock v0.4.0 - golang.org/x/net v0.24.0 + golang.org/x/net v0.25.0 google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.34.1 k8s.io/api v0.30.0 @@ -127,13 +127,13 @@ require ( go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.19.0 // indirect - golang.org/x/crypto v0.22.0 // indirect + golang.org/x/crypto v0.23.0 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/oauth2 v0.11.0 // indirect golang.org/x/sync v0.7.0 - golang.org/x/sys v0.19.0 // indirect - golang.org/x/term v0.19.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/term v0.20.0 // indirect + golang.org/x/text v0.15.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.20.0 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/go.sum b/go.sum index f1a8ed222..ad51a3ea0 100644 --- a/go.sum +++ b/go.sum @@ -326,8 +326,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -344,8 +344,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -365,21 +365,21 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go index 34bf089d0..9486c5986 100644 --- a/vendor/golang.org/x/crypto/ssh/client_auth.go +++ b/vendor/golang.org/x/crypto/ssh/client_auth.go @@ -404,10 +404,10 @@ func validateKey(key PublicKey, algo string, user string, c packetConn) (bool, e return false, err } - return confirmKeyAck(key, algo, c) + return confirmKeyAck(key, c) } -func confirmKeyAck(key PublicKey, algo string, c packetConn) (bool, error) { +func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { pubKey := key.Marshal() for { @@ -425,7 +425,15 @@ func confirmKeyAck(key PublicKey, algo string, c packetConn) (bool, error) { if err := Unmarshal(packet, &msg); err != nil { return false, err } - if msg.Algo != algo || !bytes.Equal(msg.PubKey, pubKey) { + // According to RFC 4252 Section 7 the algorithm in + // SSH_MSG_USERAUTH_PK_OK should match that of the request but some + // servers send the key type instead. OpenSSH allows any algorithm + // that matches the public key, so we do the same. 
+ // https://github.com/openssh/openssh-portable/blob/86bdd385/sshconnect2.c#L709 + if !contains(algorithmsForKeyFormat(key.Type()), msg.Algo) { + return false, nil + } + if !bytes.Equal(msg.PubKey, pubKey) { return false, nil } return true, nil diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go index 2466ae3d9..3a7e5ab17 100644 --- a/vendor/golang.org/x/net/html/doc.go +++ b/vendor/golang.org/x/net/html/doc.go @@ -104,7 +104,7 @@ tokenization, and tokenization and tree construction stages of the WHATWG HTML parsing specification respectively. While the tokenizer parses and normalizes individual HTML tokens, only the parser constructs the DOM tree from the tokenized HTML, as described in the tree construction stage of the -specification, dynamically modifying or extending the docuemnt's DOM tree. +specification, dynamically modifying or extending the document's DOM tree. If your use case requires semantically well-formed HTML documents, as defined by the WHATWG specification, the parser should be used rather than the tokenizer. diff --git a/vendor/golang.org/x/net/http/httpguts/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go index 6e071e852..9b4de9401 100644 --- a/vendor/golang.org/x/net/http/httpguts/httplex.go +++ b/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -12,7 +12,7 @@ import ( "golang.org/x/net/idna" ) -var isTokenTable = [127]bool{ +var isTokenTable = [256]bool{ '!': true, '#': true, '$': true, @@ -93,12 +93,7 @@ var isTokenTable = [127]bool{ } func IsTokenRune(r rune) bool { - i := int(r) - return i < len(isTokenTable) && isTokenTable[i] -} - -func isNotToken(r rune) bool { - return !IsTokenRune(r) + return r < utf8.RuneSelf && isTokenTable[byte(r)] } // HeaderValuesContainsToken reports whether any string in values @@ -202,8 +197,8 @@ func ValidHeaderFieldName(v string) bool { if len(v) == 0 { return false } - for _, r := range v { - if !IsTokenRune(r) { + for i := 0; i < len(v); i++ { + if !isTokenTable[v[i]] { return false } } diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 43557ab7e..105c3b279 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -490,6 +490,9 @@ func terminalReadFrameError(err error) bool { // returned error is ErrFrameTooLarge. Other errors may be of type // ConnectionError, StreamError, or anything else from the underlying // reader. +// +// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID +// indicates the stream responsible for the error. func (fr *Framer) ReadFrame() (Frame, error) { fr.errDetail = nil if fr.lastFrame != nil { @@ -1521,7 +1524,7 @@ func (fr *Framer) maxHeaderStringLen() int { // readMetaFrame returns 0 or more CONTINUATION frames from fr and // merge them into the provided hf and returns a MetaHeadersFrame // with the decoded hpack values. -func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { +func (fr *Framer) readMetaFrame(hf *HeadersFrame) (Frame, error) { if fr.AllowIllegalReads { return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") } @@ -1592,7 +1595,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { } // It would be nice to send a RST_STREAM before sending the GOAWAY, // but the structure of the server's frame writer makes this difficult. 
- return nil, ConnectionError(ErrCodeProtocol) + return mh, ConnectionError(ErrCodeProtocol) } // Also close the connection after any CONTINUATION frame following an @@ -1604,11 +1607,11 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { } // It would be nice to send a RST_STREAM before sending the GOAWAY, // but the structure of the server's frame writer makes this difficult. - return nil, ConnectionError(ErrCodeProtocol) + return mh, ConnectionError(ErrCodeProtocol) } if _, err := hdec.Write(frag); err != nil { - return nil, ConnectionError(ErrCodeCompression) + return mh, ConnectionError(ErrCodeCompression) } if hc.HeadersEnded() { @@ -1625,7 +1628,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { mh.HeadersFrame.invalidate() if err := hdec.Close(); err != nil { - return nil, ConnectionError(ErrCodeCompression) + return mh, ConnectionError(ErrCodeCompression) } if invalid != nil { fr.errDetail = invalid diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index ce2e8b40e..c5d081081 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -732,11 +732,7 @@ func isClosedConnError(err error) bool { return false } - // TODO: remove this string search and be more like the Windows - // case below. That might involve modifying the standard library - // to return better error types. - str := err.Error() - if strings.Contains(str, "use of closed network connection") { + if errors.Is(err, net.ErrClosed) { return true } @@ -1482,6 +1478,11 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { sc.goAway(ErrCodeFlowControl) return true case ConnectionError: + if res.f != nil { + if id := res.f.Header().StreamID; id > sc.maxClientStreamID { + sc.maxClientStreamID = id + } + } sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) sc.goAway(ErrCode(ev)) return true // goAway will handle shutdown diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index ce375c8c7..2fa49490c 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -936,7 +936,20 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) { } last := f.LastStreamID for streamID, cs := range cc.streams { - if streamID > last { + if streamID <= last { + // The server's GOAWAY indicates that it received this stream. + // It will either finish processing it, or close the connection + // without doing so. Either way, leave the stream alone for now. + continue + } + if streamID == 1 && cc.goAway.ErrCode != ErrCodeNo { + // Don't retry the first stream on a connection if we get a non-NO error. + // If the server is sending an error on a new connection, + // retrying the request on a new one probably isn't going to work. + cs.abortStreamLocked(fmt.Errorf("http2: Transport received GOAWAY from server ErrCode:%v", cc.goAway.ErrCode)) + } else { + // Aborting the stream with errClentConnGotGoAway indicates that + // the request should be retried on a new connection. 
cs.abortStreamLocked(errClientConnGotGoAway) } } diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 4756ad5f7..8fa707aa4 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -103,6 +103,7 @@ var ARM64 struct { HasASIMDDP bool // Advanced SIMD double precision instruction set HasSHA512 bool // SHA512 hardware implementation HasSVE bool // Scalable Vector Extensions + HasSVE2 bool // Scalable Vector Extensions 2 HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 _ CacheLinePad } diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go index f3eb993bf..0e27a21e1 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -28,6 +28,7 @@ func initOptions() { {Name: "sm3", Feature: &ARM64.HasSM3}, {Name: "sm4", Feature: &ARM64.HasSM4}, {Name: "sve", Feature: &ARM64.HasSVE}, + {Name: "sve2", Feature: &ARM64.HasSVE2}, {Name: "crc32", Feature: &ARM64.HasCRC32}, {Name: "atomics", Feature: &ARM64.HasATOMICS}, {Name: "asimdhp", Feature: &ARM64.HasASIMDHP}, @@ -164,6 +165,15 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { switch extractBits(pfr0, 32, 35) { case 1: ARM64.HasSVE = true + + parseARM64SVERegister(getzfr0()) + } +} + +func parseARM64SVERegister(zfr0 uint64) { + switch extractBits(zfr0, 0, 3) { + case 1: + ARM64.HasSVE2 = true } } diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s index fcb9a3888..22cc99844 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.s +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -29,3 +29,11 @@ TEXT ·getpfr0(SB),NOSPLIT,$0-8 WORD $0xd5380400 MOVD R0, ret+0(FP) RET + +// func getzfr0() uint64 +TEXT ·getzfr0(SB),NOSPLIT,$0-8 + // get SVE Feature Register 0 into x0 + // mrs x0, ID_AA64ZFR0_EL1 = d5380480 + WORD $0xd5380480 + MOVD R0, ret+0(FP) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go index a8acd3e32..6ac6e1efb 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -9,3 +9,4 @@ package cpu func getisar0() uint64 func getisar1() uint64 func getpfr0() uint64 +func getzfr0() uint64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go index a968b80fa..3d386d0fc 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -35,6 +35,8 @@ const ( hwcap_SHA512 = 1 << 21 hwcap_SVE = 1 << 22 hwcap_ASIMDFHM = 1 << 23 + + hwcap2_SVE2 = 1 << 1 ) // linuxKernelCanEmulateCPUID reports whether we're running @@ -104,6 +106,9 @@ func doinit() { ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) ARM64.HasSVE = isSet(hwCap, hwcap_SVE) ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) + + // HWCAP2 feature bits + ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2) } func isSet(hwc uint, value uint) bool { diff --git a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s index 2f67ba86d..813dfad7d 100644 --- a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s @@ -9,9 +9,11 @@ #define PSALAA 1208(R0) #define GTAB64(x) 80(x) #define LCA64(x) 88(x) +#define SAVSTACK_ASYNC(x) 336(x) // in the LCA #define CAA(x) 8(x) -#define EDCHPXV(x) 1016(x) // in the CAA -#define SAVSTACK_ASYNC(x) 336(x) // in the LCA +#define CEECAATHDID(x) 976(x) // in the CAA +#define 
EDCHPXV(x) 1016(x) // in the CAA +#define GOCB(x) 1104(x) // in the CAA // SS_*, where x=SAVSTACK_ASYNC #define SS_LE(x) 0(x) @@ -19,405 +21,362 @@ #define SS_ERRNO(x) 16(x) #define SS_ERRNOJR(x) 20(x) -#define LE_CALL BYTE $0x0D; BYTE $0x76; // BL R7, R6 +// Function Descriptor Offsets +#define __errno 0x156*16 +#define __err2ad 0x16C*16 -TEXT ·clearErrno(SB),NOSPLIT,$0-0 - BL addrerrno<>(SB) - MOVD $0, 0(R3) +// Call Instructions +#define LE_CALL BYTE $0x0D; BYTE $0x76 // BL R7, R6 +#define SVC_LOAD BYTE $0x0A; BYTE $0x08 // SVC 08 LOAD +#define SVC_DELETE BYTE $0x0A; BYTE $0x09 // SVC 09 DELETE + +DATA zosLibVec<>(SB)/8, $0 +GLOBL zosLibVec<>(SB), NOPTR, $8 + +TEXT ·initZosLibVec(SB), NOSPLIT|NOFRAME, $0-0 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD CAA(R8), R8 + MOVD EDCHPXV(R8), R8 + MOVD R8, zosLibVec<>(SB) + RET + +TEXT ·GetZosLibVec(SB), NOSPLIT|NOFRAME, $0-0 + MOVD zosLibVec<>(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·clearErrno(SB), NOSPLIT, $0-0 + BL addrerrno<>(SB) + MOVD $0, 0(R3) RET // Returns the address of errno in R3. -TEXT addrerrno<>(SB),NOSPLIT|NOFRAME,$0-0 +TEXT addrerrno<>(SB), NOSPLIT|NOFRAME, $0-0 // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 // Get __errno FuncDesc. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - ADD $(0x156*16), R9 - LMG 0(R9), R5, R6 + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + ADD $(__errno), R9 + LMG 0(R9), R5, R6 // Switch to saved LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) // Call __errno function. LE_CALL NOPH // Switch back to Go stack. - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. RET -TEXT ·syscall_syscall(SB),NOSPLIT,$0-56 - BL runtime·entersyscall(SB) - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 +// func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64) +TEXT ·svcCall(SB), NOSPLIT, $0 + BL runtime·save_g(SB) // Save g and stack pointer + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD R15, 0(R9) - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 + MOVD argv+8(FP), R1 // Move function arguments into registers + MOVD dsa+16(FP), g + MOVD fnptr+0(FP), R15 - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 + BYTE $0x0D // Branch to function + BYTE $0xEF - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) + BL runtime·load_g(SB) // Restore g and stack pointer + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R15 - // Call function. - LE_CALL - NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+32(FP) - MOVD R0, r2+40(FP) - MOVD R0, err+48(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+48(FP) -done: - BL runtime·exitsyscall(SB) RET -TEXT ·syscall_rawsyscall(SB),NOSPLIT,$0-56 - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. 
- MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 +// func svcLoad(name *byte) unsafe.Pointer +TEXT ·svcLoad(SB), NOSPLIT, $0 + MOVD R15, R2 // Save go stack pointer + MOVD name+0(FP), R0 // Move SVC args into registers + MOVD $0x80000000, R1 + MOVD $0, R15 + SVC_LOAD + MOVW R15, R3 // Save return code from SVC + MOVD R2, R15 // Restore go stack pointer + CMP R3, $0 // Check SVC return code + BNE error + + MOVD $-2, R3 // Reset last bit of entry point to zero + AND R0, R3 + MOVD R3, ret+8(FP) // Return entry point returned by SVC + CMP R0, R3 // Check if last bit of entry point was set + BNE done + + MOVD R15, R2 // Save go stack pointer + MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08) + SVC_DELETE + MOVD R2, R15 // Restore go stack pointer - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) +error: + MOVD $0, ret+8(FP) // Return 0 on failure - // Call function. - LE_CALL - NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+32(FP) - MOVD R0, r2+40(FP) - MOVD R0, err+48(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+48(FP) done: + XOR R0, R0 // Reset r0 to 0 RET -TEXT ·syscall_syscall6(SB),NOSPLIT,$0-80 - BL runtime·entersyscall(SB) - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 +// func svcUnload(name *byte, fnptr unsafe.Pointer) int64 +TEXT ·svcUnload(SB), NOSPLIT, $0 + MOVD R15, R2 // Save go stack pointer + MOVD name+0(FP), R0 // Move SVC args into registers + MOVD fnptr+8(FP), R15 + SVC_DELETE + XOR R0, R0 // Reset r0 to 0 + MOVD R15, R1 // Save SVC return code + MOVD R2, R15 // Restore go stack pointer + MOVD R1, ret+16(FP) // Return SVC return code + RET +// func gettid() uint64 +TEXT ·gettid(SB), NOSPLIT, $0 // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 + // Get CEECAATHDID + MOVD CAA(R8), R9 + MOVD CEECAATHDID(R9), R9 + MOVD R9, ret+0(FP) - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - - // Call function. - LE_CALL - NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+56(FP) - MOVD R0, r2+64(FP) - MOVD R0, err+72(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+72(FP) -done: - BL runtime·exitsyscall(SB) RET -TEXT ·syscall_rawsyscall6(SB),NOSPLIT,$0-80 - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 +// +// Call LE function, if the return is -1 +// errno and errno2 is retrieved +// +TEXT ·CallLeFuncWithErr(SB), NOSPLIT, $0 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD CAA(R8), R9 + MOVD g, GOCB(R9) // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. 
- MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - - // Call function. - LE_CALL + MOVD SAVSTACK_ASYNC(R8), R9 // R9-> LE stack frame saving address + MOVD 0(R9), R4 // R4-> restore previously saved stack frame pointer + + MOVD parms_base+8(FP), R7 // R7 -> argument array + MOVD parms_len+16(FP), R8 // R8 number of arguments + + // arg 1 ---> R1 + CMP R8, $0 + BEQ docall + SUB $1, R8 + MOVD 0(R7), R1 + + // arg 2 ---> R2 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R2 + + // arg 3 --> R3 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R3 + + CMP R8, $0 + BEQ docall + MOVD $2176+16, R6 // starting LE stack address-8 to store 4th argument + +repeat: + ADD $8, R7 + MOVD 0(R7), R0 // advance arg pointer by 8 byte + ADD $8, R6 // advance LE argument address by 8 byte + MOVD R0, (R4)(R6*1) // copy argument from go-slice to le-frame + SUB $1, R8 + CMP R8, $0 + BNE repeat + +docall: + MOVD funcdesc+0(FP), R8 // R8-> function descriptor + LMG 0(R8), R5, R6 + MOVD $0, 0(R9) // R9 address of SAVSTACK_ASYNC + LE_CALL // balr R7, R6 (return #1) + NOPH + MOVD R3, ret+32(FP) + CMP R3, $-1 // compare result to -1 + BNE done + + // retrieve errno and errno2 + MOVD zosLibVec<>(SB), R8 + ADD $(__errno), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __errno (return #3) NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+56(FP) - MOVD R0, r2+64(FP) - MOVD R0, err+72(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL ·rrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+72(FP) + MOVWZ 0(R3), R3 + MOVD R3, err+48(FP) + MOVD zosLibVec<>(SB), R8 + ADD $(__err2ad), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __err2ad (return #2) + NOPH + MOVW (R3), R2 // retrieve errno2 + MOVD R2, errno2+40(FP) // store in return area + done: + MOVD R4, 0(R9) // Save stack pointer. RET -TEXT ·syscall_syscall9(SB),NOSPLIT,$0 - BL runtime·entersyscall(SB) - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 +// +// Call LE function, if the return is 0 +// errno and errno2 is retrieved +// +TEXT ·CallLeFuncWithPtrReturn(SB), NOSPLIT, $0 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD CAA(R8), R9 + MOVD g, GOCB(R9) // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - MOVD a7+56(FP), R12 - MOVD R12, (2176+48)(R4) - MOVD a8+64(FP), R12 - MOVD R12, (2176+56)(R4) - MOVD a9+72(FP), R12 - MOVD R12, (2176+64)(R4) - - // Call function. 
- LE_CALL + MOVD SAVSTACK_ASYNC(R8), R9 // R9-> LE stack frame saving address + MOVD 0(R9), R4 // R4-> restore previously saved stack frame pointer + + MOVD parms_base+8(FP), R7 // R7 -> argument array + MOVD parms_len+16(FP), R8 // R8 number of arguments + + // arg 1 ---> R1 + CMP R8, $0 + BEQ docall + SUB $1, R8 + MOVD 0(R7), R1 + + // arg 2 ---> R2 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R2 + + // arg 3 --> R3 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R3 + + CMP R8, $0 + BEQ docall + MOVD $2176+16, R6 // starting LE stack address-8 to store 4th argument + +repeat: + ADD $8, R7 + MOVD 0(R7), R0 // advance arg pointer by 8 byte + ADD $8, R6 // advance LE argument address by 8 byte + MOVD R0, (R4)(R6*1) // copy argument from go-slice to le-frame + SUB $1, R8 + CMP R8, $0 + BNE repeat + +docall: + MOVD funcdesc+0(FP), R8 // R8-> function descriptor + LMG 0(R8), R5, R6 + MOVD $0, 0(R9) // R9 address of SAVSTACK_ASYNC + LE_CALL // balr R7, R6 (return #1) NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+80(FP) - MOVD R0, r2+88(FP) - MOVD R0, err+96(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+96(FP) -done: - BL runtime·exitsyscall(SB) - RET - -TEXT ·syscall_rawsyscall9(SB),NOSPLIT,$0 - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 - - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - MOVD a7+56(FP), R12 - MOVD R12, (2176+48)(R4) - MOVD a8+64(FP), R12 - MOVD R12, (2176+56)(R4) - MOVD a9+72(FP), R12 - MOVD R12, (2176+64)(R4) - - // Call function. - LE_CALL + MOVD R3, ret+32(FP) + CMP R3, $0 // compare result to 0 + BNE done + + // retrieve errno and errno2 + MOVD zosLibVec<>(SB), R8 + ADD $(__errno), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __errno (return #3) NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. 
- - MOVD R3, r1+80(FP) - MOVD R0, r2+88(FP) - MOVD R0, err+96(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+96(FP) -done: - RET - -// func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64) -TEXT ·svcCall(SB),NOSPLIT,$0 - BL runtime·save_g(SB) // Save g and stack pointer - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD R15, 0(R9) - - MOVD argv+8(FP), R1 // Move function arguments into registers - MOVD dsa+16(FP), g - MOVD fnptr+0(FP), R15 - - BYTE $0x0D // Branch to function - BYTE $0xEF - - BL runtime·load_g(SB) // Restore g and stack pointer - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R15 - - RET - -// func svcLoad(name *byte) unsafe.Pointer -TEXT ·svcLoad(SB),NOSPLIT,$0 - MOVD R15, R2 // Save go stack pointer - MOVD name+0(FP), R0 // Move SVC args into registers - MOVD $0x80000000, R1 - MOVD $0, R15 - BYTE $0x0A // SVC 08 LOAD - BYTE $0x08 - MOVW R15, R3 // Save return code from SVC - MOVD R2, R15 // Restore go stack pointer - CMP R3, $0 // Check SVC return code - BNE error - - MOVD $-2, R3 // Reset last bit of entry point to zero - AND R0, R3 - MOVD R3, addr+8(FP) // Return entry point returned by SVC - CMP R0, R3 // Check if last bit of entry point was set - BNE done - - MOVD R15, R2 // Save go stack pointer - MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08) - BYTE $0x0A // SVC 09 DELETE - BYTE $0x09 - MOVD R2, R15 // Restore go stack pointer + MOVWZ 0(R3), R3 + MOVD R3, err+48(FP) + MOVD zosLibVec<>(SB), R8 + ADD $(__err2ad), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __err2ad (return #2) + NOPH + MOVW (R3), R2 // retrieve errno2 + MOVD R2, errno2+40(FP) // store in return area + XOR R2, R2 + MOVWZ R2, (R3) // clear errno2 -error: - MOVD $0, addr+8(FP) // Return 0 on failure done: - XOR R0, R0 // Reset r0 to 0 + MOVD R4, 0(R9) // Save stack pointer. 
RET -// func svcUnload(name *byte, fnptr unsafe.Pointer) int64 -TEXT ·svcUnload(SB),NOSPLIT,$0 - MOVD R15, R2 // Save go stack pointer - MOVD name+0(FP), R0 // Move SVC args into registers - MOVD addr+8(FP), R15 - BYTE $0x0A // SVC 09 - BYTE $0x09 - XOR R0, R0 // Reset r0 to 0 - MOVD R15, R1 // Save SVC return code - MOVD R2, R15 // Restore go stack pointer - MOVD R1, rc+0(FP) // Return SVC return code +// +// function to test if a pointer can be safely dereferenced (content read) +// return 0 for succces +// +TEXT ·ptrtest(SB), NOSPLIT, $0-16 + MOVD arg+0(FP), R10 // test pointer in R10 + + // set up R2 to point to CEECAADMC + BYTE $0xE3; BYTE $0x20; BYTE $0x04; BYTE $0xB8; BYTE $0x00; BYTE $0x17 // llgt 2,1208 + BYTE $0xB9; BYTE $0x17; BYTE $0x00; BYTE $0x22 // llgtr 2,2 + BYTE $0xA5; BYTE $0x26; BYTE $0x7F; BYTE $0xFF // nilh 2,32767 + BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x58; BYTE $0x00; BYTE $0x04 // lg 2,88(2) + BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x08; BYTE $0x00; BYTE $0x04 // lg 2,8(2) + BYTE $0x41; BYTE $0x22; BYTE $0x03; BYTE $0x68 // la 2,872(2) + + // set up R5 to point to the "shunt" path which set 1 to R3 (failure) + BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x33 // xgr 3,3 + BYTE $0xA7; BYTE $0x55; BYTE $0x00; BYTE $0x04 // bras 5,lbl1 + BYTE $0xA7; BYTE $0x39; BYTE $0x00; BYTE $0x01 // lghi 3,1 + + // if r3 is not zero (failed) then branch to finish + BYTE $0xB9; BYTE $0x02; BYTE $0x00; BYTE $0x33 // lbl1 ltgr 3,3 + BYTE $0xA7; BYTE $0x74; BYTE $0x00; BYTE $0x08 // brc b'0111',lbl2 + + // stomic store shunt address in R5 into CEECAADMC + BYTE $0xE3; BYTE $0x52; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 5,0(2) + + // now try reading from the test pointer in R10, if it fails it branches to the "lghi" instruction above + BYTE $0xE3; BYTE $0x9A; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x04 // lg 9,0(10) + + // finish here, restore 0 into CEECAADMC + BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x99 // lbl2 xgr 9,9 + BYTE $0xE3; BYTE $0x92; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 9,0(2) + MOVD R3, ret+8(FP) // result in R3 RET -// func gettid() uint64 -TEXT ·gettid(SB), NOSPLIT, $0 - // Get library control area (LCA). 
- MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get CEECAATHDID - MOVD CAA(R8), R9 - MOVD 0x3D0(R9), R9 - MOVD R9, ret+0(FP) - +// +// function to test if a untptr can be loaded from a pointer +// return 1: the 8-byte content +// 2: 0 for success, 1 for failure +// +// func safeload(ptr uintptr) ( value uintptr, error uintptr) +TEXT ·safeload(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R10 // test pointer in R10 + MOVD $0x0, R6 + BYTE $0xE3; BYTE $0x20; BYTE $0x04; BYTE $0xB8; BYTE $0x00; BYTE $0x17 // llgt 2,1208 + BYTE $0xB9; BYTE $0x17; BYTE $0x00; BYTE $0x22 // llgtr 2,2 + BYTE $0xA5; BYTE $0x26; BYTE $0x7F; BYTE $0xFF // nilh 2,32767 + BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x58; BYTE $0x00; BYTE $0x04 // lg 2,88(2) + BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x08; BYTE $0x00; BYTE $0x04 // lg 2,8(2) + BYTE $0x41; BYTE $0x22; BYTE $0x03; BYTE $0x68 // la 2,872(2) + BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x33 // xgr 3,3 + BYTE $0xA7; BYTE $0x55; BYTE $0x00; BYTE $0x04 // bras 5,lbl1 + BYTE $0xA7; BYTE $0x39; BYTE $0x00; BYTE $0x01 // lghi 3,1 + BYTE $0xB9; BYTE $0x02; BYTE $0x00; BYTE $0x33 // lbl1 ltgr 3,3 + BYTE $0xA7; BYTE $0x74; BYTE $0x00; BYTE $0x08 // brc b'0111',lbl2 + BYTE $0xE3; BYTE $0x52; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 5,0(2) + BYTE $0xE3; BYTE $0x6A; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x04 // lg 6,0(10) + BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x99 // lbl2 xgr 9,9 + BYTE $0xE3; BYTE $0x92; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 9,0(2) + MOVD R6, value+8(FP) // result in R6 + MOVD R3, error+16(FP) // error in R3 RET diff --git a/vendor/golang.org/x/sys/unix/bpxsvc_zos.go b/vendor/golang.org/x/sys/unix/bpxsvc_zos.go new file mode 100644 index 000000000..39d647d86 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/bpxsvc_zos.go @@ -0,0 +1,657 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build zos + +package unix + +import ( + "bytes" + "fmt" + "unsafe" +) + +//go:noescape +func bpxcall(plist []unsafe.Pointer, bpx_offset int64) + +//go:noescape +func A2e([]byte) + +//go:noescape +func E2a([]byte) + +const ( + BPX4STA = 192 // stat + BPX4FST = 104 // fstat + BPX4LST = 132 // lstat + BPX4OPN = 156 // open + BPX4CLO = 72 // close + BPX4CHR = 500 // chattr + BPX4FCR = 504 // fchattr + BPX4LCR = 1180 // lchattr + BPX4CTW = 492 // cond_timed_wait + BPX4GTH = 1056 // __getthent + BPX4PTQ = 412 // pthread_quiesc + BPX4PTR = 320 // ptrace +) + +const ( + //options + //byte1 + BPX_OPNFHIGH = 0x80 + //byte2 + BPX_OPNFEXEC = 0x80 + //byte3 + BPX_O_NOLARGEFILE = 0x08 + BPX_O_LARGEFILE = 0x04 + BPX_O_ASYNCSIG = 0x02 + BPX_O_SYNC = 0x01 + //byte4 + BPX_O_CREXCL = 0xc0 + BPX_O_CREAT = 0x80 + BPX_O_EXCL = 0x40 + BPX_O_NOCTTY = 0x20 + BPX_O_TRUNC = 0x10 + BPX_O_APPEND = 0x08 + BPX_O_NONBLOCK = 0x04 + BPX_FNDELAY = 0x04 + BPX_O_RDWR = 0x03 + BPX_O_RDONLY = 0x02 + BPX_O_WRONLY = 0x01 + BPX_O_ACCMODE = 0x03 + BPX_O_GETFL = 0x0f + + //mode + // byte1 (file type) + BPX_FT_DIR = 1 + BPX_FT_CHARSPEC = 2 + BPX_FT_REGFILE = 3 + BPX_FT_FIFO = 4 + BPX_FT_SYMLINK = 5 + BPX_FT_SOCKET = 6 + //byte3 + BPX_S_ISUID = 0x08 + BPX_S_ISGID = 0x04 + BPX_S_ISVTX = 0x02 + BPX_S_IRWXU1 = 0x01 + BPX_S_IRUSR = 0x01 + //byte4 + BPX_S_IRWXU2 = 0xc0 + BPX_S_IWUSR = 0x80 + BPX_S_IXUSR = 0x40 + BPX_S_IRWXG = 0x38 + BPX_S_IRGRP = 0x20 + BPX_S_IWGRP = 0x10 + BPX_S_IXGRP = 0x08 + BPX_S_IRWXOX = 0x07 + BPX_S_IROTH = 0x04 + BPX_S_IWOTH = 0x02 + BPX_S_IXOTH = 0x01 + + CW_INTRPT = 1 + CW_CONDVAR = 32 + CW_TIMEOUT = 64 + + PGTHA_NEXT = 2 + PGTHA_CURRENT = 1 + PGTHA_FIRST = 0 + PGTHA_LAST = 3 + PGTHA_PROCESS = 0x80 + PGTHA_CONTTY = 0x40 + PGTHA_PATH = 0x20 + PGTHA_COMMAND = 0x10 + PGTHA_FILEDATA = 0x08 + PGTHA_THREAD = 0x04 + PGTHA_PTAG = 0x02 + PGTHA_COMMANDLONG = 0x01 + PGTHA_THREADFAST = 0x80 + PGTHA_FILEPATH = 0x40 + PGTHA_THDSIGMASK = 0x20 + // thread quiece mode + QUIESCE_TERM int32 = 1 + QUIESCE_FORCE int32 = 2 + QUIESCE_QUERY int32 = 3 + QUIESCE_FREEZE int32 = 4 + QUIESCE_UNFREEZE int32 = 5 + FREEZE_THIS_THREAD int32 = 6 + FREEZE_EXIT int32 = 8 + QUIESCE_SRB int32 = 9 +) + +type Pgtha struct { + Pid uint32 // 0 + Tid0 uint32 // 4 + Tid1 uint32 + Accesspid byte // C + Accesstid byte // D + Accessasid uint16 // E + Loginname [8]byte // 10 + Flag1 byte // 18 + Flag1b2 byte // 19 +} + +type Bpxystat_t struct { // DSECT BPXYSTAT + St_id [4]uint8 // 0 + St_length uint16 // 0x4 + St_version uint16 // 0x6 + St_mode uint32 // 0x8 + St_ino uint32 // 0xc + St_dev uint32 // 0x10 + St_nlink uint32 // 0x14 + St_uid uint32 // 0x18 + St_gid uint32 // 0x1c + St_size uint64 // 0x20 + St_atime uint32 // 0x28 + St_mtime uint32 // 0x2c + St_ctime uint32 // 0x30 + St_rdev uint32 // 0x34 + St_auditoraudit uint32 // 0x38 + St_useraudit uint32 // 0x3c + St_blksize uint32 // 0x40 + St_createtime uint32 // 0x44 + St_auditid [4]uint32 // 0x48 + St_res01 uint32 // 0x58 + Ft_ccsid uint16 // 0x5c + Ft_flags uint16 // 0x5e + St_res01a [2]uint32 // 0x60 + St_res02 uint32 // 0x68 + St_blocks uint32 // 0x6c + St_opaque [3]uint8 // 0x70 + St_visible uint8 // 0x73 + St_reftime uint32 // 0x74 + St_fid uint64 // 0x78 + St_filefmt uint8 // 0x80 + St_fspflag2 uint8 // 0x81 + St_res03 [2]uint8 // 0x82 + St_ctimemsec uint32 // 0x84 + St_seclabel [8]uint8 // 0x88 + St_res04 [4]uint8 // 0x90 + // end of version 1 + _ uint32 // 0x94 + St_atime64 uint64 // 0x98 + St_mtime64 uint64 // 0xa0 + St_ctime64 uint64 // 0xa8 + St_createtime64 uint64 // 0xb0 + 
St_reftime64 uint64 // 0xb8 + _ uint64 // 0xc0 + St_res05 [16]uint8 // 0xc8 + // end of version 2 +} + +type BpxFilestatus struct { + Oflag1 byte + Oflag2 byte + Oflag3 byte + Oflag4 byte +} + +type BpxMode struct { + Ftype byte + Mode1 byte + Mode2 byte + Mode3 byte +} + +// Thr attribute structure for extended attributes +type Bpxyatt_t struct { // DSECT BPXYATT + Att_id [4]uint8 + Att_version uint16 + Att_res01 [2]uint8 + Att_setflags1 uint8 + Att_setflags2 uint8 + Att_setflags3 uint8 + Att_setflags4 uint8 + Att_mode uint32 + Att_uid uint32 + Att_gid uint32 + Att_opaquemask [3]uint8 + Att_visblmaskres uint8 + Att_opaque [3]uint8 + Att_visibleres uint8 + Att_size_h uint32 + Att_size_l uint32 + Att_atime uint32 + Att_mtime uint32 + Att_auditoraudit uint32 + Att_useraudit uint32 + Att_ctime uint32 + Att_reftime uint32 + // end of version 1 + Att_filefmt uint8 + Att_res02 [3]uint8 + Att_filetag uint32 + Att_res03 [8]uint8 + // end of version 2 + Att_atime64 uint64 + Att_mtime64 uint64 + Att_ctime64 uint64 + Att_reftime64 uint64 + Att_seclabel [8]uint8 + Att_ver3res02 [8]uint8 + // end of version 3 +} + +func BpxOpen(name string, options *BpxFilestatus, mode *BpxMode) (rv int32, rc int32, rn int32) { + if len(name) < 1024 { + var namebuf [1024]byte + sz := int32(copy(namebuf[:], name)) + A2e(namebuf[:sz]) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(options) + parms[3] = unsafe.Pointer(mode) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4OPN) + return rv, rc, rn + } + return -1, -1, -1 +} + +func BpxClose(fd int32) (rv int32, rc int32, rn int32) { + var parms [4]unsafe.Pointer + parms[0] = unsafe.Pointer(&fd) + parms[1] = unsafe.Pointer(&rv) + parms[2] = unsafe.Pointer(&rc) + parms[3] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4CLO) + return rv, rc, rn +} + +func BpxFileFStat(fd int32, st *Bpxystat_t) (rv int32, rc int32, rn int32) { + st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3} + st.St_version = 2 + stat_sz := uint32(unsafe.Sizeof(*st)) + var parms [6]unsafe.Pointer + parms[0] = unsafe.Pointer(&fd) + parms[1] = unsafe.Pointer(&stat_sz) + parms[2] = unsafe.Pointer(st) + parms[3] = unsafe.Pointer(&rv) + parms[4] = unsafe.Pointer(&rc) + parms[5] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4FST) + return rv, rc, rn +} + +func BpxFileStat(name string, st *Bpxystat_t) (rv int32, rc int32, rn int32) { + if len(name) < 1024 { + var namebuf [1024]byte + sz := int32(copy(namebuf[:], name)) + A2e(namebuf[:sz]) + st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3} + st.St_version = 2 + stat_sz := uint32(unsafe.Sizeof(*st)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&stat_sz) + parms[3] = unsafe.Pointer(st) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4STA) + return rv, rc, rn + } + return -1, -1, -1 +} + +func BpxFileLStat(name string, st *Bpxystat_t) (rv int32, rc int32, rn int32) { + if len(name) < 1024 { + var namebuf [1024]byte + sz := int32(copy(namebuf[:], name)) + A2e(namebuf[:sz]) + st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3} + st.St_version = 2 + stat_sz := uint32(unsafe.Sizeof(*st)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&stat_sz) + parms[3] = unsafe.Pointer(st) + parms[4] = 
unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4LST) + return rv, rc, rn + } + return -1, -1, -1 +} + +func BpxChattr(path string, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) { + if len(path) >= 1024 { + return -1, -1, -1 + } + var namebuf [1024]byte + sz := int32(copy(namebuf[:], path)) + A2e(namebuf[:sz]) + attr_sz := uint32(unsafe.Sizeof(*attr)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&attr_sz) + parms[3] = unsafe.Pointer(attr) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4CHR) + return rv, rc, rn +} + +func BpxLchattr(path string, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) { + if len(path) >= 1024 { + return -1, -1, -1 + } + var namebuf [1024]byte + sz := int32(copy(namebuf[:], path)) + A2e(namebuf[:sz]) + attr_sz := uint32(unsafe.Sizeof(*attr)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&attr_sz) + parms[3] = unsafe.Pointer(attr) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4LCR) + return rv, rc, rn +} + +func BpxFchattr(fd int32, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) { + attr_sz := uint32(unsafe.Sizeof(*attr)) + var parms [6]unsafe.Pointer + parms[0] = unsafe.Pointer(&fd) + parms[1] = unsafe.Pointer(&attr_sz) + parms[2] = unsafe.Pointer(attr) + parms[3] = unsafe.Pointer(&rv) + parms[4] = unsafe.Pointer(&rc) + parms[5] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4FCR) + return rv, rc, rn +} + +func BpxCondTimedWait(sec uint32, nsec uint32, events uint32, secrem *uint32, nsecrem *uint32) (rv int32, rc int32, rn int32) { + var parms [8]unsafe.Pointer + parms[0] = unsafe.Pointer(&sec) + parms[1] = unsafe.Pointer(&nsec) + parms[2] = unsafe.Pointer(&events) + parms[3] = unsafe.Pointer(secrem) + parms[4] = unsafe.Pointer(nsecrem) + parms[5] = unsafe.Pointer(&rv) + parms[6] = unsafe.Pointer(&rc) + parms[7] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4CTW) + return rv, rc, rn +} +func BpxGetthent(in *Pgtha, outlen *uint32, out unsafe.Pointer) (rv int32, rc int32, rn int32) { + var parms [7]unsafe.Pointer + inlen := uint32(26) // nothing else will work. 
Go says Pgtha is 28-byte because of alignment, but Pgtha is "packed" and must be 26-byte + parms[0] = unsafe.Pointer(&inlen) + parms[1] = unsafe.Pointer(&in) + parms[2] = unsafe.Pointer(outlen) + parms[3] = unsafe.Pointer(&out) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4GTH) + return rv, rc, rn +} +func ZosJobname() (jobname string, err error) { + var pgtha Pgtha + pgtha.Pid = uint32(Getpid()) + pgtha.Accesspid = PGTHA_CURRENT + pgtha.Flag1 = PGTHA_PROCESS + var out [256]byte + var outlen uint32 + outlen = 256 + rv, rc, rn := BpxGetthent(&pgtha, &outlen, unsafe.Pointer(&out[0])) + if rv == 0 { + gthc := []byte{0x87, 0xa3, 0x88, 0x83} // 'gthc' in ebcdic + ix := bytes.Index(out[:], gthc) + if ix == -1 { + err = fmt.Errorf("BPX4GTH: gthc return data not found") + return + } + jn := out[ix+80 : ix+88] // we didn't declare Pgthc, but jobname is 8-byte at offset 80 + E2a(jn) + jobname = string(bytes.TrimRight(jn, " ")) + + } else { + err = fmt.Errorf("BPX4GTH: rc=%d errno=%d reason=code=0x%x", rv, rc, rn) + } + return +} +func Bpx4ptq(code int32, data string) (rv int32, rc int32, rn int32) { + var userdata [8]byte + var parms [5]unsafe.Pointer + copy(userdata[:], data+" ") + A2e(userdata[:]) + parms[0] = unsafe.Pointer(&code) + parms[1] = unsafe.Pointer(&userdata[0]) + parms[2] = unsafe.Pointer(&rv) + parms[3] = unsafe.Pointer(&rc) + parms[4] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4PTQ) + return rv, rc, rn +} + +const ( + PT_TRACE_ME = 0 // Debug this process + PT_READ_I = 1 // Read a full word + PT_READ_D = 2 // Read a full word + PT_READ_U = 3 // Read control info + PT_WRITE_I = 4 //Write a full word + PT_WRITE_D = 5 //Write a full word + PT_CONTINUE = 7 //Continue the process + PT_KILL = 8 //Terminate the process + PT_READ_GPR = 11 // Read GPR, CR, PSW + PT_READ_FPR = 12 // Read FPR + PT_READ_VR = 13 // Read VR + PT_WRITE_GPR = 14 // Write GPR, CR, PSW + PT_WRITE_FPR = 15 // Write FPR + PT_WRITE_VR = 16 // Write VR + PT_READ_BLOCK = 17 // Read storage + PT_WRITE_BLOCK = 19 // Write storage + PT_READ_GPRH = 20 // Read GPRH + PT_WRITE_GPRH = 21 // Write GPRH + PT_REGHSET = 22 // Read all GPRHs + PT_ATTACH = 30 // Attach to a process + PT_DETACH = 31 // Detach from a process + PT_REGSET = 32 // Read all GPRs + PT_REATTACH = 33 // Reattach to a process + PT_LDINFO = 34 // Read loader info + PT_MULTI = 35 // Multi process mode + PT_LD64INFO = 36 // RMODE64 Info Area + PT_BLOCKREQ = 40 // Block request + PT_THREAD_INFO = 60 // Read thread info + PT_THREAD_MODIFY = 61 + PT_THREAD_READ_FOCUS = 62 + PT_THREAD_WRITE_FOCUS = 63 + PT_THREAD_HOLD = 64 + PT_THREAD_SIGNAL = 65 + PT_EXPLAIN = 66 + PT_EVENTS = 67 + PT_THREAD_INFO_EXTENDED = 68 + PT_REATTACH2 = 71 + PT_CAPTURE = 72 + PT_UNCAPTURE = 73 + PT_GET_THREAD_TCB = 74 + PT_GET_ALET = 75 + PT_SWAPIN = 76 + PT_EXTENDED_EVENT = 98 + PT_RECOVER = 99 // Debug a program check + PT_GPR0 = 0 // General purpose register 0 + PT_GPR1 = 1 // General purpose register 1 + PT_GPR2 = 2 // General purpose register 2 + PT_GPR3 = 3 // General purpose register 3 + PT_GPR4 = 4 // General purpose register 4 + PT_GPR5 = 5 // General purpose register 5 + PT_GPR6 = 6 // General purpose register 6 + PT_GPR7 = 7 // General purpose register 7 + PT_GPR8 = 8 // General purpose register 8 + PT_GPR9 = 9 // General purpose register 9 + PT_GPR10 = 10 // General purpose register 10 + PT_GPR11 = 11 // General purpose register 11 + PT_GPR12 = 12 // General purpose register 12 + PT_GPR13 = 13 // General 
purpose register 13 + PT_GPR14 = 14 // General purpose register 14 + PT_GPR15 = 15 // General purpose register 15 + PT_FPR0 = 16 // Floating point register 0 + PT_FPR1 = 17 // Floating point register 1 + PT_FPR2 = 18 // Floating point register 2 + PT_FPR3 = 19 // Floating point register 3 + PT_FPR4 = 20 // Floating point register 4 + PT_FPR5 = 21 // Floating point register 5 + PT_FPR6 = 22 // Floating point register 6 + PT_FPR7 = 23 // Floating point register 7 + PT_FPR8 = 24 // Floating point register 8 + PT_FPR9 = 25 // Floating point register 9 + PT_FPR10 = 26 // Floating point register 10 + PT_FPR11 = 27 // Floating point register 11 + PT_FPR12 = 28 // Floating point register 12 + PT_FPR13 = 29 // Floating point register 13 + PT_FPR14 = 30 // Floating point register 14 + PT_FPR15 = 31 // Floating point register 15 + PT_FPC = 32 // Floating point control register + PT_PSW = 40 // PSW + PT_PSW0 = 40 // Left half of the PSW + PT_PSW1 = 41 // Right half of the PSW + PT_CR0 = 42 // Control register 0 + PT_CR1 = 43 // Control register 1 + PT_CR2 = 44 // Control register 2 + PT_CR3 = 45 // Control register 3 + PT_CR4 = 46 // Control register 4 + PT_CR5 = 47 // Control register 5 + PT_CR6 = 48 // Control register 6 + PT_CR7 = 49 // Control register 7 + PT_CR8 = 50 // Control register 8 + PT_CR9 = 51 // Control register 9 + PT_CR10 = 52 // Control register 10 + PT_CR11 = 53 // Control register 11 + PT_CR12 = 54 // Control register 12 + PT_CR13 = 55 // Control register 13 + PT_CR14 = 56 // Control register 14 + PT_CR15 = 57 // Control register 15 + PT_GPRH0 = 58 // GP High register 0 + PT_GPRH1 = 59 // GP High register 1 + PT_GPRH2 = 60 // GP High register 2 + PT_GPRH3 = 61 // GP High register 3 + PT_GPRH4 = 62 // GP High register 4 + PT_GPRH5 = 63 // GP High register 5 + PT_GPRH6 = 64 // GP High register 6 + PT_GPRH7 = 65 // GP High register 7 + PT_GPRH8 = 66 // GP High register 8 + PT_GPRH9 = 67 // GP High register 9 + PT_GPRH10 = 68 // GP High register 10 + PT_GPRH11 = 69 // GP High register 11 + PT_GPRH12 = 70 // GP High register 12 + PT_GPRH13 = 71 // GP High register 13 + PT_GPRH14 = 72 // GP High register 14 + PT_GPRH15 = 73 // GP High register 15 + PT_VR0 = 74 // Vector register 0 + PT_VR1 = 75 // Vector register 1 + PT_VR2 = 76 // Vector register 2 + PT_VR3 = 77 // Vector register 3 + PT_VR4 = 78 // Vector register 4 + PT_VR5 = 79 // Vector register 5 + PT_VR6 = 80 // Vector register 6 + PT_VR7 = 81 // Vector register 7 + PT_VR8 = 82 // Vector register 8 + PT_VR9 = 83 // Vector register 9 + PT_VR10 = 84 // Vector register 10 + PT_VR11 = 85 // Vector register 11 + PT_VR12 = 86 // Vector register 12 + PT_VR13 = 87 // Vector register 13 + PT_VR14 = 88 // Vector register 14 + PT_VR15 = 89 // Vector register 15 + PT_VR16 = 90 // Vector register 16 + PT_VR17 = 91 // Vector register 17 + PT_VR18 = 92 // Vector register 18 + PT_VR19 = 93 // Vector register 19 + PT_VR20 = 94 // Vector register 20 + PT_VR21 = 95 // Vector register 21 + PT_VR22 = 96 // Vector register 22 + PT_VR23 = 97 // Vector register 23 + PT_VR24 = 98 // Vector register 24 + PT_VR25 = 99 // Vector register 25 + PT_VR26 = 100 // Vector register 26 + PT_VR27 = 101 // Vector register 27 + PT_VR28 = 102 // Vector register 28 + PT_VR29 = 103 // Vector register 29 + PT_VR30 = 104 // Vector register 30 + PT_VR31 = 105 // Vector register 31 + PT_PSWG = 106 // PSWG + PT_PSWG0 = 106 // Bytes 0-3 + PT_PSWG1 = 107 // Bytes 4-7 + PT_PSWG2 = 108 // Bytes 8-11 (IA high word) + PT_PSWG3 = 109 // Bytes 12-15 (IA low word) +) + +func 
Bpx4ptr(request int32, pid int32, addr unsafe.Pointer, data unsafe.Pointer, buffer unsafe.Pointer) (rv int32, rc int32, rn int32) { + var parms [8]unsafe.Pointer + parms[0] = unsafe.Pointer(&request) + parms[1] = unsafe.Pointer(&pid) + parms[2] = unsafe.Pointer(&addr) + parms[3] = unsafe.Pointer(&data) + parms[4] = unsafe.Pointer(&buffer) + parms[5] = unsafe.Pointer(&rv) + parms[6] = unsafe.Pointer(&rc) + parms[7] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4PTR) + return rv, rc, rn +} + +func copyU8(val uint8, dest []uint8) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} + +func copyU8Arr(src, dest []uint8) int { + if len(dest) < len(src) { + return 0 + } + for i, v := range src { + dest[i] = v + } + return len(src) +} + +func copyU16(val uint16, dest []uint16) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} + +func copyU32(val uint32, dest []uint32) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} + +func copyU32Arr(src, dest []uint32) int { + if len(dest) < len(src) { + return 0 + } + for i, v := range src { + dest[i] = v + } + return len(src) +} + +func copyU64(val uint64, dest []uint64) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} diff --git a/vendor/golang.org/x/sys/unix/bpxsvc_zos.s b/vendor/golang.org/x/sys/unix/bpxsvc_zos.s new file mode 100644 index 000000000..4bd4a1798 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/bpxsvc_zos.s @@ -0,0 +1,192 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "go_asm.h" +#include "textflag.h" + +// function to call USS assembly language services +// +// doc: https://www.ibm.com/support/knowledgecenter/en/SSLTBW_3.1.0/com.ibm.zos.v3r1.bpxb100/bit64env.htm +// +// arg1 unsafe.Pointer array that ressembles an OS PLIST +// +// arg2 function offset as in +// doc: https://www.ibm.com/support/knowledgecenter/en/SSLTBW_3.1.0/com.ibm.zos.v3r1.bpxb100/bpx2cr_List_of_offsets.htm +// +// func bpxcall(plist []unsafe.Pointer, bpx_offset int64) + +TEXT ·bpxcall(SB), NOSPLIT|NOFRAME, $0 + MOVD plist_base+0(FP), R1 // r1 points to plist + MOVD bpx_offset+24(FP), R2 // r2 offset to BPX vector table + MOVD R14, R7 // save r14 + MOVD R15, R8 // save r15 + MOVWZ 16(R0), R9 + MOVWZ 544(R9), R9 + MOVWZ 24(R9), R9 // call vector in r9 + ADD R2, R9 // add offset to vector table + MOVWZ (R9), R9 // r9 points to entry point + BYTE $0x0D // BL R14,R9 --> basr r14,r9 + BYTE $0xE9 // clobbers 0,1,14,15 + MOVD R8, R15 // restore 15 + JMP R7 // return via saved return address + +// func A2e(arr [] byte) +// code page conversion from 819 to 1047 +TEXT ·A2e(SB), NOSPLIT|NOFRAME, $0 + MOVD arg_base+0(FP), R2 // pointer to arry of characters + MOVD arg_len+8(FP), R3 // count + XOR R0, R0 + XOR R1, R1 + BYTE $0xA7; BYTE $0x15; BYTE $0x00; BYTE $0x82 // BRAS 1,(2+(256/2)) + + // ASCII -> EBCDIC conversion table: + BYTE $0x00; BYTE $0x01; BYTE $0x02; BYTE $0x03 + BYTE $0x37; BYTE $0x2d; BYTE $0x2e; BYTE $0x2f + BYTE $0x16; BYTE $0x05; BYTE $0x15; BYTE $0x0b + BYTE $0x0c; BYTE $0x0d; BYTE $0x0e; BYTE $0x0f + BYTE $0x10; BYTE $0x11; BYTE $0x12; BYTE $0x13 + BYTE $0x3c; BYTE $0x3d; BYTE $0x32; BYTE $0x26 + BYTE $0x18; BYTE $0x19; BYTE $0x3f; BYTE $0x27 + BYTE $0x1c; BYTE $0x1d; BYTE $0x1e; BYTE $0x1f + BYTE $0x40; BYTE $0x5a; BYTE $0x7f; BYTE $0x7b + BYTE $0x5b; BYTE $0x6c; BYTE $0x50; BYTE $0x7d + BYTE $0x4d; BYTE $0x5d; BYTE $0x5c; BYTE $0x4e + BYTE $0x6b; BYTE 
$0x60; BYTE $0x4b; BYTE $0x61 + BYTE $0xf0; BYTE $0xf1; BYTE $0xf2; BYTE $0xf3 + BYTE $0xf4; BYTE $0xf5; BYTE $0xf6; BYTE $0xf7 + BYTE $0xf8; BYTE $0xf9; BYTE $0x7a; BYTE $0x5e + BYTE $0x4c; BYTE $0x7e; BYTE $0x6e; BYTE $0x6f + BYTE $0x7c; BYTE $0xc1; BYTE $0xc2; BYTE $0xc3 + BYTE $0xc4; BYTE $0xc5; BYTE $0xc6; BYTE $0xc7 + BYTE $0xc8; BYTE $0xc9; BYTE $0xd1; BYTE $0xd2 + BYTE $0xd3; BYTE $0xd4; BYTE $0xd5; BYTE $0xd6 + BYTE $0xd7; BYTE $0xd8; BYTE $0xd9; BYTE $0xe2 + BYTE $0xe3; BYTE $0xe4; BYTE $0xe5; BYTE $0xe6 + BYTE $0xe7; BYTE $0xe8; BYTE $0xe9; BYTE $0xad + BYTE $0xe0; BYTE $0xbd; BYTE $0x5f; BYTE $0x6d + BYTE $0x79; BYTE $0x81; BYTE $0x82; BYTE $0x83 + BYTE $0x84; BYTE $0x85; BYTE $0x86; BYTE $0x87 + BYTE $0x88; BYTE $0x89; BYTE $0x91; BYTE $0x92 + BYTE $0x93; BYTE $0x94; BYTE $0x95; BYTE $0x96 + BYTE $0x97; BYTE $0x98; BYTE $0x99; BYTE $0xa2 + BYTE $0xa3; BYTE $0xa4; BYTE $0xa5; BYTE $0xa6 + BYTE $0xa7; BYTE $0xa8; BYTE $0xa9; BYTE $0xc0 + BYTE $0x4f; BYTE $0xd0; BYTE $0xa1; BYTE $0x07 + BYTE $0x20; BYTE $0x21; BYTE $0x22; BYTE $0x23 + BYTE $0x24; BYTE $0x25; BYTE $0x06; BYTE $0x17 + BYTE $0x28; BYTE $0x29; BYTE $0x2a; BYTE $0x2b + BYTE $0x2c; BYTE $0x09; BYTE $0x0a; BYTE $0x1b + BYTE $0x30; BYTE $0x31; BYTE $0x1a; BYTE $0x33 + BYTE $0x34; BYTE $0x35; BYTE $0x36; BYTE $0x08 + BYTE $0x38; BYTE $0x39; BYTE $0x3a; BYTE $0x3b + BYTE $0x04; BYTE $0x14; BYTE $0x3e; BYTE $0xff + BYTE $0x41; BYTE $0xaa; BYTE $0x4a; BYTE $0xb1 + BYTE $0x9f; BYTE $0xb2; BYTE $0x6a; BYTE $0xb5 + BYTE $0xbb; BYTE $0xb4; BYTE $0x9a; BYTE $0x8a + BYTE $0xb0; BYTE $0xca; BYTE $0xaf; BYTE $0xbc + BYTE $0x90; BYTE $0x8f; BYTE $0xea; BYTE $0xfa + BYTE $0xbe; BYTE $0xa0; BYTE $0xb6; BYTE $0xb3 + BYTE $0x9d; BYTE $0xda; BYTE $0x9b; BYTE $0x8b + BYTE $0xb7; BYTE $0xb8; BYTE $0xb9; BYTE $0xab + BYTE $0x64; BYTE $0x65; BYTE $0x62; BYTE $0x66 + BYTE $0x63; BYTE $0x67; BYTE $0x9e; BYTE $0x68 + BYTE $0x74; BYTE $0x71; BYTE $0x72; BYTE $0x73 + BYTE $0x78; BYTE $0x75; BYTE $0x76; BYTE $0x77 + BYTE $0xac; BYTE $0x69; BYTE $0xed; BYTE $0xee + BYTE $0xeb; BYTE $0xef; BYTE $0xec; BYTE $0xbf + BYTE $0x80; BYTE $0xfd; BYTE $0xfe; BYTE $0xfb + BYTE $0xfc; BYTE $0xba; BYTE $0xae; BYTE $0x59 + BYTE $0x44; BYTE $0x45; BYTE $0x42; BYTE $0x46 + BYTE $0x43; BYTE $0x47; BYTE $0x9c; BYTE $0x48 + BYTE $0x54; BYTE $0x51; BYTE $0x52; BYTE $0x53 + BYTE $0x58; BYTE $0x55; BYTE $0x56; BYTE $0x57 + BYTE $0x8c; BYTE $0x49; BYTE $0xcd; BYTE $0xce + BYTE $0xcb; BYTE $0xcf; BYTE $0xcc; BYTE $0xe1 + BYTE $0x70; BYTE $0xdd; BYTE $0xde; BYTE $0xdb + BYTE $0xdc; BYTE $0x8d; BYTE $0x8e; BYTE $0xdf + +retry: + WORD $0xB9931022 // TROO 2,2,b'0001' + BVS retry + RET + +// func e2a(arr [] byte) +// code page conversion from 1047 to 819 +TEXT ·E2a(SB), NOSPLIT|NOFRAME, $0 + MOVD arg_base+0(FP), R2 // pointer to arry of characters + MOVD arg_len+8(FP), R3 // count + XOR R0, R0 + XOR R1, R1 + BYTE $0xA7; BYTE $0x15; BYTE $0x00; BYTE $0x82 // BRAS 1,(2+(256/2)) + + // EBCDIC -> ASCII conversion table: + BYTE $0x00; BYTE $0x01; BYTE $0x02; BYTE $0x03 + BYTE $0x9c; BYTE $0x09; BYTE $0x86; BYTE $0x7f + BYTE $0x97; BYTE $0x8d; BYTE $0x8e; BYTE $0x0b + BYTE $0x0c; BYTE $0x0d; BYTE $0x0e; BYTE $0x0f + BYTE $0x10; BYTE $0x11; BYTE $0x12; BYTE $0x13 + BYTE $0x9d; BYTE $0x0a; BYTE $0x08; BYTE $0x87 + BYTE $0x18; BYTE $0x19; BYTE $0x92; BYTE $0x8f + BYTE $0x1c; BYTE $0x1d; BYTE $0x1e; BYTE $0x1f + BYTE $0x80; BYTE $0x81; BYTE $0x82; BYTE $0x83 + BYTE $0x84; BYTE $0x85; BYTE $0x17; BYTE $0x1b + BYTE $0x88; BYTE $0x89; BYTE $0x8a; BYTE $0x8b + BYTE $0x8c; BYTE $0x05; BYTE 
$0x06; BYTE $0x07 + BYTE $0x90; BYTE $0x91; BYTE $0x16; BYTE $0x93 + BYTE $0x94; BYTE $0x95; BYTE $0x96; BYTE $0x04 + BYTE $0x98; BYTE $0x99; BYTE $0x9a; BYTE $0x9b + BYTE $0x14; BYTE $0x15; BYTE $0x9e; BYTE $0x1a + BYTE $0x20; BYTE $0xa0; BYTE $0xe2; BYTE $0xe4 + BYTE $0xe0; BYTE $0xe1; BYTE $0xe3; BYTE $0xe5 + BYTE $0xe7; BYTE $0xf1; BYTE $0xa2; BYTE $0x2e + BYTE $0x3c; BYTE $0x28; BYTE $0x2b; BYTE $0x7c + BYTE $0x26; BYTE $0xe9; BYTE $0xea; BYTE $0xeb + BYTE $0xe8; BYTE $0xed; BYTE $0xee; BYTE $0xef + BYTE $0xec; BYTE $0xdf; BYTE $0x21; BYTE $0x24 + BYTE $0x2a; BYTE $0x29; BYTE $0x3b; BYTE $0x5e + BYTE $0x2d; BYTE $0x2f; BYTE $0xc2; BYTE $0xc4 + BYTE $0xc0; BYTE $0xc1; BYTE $0xc3; BYTE $0xc5 + BYTE $0xc7; BYTE $0xd1; BYTE $0xa6; BYTE $0x2c + BYTE $0x25; BYTE $0x5f; BYTE $0x3e; BYTE $0x3f + BYTE $0xf8; BYTE $0xc9; BYTE $0xca; BYTE $0xcb + BYTE $0xc8; BYTE $0xcd; BYTE $0xce; BYTE $0xcf + BYTE $0xcc; BYTE $0x60; BYTE $0x3a; BYTE $0x23 + BYTE $0x40; BYTE $0x27; BYTE $0x3d; BYTE $0x22 + BYTE $0xd8; BYTE $0x61; BYTE $0x62; BYTE $0x63 + BYTE $0x64; BYTE $0x65; BYTE $0x66; BYTE $0x67 + BYTE $0x68; BYTE $0x69; BYTE $0xab; BYTE $0xbb + BYTE $0xf0; BYTE $0xfd; BYTE $0xfe; BYTE $0xb1 + BYTE $0xb0; BYTE $0x6a; BYTE $0x6b; BYTE $0x6c + BYTE $0x6d; BYTE $0x6e; BYTE $0x6f; BYTE $0x70 + BYTE $0x71; BYTE $0x72; BYTE $0xaa; BYTE $0xba + BYTE $0xe6; BYTE $0xb8; BYTE $0xc6; BYTE $0xa4 + BYTE $0xb5; BYTE $0x7e; BYTE $0x73; BYTE $0x74 + BYTE $0x75; BYTE $0x76; BYTE $0x77; BYTE $0x78 + BYTE $0x79; BYTE $0x7a; BYTE $0xa1; BYTE $0xbf + BYTE $0xd0; BYTE $0x5b; BYTE $0xde; BYTE $0xae + BYTE $0xac; BYTE $0xa3; BYTE $0xa5; BYTE $0xb7 + BYTE $0xa9; BYTE $0xa7; BYTE $0xb6; BYTE $0xbc + BYTE $0xbd; BYTE $0xbe; BYTE $0xdd; BYTE $0xa8 + BYTE $0xaf; BYTE $0x5d; BYTE $0xb4; BYTE $0xd7 + BYTE $0x7b; BYTE $0x41; BYTE $0x42; BYTE $0x43 + BYTE $0x44; BYTE $0x45; BYTE $0x46; BYTE $0x47 + BYTE $0x48; BYTE $0x49; BYTE $0xad; BYTE $0xf4 + BYTE $0xf6; BYTE $0xf2; BYTE $0xf3; BYTE $0xf5 + BYTE $0x7d; BYTE $0x4a; BYTE $0x4b; BYTE $0x4c + BYTE $0x4d; BYTE $0x4e; BYTE $0x4f; BYTE $0x50 + BYTE $0x51; BYTE $0x52; BYTE $0xb9; BYTE $0xfb + BYTE $0xfc; BYTE $0xf9; BYTE $0xfa; BYTE $0xff + BYTE $0x5c; BYTE $0xf7; BYTE $0x53; BYTE $0x54 + BYTE $0x55; BYTE $0x56; BYTE $0x57; BYTE $0x58 + BYTE $0x59; BYTE $0x5a; BYTE $0xb2; BYTE $0xd4 + BYTE $0xd6; BYTE $0xd2; BYTE $0xd3; BYTE $0xd5 + BYTE $0x30; BYTE $0x31; BYTE $0x32; BYTE $0x33 + BYTE $0x34; BYTE $0x35; BYTE $0x36; BYTE $0x37 + BYTE $0x38; BYTE $0x39; BYTE $0xb3; BYTE $0xdb + BYTE $0xdc; BYTE $0xd9; BYTE $0xda; BYTE $0x9f + +retry: + WORD $0xB9931022 // TROO 2,2,b'0001' + BVS retry + RET diff --git a/vendor/golang.org/x/sys/unix/epoll_zos.go b/vendor/golang.org/x/sys/unix/epoll_zos.go deleted file mode 100644 index 7753fddea..000000000 --- a/vendor/golang.org/x/sys/unix/epoll_zos.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build zos && s390x - -package unix - -import ( - "sync" -) - -// This file simulates epoll on z/OS using poll. - -// Analogous to epoll_event on Linux. -// TODO(neeilan): Pad is because the Linux kernel expects a 96-bit struct. We never pass this to the kernel; remove? 
-type EpollEvent struct { - Events uint32 - Fd int32 - Pad int32 -} - -const ( - EPOLLERR = 0x8 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDNORM = 0x40 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - // The following constants are part of the epoll API, but represent - // currently unsupported functionality on z/OS. - // EPOLL_CLOEXEC = 0x80000 - // EPOLLET = 0x80000000 - // EPOLLONESHOT = 0x40000000 - // EPOLLRDHUP = 0x2000 // Typically used with edge-triggered notis - // EPOLLEXCLUSIVE = 0x10000000 // Exclusive wake-up mode - // EPOLLWAKEUP = 0x20000000 // Relies on Linux's BLOCK_SUSPEND capability -) - -// TODO(neeilan): We can eliminate these epToPoll / pToEpoll calls by using identical mask values for POLL/EPOLL -// constants where possible The lower 16 bits of epoll events (uint32) can fit any system poll event (int16). - -// epToPollEvt converts epoll event field to poll equivalent. -// In epoll, Events is a 32-bit field, while poll uses 16 bits. -func epToPollEvt(events uint32) int16 { - var ep2p = map[uint32]int16{ - EPOLLIN: POLLIN, - EPOLLOUT: POLLOUT, - EPOLLHUP: POLLHUP, - EPOLLPRI: POLLPRI, - EPOLLERR: POLLERR, - } - - var pollEvts int16 = 0 - for epEvt, pEvt := range ep2p { - if (events & epEvt) != 0 { - pollEvts |= pEvt - } - } - - return pollEvts -} - -// pToEpollEvt converts 16 bit poll event bitfields to 32-bit epoll event fields. -func pToEpollEvt(revents int16) uint32 { - var p2ep = map[int16]uint32{ - POLLIN: EPOLLIN, - POLLOUT: EPOLLOUT, - POLLHUP: EPOLLHUP, - POLLPRI: EPOLLPRI, - POLLERR: EPOLLERR, - } - - var epollEvts uint32 = 0 - for pEvt, epEvt := range p2ep { - if (revents & pEvt) != 0 { - epollEvts |= epEvt - } - } - - return epollEvts -} - -// Per-process epoll implementation. -type epollImpl struct { - mu sync.Mutex - epfd2ep map[int]*eventPoll - nextEpfd int -} - -// eventPoll holds a set of file descriptors being watched by the process. A process can have multiple epoll instances. -// On Linux, this is an in-kernel data structure accessed through a fd. -type eventPoll struct { - mu sync.Mutex - fds map[int]*EpollEvent -} - -// epoll impl for this process. -var impl epollImpl = epollImpl{ - epfd2ep: make(map[int]*eventPoll), - nextEpfd: 0, -} - -func (e *epollImpl) epollcreate(size int) (epfd int, err error) { - e.mu.Lock() - defer e.mu.Unlock() - epfd = e.nextEpfd - e.nextEpfd++ - - e.epfd2ep[epfd] = &eventPoll{ - fds: make(map[int]*EpollEvent), - } - return epfd, nil -} - -func (e *epollImpl) epollcreate1(flag int) (fd int, err error) { - return e.epollcreate(4) -} - -func (e *epollImpl) epollctl(epfd int, op int, fd int, event *EpollEvent) (err error) { - e.mu.Lock() - defer e.mu.Unlock() - - ep, ok := e.epfd2ep[epfd] - if !ok { - - return EBADF - } - - switch op { - case EPOLL_CTL_ADD: - // TODO(neeilan): When we make epfds and fds disjoint, detect epoll - // loops here (instances watching each other) and return ELOOP. 
- if _, ok := ep.fds[fd]; ok { - return EEXIST - } - ep.fds[fd] = event - case EPOLL_CTL_MOD: - if _, ok := ep.fds[fd]; !ok { - return ENOENT - } - ep.fds[fd] = event - case EPOLL_CTL_DEL: - if _, ok := ep.fds[fd]; !ok { - return ENOENT - } - delete(ep.fds, fd) - - } - return nil -} - -// Must be called while holding ep.mu -func (ep *eventPoll) getFds() []int { - fds := make([]int, len(ep.fds)) - for fd := range ep.fds { - fds = append(fds, fd) - } - return fds -} - -func (e *epollImpl) epollwait(epfd int, events []EpollEvent, msec int) (n int, err error) { - e.mu.Lock() // in [rare] case of concurrent epollcreate + epollwait - ep, ok := e.epfd2ep[epfd] - - if !ok { - e.mu.Unlock() - return 0, EBADF - } - - pollfds := make([]PollFd, 4) - for fd, epollevt := range ep.fds { - pollfds = append(pollfds, PollFd{Fd: int32(fd), Events: epToPollEvt(epollevt.Events)}) - } - e.mu.Unlock() - - n, err = Poll(pollfds, msec) - if err != nil { - return n, err - } - - i := 0 - for _, pFd := range pollfds { - if pFd.Revents != 0 { - events[i] = EpollEvent{Fd: pFd.Fd, Events: pToEpollEvt(pFd.Revents)} - i++ - } - - if i == n { - break - } - } - - return n, nil -} - -func EpollCreate(size int) (fd int, err error) { - return impl.epollcreate(size) -} - -func EpollCreate1(flag int) (fd int, err error) { - return impl.epollcreate1(flag) -} - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - return impl.epollctl(epfd, op, fd, event) -} - -// Because EpollWait mutates events, the caller is expected to coordinate -// concurrent access if calling with the same epfd from multiple goroutines. -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - return impl.epollwait(epfd, events, msec) -} diff --git a/vendor/golang.org/x/sys/unix/fstatfs_zos.go b/vendor/golang.org/x/sys/unix/fstatfs_zos.go deleted file mode 100644 index c8bde601e..000000000 --- a/vendor/golang.org/x/sys/unix/fstatfs_zos.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build zos && s390x - -package unix - -import ( - "unsafe" -) - -// This file simulates fstatfs on z/OS using fstatvfs and w_getmntent. 
- -func Fstatfs(fd int, stat *Statfs_t) (err error) { - var stat_v Statvfs_t - err = Fstatvfs(fd, &stat_v) - if err == nil { - // populate stat - stat.Type = 0 - stat.Bsize = stat_v.Bsize - stat.Blocks = stat_v.Blocks - stat.Bfree = stat_v.Bfree - stat.Bavail = stat_v.Bavail - stat.Files = stat_v.Files - stat.Ffree = stat_v.Ffree - stat.Fsid = stat_v.Fsid - stat.Namelen = stat_v.Namemax - stat.Frsize = stat_v.Frsize - stat.Flags = stat_v.Flag - for passn := 0; passn < 5; passn++ { - switch passn { - case 0: - err = tryGetmntent64(stat) - break - case 1: - err = tryGetmntent128(stat) - break - case 2: - err = tryGetmntent256(stat) - break - case 3: - err = tryGetmntent512(stat) - break - case 4: - err = tryGetmntent1024(stat) - break - default: - break - } - //proceed to return if: err is nil (found), err is nonnil but not ERANGE (another error occurred) - if err == nil || err != nil && err != ERANGE { - break - } - } - } - return err -} - -func tryGetmntent64(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [64]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent128(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [128]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent256(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [256]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent512(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [512]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent1024(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [1024]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), 
buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} diff --git a/vendor/golang.org/x/sys/unix/pagesize_unix.go b/vendor/golang.org/x/sys/unix/pagesize_unix.go index 4d0a3430e..0482408d7 100644 --- a/vendor/golang.org/x/sys/unix/pagesize_unix.go +++ b/vendor/golang.org/x/sys/unix/pagesize_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos // For Unix, get the pagesize from the runtime. diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go index 130398b6b..b903c0060 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin +//go:build darwin || zos package unix diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_zos.go b/vendor/golang.org/x/sys/unix/sockcmsg_zos.go new file mode 100644 index 000000000..3e53dbc02 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/sockcmsg_zos.go @@ -0,0 +1,58 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Socket control messages + +package unix + +import "unsafe" + +// UnixCredentials encodes credentials into a socket control message +// for sending to another process. This can be used for +// authentication. +func UnixCredentials(ucred *Ucred) []byte { + b := make([]byte, CmsgSpace(SizeofUcred)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_SOCKET + h.Type = SCM_CREDENTIALS + h.SetLen(CmsgLen(SizeofUcred)) + *(*Ucred)(h.data(0)) = *ucred + return b +} + +// ParseUnixCredentials decodes a socket control message that contains +// credentials in a Ucred structure. To receive such a message, the +// SO_PASSCRED option must be enabled on the socket. +func ParseUnixCredentials(m *SocketControlMessage) (*Ucred, error) { + if m.Header.Level != SOL_SOCKET { + return nil, EINVAL + } + if m.Header.Type != SCM_CREDENTIALS { + return nil, EINVAL + } + ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0])) + return &ucred, nil +} + +// PktInfo4 encodes Inet4Pktinfo into a socket control message of type IP_PKTINFO. +func PktInfo4(info *Inet4Pktinfo) []byte { + b := make([]byte, CmsgSpace(SizeofInet4Pktinfo)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_IP + h.Type = IP_PKTINFO + h.SetLen(CmsgLen(SizeofInet4Pktinfo)) + *(*Inet4Pktinfo)(h.data(0)) = *info + return b +} + +// PktInfo6 encodes Inet6Pktinfo into a socket control message of type IPV6_PKTINFO. 
+func PktInfo6(info *Inet6Pktinfo) []byte { + b := make([]byte, CmsgSpace(SizeofInet6Pktinfo)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_IPV6 + h.Type = IPV6_PKTINFO + h.SetLen(CmsgLen(SizeofInet6Pktinfo)) + *(*Inet6Pktinfo)(h.data(0)) = *info + return b +} diff --git a/vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s b/vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s new file mode 100644 index 000000000..3c4f33cb6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s @@ -0,0 +1,75 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x && gc + +#include "textflag.h" + +// provide the address of function variable to be fixed up. + +TEXT ·getPipe2Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Pipe2(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_FlockAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Flock(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_GetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Getxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_NanosleepAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Nanosleep(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_SetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Setxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_Wait4Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Wait4(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_MountAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mount(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_UnmountAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Unmount(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_UtimesNanoAtAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·UtimesNanoAt(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_UtimesNanoAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·UtimesNano(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_MkfifoatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mkfifoat(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_ChtagAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Chtag(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_ReadlinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Readlinkat(SB), R8 + MOVD R8, ret+0(FP) + RET + diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 27c41b6f0..312ae6ac1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -4,11 +4,21 @@ //go:build zos && s390x +// Many of the following syscalls are not available on all versions of z/OS. +// Some missing calls have legacy implementations/simulations but others +// will be missing completely. To achieve consistent failing behaviour on +// legacy systems, we first test the function pointer via a safeloading +// mechanism to see if the function exists on a given system. Then execution +// is branched to either continue the function call, or return an error. 
+ package unix import ( "bytes" "fmt" + "os" + "reflect" + "regexp" "runtime" "sort" "strings" @@ -17,17 +27,205 @@ import ( "unsafe" ) +//go:noescape +func initZosLibVec() + +//go:noescape +func GetZosLibVec() uintptr + +func init() { + initZosLibVec() + r0, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS_____GETENV_A<<4, uintptr(unsafe.Pointer(&([]byte("__ZOS_XSYSTRACE\x00"))[0]))) + if r0 != 0 { + n, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___ATOI_A<<4, r0) + ZosTraceLevel = int(n) + r0, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS_____GETENV_A<<4, uintptr(unsafe.Pointer(&([]byte("__ZOS_XSYSTRACEFD\x00"))[0]))) + if r0 != 0 { + fd, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___ATOI_A<<4, r0) + f := os.NewFile(fd, "zostracefile") + if f != nil { + ZosTracefile = f + } + } + + } +} + +//go:noescape +func CallLeFuncWithErr(funcdesc uintptr, parms ...uintptr) (ret, errno2 uintptr, err Errno) + +//go:noescape +func CallLeFuncWithPtrReturn(funcdesc uintptr, parms ...uintptr) (ret, errno2 uintptr, err Errno) + +// ------------------------------- +// pointer validity test +// good pointer returns 0 +// bad pointer returns 1 +// +//go:nosplit +func ptrtest(uintptr) uint64 + +// Load memory at ptr location with error handling if the location is invalid +// +//go:noescape +func safeload(ptr uintptr) (value uintptr, error uintptr) + const ( - O_CLOEXEC = 0 // Dummy value (not supported). - AF_LOCAL = AF_UNIX // AF_LOCAL is an alias for AF_UNIX + entrypointLocationOffset = 8 // From function descriptor + + xplinkEyecatcher = 0x00c300c500c500f1 // ".C.E.E.1" + eyecatcherOffset = 16 // From function entrypoint (negative) + ppa1LocationOffset = 8 // From function entrypoint (negative) + + nameLenOffset = 0x14 // From PPA1 start + nameOffset = 0x16 // From PPA1 start ) -func syscall_syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) -func syscall_rawsyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) -func syscall_syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall_rawsyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall_syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) -func syscall_rawsyscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) +func getPpaOffset(funcptr uintptr) int64 { + entrypoint, err := safeload(funcptr + entrypointLocationOffset) + if err != 0 { + return -1 + } + + // XPLink functions have ".C.E.E.1" as the first 8 bytes (EBCDIC) + val, err := safeload(entrypoint - eyecatcherOffset) + if err != 0 { + return -1 + } + if val != xplinkEyecatcher { + return -1 + } + + ppaoff, err := safeload(entrypoint - ppa1LocationOffset) + if err != 0 { + return -1 + } + + ppaoff >>= 32 + return int64(ppaoff) +} + +//------------------------------- +// function descriptor pointer validity test +// good pointer returns 0 +// bad pointer returns 1 + +// TODO: currently mksyscall_zos_s390x.go generate empty string for funcName +// have correct funcName pass to the funcptrtest function +func funcptrtest(funcptr uintptr, funcName string) uint64 { + entrypoint, err := safeload(funcptr + entrypointLocationOffset) + if err != 0 { + return 1 + } + + ppaoff := getPpaOffset(funcptr) + if ppaoff == -1 { + return 1 + } + + // PPA1 offset value is from the start of the entire function block, not the entrypoint + ppa1 := (entrypoint - eyecatcherOffset) + uintptr(ppaoff) + + nameLen, err := safeload(ppa1 + nameLenOffset) + if err != 
0 { + return 1 + } + + nameLen >>= 48 + if nameLen > 128 { + return 1 + } + + // no function name input to argument end here + if funcName == "" { + return 0 + } + + var funcname [128]byte + for i := 0; i < int(nameLen); i += 8 { + v, err := safeload(ppa1 + nameOffset + uintptr(i)) + if err != 0 { + return 1 + } + funcname[i] = byte(v >> 56) + funcname[i+1] = byte(v >> 48) + funcname[i+2] = byte(v >> 40) + funcname[i+3] = byte(v >> 32) + funcname[i+4] = byte(v >> 24) + funcname[i+5] = byte(v >> 16) + funcname[i+6] = byte(v >> 8) + funcname[i+7] = byte(v) + } + + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, // __e2a_l + []uintptr{uintptr(unsafe.Pointer(&funcname[0])), nameLen}) + + name := string(funcname[:nameLen]) + if name != funcName { + return 1 + } + + return 0 +} + +// For detection of capabilities on a system. +// Is function descriptor f a valid function? +func isValidLeFunc(f uintptr) error { + ret := funcptrtest(f, "") + if ret != 0 { + return fmt.Errorf("Bad pointer, not an LE function ") + } + return nil +} + +// Retrieve function name from descriptor +func getLeFuncName(f uintptr) (string, error) { + // assume it has been checked, only check ppa1 validity here + entry := ((*[2]uintptr)(unsafe.Pointer(f)))[1] + preamp := ((*[4]uint32)(unsafe.Pointer(entry - eyecatcherOffset))) + + offsetPpa1 := preamp[2] + if offsetPpa1 > 0x0ffff { + return "", fmt.Errorf("PPA1 offset seems too big 0x%x\n", offsetPpa1) + } + + ppa1 := uintptr(unsafe.Pointer(preamp)) + uintptr(offsetPpa1) + res := ptrtest(ppa1) + if res != 0 { + return "", fmt.Errorf("PPA1 address not valid") + } + + size := *(*uint16)(unsafe.Pointer(ppa1 + nameLenOffset)) + if size > 128 { + return "", fmt.Errorf("Function name seems too long, length=%d\n", size) + } + + var name [128]byte + funcname := (*[128]byte)(unsafe.Pointer(ppa1 + nameOffset)) + copy(name[0:size], funcname[0:size]) + + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, // __e2a_l + []uintptr{uintptr(unsafe.Pointer(&name[0])), uintptr(size)}) + + return string(name[:size]), nil +} + +// Check z/OS version +func zosLeVersion() (version, release uint32) { + p1 := (*(*uintptr)(unsafe.Pointer(uintptr(1208)))) >> 32 + p1 = *(*uintptr)(unsafe.Pointer(uintptr(p1 + 88))) + p1 = *(*uintptr)(unsafe.Pointer(uintptr(p1 + 8))) + p1 = *(*uintptr)(unsafe.Pointer(uintptr(p1 + 984))) + vrm := *(*uint32)(unsafe.Pointer(p1 + 80)) + version = (vrm & 0x00ff0000) >> 16 + release = (vrm & 0x0000ff00) >> 8 + return +} + +// returns a zos C FILE * for stdio fd 0, 1, 2 +func ZosStdioFilep(fd int32) uintptr { + return uintptr(*(*uint64)(unsafe.Pointer(uintptr(*(*uint64)(unsafe.Pointer(uintptr(*(*uint64)(unsafe.Pointer(uintptr(uint64(*(*uint32)(unsafe.Pointer(uintptr(1208)))) + 80))) + uint64((fd+2)<<3)))))))) +} func copyStat(stat *Stat_t, statLE *Stat_LE_t) { stat.Dev = uint64(statLE.Dev) @@ -65,6 +263,21 @@ func (d *Dirent) NameString() string { } } +func DecodeData(dest []byte, sz int, val uint64) { + for i := 0; i < sz; i++ { + dest[sz-1-i] = byte((val >> (uint64(i * 8))) & 0xff) + } +} + +func EncodeData(data []byte) uint64 { + var value uint64 + sz := len(data) + for i := 0; i < sz; i++ { + value |= uint64(data[i]) << uint64(((sz - i - 1) * 8)) + } + return value +} + func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { if sa.Port < 0 || sa.Port > 0xFFFF { return nil, 0, EINVAL @@ -74,7 +287,9 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port 
>> 8) p[1] = byte(sa.Port) - sa.raw.Addr = sa.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -88,7 +303,9 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - sa.raw.Addr = sa.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -146,7 +363,9 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - sa.Addr = pp.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } return sa, nil case AF_INET6: @@ -155,7 +374,9 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - sa.Addr = pp.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } return sa, nil } return nil, EAFNOSUPPORT @@ -177,6 +398,43 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { return } +func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept4(fd, &rsa, &len, flags) + if err != nil { + return + } + if len > SizeofSockaddrAny { + panic("RawSockaddrAny too small") + } + // TODO(neeilan): Remove 0 in call + sa, err = anyToSockaddr(0, &rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +func Ctermid() (tty string, err error) { + var termdev [1025]byte + runtime.EnterSyscall() + r0, err2, err1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___CTERMID_A<<4, uintptr(unsafe.Pointer(&termdev[0]))) + runtime.ExitSyscall() + if r0 == 0 { + return "", fmt.Errorf("%s (errno2=0x%x)\n", err1.Error(), err2) + } + s := string(termdev[:]) + idx := strings.Index(s, string(rune(0))) + if idx == -1 { + tty = s + } else { + tty = s[:idx] + } + return +} + func (iov *Iovec) SetLen(length int) { iov.Len = uint64(length) } @@ -190,10 +448,16 @@ func (cmsg *Cmsghdr) SetLen(length int) { } //sys fcntl(fd int, cmd int, arg int) (val int, err error) +//sys Flistxattr(fd int, dest []byte) (sz int, err error) = SYS___FLISTXATTR_A +//sys Fremovexattr(fd int, attr string) (err error) = SYS___FREMOVEXATTR_A //sys read(fd int, p []byte) (n int, err error) //sys write(fd int, p []byte) (n int, err error) +//sys Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) = SYS___FGETXATTR_A +//sys Fsetxattr(fd int, attr string, data []byte, flag int) (err error) = SYS___FSETXATTR_A + //sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) = SYS___ACCEPT_A +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) = SYS___ACCEPT4_A //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = SYS___BIND_A //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = SYS___CONNECT_A //sysnb getgroups(n int, list *_Gid_t) (nn int, err error) @@ -204,6 +468,7 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) //sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = SYS___GETPEERNAME_A //sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = SYS___GETSOCKNAME_A +//sys Removexattr(path string, attr string) (err 
error) = SYS___REMOVEXATTR_A //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = SYS___RECVFROM_A //sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) = SYS___SENDTO_A //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = SYS___RECVMSG_A @@ -212,6 +477,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sys munmap(addr uintptr, length uintptr) (err error) = SYS_MUNMAP //sys ioctl(fd int, req int, arg uintptr) (err error) = SYS_IOCTL //sys ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) = SYS_IOCTL +//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) = SYS_SHMAT +//sys shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) = SYS_SHMCTL64 +//sys shmdt(addr uintptr) (err error) = SYS_SHMDT +//sys shmget(key int, size int, flag int) (id int, err error) = SYS_SHMGET //sys Access(path string, mode uint32) (err error) = SYS___ACCESS_A //sys Chdir(path string) (err error) = SYS___CHDIR_A @@ -220,14 +489,31 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sys Creat(path string, mode uint32) (fd int, err error) = SYS___CREAT_A //sys Dup(oldfd int) (fd int, err error) //sys Dup2(oldfd int, newfd int) (err error) +//sys Dup3(oldfd int, newfd int, flags int) (err error) = SYS_DUP3 +//sys Dirfd(dirp uintptr) (fd int, err error) = SYS_DIRFD +//sys EpollCreate(size int) (fd int, err error) = SYS_EPOLL_CREATE +//sys EpollCreate1(flags int) (fd int, err error) = SYS_EPOLL_CREATE1 +//sys EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) = SYS_EPOLL_CTL +//sys EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) = SYS_EPOLL_PWAIT +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_WAIT //sys Errno2() (er2 int) = SYS___ERRNO2 -//sys Err2ad() (eadd *int) = SYS___ERR2AD +//sys Eventfd(initval uint, flags int) (fd int, err error) = SYS_EVENTFD //sys Exit(code int) +//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) = SYS___FACCESSAT_A + +func Faccessat2(dirfd int, path string, mode uint32, flags int) (err error) { + return Faccessat(dirfd, path, mode, flags) +} + //sys Fchdir(fd int) (err error) //sys Fchmod(fd int, mode uint32) (err error) +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) = SYS___FCHMODAT_A //sys Fchown(fd int, uid int, gid int) (err error) +//sys Fchownat(fd int, path string, uid int, gid int, flags int) (err error) = SYS___FCHOWNAT_A //sys FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) = SYS_FCNTL +//sys Fdatasync(fd int) (err error) = SYS_FDATASYNC //sys fstat(fd int, stat *Stat_LE_t) (err error) +//sys fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) = SYS___FSTATAT_A func Fstat(fd int, stat *Stat_t) (err error) { var statLE Stat_LE_t @@ -236,28 +522,208 @@ func Fstat(fd int, stat *Stat_t) (err error) { return } +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { + var statLE Stat_LE_t + err = fstatat(dirfd, path, &statLE, flags) + copyStat(stat, &statLE) + return +} + +func impl_Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, e2, e1 := 
CallLeFuncWithErr(GetZosLibVec()+SYS___GETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest))) + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_GetxattrAddr() *(func(path string, attr string, dest []byte) (sz int, err error)) + +var Getxattr = enter_Getxattr + +func enter_Getxattr(path string, attr string, dest []byte) (sz int, err error) { + funcref := get_GetxattrAddr() + if validGetxattr() { + *funcref = impl_Getxattr + } else { + *funcref = error_Getxattr + } + return (*funcref)(path, attr, dest) +} + +func error_Getxattr(path string, attr string, dest []byte) (sz int, err error) { + return -1, ENOSYS +} + +func validGetxattr() bool { + if funcptrtest(GetZosLibVec()+SYS___GETXATTR_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___GETXATTR_A<<4); err == nil { + return name == "__getxattr_a" + } + } + return false +} + +//sys Lgetxattr(link string, attr string, dest []byte) (sz int, err error) = SYS___LGETXATTR_A +//sys Lsetxattr(path string, attr string, data []byte, flags int) (err error) = SYS___LSETXATTR_A + +func impl_Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_SetxattrAddr() *(func(path string, attr string, data []byte, flags int) (err error)) + +var Setxattr = enter_Setxattr + +func enter_Setxattr(path string, attr string, data []byte, flags int) (err error) { + funcref := get_SetxattrAddr() + if validSetxattr() { + *funcref = impl_Setxattr + } else { + *funcref = error_Setxattr + } + return (*funcref)(path, attr, data, flags) +} + +func error_Setxattr(path string, attr string, data []byte, flags int) (err error) { + return ENOSYS +} + +func validSetxattr() bool { + if funcptrtest(GetZosLibVec()+SYS___SETXATTR_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___SETXATTR_A<<4); err == nil { + return name == "__setxattr_a" + } + } + return false +} + +//sys Fstatfs(fd int, buf *Statfs_t) (err error) = SYS_FSTATFS //sys Fstatvfs(fd int, stat *Statvfs_t) (err error) = SYS_FSTATVFS //sys Fsync(fd int) (err error) +//sys Futimes(fd int, tv []Timeval) (err error) = SYS_FUTIMES +//sys Futimesat(dirfd int, path string, tv []Timeval) (err error) = SYS___FUTIMESAT_A //sys Ftruncate(fd int, length int64) (err error) -//sys Getpagesize() (pgsize int) = SYS_GETPAGESIZE +//sys Getrandom(buf []byte, flags int) (n int, err error) = SYS_GETRANDOM +//sys InotifyInit() (fd int, err error) = SYS_INOTIFY_INIT +//sys InotifyInit1(flags int) (fd int, err error) = SYS_INOTIFY_INIT1 +//sys InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) = SYS___INOTIFY_ADD_WATCH_A +//sys InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) = SYS_INOTIFY_RM_WATCH +//sys Listxattr(path string, dest []byte) (sz int, err error) = SYS___LISTXATTR_A +//sys Llistxattr(path string, dest []byte) (sz int, err error) = SYS___LLISTXATTR_A +//sys 
Lremovexattr(path string, attr string) (err error) = SYS___LREMOVEXATTR_A +//sys Lutimes(path string, tv []Timeval) (err error) = SYS___LUTIMES_A //sys Mprotect(b []byte, prot int) (err error) = SYS_MPROTECT //sys Msync(b []byte, flags int) (err error) = SYS_MSYNC +//sys Console2(cmsg *ConsMsg2, modstr *byte, concmd *uint32) (err error) = SYS___CONSOLE2 + +// Pipe2 begin + +//go:nosplit +func getPipe2Addr() *(func([]int, int) error) + +var Pipe2 = pipe2Enter + +func pipe2Enter(p []int, flags int) (err error) { + if funcptrtest(GetZosLibVec()+SYS_PIPE2<<4, "") == 0 { + *getPipe2Addr() = pipe2Impl + } else { + *getPipe2Addr() = pipe2Error + } + return (*getPipe2Addr())(p, flags) +} + +func pipe2Impl(p []int, flags int) (err error) { + var pp [2]_C_int + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PIPE2<<4, uintptr(unsafe.Pointer(&pp[0])), uintptr(flags)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } else { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } + return +} +func pipe2Error(p []int, flags int) (err error) { + return fmt.Errorf("Pipe2 is not available on this system") +} + +// Pipe2 end + //sys Poll(fds []PollFd, timeout int) (n int, err error) = SYS_POLL + +func Readdir(dir uintptr) (dirent *Dirent, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___READDIR_A<<4, uintptr(dir)) + runtime.ExitSyscall() + dirent = (*Dirent)(unsafe.Pointer(r0)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//sys Readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) = SYS___READDIR_R_A +//sys Statfs(path string, buf *Statfs_t) (err error) = SYS___STATFS_A +//sys Syncfs(fd int) (err error) = SYS_SYNCFS //sys Times(tms *Tms) (ticks uintptr, err error) = SYS_TIMES //sys W_Getmntent(buff *byte, size int) (lastsys int, err error) = SYS_W_GETMNTENT //sys W_Getmntent_A(buff *byte, size int) (lastsys int, err error) = SYS___W_GETMNTENT_A //sys mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) = SYS___MOUNT_A -//sys unmount(filesystem string, mtm int) (err error) = SYS___UMOUNT_A +//sys unmount_LE(filesystem string, mtm int) (err error) = SYS___UMOUNT_A //sys Chroot(path string) (err error) = SYS___CHROOT_A //sys Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) = SYS_SELECT -//sysnb Uname(buf *Utsname) (err error) = SYS___UNAME_A +//sysnb Uname(buf *Utsname) (err error) = SYS_____OSNAME_A +//sys Unshare(flags int) (err error) = SYS_UNSHARE func Ptsname(fd int) (name string, err error) { - r0, _, e1 := syscall_syscall(SYS___PTSNAME_A, uintptr(fd), 0, 0) - name = u2s(unsafe.Pointer(r0)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___PTSNAME_A<<4, uintptr(fd)) + runtime.ExitSyscall() + if r0 == 0 { + err = errnoErr2(e1, e2) + } else { + name = u2s(unsafe.Pointer(r0)) } return } @@ -272,13 +738,19 @@ func u2s(cstr unsafe.Pointer) string { } func Close(fd int) (err error) { - _, _, e1 := syscall_syscall(SYS_CLOSE, uintptr(fd), 0, 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_CLOSE<<4, uintptr(fd)) + runtime.ExitSyscall() for i := 0; e1 == EAGAIN && i < 10; i++ { - _, _, _ = syscall_syscall(SYS_USLEEP, uintptr(10), 0, 0) - _, _, e1 = syscall_syscall(SYS_CLOSE, uintptr(fd), 0, 0) + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec()+SYS_USLEEP<<4, uintptr(10)) + runtime.ExitSyscall() + runtime.EnterSyscall() + r0, e2, 
e1 = CallLeFuncWithErr(GetZosLibVec()+SYS_CLOSE<<4, uintptr(fd)) + runtime.ExitSyscall() } - if e1 != 0 { - err = errnoErr(e1) + if r0 != 0 { + err = errnoErr2(e1, e2) } return } @@ -288,9 +760,15 @@ func Madvise(b []byte, advice int) (err error) { return } +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + //sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A -//sysnb Getegid() (egid int) -//sysnb Geteuid() (uid int) //sysnb Getgid() (gid int) //sysnb Getpid() (pid int) //sysnb Getpgid(pid int) (pgid int, err error) = SYS_GETPGID @@ -317,11 +795,14 @@ func Getrusage(who int, rusage *Rusage) (err error) { return } +//sys Getegid() (egid int) = SYS_GETEGID +//sys Geteuid() (euid int) = SYS_GETEUID //sysnb Getsid(pid int) (sid int, err error) = SYS_GETSID //sysnb Getuid() (uid int) //sysnb Kill(pid int, sig Signal) (err error) //sys Lchown(path string, uid int, gid int) (err error) = SYS___LCHOWN_A //sys Link(path string, link string) (err error) = SYS___LINK_A +//sys Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) = SYS___LINKAT_A //sys Listen(s int, n int) (err error) //sys lstat(path string, stat *Stat_LE_t) (err error) = SYS___LSTAT_A @@ -332,15 +813,150 @@ func Lstat(path string, stat *Stat_t) (err error) { return } +// for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/ +func isSpecialPath(path []byte) (v bool) { + var special = [4][8]byte{ + [8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, + [8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, + [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, + [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} + + var i, j int + for i = 0; i < len(special); i++ { + for j = 0; j < len(special[i]); j++ { + if path[j] != special[i][j] { + break + } + } + if j == len(special[i]) { + return true + } + } + return false +} + +func realpath(srcpath string, abspath []byte) (pathlen int, errno int) { + var source [1024]byte + copy(source[:], srcpath) + source[len(srcpath)] = 0 + ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___REALPATH_A<<4, //__realpath_a() + []uintptr{uintptr(unsafe.Pointer(&source[0])), + uintptr(unsafe.Pointer(&abspath[0]))}) + if ret != 0 { + index := bytes.IndexByte(abspath[:], byte(0)) + if index != -1 { + return index, 0 + } + } else { + errptr := (*int)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, []uintptr{}))) //__errno() + return 0, *errptr + } + return 0, 245 // EBADDATA 245 +} + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + n = int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___READLINK_A<<4, + []uintptr{uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))})) + runtime.KeepAlive(unsafe.Pointer(_p0)) + if n == -1 { + value := *(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, []uintptr{}))) + err = errnoErr(Errno(value)) + } else { + if buf[0] == '$' { + if isSpecialPath(buf[1:9]) { + cnt, err1 := realpath(path, buf) + if err1 == 0 { + n = cnt + } + } + } + } + return +} + +func impl_Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___READLINKAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + return n, err + } else { + if buf[0] == '$' { + if isSpecialPath(buf[1:9]) { + cnt, err1 := realpath(path, buf) + if err1 == 0 { + n = cnt + } + } + } + } + return +} + +//go:nosplit +func get_ReadlinkatAddr() *(func(dirfd int, path string, buf []byte) (n int, err error)) + +var Readlinkat = enter_Readlinkat + +func enter_Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + funcref := get_ReadlinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___READLINKAT_A<<4, "") == 0 { + *funcref = impl_Readlinkat + } else { + *funcref = error_Readlinkat + } + return (*funcref)(dirfd, path, buf) +} + +func error_Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + n = -1 + err = ENOSYS + return +} + //sys Mkdir(path string, mode uint32) (err error) = SYS___MKDIR_A +//sys Mkdirat(dirfd int, path string, mode uint32) (err error) = SYS___MKDIRAT_A //sys Mkfifo(path string, mode uint32) (err error) = SYS___MKFIFO_A //sys Mknod(path string, mode uint32, dev int) (err error) = SYS___MKNOD_A +//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) = SYS___MKNODAT_A +//sys PivotRoot(newroot string, oldroot string) (err error) = SYS___PIVOT_ROOT_A //sys Pread(fd int, p []byte, offset int64) (n int, err error) //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) -//sys Readlink(path string, buf []byte) (n int, err error) = SYS___READLINK_A +//sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) = SYS___PRCTL_A +//sysnb Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT //sys Rename(from string, to string) (err error) = SYS___RENAME_A +//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) = SYS___RENAMEAT_A +//sys Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) = SYS___RENAMEAT2_A //sys Rmdir(path string) (err error) = SYS___RMDIR_A //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK +//sys Setegid(egid int) (err error) = SYS_SETEGID +//sys Seteuid(euid int) (err error) = SYS_SETEUID +//sys Sethostname(p []byte) (err error) = SYS___SETHOSTNAME_A +//sys Setns(fd int, nstype int) (err error) = SYS_SETNS //sys Setpriority(which int, who int, prio int) (err error) //sysnb Setpgid(pid int, pgid int) (err error) = SYS_SETPGID //sysnb Setrlimit(resource int, lim *Rlimit) (err error) @@ -360,32 +976,57 @@ func Stat(path string, sta *Stat_t) (err error) { } //sys Symlink(path string, link string) (err error) = SYS___SYMLINK_A +//sys Symlinkat(oldPath string, dirfd int, newPath string) (err error) = SYS___SYMLINKAT_A //sys Sync() = SYS_SYNC //sys Truncate(path string, length int64) (err error) = SYS___TRUNCATE_A //sys Tcgetattr(fildes int, termptr *Termios) (err error) = SYS_TCGETATTR //sys Tcsetattr(fildes int, when int, termptr *Termios) (err error) = SYS_TCSETATTR //sys Umask(mask int) (oldmask int) //sys Unlink(path string) (err error) = SYS___UNLINK_A +//sys Unlinkat(dirfd int, path string, flags int) (err error) = SYS___UNLINKAT_A //sys Utime(path 
string, utim *Utimbuf) (err error) = SYS___UTIME_A //sys open(path string, mode int, perm uint32) (fd int, err error) = SYS___OPEN_A func Open(path string, mode int, perm uint32) (fd int, err error) { + if mode&O_ACCMODE == 0 { + mode |= O_RDONLY + } return open(path, mode, perm) } -func Mkfifoat(dirfd int, path string, mode uint32) (err error) { - wd, err := Getwd() - if err != nil { - return err +//sys openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) = SYS___OPENAT_A + +func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + if flags&O_ACCMODE == 0 { + flags |= O_RDONLY } + return openat(dirfd, path, flags, mode) +} - if err := Fchdir(dirfd); err != nil { - return err +//sys openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) = SYS___OPENAT2_A + +func Openat2(dirfd int, path string, how *OpenHow) (fd int, err error) { + if how.Flags&O_ACCMODE == 0 { + how.Flags |= O_RDONLY } - defer Chdir(wd) + return openat2(dirfd, path, how, SizeofOpenHow) +} - return Mkfifo(path, mode) +func ZosFdToPath(dirfd int) (path string, err error) { + var buffer [1024]byte + runtime.EnterSyscall() + ret, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_W_IOCTL<<4, uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))) + runtime.ExitSyscall() + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + CallLeFuncWithErr(GetZosLibVec()+SYS___E2A_L<<4, uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)) + return string(buffer[:zb]), nil + } + return "", errnoErr2(e1, e2) } //sys remove(path string) (err error) @@ -403,10 +1044,12 @@ func Getcwd(buf []byte) (n int, err error) { } else { p = unsafe.Pointer(&_zero) } - _, _, e := syscall_syscall(SYS___GETCWD_A, uintptr(p), uintptr(len(buf)), 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___GETCWD_A<<4, uintptr(p), uintptr(len(buf))) + runtime.ExitSyscall() n = clen(buf) + 1 - if e != 0 { - err = errnoErr(e) + if r0 == 0 { + err = errnoErr2(e1, e2) } return } @@ -520,9 +1163,41 @@ func (w WaitStatus) StopSignal() Signal { func (w WaitStatus) TrapCause() int { return -1 } +//sys waitid(idType int, id int, info *Siginfo, options int) (err error) + +func Waitid(idType int, id int, info *Siginfo, options int, rusage *Rusage) (err error) { + return waitid(idType, id, info, options) +} + //sys waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) -func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { +func impl_Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WAIT4<<4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage))) + runtime.ExitSyscall() + wpid = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_Wait4Addr() *(func(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error)) + +var Wait4 = enter_Wait4 + +func enter_Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + funcref := get_Wait4Addr() + if funcptrtest(GetZosLibVec()+SYS_WAIT4<<4, "") == 0 { + *funcref = impl_Wait4 + } else { + *funcref = legacyWait4 + } + return (*funcref)(pid, wstatus, options, rusage) +} + +func legacyWait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { // TODO(mundaym): z/OS 
doesn't have wait4. I don't think getrusage does what we want. // At the moment rusage will not be touched. var status _C_int @@ -571,23 +1246,62 @@ func Pipe(p []int) (err error) { } var pp [2]_C_int err = pipe(&pp) - if err == nil { - p[0] = int(pp[0]) - p[1] = int(pp[1]) - } + p[0] = int(pp[0]) + p[1] = int(pp[1]) return } //sys utimes(path string, timeval *[2]Timeval) (err error) = SYS___UTIMES_A func Utimes(path string, tv []Timeval) (err error) { + if tv == nil { + return utimes(path, nil) + } if len(tv) != 2 { return EINVAL } return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } -func UtimesNano(path string, ts []Timespec) error { +//sys utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) = SYS___UTIMENSAT_A + +func validUtimensat() bool { + if funcptrtest(GetZosLibVec()+SYS___UTIMENSAT_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___UTIMENSAT_A<<4); err == nil { + return name == "__utimensat_a" + } + } + return false +} + +// Begin UtimesNano + +//go:nosplit +func get_UtimesNanoAddr() *(func(path string, ts []Timespec) (err error)) + +var UtimesNano = enter_UtimesNano + +func enter_UtimesNano(path string, ts []Timespec) (err error) { + funcref := get_UtimesNanoAddr() + if validUtimensat() { + *funcref = utimesNanoImpl + } else { + *funcref = legacyUtimesNano + } + return (*funcref)(path, ts) +} + +func utimesNanoImpl(path string, ts []Timespec) (err error) { + if ts == nil { + return utimensat(AT_FDCWD, path, nil, 0) + } + if len(ts) != 2 { + return EINVAL + } + return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + +func legacyUtimesNano(path string, ts []Timespec) (err error) { if len(ts) != 2 { return EINVAL } @@ -600,6 +1314,70 @@ func UtimesNano(path string, ts []Timespec) error { return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } +// End UtimesNano + +// Begin UtimesNanoAt + +//go:nosplit +func get_UtimesNanoAtAddr() *(func(dirfd int, path string, ts []Timespec, flags int) (err error)) + +var UtimesNanoAt = enter_UtimesNanoAt + +func enter_UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) (err error) { + funcref := get_UtimesNanoAtAddr() + if validUtimensat() { + *funcref = utimesNanoAtImpl + } else { + *funcref = legacyUtimesNanoAt + } + return (*funcref)(dirfd, path, ts, flags) +} + +func utimesNanoAtImpl(dirfd int, path string, ts []Timespec, flags int) (err error) { + if ts == nil { + return utimensat(dirfd, path, nil, flags) + } + if len(ts) != 2 { + return EINVAL + } + return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags) +} + +func legacyUtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) (err error) { + if path[0] != '/' { + dirPath, err := ZosFdToPath(dirfd) + if err != nil { + return err + } + path = dirPath + "/" + path + } + if flags == AT_SYMLINK_NOFOLLOW { + if len(ts) != 2 { + return EINVAL + } + + if ts[0].Nsec >= 5e8 { + ts[0].Sec++ + } + ts[0].Nsec = 0 + if ts[1].Nsec >= 5e8 { + ts[1].Sec++ + } + ts[1].Nsec = 0 + + // Not as efficient as it could be because Timespec and + // Timeval have different types in the different OSes + tv := []Timeval{ + NsecToTimeval(TimespecToNsec(ts[0])), + NsecToTimeval(TimespecToNsec(ts[1])), + } + return Lutimes(path, tv) + } + return UtimesNano(path, ts) +} + +// End UtimesNanoAt + func Getsockname(fd int) (sa Sockaddr, err error) { var rsa RawSockaddrAny var len _Socklen = SizeofSockaddrAny @@ -1191,62 +1969,41 @@ func Opendir(name string) (uintptr, error) { if err != nil { return 0, err } - 
dir, _, e := syscall_syscall(SYS___OPENDIR_A, uintptr(unsafe.Pointer(p)), 0, 0) - runtime.KeepAlive(unsafe.Pointer(p)) - if e != 0 { - err = errnoErr(e) - } - return dir, err -} - -// clearsyscall.Errno resets the errno value to 0. -func clearErrno() - -func Readdir(dir uintptr) (*Dirent, error) { - var ent Dirent - var res uintptr - // __readdir_r_a returns errno at the end of the directory stream, rather than 0. - // Therefore to avoid false positives we clear errno before calling it. - - // TODO(neeilan): Commented this out to get sys/unix compiling on z/OS. Uncomment and fix. Error: "undefined: clearsyscall" - //clearsyscall.Errno() // TODO(mundaym): check pre-emption rules. - - e, _, _ := syscall_syscall(SYS___READDIR_R_A, dir, uintptr(unsafe.Pointer(&ent)), uintptr(unsafe.Pointer(&res))) - var err error - if e != 0 { - err = errnoErr(Errno(e)) - } - if res == 0 { - return nil, err - } - return &ent, err -} - -func readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) { - r0, _, e1 := syscall_syscall(SYS___READDIR_R_A, dirp, uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) - if int64(r0) == -1 { - err = errnoErr(Errno(e1)) + err = nil + runtime.EnterSyscall() + dir, e2, e1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___OPENDIR_A<<4, uintptr(unsafe.Pointer(p))) + runtime.ExitSyscall() + runtime.KeepAlive(unsafe.Pointer(p)) + if dir == 0 { + err = errnoErr2(e1, e2) } - return + return dir, err } +// clearsyscall.Errno resets the errno value to 0. +func clearErrno() + func Closedir(dir uintptr) error { - _, _, e := syscall_syscall(SYS_CLOSEDIR, dir, 0, 0) - if e != 0 { - return errnoErr(e) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_CLOSEDIR<<4, dir) + runtime.ExitSyscall() + if r0 != 0 { + return errnoErr2(e1, e2) } return nil } func Seekdir(dir uintptr, pos int) { - _, _, _ = syscall_syscall(SYS_SEEKDIR, dir, uintptr(pos), 0) + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec()+SYS_SEEKDIR<<4, dir, uintptr(pos)) + runtime.ExitSyscall() } func Telldir(dir uintptr) (int, error) { - p, _, e := syscall_syscall(SYS_TELLDIR, dir, 0, 0) + p, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TELLDIR<<4, dir) pos := int(p) - if pos == -1 { - return pos, errnoErr(e) + if int64(p) == -1 { + return pos, errnoErr2(e1, e2) } return pos, nil } @@ -1261,19 +2018,55 @@ func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { *(*int64)(unsafe.Pointer(&flock[4])) = lk.Start *(*int64)(unsafe.Pointer(&flock[12])) = lk.Len *(*int32)(unsafe.Pointer(&flock[20])) = lk.Pid - _, _, errno := syscall_syscall(SYS_FCNTL, fd, uintptr(cmd), uintptr(unsafe.Pointer(&flock))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, fd, uintptr(cmd), uintptr(unsafe.Pointer(&flock))) + runtime.ExitSyscall() lk.Type = *(*int16)(unsafe.Pointer(&flock[0])) lk.Whence = *(*int16)(unsafe.Pointer(&flock[2])) lk.Start = *(*int64)(unsafe.Pointer(&flock[4])) lk.Len = *(*int64)(unsafe.Pointer(&flock[12])) lk.Pid = *(*int32)(unsafe.Pointer(&flock[20])) - if errno == 0 { + if r0 == 0 { return nil } - return errno + return errnoErr2(e1, e2) +} + +func impl_Flock(fd int, how int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FLOCK<<4, uintptr(fd), uintptr(how)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FlockAddr() *(func(fd int, how int) (err error)) + +var Flock = enter_Flock + +func validFlock(fp uintptr) bool { + 
if funcptrtest(GetZosLibVec()+SYS_FLOCK<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS_FLOCK<<4); err == nil { + return name == "flock" + } + } + return false +} + +func enter_Flock(fd int, how int) (err error) { + funcref := get_FlockAddr() + if validFlock(GetZosLibVec() + SYS_FLOCK<<4) { + *funcref = impl_Flock + } else { + *funcref = legacyFlock + } + return (*funcref)(fd, how) } -func Flock(fd int, how int) error { +func legacyFlock(fd int, how int) error { var flock_type int16 var fcntl_cmd int @@ -1307,41 +2100,51 @@ func Flock(fd int, how int) error { } func Mlock(b []byte) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_NONSWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Mlock2(b []byte, flags int) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_NONSWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Mlockall(flags int) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_NONSWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Munlock(b []byte) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_SWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_SWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Munlockall() (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_SWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_SWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } @@ -1372,15 +2175,104 @@ func ClockGettime(clockid int32, ts *Timespec) error { return nil } -func Statfs(path string, stat *Statfs_t) (err error) { - fd, err := open(path, O_RDONLY, 0) - defer Close(fd) - if err != nil { - return err +// Chtag + +//go:nosplit +func get_ChtagAddr() *(func(path string, ccsid uint64, textbit uint64) error) + +var Chtag = enter_Chtag + +func enter_Chtag(path string, ccsid uint64, textbit uint64) error { + funcref := get_ChtagAddr() + if validSetxattr() { + *funcref = impl_Chtag + } else { + *funcref = legacy_Chtag + } + return (*funcref)(path, ccsid, textbit) +} + +func legacy_Chtag(path string, ccsid uint64, textbit uint64) error { + tag := ccsid<<16 | textbit<<15 + var tag_buff [8]byte + DecodeData(tag_buff[:], 8, tag) + return Setxattr(path, "filetag", tag_buff[:], XATTR_REPLACE) +} + +func impl_Chtag(path string, ccsid uint64, textbit uint64) error { + tag := ccsid<<16 | textbit<<15 + var tag_buff [4]byte + DecodeData(tag_buff[:], 4, tag) + return Setxattr(path, "system.filetag", tag_buff[:], XATTR_REPLACE) +} + +// End of Chtag + +// Nanosleep + +//go:nosplit +func get_NanosleepAddr() *(func(time *Timespec, leftover *Timespec) error) + +var Nanosleep = enter_Nanosleep + +func enter_Nanosleep(time *Timespec, leftover *Timespec) error { + funcref := get_NanosleepAddr() + if 
funcptrtest(GetZosLibVec()+SYS_NANOSLEEP<<4, "") == 0 { + *funcref = impl_Nanosleep + } else { + *funcref = legacyNanosleep + } + return (*funcref)(time, leftover) +} + +func impl_Nanosleep(time *Timespec, leftover *Timespec) error { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_NANOSLEEP<<4, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover))) + runtime.ExitSyscall() + if int64(r0) == -1 { + return errnoErr2(e1, e2) + } + return nil +} + +func legacyNanosleep(time *Timespec, leftover *Timespec) error { + t0 := runtime.Nanotime1() + var secrem uint32 + var nsecrem uint32 + total := time.Sec*1000000000 + time.Nsec + elapsed := runtime.Nanotime1() - t0 + var rv int32 + var rc int32 + var err error + // repeatedly sleep for 1 second until less than 1 second left + for total-elapsed > 1000000000 { + rv, rc, _ = BpxCondTimedWait(uint32(1), uint32(0), uint32(CW_CONDVAR), &secrem, &nsecrem) + if rv != 0 && rc != 112 { // 112 is EAGAIN + if leftover != nil && rc == 120 { // 120 is EINTR + leftover.Sec = int64(secrem) + leftover.Nsec = int64(nsecrem) + } + err = Errno(rc) + return err + } + elapsed = runtime.Nanotime1() - t0 + } + // sleep the remainder + if total > elapsed { + rv, rc, _ = BpxCondTimedWait(uint32(0), uint32(total-elapsed), uint32(CW_CONDVAR), &secrem, &nsecrem) + } + if leftover != nil && rc == 120 { + leftover.Sec = int64(secrem) + leftover.Nsec = int64(nsecrem) } - return Fstatfs(fd, stat) + if rv != 0 && rc != 112 { + err = Errno(rc) + } + return err } +// End of Nanosleep + var ( Stdin = 0 Stdout = 1 @@ -1395,6 +2287,9 @@ var ( errENOENT error = syscall.ENOENT ) +var ZosTraceLevel int +var ZosTracefile *os.File + var ( signalNameMapOnce sync.Once signalNameMap map[string]syscall.Signal @@ -1416,6 +2311,56 @@ func errnoErr(e Errno) error { return e } +var reg *regexp.Regexp + +// enhanced with zos specific errno2 +func errnoErr2(e Errno, e2 uintptr) error { + switch e { + case 0: + return nil + case EAGAIN: + return errEAGAIN + /* + Allow the retrieval of errno2 for EINVAL and ENOENT on zos + case EINVAL: + return errEINVAL + case ENOENT: + return errENOENT + */ + } + if ZosTraceLevel > 0 { + var name string + if reg == nil { + reg = regexp.MustCompile("(^unix\\.[^/]+$|.*\\/unix\\.[^/]+$)") + } + i := 1 + pc, file, line, ok := runtime.Caller(i) + if ok { + name = runtime.FuncForPC(pc).Name() + } + for ok && reg.MatchString(runtime.FuncForPC(pc).Name()) { + i += 1 + pc, file, line, ok = runtime.Caller(i) + } + if ok { + if ZosTracefile == nil { + ZosConsolePrintf("From %s:%d\n", file, line) + ZosConsolePrintf("%s: %s (errno2=0x%x)\n", name, e.Error(), e2) + } else { + fmt.Fprintf(ZosTracefile, "From %s:%d\n", file, line) + fmt.Fprintf(ZosTracefile, "%s: %s (errno2=0x%x)\n", name, e.Error(), e2) + } + } else { + if ZosTracefile == nil { + ZosConsolePrintf("%s (errno2=0x%x)\n", e.Error(), e2) + } else { + fmt.Fprintf(ZosTracefile, "%s (errno2=0x%x)\n", e.Error(), e2) + } + } + } + return e +} + // ErrnoName returns the error name for error number e. func ErrnoName(e Errno) string { i := sort.Search(len(errorList), func(i int) bool { @@ -1474,6 +2419,9 @@ func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (d return nil, EINVAL } + // Set __MAP_64 by default + flags |= __MAP_64 + // Map the requested memory. 
addr, errno := m.mmap(0, uintptr(length), prot, flags, fd, offset) if errno != nil { @@ -1520,14 +2468,6 @@ func (m *mmapper) Munmap(data []byte) (err error) { return nil } -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { @@ -1786,83 +2726,170 @@ func Exec(argv0 string, argv []string, envv []string) error { return syscall.Exec(argv0, argv, envv) } -func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { +func Getag(path string) (ccsid uint16, flag uint16, err error) { + var val [8]byte + sz, err := Getxattr(path, "ccsid", val[:]) + if err != nil { + return + } + ccsid = uint16(EncodeData(val[0:sz])) + sz, err = Getxattr(path, "flags", val[:]) + if err != nil { + return + } + flag = uint16(EncodeData(val[0:sz]) >> 15) + return +} + +// Mount begin +func impl_Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + var _p3 *byte + _p3, err = BytePtrFromString(data) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MOUNT1_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(_p3))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MountAddr() *(func(source string, target string, fstype string, flags uintptr, data string) (err error)) + +var Mount = enter_Mount + +func enter_Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + funcref := get_MountAddr() + if validMount() { + *funcref = impl_Mount + } else { + *funcref = legacyMount + } + return (*funcref)(source, target, fstype, flags, data) +} + +func legacyMount(source string, target string, fstype string, flags uintptr, data string) (err error) { if needspace := 8 - len(fstype); needspace <= 0 { - fstype = fstype[:8] + fstype = fstype[0:8] } else { - fstype += " "[:needspace] + fstype += " "[0:needspace] } return mount_LE(target, source, fstype, uint32(flags), int32(len(data)), data) } -func Unmount(name string, mtm int) (err error) { +func validMount() bool { + if funcptrtest(GetZosLibVec()+SYS___MOUNT1_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___MOUNT1_A<<4); err == nil { + return name == "__mount1_a" + } + } + return false +} + +// Mount end + +// Unmount begin +func impl_Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UMOUNT2_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_UnmountAddr() *(func(target string, flags int) (err error)) + +var Unmount = enter_Unmount + +func enter_Unmount(target string, flags int) (err error) { + funcref := get_UnmountAddr() + if 
funcptrtest(GetZosLibVec()+SYS___UMOUNT2_A<<4, "") == 0 { + *funcref = impl_Unmount + } else { + *funcref = legacyUnmount + } + return (*funcref)(target, flags) +} + +func legacyUnmount(name string, mtm int) (err error) { // mountpoint is always a full path and starts with a '/' // check if input string is not a mountpoint but a filesystem name if name[0] != '/' { - return unmount(name, mtm) + return unmount_LE(name, mtm) } // treat name as mountpoint b2s := func(arr []byte) string { - nulli := bytes.IndexByte(arr, 0) - if nulli == -1 { - return string(arr) - } else { - return string(arr[:nulli]) + var str string + for i := 0; i < len(arr); i++ { + if arr[i] == 0 { + str = string(arr[:i]) + break + } } + return str } var buffer struct { header W_Mnth fsinfo [64]W_Mntent } - fsCount, err := W_Getmntent_A((*byte)(unsafe.Pointer(&buffer)), int(unsafe.Sizeof(buffer))) - if err != nil { - return err - } - if fsCount == 0 { - return EINVAL - } - for i := 0; i < fsCount; i++ { - if b2s(buffer.fsinfo[i].Mountpoint[:]) == name { - err = unmount(b2s(buffer.fsinfo[i].Fsname[:]), mtm) - break + fs_count, err := W_Getmntent_A((*byte)(unsafe.Pointer(&buffer)), int(unsafe.Sizeof(buffer))) + if err == nil { + err = EINVAL + for i := 0; i < fs_count; i++ { + if b2s(buffer.fsinfo[i].Mountpoint[:]) == name { + err = unmount_LE(b2s(buffer.fsinfo[i].Fsname[:]), mtm) + break + } } + } else if fs_count == 0 { + err = EINVAL } return err } -func fdToPath(dirfd int) (path string, err error) { - var buffer [1024]byte - // w_ctrl() - ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_W_IOCTL<<4, - []uintptr{uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))}) - if ret == 0 { - zb := bytes.IndexByte(buffer[:], 0) - if zb == -1 { - zb = len(buffer) - } - // __e2a_l() - runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, - []uintptr{uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)}) - return string(buffer[:zb]), nil - } - // __errno() - errno := int(*(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, - []uintptr{})))) - // __errno2() - errno2 := int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO2<<4, - []uintptr{})) - // strerror_r() - ret = runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_STRERROR_R<<4, - []uintptr{uintptr(errno), uintptr(unsafe.Pointer(&buffer[0])), 1024}) - if ret == 0 { - zb := bytes.IndexByte(buffer[:], 0) - if zb == -1 { - zb = len(buffer) - } - return "", fmt.Errorf("%s (errno2=0x%x)", buffer[:zb], errno2) - } else { - return "", fmt.Errorf("fdToPath errno %d (errno2=0x%x)", errno, errno2) +// Unmount end + +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + reclen, ok := direntReclen(buf) + if !ok { + return 0, false } + return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true } func direntLeToDirentUnix(dirent *direntLE, dir uintptr, path string) (Dirent, error) { @@ -1904,7 +2931,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { } // Get path from fd to avoid unavailable call (fdopendir) - path, err := fdToPath(fd) + path, err := ZosFdToPath(fd) if err != nil { return 0, err } @@ -1918,7 +2945,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { for { var entryLE direntLE var entrypLE *direntLE - 
e := readdir_r(d, &entryLE, &entrypLE) + e := Readdir_r(d, &entryLE, &entrypLE) if e != nil { return n, e } @@ -1964,23 +2991,127 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { return n, nil } -func ReadDirent(fd int, buf []byte) (n int, err error) { - var base = (*uintptr)(unsafe.Pointer(new(uint64))) - return Getdirentries(fd, buf, base) +func Err2ad() (eadd *int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS___ERR2AD<<4) + eadd = (*int)(unsafe.Pointer(r0)) + return } -func direntIno(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) +func ZosConsolePrintf(format string, v ...interface{}) (int, error) { + type __cmsg struct { + _ uint16 + _ [2]uint8 + __msg_length uint32 + __msg uintptr + _ [4]uint8 + } + msg := fmt.Sprintf(format, v...) + strptr := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&msg)).Data) + len := (*reflect.StringHeader)(unsafe.Pointer(&msg)).Len + cmsg := __cmsg{__msg_length: uint32(len), __msg: uintptr(strptr)} + cmd := uint32(0) + runtime.EnterSyscall() + rc, err2, err1 := CallLeFuncWithErr(GetZosLibVec()+SYS_____CONSOLE_A<<4, uintptr(unsafe.Pointer(&cmsg)), 0, uintptr(unsafe.Pointer(&cmd))) + runtime.ExitSyscall() + if rc != 0 { + return 0, fmt.Errorf("%s (errno2=0x%x)\n", err1.Error(), err2) + } + return 0, nil +} +func ZosStringToEbcdicBytes(str string, nullterm bool) (ebcdicBytes []byte) { + if nullterm { + ebcdicBytes = []byte(str + "\x00") + } else { + ebcdicBytes = []byte(str) + } + A2e(ebcdicBytes) + return +} +func ZosEbcdicBytesToString(b []byte, trimRight bool) (str string) { + res := make([]byte, len(b)) + copy(res, b) + E2a(res) + if trimRight { + str = string(bytes.TrimRight(res, " \x00")) + } else { + str = string(res) + } + return } -func direntReclen(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +func fdToPath(dirfd int) (path string, err error) { + var buffer [1024]byte + // w_ctrl() + ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_W_IOCTL<<4, + []uintptr{uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))}) + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + // __e2a_l() + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, + []uintptr{uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)}) + return string(buffer[:zb]), nil + } + // __errno() + errno := int(*(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, + []uintptr{})))) + // __errno2() + errno2 := int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO2<<4, + []uintptr{})) + // strerror_r() + ret = runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_STRERROR_R<<4, + []uintptr{uintptr(errno), uintptr(unsafe.Pointer(&buffer[0])), 1024}) + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + return "", fmt.Errorf("%s (errno2=0x%x)", buffer[:zb], errno2) + } else { + return "", fmt.Errorf("fdToPath errno %d (errno2=0x%x)", errno, errno2) + } } -func direntNamlen(buf []byte) (uint64, bool) { - reclen, ok := direntReclen(buf) - if !ok { - return 0, false +func impl_Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKFIFOAT_A<<4, uintptr(dirfd), 
uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MkfifoatAddr() *(func(dirfd int, path string, mode uint32) (err error)) + +var Mkfifoat = enter_Mkfifoat + +func enter_Mkfifoat(dirfd int, path string, mode uint32) (err error) { + funcref := get_MkfifoatAddr() + if funcptrtest(GetZosLibVec()+SYS___MKFIFOAT_A<<4, "") == 0 { + *funcref = impl_Mkfifoat + } else { + *funcref = legacy_Mkfifoat + } + return (*funcref)(dirfd, path, mode) +} + +func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) { + dirname, err := ZosFdToPath(dirfd) + if err != nil { + return err + } + return Mkfifo(dirname+"/"+path, mode) } + +//sys Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT +//sys Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT +//sys Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix.go b/vendor/golang.org/x/sys/unix/sysvshm_unix.go index 79a84f18b..672d6b0a8 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (darwin && !ios) || linux +//go:build (darwin && !ios) || linux || zos package unix diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go index 9eb0db664..8b7977a28 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin && !ios +//go:build (darwin && !ios) || zos package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 36bf8399f..93a38a97d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -491,6 +491,7 @@ const ( BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_TEST_REG_INVARIANTS = 0x80 BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 BPF_F_TEST_STATE_FREQ = 0x8 @@ -1697,6 +1698,7 @@ const ( KEXEC_ARCH_S390 = 0x160000 KEXEC_ARCH_SH = 0x2a0000 KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_DEBUG = 0x8 KEXEC_FILE_NO_INITRAMFS = 0x4 KEXEC_FILE_ON_CRASH = 0x2 KEXEC_FILE_UNLOAD = 0x1 @@ -1898,6 +1900,7 @@ const ( MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MNT_ID_REQ_SIZE_VER0 = 0x18 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2302,6 +2305,7 @@ const ( PERF_AUX_FLAG_PARTIAL = 0x4 PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK = 0xff00 PERF_AUX_FLAG_TRUNCATED = 0x1 + PERF_BRANCH_ENTRY_INFO_BITS_MAX = 0x21 PERF_BR_ARM64_DEBUG_DATA = 0x7 PERF_BR_ARM64_DEBUG_EXIT = 0x5 PERF_BR_ARM64_DEBUG_HALT = 0x4 @@ -3168,6 +3172,7 @@ const ( STATX_GID = 0x10 STATX_INO = 0x100 STATX_MNT_ID = 0x1000 + STATX_MNT_ID_UNIQUE = 0x4000 STATX_MODE = 0x2 STATX_MTIME = 0x40 STATX_NLINK = 0x4 @@ -3562,12 +3567,16 @@ const ( XDP_RX_RING = 0x2 XDP_SHARED_UMEM = 0x1 XDP_STATISTICS = 0x7 + XDP_TXMD_FLAGS_CHECKSUM = 0x2 + XDP_TXMD_FLAGS_TIMESTAMP = 0x1 + XDP_TX_METADATA = 0x2 XDP_TX_RING = 0x3 XDP_UMEM_COMPLETION_RING = 0x6 XDP_UMEM_FILL_RING = 0x5 XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 XDP_UMEM_PGOFF_FILL_RING = 0x100000000 XDP_UMEM_REG = 0x4 + 
XDP_UMEM_TX_SW_CSUM = 0x2 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 XDP_USE_SG = 0x10 diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index 4dfd2e051..da08b2ab3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -10,41 +10,99 @@ package unix const ( - BRKINT = 0x0001 - CLOCK_MONOTONIC = 0x1 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_THREAD_CPUTIME_ID = 0x3 - CS8 = 0x0030 - CSIZE = 0x0030 - ECHO = 0x00000008 - ECHONL = 0x00000001 - FD_CLOEXEC = 0x01 - FD_CLOFORK = 0x02 - FNDELAY = 0x04 - F_CLOSFD = 9 - F_CONTROL_CVT = 13 - F_DUPFD = 0 - F_DUPFD2 = 8 - F_GETFD = 1 - F_GETFL = 259 - F_GETLK = 5 - F_GETOWN = 10 - F_OK = 0x0 - F_RDLCK = 1 - F_SETFD = 2 - F_SETFL = 4 - F_SETLK = 6 - F_SETLKW = 7 - F_SETOWN = 11 - F_SETTAG = 12 - F_UNLCK = 3 - F_WRLCK = 2 - FSTYPE_ZFS = 0xe9 //"Z" - FSTYPE_HFS = 0xc8 //"H" - FSTYPE_NFS = 0xd5 //"N" - FSTYPE_TFS = 0xe3 //"T" - FSTYPE_AUTOMOUNT = 0xc1 //"A" + BRKINT = 0x0001 + CLOCAL = 0x1 + CLOCK_MONOTONIC = 0x1 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLONE_NEWIPC = 0x08000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x00020000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUTS = 0x04000000 + CLONE_PARENT = 0x00008000 + CS8 = 0x0030 + CSIZE = 0x0030 + ECHO = 0x00000008 + ECHONL = 0x00000001 + EFD_SEMAPHORE = 0x00002000 + EFD_CLOEXEC = 0x00001000 + EFD_NONBLOCK = 0x00000004 + EPOLL_CLOEXEC = 0x00001000 + EPOLL_CTL_ADD = 0 + EPOLL_CTL_MOD = 1 + EPOLL_CTL_DEL = 2 + EPOLLRDNORM = 0x0001 + EPOLLRDBAND = 0x0002 + EPOLLIN = 0x0003 + EPOLLOUT = 0x0004 + EPOLLWRBAND = 0x0008 + EPOLLPRI = 0x0010 + EPOLLERR = 0x0020 + EPOLLHUP = 0x0040 + EPOLLEXCLUSIVE = 0x20000000 + EPOLLONESHOT = 0x40000000 + FD_CLOEXEC = 0x01 + FD_CLOFORK = 0x02 + FD_SETSIZE = 0x800 + FNDELAY = 0x04 + F_CLOSFD = 9 + F_CONTROL_CVT = 13 + F_DUPFD = 0 + F_DUPFD2 = 8 + F_GETFD = 1 + F_GETFL = 259 + F_GETLK = 5 + F_GETOWN = 10 + F_OK = 0x0 + F_RDLCK = 1 + F_SETFD = 2 + F_SETFL = 4 + F_SETLK = 6 + F_SETLKW = 7 + F_SETOWN = 11 + F_SETTAG = 12 + F_UNLCK = 3 + F_WRLCK = 2 + FSTYPE_ZFS = 0xe9 //"Z" + FSTYPE_HFS = 0xc8 //"H" + FSTYPE_NFS = 0xd5 //"N" + FSTYPE_TFS = 0xe3 //"T" + FSTYPE_AUTOMOUNT = 0xc1 //"A" + GRND_NONBLOCK = 1 + GRND_RANDOM = 2 + HUPCL = 0x0100 // Hang up on last close + IN_CLOEXEC = 0x00001000 + IN_NONBLOCK = 0x00000004 + IN_ACCESS = 0x00000001 + IN_MODIFY = 0x00000002 + IN_ATTRIB = 0x00000004 + IN_CLOSE_WRITE = 0x00000008 + IN_CLOSE_NOWRITE = 0x00000010 + IN_OPEN = 0x00000020 + IN_MOVED_FROM = 0x00000040 + IN_MOVED_TO = 0x00000080 + IN_CREATE = 0x00000100 + IN_DELETE = 0x00000200 + IN_DELETE_SELF = 0x00000400 + IN_MOVE_SELF = 0x00000800 + IN_UNMOUNT = 0x00002000 + IN_Q_OVERFLOW = 0x00004000 + IN_IGNORED = 0x00008000 + IN_CLOSE = (IN_CLOSE_WRITE | IN_CLOSE_NOWRITE) + IN_MOVE = (IN_MOVED_FROM | IN_MOVED_TO) + IN_ALL_EVENTS = (IN_ACCESS | IN_MODIFY | IN_ATTRIB | + IN_CLOSE | IN_OPEN | IN_MOVE | + IN_CREATE | IN_DELETE | IN_DELETE_SELF | + IN_MOVE_SELF) + IN_ONLYDIR = 0x01000000 + IN_DONT_FOLLOW = 0x02000000 + IN_EXCL_UNLINK = 0x04000000 + IN_MASK_CREATE = 0x10000000 + IN_MASK_ADD = 0x20000000 + IN_ISDIR = 0x40000000 + IN_ONESHOT = 0x80000000 IP6F_MORE_FRAG = 0x0001 IP6F_OFF_MASK = 0xfff8 IP6F_RESERVED_MASK = 0x0006 @@ -152,10 +210,18 @@ const ( IP_PKTINFO = 101 IP_RECVPKTINFO = 102 IP_TOS = 2 - IP_TTL = 3 + IP_TTL = 14 IP_UNBLOCK_SOURCE = 11 + ICMP6_FILTER = 1 + MCAST_INCLUDE = 0 
+ MCAST_EXCLUDE = 1 + MCAST_JOIN_GROUP = 40 + MCAST_LEAVE_GROUP = 41 + MCAST_JOIN_SOURCE_GROUP = 42 + MCAST_LEAVE_SOURCE_GROUP = 43 + MCAST_BLOCK_SOURCE = 44 + MCAST_UNBLOCK_SOURCE = 46 ICANON = 0x0010 - ICMP6_FILTER = 0x26 ICRNL = 0x0002 IEXTEN = 0x0020 IGNBRK = 0x0004 @@ -165,10 +231,10 @@ const ( ISTRIP = 0x0080 IXON = 0x0200 IXOFF = 0x0100 - LOCK_SH = 0x1 // Not exist on zOS - LOCK_EX = 0x2 // Not exist on zOS - LOCK_NB = 0x4 // Not exist on zOS - LOCK_UN = 0x8 // Not exist on zOS + LOCK_SH = 0x1 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_UN = 0x8 POLLIN = 0x0003 POLLOUT = 0x0004 POLLPRI = 0x0010 @@ -182,15 +248,29 @@ const ( MAP_PRIVATE = 0x1 // changes are private MAP_SHARED = 0x2 // changes are shared MAP_FIXED = 0x4 // place exactly - MCAST_JOIN_GROUP = 40 - MCAST_LEAVE_GROUP = 41 - MCAST_JOIN_SOURCE_GROUP = 42 - MCAST_LEAVE_SOURCE_GROUP = 43 - MCAST_BLOCK_SOURCE = 44 - MCAST_UNBLOCK_SOURCE = 45 + __MAP_MEGA = 0x8 + __MAP_64 = 0x10 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 MS_SYNC = 0x1 // msync - synchronous writes MS_ASYNC = 0x2 // asynchronous writes MS_INVALIDATE = 0x4 // invalidate mappings + MS_BIND = 0x00001000 + MS_MOVE = 0x00002000 + MS_NOSUID = 0x00000002 + MS_PRIVATE = 0x00040000 + MS_REC = 0x00004000 + MS_REMOUNT = 0x00008000 + MS_RDONLY = 0x00000001 + MS_UNBINDABLE = 0x00020000 + MNT_DETACH = 0x00000004 + ZOSDSFS_SUPER_MAGIC = 0x44534653 // zOS DSFS + NFS_SUPER_MAGIC = 0x6969 // NFS + NSFS_MAGIC = 0x6e736673 // PROCNS + PROC_SUPER_MAGIC = 0x9fa0 // proc FS + ZOSTFS_SUPER_MAGIC = 0x544653 // zOS TFS + ZOSUFS_SUPER_MAGIC = 0x554653 // zOS UFS + ZOSZFS_SUPER_MAGIC = 0x5A4653 // zOS ZFS MTM_RDONLY = 0x80000000 MTM_RDWR = 0x40000000 MTM_UMOUNT = 0x10000000 @@ -205,13 +285,20 @@ const ( MTM_REMOUNT = 0x00000100 MTM_NOSECURITY = 0x00000080 NFDBITS = 0x20 + ONLRET = 0x0020 // NL performs CR function O_ACCMODE = 0x03 O_APPEND = 0x08 O_ASYNCSIG = 0x0200 O_CREAT = 0x80 + O_DIRECT = 0x00002000 + O_NOFOLLOW = 0x00004000 + O_DIRECTORY = 0x00008000 + O_PATH = 0x00080000 + O_CLOEXEC = 0x00001000 O_EXCL = 0x40 O_GETFL = 0x0F O_LARGEFILE = 0x0400 + O_NDELAY = 0x4 O_NONBLOCK = 0x04 O_RDONLY = 0x02 O_RDWR = 0x03 @@ -248,6 +335,7 @@ const ( AF_IUCV = 17 AF_LAT = 14 AF_LINK = 18 + AF_LOCAL = AF_UNIX // AF_LOCAL is an alias for AF_UNIX AF_MAX = 30 AF_NBS = 7 AF_NDD = 23 @@ -285,15 +373,33 @@ const ( RLIMIT_AS = 5 RLIMIT_NOFILE = 6 RLIMIT_MEMLIMIT = 7 + RLIMIT_MEMLOCK = 0x8 RLIM_INFINITY = 2147483647 + SCHED_FIFO = 0x2 + SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x01 SF_CLOSE = 0x00000002 SF_REUSE = 0x00000001 + SHM_RND = 0x2 + SHM_RDONLY = 0x1 + SHMLBA = 0x1000 + IPC_STAT = 0x3 + IPC_SET = 0x2 + IPC_RMID = 0x1 + IPC_PRIVATE = 0x0 + IPC_CREAT = 0x1000000 + __IPC_MEGA = 0x4000000 + __IPC_SHAREAS = 0x20000000 + __IPC_BELOWBAR = 0x10000000 + IPC_EXCL = 0x2000000 + __IPC_GIGA = 0x8000000 SHUT_RD = 0 SHUT_RDWR = 2 SHUT_WR = 1 + SOCK_CLOEXEC = 0x00001000 SOCK_CONN_DGRAM = 6 SOCK_DGRAM = 2 + SOCK_NONBLOCK = 0x800 SOCK_RAW = 3 SOCK_RDM = 4 SOCK_SEQPACKET = 5 @@ -378,8 +484,6 @@ const ( S_IFMST = 0x00FF0000 TCP_KEEPALIVE = 0x8 TCP_NODELAY = 0x1 - TCP_INFO = 0xb - TCP_USER_TIMEOUT = 0x1 TIOCGWINSZ = 0x4008a368 TIOCSWINSZ = 0x8008a367 TIOCSBRK = 0x2000a77b @@ -427,7 +531,10 @@ const ( VSUSP = 9 VTIME = 10 WCONTINUED = 0x4 + WEXITED = 0x8 WNOHANG = 0x1 + WNOWAIT = 0x20 + WSTOPPED = 0x10 WUNTRACED = 0x2 _BPX_SWAP = 1 _BPX_NONSWAP = 2 @@ -452,8 +559,28 @@ const ( MADV_FREE = 15 // for Linux compatibility -- no zos semantics MADV_WIPEONFORK = 16 // for Linux compatibility -- no zos semantics MADV_KEEPONFORK = 17 // for 
Linux compatibility -- no zos semantics - AT_SYMLINK_NOFOLLOW = 1 // for Unix compatibility -- no zos semantics - AT_FDCWD = 2 // for Unix compatibility -- no zos semantics + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x100 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + P_PID = 0 + P_PGID = 1 + P_ALL = 2 + PR_SET_NAME = 15 + PR_GET_NAME = 16 + PR_SET_NO_NEW_PRIVS = 38 + PR_GET_NO_NEW_PRIVS = 39 + PR_SET_DUMPABLE = 4 + PR_GET_DUMPABLE = 3 + PR_SET_PDEATHSIG = 1 + PR_GET_PDEATHSIG = 2 + PR_SET_CHILD_SUBREAPER = 36 + PR_GET_CHILD_SUBREAPER = 37 + AT_FDCWD = -100 + AT_EACCESS = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_REMOVEDIR = 0x200 + RENAME_NOREPLACE = 1 << 0 ) const ( @@ -476,6 +603,7 @@ const ( EMLINK = Errno(125) ENAMETOOLONG = Errno(126) ENFILE = Errno(127) + ENOATTR = Errno(265) ENODEV = Errno(128) ENOENT = Errno(129) ENOEXEC = Errno(130) @@ -700,7 +828,7 @@ var errorList = [...]struct { {145, "EDC5145I", "The parameter list is too long, or the message to receive was too large for the buffer."}, {146, "EDC5146I", "Too many levels of symbolic links."}, {147, "EDC5147I", "Illegal byte sequence."}, - {148, "", ""}, + {148, "EDC5148I", "The named attribute or data not available."}, {149, "EDC5149I", "Value Overflow Error."}, {150, "EDC5150I", "UNIX System Services is not active."}, {151, "EDC5151I", "Dynamic allocation error."}, @@ -743,6 +871,7 @@ var errorList = [...]struct { {259, "EDC5259I", "A CUN_RS_NO_CONVERSION error was issued by Unicode Services."}, {260, "EDC5260I", "A CUN_RS_TABLE_NOT_ALIGNED error was issued by Unicode Services."}, {262, "EDC5262I", "An iconv() function encountered an unexpected error while using Unicode Services."}, + {265, "EDC5265I", "The named attribute not available."}, {1000, "EDC8000I", "A bad socket-call constant was found in the IUCV header."}, {1001, "EDC8001I", "An error was found in the IUCV header."}, {1002, "EDC8002I", "A socket descriptor is out of range."}, diff --git a/vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s b/vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s new file mode 100644 index 000000000..b77ff5db9 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s @@ -0,0 +1,364 @@ +// go run mksyscall_zos_s390x.go -o_sysnum zsysnum_zos_s390x.go -o_syscall zsyscall_zos_s390x.go -i_syscall syscall_zos_s390x.go -o_asm zsymaddr_zos_s390x.s +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build zos && s390x +#include "textflag.h" + +// provide the address of function variable to be fixed up. 
+ +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FlistxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Flistxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FremovexattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fremovexattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FgetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fgetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FsetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fsetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_accept4Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·accept4(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_RemovexattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Removexattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_Dup3Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Dup3(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_DirfdAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Dirfd(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollCreateAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollCreate(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollCreate1Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollCreate1(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollCtlAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollCtl(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollPwaitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollPwait(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollWaitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollWait(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EventfdAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Eventfd(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FaccessatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Faccessat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FchmodatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fchmodat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FchownatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fchownat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FdatasyncAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fdatasync(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_fstatatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·fstatat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LgetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lgetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT + +TEXT ·get_LsetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lsetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FstatfsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fstatfs(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FutimesAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Futimes(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FutimesatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Futimesat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_GetrandomAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Getrandom(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyInitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyInit(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyInit1Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyInit1(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyAddWatchAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyAddWatch(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyRmWatchAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyRmWatch(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_ListxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Listxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LlistxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Llistxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LremovexattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lremovexattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LutimesAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lutimes(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_StatfsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Statfs(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SyncfsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Syncfs(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_UnshareAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Unshare(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Linkat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_MkdiratAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mkdirat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_MknodatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mknodat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_PivotRootAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·PivotRoot(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT 
·get_PrctlAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Prctl(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_PrlimitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Prlimit(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_RenameatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Renameat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_Renameat2Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Renameat2(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SethostnameAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Sethostname(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SetnsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Setns(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SymlinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Symlinkat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_UnlinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Unlinkat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_openatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·openat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_openat2Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·openat2(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_utimensatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·utimensat(SB), R8 + MOVD R8, ret+0(FP) + RET diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index 94f011238..7ccf66b7e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags zos,s390x syscall_zos_s390x.go +// go run mksyscall_zos_s390x.go -o_sysnum zsysnum_zos_s390x.go -o_syscall zsyscall_zos_s390x.go -i_syscall syscall_zos_s390x.go -o_asm zsymaddr_zos_s390x.s // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build zos && s390x @@ -6,17 +6,100 @@ package unix import ( + "runtime" + "syscall" "unsafe" ) +var _ syscall.Errno + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), uintptr(arg)) + runtime.ExitSyscall() val = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Flistxattr(fd int, dest []byte) (sz int, err error) { + var _p0 unsafe.Pointer + if len(dest) > 0 { + _p0 = unsafe.Pointer(&dest[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FLISTXATTR_A<<4, uintptr(fd), uintptr(_p0), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FlistxattrAddr() *(func(fd int, dest []byte) (sz int, err error)) + +var Flistxattr = enter_Flistxattr + +func enter_Flistxattr(fd int, dest []byte) (sz int, err error) { + funcref := get_FlistxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FLISTXATTR_A<<4, "") == 0 { + *funcref = impl_Flistxattr + } else { + *funcref = error_Flistxattr + } + return (*funcref)(fd, dest) +} + +func error_Flistxattr(fd int, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fremovexattr(fd int, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FREMOVEXATTR_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FremovexattrAddr() *(func(fd int, attr string) (err error)) + +var Fremovexattr = enter_Fremovexattr + +func enter_Fremovexattr(fd int, attr string) (err error) { + funcref := get_FremovexattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FREMOVEXATTR_A<<4, "") == 0 { + *funcref = impl_Fremovexattr + } else { + *funcref = error_Fremovexattr } + return (*funcref)(fd, attr) +} + +func error_Fremovexattr(fd int, attr string) (err error) { + err = ENOSYS return } @@ -29,10 +112,12 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_READ<<4, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -46,31 +131,159 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WRITE<<4, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func 
impl_Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FGETXATTR_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FgetxattrAddr() *(func(fd int, attr string, dest []byte) (sz int, err error)) + +var Fgetxattr = enter_Fgetxattr + +func enter_Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + funcref := get_FgetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FGETXATTR_A<<4, "") == 0 { + *funcref = impl_Fgetxattr + } else { + *funcref = error_Fgetxattr + } + return (*funcref)(fd, attr, dest) +} + +func error_Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fsetxattr(fd int, attr string, data []byte, flag int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(data) > 0 { + _p1 = unsafe.Pointer(&data[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FSETXATTR_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(data)), uintptr(flag)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FsetxattrAddr() *(func(fd int, attr string, data []byte, flag int) (err error)) + +var Fsetxattr = enter_Fsetxattr + +func enter_Fsetxattr(fd int, attr string, data []byte, flag int) (err error) { + funcref := get_FsetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FSETXATTR_A<<4, "") == 0 { + *funcref = impl_Fsetxattr + } else { + *funcref = error_Fsetxattr } + return (*funcref)(fd, attr, data, flag) +} + +func error_Fsetxattr(fd int, attr string, data []byte, flag int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := syscall_syscall(SYS___ACCEPT_A, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___ACCEPT_A<<4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___ACCEPT4_A<<4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_accept4Addr() *(func(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)) + +var accept4 = enter_accept4 + +func enter_accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd 
int, err error) { + funcref := get_accept4Addr() + if funcptrtest(GetZosLibVec()+SYS___ACCEPT4_A<<4, "") == 0 { + *funcref = impl_accept4 + } else { + *funcref = error_accept4 } + return (*funcref)(s, rsa, addrlen, flags) +} + +func error_accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(SYS___BIND_A, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___BIND_A<<4, uintptr(s), uintptr(addr), uintptr(addrlen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -78,9 +291,11 @@ func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(SYS___CONNECT_A, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CONNECT_A<<4, uintptr(s), uintptr(addr), uintptr(addrlen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -88,10 +303,10 @@ func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(n int, list *_Gid_t) (nn int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETGROUPS<<4, uintptr(n), uintptr(unsafe.Pointer(list))) nn = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -99,9 +314,9 @@ func getgroups(n int, list *_Gid_t) (nn int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(n int, list *_Gid_t) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETGROUPS<<4, uintptr(n), uintptr(unsafe.Pointer(list))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -109,9 +324,11 @@ func setgroups(n int, list *_Gid_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := syscall_syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETSOCKOPT<<4, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -119,9 +336,11 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := syscall_syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) - if e1 != 0 { - err = errnoErr(e1) + 
runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETSOCKOPT<<4, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -129,10 +348,10 @@ func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SOCKET<<4, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -140,9 +359,9 @@ func socket(domain int, typ int, proto int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := syscall_rawsyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SOCKETPAIR<<4, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -150,9 +369,9 @@ func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawsyscall(SYS___GETPEERNAME_A, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETPEERNAME_A<<4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -160,10 +379,52 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawsyscall(SYS___GETSOCKNAME_A, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETSOCKNAME_A<<4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___REMOVEXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_RemovexattrAddr() *(func(path string, attr string) (err error)) + +var Removexattr = enter_Removexattr + +func enter_Removexattr(path string, attr string) (err error) { + funcref := get_RemovexattrAddr() + if funcptrtest(GetZosLibVec()+SYS___REMOVEXATTR_A<<4, "") == 0 { + *funcref = impl_Removexattr + } else { + *funcref = error_Removexattr 
} + return (*funcref)(path, attr) +} + +func error_Removexattr(path string, attr string) (err error) { + err = ENOSYS return } @@ -176,10 +437,12 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(SYS___RECVFROM_A, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RECVFROM_A<<4, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -193,9 +456,11 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall6(SYS___SENDTO_A, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SENDTO_A<<4, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -203,10 +468,12 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(SYS___RECVMSG_A, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RECVMSG_A<<4, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -214,10 +481,12 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(SYS___SENDMSG_A, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SENDMSG_A<<4, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -225,10 +494,12 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := syscall_syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MMAP<<4, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + runtime.ExitSyscall() ret = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -236,9 +507,11 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := 
syscall_syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MUNMAP<<4, uintptr(addr), uintptr(length)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -246,9 +519,11 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req int, arg uintptr) (err error) { - _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_IOCTL<<4, uintptr(fd), uintptr(req), uintptr(arg)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -256,9 +531,62 @@ func ioctl(fd int, req int, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { - _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_IOCTL<<4, uintptr(fd), uintptr(req), uintptr(arg)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmat(id int, addr uintptr, flag int) (ret uintptr, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMAT<<4, uintptr(id), uintptr(addr), uintptr(flag)) + runtime.ExitSyscall() + ret = uintptr(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMCTL64<<4, uintptr(id), uintptr(cmd), uintptr(unsafe.Pointer(buf))) + runtime.ExitSyscall() + result = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmdt(addr uintptr) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMDT<<4, uintptr(addr)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmget(key int, size int, flag int) (id int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMGET<<4, uintptr(key), uintptr(size), uintptr(flag)) + runtime.ExitSyscall() + id = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -271,9 +599,11 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___ACCESS_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___ACCESS_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -286,9 +616,11 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHDIR_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := 
CallLeFuncWithErr(GetZosLibVec()+SYS___CHDIR_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -301,9 +633,11 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHOWN_A, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHOWN_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -316,9 +650,11 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHMOD_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHMOD_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -331,10 +667,12 @@ func Creat(path string, mode uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(SYS___CREAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CREAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -342,10 +680,12 @@ func Creat(path string, mode uint32) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := syscall_syscall(SYS_DUP, uintptr(oldfd), 0, 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DUP<<4, uintptr(oldfd)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -353,617 +693,2216 @@ func Dup(oldfd int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(oldfd int, newfd int) (err error) { - _, _, e1 := syscall_syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DUP2<<4, uintptr(oldfd), uintptr(newfd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Errno2() (er2 int) { - uer2, _, _ := syscall_syscall(SYS___ERRNO2, 0, 0, 0) - er2 = int(uer2) +func impl_Dup3(oldfd int, newfd int, flags int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DUP3<<4, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_Dup3Addr() *(func(oldfd int, newfd int, flags int) (err error)) -func Err2ad() (eadd *int) { - ueadd, _, _ := syscall_syscall(SYS___ERR2AD, 0, 0, 0) - eadd = (*int)(unsafe.Pointer(ueadd)) - return -} +var Dup3 = enter_Dup3 -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func enter_Dup3(oldfd int, newfd int, flags int) (err error) { + funcref := get_Dup3Addr() + if funcptrtest(GetZosLibVec()+SYS_DUP3<<4, "") == 0 { + *funcref = impl_Dup3 + } else { + *funcref 
= error_Dup3 + } + return (*funcref)(oldfd, newfd, flags) +} -func Exit(code int) { - syscall_syscall(SYS_EXIT, uintptr(code), 0, 0) +func error_Dup3(oldfd int, newfd int, flags int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchdir(fd int) (err error) { - _, _, e1 := syscall_syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Dirfd(dirp uintptr) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DIRFD<<4, uintptr(dirp)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_DirfdAddr() *(func(dirp uintptr) (fd int, err error)) -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := syscall_syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) +var Dirfd = enter_Dirfd + +func enter_Dirfd(dirp uintptr) (fd int, err error) { + funcref := get_DirfdAddr() + if funcptrtest(GetZosLibVec()+SYS_DIRFD<<4, "") == 0 { + *funcref = impl_Dirfd + } else { + *funcref = error_Dirfd } + return (*funcref)(dirp) +} + +func error_Dirfd(dirp uintptr) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := syscall_syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) +func impl_EpollCreate(size int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_CREATE<<4, uintptr(size)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollCreateAddr() *(func(size int) (fd int, err error)) -func FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) { - r0, _, e1 := syscall_syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - retval = int(r0) - if e1 != 0 { - err = errnoErr(e1) +var EpollCreate = enter_EpollCreate + +func enter_EpollCreate(size int) (fd int, err error) { + funcref := get_EpollCreateAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_CREATE<<4, "") == 0 { + *funcref = impl_EpollCreate + } else { + *funcref = error_EpollCreate } + return (*funcref)(size) +} + +func error_EpollCreate(size int) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *Stat_LE_t) (err error) { - _, _, e1 := syscall_syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_EpollCreate1(flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_CREATE1<<4, uintptr(flags)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollCreate1Addr() *(func(flags int) (fd int, err error)) -func Fstatvfs(fd int, stat *Statvfs_t) (err error) { - _, _, e1 := syscall_syscall(SYS_FSTATVFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) +var EpollCreate1 = enter_EpollCreate1 + +func enter_EpollCreate1(flags int) (fd int, err error) { + funcref := get_EpollCreate1Addr() + if 
funcptrtest(GetZosLibVec()+SYS_EPOLL_CREATE1<<4, "") == 0 { + *funcref = impl_EpollCreate1 + } else { + *funcref = error_EpollCreate1 } + return (*funcref)(flags) +} + +func error_EpollCreate1(flags int) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fsync(fd int) (err error) { - _, _, e1 := syscall_syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_CTL<<4, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollCtlAddr() *(func(epfd int, op int, fd int, event *EpollEvent) (err error)) -func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := syscall_syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) +var EpollCtl = enter_EpollCtl + +func enter_EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + funcref := get_EpollCtlAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_CTL<<4, "") == 0 { + *funcref = impl_EpollCtl + } else { + *funcref = error_EpollCtl } - return + return (*funcref)(epfd, op, fd, event) } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpagesize() (pgsize int) { - r0, _, _ := syscall_syscall(SYS_GETPAGESIZE, 0, 0, 0) - pgsize = int(r0) +func error_EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mprotect(b []byte, prot int) (err error) { +func impl_EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) { var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_PWAIT<<4, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), uintptr(unsafe.Pointer(sigmask))) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollPwaitAddr() *(func(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error)) -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) +var EpollPwait = enter_EpollPwait + +func enter_EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) { + funcref := get_EpollPwaitAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_PWAIT<<4, "") == 0 { + *funcref = impl_EpollPwait } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) + *funcref = error_EpollPwait } + return (*funcref)(epfd, events, msec, sigmask) +} + +func error_EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) { + n = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func 
Poll(fds []PollFd, timeout int) (n int, err error) { +func impl_EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer - if len(fds) > 0 { - _p0 = unsafe.Pointer(&fds[0]) + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(SYS_POLL, uintptr(_p0), uintptr(len(fds)), uintptr(timeout)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_WAIT<<4, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec)) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollWaitAddr() *(func(epfd int, events []EpollEvent, msec int) (n int, err error)) -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := syscall_syscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) +var EpollWait = enter_EpollWait + +func enter_EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + funcref := get_EpollWaitAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_WAIT<<4, "") == 0 { + *funcref = impl_EpollWait + } else { + *funcref = error_EpollWait } + return (*funcref)(epfd, events, msec) +} + +func error_EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + n = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func W_Getmntent(buff *byte, size int) (lastsys int, err error) { - r0, _, e1 := syscall_syscall(SYS_W_GETMNTENT, uintptr(unsafe.Pointer(buff)), uintptr(size), 0) - lastsys = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func Errno2() (er2 int) { + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS___ERRNO2<<4) + runtime.ExitSyscall() + er2 = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func W_Getmntent_A(buff *byte, size int) (lastsys int, err error) { - r0, _, e1 := syscall_syscall(SYS___W_GETMNTENT_A, uintptr(unsafe.Pointer(buff)), uintptr(size), 0) - lastsys = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Eventfd(initval uint, flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EVENTFD<<4, uintptr(initval), uintptr(flags)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } +//go:nosplit +func get_EventfdAddr() *(func(initval uint, flags int) (fd int, err error)) + +var Eventfd = enter_Eventfd + +func enter_Eventfd(initval uint, flags int) (fd int, err error) { + funcref := get_EventfdAddr() + if funcptrtest(GetZosLibVec()+SYS_EVENTFD<<4, "") == 0 { + *funcref = impl_Eventfd + } else { + *funcref = error_Eventfd + } + return (*funcref)(initval, flags) +} + +func error_Eventfd(initval uint, flags int) (fd int, err error) { + fd = -1 + err = ENOSYS + return +} + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) { +func Exit(code int) { + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec()+SYS_EXIT<<4, uintptr(code)) + runtime.ExitSyscall() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { var _p0 *byte _p0, err = 
BytePtrFromString(path) if err != nil { return } - var _p1 *byte - _p1, err = BytePtrFromString(filesystem) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - var _p3 *byte - _p3, err = BytePtrFromString(parm) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(SYS___MOUNT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(mtm), uintptr(parmlen), uintptr(unsafe.Pointer(_p3))) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FACCESSAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_FaccessatAddr() *(func(dirfd int, path string, mode uint32, flags int) (err error)) -func unmount(filesystem string, mtm int) (err error) { +var Faccessat = enter_Faccessat + +func enter_Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + funcref := get_FaccessatAddr() + if funcptrtest(GetZosLibVec()+SYS___FACCESSAT_A<<4, "") == 0 { + *funcref = impl_Faccessat + } else { + *funcref = error_Faccessat + } + return (*funcref)(dirfd, path, mode, flags) +} + +func error_Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCHDIR<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCHMOD<<4, uintptr(fd), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(filesystem) + _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UMOUNT_A, uintptr(unsafe.Pointer(_p0)), uintptr(mtm), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FCHMODAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FchmodatAddr() *(func(dirfd int, path string, mode uint32, flags int) (err error)) + +var Fchmodat = enter_Fchmodat + +func enter_Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + funcref := get_FchmodatAddr() + if funcptrtest(GetZosLibVec()+SYS___FCHMODAT_A<<4, "") == 0 { + *funcref = impl_Fchmodat + } else { + *funcref = error_Fchmodat + } + return (*funcref)(dirfd, path, mode, flags) +} + +func error_Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCHOWN<<4, uintptr(fd), uintptr(uid), 
uintptr(gid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Chroot(path string) (err error) { +func impl_Fchownat(fd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHROOT_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FCHOWNAT_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FchownatAddr() *(func(fd int, path string, uid int, gid int, flags int) (err error)) + +var Fchownat = enter_Fchownat + +func enter_Fchownat(fd int, path string, uid int, gid int, flags int) (err error) { + funcref := get_FchownatAddr() + if funcptrtest(GetZosLibVec()+SYS___FCHOWNAT_A<<4, "") == 0 { + *funcref = impl_Fchownat + } else { + *funcref = error_Fchownat } + return (*funcref)(fd, path, uid, gid, flags) +} + +func error_Fchownat(fd int, path string, uid int, gid int, flags int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Uname(buf *Utsname) (err error) { - _, _, e1 := syscall_rawsyscall(SYS___UNAME_A, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), uintptr(arg)) + runtime.ExitSyscall() + retval = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Gethostname(buf []byte) (err error) { +func impl_Fdatasync(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FDATASYNC<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FdatasyncAddr() *(func(fd int) (err error)) + +var Fdatasync = enter_Fdatasync + +func enter_Fdatasync(fd int) (err error) { + funcref := get_FdatasyncAddr() + if funcptrtest(GetZosLibVec()+SYS_FDATASYNC<<4, "") == 0 { + *funcref = impl_Fdatasync + } else { + *funcref = error_Fdatasync + } + return (*funcref)(fd) +} + +func error_Fdatasync(fd int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstat(fd int, stat *Stat_LE_t) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSTAT<<4, uintptr(fd), uintptr(unsafe.Pointer(stat))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FSTATAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_fstatatAddr() *(func(dirfd int, path string, stat *Stat_LE_t, flags int) 
(err error)) + +var fstatat = enter_fstatat + +func enter_fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) { + funcref := get_fstatatAddr() + if funcptrtest(GetZosLibVec()+SYS___FSTATAT_A<<4, "") == 0 { + *funcref = impl_fstatat + } else { + *funcref = error_fstatat + } + return (*funcref)(dirfd, path, stat, flags) +} + +func error_fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LGETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LgetxattrAddr() *(func(link string, attr string, dest []byte) (sz int, err error)) + +var Lgetxattr = enter_Lgetxattr + +func enter_Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + funcref := get_LgetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LGETXATTR_A<<4, "") == 0 { + *funcref = impl_Lgetxattr + } else { + *funcref = error_Lgetxattr + } + return (*funcref)(link, attr, dest) +} + +func error_Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LSETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LsetxattrAddr() *(func(path string, attr string, data []byte, flags int) (err error)) + +var Lsetxattr = enter_Lsetxattr + +func enter_Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + funcref := get_LsetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LSETXATTR_A<<4, "") == 0 { + *funcref = impl_Lsetxattr + } else { + *funcref = error_Lsetxattr + } + return (*funcref)(path, attr, data, flags) +} + +func error_Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fstatfs(fd int, buf *Statfs_t) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSTATFS<<4, uintptr(fd), uintptr(unsafe.Pointer(buf))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FstatfsAddr() *(func(fd int, buf *Statfs_t) (err error)) + +var Fstatfs = enter_Fstatfs + +func 
enter_Fstatfs(fd int, buf *Statfs_t) (err error) { + funcref := get_FstatfsAddr() + if funcptrtest(GetZosLibVec()+SYS_FSTATFS<<4, "") == 0 { + *funcref = impl_Fstatfs + } else { + *funcref = error_Fstatfs + } + return (*funcref)(fd, buf) +} + +func error_Fstatfs(fd int, buf *Statfs_t) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatvfs(fd int, stat *Statvfs_t) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSTATVFS<<4, uintptr(fd), uintptr(unsafe.Pointer(stat))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSYNC<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Futimes(fd int, tv []Timeval) (err error) { var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) + if len(tv) > 0 { + _p0 = unsafe.Pointer(&tv[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(SYS___GETHOSTNAME_A, uintptr(_p0), uintptr(len(buf)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FUTIMES<<4, uintptr(fd), uintptr(_p0), uintptr(len(tv))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_FutimesAddr() *(func(fd int, tv []Timeval) (err error)) -func Getegid() (egid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETEGID, 0, 0, 0) - egid = int(r0) +var Futimes = enter_Futimes + +func enter_Futimes(fd int, tv []Timeval) (err error) { + funcref := get_FutimesAddr() + if funcptrtest(GetZosLibVec()+SYS_FUTIMES<<4, "") == 0 { + *funcref = impl_Futimes + } else { + *funcref = error_Futimes + } + return (*funcref)(fd, tv) +} + +func error_Futimes(fd int, tv []Timeval) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Geteuid() (uid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETEUID, 0, 0, 0) - uid = int(r0) +func impl_Futimesat(dirfd int, path string, tv []Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(tv) > 0 { + _p1 = unsafe.Pointer(&tv[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FUTIMESAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(tv))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FutimesatAddr() *(func(dirfd int, path string, tv []Timeval) (err error)) + +var Futimesat = enter_Futimesat + +func enter_Futimesat(dirfd int, path string, tv []Timeval) (err error) { + funcref := get_FutimesatAddr() + if funcptrtest(GetZosLibVec()+SYS___FUTIMESAT_A<<4, "") == 0 { + *funcref = impl_Futimesat + } else { + *funcref = error_Futimesat + } + return (*funcref)(dirfd, path, tv) +} + +func error_Futimesat(dirfd int, path string, tv []Timeval) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getgid() (gid int) { - r0, _, _ := 
syscall_rawsyscall(SYS_GETGID, 0, 0, 0) - gid = int(r0) +func Ftruncate(fd int, length int64) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FTRUNCATE<<4, uintptr(fd), uintptr(length)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getpid() (pid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETPID, 0, 0, 0) - pid = int(r0) +func impl_Getrandom(buf []byte, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETRANDOM<<4, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_GetrandomAddr() *(func(buf []byte, flags int) (n int, err error)) + +var Getrandom = enter_Getrandom + +func enter_Getrandom(buf []byte, flags int) (n int, err error) { + funcref := get_GetrandomAddr() + if funcptrtest(GetZosLibVec()+SYS_GETRANDOM<<4, "") == 0 { + *funcref = impl_Getrandom + } else { + *funcref = error_Getrandom + } + return (*funcref)(buf, flags) +} + +func error_Getrandom(buf []byte, flags int) (n int, err error) { + n = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func impl_InotifyInit() (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec() + SYS_INOTIFY_INIT<<4) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyInitAddr() *(func() (fd int, err error)) + +var InotifyInit = enter_InotifyInit + +func enter_InotifyInit() (fd int, err error) { + funcref := get_InotifyInitAddr() + if funcptrtest(GetZosLibVec()+SYS_INOTIFY_INIT<<4, "") == 0 { + *funcref = impl_InotifyInit + } else { + *funcref = error_InotifyInit } + return (*funcref)() +} + +func error_InotifyInit() (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getppid() (pid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETPPID, 0, 0, 0) - pid = int(r0) +func impl_InotifyInit1(flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_INOTIFY_INIT1<<4, uintptr(flags)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyInit1Addr() *(func(flags int) (fd int, err error)) + +var InotifyInit1 = enter_InotifyInit1 + +func enter_InotifyInit1(flags int) (fd int, err error) { + funcref := get_InotifyInit1Addr() + if funcptrtest(GetZosLibVec()+SYS_INOTIFY_INIT1<<4, "") == 0 { + *funcref = impl_InotifyInit1 + } else { + *funcref = error_InotifyInit1 + } + return (*funcref)(flags) +} + +func error_InotifyInit1(flags int) (fd int, err error) { + fd = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := 
CallLeFuncWithErr(GetZosLibVec()+SYS___INOTIFY_ADD_WATCH_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + runtime.ExitSyscall() + watchdesc = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyAddWatchAddr() *(func(fd int, pathname string, mask uint32) (watchdesc int, err error)) + +var InotifyAddWatch = enter_InotifyAddWatch + +func enter_InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + funcref := get_InotifyAddWatchAddr() + if funcptrtest(GetZosLibVec()+SYS___INOTIFY_ADD_WATCH_A<<4, "") == 0 { + *funcref = impl_InotifyAddWatch + } else { + *funcref = error_InotifyAddWatch + } + return (*funcref)(fd, pathname, mask) +} + +func error_InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + watchdesc = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_INOTIFY_RM_WATCH<<4, uintptr(fd), uintptr(watchdesc)) + runtime.ExitSyscall() + success = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyRmWatchAddr() *(func(fd int, watchdesc uint32) (success int, err error)) + +var InotifyRmWatch = enter_InotifyRmWatch + +func enter_InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + funcref := get_InotifyRmWatchAddr() + if funcptrtest(GetZosLibVec()+SYS_INOTIFY_RM_WATCH<<4, "") == 0 { + *funcref = impl_InotifyRmWatch + } else { + *funcref = error_InotifyRmWatch + } + return (*funcref)(fd, watchdesc) +} + +func error_InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + success = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LISTXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_ListxattrAddr() *(func(path string, dest []byte) (sz int, err error)) + +var Listxattr = enter_Listxattr + +func enter_Listxattr(path string, dest []byte) (sz int, err error) { + funcref := get_ListxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LISTXATTR_A<<4, "") == 0 { + *funcref = impl_Listxattr + } else { + *funcref = error_Listxattr + } + return (*funcref)(path, dest) +} + +func error_Listxattr(path string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LLISTXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + 
err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LlistxattrAddr() *(func(path string, dest []byte) (sz int, err error)) + +var Llistxattr = enter_Llistxattr + +func enter_Llistxattr(path string, dest []byte) (sz int, err error) { + funcref := get_LlistxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LLISTXATTR_A<<4, "") == 0 { + *funcref = impl_Llistxattr + } else { + *funcref = error_Llistxattr + } + return (*funcref)(path, dest) +} + +func error_Llistxattr(path string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LREMOVEXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LremovexattrAddr() *(func(path string, attr string) (err error)) + +var Lremovexattr = enter_Lremovexattr + +func enter_Lremovexattr(path string, attr string) (err error) { + funcref := get_LremovexattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LREMOVEXATTR_A<<4, "") == 0 { + *funcref = impl_Lremovexattr + } else { + *funcref = error_Lremovexattr + } + return (*funcref)(path, attr) +} + +func error_Lremovexattr(path string, attr string) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lutimes(path string, tv []Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(tv) > 0 { + _p1 = unsafe.Pointer(&tv[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LUTIMES_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(tv))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LutimesAddr() *(func(path string, tv []Timeval) (err error)) + +var Lutimes = enter_Lutimes + +func enter_Lutimes(path string, tv []Timeval) (err error) { + funcref := get_LutimesAddr() + if funcptrtest(GetZosLibVec()+SYS___LUTIMES_A<<4, "") == 0 { + *funcref = impl_Lutimes + } else { + *funcref = error_Lutimes + } + return (*funcref)(path, tv) +} + +func error_Lutimes(path string, tv []Timeval) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MPROTECT<<4, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MSYNC<<4, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { 
+ err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Console2(cmsg *ConsMsg2, modstr *byte, concmd *uint32) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CONSOLE2<<4, uintptr(unsafe.Pointer(cmsg)), uintptr(unsafe.Pointer(modstr)), uintptr(unsafe.Pointer(concmd))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Poll(fds []PollFd, timeout int) (n int, err error) { + var _p0 unsafe.Pointer + if len(fds) > 0 { + _p0 = unsafe.Pointer(&fds[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_POLL<<4, uintptr(_p0), uintptr(len(fds)), uintptr(timeout)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___READDIR_R_A<<4, uintptr(dirp), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___STATFS_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_StatfsAddr() *(func(path string, buf *Statfs_t) (err error)) + +var Statfs = enter_Statfs + +func enter_Statfs(path string, buf *Statfs_t) (err error) { + funcref := get_StatfsAddr() + if funcptrtest(GetZosLibVec()+SYS___STATFS_A<<4, "") == 0 { + *funcref = impl_Statfs + } else { + *funcref = error_Statfs + } + return (*funcref)(path, buf) +} + +func error_Statfs(path string, buf *Statfs_t) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Syncfs(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SYNCFS<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_SyncfsAddr() *(func(fd int) (err error)) + +var Syncfs = enter_Syncfs + +func enter_Syncfs(fd int) (err error) { + funcref := get_SyncfsAddr() + if funcptrtest(GetZosLibVec()+SYS_SYNCFS<<4, "") == 0 { + *funcref = impl_Syncfs + } else { + *funcref = error_Syncfs + } + return (*funcref)(fd) +} + +func error_Syncfs(fd int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TIMES<<4, uintptr(unsafe.Pointer(tms))) + runtime.ExitSyscall() + ticks = uintptr(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func W_Getmntent(buff *byte, size int) (lastsys int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := 
CallLeFuncWithErr(GetZosLibVec()+SYS_W_GETMNTENT<<4, uintptr(unsafe.Pointer(buff)), uintptr(size)) + runtime.ExitSyscall() + lastsys = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func W_Getmntent_A(buff *byte, size int) (lastsys int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___W_GETMNTENT_A<<4, uintptr(unsafe.Pointer(buff)), uintptr(size)) + runtime.ExitSyscall() + lastsys = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(filesystem) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + var _p3 *byte + _p3, err = BytePtrFromString(parm) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MOUNT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(mtm), uintptr(parmlen), uintptr(unsafe.Pointer(_p3))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unmount_LE(filesystem string, mtm int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(filesystem) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UMOUNT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mtm)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHROOT_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SELECT<<4, uintptr(nmsgsfds), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout))) + runtime.ExitSyscall() + ret = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_____OSNAME_A<<4, uintptr(unsafe.Pointer(buf))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Unshare(flags int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_UNSHARE<<4, uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_UnshareAddr() *(func(flags int) (err error)) + +var Unshare = enter_Unshare + +func enter_Unshare(flags int) (err error) { + 
funcref := get_UnshareAddr() + if funcptrtest(GetZosLibVec()+SYS_UNSHARE<<4, "") == 0 { + *funcref = impl_Unshare + } else { + *funcref = error_Unshare + } + return (*funcref)(flags) +} + +func error_Unshare(flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gethostname(buf []byte) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETHOSTNAME_A<<4, uintptr(_p0), uintptr(len(buf))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETGID<<4) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETPID<<4) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETPGID<<4, uintptr(pid)) + pgid = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (pid int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETPPID<<4) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETPRIORITY<<4, uintptr(which), uintptr(who)) + runtime.ExitSyscall() + prio = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETRLIMIT<<4, uintptr(resource), uintptr(unsafe.Pointer(rlim))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getrusage(who int, rusage *rusage_zos) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETRUSAGE<<4, uintptr(who), uintptr(unsafe.Pointer(rusage))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETEGID<<4) + runtime.ExitSyscall() + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETEUID<<4) + runtime.ExitSyscall() + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETSID<<4, uintptr(pid)) + sid = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETUID<<4) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT + +func Kill(pid int, sig Signal) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_KILL<<4, uintptr(pid), uintptr(sig)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LCHOWN_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LINK_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldPath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newPath) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LINKAT_A<<4, uintptr(oldDirFd), uintptr(unsafe.Pointer(_p0)), uintptr(newDirFd), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LinkatAddr() *(func(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error)) + +var Linkat = enter_Linkat + +func enter_Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) { + funcref := get_LinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___LINKAT_A<<4, "") == 0 { + *funcref = impl_Linkat + } else { + *funcref = error_Linkat + } + return (*funcref)(oldDirFd, oldPath, newDirFd, newPath, flags) +} + +func error_Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_LISTEN<<4, uintptr(s), uintptr(n)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lstat(path string, stat *Stat_LE_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LSTAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKDIR_A<<4, uintptr(unsafe.Pointer(_p0)), 
uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKDIRAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MkdiratAddr() *(func(dirfd int, path string, mode uint32) (err error)) + +var Mkdirat = enter_Mkdirat + +func enter_Mkdirat(dirfd int, path string, mode uint32) (err error) { + funcref := get_MkdiratAddr() + if funcptrtest(GetZosLibVec()+SYS___MKDIRAT_A<<4, "") == 0 { + *funcref = impl_Mkdirat + } else { + *funcref = error_Mkdirat + } + return (*funcref)(dirfd, path, mode) +} + +func error_Mkdirat(dirfd int, path string, mode uint32) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKFIFO_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKNOD_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKNODAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MknodatAddr() *(func(dirfd int, path string, mode uint32, dev int) (err error)) + +var Mknodat = enter_Mknodat + +func enter_Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + funcref := get_MknodatAddr() + if funcptrtest(GetZosLibVec()+SYS___MKNODAT_A<<4, "") == 0 { + *funcref = impl_Mknodat + } else { + *funcref = error_Mknodat + } + return (*funcref)(dirfd, path, mode, dev) +} + +func error_Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_PivotRoot(newroot string, oldroot string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(oldroot) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___PIVOT_ROOT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + 
+//go:nosplit +func get_PivotRootAddr() *(func(newroot string, oldroot string) (err error)) + +var PivotRoot = enter_PivotRoot + +func enter_PivotRoot(newroot string, oldroot string) (err error) { + funcref := get_PivotRootAddr() + if funcptrtest(GetZosLibVec()+SYS___PIVOT_ROOT_A<<4, "") == 0 { + *funcref = impl_PivotRoot + } else { + *funcref = error_PivotRoot + } + return (*funcref)(newroot, oldroot) +} + +func error_PivotRoot(newroot string, oldroot string) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PREAD<<4, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := syscall_syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PWRITE<<4, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___PRCTL_A<<4, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_PrctlAddr() *(func(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error)) -func getrusage(who int, rusage *rusage_zos) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) +var Prctl = enter_Prctl + +func enter_Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + funcref := get_PrctlAddr() + if funcptrtest(GetZosLibVec()+SYS___PRCTL_A<<4, "") == 0 { + *funcref = impl_Prctl + } else { + *funcref = error_Prctl } - return + return (*funcref)(option, arg2, arg3, arg4, arg5) } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func error_Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getuid() (uid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETUID, 0, 0, 0) - uid = 
int(r0) +func impl_Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PRLIMIT<<4, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_PrlimitAddr() *(func(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error)) -func Kill(pid int, sig Signal) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) +var Prlimit = enter_Prlimit + +func enter_Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + funcref := get_PrlimitAddr() + if funcptrtest(GetZosLibVec()+SYS_PRLIMIT<<4, "") == 0 { + *funcref = impl_Prlimit + } else { + *funcref = error_Prlimit } + return (*funcref)(pid, resource, newlimit, old) +} + +func error_Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lchown(path string, uid int, gid int) (err error) { +func Rename(from string, to string) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(path) + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___LCHOWN_A, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RENAME_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Link(path string, link string) (err error) { +func impl_Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(path) + _p0, err = BytePtrFromString(oldpath) if err != nil { return } var _p1 *byte - _p1, err = BytePtrFromString(link) + _p1, err = BytePtrFromString(newpath) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___LINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RENAMEAT_A<<4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_RenameatAddr() *(func(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)) -func Listen(s int, n int) (err error) { - _, _, e1 := syscall_syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) - if e1 != 0 { - err = errnoErr(e1) +var Renameat = enter_Renameat + +func enter_Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + funcref := get_RenameatAddr() + if funcptrtest(GetZosLibVec()+SYS___RENAMEAT_A<<4, "") == 0 { + *funcref = impl_Renameat + } else { + *funcref = error_Renameat } + return (*funcref)(olddirfd, oldpath, newdirfd, newpath) +} + +func error_Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT -func lstat(path string, stat *Stat_LE_t) (err error) { +func impl_Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(path) + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___LSTAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RENAMEAT2_A<<4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_Renameat2Addr() *(func(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error)) -func Mkdir(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(SYS___MKDIR_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) +var Renameat2 = enter_Renameat2 + +func enter_Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + funcref := get_Renameat2Addr() + if funcptrtest(GetZosLibVec()+SYS___RENAMEAT2_A<<4, "") == 0 { + *funcref = impl_Renameat2 + } else { + *funcref = error_Renameat2 } + return (*funcref)(olddirfd, oldpath, newdirfd, newpath, flags) +} + +func error_Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mkfifo(path string, mode uint32) (err error) { +func Rmdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___MKFIFO_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RMDIR_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(SYS___MKNOD_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) +func Seek(fd int, offset int64, whence int) (off int64, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_LSEEK<<4, uintptr(fd), uintptr(offset), uintptr(whence)) + runtime.ExitSyscall() + off = int64(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) +func Setegid(egid int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETEGID<<4, uintptr(egid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } - r0, _, e1 := syscall_syscall6(SYS_PREAD, uintptr(fd), 
uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETEUID<<4, uintptr(euid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func impl_Sethostname(p []byte) (err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SETHOSTNAME_A<<4, uintptr(_p0), uintptr(len(p))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_SethostnameAddr() *(func(p []byte) (err error)) -func Readlink(path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) +var Sethostname = enter_Sethostname + +func enter_Sethostname(p []byte) (err error) { + funcref := get_SethostnameAddr() + if funcptrtest(GetZosLibVec()+SYS___SETHOSTNAME_A<<4, "") == 0 { + *funcref = impl_Sethostname } else { - _p1 = unsafe.Pointer(&_zero) + *funcref = error_Sethostname } - r0, _, e1 := syscall_syscall(SYS___READLINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return + return (*funcref)(p) } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rename(from string, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := syscall_syscall(SYS___RENAME_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } +func error_Sethostname(p []byte) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Rmdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(SYS___RMDIR_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Setns(fd int, nstype int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETNS<<4, uintptr(fd), uintptr(nstype)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_SetnsAddr() *(func(fd int, nstype int) (err error)) -func Seek(fd int, offset int64, whence int) (off int64, err error) { - r0, _, e1 := syscall_syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) - off = int64(r0) - if e1 != 0 { - err = errnoErr(e1) +var Setns = enter_Setns + +func enter_Setns(fd int, nstype int) (err error) { + funcref := get_SetnsAddr() + if funcptrtest(GetZosLibVec()+SYS_SETNS<<4, "") == 0 { + *funcref = impl_Setns + } else { + *funcref 
= error_Setns } + return (*funcref)(fd, nstype) +} + +func error_Setns(fd int, nstype int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := syscall_syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETPRIORITY<<4, uintptr(which), uintptr(who), uintptr(prio)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -971,9 +2910,9 @@ func Setpriority(which int, who int, prio int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETPGID<<4, uintptr(pid), uintptr(pgid)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -981,9 +2920,9 @@ func Setpgid(pid int, pgid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(resource int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETRLIMIT<<4, uintptr(resource), uintptr(unsafe.Pointer(lim))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -991,9 +2930,9 @@ func Setrlimit(resource int, lim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETREGID<<4, uintptr(rgid), uintptr(egid)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1001,9 +2940,9 @@ func Setregid(rgid int, egid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETREUID<<4, uintptr(ruid), uintptr(euid)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1011,10 +2950,10 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_SETSID, 0, 0, 0) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec() + SYS_SETSID<<4) pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1022,9 +2961,11 @@ func Setsid() (pid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := syscall_syscall(SYS_SETUID, uintptr(uid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETUID<<4, uintptr(uid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1032,9 +2973,11 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(uid int) (err error) { - _, _, e1 := syscall_syscall(SYS_SETGID, uintptr(uid), 0, 0) 
- if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETGID<<4, uintptr(uid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1042,9 +2985,11 @@ func Setgid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(fd int, how int) (err error) { - _, _, e1 := syscall_syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHUTDOWN<<4, uintptr(fd), uintptr(how)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1057,9 +3002,11 @@ func stat(path string, statLE *Stat_LE_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___STAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(statLE)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___STAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(statLE))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1077,17 +3024,63 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___SYMLINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SYMLINK_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Symlinkat(oldPath string, dirfd int, newPath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldPath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newPath) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SYMLINKAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(dirfd), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } +//go:nosplit +func get_SymlinkatAddr() *(func(oldPath string, dirfd int, newPath string) (err error)) + +var Symlinkat = enter_Symlinkat + +func enter_Symlinkat(oldPath string, dirfd int, newPath string) (err error) { + funcref := get_SymlinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___SYMLINKAT_A<<4, "") == 0 { + *funcref = impl_Symlinkat + } else { + *funcref = error_Symlinkat + } + return (*funcref)(oldPath, dirfd, newPath) +} + +func error_Symlinkat(oldPath string, dirfd int, newPath string) (err error) { + err = ENOSYS + return +} + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() { - syscall_syscall(SYS_SYNC, 0, 0, 0) + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec() + SYS_SYNC<<4) + runtime.ExitSyscall() return } @@ -1099,9 +3092,11 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___TRUNCATE_A, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___TRUNCATE_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(length)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1109,9 +3104,11 @@ func Truncate(path string, length 
int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Tcgetattr(fildes int, termptr *Termios) (err error) { - _, _, e1 := syscall_syscall(SYS_TCGETATTR, uintptr(fildes), uintptr(unsafe.Pointer(termptr)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TCGETATTR<<4, uintptr(fildes), uintptr(unsafe.Pointer(termptr))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1119,9 +3116,11 @@ func Tcgetattr(fildes int, termptr *Termios) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Tcsetattr(fildes int, when int, termptr *Termios) (err error) { - _, _, e1 := syscall_syscall(SYS_TCSETATTR, uintptr(fildes), uintptr(when), uintptr(unsafe.Pointer(termptr))) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TCSETATTR<<4, uintptr(fildes), uintptr(when), uintptr(unsafe.Pointer(termptr))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1129,7 +3128,9 @@ func Tcsetattr(fildes int, when int, termptr *Termios) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(mask int) (oldmask int) { - r0, _, _ := syscall_syscall(SYS_UMASK, uintptr(mask), 0, 0) + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec()+SYS_UMASK<<4, uintptr(mask)) + runtime.ExitSyscall() oldmask = int(r0) return } @@ -1142,10 +3143,49 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UNLINK_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UNLINK_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UNLINKAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_UnlinkatAddr() *(func(dirfd int, path string, flags int) (err error)) + +var Unlinkat = enter_Unlinkat + +func enter_Unlinkat(dirfd int, path string, flags int) (err error) { + funcref := get_UnlinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___UNLINKAT_A<<4, "") == 0 { + *funcref = impl_Unlinkat + } else { + *funcref = error_Unlinkat } + return (*funcref)(dirfd, path, flags) +} + +func error_Unlinkat(dirfd int, path string, flags int) (err error) { + err = ENOSYS return } @@ -1157,9 +3197,11 @@ func Utime(path string, utim *Utimbuf) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UTIME_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(utim)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UTIME_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(utim))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1172,11 +3214,91 @@ func open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(SYS___OPEN_A, 
uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___OPEN_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___OPENAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_openatAddr() *(func(dirfd int, path string, flags int, mode uint32) (fd int, err error)) + +var openat = enter_openat + +func enter_openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + funcref := get_openatAddr() + if funcptrtest(GetZosLibVec()+SYS___OPENAT_A<<4, "") == 0 { + *funcref = impl_openat + } else { + *funcref = error_openat + } + return (*funcref)(dirfd, path, flags, mode) +} + +func error_openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + fd = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___OPENAT2_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(open_how)), uintptr(size)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_openat2Addr() *(func(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error)) + +var openat2 = enter_openat2 + +func enter_openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { + funcref := get_openat2Addr() + if funcptrtest(GetZosLibVec()+SYS___OPENAT2_A<<4, "") == 0 { + *funcref = impl_openat2 + } else { + *funcref = error_openat2 } + return (*funcref)(dirfd, path, open_how, size) +} + +func error_openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { + fd = -1 + err = ENOSYS return } @@ -1188,9 +3310,23 @@ func remove(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_REMOVE<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func waitid(idType int, id int, info *Siginfo, options int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WAITID<<4, uintptr(idType), uintptr(id), uintptr(unsafe.Pointer(info)), uintptr(options)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1198,10 +3334,12 @@ func remove(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func waitpid(pid int, wstatus *_C_int, options int) 
(wpid int, err error) { - r0, _, e1 := syscall_syscall(SYS_WAITPID, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WAITPID<<4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options)) + runtime.ExitSyscall() wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1209,9 +3347,9 @@ func waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func gettimeofday(tv *timeval_zos) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETTIMEOFDAY<<4, uintptr(unsafe.Pointer(tv))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1219,9 +3357,9 @@ func gettimeofday(tv *timeval_zos) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe(p *[2]_C_int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PIPE<<4, uintptr(unsafe.Pointer(p))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1234,20 +3372,87 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UTIMES_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UTIMES_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) { - r0, _, e1 := syscall_syscall6(SYS_SELECT, uintptr(nmsgsfds), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func impl_utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UTIMENSAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(ts)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_utimensatAddr() *(func(dirfd int, path string, ts *[2]Timespec, flags int) (err error)) + +var utimensat = enter_utimensat + +func enter_utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) { + funcref := get_utimensatAddr() + if funcptrtest(GetZosLibVec()+SYS___UTIMENSAT_A<<4, "") == 0 { + *funcref = impl_utimensat + } else { + *funcref = error_utimensat + } + return (*funcref)(dirfd, path, ts, flags) +} + +func error_utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Posix_openpt(oflag int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_POSIX_OPENPT<<4, uintptr(oflag)) + runtime.ExitSyscall() + fd = int(r0) + if 
int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Grantpt(fildes int) (rc int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GRANTPT<<4, uintptr(fildes)) + runtime.ExitSyscall() + rc = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlockpt(fildes int) (rc int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_UNLOCKPT<<4, uintptr(fildes)) + runtime.ExitSyscall() + rc = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 0cc3ce496..53aef5dc5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -452,4 +452,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 856d92d69..71d524763 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -374,4 +374,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 8d467094c..c74770613 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -416,4 +416,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index edc173244..f96e214f6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -319,4 +319,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 445eba206..28425346c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -313,4 +313,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index adba01bca..d0953018d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -436,4 +436,9 @@ const ( SYS_FUTEX_WAKE = 4454 SYS_FUTEX_WAIT = 4455 SYS_FUTEX_REQUEUE = 
4456 + SYS_STATMOUNT = 4457 + SYS_LISTMOUNT = 4458 + SYS_LSM_GET_SELF_ATTR = 4459 + SYS_LSM_SET_SELF_ATTR = 4460 + SYS_LSM_LIST_MODULES = 4461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 014c4e9c7..295c7f4b8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -366,4 +366,9 @@ const ( SYS_FUTEX_WAKE = 5454 SYS_FUTEX_WAIT = 5455 SYS_FUTEX_REQUEUE = 5456 + SYS_STATMOUNT = 5457 + SYS_LISTMOUNT = 5458 + SYS_LSM_GET_SELF_ATTR = 5459 + SYS_LSM_SET_SELF_ATTR = 5460 + SYS_LSM_LIST_MODULES = 5461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index ccc97d74d..d1a9eaca7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -366,4 +366,9 @@ const ( SYS_FUTEX_WAKE = 5454 SYS_FUTEX_WAIT = 5455 SYS_FUTEX_REQUEUE = 5456 + SYS_STATMOUNT = 5457 + SYS_LISTMOUNT = 5458 + SYS_LSM_GET_SELF_ATTR = 5459 + SYS_LSM_SET_SELF_ATTR = 5460 + SYS_LSM_LIST_MODULES = 5461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index ec2b64a95..bec157c39 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -436,4 +436,9 @@ const ( SYS_FUTEX_WAKE = 4454 SYS_FUTEX_WAIT = 4455 SYS_FUTEX_REQUEUE = 4456 + SYS_STATMOUNT = 4457 + SYS_LISTMOUNT = 4458 + SYS_LSM_GET_SELF_ATTR = 4459 + SYS_LSM_SET_SELF_ATTR = 4460 + SYS_LSM_LIST_MODULES = 4461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 21a839e33..7ee7bdc43 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -443,4 +443,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index c11121ec3..fad1f25b4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -415,4 +415,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 909b631fc..7d3e16357 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -415,4 +415,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index e49bed16e..0ed53ad9f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -320,4 +320,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + 
SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 66017d2d3..2fba04ad5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -381,4 +381,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 47bab18dc..621d00d74 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -394,4 +394,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go index b2e308581..5e8c263ca 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go @@ -1,2669 +1,2852 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// go run mksyscall_zos_s390x.go -o_sysnum zsysnum_zos_s390x.go -o_syscall zsyscall_zos_s390x.go -i_syscall syscall_zos_s390x.go -o_asm zsymaddr_zos_s390x.s +// Code generated by the command above; see README.md. DO NOT EDIT. //go:build zos && s390x package unix -// TODO: auto-generate. 
- const ( - SYS_ACOSD128 = 0xB80 - SYS_ACOSD32 = 0xB7E - SYS_ACOSD64 = 0xB7F - SYS_ACOSHD128 = 0xB83 - SYS_ACOSHD32 = 0xB81 - SYS_ACOSHD64 = 0xB82 - SYS_AIO_FSYNC = 0xC69 - SYS_ASCTIME = 0x0AE - SYS_ASCTIME64 = 0xCD7 - SYS_ASCTIME64_R = 0xCD8 - SYS_ASIND128 = 0xB86 - SYS_ASIND32 = 0xB84 - SYS_ASIND64 = 0xB85 - SYS_ASINHD128 = 0xB89 - SYS_ASINHD32 = 0xB87 - SYS_ASINHD64 = 0xB88 - SYS_ATAN2D128 = 0xB8F - SYS_ATAN2D32 = 0xB8D - SYS_ATAN2D64 = 0xB8E - SYS_ATAND128 = 0xB8C - SYS_ATAND32 = 0xB8A - SYS_ATAND64 = 0xB8B - SYS_ATANHD128 = 0xB92 - SYS_ATANHD32 = 0xB90 - SYS_ATANHD64 = 0xB91 - SYS_BIND2ADDRSEL = 0xD59 - SYS_C16RTOMB = 0xD40 - SYS_C32RTOMB = 0xD41 - SYS_CBRTD128 = 0xB95 - SYS_CBRTD32 = 0xB93 - SYS_CBRTD64 = 0xB94 - SYS_CEILD128 = 0xB98 - SYS_CEILD32 = 0xB96 - SYS_CEILD64 = 0xB97 - SYS_CLEARENV = 0x0C9 - SYS_CLEARERR_UNLOCKED = 0xCA1 - SYS_CLOCK = 0x0AA - SYS_CLOGL = 0xA00 - SYS_CLRMEMF = 0x0BD - SYS_CONJ = 0xA03 - SYS_CONJF = 0xA06 - SYS_CONJL = 0xA09 - SYS_COPYSIGND128 = 0xB9E - SYS_COPYSIGND32 = 0xB9C - SYS_COPYSIGND64 = 0xB9D - SYS_COSD128 = 0xBA1 - SYS_COSD32 = 0xB9F - SYS_COSD64 = 0xBA0 - SYS_COSHD128 = 0xBA4 - SYS_COSHD32 = 0xBA2 - SYS_COSHD64 = 0xBA3 - SYS_CPOW = 0xA0C - SYS_CPOWF = 0xA0F - SYS_CPOWL = 0xA12 - SYS_CPROJ = 0xA15 - SYS_CPROJF = 0xA18 - SYS_CPROJL = 0xA1B - SYS_CREAL = 0xA1E - SYS_CREALF = 0xA21 - SYS_CREALL = 0xA24 - SYS_CSIN = 0xA27 - SYS_CSINF = 0xA2A - SYS_CSINH = 0xA30 - SYS_CSINHF = 0xA33 - SYS_CSINHL = 0xA36 - SYS_CSINL = 0xA2D - SYS_CSNAP = 0x0C5 - SYS_CSQRT = 0xA39 - SYS_CSQRTF = 0xA3C - SYS_CSQRTL = 0xA3F - SYS_CTAN = 0xA42 - SYS_CTANF = 0xA45 - SYS_CTANH = 0xA4B - SYS_CTANHF = 0xA4E - SYS_CTANHL = 0xA51 - SYS_CTANL = 0xA48 - SYS_CTIME = 0x0AB - SYS_CTIME64 = 0xCD9 - SYS_CTIME64_R = 0xCDA - SYS_CTRACE = 0x0C6 - SYS_DIFFTIME = 0x0A7 - SYS_DIFFTIME64 = 0xCDB - SYS_DLADDR = 0xC82 - SYS_DYNALLOC = 0x0C3 - SYS_DYNFREE = 0x0C2 - SYS_ERFCD128 = 0xBAA - SYS_ERFCD32 = 0xBA8 - SYS_ERFCD64 = 0xBA9 - SYS_ERFD128 = 0xBA7 - SYS_ERFD32 = 0xBA5 - SYS_ERFD64 = 0xBA6 - SYS_EXP2D128 = 0xBB0 - SYS_EXP2D32 = 0xBAE - SYS_EXP2D64 = 0xBAF - SYS_EXPD128 = 0xBAD - SYS_EXPD32 = 0xBAB - SYS_EXPD64 = 0xBAC - SYS_EXPM1D128 = 0xBB3 - SYS_EXPM1D32 = 0xBB1 - SYS_EXPM1D64 = 0xBB2 - SYS_FABSD128 = 0xBB6 - SYS_FABSD32 = 0xBB4 - SYS_FABSD64 = 0xBB5 - SYS_FDELREC_UNLOCKED = 0xCA2 - SYS_FDIMD128 = 0xBB9 - SYS_FDIMD32 = 0xBB7 - SYS_FDIMD64 = 0xBB8 - SYS_FDOPEN_UNLOCKED = 0xCFC - SYS_FECLEAREXCEPT = 0xAEA - SYS_FEGETENV = 0xAEB - SYS_FEGETEXCEPTFLAG = 0xAEC - SYS_FEGETROUND = 0xAED - SYS_FEHOLDEXCEPT = 0xAEE - SYS_FEOF_UNLOCKED = 0xCA3 - SYS_FERAISEEXCEPT = 0xAEF - SYS_FERROR_UNLOCKED = 0xCA4 - SYS_FESETENV = 0xAF0 - SYS_FESETEXCEPTFLAG = 0xAF1 - SYS_FESETROUND = 0xAF2 - SYS_FETCHEP = 0x0BF - SYS_FETESTEXCEPT = 0xAF3 - SYS_FEUPDATEENV = 0xAF4 - SYS_FE_DEC_GETROUND = 0xBBA - SYS_FE_DEC_SETROUND = 0xBBB - SYS_FFLUSH_UNLOCKED = 0xCA5 - SYS_FGETC_UNLOCKED = 0xC80 - SYS_FGETPOS64 = 0xCEE - SYS_FGETPOS64_UNLOCKED = 0xCF4 - SYS_FGETPOS_UNLOCKED = 0xCA6 - SYS_FGETS_UNLOCKED = 0xC7C - SYS_FGETWC_UNLOCKED = 0xCA7 - SYS_FGETWS_UNLOCKED = 0xCA8 - SYS_FILENO_UNLOCKED = 0xCA9 - SYS_FLDATA = 0x0C1 - SYS_FLDATA_UNLOCKED = 0xCAA - SYS_FLOCATE_UNLOCKED = 0xCAB - SYS_FLOORD128 = 0xBBE - SYS_FLOORD32 = 0xBBC - SYS_FLOORD64 = 0xBBD - SYS_FMA = 0xA63 - SYS_FMAD128 = 0xBC1 - SYS_FMAD32 = 0xBBF - SYS_FMAD64 = 0xBC0 - SYS_FMAF = 0xA66 - SYS_FMAL = 0xA69 - SYS_FMAX = 0xA6C - SYS_FMAXD128 = 0xBC4 - SYS_FMAXD32 = 0xBC2 - SYS_FMAXD64 = 0xBC3 - SYS_FMAXF = 0xA6F - SYS_FMAXL = 0xA72 - SYS_FMIN = 0xA75 - SYS_FMIND128 = 0xBC7 - 
SYS_FMIND32 = 0xBC5 - SYS_FMIND64 = 0xBC6 - SYS_FMINF = 0xA78 - SYS_FMINL = 0xA7B - SYS_FMODD128 = 0xBCA - SYS_FMODD32 = 0xBC8 - SYS_FMODD64 = 0xBC9 - SYS_FOPEN64 = 0xD49 - SYS_FOPEN64_UNLOCKED = 0xD4A - SYS_FOPEN_UNLOCKED = 0xCFA - SYS_FPRINTF_UNLOCKED = 0xCAC - SYS_FPUTC_UNLOCKED = 0xC81 - SYS_FPUTS_UNLOCKED = 0xC7E - SYS_FPUTWC_UNLOCKED = 0xCAD - SYS_FPUTWS_UNLOCKED = 0xCAE - SYS_FREAD_NOUPDATE = 0xCEC - SYS_FREAD_NOUPDATE_UNLOCKED = 0xCED - SYS_FREAD_UNLOCKED = 0xC7B - SYS_FREEIFADDRS = 0xCE6 - SYS_FREOPEN64 = 0xD4B - SYS_FREOPEN64_UNLOCKED = 0xD4C - SYS_FREOPEN_UNLOCKED = 0xCFB - SYS_FREXPD128 = 0xBCE - SYS_FREXPD32 = 0xBCC - SYS_FREXPD64 = 0xBCD - SYS_FSCANF_UNLOCKED = 0xCAF - SYS_FSEEK64 = 0xCEF - SYS_FSEEK64_UNLOCKED = 0xCF5 - SYS_FSEEKO64 = 0xCF0 - SYS_FSEEKO64_UNLOCKED = 0xCF6 - SYS_FSEEKO_UNLOCKED = 0xCB1 - SYS_FSEEK_UNLOCKED = 0xCB0 - SYS_FSETPOS64 = 0xCF1 - SYS_FSETPOS64_UNLOCKED = 0xCF7 - SYS_FSETPOS_UNLOCKED = 0xCB3 - SYS_FTELL64 = 0xCF2 - SYS_FTELL64_UNLOCKED = 0xCF8 - SYS_FTELLO64 = 0xCF3 - SYS_FTELLO64_UNLOCKED = 0xCF9 - SYS_FTELLO_UNLOCKED = 0xCB5 - SYS_FTELL_UNLOCKED = 0xCB4 - SYS_FUPDATE = 0x0B5 - SYS_FUPDATE_UNLOCKED = 0xCB7 - SYS_FWIDE_UNLOCKED = 0xCB8 - SYS_FWPRINTF_UNLOCKED = 0xCB9 - SYS_FWRITE_UNLOCKED = 0xC7A - SYS_FWSCANF_UNLOCKED = 0xCBA - SYS_GETDATE64 = 0xD4F - SYS_GETIFADDRS = 0xCE7 - SYS_GETIPV4SOURCEFILTER = 0xC77 - SYS_GETSOURCEFILTER = 0xC79 - SYS_GETSYNTX = 0x0FD - SYS_GETS_UNLOCKED = 0xC7D - SYS_GETTIMEOFDAY64 = 0xD50 - SYS_GETWCHAR_UNLOCKED = 0xCBC - SYS_GETWC_UNLOCKED = 0xCBB - SYS_GMTIME = 0x0B0 - SYS_GMTIME64 = 0xCDC - SYS_GMTIME64_R = 0xCDD - SYS_HYPOTD128 = 0xBD1 - SYS_HYPOTD32 = 0xBCF - SYS_HYPOTD64 = 0xBD0 - SYS_ILOGBD128 = 0xBD4 - SYS_ILOGBD32 = 0xBD2 - SYS_ILOGBD64 = 0xBD3 - SYS_ILOGBF = 0xA7E - SYS_ILOGBL = 0xA81 - SYS_INET6_IS_SRCADDR = 0xD5A - SYS_ISBLANK = 0x0FE - SYS_ISWALNUM = 0x0FF - SYS_LDEXPD128 = 0xBD7 - SYS_LDEXPD32 = 0xBD5 - SYS_LDEXPD64 = 0xBD6 - SYS_LGAMMAD128 = 0xBDA - SYS_LGAMMAD32 = 0xBD8 - SYS_LGAMMAD64 = 0xBD9 - SYS_LIO_LISTIO = 0xC6A - SYS_LLRINT = 0xA84 - SYS_LLRINTD128 = 0xBDD - SYS_LLRINTD32 = 0xBDB - SYS_LLRINTD64 = 0xBDC - SYS_LLRINTF = 0xA87 - SYS_LLRINTL = 0xA8A - SYS_LLROUND = 0xA8D - SYS_LLROUNDD128 = 0xBE0 - SYS_LLROUNDD32 = 0xBDE - SYS_LLROUNDD64 = 0xBDF - SYS_LLROUNDF = 0xA90 - SYS_LLROUNDL = 0xA93 - SYS_LOCALTIM = 0x0B1 - SYS_LOCALTIME = 0x0B1 - SYS_LOCALTIME64 = 0xCDE - SYS_LOCALTIME64_R = 0xCDF - SYS_LOG10D128 = 0xBE6 - SYS_LOG10D32 = 0xBE4 - SYS_LOG10D64 = 0xBE5 - SYS_LOG1PD128 = 0xBE9 - SYS_LOG1PD32 = 0xBE7 - SYS_LOG1PD64 = 0xBE8 - SYS_LOG2D128 = 0xBEC - SYS_LOG2D32 = 0xBEA - SYS_LOG2D64 = 0xBEB - SYS_LOGBD128 = 0xBEF - SYS_LOGBD32 = 0xBED - SYS_LOGBD64 = 0xBEE - SYS_LOGBF = 0xA96 - SYS_LOGBL = 0xA99 - SYS_LOGD128 = 0xBE3 - SYS_LOGD32 = 0xBE1 - SYS_LOGD64 = 0xBE2 - SYS_LRINT = 0xA9C - SYS_LRINTD128 = 0xBF2 - SYS_LRINTD32 = 0xBF0 - SYS_LRINTD64 = 0xBF1 - SYS_LRINTF = 0xA9F - SYS_LRINTL = 0xAA2 - SYS_LROUNDD128 = 0xBF5 - SYS_LROUNDD32 = 0xBF3 - SYS_LROUNDD64 = 0xBF4 - SYS_LROUNDL = 0xAA5 - SYS_MBLEN = 0x0AF - SYS_MBRTOC16 = 0xD42 - SYS_MBRTOC32 = 0xD43 - SYS_MEMSET = 0x0A3 - SYS_MKTIME = 0x0AC - SYS_MKTIME64 = 0xCE0 - SYS_MODFD128 = 0xBF8 - SYS_MODFD32 = 0xBF6 - SYS_MODFD64 = 0xBF7 - SYS_NAN = 0xAA8 - SYS_NAND128 = 0xBFB - SYS_NAND32 = 0xBF9 - SYS_NAND64 = 0xBFA - SYS_NANF = 0xAAA - SYS_NANL = 0xAAC - SYS_NEARBYINT = 0xAAE - SYS_NEARBYINTD128 = 0xBFE - SYS_NEARBYINTD32 = 0xBFC - SYS_NEARBYINTD64 = 0xBFD - SYS_NEARBYINTF = 0xAB1 - SYS_NEARBYINTL = 0xAB4 - SYS_NEXTAFTERD128 = 0xC01 - SYS_NEXTAFTERD32 = 0xBFF - 
SYS_NEXTAFTERD64 = 0xC00 - SYS_NEXTAFTERF = 0xAB7 - SYS_NEXTAFTERL = 0xABA - SYS_NEXTTOWARD = 0xABD - SYS_NEXTTOWARDD128 = 0xC04 - SYS_NEXTTOWARDD32 = 0xC02 - SYS_NEXTTOWARDD64 = 0xC03 - SYS_NEXTTOWARDF = 0xAC0 - SYS_NEXTTOWARDL = 0xAC3 - SYS_NL_LANGINFO = 0x0FC - SYS_PERROR_UNLOCKED = 0xCBD - SYS_POSIX_FALLOCATE = 0xCE8 - SYS_POSIX_MEMALIGN = 0xCE9 - SYS_POSIX_OPENPT = 0xC66 - SYS_POWD128 = 0xC07 - SYS_POWD32 = 0xC05 - SYS_POWD64 = 0xC06 - SYS_PRINTF_UNLOCKED = 0xCBE - SYS_PSELECT = 0xC67 - SYS_PTHREAD_ATTR_GETSTACK = 0xB3E - SYS_PTHREAD_ATTR_SETSTACK = 0xB3F - SYS_PTHREAD_SECURITY_APPLID_NP = 0xCE4 - SYS_PUTS_UNLOCKED = 0xC7F - SYS_PUTWCHAR_UNLOCKED = 0xCC0 - SYS_PUTWC_UNLOCKED = 0xCBF - SYS_QUANTEXPD128 = 0xD46 - SYS_QUANTEXPD32 = 0xD44 - SYS_QUANTEXPD64 = 0xD45 - SYS_QUANTIZED128 = 0xC0A - SYS_QUANTIZED32 = 0xC08 - SYS_QUANTIZED64 = 0xC09 - SYS_REMAINDERD128 = 0xC0D - SYS_REMAINDERD32 = 0xC0B - SYS_REMAINDERD64 = 0xC0C - SYS_RESIZE_ALLOC = 0xCEB - SYS_REWIND_UNLOCKED = 0xCC1 - SYS_RINTD128 = 0xC13 - SYS_RINTD32 = 0xC11 - SYS_RINTD64 = 0xC12 - SYS_RINTF = 0xACB - SYS_RINTL = 0xACD - SYS_ROUND = 0xACF - SYS_ROUNDD128 = 0xC16 - SYS_ROUNDD32 = 0xC14 - SYS_ROUNDD64 = 0xC15 - SYS_ROUNDF = 0xAD2 - SYS_ROUNDL = 0xAD5 - SYS_SAMEQUANTUMD128 = 0xC19 - SYS_SAMEQUANTUMD32 = 0xC17 - SYS_SAMEQUANTUMD64 = 0xC18 - SYS_SCALBLN = 0xAD8 - SYS_SCALBLND128 = 0xC1C - SYS_SCALBLND32 = 0xC1A - SYS_SCALBLND64 = 0xC1B - SYS_SCALBLNF = 0xADB - SYS_SCALBLNL = 0xADE - SYS_SCALBND128 = 0xC1F - SYS_SCALBND32 = 0xC1D - SYS_SCALBND64 = 0xC1E - SYS_SCALBNF = 0xAE3 - SYS_SCALBNL = 0xAE6 - SYS_SCANF_UNLOCKED = 0xCC2 - SYS_SCHED_YIELD = 0xB32 - SYS_SETENV = 0x0C8 - SYS_SETIPV4SOURCEFILTER = 0xC76 - SYS_SETSOURCEFILTER = 0xC78 - SYS_SHM_OPEN = 0xC8C - SYS_SHM_UNLINK = 0xC8D - SYS_SIND128 = 0xC22 - SYS_SIND32 = 0xC20 - SYS_SIND64 = 0xC21 - SYS_SINHD128 = 0xC25 - SYS_SINHD32 = 0xC23 - SYS_SINHD64 = 0xC24 - SYS_SIZEOF_ALLOC = 0xCEA - SYS_SOCKATMARK = 0xC68 - SYS_SQRTD128 = 0xC28 - SYS_SQRTD32 = 0xC26 - SYS_SQRTD64 = 0xC27 - SYS_STRCHR = 0x0A0 - SYS_STRCSPN = 0x0A1 - SYS_STRERROR = 0x0A8 - SYS_STRERROR_R = 0xB33 - SYS_STRFTIME = 0x0B2 - SYS_STRLEN = 0x0A9 - SYS_STRPBRK = 0x0A2 - SYS_STRSPN = 0x0A4 - SYS_STRSTR = 0x0A5 - SYS_STRTOD128 = 0xC2B - SYS_STRTOD32 = 0xC29 - SYS_STRTOD64 = 0xC2A - SYS_STRTOK = 0x0A6 - SYS_TAND128 = 0xC2E - SYS_TAND32 = 0xC2C - SYS_TAND64 = 0xC2D - SYS_TANHD128 = 0xC31 - SYS_TANHD32 = 0xC2F - SYS_TANHD64 = 0xC30 - SYS_TGAMMAD128 = 0xC34 - SYS_TGAMMAD32 = 0xC32 - SYS_TGAMMAD64 = 0xC33 - SYS_TIME = 0x0AD - SYS_TIME64 = 0xCE1 - SYS_TMPFILE64 = 0xD4D - SYS_TMPFILE64_UNLOCKED = 0xD4E - SYS_TMPFILE_UNLOCKED = 0xCFD - SYS_TRUNCD128 = 0xC40 - SYS_TRUNCD32 = 0xC3E - SYS_TRUNCD64 = 0xC3F - SYS_UNGETC_UNLOCKED = 0xCC3 - SYS_UNGETWC_UNLOCKED = 0xCC4 - SYS_UNSETENV = 0xB34 - SYS_VFPRINTF_UNLOCKED = 0xCC5 - SYS_VFSCANF_UNLOCKED = 0xCC7 - SYS_VFWPRINTF_UNLOCKED = 0xCC9 - SYS_VFWSCANF_UNLOCKED = 0xCCB - SYS_VPRINTF_UNLOCKED = 0xCCD - SYS_VSCANF_UNLOCKED = 0xCCF - SYS_VWPRINTF_UNLOCKED = 0xCD1 - SYS_VWSCANF_UNLOCKED = 0xCD3 - SYS_WCSTOD128 = 0xC43 - SYS_WCSTOD32 = 0xC41 - SYS_WCSTOD64 = 0xC42 - SYS_WPRINTF_UNLOCKED = 0xCD5 - SYS_WSCANF_UNLOCKED = 0xCD6 - SYS__FLUSHLBF = 0xD68 - SYS__FLUSHLBF_UNLOCKED = 0xD6F - SYS___ACOSHF_H = 0xA54 - SYS___ACOSHL_H = 0xA55 - SYS___ASINHF_H = 0xA56 - SYS___ASINHL_H = 0xA57 - SYS___ATANPID128 = 0xC6D - SYS___ATANPID32 = 0xC6B - SYS___ATANPID64 = 0xC6C - SYS___CBRTF_H = 0xA58 - SYS___CBRTL_H = 0xA59 - SYS___CDUMP = 0x0C4 - SYS___CLASS = 0xAFA - SYS___CLASS2 = 0xB99 - SYS___CLASS2D128 = 
0xC99 - SYS___CLASS2D32 = 0xC97 - SYS___CLASS2D64 = 0xC98 - SYS___CLASS2F = 0xC91 - SYS___CLASS2F_B = 0xC93 - SYS___CLASS2F_H = 0xC94 - SYS___CLASS2L = 0xC92 - SYS___CLASS2L_B = 0xC95 - SYS___CLASS2L_H = 0xC96 - SYS___CLASS2_B = 0xB9A - SYS___CLASS2_H = 0xB9B - SYS___CLASS_B = 0xAFB - SYS___CLASS_H = 0xAFC - SYS___CLOGL_B = 0xA01 - SYS___CLOGL_H = 0xA02 - SYS___CLRENV = 0x0C9 - SYS___CLRMF = 0x0BD - SYS___CODEPAGE_INFO = 0xC64 - SYS___CONJF_B = 0xA07 - SYS___CONJF_H = 0xA08 - SYS___CONJL_B = 0xA0A - SYS___CONJL_H = 0xA0B - SYS___CONJ_B = 0xA04 - SYS___CONJ_H = 0xA05 - SYS___COPYSIGN_B = 0xA5A - SYS___COPYSIGN_H = 0xAF5 - SYS___COSPID128 = 0xC70 - SYS___COSPID32 = 0xC6E - SYS___COSPID64 = 0xC6F - SYS___CPOWF_B = 0xA10 - SYS___CPOWF_H = 0xA11 - SYS___CPOWL_B = 0xA13 - SYS___CPOWL_H = 0xA14 - SYS___CPOW_B = 0xA0D - SYS___CPOW_H = 0xA0E - SYS___CPROJF_B = 0xA19 - SYS___CPROJF_H = 0xA1A - SYS___CPROJL_B = 0xA1C - SYS___CPROJL_H = 0xA1D - SYS___CPROJ_B = 0xA16 - SYS___CPROJ_H = 0xA17 - SYS___CREALF_B = 0xA22 - SYS___CREALF_H = 0xA23 - SYS___CREALL_B = 0xA25 - SYS___CREALL_H = 0xA26 - SYS___CREAL_B = 0xA1F - SYS___CREAL_H = 0xA20 - SYS___CSINF_B = 0xA2B - SYS___CSINF_H = 0xA2C - SYS___CSINHF_B = 0xA34 - SYS___CSINHF_H = 0xA35 - SYS___CSINHL_B = 0xA37 - SYS___CSINHL_H = 0xA38 - SYS___CSINH_B = 0xA31 - SYS___CSINH_H = 0xA32 - SYS___CSINL_B = 0xA2E - SYS___CSINL_H = 0xA2F - SYS___CSIN_B = 0xA28 - SYS___CSIN_H = 0xA29 - SYS___CSNAP = 0x0C5 - SYS___CSQRTF_B = 0xA3D - SYS___CSQRTF_H = 0xA3E - SYS___CSQRTL_B = 0xA40 - SYS___CSQRTL_H = 0xA41 - SYS___CSQRT_B = 0xA3A - SYS___CSQRT_H = 0xA3B - SYS___CTANF_B = 0xA46 - SYS___CTANF_H = 0xA47 - SYS___CTANHF_B = 0xA4F - SYS___CTANHF_H = 0xA50 - SYS___CTANHL_B = 0xA52 - SYS___CTANHL_H = 0xA53 - SYS___CTANH_B = 0xA4C - SYS___CTANH_H = 0xA4D - SYS___CTANL_B = 0xA49 - SYS___CTANL_H = 0xA4A - SYS___CTAN_B = 0xA43 - SYS___CTAN_H = 0xA44 - SYS___CTEST = 0x0C7 - SYS___CTRACE = 0x0C6 - SYS___D1TOP = 0xC9B - SYS___D2TOP = 0xC9C - SYS___D4TOP = 0xC9D - SYS___DYNALL = 0x0C3 - SYS___DYNFRE = 0x0C2 - SYS___EXP2F_H = 0xA5E - SYS___EXP2L_H = 0xA5F - SYS___EXP2_H = 0xA5D - SYS___EXPM1F_H = 0xA5B - SYS___EXPM1L_H = 0xA5C - SYS___FBUFSIZE = 0xD60 - SYS___FLBF = 0xD62 - SYS___FLDATA = 0x0C1 - SYS___FMAF_B = 0xA67 - SYS___FMAF_H = 0xA68 - SYS___FMAL_B = 0xA6A - SYS___FMAL_H = 0xA6B - SYS___FMAXF_B = 0xA70 - SYS___FMAXF_H = 0xA71 - SYS___FMAXL_B = 0xA73 - SYS___FMAXL_H = 0xA74 - SYS___FMAX_B = 0xA6D - SYS___FMAX_H = 0xA6E - SYS___FMA_B = 0xA64 - SYS___FMA_H = 0xA65 - SYS___FMINF_B = 0xA79 - SYS___FMINF_H = 0xA7A - SYS___FMINL_B = 0xA7C - SYS___FMINL_H = 0xA7D - SYS___FMIN_B = 0xA76 - SYS___FMIN_H = 0xA77 - SYS___FPENDING = 0xD61 - SYS___FPENDING_UNLOCKED = 0xD6C - SYS___FPURGE = 0xD69 - SYS___FPURGE_UNLOCKED = 0xD70 - SYS___FP_CAST_D = 0xBCB - SYS___FREADABLE = 0xD63 - SYS___FREADAHEAD = 0xD6A - SYS___FREADAHEAD_UNLOCKED = 0xD71 - SYS___FREADING = 0xD65 - SYS___FREADING_UNLOCKED = 0xD6D - SYS___FSEEK2 = 0xB3C - SYS___FSETERR = 0xD6B - SYS___FSETLOCKING = 0xD67 - SYS___FTCHEP = 0x0BF - SYS___FTELL2 = 0xB3B - SYS___FUPDT = 0x0B5 - SYS___FWRITABLE = 0xD64 - SYS___FWRITING = 0xD66 - SYS___FWRITING_UNLOCKED = 0xD6E - SYS___GETCB = 0x0B4 - SYS___GETGRGID1 = 0xD5B - SYS___GETGRNAM1 = 0xD5C - SYS___GETTHENT = 0xCE5 - SYS___GETTOD = 0xD3E - SYS___HYPOTF_H = 0xAF6 - SYS___HYPOTL_H = 0xAF7 - SYS___ILOGBF_B = 0xA7F - SYS___ILOGBF_H = 0xA80 - SYS___ILOGBL_B = 0xA82 - SYS___ILOGBL_H = 0xA83 - SYS___ISBLANK_A = 0xB2E - SYS___ISBLNK = 0x0FE - SYS___ISWBLANK_A = 0xB2F - SYS___LE_CEEGTJS = 0xD72 - 
SYS___LE_TRACEBACK = 0xB7A - SYS___LGAMMAL_H = 0xA62 - SYS___LGAMMA_B_C99 = 0xB39 - SYS___LGAMMA_H_C99 = 0xB38 - SYS___LGAMMA_R_C99 = 0xB3A - SYS___LLRINTF_B = 0xA88 - SYS___LLRINTF_H = 0xA89 - SYS___LLRINTL_B = 0xA8B - SYS___LLRINTL_H = 0xA8C - SYS___LLRINT_B = 0xA85 - SYS___LLRINT_H = 0xA86 - SYS___LLROUNDF_B = 0xA91 - SYS___LLROUNDF_H = 0xA92 - SYS___LLROUNDL_B = 0xA94 - SYS___LLROUNDL_H = 0xA95 - SYS___LLROUND_B = 0xA8E - SYS___LLROUND_H = 0xA8F - SYS___LOCALE_CTL = 0xD47 - SYS___LOG1PF_H = 0xA60 - SYS___LOG1PL_H = 0xA61 - SYS___LOGBF_B = 0xA97 - SYS___LOGBF_H = 0xA98 - SYS___LOGBL_B = 0xA9A - SYS___LOGBL_H = 0xA9B - SYS___LOGIN_APPLID = 0xCE2 - SYS___LRINTF_B = 0xAA0 - SYS___LRINTF_H = 0xAA1 - SYS___LRINTL_B = 0xAA3 - SYS___LRINTL_H = 0xAA4 - SYS___LRINT_B = 0xA9D - SYS___LRINT_H = 0xA9E - SYS___LROUNDF_FIXUP = 0xB31 - SYS___LROUNDL_B = 0xAA6 - SYS___LROUNDL_H = 0xAA7 - SYS___LROUND_FIXUP = 0xB30 - SYS___MOSERVICES = 0xD3D - SYS___MUST_STAY_CLEAN = 0xB7C - SYS___NANF_B = 0xAAB - SYS___NANL_B = 0xAAD - SYS___NAN_B = 0xAA9 - SYS___NEARBYINTF_B = 0xAB2 - SYS___NEARBYINTF_H = 0xAB3 - SYS___NEARBYINTL_B = 0xAB5 - SYS___NEARBYINTL_H = 0xAB6 - SYS___NEARBYINT_B = 0xAAF - SYS___NEARBYINT_H = 0xAB0 - SYS___NEXTAFTERF_B = 0xAB8 - SYS___NEXTAFTERF_H = 0xAB9 - SYS___NEXTAFTERL_B = 0xABB - SYS___NEXTAFTERL_H = 0xABC - SYS___NEXTTOWARDF_B = 0xAC1 - SYS___NEXTTOWARDF_H = 0xAC2 - SYS___NEXTTOWARDL_B = 0xAC4 - SYS___NEXTTOWARDL_H = 0xAC5 - SYS___NEXTTOWARD_B = 0xABE - SYS___NEXTTOWARD_H = 0xABF - SYS___O_ENV = 0xB7D - SYS___PASSWD_APPLID = 0xCE3 - SYS___PTOD1 = 0xC9E - SYS___PTOD2 = 0xC9F - SYS___PTOD4 = 0xCA0 - SYS___REGCOMP_STD = 0x0EA - SYS___REMAINDERF_H = 0xAC6 - SYS___REMAINDERL_H = 0xAC7 - SYS___REMQUOD128 = 0xC10 - SYS___REMQUOD32 = 0xC0E - SYS___REMQUOD64 = 0xC0F - SYS___REMQUOF_H = 0xAC9 - SYS___REMQUOL_H = 0xACA - SYS___REMQUO_H = 0xAC8 - SYS___RINTF_B = 0xACC - SYS___RINTL_B = 0xACE - SYS___ROUNDF_B = 0xAD3 - SYS___ROUNDF_H = 0xAD4 - SYS___ROUNDL_B = 0xAD6 - SYS___ROUNDL_H = 0xAD7 - SYS___ROUND_B = 0xAD0 - SYS___ROUND_H = 0xAD1 - SYS___SCALBLNF_B = 0xADC - SYS___SCALBLNF_H = 0xADD - SYS___SCALBLNL_B = 0xADF - SYS___SCALBLNL_H = 0xAE0 - SYS___SCALBLN_B = 0xAD9 - SYS___SCALBLN_H = 0xADA - SYS___SCALBNF_B = 0xAE4 - SYS___SCALBNF_H = 0xAE5 - SYS___SCALBNL_B = 0xAE7 - SYS___SCALBNL_H = 0xAE8 - SYS___SCALBN_B = 0xAE1 - SYS___SCALBN_H = 0xAE2 - SYS___SETENV = 0x0C8 - SYS___SINPID128 = 0xC73 - SYS___SINPID32 = 0xC71 - SYS___SINPID64 = 0xC72 - SYS___SMF_RECORD2 = 0xD48 - SYS___STATIC_REINIT = 0xB3D - SYS___TGAMMAF_H_C99 = 0xB79 - SYS___TGAMMAL_H = 0xAE9 - SYS___TGAMMA_H_C99 = 0xB78 - SYS___TOCSNAME2 = 0xC9A - SYS_CEIL = 0x01F - SYS_CHAUDIT = 0x1E0 - SYS_EXP = 0x01A - SYS_FCHAUDIT = 0x1E1 - SYS_FREXP = 0x01D - SYS_GETGROUPSBYNAME = 0x1E2 - SYS_GETPWUID = 0x1A0 - SYS_GETUID = 0x1A1 - SYS_ISATTY = 0x1A3 - SYS_KILL = 0x1A4 - SYS_LDEXP = 0x01E - SYS_LINK = 0x1A5 - SYS_LOG10 = 0x01C - SYS_LSEEK = 0x1A6 - SYS_LSTAT = 0x1A7 - SYS_MKDIR = 0x1A8 - SYS_MKFIFO = 0x1A9 - SYS_MKNOD = 0x1AA - SYS_MODF = 0x01B - SYS_MOUNT = 0x1AB - SYS_OPEN = 0x1AC - SYS_OPENDIR = 0x1AD - SYS_PATHCONF = 0x1AE - SYS_PAUSE = 0x1AF - SYS_PIPE = 0x1B0 - SYS_PTHREAD_ATTR_DESTROY = 0x1E7 - SYS_PTHREAD_ATTR_GETDETACHSTATE = 0x1EB - SYS_PTHREAD_ATTR_GETSTACKSIZE = 0x1E9 - SYS_PTHREAD_ATTR_GETWEIGHT_NP = 0x1ED - SYS_PTHREAD_ATTR_INIT = 0x1E6 - SYS_PTHREAD_ATTR_SETDETACHSTATE = 0x1EA - SYS_PTHREAD_ATTR_SETSTACKSIZE = 0x1E8 - SYS_PTHREAD_ATTR_SETWEIGHT_NP = 0x1EC - SYS_PTHREAD_CANCEL = 0x1EE - SYS_PTHREAD_CLEANUP_POP = 0x1F0 - 
SYS_PTHREAD_CLEANUP_PUSH = 0x1EF - SYS_PTHREAD_CONDATTR_DESTROY = 0x1F2 - SYS_PTHREAD_CONDATTR_INIT = 0x1F1 - SYS_PTHREAD_COND_BROADCAST = 0x1F6 - SYS_PTHREAD_COND_DESTROY = 0x1F4 - SYS_PTHREAD_COND_INIT = 0x1F3 - SYS_PTHREAD_COND_SIGNAL = 0x1F5 - SYS_PTHREAD_COND_TIMEDWAIT = 0x1F8 - SYS_PTHREAD_COND_WAIT = 0x1F7 - SYS_PTHREAD_CREATE = 0x1F9 - SYS_PTHREAD_DETACH = 0x1FA - SYS_PTHREAD_EQUAL = 0x1FB - SYS_PTHREAD_EXIT = 0x1E4 - SYS_PTHREAD_GETSPECIFIC = 0x1FC - SYS_PTHREAD_JOIN = 0x1FD - SYS_PTHREAD_KEY_CREATE = 0x1FE - SYS_PTHREAD_KILL = 0x1E5 - SYS_PTHREAD_MUTEXATTR_INIT = 0x1FF - SYS_READ = 0x1B2 - SYS_READDIR = 0x1B3 - SYS_READLINK = 0x1B4 - SYS_REWINDDIR = 0x1B5 - SYS_RMDIR = 0x1B6 - SYS_SETEGID = 0x1B7 - SYS_SETEUID = 0x1B8 - SYS_SETGID = 0x1B9 - SYS_SETPGID = 0x1BA - SYS_SETSID = 0x1BB - SYS_SETUID = 0x1BC - SYS_SIGACTION = 0x1BD - SYS_SIGADDSET = 0x1BE - SYS_SIGDELSET = 0x1BF - SYS_SIGEMPTYSET = 0x1C0 - SYS_SIGFILLSET = 0x1C1 - SYS_SIGISMEMBER = 0x1C2 - SYS_SIGLONGJMP = 0x1C3 - SYS_SIGPENDING = 0x1C4 - SYS_SIGPROCMASK = 0x1C5 - SYS_SIGSETJMP = 0x1C6 - SYS_SIGSUSPEND = 0x1C7 - SYS_SIGWAIT = 0x1E3 - SYS_SLEEP = 0x1C8 - SYS_STAT = 0x1C9 - SYS_SYMLINK = 0x1CB - SYS_SYSCONF = 0x1CC - SYS_TCDRAIN = 0x1CD - SYS_TCFLOW = 0x1CE - SYS_TCFLUSH = 0x1CF - SYS_TCGETATTR = 0x1D0 - SYS_TCGETPGRP = 0x1D1 - SYS_TCSENDBREAK = 0x1D2 - SYS_TCSETATTR = 0x1D3 - SYS_TCSETPGRP = 0x1D4 - SYS_TIMES = 0x1D5 - SYS_TTYNAME = 0x1D6 - SYS_TZSET = 0x1D7 - SYS_UMASK = 0x1D8 - SYS_UMOUNT = 0x1D9 - SYS_UNAME = 0x1DA - SYS_UNLINK = 0x1DB - SYS_UTIME = 0x1DC - SYS_WAIT = 0x1DD - SYS_WAITPID = 0x1DE - SYS_WRITE = 0x1DF - SYS_W_GETPSENT = 0x1B1 - SYS_W_IOCTL = 0x1A2 - SYS_W_STATFS = 0x1CA - SYS_A64L = 0x2EF - SYS_BCMP = 0x2B9 - SYS_BCOPY = 0x2BA - SYS_BZERO = 0x2BB - SYS_CATCLOSE = 0x2B6 - SYS_CATGETS = 0x2B7 - SYS_CATOPEN = 0x2B8 - SYS_CRYPT = 0x2AC - SYS_DBM_CLEARERR = 0x2F7 - SYS_DBM_CLOSE = 0x2F8 - SYS_DBM_DELETE = 0x2F9 - SYS_DBM_ERROR = 0x2FA - SYS_DBM_FETCH = 0x2FB - SYS_DBM_FIRSTKEY = 0x2FC - SYS_DBM_NEXTKEY = 0x2FD - SYS_DBM_OPEN = 0x2FE - SYS_DBM_STORE = 0x2FF - SYS_DRAND48 = 0x2B2 - SYS_ENCRYPT = 0x2AD - SYS_ENDUTXENT = 0x2E1 - SYS_ERAND48 = 0x2B3 - SYS_ERF = 0x02C - SYS_ERFC = 0x02D - SYS_FCHDIR = 0x2D9 - SYS_FFS = 0x2BC - SYS_FMTMSG = 0x2E5 - SYS_FSTATVFS = 0x2B4 - SYS_FTIME = 0x2F5 - SYS_GAMMA = 0x02E - SYS_GETDATE = 0x2A6 - SYS_GETPAGESIZE = 0x2D8 - SYS_GETTIMEOFDAY = 0x2F6 - SYS_GETUTXENT = 0x2E0 - SYS_GETUTXID = 0x2E2 - SYS_GETUTXLINE = 0x2E3 - SYS_HCREATE = 0x2C6 - SYS_HDESTROY = 0x2C7 - SYS_HSEARCH = 0x2C8 - SYS_HYPOT = 0x02B - SYS_INDEX = 0x2BD - SYS_INITSTATE = 0x2C2 - SYS_INSQUE = 0x2CF - SYS_ISASCII = 0x2ED - SYS_JRAND48 = 0x2E6 - SYS_L64A = 0x2F0 - SYS_LCONG48 = 0x2EA - SYS_LFIND = 0x2C9 - SYS_LRAND48 = 0x2E7 - SYS_LSEARCH = 0x2CA - SYS_MEMCCPY = 0x2D4 - SYS_MRAND48 = 0x2E8 - SYS_NRAND48 = 0x2E9 - SYS_PCLOSE = 0x2D2 - SYS_POPEN = 0x2D1 - SYS_PUTUTXLINE = 0x2E4 - SYS_RANDOM = 0x2C4 - SYS_REMQUE = 0x2D0 - SYS_RINDEX = 0x2BE - SYS_SEED48 = 0x2EC - SYS_SETKEY = 0x2AE - SYS_SETSTATE = 0x2C3 - SYS_SETUTXENT = 0x2DF - SYS_SRAND48 = 0x2EB - SYS_SRANDOM = 0x2C5 - SYS_STATVFS = 0x2B5 - SYS_STRCASECMP = 0x2BF - SYS_STRDUP = 0x2C0 - SYS_STRNCASECMP = 0x2C1 - SYS_SWAB = 0x2D3 - SYS_TDELETE = 0x2CB - SYS_TFIND = 0x2CC - SYS_TOASCII = 0x2EE - SYS_TSEARCH = 0x2CD - SYS_TWALK = 0x2CE - SYS_UALARM = 0x2F1 - SYS_USLEEP = 0x2F2 - SYS_WAIT3 = 0x2A7 - SYS_WAITID = 0x2A8 - SYS_Y1 = 0x02A - SYS___ATOE = 0x2DB - SYS___ATOE_L = 0x2DC - SYS___CATTRM = 0x2A9 - SYS___CNVBLK = 0x2AF - SYS___CRYTRM = 0x2B0 - SYS___DLGHT = 0x2A1 - 
SYS___ECRTRM = 0x2B1 - SYS___ETOA = 0x2DD - SYS___ETOA_L = 0x2DE - SYS___GDTRM = 0x2AA - SYS___OCLCK = 0x2DA - SYS___OPARGF = 0x2A2 - SYS___OPERRF = 0x2A5 - SYS___OPINDF = 0x2A4 - SYS___OPOPTF = 0x2A3 - SYS___RNDTRM = 0x2AB - SYS___SRCTRM = 0x2F4 - SYS___TZONE = 0x2A0 - SYS___UTXTRM = 0x2F3 - SYS_ASIN = 0x03E - SYS_ISXDIGIT = 0x03B - SYS_SETLOCAL = 0x03A - SYS_SETLOCALE = 0x03A - SYS_SIN = 0x03F - SYS_TOLOWER = 0x03C - SYS_TOUPPER = 0x03D - SYS_ACCEPT_AND_RECV = 0x4F7 - SYS_ATOL = 0x04E - SYS_CHECKSCH = 0x4BC - SYS_CHECKSCHENV = 0x4BC - SYS_CLEARERR = 0x04C - SYS_CONNECTS = 0x4B5 - SYS_CONNECTSERVER = 0x4B5 - SYS_CONNECTW = 0x4B4 - SYS_CONNECTWORKMGR = 0x4B4 - SYS_CONTINUE = 0x4B3 - SYS_CONTINUEWORKUNIT = 0x4B3 - SYS_COPYSIGN = 0x4C2 - SYS_CREATEWO = 0x4B2 - SYS_CREATEWORKUNIT = 0x4B2 - SYS_DELETEWO = 0x4B9 - SYS_DELETEWORKUNIT = 0x4B9 - SYS_DISCONNE = 0x4B6 - SYS_DISCONNECTSERVER = 0x4B6 - SYS_FEOF = 0x04D - SYS_FERROR = 0x04A - SYS_FINITE = 0x4C8 - SYS_GAMMA_R = 0x4E2 - SYS_JOINWORK = 0x4B7 - SYS_JOINWORKUNIT = 0x4B7 - SYS_LEAVEWOR = 0x4B8 - SYS_LEAVEWORKUNIT = 0x4B8 - SYS_LGAMMA_R = 0x4EB - SYS_MATHERR = 0x4D0 - SYS_PERROR = 0x04F - SYS_QUERYMET = 0x4BA - SYS_QUERYMETRICS = 0x4BA - SYS_QUERYSCH = 0x4BB - SYS_QUERYSCHENV = 0x4BB - SYS_REWIND = 0x04B - SYS_SCALBN = 0x4D4 - SYS_SIGNIFIC = 0x4D5 - SYS_SIGNIFICAND = 0x4D5 - SYS___ACOSH_B = 0x4DA - SYS___ACOS_B = 0x4D9 - SYS___ASINH_B = 0x4BE - SYS___ASIN_B = 0x4DB - SYS___ATAN2_B = 0x4DC - SYS___ATANH_B = 0x4DD - SYS___ATAN_B = 0x4BF - SYS___CBRT_B = 0x4C0 - SYS___CEIL_B = 0x4C1 - SYS___COSH_B = 0x4DE - SYS___COS_B = 0x4C3 - SYS___DGHT = 0x4A8 - SYS___ENVN = 0x4B0 - SYS___ERFC_B = 0x4C5 - SYS___ERF_B = 0x4C4 - SYS___EXPM1_B = 0x4C6 - SYS___EXP_B = 0x4DF - SYS___FABS_B = 0x4C7 - SYS___FLOOR_B = 0x4C9 - SYS___FMOD_B = 0x4E0 - SYS___FP_SETMODE = 0x4F8 - SYS___FREXP_B = 0x4CA - SYS___GAMMA_B = 0x4E1 - SYS___GDRR = 0x4A1 - SYS___HRRNO = 0x4A2 - SYS___HYPOT_B = 0x4E3 - SYS___ILOGB_B = 0x4CB - SYS___ISNAN_B = 0x4CC - SYS___J0_B = 0x4E4 - SYS___J1_B = 0x4E6 - SYS___JN_B = 0x4E8 - SYS___LDEXP_B = 0x4CD - SYS___LGAMMA_B = 0x4EA - SYS___LOG10_B = 0x4ED - SYS___LOG1P_B = 0x4CE - SYS___LOGB_B = 0x4CF - SYS___LOGIN = 0x4F5 - SYS___LOG_B = 0x4EC - SYS___MLOCKALL = 0x4B1 - SYS___MODF_B = 0x4D1 - SYS___NEXTAFTER_B = 0x4D2 - SYS___OPENDIR2 = 0x4F3 - SYS___OPEN_STAT = 0x4F6 - SYS___OPND = 0x4A5 - SYS___OPPT = 0x4A6 - SYS___OPRG = 0x4A3 - SYS___OPRR = 0x4A4 - SYS___PID_AFFINITY = 0x4BD - SYS___POW_B = 0x4EE - SYS___READDIR2 = 0x4F4 - SYS___REMAINDER_B = 0x4EF - SYS___RINT_B = 0x4D3 - SYS___SCALB_B = 0x4F0 - SYS___SIGACTIONSET = 0x4FB - SYS___SIGGM = 0x4A7 - SYS___SINH_B = 0x4F1 - SYS___SIN_B = 0x4D6 - SYS___SQRT_B = 0x4F2 - SYS___TANH_B = 0x4D8 - SYS___TAN_B = 0x4D7 - SYS___TRRNO = 0x4AF - SYS___TZNE = 0x4A9 - SYS___TZZN = 0x4AA - SYS___UCREATE = 0x4FC - SYS___UFREE = 0x4FE - SYS___UHEAPREPORT = 0x4FF - SYS___UMALLOC = 0x4FD - SYS___Y0_B = 0x4E5 - SYS___Y1_B = 0x4E7 - SYS___YN_B = 0x4E9 - SYS_ABORT = 0x05C - SYS_ASCTIME_R = 0x5E0 - SYS_ATEXIT = 0x05D - SYS_CONNECTE = 0x5AE - SYS_CONNECTEXPORTIMPORT = 0x5AE - SYS_CTIME_R = 0x5E1 - SYS_DN_COMP = 0x5DF - SYS_DN_EXPAND = 0x5DD - SYS_DN_SKIPNAME = 0x5DE - SYS_EXIT = 0x05A - SYS_EXPORTWO = 0x5A1 - SYS_EXPORTWORKUNIT = 0x5A1 - SYS_EXTRACTW = 0x5A5 - SYS_EXTRACTWORKUNIT = 0x5A5 - SYS_FSEEKO = 0x5C9 - SYS_FTELLO = 0x5C8 - SYS_GETGRGID_R = 0x5E7 - SYS_GETGRNAM_R = 0x5E8 - SYS_GETLOGIN_R = 0x5E9 - SYS_GETPWNAM_R = 0x5EA - SYS_GETPWUID_R = 0x5EB - SYS_GMTIME_R = 0x5E2 - SYS_IMPORTWO = 0x5A3 - SYS_IMPORTWORKUNIT = 0x5A3 - 
SYS_INET_NTOP = 0x5D3 - SYS_INET_PTON = 0x5D4 - SYS_LLABS = 0x5CE - SYS_LLDIV = 0x5CB - SYS_LOCALTIME_R = 0x5E3 - SYS_PTHREAD_ATFORK = 0x5ED - SYS_PTHREAD_ATTR_GETDETACHSTATE_U98 = 0x5FB - SYS_PTHREAD_ATTR_GETGUARDSIZE = 0x5EE - SYS_PTHREAD_ATTR_GETSCHEDPARAM = 0x5F9 - SYS_PTHREAD_ATTR_GETSTACKADDR = 0x5EF - SYS_PTHREAD_ATTR_SETDETACHSTATE_U98 = 0x5FC - SYS_PTHREAD_ATTR_SETGUARDSIZE = 0x5F0 - SYS_PTHREAD_ATTR_SETSCHEDPARAM = 0x5FA - SYS_PTHREAD_ATTR_SETSTACKADDR = 0x5F1 - SYS_PTHREAD_CONDATTR_GETPSHARED = 0x5F2 - SYS_PTHREAD_CONDATTR_SETPSHARED = 0x5F3 - SYS_PTHREAD_DETACH_U98 = 0x5FD - SYS_PTHREAD_GETCONCURRENCY = 0x5F4 - SYS_PTHREAD_GETSPECIFIC_U98 = 0x5FE - SYS_PTHREAD_KEY_DELETE = 0x5F5 - SYS_PTHREAD_SETCANCELSTATE = 0x5FF - SYS_PTHREAD_SETCONCURRENCY = 0x5F6 - SYS_PTHREAD_SIGMASK = 0x5F7 - SYS_QUERYENC = 0x5AD - SYS_QUERYWORKUNITCLASSIFICATION = 0x5AD - SYS_RAISE = 0x05E - SYS_RAND_R = 0x5E4 - SYS_READDIR_R = 0x5E6 - SYS_REALLOC = 0x05B - SYS_RES_INIT = 0x5D8 - SYS_RES_MKQUERY = 0x5D7 - SYS_RES_QUERY = 0x5D9 - SYS_RES_QUERYDOMAIN = 0x5DC - SYS_RES_SEARCH = 0x5DA - SYS_RES_SEND = 0x5DB - SYS_SETJMP = 0x05F - SYS_SIGQUEUE = 0x5A9 - SYS_STRTOK_R = 0x5E5 - SYS_STRTOLL = 0x5B0 - SYS_STRTOULL = 0x5B1 - SYS_TTYNAME_R = 0x5EC - SYS_UNDOEXPO = 0x5A2 - SYS_UNDOEXPORTWORKUNIT = 0x5A2 - SYS_UNDOIMPO = 0x5A4 - SYS_UNDOIMPORTWORKUNIT = 0x5A4 - SYS_WCSTOLL = 0x5CC - SYS_WCSTOULL = 0x5CD - SYS___ABORT = 0x05C - SYS___CONSOLE2 = 0x5D2 - SYS___CPL = 0x5A6 - SYS___DISCARDDATA = 0x5F8 - SYS___DSA_PREV = 0x5B2 - SYS___EP_FIND = 0x5B3 - SYS___FP_SWAPMODE = 0x5AF - SYS___GETUSERID = 0x5AB - SYS___GET_CPUID = 0x5B9 - SYS___GET_SYSTEM_SETTINGS = 0x5BA - SYS___IPDOMAINNAME = 0x5AC - SYS___MAP_INIT = 0x5A7 - SYS___MAP_SERVICE = 0x5A8 - SYS___MOUNT = 0x5AA - SYS___MSGRCV_TIMED = 0x5B7 - SYS___RES = 0x5D6 - SYS___SEMOP_TIMED = 0x5B8 - SYS___SERVER_THREADS_QUERY = 0x5B4 - SYS_FPRINTF = 0x06D - SYS_FSCANF = 0x06A - SYS_PRINTF = 0x06F - SYS_SETBUF = 0x06B - SYS_SETVBUF = 0x06C - SYS_SSCANF = 0x06E - SYS___CATGETS_A = 0x6C0 - SYS___CHAUDIT_A = 0x6F4 - SYS___CHMOD_A = 0x6E8 - SYS___COLLATE_INIT_A = 0x6AC - SYS___CREAT_A = 0x6F6 - SYS___CTYPE_INIT_A = 0x6AF - SYS___DLLLOAD_A = 0x6DF - SYS___DLLQUERYFN_A = 0x6E0 - SYS___DLLQUERYVAR_A = 0x6E1 - SYS___E2A_L = 0x6E3 - SYS___EXECLE_A = 0x6A0 - SYS___EXECLP_A = 0x6A4 - SYS___EXECVE_A = 0x6C1 - SYS___EXECVP_A = 0x6C2 - SYS___EXECV_A = 0x6B1 - SYS___FPRINTF_A = 0x6FA - SYS___GETADDRINFO_A = 0x6BF - SYS___GETNAMEINFO_A = 0x6C4 - SYS___GET_WCTYPE_STD_A = 0x6AE - SYS___ICONV_OPEN_A = 0x6DE - SYS___IF_INDEXTONAME_A = 0x6DC - SYS___IF_NAMETOINDEX_A = 0x6DB - SYS___ISWCTYPE_A = 0x6B0 - SYS___IS_WCTYPE_STD_A = 0x6B2 - SYS___LOCALECONV_A = 0x6B8 - SYS___LOCALECONV_STD_A = 0x6B9 - SYS___LOCALE_INIT_A = 0x6B7 - SYS___LSTAT_A = 0x6EE - SYS___LSTAT_O_A = 0x6EF - SYS___MKDIR_A = 0x6E9 - SYS___MKFIFO_A = 0x6EC - SYS___MKNOD_A = 0x6F0 - SYS___MONETARY_INIT_A = 0x6BC - SYS___MOUNT_A = 0x6F1 - SYS___NL_CSINFO_A = 0x6D6 - SYS___NL_LANGINFO_A = 0x6BA - SYS___NL_LNAGINFO_STD_A = 0x6BB - SYS___NL_MONINFO_A = 0x6D7 - SYS___NL_NUMINFO_A = 0x6D8 - SYS___NL_RESPINFO_A = 0x6D9 - SYS___NL_TIMINFO_A = 0x6DA - SYS___NUMERIC_INIT_A = 0x6C6 - SYS___OPEN_A = 0x6F7 - SYS___PRINTF_A = 0x6DD - SYS___RESP_INIT_A = 0x6C7 - SYS___RPMATCH_A = 0x6C8 - SYS___RPMATCH_C_A = 0x6C9 - SYS___RPMATCH_STD_A = 0x6CA - SYS___SETLOCALE_A = 0x6F9 - SYS___SPAWNP_A = 0x6C5 - SYS___SPAWN_A = 0x6C3 - SYS___SPRINTF_A = 0x6FB - SYS___STAT_A = 0x6EA - SYS___STAT_O_A = 0x6EB - SYS___STRCOLL_STD_A = 0x6A1 - SYS___STRFMON_A = 0x6BD - 
SYS___STRFMON_STD_A = 0x6BE - SYS___STRFTIME_A = 0x6CC - SYS___STRFTIME_STD_A = 0x6CD - SYS___STRPTIME_A = 0x6CE - SYS___STRPTIME_STD_A = 0x6CF - SYS___STRXFRM_A = 0x6A2 - SYS___STRXFRM_C_A = 0x6A3 - SYS___STRXFRM_STD_A = 0x6A5 - SYS___SYNTAX_INIT_A = 0x6D4 - SYS___TIME_INIT_A = 0x6CB - SYS___TOD_INIT_A = 0x6D5 - SYS___TOWLOWER_A = 0x6B3 - SYS___TOWLOWER_STD_A = 0x6B4 - SYS___TOWUPPER_A = 0x6B5 - SYS___TOWUPPER_STD_A = 0x6B6 - SYS___UMOUNT_A = 0x6F2 - SYS___VFPRINTF_A = 0x6FC - SYS___VPRINTF_A = 0x6FD - SYS___VSPRINTF_A = 0x6FE - SYS___VSWPRINTF_A = 0x6FF - SYS___WCSCOLL_A = 0x6A6 - SYS___WCSCOLL_C_A = 0x6A7 - SYS___WCSCOLL_STD_A = 0x6A8 - SYS___WCSFTIME_A = 0x6D0 - SYS___WCSFTIME_STD_A = 0x6D1 - SYS___WCSXFRM_A = 0x6A9 - SYS___WCSXFRM_C_A = 0x6AA - SYS___WCSXFRM_STD_A = 0x6AB - SYS___WCTYPE_A = 0x6AD - SYS___W_GETMNTENT_A = 0x6F5 - SYS_____CCSIDTYPE_A = 0x6E6 - SYS_____CHATTR_A = 0x6E2 - SYS_____CSNAMETYPE_A = 0x6E7 - SYS_____OPEN_STAT_A = 0x6ED - SYS_____SPAWN2_A = 0x6D2 - SYS_____SPAWNP2_A = 0x6D3 - SYS_____TOCCSID_A = 0x6E4 - SYS_____TOCSNAME_A = 0x6E5 - SYS_ACL_FREE = 0x7FF - SYS_ACL_INIT = 0x7FE - SYS_FWIDE = 0x7DF - SYS_FWPRINTF = 0x7D1 - SYS_FWRITE = 0x07E - SYS_FWSCANF = 0x7D5 - SYS_GETCHAR = 0x07B - SYS_GETS = 0x07C - SYS_M_CREATE_LAYOUT = 0x7C9 - SYS_M_DESTROY_LAYOUT = 0x7CA - SYS_M_GETVALUES_LAYOUT = 0x7CB - SYS_M_SETVALUES_LAYOUT = 0x7CC - SYS_M_TRANSFORM_LAYOUT = 0x7CD - SYS_M_WTRANSFORM_LAYOUT = 0x7CE - SYS_PREAD = 0x7C7 - SYS_PUTC = 0x07D - SYS_PUTCHAR = 0x07A - SYS_PUTS = 0x07F - SYS_PWRITE = 0x7C8 - SYS_TOWCTRAN = 0x7D8 - SYS_TOWCTRANS = 0x7D8 - SYS_UNATEXIT = 0x7B5 - SYS_VFWPRINT = 0x7D3 - SYS_VFWPRINTF = 0x7D3 - SYS_VWPRINTF = 0x7D4 - SYS_WCTRANS = 0x7D7 - SYS_WPRINTF = 0x7D2 - SYS_WSCANF = 0x7D6 - SYS___ASCTIME_R_A = 0x7A1 - SYS___BASENAME_A = 0x7DC - SYS___BTOWC_A = 0x7E4 - SYS___CDUMP_A = 0x7B7 - SYS___CEE3DMP_A = 0x7B6 - SYS___CEILF_H = 0x7F4 - SYS___CEILL_H = 0x7F5 - SYS___CEIL_H = 0x7EA - SYS___CRYPT_A = 0x7BE - SYS___CSNAP_A = 0x7B8 - SYS___CTEST_A = 0x7B9 - SYS___CTIME_R_A = 0x7A2 - SYS___CTRACE_A = 0x7BA - SYS___DBM_OPEN_A = 0x7E6 - SYS___DIRNAME_A = 0x7DD - SYS___FABSF_H = 0x7FA - SYS___FABSL_H = 0x7FB - SYS___FABS_H = 0x7ED - SYS___FGETWC_A = 0x7AA - SYS___FGETWS_A = 0x7AD - SYS___FLOORF_H = 0x7F6 - SYS___FLOORL_H = 0x7F7 - SYS___FLOOR_H = 0x7EB - SYS___FPUTWC_A = 0x7A5 - SYS___FPUTWS_A = 0x7A8 - SYS___GETTIMEOFDAY_A = 0x7AE - SYS___GETWCHAR_A = 0x7AC - SYS___GETWC_A = 0x7AB - SYS___GLOB_A = 0x7DE - SYS___GMTIME_A = 0x7AF - SYS___GMTIME_R_A = 0x7B0 - SYS___INET_PTON_A = 0x7BC - SYS___J0_H = 0x7EE - SYS___J1_H = 0x7EF - SYS___JN_H = 0x7F0 - SYS___LOCALTIME_A = 0x7B1 - SYS___LOCALTIME_R_A = 0x7B2 - SYS___MALLOC24 = 0x7FC - SYS___MALLOC31 = 0x7FD - SYS___MKTIME_A = 0x7B3 - SYS___MODFF_H = 0x7F8 - SYS___MODFL_H = 0x7F9 - SYS___MODF_H = 0x7EC - SYS___OPENDIR_A = 0x7C2 - SYS___OSNAME = 0x7E0 - SYS___PUTWCHAR_A = 0x7A7 - SYS___PUTWC_A = 0x7A6 - SYS___READDIR_A = 0x7C3 - SYS___STRTOLL_A = 0x7A3 - SYS___STRTOULL_A = 0x7A4 - SYS___SYSLOG_A = 0x7BD - SYS___TZZNA = 0x7B4 - SYS___UNGETWC_A = 0x7A9 - SYS___UTIME_A = 0x7A0 - SYS___VFPRINTF2_A = 0x7E7 - SYS___VPRINTF2_A = 0x7E8 - SYS___VSPRINTF2_A = 0x7E9 - SYS___VSWPRNTF2_A = 0x7BB - SYS___WCSTOD_A = 0x7D9 - SYS___WCSTOL_A = 0x7DA - SYS___WCSTOUL_A = 0x7DB - SYS___WCTOB_A = 0x7E5 - SYS___Y0_H = 0x7F1 - SYS___Y1_H = 0x7F2 - SYS___YN_H = 0x7F3 - SYS_____OPENDIR2_A = 0x7BF - SYS_____OSNAME_A = 0x7E1 - SYS_____READDIR2_A = 0x7C0 - SYS_DLCLOSE = 0x8DF - SYS_DLERROR = 0x8E0 - SYS_DLOPEN = 0x8DD - SYS_DLSYM = 0x8DE - SYS_FLOCKFILE 
= 0x8D3 - SYS_FTRYLOCKFILE = 0x8D4 - SYS_FUNLOCKFILE = 0x8D5 - SYS_GETCHAR_UNLOCKED = 0x8D7 - SYS_GETC_UNLOCKED = 0x8D6 - SYS_PUTCHAR_UNLOCKED = 0x8D9 - SYS_PUTC_UNLOCKED = 0x8D8 - SYS_SNPRINTF = 0x8DA - SYS_VSNPRINTF = 0x8DB - SYS_WCSCSPN = 0x08B - SYS_WCSLEN = 0x08C - SYS_WCSNCAT = 0x08D - SYS_WCSNCMP = 0x08A - SYS_WCSNCPY = 0x08F - SYS_WCSSPN = 0x08E - SYS___ABSF_H = 0x8E7 - SYS___ABSL_H = 0x8E8 - SYS___ABS_H = 0x8E6 - SYS___ACOSF_H = 0x8EA - SYS___ACOSH_H = 0x8EC - SYS___ACOSL_H = 0x8EB - SYS___ACOS_H = 0x8E9 - SYS___ASINF_H = 0x8EE - SYS___ASINH_H = 0x8F0 - SYS___ASINL_H = 0x8EF - SYS___ASIN_H = 0x8ED - SYS___ATAN2F_H = 0x8F8 - SYS___ATAN2L_H = 0x8F9 - SYS___ATAN2_H = 0x8F7 - SYS___ATANF_H = 0x8F2 - SYS___ATANHF_H = 0x8F5 - SYS___ATANHL_H = 0x8F6 - SYS___ATANH_H = 0x8F4 - SYS___ATANL_H = 0x8F3 - SYS___ATAN_H = 0x8F1 - SYS___CBRT_H = 0x8FA - SYS___COPYSIGNF_H = 0x8FB - SYS___COPYSIGNL_H = 0x8FC - SYS___COSF_H = 0x8FE - SYS___COSL_H = 0x8FF - SYS___COS_H = 0x8FD - SYS___DLERROR_A = 0x8D2 - SYS___DLOPEN_A = 0x8D0 - SYS___DLSYM_A = 0x8D1 - SYS___GETUTXENT_A = 0x8C6 - SYS___GETUTXID_A = 0x8C7 - SYS___GETUTXLINE_A = 0x8C8 - SYS___ITOA = 0x8AA - SYS___ITOA_A = 0x8B0 - SYS___LE_CONDITION_TOKEN_BUILD = 0x8A5 - SYS___LE_MSG_ADD_INSERT = 0x8A6 - SYS___LE_MSG_GET = 0x8A7 - SYS___LE_MSG_GET_AND_WRITE = 0x8A8 - SYS___LE_MSG_WRITE = 0x8A9 - SYS___LLTOA = 0x8AE - SYS___LLTOA_A = 0x8B4 - SYS___LTOA = 0x8AC - SYS___LTOA_A = 0x8B2 - SYS___PUTCHAR_UNLOCKED_A = 0x8CC - SYS___PUTC_UNLOCKED_A = 0x8CB - SYS___PUTUTXLINE_A = 0x8C9 - SYS___RESET_EXCEPTION_HANDLER = 0x8E3 - SYS___REXEC_A = 0x8C4 - SYS___REXEC_AF_A = 0x8C5 - SYS___SET_EXCEPTION_HANDLER = 0x8E2 - SYS___SNPRINTF_A = 0x8CD - SYS___SUPERKILL = 0x8A4 - SYS___TCGETATTR_A = 0x8A1 - SYS___TCSETATTR_A = 0x8A2 - SYS___ULLTOA = 0x8AF - SYS___ULLTOA_A = 0x8B5 - SYS___ULTOA = 0x8AD - SYS___ULTOA_A = 0x8B3 - SYS___UTOA = 0x8AB - SYS___UTOA_A = 0x8B1 - SYS___VHM_EVENT = 0x8E4 - SYS___VSNPRINTF_A = 0x8CE - SYS_____GETENV_A = 0x8C3 - SYS_____UTMPXNAME_A = 0x8CA - SYS_CACOSH = 0x9A0 - SYS_CACOSHF = 0x9A3 - SYS_CACOSHL = 0x9A6 - SYS_CARG = 0x9A9 - SYS_CARGF = 0x9AC - SYS_CARGL = 0x9AF - SYS_CASIN = 0x9B2 - SYS_CASINF = 0x9B5 - SYS_CASINH = 0x9BB - SYS_CASINHF = 0x9BE - SYS_CASINHL = 0x9C1 - SYS_CASINL = 0x9B8 - SYS_CATAN = 0x9C4 - SYS_CATANF = 0x9C7 - SYS_CATANH = 0x9CD - SYS_CATANHF = 0x9D0 - SYS_CATANHL = 0x9D3 - SYS_CATANL = 0x9CA - SYS_CCOS = 0x9D6 - SYS_CCOSF = 0x9D9 - SYS_CCOSH = 0x9DF - SYS_CCOSHF = 0x9E2 - SYS_CCOSHL = 0x9E5 - SYS_CCOSL = 0x9DC - SYS_CEXP = 0x9E8 - SYS_CEXPF = 0x9EB - SYS_CEXPL = 0x9EE - SYS_CIMAG = 0x9F1 - SYS_CIMAGF = 0x9F4 - SYS_CIMAGL = 0x9F7 - SYS_CLOGF = 0x9FD - SYS_MEMCHR = 0x09B - SYS_MEMCMP = 0x09A - SYS_STRCOLL = 0x09C - SYS_STRNCMP = 0x09D - SYS_STRRCHR = 0x09F - SYS_STRXFRM = 0x09E - SYS___CACOSHF_B = 0x9A4 - SYS___CACOSHF_H = 0x9A5 - SYS___CACOSHL_B = 0x9A7 - SYS___CACOSHL_H = 0x9A8 - SYS___CACOSH_B = 0x9A1 - SYS___CACOSH_H = 0x9A2 - SYS___CARGF_B = 0x9AD - SYS___CARGF_H = 0x9AE - SYS___CARGL_B = 0x9B0 - SYS___CARGL_H = 0x9B1 - SYS___CARG_B = 0x9AA - SYS___CARG_H = 0x9AB - SYS___CASINF_B = 0x9B6 - SYS___CASINF_H = 0x9B7 - SYS___CASINHF_B = 0x9BF - SYS___CASINHF_H = 0x9C0 - SYS___CASINHL_B = 0x9C2 - SYS___CASINHL_H = 0x9C3 - SYS___CASINH_B = 0x9BC - SYS___CASINH_H = 0x9BD - SYS___CASINL_B = 0x9B9 - SYS___CASINL_H = 0x9BA - SYS___CASIN_B = 0x9B3 - SYS___CASIN_H = 0x9B4 - SYS___CATANF_B = 0x9C8 - SYS___CATANF_H = 0x9C9 - SYS___CATANHF_B = 0x9D1 - SYS___CATANHF_H = 0x9D2 - SYS___CATANHL_B = 0x9D4 - SYS___CATANHL_H = 0x9D5 - 
SYS___CATANH_B = 0x9CE - SYS___CATANH_H = 0x9CF - SYS___CATANL_B = 0x9CB - SYS___CATANL_H = 0x9CC - SYS___CATAN_B = 0x9C5 - SYS___CATAN_H = 0x9C6 - SYS___CCOSF_B = 0x9DA - SYS___CCOSF_H = 0x9DB - SYS___CCOSHF_B = 0x9E3 - SYS___CCOSHF_H = 0x9E4 - SYS___CCOSHL_B = 0x9E6 - SYS___CCOSHL_H = 0x9E7 - SYS___CCOSH_B = 0x9E0 - SYS___CCOSH_H = 0x9E1 - SYS___CCOSL_B = 0x9DD - SYS___CCOSL_H = 0x9DE - SYS___CCOS_B = 0x9D7 - SYS___CCOS_H = 0x9D8 - SYS___CEXPF_B = 0x9EC - SYS___CEXPF_H = 0x9ED - SYS___CEXPL_B = 0x9EF - SYS___CEXPL_H = 0x9F0 - SYS___CEXP_B = 0x9E9 - SYS___CEXP_H = 0x9EA - SYS___CIMAGF_B = 0x9F5 - SYS___CIMAGF_H = 0x9F6 - SYS___CIMAGL_B = 0x9F8 - SYS___CIMAGL_H = 0x9F9 - SYS___CIMAG_B = 0x9F2 - SYS___CIMAG_H = 0x9F3 - SYS___CLOG = 0x9FA - SYS___CLOGF_B = 0x9FE - SYS___CLOGF_H = 0x9FF - SYS___CLOG_B = 0x9FB - SYS___CLOG_H = 0x9FC - SYS_ISWCTYPE = 0x10C - SYS_ISWXDIGI = 0x10A - SYS_ISWXDIGIT = 0x10A - SYS_MBSINIT = 0x10F - SYS_TOWLOWER = 0x10D - SYS_TOWUPPER = 0x10E - SYS_WCTYPE = 0x10B - SYS_WCSSTR = 0x11B - SYS___RPMTCH = 0x11A - SYS_WCSTOD = 0x12E - SYS_WCSTOK = 0x12C - SYS_WCSTOL = 0x12D - SYS_WCSTOUL = 0x12F - SYS_FGETWC = 0x13C - SYS_FGETWS = 0x13D - SYS_FPUTWC = 0x13E - SYS_FPUTWS = 0x13F - SYS_REGERROR = 0x13B - SYS_REGFREE = 0x13A - SYS_COLLEQUIV = 0x14F - SYS_COLLTOSTR = 0x14E - SYS_ISMCCOLLEL = 0x14C - SYS_STRTOCOLL = 0x14D - SYS_DLLFREE = 0x16F - SYS_DLLQUERYFN = 0x16D - SYS_DLLQUERYVAR = 0x16E - SYS_GETMCCOLL = 0x16A - SYS_GETWMCCOLL = 0x16B - SYS___ERR2AD = 0x16C - SYS_CFSETOSPEED = 0x17A - SYS_CHDIR = 0x17B - SYS_CHMOD = 0x17C - SYS_CHOWN = 0x17D - SYS_CLOSE = 0x17E - SYS_CLOSEDIR = 0x17F - SYS_LOG = 0x017 - SYS_COSH = 0x018 - SYS_FCHMOD = 0x18A - SYS_FCHOWN = 0x18B - SYS_FCNTL = 0x18C - SYS_FILENO = 0x18D - SYS_FORK = 0x18E - SYS_FPATHCONF = 0x18F - SYS_GETLOGIN = 0x19A - SYS_GETPGRP = 0x19C - SYS_GETPID = 0x19D - SYS_GETPPID = 0x19E - SYS_GETPWNAM = 0x19F - SYS_TANH = 0x019 - SYS_W_GETMNTENT = 0x19B - SYS_POW = 0x020 - SYS_PTHREAD_SELF = 0x20A - SYS_PTHREAD_SETINTR = 0x20B - SYS_PTHREAD_SETINTRTYPE = 0x20C - SYS_PTHREAD_SETSPECIFIC = 0x20D - SYS_PTHREAD_TESTINTR = 0x20E - SYS_PTHREAD_YIELD = 0x20F - SYS_SQRT = 0x021 - SYS_FLOOR = 0x022 - SYS_J1 = 0x023 - SYS_WCSPBRK = 0x23F - SYS_BSEARCH = 0x24C - SYS_FABS = 0x024 - SYS_GETENV = 0x24A - SYS_LDIV = 0x24D - SYS_SYSTEM = 0x24B - SYS_FMOD = 0x025 - SYS___RETHROW = 0x25F - SYS___THROW = 0x25E - SYS_J0 = 0x026 - SYS_PUTENV = 0x26A - SYS___GETENV = 0x26F - SYS_SEMCTL = 0x27A - SYS_SEMGET = 0x27B - SYS_SEMOP = 0x27C - SYS_SHMAT = 0x27D - SYS_SHMCTL = 0x27E - SYS_SHMDT = 0x27F - SYS_YN = 0x027 - SYS_JN = 0x028 - SYS_SIGALTSTACK = 0x28A - SYS_SIGHOLD = 0x28B - SYS_SIGIGNORE = 0x28C - SYS_SIGINTERRUPT = 0x28D - SYS_SIGPAUSE = 0x28E - SYS_SIGRELSE = 0x28F - SYS_GETOPT = 0x29A - SYS_GETSUBOPT = 0x29D - SYS_LCHOWN = 0x29B - SYS_SETPGRP = 0x29E - SYS_TRUNCATE = 0x29C - SYS_Y0 = 0x029 - SYS___GDERR = 0x29F - SYS_ISALPHA = 0x030 - SYS_VFORK = 0x30F - SYS__LONGJMP = 0x30D - SYS__SETJMP = 0x30E - SYS_GLOB = 0x31A - SYS_GLOBFREE = 0x31B - SYS_ISALNUM = 0x031 - SYS_PUTW = 0x31C - SYS_SEEKDIR = 0x31D - SYS_TELLDIR = 0x31E - SYS_TEMPNAM = 0x31F - SYS_GETTIMEOFDAY_R = 0x32E - SYS_ISLOWER = 0x032 - SYS_LGAMMA = 0x32C - SYS_REMAINDER = 0x32A - SYS_SCALB = 0x32B - SYS_SYNC = 0x32F - SYS_TTYSLOT = 0x32D - SYS_ENDPROTOENT = 0x33A - SYS_ENDSERVENT = 0x33B - SYS_GETHOSTBYADDR = 0x33D - SYS_GETHOSTBYADDR_R = 0x33C - SYS_GETHOSTBYNAME = 0x33F - SYS_GETHOSTBYNAME_R = 0x33E - SYS_ISCNTRL = 0x033 - SYS_GETSERVBYNAME = 0x34A - SYS_GETSERVBYPORT = 0x34B - 
SYS_GETSERVENT = 0x34C - SYS_GETSOCKNAME = 0x34D - SYS_GETSOCKOPT = 0x34E - SYS_INET_ADDR = 0x34F - SYS_ISDIGIT = 0x034 - SYS_ISGRAPH = 0x035 - SYS_SELECT = 0x35B - SYS_SELECTEX = 0x35C - SYS_SEND = 0x35D - SYS_SENDTO = 0x35F - SYS_CHROOT = 0x36A - SYS_ISNAN = 0x36D - SYS_ISUPPER = 0x036 - SYS_ULIMIT = 0x36C - SYS_UTIMES = 0x36E - SYS_W_STATVFS = 0x36B - SYS___H_ERRNO = 0x36F - SYS_GRANTPT = 0x37A - SYS_ISPRINT = 0x037 - SYS_TCGETSID = 0x37C - SYS_UNLOCKPT = 0x37B - SYS___TCGETCP = 0x37D - SYS___TCSETCP = 0x37E - SYS___TCSETTABLES = 0x37F - SYS_ISPUNCT = 0x038 - SYS_NLIST = 0x38C - SYS___IPDBCS = 0x38D - SYS___IPDSPX = 0x38E - SYS___IPMSGC = 0x38F - SYS___STHOSTENT = 0x38B - SYS___STSERVENT = 0x38A - SYS_ISSPACE = 0x039 - SYS_COS = 0x040 - SYS_T_ALLOC = 0x40A - SYS_T_BIND = 0x40B - SYS_T_CLOSE = 0x40C - SYS_T_CONNECT = 0x40D - SYS_T_ERROR = 0x40E - SYS_T_FREE = 0x40F - SYS_TAN = 0x041 - SYS_T_RCVREL = 0x41A - SYS_T_RCVUDATA = 0x41B - SYS_T_RCVUDERR = 0x41C - SYS_T_SND = 0x41D - SYS_T_SNDDIS = 0x41E - SYS_T_SNDREL = 0x41F - SYS_GETPMSG = 0x42A - SYS_ISASTREAM = 0x42B - SYS_PUTMSG = 0x42C - SYS_PUTPMSG = 0x42D - SYS_SINH = 0x042 - SYS___ISPOSIXON = 0x42E - SYS___OPENMVSREL = 0x42F - SYS_ACOS = 0x043 - SYS_ATAN = 0x044 - SYS_ATAN2 = 0x045 - SYS_FTELL = 0x046 - SYS_FGETPOS = 0x047 - SYS_SOCK_DEBUG = 0x47A - SYS_SOCK_DO_TESTSTOR = 0x47D - SYS_TAKESOCKET = 0x47E - SYS___SERVER_INIT = 0x47F - SYS_FSEEK = 0x048 - SYS___IPHOST = 0x48B - SYS___IPNODE = 0x48C - SYS___SERVER_CLASSIFY_CREATE = 0x48D - SYS___SERVER_CLASSIFY_DESTROY = 0x48E - SYS___SERVER_CLASSIFY_RESET = 0x48F - SYS___SMF_RECORD = 0x48A - SYS_FSETPOS = 0x049 - SYS___FNWSA = 0x49B - SYS___SPAWN2 = 0x49D - SYS___SPAWNP2 = 0x49E - SYS_ATOF = 0x050 - SYS_PTHREAD_MUTEXATTR_GETPSHARED = 0x50A - SYS_PTHREAD_MUTEXATTR_SETPSHARED = 0x50B - SYS_PTHREAD_RWLOCK_DESTROY = 0x50C - SYS_PTHREAD_RWLOCK_INIT = 0x50D - SYS_PTHREAD_RWLOCK_RDLOCK = 0x50E - SYS_PTHREAD_RWLOCK_TRYRDLOCK = 0x50F - SYS_ATOI = 0x051 - SYS___FP_CLASS = 0x51D - SYS___FP_CLR_FLAG = 0x51A - SYS___FP_FINITE = 0x51E - SYS___FP_ISNAN = 0x51F - SYS___FP_RAISE_XCP = 0x51C - SYS___FP_READ_FLAG = 0x51B - SYS_RAND = 0x052 - SYS_SIGTIMEDWAIT = 0x52D - SYS_SIGWAITINFO = 0x52E - SYS___CHKBFP = 0x52F - SYS___FPC_RS = 0x52C - SYS___FPC_RW = 0x52A - SYS___FPC_SM = 0x52B - SYS_STRTOD = 0x053 - SYS_STRTOL = 0x054 - SYS_STRTOUL = 0x055 - SYS_MALLOC = 0x056 - SYS_SRAND = 0x057 - SYS_CALLOC = 0x058 - SYS_FREE = 0x059 - SYS___OSENV = 0x59F - SYS___W_PIOCTL = 0x59E - SYS_LONGJMP = 0x060 - SYS___FLOORF_B = 0x60A - SYS___FLOORL_B = 0x60B - SYS___FREXPF_B = 0x60C - SYS___FREXPL_B = 0x60D - SYS___LDEXPF_B = 0x60E - SYS___LDEXPL_B = 0x60F - SYS_SIGNAL = 0x061 - SYS___ATAN2F_B = 0x61A - SYS___ATAN2L_B = 0x61B - SYS___COSHF_B = 0x61C - SYS___COSHL_B = 0x61D - SYS___EXPF_B = 0x61E - SYS___EXPL_B = 0x61F - SYS_TMPNAM = 0x062 - SYS___ABSF_B = 0x62A - SYS___ABSL_B = 0x62C - SYS___ABS_B = 0x62B - SYS___FMODF_B = 0x62D - SYS___FMODL_B = 0x62E - SYS___MODFF_B = 0x62F - SYS_ATANL = 0x63A - SYS_CEILF = 0x63B - SYS_CEILL = 0x63C - SYS_COSF = 0x63D - SYS_COSHF = 0x63F - SYS_COSL = 0x63E - SYS_REMOVE = 0x063 - SYS_POWL = 0x64A - SYS_RENAME = 0x064 - SYS_SINF = 0x64B - SYS_SINHF = 0x64F - SYS_SINL = 0x64C - SYS_SQRTF = 0x64D - SYS_SQRTL = 0x64E - SYS_BTOWC = 0x65F - SYS_FREXPL = 0x65A - SYS_LDEXPF = 0x65B - SYS_LDEXPL = 0x65C - SYS_MODFF = 0x65D - SYS_MODFL = 0x65E - SYS_TMPFILE = 0x065 - SYS_FREOPEN = 0x066 - SYS___CHARMAP_INIT_A = 0x66E - SYS___GETHOSTBYADDR_R_A = 0x66C - SYS___GETHOSTBYNAME_A = 0x66A - 
SYS___GETHOSTBYNAME_R_A = 0x66D - SYS___MBLEN_A = 0x66F - SYS___RES_INIT_A = 0x66B - SYS_FCLOSE = 0x067 - SYS___GETGRGID_R_A = 0x67D - SYS___WCSTOMBS_A = 0x67A - SYS___WCSTOMBS_STD_A = 0x67B - SYS___WCSWIDTH_A = 0x67C - SYS___WCSWIDTH_ASIA = 0x67F - SYS___WCSWIDTH_STD_A = 0x67E - SYS_FFLUSH = 0x068 - SYS___GETLOGIN_R_A = 0x68E - SYS___GETPWNAM_R_A = 0x68C - SYS___GETPWUID_R_A = 0x68D - SYS___TTYNAME_R_A = 0x68F - SYS___WCWIDTH_ASIA = 0x68B - SYS___WCWIDTH_STD_A = 0x68A - SYS_FOPEN = 0x069 - SYS___REGEXEC_A = 0x69A - SYS___REGEXEC_STD_A = 0x69B - SYS___REGFREE_A = 0x69C - SYS___REGFREE_STD_A = 0x69D - SYS___STRCOLL_A = 0x69E - SYS___STRCOLL_C_A = 0x69F - SYS_SCANF = 0x070 - SYS___A64L_A = 0x70C - SYS___ECVT_A = 0x70D - SYS___FCVT_A = 0x70E - SYS___GCVT_A = 0x70F - SYS___STRTOUL_A = 0x70A - SYS_____AE_CORRESTBL_QUERY_A = 0x70B - SYS_SPRINTF = 0x071 - SYS___ACCESS_A = 0x71F - SYS___CATOPEN_A = 0x71E - SYS___GETOPT_A = 0x71D - SYS___REALPATH_A = 0x71A - SYS___SETENV_A = 0x71B - SYS___SYSTEM_A = 0x71C - SYS_FGETC = 0x072 - SYS___GAI_STRERROR_A = 0x72F - SYS___RMDIR_A = 0x72A - SYS___STATVFS_A = 0x72B - SYS___SYMLINK_A = 0x72C - SYS___TRUNCATE_A = 0x72D - SYS___UNLINK_A = 0x72E - SYS_VFPRINTF = 0x073 - SYS___ISSPACE_A = 0x73A - SYS___ISUPPER_A = 0x73B - SYS___ISWALNUM_A = 0x73F - SYS___ISXDIGIT_A = 0x73C - SYS___TOLOWER_A = 0x73D - SYS___TOUPPER_A = 0x73E - SYS_VPRINTF = 0x074 - SYS___CONFSTR_A = 0x74B - SYS___FDOPEN_A = 0x74E - SYS___FLDATA_A = 0x74F - SYS___FTOK_A = 0x74C - SYS___ISWXDIGIT_A = 0x74A - SYS___MKTEMP_A = 0x74D - SYS_VSPRINTF = 0x075 - SYS___GETGRGID_A = 0x75A - SYS___GETGRNAM_A = 0x75B - SYS___GETGROUPSBYNAME_A = 0x75C - SYS___GETHOSTENT_A = 0x75D - SYS___GETHOSTNAME_A = 0x75E - SYS___GETLOGIN_A = 0x75F - SYS_GETC = 0x076 - SYS___CREATEWORKUNIT_A = 0x76A - SYS___CTERMID_A = 0x76B - SYS___FMTMSG_A = 0x76C - SYS___INITGROUPS_A = 0x76D - SYS___MSGRCV_A = 0x76F - SYS_____LOGIN_A = 0x76E - SYS_FGETS = 0x077 - SYS___STRCASECMP_A = 0x77B - SYS___STRNCASECMP_A = 0x77C - SYS___TTYNAME_A = 0x77D - SYS___UNAME_A = 0x77E - SYS___UTIMES_A = 0x77F - SYS_____SERVER_PWU_A = 0x77A - SYS_FPUTC = 0x078 - SYS___CREAT_O_A = 0x78E - SYS___ENVNA = 0x78F - SYS___FREAD_A = 0x78A - SYS___FWRITE_A = 0x78B - SYS___ISASCII = 0x78D - SYS___OPEN_O_A = 0x78C - SYS_FPUTS = 0x079 - SYS___ASCTIME_A = 0x79C - SYS___CTIME_A = 0x79D - SYS___GETDATE_A = 0x79E - SYS___GETSERVBYPORT_A = 0x79A - SYS___GETSERVENT_A = 0x79B - SYS___TZSET_A = 0x79F - SYS_ACL_FROM_TEXT = 0x80C - SYS_ACL_SET_FD = 0x80A - SYS_ACL_SET_FILE = 0x80B - SYS_ACL_SORT = 0x80E - SYS_ACL_TO_TEXT = 0x80D - SYS_UNGETC = 0x080 - SYS___SHUTDOWN_REGISTRATION = 0x80F - SYS_FREAD = 0x081 - SYS_FREEADDRINFO = 0x81A - SYS_GAI_STRERROR = 0x81B - SYS_REXEC_AF = 0x81C - SYS___DYNALLOC_A = 0x81F - SYS___POE = 0x81D - SYS_WCSTOMBS = 0x082 - SYS___INET_ADDR_A = 0x82F - SYS___NLIST_A = 0x82A - SYS_____TCGETCP_A = 0x82B - SYS_____TCSETCP_A = 0x82C - SYS_____W_PIOCTL_A = 0x82E - SYS_MBTOWC = 0x083 - SYS___CABEND = 0x83D - SYS___LE_CIB_GET = 0x83E - SYS___RECVMSG_A = 0x83B - SYS___SENDMSG_A = 0x83A - SYS___SET_LAA_FOR_JIT = 0x83F - SYS_____LCHATTR_A = 0x83C - SYS_WCTOMB = 0x084 - SYS___CBRTL_B = 0x84A - SYS___COPYSIGNF_B = 0x84B - SYS___COPYSIGNL_B = 0x84C - SYS___COTANF_B = 0x84D - SYS___COTANL_B = 0x84F - SYS___COTAN_B = 0x84E - SYS_MBSTOWCS = 0x085 - SYS___LOG1PL_B = 0x85A - SYS___LOG2F_B = 0x85B - SYS___LOG2L_B = 0x85D - SYS___LOG2_B = 0x85C - SYS___REMAINDERF_B = 0x85E - SYS___REMAINDERL_B = 0x85F - SYS_ACOSHF = 0x86E - SYS_ACOSHL = 0x86F - SYS_WCSCPY = 0x086 - 
SYS___ERFCF_B = 0x86D - SYS___ERFF_B = 0x86C - SYS___LROUNDF_B = 0x86A - SYS___LROUND_B = 0x86B - SYS_COTANL = 0x87A - SYS_EXP2F = 0x87B - SYS_EXP2L = 0x87C - SYS_EXPM1F = 0x87D - SYS_EXPM1L = 0x87E - SYS_FDIMF = 0x87F - SYS_WCSCAT = 0x087 - SYS___COTANL = 0x87A - SYS_REMAINDERF = 0x88A - SYS_REMAINDERL = 0x88B - SYS_REMAINDF = 0x88A - SYS_REMAINDL = 0x88B - SYS_REMQUO = 0x88D - SYS_REMQUOF = 0x88C - SYS_REMQUOL = 0x88E - SYS_TGAMMAF = 0x88F - SYS_WCSCHR = 0x088 - SYS_ERFCF = 0x89B - SYS_ERFCL = 0x89C - SYS_ERFL = 0x89A - SYS_EXP2 = 0x89E - SYS_WCSCMP = 0x089 - SYS___EXP2_B = 0x89D - SYS___FAR_JUMP = 0x89F - SYS_ABS = 0x090 - SYS___ERFCL_H = 0x90A - SYS___EXPF_H = 0x90C - SYS___EXPL_H = 0x90D - SYS___EXPM1_H = 0x90E - SYS___EXP_H = 0x90B - SYS___FDIM_H = 0x90F - SYS_DIV = 0x091 - SYS___LOG2F_H = 0x91F - SYS___LOG2_H = 0x91E - SYS___LOGB_H = 0x91D - SYS___LOGF_H = 0x91B - SYS___LOGL_H = 0x91C - SYS___LOG_H = 0x91A - SYS_LABS = 0x092 - SYS___POWL_H = 0x92A - SYS___REMAINDER_H = 0x92B - SYS___RINT_H = 0x92C - SYS___SCALB_H = 0x92D - SYS___SINF_H = 0x92F - SYS___SIN_H = 0x92E - SYS_STRNCPY = 0x093 - SYS___TANHF_H = 0x93B - SYS___TANHL_H = 0x93C - SYS___TANH_H = 0x93A - SYS___TGAMMAF_H = 0x93E - SYS___TGAMMA_H = 0x93D - SYS___TRUNC_H = 0x93F - SYS_MEMCPY = 0x094 - SYS_VFWSCANF = 0x94A - SYS_VSWSCANF = 0x94E - SYS_VWSCANF = 0x94C - SYS_INET6_RTH_ADD = 0x95D - SYS_INET6_RTH_INIT = 0x95C - SYS_INET6_RTH_REVERSE = 0x95E - SYS_INET6_RTH_SEGMENTS = 0x95F - SYS_INET6_RTH_SPACE = 0x95B - SYS_MEMMOVE = 0x095 - SYS_WCSTOLD = 0x95A - SYS_STRCPY = 0x096 - SYS_STRCMP = 0x097 - SYS_CABS = 0x98E - SYS_STRCAT = 0x098 - SYS___CABS_B = 0x98F - SYS___POW_II = 0x98A - SYS___POW_II_B = 0x98B - SYS___POW_II_H = 0x98C - SYS_CACOSF = 0x99A - SYS_CACOSL = 0x99D - SYS_STRNCAT = 0x099 - SYS___CACOSF_B = 0x99B - SYS___CACOSF_H = 0x99C - SYS___CACOSL_B = 0x99E - SYS___CACOSL_H = 0x99F - SYS_ISWALPHA = 0x100 - SYS_ISWBLANK = 0x101 - SYS___ISWBLK = 0x101 - SYS_ISWCNTRL = 0x102 - SYS_ISWDIGIT = 0x103 - SYS_ISWGRAPH = 0x104 - SYS_ISWLOWER = 0x105 - SYS_ISWPRINT = 0x106 - SYS_ISWPUNCT = 0x107 - SYS_ISWSPACE = 0x108 - SYS_ISWUPPER = 0x109 - SYS_WCTOB = 0x110 - SYS_MBRLEN = 0x111 - SYS_MBRTOWC = 0x112 - SYS_MBSRTOWC = 0x113 - SYS_MBSRTOWCS = 0x113 - SYS_WCRTOMB = 0x114 - SYS_WCSRTOMB = 0x115 - SYS_WCSRTOMBS = 0x115 - SYS___CSID = 0x116 - SYS___WCSID = 0x117 - SYS_STRPTIME = 0x118 - SYS___STRPTM = 0x118 - SYS_STRFMON = 0x119 - SYS_WCSCOLL = 0x130 - SYS_WCSXFRM = 0x131 - SYS_WCSWIDTH = 0x132 - SYS_WCWIDTH = 0x133 - SYS_WCSFTIME = 0x134 - SYS_SWPRINTF = 0x135 - SYS_VSWPRINT = 0x136 - SYS_VSWPRINTF = 0x136 - SYS_SWSCANF = 0x137 - SYS_REGCOMP = 0x138 - SYS_REGEXEC = 0x139 - SYS_GETWC = 0x140 - SYS_GETWCHAR = 0x141 - SYS_PUTWC = 0x142 - SYS_PUTWCHAR = 0x143 - SYS_UNGETWC = 0x144 - SYS_ICONV_OPEN = 0x145 - SYS_ICONV = 0x146 - SYS_ICONV_CLOSE = 0x147 - SYS_COLLRANGE = 0x150 - SYS_CCLASS = 0x151 - SYS_COLLORDER = 0x152 - SYS___DEMANGLE = 0x154 - SYS_FDOPEN = 0x155 - SYS___ERRNO = 0x156 - SYS___ERRNO2 = 0x157 - SYS___TERROR = 0x158 - SYS_MAXCOLL = 0x169 - SYS_DLLLOAD = 0x170 - SYS__EXIT = 0x174 - SYS_ACCESS = 0x175 - SYS_ALARM = 0x176 - SYS_CFGETISPEED = 0x177 - SYS_CFGETOSPEED = 0x178 - SYS_CFSETISPEED = 0x179 - SYS_CREAT = 0x180 - SYS_CTERMID = 0x181 - SYS_DUP = 0x182 - SYS_DUP2 = 0x183 - SYS_EXECL = 0x184 - SYS_EXECLE = 0x185 - SYS_EXECLP = 0x186 - SYS_EXECV = 0x187 - SYS_EXECVE = 0x188 - SYS_EXECVP = 0x189 - SYS_FSTAT = 0x190 - SYS_FSYNC = 0x191 - SYS_FTRUNCATE = 0x192 - SYS_GETCWD = 0x193 - SYS_GETEGID = 0x194 - SYS_GETEUID = 0x195 - 
SYS_GETGID = 0x196 - SYS_GETGRGID = 0x197 - SYS_GETGRNAM = 0x198 - SYS_GETGROUPS = 0x199 - SYS_PTHREAD_MUTEXATTR_DESTROY = 0x200 - SYS_PTHREAD_MUTEXATTR_SETKIND_NP = 0x201 - SYS_PTHREAD_MUTEXATTR_GETKIND_NP = 0x202 - SYS_PTHREAD_MUTEX_INIT = 0x203 - SYS_PTHREAD_MUTEX_DESTROY = 0x204 - SYS_PTHREAD_MUTEX_LOCK = 0x205 - SYS_PTHREAD_MUTEX_TRYLOCK = 0x206 - SYS_PTHREAD_MUTEX_UNLOCK = 0x207 - SYS_PTHREAD_ONCE = 0x209 - SYS_TW_OPEN = 0x210 - SYS_TW_FCNTL = 0x211 - SYS_PTHREAD_JOIN_D4_NP = 0x212 - SYS_PTHREAD_CONDATTR_SETKIND_NP = 0x213 - SYS_PTHREAD_CONDATTR_GETKIND_NP = 0x214 - SYS_EXTLINK_NP = 0x215 - SYS___PASSWD = 0x216 - SYS_SETGROUPS = 0x217 - SYS_INITGROUPS = 0x218 - SYS_WCSRCHR = 0x240 - SYS_SVC99 = 0x241 - SYS___SVC99 = 0x241 - SYS_WCSWCS = 0x242 - SYS_LOCALECO = 0x243 - SYS_LOCALECONV = 0x243 - SYS___LIBREL = 0x244 - SYS_RELEASE = 0x245 - SYS___RLSE = 0x245 - SYS_FLOCATE = 0x246 - SYS___FLOCT = 0x246 - SYS_FDELREC = 0x247 - SYS___FDLREC = 0x247 - SYS_FETCH = 0x248 - SYS___FETCH = 0x248 - SYS_QSORT = 0x249 - SYS___CLEANUPCATCH = 0x260 - SYS___CATCHMATCH = 0x261 - SYS___CLEAN2UPCATCH = 0x262 - SYS_GETPRIORITY = 0x270 - SYS_NICE = 0x271 - SYS_SETPRIORITY = 0x272 - SYS_GETITIMER = 0x273 - SYS_SETITIMER = 0x274 - SYS_MSGCTL = 0x275 - SYS_MSGGET = 0x276 - SYS_MSGRCV = 0x277 - SYS_MSGSND = 0x278 - SYS_MSGXRCV = 0x279 - SYS___MSGXR = 0x279 - SYS_SHMGET = 0x280 - SYS___GETIPC = 0x281 - SYS_SETGRENT = 0x282 - SYS_GETGRENT = 0x283 - SYS_ENDGRENT = 0x284 - SYS_SETPWENT = 0x285 - SYS_GETPWENT = 0x286 - SYS_ENDPWENT = 0x287 - SYS_BSD_SIGNAL = 0x288 - SYS_KILLPG = 0x289 - SYS_SIGSET = 0x290 - SYS_SIGSTACK = 0x291 - SYS_GETRLIMIT = 0x292 - SYS_SETRLIMIT = 0x293 - SYS_GETRUSAGE = 0x294 - SYS_MMAP = 0x295 - SYS_MPROTECT = 0x296 - SYS_MSYNC = 0x297 - SYS_MUNMAP = 0x298 - SYS_CONFSTR = 0x299 - SYS___NDMTRM = 0x300 - SYS_FTOK = 0x301 - SYS_BASENAME = 0x302 - SYS_DIRNAME = 0x303 - SYS_GETDTABLESIZE = 0x304 - SYS_MKSTEMP = 0x305 - SYS_MKTEMP = 0x306 - SYS_NFTW = 0x307 - SYS_GETWD = 0x308 - SYS_LOCKF = 0x309 - SYS_WORDEXP = 0x310 - SYS_WORDFREE = 0x311 - SYS_GETPGID = 0x312 - SYS_GETSID = 0x313 - SYS___UTMPXNAME = 0x314 - SYS_CUSERID = 0x315 - SYS_GETPASS = 0x316 - SYS_FNMATCH = 0x317 - SYS_FTW = 0x318 - SYS_GETW = 0x319 - SYS_ACOSH = 0x320 - SYS_ASINH = 0x321 - SYS_ATANH = 0x322 - SYS_CBRT = 0x323 - SYS_EXPM1 = 0x324 - SYS_ILOGB = 0x325 - SYS_LOGB = 0x326 - SYS_LOG1P = 0x327 - SYS_NEXTAFTER = 0x328 - SYS_RINT = 0x329 - SYS_SPAWN = 0x330 - SYS_SPAWNP = 0x331 - SYS_GETLOGIN_UU = 0x332 - SYS_ECVT = 0x333 - SYS_FCVT = 0x334 - SYS_GCVT = 0x335 - SYS_ACCEPT = 0x336 - SYS_BIND = 0x337 - SYS_CONNECT = 0x338 - SYS_ENDHOSTENT = 0x339 - SYS_GETHOSTENT = 0x340 - SYS_GETHOSTID = 0x341 - SYS_GETHOSTNAME = 0x342 - SYS_GETNETBYADDR = 0x343 - SYS_GETNETBYNAME = 0x344 - SYS_GETNETENT = 0x345 - SYS_GETPEERNAME = 0x346 - SYS_GETPROTOBYNAME = 0x347 - SYS_GETPROTOBYNUMBER = 0x348 - SYS_GETPROTOENT = 0x349 - SYS_INET_LNAOF = 0x350 - SYS_INET_MAKEADDR = 0x351 - SYS_INET_NETOF = 0x352 - SYS_INET_NETWORK = 0x353 - SYS_INET_NTOA = 0x354 - SYS_IOCTL = 0x355 - SYS_LISTEN = 0x356 - SYS_READV = 0x357 - SYS_RECV = 0x358 - SYS_RECVFROM = 0x359 - SYS_SETHOSTENT = 0x360 - SYS_SETNETENT = 0x361 - SYS_SETPEER = 0x362 - SYS_SETPROTOENT = 0x363 - SYS_SETSERVENT = 0x364 - SYS_SETSOCKOPT = 0x365 - SYS_SHUTDOWN = 0x366 - SYS_SOCKET = 0x367 - SYS_SOCKETPAIR = 0x368 - SYS_WRITEV = 0x369 - SYS_ENDNETENT = 0x370 - SYS_CLOSELOG = 0x371 - SYS_OPENLOG = 0x372 - SYS_SETLOGMASK = 0x373 - SYS_SYSLOG = 0x374 - SYS_PTSNAME = 0x375 - SYS_SETREUID = 0x376 - 
SYS_SETREGID = 0x377 - SYS_REALPATH = 0x378 - SYS___SIGNGAM = 0x379 - SYS_POLL = 0x380 - SYS_REXEC = 0x381 - SYS___ISASCII2 = 0x382 - SYS___TOASCII2 = 0x383 - SYS_CHPRIORITY = 0x384 - SYS_PTHREAD_ATTR_SETSYNCTYPE_NP = 0x385 - SYS_PTHREAD_ATTR_GETSYNCTYPE_NP = 0x386 - SYS_PTHREAD_SET_LIMIT_NP = 0x387 - SYS___STNETENT = 0x388 - SYS___STPROTOENT = 0x389 - SYS___SELECT1 = 0x390 - SYS_PTHREAD_SECURITY_NP = 0x391 - SYS___CHECK_RESOURCE_AUTH_NP = 0x392 - SYS___CONVERT_ID_NP = 0x393 - SYS___OPENVMREL = 0x394 - SYS_WMEMCHR = 0x395 - SYS_WMEMCMP = 0x396 - SYS_WMEMCPY = 0x397 - SYS_WMEMMOVE = 0x398 - SYS_WMEMSET = 0x399 - SYS___FPUTWC = 0x400 - SYS___PUTWC = 0x401 - SYS___PWCHAR = 0x402 - SYS___WCSFTM = 0x403 - SYS___WCSTOK = 0x404 - SYS___WCWDTH = 0x405 - SYS_T_ACCEPT = 0x409 - SYS_T_GETINFO = 0x410 - SYS_T_GETPROTADDR = 0x411 - SYS_T_GETSTATE = 0x412 - SYS_T_LISTEN = 0x413 - SYS_T_LOOK = 0x414 - SYS_T_OPEN = 0x415 - SYS_T_OPTMGMT = 0x416 - SYS_T_RCV = 0x417 - SYS_T_RCVCONNECT = 0x418 - SYS_T_RCVDIS = 0x419 - SYS_T_SNDUDATA = 0x420 - SYS_T_STRERROR = 0x421 - SYS_T_SYNC = 0x422 - SYS_T_UNBIND = 0x423 - SYS___T_ERRNO = 0x424 - SYS___RECVMSG2 = 0x425 - SYS___SENDMSG2 = 0x426 - SYS_FATTACH = 0x427 - SYS_FDETACH = 0x428 - SYS_GETMSG = 0x429 - SYS_GETCONTEXT = 0x430 - SYS_SETCONTEXT = 0x431 - SYS_MAKECONTEXT = 0x432 - SYS_SWAPCONTEXT = 0x433 - SYS_PTHREAD_GETSPECIFIC_D8_NP = 0x434 - SYS_GETCLIENTID = 0x470 - SYS___GETCLIENTID = 0x471 - SYS_GETSTABLESIZE = 0x472 - SYS_GETIBMOPT = 0x473 - SYS_GETIBMSOCKOPT = 0x474 - SYS_GIVESOCKET = 0x475 - SYS_IBMSFLUSH = 0x476 - SYS_MAXDESC = 0x477 - SYS_SETIBMOPT = 0x478 - SYS_SETIBMSOCKOPT = 0x479 - SYS___SERVER_PWU = 0x480 - SYS_PTHREAD_TAG_NP = 0x481 - SYS___CONSOLE = 0x482 - SYS___WSINIT = 0x483 - SYS___IPTCPN = 0x489 - SYS___SERVER_CLASSIFY = 0x490 - SYS___HEAPRPT = 0x496 - SYS___ISBFP = 0x500 - SYS___FP_CAST = 0x501 - SYS___CERTIFICATE = 0x502 - SYS_SEND_FILE = 0x503 - SYS_AIO_CANCEL = 0x504 - SYS_AIO_ERROR = 0x505 - SYS_AIO_READ = 0x506 - SYS_AIO_RETURN = 0x507 - SYS_AIO_SUSPEND = 0x508 - SYS_AIO_WRITE = 0x509 - SYS_PTHREAD_RWLOCK_TRYWRLOCK = 0x510 - SYS_PTHREAD_RWLOCK_UNLOCK = 0x511 - SYS_PTHREAD_RWLOCK_WRLOCK = 0x512 - SYS_PTHREAD_RWLOCKATTR_GETPSHARED = 0x513 - SYS_PTHREAD_RWLOCKATTR_SETPSHARED = 0x514 - SYS_PTHREAD_RWLOCKATTR_INIT = 0x515 - SYS_PTHREAD_RWLOCKATTR_DESTROY = 0x516 - SYS___CTTBL = 0x517 - SYS_PTHREAD_MUTEXATTR_SETTYPE = 0x518 - SYS_PTHREAD_MUTEXATTR_GETTYPE = 0x519 - SYS___FP_UNORDERED = 0x520 - SYS___FP_READ_RND = 0x521 - SYS___FP_READ_RND_B = 0x522 - SYS___FP_SWAP_RND = 0x523 - SYS___FP_SWAP_RND_B = 0x524 - SYS___FP_LEVEL = 0x525 - SYS___FP_BTOH = 0x526 - SYS___FP_HTOB = 0x527 - SYS___FPC_RD = 0x528 - SYS___FPC_WR = 0x529 - SYS_PTHREAD_SETCANCELTYPE = 0x600 - SYS_PTHREAD_TESTCANCEL = 0x601 - SYS___ATANF_B = 0x602 - SYS___ATANL_B = 0x603 - SYS___CEILF_B = 0x604 - SYS___CEILL_B = 0x605 - SYS___COSF_B = 0x606 - SYS___COSL_B = 0x607 - SYS___FABSF_B = 0x608 - SYS___FABSL_B = 0x609 - SYS___SINF_B = 0x610 - SYS___SINL_B = 0x611 - SYS___TANF_B = 0x612 - SYS___TANL_B = 0x613 - SYS___TANHF_B = 0x614 - SYS___TANHL_B = 0x615 - SYS___ACOSF_B = 0x616 - SYS___ACOSL_B = 0x617 - SYS___ASINF_B = 0x618 - SYS___ASINL_B = 0x619 - SYS___LOGF_B = 0x620 - SYS___LOGL_B = 0x621 - SYS___LOG10F_B = 0x622 - SYS___LOG10L_B = 0x623 - SYS___POWF_B = 0x624 - SYS___POWL_B = 0x625 - SYS___SINHF_B = 0x626 - SYS___SINHL_B = 0x627 - SYS___SQRTF_B = 0x628 - SYS___SQRTL_B = 0x629 - SYS___MODFL_B = 0x630 - SYS_ABSF = 0x631 - SYS_ABSL = 0x632 - SYS_ACOSF = 0x633 - SYS_ACOSL = 0x634 - 
SYS_ASINF = 0x635 - SYS_ASINL = 0x636 - SYS_ATAN2F = 0x637 - SYS_ATAN2L = 0x638 - SYS_ATANF = 0x639 - SYS_COSHL = 0x640 - SYS_EXPF = 0x641 - SYS_EXPL = 0x642 - SYS_TANHF = 0x643 - SYS_TANHL = 0x644 - SYS_LOG10F = 0x645 - SYS_LOG10L = 0x646 - SYS_LOGF = 0x647 - SYS_LOGL = 0x648 - SYS_POWF = 0x649 - SYS_SINHL = 0x650 - SYS_TANF = 0x651 - SYS_TANL = 0x652 - SYS_FABSF = 0x653 - SYS_FABSL = 0x654 - SYS_FLOORF = 0x655 - SYS_FLOORL = 0x656 - SYS_FMODF = 0x657 - SYS_FMODL = 0x658 - SYS_FREXPF = 0x659 - SYS___CHATTR = 0x660 - SYS___FCHATTR = 0x661 - SYS___TOCCSID = 0x662 - SYS___CSNAMETYPE = 0x663 - SYS___TOCSNAME = 0x664 - SYS___CCSIDTYPE = 0x665 - SYS___AE_CORRESTBL_QUERY = 0x666 - SYS___AE_AUTOCONVERT_STATE = 0x667 - SYS_DN_FIND = 0x668 - SYS___GETHOSTBYADDR_A = 0x669 - SYS___MBLEN_SB_A = 0x670 - SYS___MBLEN_STD_A = 0x671 - SYS___MBLEN_UTF = 0x672 - SYS___MBSTOWCS_A = 0x673 - SYS___MBSTOWCS_STD_A = 0x674 - SYS___MBTOWC_A = 0x675 - SYS___MBTOWC_ISO1 = 0x676 - SYS___MBTOWC_SBCS = 0x677 - SYS___MBTOWC_MBCS = 0x678 - SYS___MBTOWC_UTF = 0x679 - SYS___CSID_A = 0x680 - SYS___CSID_STD_A = 0x681 - SYS___WCSID_A = 0x682 - SYS___WCSID_STD_A = 0x683 - SYS___WCTOMB_A = 0x684 - SYS___WCTOMB_ISO1 = 0x685 - SYS___WCTOMB_STD_A = 0x686 - SYS___WCTOMB_UTF = 0x687 - SYS___WCWIDTH_A = 0x688 - SYS___GETGRNAM_R_A = 0x689 - SYS___READDIR_R_A = 0x690 - SYS___E2A_S = 0x691 - SYS___FNMATCH_A = 0x692 - SYS___FNMATCH_C_A = 0x693 - SYS___EXECL_A = 0x694 - SYS___FNMATCH_STD_A = 0x695 - SYS___REGCOMP_A = 0x696 - SYS___REGCOMP_STD_A = 0x697 - SYS___REGERROR_A = 0x698 - SYS___REGERROR_STD_A = 0x699 - SYS___SWPRINTF_A = 0x700 - SYS___FSCANF_A = 0x701 - SYS___SCANF_A = 0x702 - SYS___SSCANF_A = 0x703 - SYS___SWSCANF_A = 0x704 - SYS___ATOF_A = 0x705 - SYS___ATOI_A = 0x706 - SYS___ATOL_A = 0x707 - SYS___STRTOD_A = 0x708 - SYS___STRTOL_A = 0x709 - SYS___L64A_A = 0x710 - SYS___STRERROR_A = 0x711 - SYS___PERROR_A = 0x712 - SYS___FETCH_A = 0x713 - SYS___GETENV_A = 0x714 - SYS___MKSTEMP_A = 0x717 - SYS___PTSNAME_A = 0x718 - SYS___PUTENV_A = 0x719 - SYS___CHDIR_A = 0x720 - SYS___CHOWN_A = 0x721 - SYS___CHROOT_A = 0x722 - SYS___GETCWD_A = 0x723 - SYS___GETWD_A = 0x724 - SYS___LCHOWN_A = 0x725 - SYS___LINK_A = 0x726 - SYS___PATHCONF_A = 0x727 - SYS___IF_NAMEINDEX_A = 0x728 - SYS___READLINK_A = 0x729 - SYS___EXTLINK_NP_A = 0x730 - SYS___ISALNUM_A = 0x731 - SYS___ISALPHA_A = 0x732 - SYS___A2E_S = 0x733 - SYS___ISCNTRL_A = 0x734 - SYS___ISDIGIT_A = 0x735 - SYS___ISGRAPH_A = 0x736 - SYS___ISLOWER_A = 0x737 - SYS___ISPRINT_A = 0x738 - SYS___ISPUNCT_A = 0x739 - SYS___ISWALPHA_A = 0x740 - SYS___A2E_L = 0x741 - SYS___ISWCNTRL_A = 0x742 - SYS___ISWDIGIT_A = 0x743 - SYS___ISWGRAPH_A = 0x744 - SYS___ISWLOWER_A = 0x745 - SYS___ISWPRINT_A = 0x746 - SYS___ISWPUNCT_A = 0x747 - SYS___ISWSPACE_A = 0x748 - SYS___ISWUPPER_A = 0x749 - SYS___REMOVE_A = 0x750 - SYS___RENAME_A = 0x751 - SYS___TMPNAM_A = 0x752 - SYS___FOPEN_A = 0x753 - SYS___FREOPEN_A = 0x754 - SYS___CUSERID_A = 0x755 - SYS___POPEN_A = 0x756 - SYS___TEMPNAM_A = 0x757 - SYS___FTW_A = 0x758 - SYS___GETGRENT_A = 0x759 - SYS___INET_NTOP_A = 0x760 - SYS___GETPASS_A = 0x761 - SYS___GETPWENT_A = 0x762 - SYS___GETPWNAM_A = 0x763 - SYS___GETPWUID_A = 0x764 - SYS_____CHECK_RESOURCE_AUTH_NP_A = 0x765 - SYS___CHECKSCHENV_A = 0x766 - SYS___CONNECTSERVER_A = 0x767 - SYS___CONNECTWORKMGR_A = 0x768 - SYS_____CONSOLE_A = 0x769 - SYS___MSGSND_A = 0x770 - SYS___MSGXRCV_A = 0x771 - SYS___NFTW_A = 0x772 - SYS_____PASSWD_A = 0x773 - SYS___PTHREAD_SECURITY_NP_A = 0x774 - SYS___QUERYMETRICS_A = 0x775 - SYS___QUERYSCHENV = 
0x776 - SYS___READV_A = 0x777 - SYS_____SERVER_CLASSIFY_A = 0x778 - SYS_____SERVER_INIT_A = 0x779 - SYS___W_GETPSENT_A = 0x780 - SYS___WRITEV_A = 0x781 - SYS___W_STATFS_A = 0x782 - SYS___W_STATVFS_A = 0x783 - SYS___FPUTC_A = 0x784 - SYS___PUTCHAR_A = 0x785 - SYS___PUTS_A = 0x786 - SYS___FGETS_A = 0x787 - SYS___GETS_A = 0x788 - SYS___FPUTS_A = 0x789 - SYS___PUTC_A = 0x790 - SYS___AE_THREAD_SETMODE = 0x791 - SYS___AE_THREAD_SWAPMODE = 0x792 - SYS___GETNETBYADDR_A = 0x793 - SYS___GETNETBYNAME_A = 0x794 - SYS___GETNETENT_A = 0x795 - SYS___GETPROTOBYNAME_A = 0x796 - SYS___GETPROTOBYNUMBER_A = 0x797 - SYS___GETPROTOENT_A = 0x798 - SYS___GETSERVBYNAME_A = 0x799 - SYS_ACL_FIRST_ENTRY = 0x800 - SYS_ACL_GET_ENTRY = 0x801 - SYS_ACL_VALID = 0x802 - SYS_ACL_CREATE_ENTRY = 0x803 - SYS_ACL_DELETE_ENTRY = 0x804 - SYS_ACL_UPDATE_ENTRY = 0x805 - SYS_ACL_DELETE_FD = 0x806 - SYS_ACL_DELETE_FILE = 0x807 - SYS_ACL_GET_FD = 0x808 - SYS_ACL_GET_FILE = 0x809 - SYS___ERFL_B = 0x810 - SYS___ERFCL_B = 0x811 - SYS___LGAMMAL_B = 0x812 - SYS___SETHOOKEVENTS = 0x813 - SYS_IF_NAMETOINDEX = 0x814 - SYS_IF_INDEXTONAME = 0x815 - SYS_IF_NAMEINDEX = 0x816 - SYS_IF_FREENAMEINDEX = 0x817 - SYS_GETADDRINFO = 0x818 - SYS_GETNAMEINFO = 0x819 - SYS___DYNFREE_A = 0x820 - SYS___RES_QUERY_A = 0x821 - SYS___RES_SEARCH_A = 0x822 - SYS___RES_QUERYDOMAIN_A = 0x823 - SYS___RES_MKQUERY_A = 0x824 - SYS___RES_SEND_A = 0x825 - SYS___DN_EXPAND_A = 0x826 - SYS___DN_SKIPNAME_A = 0x827 - SYS___DN_COMP_A = 0x828 - SYS___DN_FIND_A = 0x829 - SYS___INET_NTOA_A = 0x830 - SYS___INET_NETWORK_A = 0x831 - SYS___ACCEPT_A = 0x832 - SYS___ACCEPT_AND_RECV_A = 0x833 - SYS___BIND_A = 0x834 - SYS___CONNECT_A = 0x835 - SYS___GETPEERNAME_A = 0x836 - SYS___GETSOCKNAME_A = 0x837 - SYS___RECVFROM_A = 0x838 - SYS___SENDTO_A = 0x839 - SYS___LCHATTR = 0x840 - SYS___WRITEDOWN = 0x841 - SYS_PTHREAD_MUTEX_INIT2 = 0x842 - SYS___ACOSHF_B = 0x843 - SYS___ACOSHL_B = 0x844 - SYS___ASINHF_B = 0x845 - SYS___ASINHL_B = 0x846 - SYS___ATANHF_B = 0x847 - SYS___ATANHL_B = 0x848 - SYS___CBRTF_B = 0x849 - SYS___EXP2F_B = 0x850 - SYS___EXP2L_B = 0x851 - SYS___EXPM1F_B = 0x852 - SYS___EXPM1L_B = 0x853 - SYS___FDIMF_B = 0x854 - SYS___FDIM_B = 0x855 - SYS___FDIML_B = 0x856 - SYS___HYPOTF_B = 0x857 - SYS___HYPOTL_B = 0x858 - SYS___LOG1PF_B = 0x859 - SYS___REMQUOF_B = 0x860 - SYS___REMQUO_B = 0x861 - SYS___REMQUOL_B = 0x862 - SYS___TGAMMAF_B = 0x863 - SYS___TGAMMA_B = 0x864 - SYS___TGAMMAL_B = 0x865 - SYS___TRUNCF_B = 0x866 - SYS___TRUNC_B = 0x867 - SYS___TRUNCL_B = 0x868 - SYS___LGAMMAF_B = 0x869 - SYS_ASINHF = 0x870 - SYS_ASINHL = 0x871 - SYS_ATANHF = 0x872 - SYS_ATANHL = 0x873 - SYS_CBRTF = 0x874 - SYS_CBRTL = 0x875 - SYS_COPYSIGNF = 0x876 - SYS_CPYSIGNF = 0x876 - SYS_COPYSIGNL = 0x877 - SYS_CPYSIGNL = 0x877 - SYS_COTANF = 0x878 - SYS___COTANF = 0x878 - SYS_COTAN = 0x879 - SYS___COTAN = 0x879 - SYS_FDIM = 0x881 - SYS_FDIML = 0x882 - SYS_HYPOTF = 0x883 - SYS_HYPOTL = 0x884 - SYS_LOG1PF = 0x885 - SYS_LOG1PL = 0x886 - SYS_LOG2F = 0x887 - SYS_LOG2 = 0x888 - SYS_LOG2L = 0x889 - SYS_TGAMMA = 0x890 - SYS_TGAMMAL = 0x891 - SYS_TRUNCF = 0x892 - SYS_TRUNC = 0x893 - SYS_TRUNCL = 0x894 - SYS_LGAMMAF = 0x895 - SYS_LGAMMAL = 0x896 - SYS_LROUNDF = 0x897 - SYS_LROUND = 0x898 - SYS_ERFF = 0x899 - SYS___COSHF_H = 0x900 - SYS___COSHL_H = 0x901 - SYS___COTAN_H = 0x902 - SYS___COTANF_H = 0x903 - SYS___COTANL_H = 0x904 - SYS___ERF_H = 0x905 - SYS___ERFF_H = 0x906 - SYS___ERFL_H = 0x907 - SYS___ERFC_H = 0x908 - SYS___ERFCF_H = 0x909 - SYS___FDIMF_H = 0x910 - SYS___FDIML_H = 0x911 - SYS___FMOD_H = 0x912 - 
SYS___FMODF_H = 0x913 - SYS___FMODL_H = 0x914 - SYS___GAMMA_H = 0x915 - SYS___HYPOT_H = 0x916 - SYS___ILOGB_H = 0x917 - SYS___LGAMMA_H = 0x918 - SYS___LGAMMAF_H = 0x919 - SYS___LOG2L_H = 0x920 - SYS___LOG1P_H = 0x921 - SYS___LOG10_H = 0x922 - SYS___LOG10F_H = 0x923 - SYS___LOG10L_H = 0x924 - SYS___LROUND_H = 0x925 - SYS___LROUNDF_H = 0x926 - SYS___NEXTAFTER_H = 0x927 - SYS___POW_H = 0x928 - SYS___POWF_H = 0x929 - SYS___SINL_H = 0x930 - SYS___SINH_H = 0x931 - SYS___SINHF_H = 0x932 - SYS___SINHL_H = 0x933 - SYS___SQRT_H = 0x934 - SYS___SQRTF_H = 0x935 - SYS___SQRTL_H = 0x936 - SYS___TAN_H = 0x937 - SYS___TANF_H = 0x938 - SYS___TANL_H = 0x939 - SYS___TRUNCF_H = 0x940 - SYS___TRUNCL_H = 0x941 - SYS___COSH_H = 0x942 - SYS___LE_DEBUG_SET_RESUME_MCH = 0x943 - SYS_VFSCANF = 0x944 - SYS_VSCANF = 0x946 - SYS_VSSCANF = 0x948 - SYS_IMAXABS = 0x950 - SYS_IMAXDIV = 0x951 - SYS_STRTOIMAX = 0x952 - SYS_STRTOUMAX = 0x953 - SYS_WCSTOIMAX = 0x954 - SYS_WCSTOUMAX = 0x955 - SYS_ATOLL = 0x956 - SYS_STRTOF = 0x957 - SYS_STRTOLD = 0x958 - SYS_WCSTOF = 0x959 - SYS_INET6_RTH_GETADDR = 0x960 - SYS_INET6_OPT_INIT = 0x961 - SYS_INET6_OPT_APPEND = 0x962 - SYS_INET6_OPT_FINISH = 0x963 - SYS_INET6_OPT_SET_VAL = 0x964 - SYS_INET6_OPT_NEXT = 0x965 - SYS_INET6_OPT_FIND = 0x966 - SYS_INET6_OPT_GET_VAL = 0x967 - SYS___POW_I = 0x987 - SYS___POW_I_B = 0x988 - SYS___POW_I_H = 0x989 - SYS___CABS_H = 0x990 - SYS_CABSF = 0x991 - SYS___CABSF_B = 0x992 - SYS___CABSF_H = 0x993 - SYS_CABSL = 0x994 - SYS___CABSL_B = 0x995 - SYS___CABSL_H = 0x996 - SYS_CACOS = 0x997 - SYS___CACOS_B = 0x998 - SYS___CACOS_H = 0x999 + SYS_LOG = 0x17 // 23 + SYS_COSH = 0x18 // 24 + SYS_TANH = 0x19 // 25 + SYS_EXP = 0x1A // 26 + SYS_MODF = 0x1B // 27 + SYS_LOG10 = 0x1C // 28 + SYS_FREXP = 0x1D // 29 + SYS_LDEXP = 0x1E // 30 + SYS_CEIL = 0x1F // 31 + SYS_POW = 0x20 // 32 + SYS_SQRT = 0x21 // 33 + SYS_FLOOR = 0x22 // 34 + SYS_J1 = 0x23 // 35 + SYS_FABS = 0x24 // 36 + SYS_FMOD = 0x25 // 37 + SYS_J0 = 0x26 // 38 + SYS_YN = 0x27 // 39 + SYS_JN = 0x28 // 40 + SYS_Y0 = 0x29 // 41 + SYS_Y1 = 0x2A // 42 + SYS_HYPOT = 0x2B // 43 + SYS_ERF = 0x2C // 44 + SYS_ERFC = 0x2D // 45 + SYS_GAMMA = 0x2E // 46 + SYS_ISALPHA = 0x30 // 48 + SYS_ISALNUM = 0x31 // 49 + SYS_ISLOWER = 0x32 // 50 + SYS_ISCNTRL = 0x33 // 51 + SYS_ISDIGIT = 0x34 // 52 + SYS_ISGRAPH = 0x35 // 53 + SYS_ISUPPER = 0x36 // 54 + SYS_ISPRINT = 0x37 // 55 + SYS_ISPUNCT = 0x38 // 56 + SYS_ISSPACE = 0x39 // 57 + SYS_SETLOCAL = 0x3A // 58 + SYS_SETLOCALE = 0x3A // 58 + SYS_ISXDIGIT = 0x3B // 59 + SYS_TOLOWER = 0x3C // 60 + SYS_TOUPPER = 0x3D // 61 + SYS_ASIN = 0x3E // 62 + SYS_SIN = 0x3F // 63 + SYS_COS = 0x40 // 64 + SYS_TAN = 0x41 // 65 + SYS_SINH = 0x42 // 66 + SYS_ACOS = 0x43 // 67 + SYS_ATAN = 0x44 // 68 + SYS_ATAN2 = 0x45 // 69 + SYS_FTELL = 0x46 // 70 + SYS_FGETPOS = 0x47 // 71 + SYS_FSEEK = 0x48 // 72 + SYS_FSETPOS = 0x49 // 73 + SYS_FERROR = 0x4A // 74 + SYS_REWIND = 0x4B // 75 + SYS_CLEARERR = 0x4C // 76 + SYS_FEOF = 0x4D // 77 + SYS_ATOL = 0x4E // 78 + SYS_PERROR = 0x4F // 79 + SYS_ATOF = 0x50 // 80 + SYS_ATOI = 0x51 // 81 + SYS_RAND = 0x52 // 82 + SYS_STRTOD = 0x53 // 83 + SYS_STRTOL = 0x54 // 84 + SYS_STRTOUL = 0x55 // 85 + SYS_MALLOC = 0x56 // 86 + SYS_SRAND = 0x57 // 87 + SYS_CALLOC = 0x58 // 88 + SYS_FREE = 0x59 // 89 + SYS_EXIT = 0x5A // 90 + SYS_REALLOC = 0x5B // 91 + SYS_ABORT = 0x5C // 92 + SYS___ABORT = 0x5C // 92 + SYS_ATEXIT = 0x5D // 93 + SYS_RAISE = 0x5E // 94 + SYS_SETJMP = 0x5F // 95 + SYS_LONGJMP = 0x60 // 96 + SYS_SIGNAL = 0x61 // 97 + SYS_TMPNAM = 0x62 // 98 + SYS_REMOVE = 0x63 // 99 + 
SYS_RENAME = 0x64 // 100 + SYS_TMPFILE = 0x65 // 101 + SYS_FREOPEN = 0x66 // 102 + SYS_FCLOSE = 0x67 // 103 + SYS_FFLUSH = 0x68 // 104 + SYS_FOPEN = 0x69 // 105 + SYS_FSCANF = 0x6A // 106 + SYS_SETBUF = 0x6B // 107 + SYS_SETVBUF = 0x6C // 108 + SYS_FPRINTF = 0x6D // 109 + SYS_SSCANF = 0x6E // 110 + SYS_PRINTF = 0x6F // 111 + SYS_SCANF = 0x70 // 112 + SYS_SPRINTF = 0x71 // 113 + SYS_FGETC = 0x72 // 114 + SYS_VFPRINTF = 0x73 // 115 + SYS_VPRINTF = 0x74 // 116 + SYS_VSPRINTF = 0x75 // 117 + SYS_GETC = 0x76 // 118 + SYS_FGETS = 0x77 // 119 + SYS_FPUTC = 0x78 // 120 + SYS_FPUTS = 0x79 // 121 + SYS_PUTCHAR = 0x7A // 122 + SYS_GETCHAR = 0x7B // 123 + SYS_GETS = 0x7C // 124 + SYS_PUTC = 0x7D // 125 + SYS_FWRITE = 0x7E // 126 + SYS_PUTS = 0x7F // 127 + SYS_UNGETC = 0x80 // 128 + SYS_FREAD = 0x81 // 129 + SYS_WCSTOMBS = 0x82 // 130 + SYS_MBTOWC = 0x83 // 131 + SYS_WCTOMB = 0x84 // 132 + SYS_MBSTOWCS = 0x85 // 133 + SYS_WCSCPY = 0x86 // 134 + SYS_WCSCAT = 0x87 // 135 + SYS_WCSCHR = 0x88 // 136 + SYS_WCSCMP = 0x89 // 137 + SYS_WCSNCMP = 0x8A // 138 + SYS_WCSCSPN = 0x8B // 139 + SYS_WCSLEN = 0x8C // 140 + SYS_WCSNCAT = 0x8D // 141 + SYS_WCSSPN = 0x8E // 142 + SYS_WCSNCPY = 0x8F // 143 + SYS_ABS = 0x90 // 144 + SYS_DIV = 0x91 // 145 + SYS_LABS = 0x92 // 146 + SYS_STRNCPY = 0x93 // 147 + SYS_MEMCPY = 0x94 // 148 + SYS_MEMMOVE = 0x95 // 149 + SYS_STRCPY = 0x96 // 150 + SYS_STRCMP = 0x97 // 151 + SYS_STRCAT = 0x98 // 152 + SYS_STRNCAT = 0x99 // 153 + SYS_MEMCMP = 0x9A // 154 + SYS_MEMCHR = 0x9B // 155 + SYS_STRCOLL = 0x9C // 156 + SYS_STRNCMP = 0x9D // 157 + SYS_STRXFRM = 0x9E // 158 + SYS_STRRCHR = 0x9F // 159 + SYS_STRCHR = 0xA0 // 160 + SYS_STRCSPN = 0xA1 // 161 + SYS_STRPBRK = 0xA2 // 162 + SYS_MEMSET = 0xA3 // 163 + SYS_STRSPN = 0xA4 // 164 + SYS_STRSTR = 0xA5 // 165 + SYS_STRTOK = 0xA6 // 166 + SYS_DIFFTIME = 0xA7 // 167 + SYS_STRERROR = 0xA8 // 168 + SYS_STRLEN = 0xA9 // 169 + SYS_CLOCK = 0xAA // 170 + SYS_CTIME = 0xAB // 171 + SYS_MKTIME = 0xAC // 172 + SYS_TIME = 0xAD // 173 + SYS_ASCTIME = 0xAE // 174 + SYS_MBLEN = 0xAF // 175 + SYS_GMTIME = 0xB0 // 176 + SYS_LOCALTIM = 0xB1 // 177 + SYS_LOCALTIME = 0xB1 // 177 + SYS_STRFTIME = 0xB2 // 178 + SYS___GETCB = 0xB4 // 180 + SYS_FUPDATE = 0xB5 // 181 + SYS___FUPDT = 0xB5 // 181 + SYS_CLRMEMF = 0xBD // 189 + SYS___CLRMF = 0xBD // 189 + SYS_FETCHEP = 0xBF // 191 + SYS___FTCHEP = 0xBF // 191 + SYS_FLDATA = 0xC1 // 193 + SYS___FLDATA = 0xC1 // 193 + SYS_DYNFREE = 0xC2 // 194 + SYS___DYNFRE = 0xC2 // 194 + SYS_DYNALLOC = 0xC3 // 195 + SYS___DYNALL = 0xC3 // 195 + SYS___CDUMP = 0xC4 // 196 + SYS_CSNAP = 0xC5 // 197 + SYS___CSNAP = 0xC5 // 197 + SYS_CTRACE = 0xC6 // 198 + SYS___CTRACE = 0xC6 // 198 + SYS___CTEST = 0xC7 // 199 + SYS_SETENV = 0xC8 // 200 + SYS___SETENV = 0xC8 // 200 + SYS_CLEARENV = 0xC9 // 201 + SYS___CLRENV = 0xC9 // 201 + SYS___REGCOMP_STD = 0xEA // 234 + SYS_NL_LANGINFO = 0xFC // 252 + SYS_GETSYNTX = 0xFD // 253 + SYS_ISBLANK = 0xFE // 254 + SYS___ISBLNK = 0xFE // 254 + SYS_ISWALNUM = 0xFF // 255 + SYS_ISWALPHA = 0x100 // 256 + SYS_ISWBLANK = 0x101 // 257 + SYS___ISWBLK = 0x101 // 257 + SYS_ISWCNTRL = 0x102 // 258 + SYS_ISWDIGIT = 0x103 // 259 + SYS_ISWGRAPH = 0x104 // 260 + SYS_ISWLOWER = 0x105 // 261 + SYS_ISWPRINT = 0x106 // 262 + SYS_ISWPUNCT = 0x107 // 263 + SYS_ISWSPACE = 0x108 // 264 + SYS_ISWUPPER = 0x109 // 265 + SYS_ISWXDIGI = 0x10A // 266 + SYS_ISWXDIGIT = 0x10A // 266 + SYS_WCTYPE = 0x10B // 267 + SYS_ISWCTYPE = 0x10C // 268 + SYS_TOWLOWER = 0x10D // 269 + SYS_TOWUPPER = 0x10E // 270 + SYS_MBSINIT = 0x10F // 271 + SYS_WCTOB = 
0x110 // 272 + SYS_MBRLEN = 0x111 // 273 + SYS_MBRTOWC = 0x112 // 274 + SYS_MBSRTOWC = 0x113 // 275 + SYS_MBSRTOWCS = 0x113 // 275 + SYS_WCRTOMB = 0x114 // 276 + SYS_WCSRTOMB = 0x115 // 277 + SYS_WCSRTOMBS = 0x115 // 277 + SYS___CSID = 0x116 // 278 + SYS___WCSID = 0x117 // 279 + SYS_STRPTIME = 0x118 // 280 + SYS___STRPTM = 0x118 // 280 + SYS_STRFMON = 0x119 // 281 + SYS___RPMTCH = 0x11A // 282 + SYS_WCSSTR = 0x11B // 283 + SYS_WCSTOK = 0x12C // 300 + SYS_WCSTOL = 0x12D // 301 + SYS_WCSTOD = 0x12E // 302 + SYS_WCSTOUL = 0x12F // 303 + SYS_WCSCOLL = 0x130 // 304 + SYS_WCSXFRM = 0x131 // 305 + SYS_WCSWIDTH = 0x132 // 306 + SYS_WCWIDTH = 0x133 // 307 + SYS_WCSFTIME = 0x134 // 308 + SYS_SWPRINTF = 0x135 // 309 + SYS_VSWPRINT = 0x136 // 310 + SYS_VSWPRINTF = 0x136 // 310 + SYS_SWSCANF = 0x137 // 311 + SYS_REGCOMP = 0x138 // 312 + SYS_REGEXEC = 0x139 // 313 + SYS_REGFREE = 0x13A // 314 + SYS_REGERROR = 0x13B // 315 + SYS_FGETWC = 0x13C // 316 + SYS_FGETWS = 0x13D // 317 + SYS_FPUTWC = 0x13E // 318 + SYS_FPUTWS = 0x13F // 319 + SYS_GETWC = 0x140 // 320 + SYS_GETWCHAR = 0x141 // 321 + SYS_PUTWC = 0x142 // 322 + SYS_PUTWCHAR = 0x143 // 323 + SYS_UNGETWC = 0x144 // 324 + SYS_ICONV_OPEN = 0x145 // 325 + SYS_ICONV = 0x146 // 326 + SYS_ICONV_CLOSE = 0x147 // 327 + SYS_ISMCCOLLEL = 0x14C // 332 + SYS_STRTOCOLL = 0x14D // 333 + SYS_COLLTOSTR = 0x14E // 334 + SYS_COLLEQUIV = 0x14F // 335 + SYS_COLLRANGE = 0x150 // 336 + SYS_CCLASS = 0x151 // 337 + SYS_COLLORDER = 0x152 // 338 + SYS___DEMANGLE = 0x154 // 340 + SYS_FDOPEN = 0x155 // 341 + SYS___ERRNO = 0x156 // 342 + SYS___ERRNO2 = 0x157 // 343 + SYS___TERROR = 0x158 // 344 + SYS_MAXCOLL = 0x169 // 361 + SYS_GETMCCOLL = 0x16A // 362 + SYS_GETWMCCOLL = 0x16B // 363 + SYS___ERR2AD = 0x16C // 364 + SYS_DLLQUERYFN = 0x16D // 365 + SYS_DLLQUERYVAR = 0x16E // 366 + SYS_DLLFREE = 0x16F // 367 + SYS_DLLLOAD = 0x170 // 368 + SYS__EXIT = 0x174 // 372 + SYS_ACCESS = 0x175 // 373 + SYS_ALARM = 0x176 // 374 + SYS_CFGETISPEED = 0x177 // 375 + SYS_CFGETOSPEED = 0x178 // 376 + SYS_CFSETISPEED = 0x179 // 377 + SYS_CFSETOSPEED = 0x17A // 378 + SYS_CHDIR = 0x17B // 379 + SYS_CHMOD = 0x17C // 380 + SYS_CHOWN = 0x17D // 381 + SYS_CLOSE = 0x17E // 382 + SYS_CLOSEDIR = 0x17F // 383 + SYS_CREAT = 0x180 // 384 + SYS_CTERMID = 0x181 // 385 + SYS_DUP = 0x182 // 386 + SYS_DUP2 = 0x183 // 387 + SYS_EXECL = 0x184 // 388 + SYS_EXECLE = 0x185 // 389 + SYS_EXECLP = 0x186 // 390 + SYS_EXECV = 0x187 // 391 + SYS_EXECVE = 0x188 // 392 + SYS_EXECVP = 0x189 // 393 + SYS_FCHMOD = 0x18A // 394 + SYS_FCHOWN = 0x18B // 395 + SYS_FCNTL = 0x18C // 396 + SYS_FILENO = 0x18D // 397 + SYS_FORK = 0x18E // 398 + SYS_FPATHCONF = 0x18F // 399 + SYS_FSTAT = 0x190 // 400 + SYS_FSYNC = 0x191 // 401 + SYS_FTRUNCATE = 0x192 // 402 + SYS_GETCWD = 0x193 // 403 + SYS_GETEGID = 0x194 // 404 + SYS_GETEUID = 0x195 // 405 + SYS_GETGID = 0x196 // 406 + SYS_GETGRGID = 0x197 // 407 + SYS_GETGRNAM = 0x198 // 408 + SYS_GETGROUPS = 0x199 // 409 + SYS_GETLOGIN = 0x19A // 410 + SYS_W_GETMNTENT = 0x19B // 411 + SYS_GETPGRP = 0x19C // 412 + SYS_GETPID = 0x19D // 413 + SYS_GETPPID = 0x19E // 414 + SYS_GETPWNAM = 0x19F // 415 + SYS_GETPWUID = 0x1A0 // 416 + SYS_GETUID = 0x1A1 // 417 + SYS_W_IOCTL = 0x1A2 // 418 + SYS_ISATTY = 0x1A3 // 419 + SYS_KILL = 0x1A4 // 420 + SYS_LINK = 0x1A5 // 421 + SYS_LSEEK = 0x1A6 // 422 + SYS_LSTAT = 0x1A7 // 423 + SYS_MKDIR = 0x1A8 // 424 + SYS_MKFIFO = 0x1A9 // 425 + SYS_MKNOD = 0x1AA // 426 + SYS_MOUNT = 0x1AB // 427 + SYS_OPEN = 0x1AC // 428 + SYS_OPENDIR = 0x1AD // 429 + SYS_PATHCONF = 0x1AE // 430 
+ SYS_PAUSE = 0x1AF // 431 + SYS_PIPE = 0x1B0 // 432 + SYS_W_GETPSENT = 0x1B1 // 433 + SYS_READ = 0x1B2 // 434 + SYS_READDIR = 0x1B3 // 435 + SYS_READLINK = 0x1B4 // 436 + SYS_REWINDDIR = 0x1B5 // 437 + SYS_RMDIR = 0x1B6 // 438 + SYS_SETEGID = 0x1B7 // 439 + SYS_SETEUID = 0x1B8 // 440 + SYS_SETGID = 0x1B9 // 441 + SYS_SETPGID = 0x1BA // 442 + SYS_SETSID = 0x1BB // 443 + SYS_SETUID = 0x1BC // 444 + SYS_SIGACTION = 0x1BD // 445 + SYS_SIGADDSET = 0x1BE // 446 + SYS_SIGDELSET = 0x1BF // 447 + SYS_SIGEMPTYSET = 0x1C0 // 448 + SYS_SIGFILLSET = 0x1C1 // 449 + SYS_SIGISMEMBER = 0x1C2 // 450 + SYS_SIGLONGJMP = 0x1C3 // 451 + SYS_SIGPENDING = 0x1C4 // 452 + SYS_SIGPROCMASK = 0x1C5 // 453 + SYS_SIGSETJMP = 0x1C6 // 454 + SYS_SIGSUSPEND = 0x1C7 // 455 + SYS_SLEEP = 0x1C8 // 456 + SYS_STAT = 0x1C9 // 457 + SYS_W_STATFS = 0x1CA // 458 + SYS_SYMLINK = 0x1CB // 459 + SYS_SYSCONF = 0x1CC // 460 + SYS_TCDRAIN = 0x1CD // 461 + SYS_TCFLOW = 0x1CE // 462 + SYS_TCFLUSH = 0x1CF // 463 + SYS_TCGETATTR = 0x1D0 // 464 + SYS_TCGETPGRP = 0x1D1 // 465 + SYS_TCSENDBREAK = 0x1D2 // 466 + SYS_TCSETATTR = 0x1D3 // 467 + SYS_TCSETPGRP = 0x1D4 // 468 + SYS_TIMES = 0x1D5 // 469 + SYS_TTYNAME = 0x1D6 // 470 + SYS_TZSET = 0x1D7 // 471 + SYS_UMASK = 0x1D8 // 472 + SYS_UMOUNT = 0x1D9 // 473 + SYS_UNAME = 0x1DA // 474 + SYS_UNLINK = 0x1DB // 475 + SYS_UTIME = 0x1DC // 476 + SYS_WAIT = 0x1DD // 477 + SYS_WAITPID = 0x1DE // 478 + SYS_WRITE = 0x1DF // 479 + SYS_CHAUDIT = 0x1E0 // 480 + SYS_FCHAUDIT = 0x1E1 // 481 + SYS_GETGROUPSBYNAME = 0x1E2 // 482 + SYS_SIGWAIT = 0x1E3 // 483 + SYS_PTHREAD_EXIT = 0x1E4 // 484 + SYS_PTHREAD_KILL = 0x1E5 // 485 + SYS_PTHREAD_ATTR_INIT = 0x1E6 // 486 + SYS_PTHREAD_ATTR_DESTROY = 0x1E7 // 487 + SYS_PTHREAD_ATTR_SETSTACKSIZE = 0x1E8 // 488 + SYS_PTHREAD_ATTR_GETSTACKSIZE = 0x1E9 // 489 + SYS_PTHREAD_ATTR_SETDETACHSTATE = 0x1EA // 490 + SYS_PTHREAD_ATTR_GETDETACHSTATE = 0x1EB // 491 + SYS_PTHREAD_ATTR_SETWEIGHT_NP = 0x1EC // 492 + SYS_PTHREAD_ATTR_GETWEIGHT_NP = 0x1ED // 493 + SYS_PTHREAD_CANCEL = 0x1EE // 494 + SYS_PTHREAD_CLEANUP_PUSH = 0x1EF // 495 + SYS_PTHREAD_CLEANUP_POP = 0x1F0 // 496 + SYS_PTHREAD_CONDATTR_INIT = 0x1F1 // 497 + SYS_PTHREAD_CONDATTR_DESTROY = 0x1F2 // 498 + SYS_PTHREAD_COND_INIT = 0x1F3 // 499 + SYS_PTHREAD_COND_DESTROY = 0x1F4 // 500 + SYS_PTHREAD_COND_SIGNAL = 0x1F5 // 501 + SYS_PTHREAD_COND_BROADCAST = 0x1F6 // 502 + SYS_PTHREAD_COND_WAIT = 0x1F7 // 503 + SYS_PTHREAD_COND_TIMEDWAIT = 0x1F8 // 504 + SYS_PTHREAD_CREATE = 0x1F9 // 505 + SYS_PTHREAD_DETACH = 0x1FA // 506 + SYS_PTHREAD_EQUAL = 0x1FB // 507 + SYS_PTHREAD_GETSPECIFIC = 0x1FC // 508 + SYS_PTHREAD_JOIN = 0x1FD // 509 + SYS_PTHREAD_KEY_CREATE = 0x1FE // 510 + SYS_PTHREAD_MUTEXATTR_INIT = 0x1FF // 511 + SYS_PTHREAD_MUTEXATTR_DESTROY = 0x200 // 512 + SYS_PTHREAD_MUTEXATTR_SETKIND_NP = 0x201 // 513 + SYS_PTHREAD_MUTEXATTR_GETKIND_NP = 0x202 // 514 + SYS_PTHREAD_MUTEX_INIT = 0x203 // 515 + SYS_PTHREAD_MUTEX_DESTROY = 0x204 // 516 + SYS_PTHREAD_MUTEX_LOCK = 0x205 // 517 + SYS_PTHREAD_MUTEX_TRYLOCK = 0x206 // 518 + SYS_PTHREAD_MUTEX_UNLOCK = 0x207 // 519 + SYS_PTHREAD_ONCE = 0x209 // 521 + SYS_PTHREAD_SELF = 0x20A // 522 + SYS_PTHREAD_SETINTR = 0x20B // 523 + SYS_PTHREAD_SETINTRTYPE = 0x20C // 524 + SYS_PTHREAD_SETSPECIFIC = 0x20D // 525 + SYS_PTHREAD_TESTINTR = 0x20E // 526 + SYS_PTHREAD_YIELD = 0x20F // 527 + SYS_TW_OPEN = 0x210 // 528 + SYS_TW_FCNTL = 0x211 // 529 + SYS_PTHREAD_JOIN_D4_NP = 0x212 // 530 + SYS_PTHREAD_CONDATTR_SETKIND_NP = 0x213 // 531 + SYS_PTHREAD_CONDATTR_GETKIND_NP = 0x214 // 532 + SYS_EXTLINK_NP = 
0x215 // 533 + SYS___PASSWD = 0x216 // 534 + SYS_SETGROUPS = 0x217 // 535 + SYS_INITGROUPS = 0x218 // 536 + SYS_WCSPBRK = 0x23F // 575 + SYS_WCSRCHR = 0x240 // 576 + SYS_SVC99 = 0x241 // 577 + SYS___SVC99 = 0x241 // 577 + SYS_WCSWCS = 0x242 // 578 + SYS_LOCALECO = 0x243 // 579 + SYS_LOCALECONV = 0x243 // 579 + SYS___LIBREL = 0x244 // 580 + SYS_RELEASE = 0x245 // 581 + SYS___RLSE = 0x245 // 581 + SYS_FLOCATE = 0x246 // 582 + SYS___FLOCT = 0x246 // 582 + SYS_FDELREC = 0x247 // 583 + SYS___FDLREC = 0x247 // 583 + SYS_FETCH = 0x248 // 584 + SYS___FETCH = 0x248 // 584 + SYS_QSORT = 0x249 // 585 + SYS_GETENV = 0x24A // 586 + SYS_SYSTEM = 0x24B // 587 + SYS_BSEARCH = 0x24C // 588 + SYS_LDIV = 0x24D // 589 + SYS___THROW = 0x25E // 606 + SYS___RETHROW = 0x25F // 607 + SYS___CLEANUPCATCH = 0x260 // 608 + SYS___CATCHMATCH = 0x261 // 609 + SYS___CLEAN2UPCATCH = 0x262 // 610 + SYS_PUTENV = 0x26A // 618 + SYS___GETENV = 0x26F // 623 + SYS_GETPRIORITY = 0x270 // 624 + SYS_NICE = 0x271 // 625 + SYS_SETPRIORITY = 0x272 // 626 + SYS_GETITIMER = 0x273 // 627 + SYS_SETITIMER = 0x274 // 628 + SYS_MSGCTL = 0x275 // 629 + SYS_MSGGET = 0x276 // 630 + SYS_MSGRCV = 0x277 // 631 + SYS_MSGSND = 0x278 // 632 + SYS_MSGXRCV = 0x279 // 633 + SYS___MSGXR = 0x279 // 633 + SYS_SEMCTL = 0x27A // 634 + SYS_SEMGET = 0x27B // 635 + SYS_SEMOP = 0x27C // 636 + SYS_SHMAT = 0x27D // 637 + SYS_SHMCTL = 0x27E // 638 + SYS_SHMDT = 0x27F // 639 + SYS_SHMGET = 0x280 // 640 + SYS___GETIPC = 0x281 // 641 + SYS_SETGRENT = 0x282 // 642 + SYS_GETGRENT = 0x283 // 643 + SYS_ENDGRENT = 0x284 // 644 + SYS_SETPWENT = 0x285 // 645 + SYS_GETPWENT = 0x286 // 646 + SYS_ENDPWENT = 0x287 // 647 + SYS_BSD_SIGNAL = 0x288 // 648 + SYS_KILLPG = 0x289 // 649 + SYS_SIGALTSTACK = 0x28A // 650 + SYS_SIGHOLD = 0x28B // 651 + SYS_SIGIGNORE = 0x28C // 652 + SYS_SIGINTERRUPT = 0x28D // 653 + SYS_SIGPAUSE = 0x28E // 654 + SYS_SIGRELSE = 0x28F // 655 + SYS_SIGSET = 0x290 // 656 + SYS_SIGSTACK = 0x291 // 657 + SYS_GETRLIMIT = 0x292 // 658 + SYS_SETRLIMIT = 0x293 // 659 + SYS_GETRUSAGE = 0x294 // 660 + SYS_MMAP = 0x295 // 661 + SYS_MPROTECT = 0x296 // 662 + SYS_MSYNC = 0x297 // 663 + SYS_MUNMAP = 0x298 // 664 + SYS_CONFSTR = 0x299 // 665 + SYS_GETOPT = 0x29A // 666 + SYS_LCHOWN = 0x29B // 667 + SYS_TRUNCATE = 0x29C // 668 + SYS_GETSUBOPT = 0x29D // 669 + SYS_SETPGRP = 0x29E // 670 + SYS___GDERR = 0x29F // 671 + SYS___TZONE = 0x2A0 // 672 + SYS___DLGHT = 0x2A1 // 673 + SYS___OPARGF = 0x2A2 // 674 + SYS___OPOPTF = 0x2A3 // 675 + SYS___OPINDF = 0x2A4 // 676 + SYS___OPERRF = 0x2A5 // 677 + SYS_GETDATE = 0x2A6 // 678 + SYS_WAIT3 = 0x2A7 // 679 + SYS_WAITID = 0x2A8 // 680 + SYS___CATTRM = 0x2A9 // 681 + SYS___GDTRM = 0x2AA // 682 + SYS___RNDTRM = 0x2AB // 683 + SYS_CRYPT = 0x2AC // 684 + SYS_ENCRYPT = 0x2AD // 685 + SYS_SETKEY = 0x2AE // 686 + SYS___CNVBLK = 0x2AF // 687 + SYS___CRYTRM = 0x2B0 // 688 + SYS___ECRTRM = 0x2B1 // 689 + SYS_DRAND48 = 0x2B2 // 690 + SYS_ERAND48 = 0x2B3 // 691 + SYS_FSTATVFS = 0x2B4 // 692 + SYS_STATVFS = 0x2B5 // 693 + SYS_CATCLOSE = 0x2B6 // 694 + SYS_CATGETS = 0x2B7 // 695 + SYS_CATOPEN = 0x2B8 // 696 + SYS_BCMP = 0x2B9 // 697 + SYS_BCOPY = 0x2BA // 698 + SYS_BZERO = 0x2BB // 699 + SYS_FFS = 0x2BC // 700 + SYS_INDEX = 0x2BD // 701 + SYS_RINDEX = 0x2BE // 702 + SYS_STRCASECMP = 0x2BF // 703 + SYS_STRDUP = 0x2C0 // 704 + SYS_STRNCASECMP = 0x2C1 // 705 + SYS_INITSTATE = 0x2C2 // 706 + SYS_SETSTATE = 0x2C3 // 707 + SYS_RANDOM = 0x2C4 // 708 + SYS_SRANDOM = 0x2C5 // 709 + SYS_HCREATE = 0x2C6 // 710 + SYS_HDESTROY = 0x2C7 // 711 + SYS_HSEARCH = 0x2C8 
// 712 + SYS_LFIND = 0x2C9 // 713 + SYS_LSEARCH = 0x2CA // 714 + SYS_TDELETE = 0x2CB // 715 + SYS_TFIND = 0x2CC // 716 + SYS_TSEARCH = 0x2CD // 717 + SYS_TWALK = 0x2CE // 718 + SYS_INSQUE = 0x2CF // 719 + SYS_REMQUE = 0x2D0 // 720 + SYS_POPEN = 0x2D1 // 721 + SYS_PCLOSE = 0x2D2 // 722 + SYS_SWAB = 0x2D3 // 723 + SYS_MEMCCPY = 0x2D4 // 724 + SYS_GETPAGESIZE = 0x2D8 // 728 + SYS_FCHDIR = 0x2D9 // 729 + SYS___OCLCK = 0x2DA // 730 + SYS___ATOE = 0x2DB // 731 + SYS___ATOE_L = 0x2DC // 732 + SYS___ETOA = 0x2DD // 733 + SYS___ETOA_L = 0x2DE // 734 + SYS_SETUTXENT = 0x2DF // 735 + SYS_GETUTXENT = 0x2E0 // 736 + SYS_ENDUTXENT = 0x2E1 // 737 + SYS_GETUTXID = 0x2E2 // 738 + SYS_GETUTXLINE = 0x2E3 // 739 + SYS_PUTUTXLINE = 0x2E4 // 740 + SYS_FMTMSG = 0x2E5 // 741 + SYS_JRAND48 = 0x2E6 // 742 + SYS_LRAND48 = 0x2E7 // 743 + SYS_MRAND48 = 0x2E8 // 744 + SYS_NRAND48 = 0x2E9 // 745 + SYS_LCONG48 = 0x2EA // 746 + SYS_SRAND48 = 0x2EB // 747 + SYS_SEED48 = 0x2EC // 748 + SYS_ISASCII = 0x2ED // 749 + SYS_TOASCII = 0x2EE // 750 + SYS_A64L = 0x2EF // 751 + SYS_L64A = 0x2F0 // 752 + SYS_UALARM = 0x2F1 // 753 + SYS_USLEEP = 0x2F2 // 754 + SYS___UTXTRM = 0x2F3 // 755 + SYS___SRCTRM = 0x2F4 // 756 + SYS_FTIME = 0x2F5 // 757 + SYS_GETTIMEOFDAY = 0x2F6 // 758 + SYS_DBM_CLEARERR = 0x2F7 // 759 + SYS_DBM_CLOSE = 0x2F8 // 760 + SYS_DBM_DELETE = 0x2F9 // 761 + SYS_DBM_ERROR = 0x2FA // 762 + SYS_DBM_FETCH = 0x2FB // 763 + SYS_DBM_FIRSTKEY = 0x2FC // 764 + SYS_DBM_NEXTKEY = 0x2FD // 765 + SYS_DBM_OPEN = 0x2FE // 766 + SYS_DBM_STORE = 0x2FF // 767 + SYS___NDMTRM = 0x300 // 768 + SYS_FTOK = 0x301 // 769 + SYS_BASENAME = 0x302 // 770 + SYS_DIRNAME = 0x303 // 771 + SYS_GETDTABLESIZE = 0x304 // 772 + SYS_MKSTEMP = 0x305 // 773 + SYS_MKTEMP = 0x306 // 774 + SYS_NFTW = 0x307 // 775 + SYS_GETWD = 0x308 // 776 + SYS_LOCKF = 0x309 // 777 + SYS__LONGJMP = 0x30D // 781 + SYS__SETJMP = 0x30E // 782 + SYS_VFORK = 0x30F // 783 + SYS_WORDEXP = 0x310 // 784 + SYS_WORDFREE = 0x311 // 785 + SYS_GETPGID = 0x312 // 786 + SYS_GETSID = 0x313 // 787 + SYS___UTMPXNAME = 0x314 // 788 + SYS_CUSERID = 0x315 // 789 + SYS_GETPASS = 0x316 // 790 + SYS_FNMATCH = 0x317 // 791 + SYS_FTW = 0x318 // 792 + SYS_GETW = 0x319 // 793 + SYS_GLOB = 0x31A // 794 + SYS_GLOBFREE = 0x31B // 795 + SYS_PUTW = 0x31C // 796 + SYS_SEEKDIR = 0x31D // 797 + SYS_TELLDIR = 0x31E // 798 + SYS_TEMPNAM = 0x31F // 799 + SYS_ACOSH = 0x320 // 800 + SYS_ASINH = 0x321 // 801 + SYS_ATANH = 0x322 // 802 + SYS_CBRT = 0x323 // 803 + SYS_EXPM1 = 0x324 // 804 + SYS_ILOGB = 0x325 // 805 + SYS_LOGB = 0x326 // 806 + SYS_LOG1P = 0x327 // 807 + SYS_NEXTAFTER = 0x328 // 808 + SYS_RINT = 0x329 // 809 + SYS_REMAINDER = 0x32A // 810 + SYS_SCALB = 0x32B // 811 + SYS_LGAMMA = 0x32C // 812 + SYS_TTYSLOT = 0x32D // 813 + SYS_GETTIMEOFDAY_R = 0x32E // 814 + SYS_SYNC = 0x32F // 815 + SYS_SPAWN = 0x330 // 816 + SYS_SPAWNP = 0x331 // 817 + SYS_GETLOGIN_UU = 0x332 // 818 + SYS_ECVT = 0x333 // 819 + SYS_FCVT = 0x334 // 820 + SYS_GCVT = 0x335 // 821 + SYS_ACCEPT = 0x336 // 822 + SYS_BIND = 0x337 // 823 + SYS_CONNECT = 0x338 // 824 + SYS_ENDHOSTENT = 0x339 // 825 + SYS_ENDPROTOENT = 0x33A // 826 + SYS_ENDSERVENT = 0x33B // 827 + SYS_GETHOSTBYADDR_R = 0x33C // 828 + SYS_GETHOSTBYADDR = 0x33D // 829 + SYS_GETHOSTBYNAME_R = 0x33E // 830 + SYS_GETHOSTBYNAME = 0x33F // 831 + SYS_GETHOSTENT = 0x340 // 832 + SYS_GETHOSTID = 0x341 // 833 + SYS_GETHOSTNAME = 0x342 // 834 + SYS_GETNETBYADDR = 0x343 // 835 + SYS_GETNETBYNAME = 0x344 // 836 + SYS_GETNETENT = 0x345 // 837 + SYS_GETPEERNAME = 0x346 // 838 + SYS_GETPROTOBYNAME 
= 0x347 // 839 + SYS_GETPROTOBYNUMBER = 0x348 // 840 + SYS_GETPROTOENT = 0x349 // 841 + SYS_GETSERVBYNAME = 0x34A // 842 + SYS_GETSERVBYPORT = 0x34B // 843 + SYS_GETSERVENT = 0x34C // 844 + SYS_GETSOCKNAME = 0x34D // 845 + SYS_GETSOCKOPT = 0x34E // 846 + SYS_INET_ADDR = 0x34F // 847 + SYS_INET_LNAOF = 0x350 // 848 + SYS_INET_MAKEADDR = 0x351 // 849 + SYS_INET_NETOF = 0x352 // 850 + SYS_INET_NETWORK = 0x353 // 851 + SYS_INET_NTOA = 0x354 // 852 + SYS_IOCTL = 0x355 // 853 + SYS_LISTEN = 0x356 // 854 + SYS_READV = 0x357 // 855 + SYS_RECV = 0x358 // 856 + SYS_RECVFROM = 0x359 // 857 + SYS_SELECT = 0x35B // 859 + SYS_SELECTEX = 0x35C // 860 + SYS_SEND = 0x35D // 861 + SYS_SENDTO = 0x35F // 863 + SYS_SETHOSTENT = 0x360 // 864 + SYS_SETNETENT = 0x361 // 865 + SYS_SETPEER = 0x362 // 866 + SYS_SETPROTOENT = 0x363 // 867 + SYS_SETSERVENT = 0x364 // 868 + SYS_SETSOCKOPT = 0x365 // 869 + SYS_SHUTDOWN = 0x366 // 870 + SYS_SOCKET = 0x367 // 871 + SYS_SOCKETPAIR = 0x368 // 872 + SYS_WRITEV = 0x369 // 873 + SYS_CHROOT = 0x36A // 874 + SYS_W_STATVFS = 0x36B // 875 + SYS_ULIMIT = 0x36C // 876 + SYS_ISNAN = 0x36D // 877 + SYS_UTIMES = 0x36E // 878 + SYS___H_ERRNO = 0x36F // 879 + SYS_ENDNETENT = 0x370 // 880 + SYS_CLOSELOG = 0x371 // 881 + SYS_OPENLOG = 0x372 // 882 + SYS_SETLOGMASK = 0x373 // 883 + SYS_SYSLOG = 0x374 // 884 + SYS_PTSNAME = 0x375 // 885 + SYS_SETREUID = 0x376 // 886 + SYS_SETREGID = 0x377 // 887 + SYS_REALPATH = 0x378 // 888 + SYS___SIGNGAM = 0x379 // 889 + SYS_GRANTPT = 0x37A // 890 + SYS_UNLOCKPT = 0x37B // 891 + SYS_TCGETSID = 0x37C // 892 + SYS___TCGETCP = 0x37D // 893 + SYS___TCSETCP = 0x37E // 894 + SYS___TCSETTABLES = 0x37F // 895 + SYS_POLL = 0x380 // 896 + SYS_REXEC = 0x381 // 897 + SYS___ISASCII2 = 0x382 // 898 + SYS___TOASCII2 = 0x383 // 899 + SYS_CHPRIORITY = 0x384 // 900 + SYS_PTHREAD_ATTR_SETSYNCTYPE_NP = 0x385 // 901 + SYS_PTHREAD_ATTR_GETSYNCTYPE_NP = 0x386 // 902 + SYS_PTHREAD_SET_LIMIT_NP = 0x387 // 903 + SYS___STNETENT = 0x388 // 904 + SYS___STPROTOENT = 0x389 // 905 + SYS___STSERVENT = 0x38A // 906 + SYS___STHOSTENT = 0x38B // 907 + SYS_NLIST = 0x38C // 908 + SYS___IPDBCS = 0x38D // 909 + SYS___IPDSPX = 0x38E // 910 + SYS___IPMSGC = 0x38F // 911 + SYS___SELECT1 = 0x390 // 912 + SYS_PTHREAD_SECURITY_NP = 0x391 // 913 + SYS___CHECK_RESOURCE_AUTH_NP = 0x392 // 914 + SYS___CONVERT_ID_NP = 0x393 // 915 + SYS___OPENVMREL = 0x394 // 916 + SYS_WMEMCHR = 0x395 // 917 + SYS_WMEMCMP = 0x396 // 918 + SYS_WMEMCPY = 0x397 // 919 + SYS_WMEMMOVE = 0x398 // 920 + SYS_WMEMSET = 0x399 // 921 + SYS___FPUTWC = 0x400 // 1024 + SYS___PUTWC = 0x401 // 1025 + SYS___PWCHAR = 0x402 // 1026 + SYS___WCSFTM = 0x403 // 1027 + SYS___WCSTOK = 0x404 // 1028 + SYS___WCWDTH = 0x405 // 1029 + SYS_T_ACCEPT = 0x409 // 1033 + SYS_T_ALLOC = 0x40A // 1034 + SYS_T_BIND = 0x40B // 1035 + SYS_T_CLOSE = 0x40C // 1036 + SYS_T_CONNECT = 0x40D // 1037 + SYS_T_ERROR = 0x40E // 1038 + SYS_T_FREE = 0x40F // 1039 + SYS_T_GETINFO = 0x410 // 1040 + SYS_T_GETPROTADDR = 0x411 // 1041 + SYS_T_GETSTATE = 0x412 // 1042 + SYS_T_LISTEN = 0x413 // 1043 + SYS_T_LOOK = 0x414 // 1044 + SYS_T_OPEN = 0x415 // 1045 + SYS_T_OPTMGMT = 0x416 // 1046 + SYS_T_RCV = 0x417 // 1047 + SYS_T_RCVCONNECT = 0x418 // 1048 + SYS_T_RCVDIS = 0x419 // 1049 + SYS_T_RCVREL = 0x41A // 1050 + SYS_T_RCVUDATA = 0x41B // 1051 + SYS_T_RCVUDERR = 0x41C // 1052 + SYS_T_SND = 0x41D // 1053 + SYS_T_SNDDIS = 0x41E // 1054 + SYS_T_SNDREL = 0x41F // 1055 + SYS_T_SNDUDATA = 0x420 // 1056 + SYS_T_STRERROR = 0x421 // 1057 + SYS_T_SYNC = 0x422 // 1058 + SYS_T_UNBIND = 0x423 
// 1059 + SYS___T_ERRNO = 0x424 // 1060 + SYS___RECVMSG2 = 0x425 // 1061 + SYS___SENDMSG2 = 0x426 // 1062 + SYS_FATTACH = 0x427 // 1063 + SYS_FDETACH = 0x428 // 1064 + SYS_GETMSG = 0x429 // 1065 + SYS_GETPMSG = 0x42A // 1066 + SYS_ISASTREAM = 0x42B // 1067 + SYS_PUTMSG = 0x42C // 1068 + SYS_PUTPMSG = 0x42D // 1069 + SYS___ISPOSIXON = 0x42E // 1070 + SYS___OPENMVSREL = 0x42F // 1071 + SYS_GETCONTEXT = 0x430 // 1072 + SYS_SETCONTEXT = 0x431 // 1073 + SYS_MAKECONTEXT = 0x432 // 1074 + SYS_SWAPCONTEXT = 0x433 // 1075 + SYS_PTHREAD_GETSPECIFIC_D8_NP = 0x434 // 1076 + SYS_GETCLIENTID = 0x470 // 1136 + SYS___GETCLIENTID = 0x471 // 1137 + SYS_GETSTABLESIZE = 0x472 // 1138 + SYS_GETIBMOPT = 0x473 // 1139 + SYS_GETIBMSOCKOPT = 0x474 // 1140 + SYS_GIVESOCKET = 0x475 // 1141 + SYS_IBMSFLUSH = 0x476 // 1142 + SYS_MAXDESC = 0x477 // 1143 + SYS_SETIBMOPT = 0x478 // 1144 + SYS_SETIBMSOCKOPT = 0x479 // 1145 + SYS_SOCK_DEBUG = 0x47A // 1146 + SYS_SOCK_DO_TESTSTOR = 0x47D // 1149 + SYS_TAKESOCKET = 0x47E // 1150 + SYS___SERVER_INIT = 0x47F // 1151 + SYS___SERVER_PWU = 0x480 // 1152 + SYS_PTHREAD_TAG_NP = 0x481 // 1153 + SYS___CONSOLE = 0x482 // 1154 + SYS___WSINIT = 0x483 // 1155 + SYS___IPTCPN = 0x489 // 1161 + SYS___SMF_RECORD = 0x48A // 1162 + SYS___IPHOST = 0x48B // 1163 + SYS___IPNODE = 0x48C // 1164 + SYS___SERVER_CLASSIFY_CREATE = 0x48D // 1165 + SYS___SERVER_CLASSIFY_DESTROY = 0x48E // 1166 + SYS___SERVER_CLASSIFY_RESET = 0x48F // 1167 + SYS___SERVER_CLASSIFY = 0x490 // 1168 + SYS___HEAPRPT = 0x496 // 1174 + SYS___FNWSA = 0x49B // 1179 + SYS___SPAWN2 = 0x49D // 1181 + SYS___SPAWNP2 = 0x49E // 1182 + SYS___GDRR = 0x4A1 // 1185 + SYS___HRRNO = 0x4A2 // 1186 + SYS___OPRG = 0x4A3 // 1187 + SYS___OPRR = 0x4A4 // 1188 + SYS___OPND = 0x4A5 // 1189 + SYS___OPPT = 0x4A6 // 1190 + SYS___SIGGM = 0x4A7 // 1191 + SYS___DGHT = 0x4A8 // 1192 + SYS___TZNE = 0x4A9 // 1193 + SYS___TZZN = 0x4AA // 1194 + SYS___TRRNO = 0x4AF // 1199 + SYS___ENVN = 0x4B0 // 1200 + SYS___MLOCKALL = 0x4B1 // 1201 + SYS_CREATEWO = 0x4B2 // 1202 + SYS_CREATEWORKUNIT = 0x4B2 // 1202 + SYS_CONTINUE = 0x4B3 // 1203 + SYS_CONTINUEWORKUNIT = 0x4B3 // 1203 + SYS_CONNECTW = 0x4B4 // 1204 + SYS_CONNECTWORKMGR = 0x4B4 // 1204 + SYS_CONNECTS = 0x4B5 // 1205 + SYS_CONNECTSERVER = 0x4B5 // 1205 + SYS_DISCONNE = 0x4B6 // 1206 + SYS_DISCONNECTSERVER = 0x4B6 // 1206 + SYS_JOINWORK = 0x4B7 // 1207 + SYS_JOINWORKUNIT = 0x4B7 // 1207 + SYS_LEAVEWOR = 0x4B8 // 1208 + SYS_LEAVEWORKUNIT = 0x4B8 // 1208 + SYS_DELETEWO = 0x4B9 // 1209 + SYS_DELETEWORKUNIT = 0x4B9 // 1209 + SYS_QUERYMET = 0x4BA // 1210 + SYS_QUERYMETRICS = 0x4BA // 1210 + SYS_QUERYSCH = 0x4BB // 1211 + SYS_QUERYSCHENV = 0x4BB // 1211 + SYS_CHECKSCH = 0x4BC // 1212 + SYS_CHECKSCHENV = 0x4BC // 1212 + SYS___PID_AFFINITY = 0x4BD // 1213 + SYS___ASINH_B = 0x4BE // 1214 + SYS___ATAN_B = 0x4BF // 1215 + SYS___CBRT_B = 0x4C0 // 1216 + SYS___CEIL_B = 0x4C1 // 1217 + SYS_COPYSIGN = 0x4C2 // 1218 + SYS___COS_B = 0x4C3 // 1219 + SYS___ERF_B = 0x4C4 // 1220 + SYS___ERFC_B = 0x4C5 // 1221 + SYS___EXPM1_B = 0x4C6 // 1222 + SYS___FABS_B = 0x4C7 // 1223 + SYS_FINITE = 0x4C8 // 1224 + SYS___FLOOR_B = 0x4C9 // 1225 + SYS___FREXP_B = 0x4CA // 1226 + SYS___ILOGB_B = 0x4CB // 1227 + SYS___ISNAN_B = 0x4CC // 1228 + SYS___LDEXP_B = 0x4CD // 1229 + SYS___LOG1P_B = 0x4CE // 1230 + SYS___LOGB_B = 0x4CF // 1231 + SYS_MATHERR = 0x4D0 // 1232 + SYS___MODF_B = 0x4D1 // 1233 + SYS___NEXTAFTER_B = 0x4D2 // 1234 + SYS___RINT_B = 0x4D3 // 1235 + SYS_SCALBN = 0x4D4 // 1236 + SYS_SIGNIFIC = 0x4D5 // 1237 + SYS_SIGNIFICAND = 0x4D5 // 
1237 + SYS___SIN_B = 0x4D6 // 1238 + SYS___TAN_B = 0x4D7 // 1239 + SYS___TANH_B = 0x4D8 // 1240 + SYS___ACOS_B = 0x4D9 // 1241 + SYS___ACOSH_B = 0x4DA // 1242 + SYS___ASIN_B = 0x4DB // 1243 + SYS___ATAN2_B = 0x4DC // 1244 + SYS___ATANH_B = 0x4DD // 1245 + SYS___COSH_B = 0x4DE // 1246 + SYS___EXP_B = 0x4DF // 1247 + SYS___FMOD_B = 0x4E0 // 1248 + SYS___GAMMA_B = 0x4E1 // 1249 + SYS_GAMMA_R = 0x4E2 // 1250 + SYS___HYPOT_B = 0x4E3 // 1251 + SYS___J0_B = 0x4E4 // 1252 + SYS___Y0_B = 0x4E5 // 1253 + SYS___J1_B = 0x4E6 // 1254 + SYS___Y1_B = 0x4E7 // 1255 + SYS___JN_B = 0x4E8 // 1256 + SYS___YN_B = 0x4E9 // 1257 + SYS___LGAMMA_B = 0x4EA // 1258 + SYS_LGAMMA_R = 0x4EB // 1259 + SYS___LOG_B = 0x4EC // 1260 + SYS___LOG10_B = 0x4ED // 1261 + SYS___POW_B = 0x4EE // 1262 + SYS___REMAINDER_B = 0x4EF // 1263 + SYS___SCALB_B = 0x4F0 // 1264 + SYS___SINH_B = 0x4F1 // 1265 + SYS___SQRT_B = 0x4F2 // 1266 + SYS___OPENDIR2 = 0x4F3 // 1267 + SYS___READDIR2 = 0x4F4 // 1268 + SYS___LOGIN = 0x4F5 // 1269 + SYS___OPEN_STAT = 0x4F6 // 1270 + SYS_ACCEPT_AND_RECV = 0x4F7 // 1271 + SYS___FP_SETMODE = 0x4F8 // 1272 + SYS___SIGACTIONSET = 0x4FB // 1275 + SYS___UCREATE = 0x4FC // 1276 + SYS___UMALLOC = 0x4FD // 1277 + SYS___UFREE = 0x4FE // 1278 + SYS___UHEAPREPORT = 0x4FF // 1279 + SYS___ISBFP = 0x500 // 1280 + SYS___FP_CAST = 0x501 // 1281 + SYS___CERTIFICATE = 0x502 // 1282 + SYS_SEND_FILE = 0x503 // 1283 + SYS_AIO_CANCEL = 0x504 // 1284 + SYS_AIO_ERROR = 0x505 // 1285 + SYS_AIO_READ = 0x506 // 1286 + SYS_AIO_RETURN = 0x507 // 1287 + SYS_AIO_SUSPEND = 0x508 // 1288 + SYS_AIO_WRITE = 0x509 // 1289 + SYS_PTHREAD_MUTEXATTR_GETPSHARED = 0x50A // 1290 + SYS_PTHREAD_MUTEXATTR_SETPSHARED = 0x50B // 1291 + SYS_PTHREAD_RWLOCK_DESTROY = 0x50C // 1292 + SYS_PTHREAD_RWLOCK_INIT = 0x50D // 1293 + SYS_PTHREAD_RWLOCK_RDLOCK = 0x50E // 1294 + SYS_PTHREAD_RWLOCK_TRYRDLOCK = 0x50F // 1295 + SYS_PTHREAD_RWLOCK_TRYWRLOCK = 0x510 // 1296 + SYS_PTHREAD_RWLOCK_UNLOCK = 0x511 // 1297 + SYS_PTHREAD_RWLOCK_WRLOCK = 0x512 // 1298 + SYS_PTHREAD_RWLOCKATTR_GETPSHARED = 0x513 // 1299 + SYS_PTHREAD_RWLOCKATTR_SETPSHARED = 0x514 // 1300 + SYS_PTHREAD_RWLOCKATTR_INIT = 0x515 // 1301 + SYS_PTHREAD_RWLOCKATTR_DESTROY = 0x516 // 1302 + SYS___CTTBL = 0x517 // 1303 + SYS_PTHREAD_MUTEXATTR_SETTYPE = 0x518 // 1304 + SYS_PTHREAD_MUTEXATTR_GETTYPE = 0x519 // 1305 + SYS___FP_CLR_FLAG = 0x51A // 1306 + SYS___FP_READ_FLAG = 0x51B // 1307 + SYS___FP_RAISE_XCP = 0x51C // 1308 + SYS___FP_CLASS = 0x51D // 1309 + SYS___FP_FINITE = 0x51E // 1310 + SYS___FP_ISNAN = 0x51F // 1311 + SYS___FP_UNORDERED = 0x520 // 1312 + SYS___FP_READ_RND = 0x521 // 1313 + SYS___FP_READ_RND_B = 0x522 // 1314 + SYS___FP_SWAP_RND = 0x523 // 1315 + SYS___FP_SWAP_RND_B = 0x524 // 1316 + SYS___FP_LEVEL = 0x525 // 1317 + SYS___FP_BTOH = 0x526 // 1318 + SYS___FP_HTOB = 0x527 // 1319 + SYS___FPC_RD = 0x528 // 1320 + SYS___FPC_WR = 0x529 // 1321 + SYS___FPC_RW = 0x52A // 1322 + SYS___FPC_SM = 0x52B // 1323 + SYS___FPC_RS = 0x52C // 1324 + SYS_SIGTIMEDWAIT = 0x52D // 1325 + SYS_SIGWAITINFO = 0x52E // 1326 + SYS___CHKBFP = 0x52F // 1327 + SYS___W_PIOCTL = 0x59E // 1438 + SYS___OSENV = 0x59F // 1439 + SYS_EXPORTWO = 0x5A1 // 1441 + SYS_EXPORTWORKUNIT = 0x5A1 // 1441 + SYS_UNDOEXPO = 0x5A2 // 1442 + SYS_UNDOEXPORTWORKUNIT = 0x5A2 // 1442 + SYS_IMPORTWO = 0x5A3 // 1443 + SYS_IMPORTWORKUNIT = 0x5A3 // 1443 + SYS_UNDOIMPO = 0x5A4 // 1444 + SYS_UNDOIMPORTWORKUNIT = 0x5A4 // 1444 + SYS_EXTRACTW = 0x5A5 // 1445 + SYS_EXTRACTWORKUNIT = 0x5A5 // 1445 + SYS___CPL = 0x5A6 // 1446 + SYS___MAP_INIT = 0x5A7 // 1447 
+ SYS___MAP_SERVICE = 0x5A8 // 1448 + SYS_SIGQUEUE = 0x5A9 // 1449 + SYS___MOUNT = 0x5AA // 1450 + SYS___GETUSERID = 0x5AB // 1451 + SYS___IPDOMAINNAME = 0x5AC // 1452 + SYS_QUERYENC = 0x5AD // 1453 + SYS_QUERYWORKUNITCLASSIFICATION = 0x5AD // 1453 + SYS_CONNECTE = 0x5AE // 1454 + SYS_CONNECTEXPORTIMPORT = 0x5AE // 1454 + SYS___FP_SWAPMODE = 0x5AF // 1455 + SYS_STRTOLL = 0x5B0 // 1456 + SYS_STRTOULL = 0x5B1 // 1457 + SYS___DSA_PREV = 0x5B2 // 1458 + SYS___EP_FIND = 0x5B3 // 1459 + SYS___SERVER_THREADS_QUERY = 0x5B4 // 1460 + SYS___MSGRCV_TIMED = 0x5B7 // 1463 + SYS___SEMOP_TIMED = 0x5B8 // 1464 + SYS___GET_CPUID = 0x5B9 // 1465 + SYS___GET_SYSTEM_SETTINGS = 0x5BA // 1466 + SYS_FTELLO = 0x5C8 // 1480 + SYS_FSEEKO = 0x5C9 // 1481 + SYS_LLDIV = 0x5CB // 1483 + SYS_WCSTOLL = 0x5CC // 1484 + SYS_WCSTOULL = 0x5CD // 1485 + SYS_LLABS = 0x5CE // 1486 + SYS___CONSOLE2 = 0x5D2 // 1490 + SYS_INET_NTOP = 0x5D3 // 1491 + SYS_INET_PTON = 0x5D4 // 1492 + SYS___RES = 0x5D6 // 1494 + SYS_RES_MKQUERY = 0x5D7 // 1495 + SYS_RES_INIT = 0x5D8 // 1496 + SYS_RES_QUERY = 0x5D9 // 1497 + SYS_RES_SEARCH = 0x5DA // 1498 + SYS_RES_SEND = 0x5DB // 1499 + SYS_RES_QUERYDOMAIN = 0x5DC // 1500 + SYS_DN_EXPAND = 0x5DD // 1501 + SYS_DN_SKIPNAME = 0x5DE // 1502 + SYS_DN_COMP = 0x5DF // 1503 + SYS_ASCTIME_R = 0x5E0 // 1504 + SYS_CTIME_R = 0x5E1 // 1505 + SYS_GMTIME_R = 0x5E2 // 1506 + SYS_LOCALTIME_R = 0x5E3 // 1507 + SYS_RAND_R = 0x5E4 // 1508 + SYS_STRTOK_R = 0x5E5 // 1509 + SYS_READDIR_R = 0x5E6 // 1510 + SYS_GETGRGID_R = 0x5E7 // 1511 + SYS_GETGRNAM_R = 0x5E8 // 1512 + SYS_GETLOGIN_R = 0x5E9 // 1513 + SYS_GETPWNAM_R = 0x5EA // 1514 + SYS_GETPWUID_R = 0x5EB // 1515 + SYS_TTYNAME_R = 0x5EC // 1516 + SYS_PTHREAD_ATFORK = 0x5ED // 1517 + SYS_PTHREAD_ATTR_GETGUARDSIZE = 0x5EE // 1518 + SYS_PTHREAD_ATTR_GETSTACKADDR = 0x5EF // 1519 + SYS_PTHREAD_ATTR_SETGUARDSIZE = 0x5F0 // 1520 + SYS_PTHREAD_ATTR_SETSTACKADDR = 0x5F1 // 1521 + SYS_PTHREAD_CONDATTR_GETPSHARED = 0x5F2 // 1522 + SYS_PTHREAD_CONDATTR_SETPSHARED = 0x5F3 // 1523 + SYS_PTHREAD_GETCONCURRENCY = 0x5F4 // 1524 + SYS_PTHREAD_KEY_DELETE = 0x5F5 // 1525 + SYS_PTHREAD_SETCONCURRENCY = 0x5F6 // 1526 + SYS_PTHREAD_SIGMASK = 0x5F7 // 1527 + SYS___DISCARDDATA = 0x5F8 // 1528 + SYS_PTHREAD_ATTR_GETSCHEDPARAM = 0x5F9 // 1529 + SYS_PTHREAD_ATTR_SETSCHEDPARAM = 0x5FA // 1530 + SYS_PTHREAD_ATTR_GETDETACHSTATE_U98 = 0x5FB // 1531 + SYS_PTHREAD_ATTR_SETDETACHSTATE_U98 = 0x5FC // 1532 + SYS_PTHREAD_DETACH_U98 = 0x5FD // 1533 + SYS_PTHREAD_GETSPECIFIC_U98 = 0x5FE // 1534 + SYS_PTHREAD_SETCANCELSTATE = 0x5FF // 1535 + SYS_PTHREAD_SETCANCELTYPE = 0x600 // 1536 + SYS_PTHREAD_TESTCANCEL = 0x601 // 1537 + SYS___ATANF_B = 0x602 // 1538 + SYS___ATANL_B = 0x603 // 1539 + SYS___CEILF_B = 0x604 // 1540 + SYS___CEILL_B = 0x605 // 1541 + SYS___COSF_B = 0x606 // 1542 + SYS___COSL_B = 0x607 // 1543 + SYS___FABSF_B = 0x608 // 1544 + SYS___FABSL_B = 0x609 // 1545 + SYS___FLOORF_B = 0x60A // 1546 + SYS___FLOORL_B = 0x60B // 1547 + SYS___FREXPF_B = 0x60C // 1548 + SYS___FREXPL_B = 0x60D // 1549 + SYS___LDEXPF_B = 0x60E // 1550 + SYS___LDEXPL_B = 0x60F // 1551 + SYS___SINF_B = 0x610 // 1552 + SYS___SINL_B = 0x611 // 1553 + SYS___TANF_B = 0x612 // 1554 + SYS___TANL_B = 0x613 // 1555 + SYS___TANHF_B = 0x614 // 1556 + SYS___TANHL_B = 0x615 // 1557 + SYS___ACOSF_B = 0x616 // 1558 + SYS___ACOSL_B = 0x617 // 1559 + SYS___ASINF_B = 0x618 // 1560 + SYS___ASINL_B = 0x619 // 1561 + SYS___ATAN2F_B = 0x61A // 1562 + SYS___ATAN2L_B = 0x61B // 1563 + SYS___COSHF_B = 0x61C // 1564 + SYS___COSHL_B = 0x61D // 1565 + 
SYS___EXPF_B = 0x61E // 1566 + SYS___EXPL_B = 0x61F // 1567 + SYS___LOGF_B = 0x620 // 1568 + SYS___LOGL_B = 0x621 // 1569 + SYS___LOG10F_B = 0x622 // 1570 + SYS___LOG10L_B = 0x623 // 1571 + SYS___POWF_B = 0x624 // 1572 + SYS___POWL_B = 0x625 // 1573 + SYS___SINHF_B = 0x626 // 1574 + SYS___SINHL_B = 0x627 // 1575 + SYS___SQRTF_B = 0x628 // 1576 + SYS___SQRTL_B = 0x629 // 1577 + SYS___ABSF_B = 0x62A // 1578 + SYS___ABS_B = 0x62B // 1579 + SYS___ABSL_B = 0x62C // 1580 + SYS___FMODF_B = 0x62D // 1581 + SYS___FMODL_B = 0x62E // 1582 + SYS___MODFF_B = 0x62F // 1583 + SYS___MODFL_B = 0x630 // 1584 + SYS_ABSF = 0x631 // 1585 + SYS_ABSL = 0x632 // 1586 + SYS_ACOSF = 0x633 // 1587 + SYS_ACOSL = 0x634 // 1588 + SYS_ASINF = 0x635 // 1589 + SYS_ASINL = 0x636 // 1590 + SYS_ATAN2F = 0x637 // 1591 + SYS_ATAN2L = 0x638 // 1592 + SYS_ATANF = 0x639 // 1593 + SYS_ATANL = 0x63A // 1594 + SYS_CEILF = 0x63B // 1595 + SYS_CEILL = 0x63C // 1596 + SYS_COSF = 0x63D // 1597 + SYS_COSL = 0x63E // 1598 + SYS_COSHF = 0x63F // 1599 + SYS_COSHL = 0x640 // 1600 + SYS_EXPF = 0x641 // 1601 + SYS_EXPL = 0x642 // 1602 + SYS_TANHF = 0x643 // 1603 + SYS_TANHL = 0x644 // 1604 + SYS_LOG10F = 0x645 // 1605 + SYS_LOG10L = 0x646 // 1606 + SYS_LOGF = 0x647 // 1607 + SYS_LOGL = 0x648 // 1608 + SYS_POWF = 0x649 // 1609 + SYS_POWL = 0x64A // 1610 + SYS_SINF = 0x64B // 1611 + SYS_SINL = 0x64C // 1612 + SYS_SQRTF = 0x64D // 1613 + SYS_SQRTL = 0x64E // 1614 + SYS_SINHF = 0x64F // 1615 + SYS_SINHL = 0x650 // 1616 + SYS_TANF = 0x651 // 1617 + SYS_TANL = 0x652 // 1618 + SYS_FABSF = 0x653 // 1619 + SYS_FABSL = 0x654 // 1620 + SYS_FLOORF = 0x655 // 1621 + SYS_FLOORL = 0x656 // 1622 + SYS_FMODF = 0x657 // 1623 + SYS_FMODL = 0x658 // 1624 + SYS_FREXPF = 0x659 // 1625 + SYS_FREXPL = 0x65A // 1626 + SYS_LDEXPF = 0x65B // 1627 + SYS_LDEXPL = 0x65C // 1628 + SYS_MODFF = 0x65D // 1629 + SYS_MODFL = 0x65E // 1630 + SYS_BTOWC = 0x65F // 1631 + SYS___CHATTR = 0x660 // 1632 + SYS___FCHATTR = 0x661 // 1633 + SYS___TOCCSID = 0x662 // 1634 + SYS___CSNAMETYPE = 0x663 // 1635 + SYS___TOCSNAME = 0x664 // 1636 + SYS___CCSIDTYPE = 0x665 // 1637 + SYS___AE_CORRESTBL_QUERY = 0x666 // 1638 + SYS___AE_AUTOCONVERT_STATE = 0x667 // 1639 + SYS_DN_FIND = 0x668 // 1640 + SYS___GETHOSTBYADDR_A = 0x669 // 1641 + SYS___GETHOSTBYNAME_A = 0x66A // 1642 + SYS___RES_INIT_A = 0x66B // 1643 + SYS___GETHOSTBYADDR_R_A = 0x66C // 1644 + SYS___GETHOSTBYNAME_R_A = 0x66D // 1645 + SYS___CHARMAP_INIT_A = 0x66E // 1646 + SYS___MBLEN_A = 0x66F // 1647 + SYS___MBLEN_SB_A = 0x670 // 1648 + SYS___MBLEN_STD_A = 0x671 // 1649 + SYS___MBLEN_UTF = 0x672 // 1650 + SYS___MBSTOWCS_A = 0x673 // 1651 + SYS___MBSTOWCS_STD_A = 0x674 // 1652 + SYS___MBTOWC_A = 0x675 // 1653 + SYS___MBTOWC_ISO1 = 0x676 // 1654 + SYS___MBTOWC_SBCS = 0x677 // 1655 + SYS___MBTOWC_MBCS = 0x678 // 1656 + SYS___MBTOWC_UTF = 0x679 // 1657 + SYS___WCSTOMBS_A = 0x67A // 1658 + SYS___WCSTOMBS_STD_A = 0x67B // 1659 + SYS___WCSWIDTH_A = 0x67C // 1660 + SYS___GETGRGID_R_A = 0x67D // 1661 + SYS___WCSWIDTH_STD_A = 0x67E // 1662 + SYS___WCSWIDTH_ASIA = 0x67F // 1663 + SYS___CSID_A = 0x680 // 1664 + SYS___CSID_STD_A = 0x681 // 1665 + SYS___WCSID_A = 0x682 // 1666 + SYS___WCSID_STD_A = 0x683 // 1667 + SYS___WCTOMB_A = 0x684 // 1668 + SYS___WCTOMB_ISO1 = 0x685 // 1669 + SYS___WCTOMB_STD_A = 0x686 // 1670 + SYS___WCTOMB_UTF = 0x687 // 1671 + SYS___WCWIDTH_A = 0x688 // 1672 + SYS___GETGRNAM_R_A = 0x689 // 1673 + SYS___WCWIDTH_STD_A = 0x68A // 1674 + SYS___WCWIDTH_ASIA = 0x68B // 1675 + SYS___GETPWNAM_R_A = 0x68C // 1676 + SYS___GETPWUID_R_A = 
0x68D // 1677 + SYS___GETLOGIN_R_A = 0x68E // 1678 + SYS___TTYNAME_R_A = 0x68F // 1679 + SYS___READDIR_R_A = 0x690 // 1680 + SYS___E2A_S = 0x691 // 1681 + SYS___FNMATCH_A = 0x692 // 1682 + SYS___FNMATCH_C_A = 0x693 // 1683 + SYS___EXECL_A = 0x694 // 1684 + SYS___FNMATCH_STD_A = 0x695 // 1685 + SYS___REGCOMP_A = 0x696 // 1686 + SYS___REGCOMP_STD_A = 0x697 // 1687 + SYS___REGERROR_A = 0x698 // 1688 + SYS___REGERROR_STD_A = 0x699 // 1689 + SYS___REGEXEC_A = 0x69A // 1690 + SYS___REGEXEC_STD_A = 0x69B // 1691 + SYS___REGFREE_A = 0x69C // 1692 + SYS___REGFREE_STD_A = 0x69D // 1693 + SYS___STRCOLL_A = 0x69E // 1694 + SYS___STRCOLL_C_A = 0x69F // 1695 + SYS___EXECLE_A = 0x6A0 // 1696 + SYS___STRCOLL_STD_A = 0x6A1 // 1697 + SYS___STRXFRM_A = 0x6A2 // 1698 + SYS___STRXFRM_C_A = 0x6A3 // 1699 + SYS___EXECLP_A = 0x6A4 // 1700 + SYS___STRXFRM_STD_A = 0x6A5 // 1701 + SYS___WCSCOLL_A = 0x6A6 // 1702 + SYS___WCSCOLL_C_A = 0x6A7 // 1703 + SYS___WCSCOLL_STD_A = 0x6A8 // 1704 + SYS___WCSXFRM_A = 0x6A9 // 1705 + SYS___WCSXFRM_C_A = 0x6AA // 1706 + SYS___WCSXFRM_STD_A = 0x6AB // 1707 + SYS___COLLATE_INIT_A = 0x6AC // 1708 + SYS___WCTYPE_A = 0x6AD // 1709 + SYS___GET_WCTYPE_STD_A = 0x6AE // 1710 + SYS___CTYPE_INIT_A = 0x6AF // 1711 + SYS___ISWCTYPE_A = 0x6B0 // 1712 + SYS___EXECV_A = 0x6B1 // 1713 + SYS___IS_WCTYPE_STD_A = 0x6B2 // 1714 + SYS___TOWLOWER_A = 0x6B3 // 1715 + SYS___TOWLOWER_STD_A = 0x6B4 // 1716 + SYS___TOWUPPER_A = 0x6B5 // 1717 + SYS___TOWUPPER_STD_A = 0x6B6 // 1718 + SYS___LOCALE_INIT_A = 0x6B7 // 1719 + SYS___LOCALECONV_A = 0x6B8 // 1720 + SYS___LOCALECONV_STD_A = 0x6B9 // 1721 + SYS___NL_LANGINFO_A = 0x6BA // 1722 + SYS___NL_LNAGINFO_STD_A = 0x6BB // 1723 + SYS___MONETARY_INIT_A = 0x6BC // 1724 + SYS___STRFMON_A = 0x6BD // 1725 + SYS___STRFMON_STD_A = 0x6BE // 1726 + SYS___GETADDRINFO_A = 0x6BF // 1727 + SYS___CATGETS_A = 0x6C0 // 1728 + SYS___EXECVE_A = 0x6C1 // 1729 + SYS___EXECVP_A = 0x6C2 // 1730 + SYS___SPAWN_A = 0x6C3 // 1731 + SYS___GETNAMEINFO_A = 0x6C4 // 1732 + SYS___SPAWNP_A = 0x6C5 // 1733 + SYS___NUMERIC_INIT_A = 0x6C6 // 1734 + SYS___RESP_INIT_A = 0x6C7 // 1735 + SYS___RPMATCH_A = 0x6C8 // 1736 + SYS___RPMATCH_C_A = 0x6C9 // 1737 + SYS___RPMATCH_STD_A = 0x6CA // 1738 + SYS___TIME_INIT_A = 0x6CB // 1739 + SYS___STRFTIME_A = 0x6CC // 1740 + SYS___STRFTIME_STD_A = 0x6CD // 1741 + SYS___STRPTIME_A = 0x6CE // 1742 + SYS___STRPTIME_STD_A = 0x6CF // 1743 + SYS___WCSFTIME_A = 0x6D0 // 1744 + SYS___WCSFTIME_STD_A = 0x6D1 // 1745 + SYS_____SPAWN2_A = 0x6D2 // 1746 + SYS_____SPAWNP2_A = 0x6D3 // 1747 + SYS___SYNTAX_INIT_A = 0x6D4 // 1748 + SYS___TOD_INIT_A = 0x6D5 // 1749 + SYS___NL_CSINFO_A = 0x6D6 // 1750 + SYS___NL_MONINFO_A = 0x6D7 // 1751 + SYS___NL_NUMINFO_A = 0x6D8 // 1752 + SYS___NL_RESPINFO_A = 0x6D9 // 1753 + SYS___NL_TIMINFO_A = 0x6DA // 1754 + SYS___IF_NAMETOINDEX_A = 0x6DB // 1755 + SYS___IF_INDEXTONAME_A = 0x6DC // 1756 + SYS___PRINTF_A = 0x6DD // 1757 + SYS___ICONV_OPEN_A = 0x6DE // 1758 + SYS___DLLLOAD_A = 0x6DF // 1759 + SYS___DLLQUERYFN_A = 0x6E0 // 1760 + SYS___DLLQUERYVAR_A = 0x6E1 // 1761 + SYS_____CHATTR_A = 0x6E2 // 1762 + SYS___E2A_L = 0x6E3 // 1763 + SYS_____TOCCSID_A = 0x6E4 // 1764 + SYS_____TOCSNAME_A = 0x6E5 // 1765 + SYS_____CCSIDTYPE_A = 0x6E6 // 1766 + SYS_____CSNAMETYPE_A = 0x6E7 // 1767 + SYS___CHMOD_A = 0x6E8 // 1768 + SYS___MKDIR_A = 0x6E9 // 1769 + SYS___STAT_A = 0x6EA // 1770 + SYS___STAT_O_A = 0x6EB // 1771 + SYS___MKFIFO_A = 0x6EC // 1772 + SYS_____OPEN_STAT_A = 0x6ED // 1773 + SYS___LSTAT_A = 0x6EE // 1774 + SYS___LSTAT_O_A = 0x6EF // 1775 + 
SYS___MKNOD_A = 0x6F0 // 1776 + SYS___MOUNT_A = 0x6F1 // 1777 + SYS___UMOUNT_A = 0x6F2 // 1778 + SYS___CHAUDIT_A = 0x6F4 // 1780 + SYS___W_GETMNTENT_A = 0x6F5 // 1781 + SYS___CREAT_A = 0x6F6 // 1782 + SYS___OPEN_A = 0x6F7 // 1783 + SYS___SETLOCALE_A = 0x6F9 // 1785 + SYS___FPRINTF_A = 0x6FA // 1786 + SYS___SPRINTF_A = 0x6FB // 1787 + SYS___VFPRINTF_A = 0x6FC // 1788 + SYS___VPRINTF_A = 0x6FD // 1789 + SYS___VSPRINTF_A = 0x6FE // 1790 + SYS___VSWPRINTF_A = 0x6FF // 1791 + SYS___SWPRINTF_A = 0x700 // 1792 + SYS___FSCANF_A = 0x701 // 1793 + SYS___SCANF_A = 0x702 // 1794 + SYS___SSCANF_A = 0x703 // 1795 + SYS___SWSCANF_A = 0x704 // 1796 + SYS___ATOF_A = 0x705 // 1797 + SYS___ATOI_A = 0x706 // 1798 + SYS___ATOL_A = 0x707 // 1799 + SYS___STRTOD_A = 0x708 // 1800 + SYS___STRTOL_A = 0x709 // 1801 + SYS___STRTOUL_A = 0x70A // 1802 + SYS_____AE_CORRESTBL_QUERY_A = 0x70B // 1803 + SYS___A64L_A = 0x70C // 1804 + SYS___ECVT_A = 0x70D // 1805 + SYS___FCVT_A = 0x70E // 1806 + SYS___GCVT_A = 0x70F // 1807 + SYS___L64A_A = 0x710 // 1808 + SYS___STRERROR_A = 0x711 // 1809 + SYS___PERROR_A = 0x712 // 1810 + SYS___FETCH_A = 0x713 // 1811 + SYS___GETENV_A = 0x714 // 1812 + SYS___MKSTEMP_A = 0x717 // 1815 + SYS___PTSNAME_A = 0x718 // 1816 + SYS___PUTENV_A = 0x719 // 1817 + SYS___REALPATH_A = 0x71A // 1818 + SYS___SETENV_A = 0x71B // 1819 + SYS___SYSTEM_A = 0x71C // 1820 + SYS___GETOPT_A = 0x71D // 1821 + SYS___CATOPEN_A = 0x71E // 1822 + SYS___ACCESS_A = 0x71F // 1823 + SYS___CHDIR_A = 0x720 // 1824 + SYS___CHOWN_A = 0x721 // 1825 + SYS___CHROOT_A = 0x722 // 1826 + SYS___GETCWD_A = 0x723 // 1827 + SYS___GETWD_A = 0x724 // 1828 + SYS___LCHOWN_A = 0x725 // 1829 + SYS___LINK_A = 0x726 // 1830 + SYS___PATHCONF_A = 0x727 // 1831 + SYS___IF_NAMEINDEX_A = 0x728 // 1832 + SYS___READLINK_A = 0x729 // 1833 + SYS___RMDIR_A = 0x72A // 1834 + SYS___STATVFS_A = 0x72B // 1835 + SYS___SYMLINK_A = 0x72C // 1836 + SYS___TRUNCATE_A = 0x72D // 1837 + SYS___UNLINK_A = 0x72E // 1838 + SYS___GAI_STRERROR_A = 0x72F // 1839 + SYS___EXTLINK_NP_A = 0x730 // 1840 + SYS___ISALNUM_A = 0x731 // 1841 + SYS___ISALPHA_A = 0x732 // 1842 + SYS___A2E_S = 0x733 // 1843 + SYS___ISCNTRL_A = 0x734 // 1844 + SYS___ISDIGIT_A = 0x735 // 1845 + SYS___ISGRAPH_A = 0x736 // 1846 + SYS___ISLOWER_A = 0x737 // 1847 + SYS___ISPRINT_A = 0x738 // 1848 + SYS___ISPUNCT_A = 0x739 // 1849 + SYS___ISSPACE_A = 0x73A // 1850 + SYS___ISUPPER_A = 0x73B // 1851 + SYS___ISXDIGIT_A = 0x73C // 1852 + SYS___TOLOWER_A = 0x73D // 1853 + SYS___TOUPPER_A = 0x73E // 1854 + SYS___ISWALNUM_A = 0x73F // 1855 + SYS___ISWALPHA_A = 0x740 // 1856 + SYS___A2E_L = 0x741 // 1857 + SYS___ISWCNTRL_A = 0x742 // 1858 + SYS___ISWDIGIT_A = 0x743 // 1859 + SYS___ISWGRAPH_A = 0x744 // 1860 + SYS___ISWLOWER_A = 0x745 // 1861 + SYS___ISWPRINT_A = 0x746 // 1862 + SYS___ISWPUNCT_A = 0x747 // 1863 + SYS___ISWSPACE_A = 0x748 // 1864 + SYS___ISWUPPER_A = 0x749 // 1865 + SYS___ISWXDIGIT_A = 0x74A // 1866 + SYS___CONFSTR_A = 0x74B // 1867 + SYS___FTOK_A = 0x74C // 1868 + SYS___MKTEMP_A = 0x74D // 1869 + SYS___FDOPEN_A = 0x74E // 1870 + SYS___FLDATA_A = 0x74F // 1871 + SYS___REMOVE_A = 0x750 // 1872 + SYS___RENAME_A = 0x751 // 1873 + SYS___TMPNAM_A = 0x752 // 1874 + SYS___FOPEN_A = 0x753 // 1875 + SYS___FREOPEN_A = 0x754 // 1876 + SYS___CUSERID_A = 0x755 // 1877 + SYS___POPEN_A = 0x756 // 1878 + SYS___TEMPNAM_A = 0x757 // 1879 + SYS___FTW_A = 0x758 // 1880 + SYS___GETGRENT_A = 0x759 // 1881 + SYS___GETGRGID_A = 0x75A // 1882 + SYS___GETGRNAM_A = 0x75B // 1883 + SYS___GETGROUPSBYNAME_A = 0x75C // 1884 + 
SYS___GETHOSTENT_A = 0x75D // 1885 + SYS___GETHOSTNAME_A = 0x75E // 1886 + SYS___GETLOGIN_A = 0x75F // 1887 + SYS___INET_NTOP_A = 0x760 // 1888 + SYS___GETPASS_A = 0x761 // 1889 + SYS___GETPWENT_A = 0x762 // 1890 + SYS___GETPWNAM_A = 0x763 // 1891 + SYS___GETPWUID_A = 0x764 // 1892 + SYS_____CHECK_RESOURCE_AUTH_NP_A = 0x765 // 1893 + SYS___CHECKSCHENV_A = 0x766 // 1894 + SYS___CONNECTSERVER_A = 0x767 // 1895 + SYS___CONNECTWORKMGR_A = 0x768 // 1896 + SYS_____CONSOLE_A = 0x769 // 1897 + SYS___CREATEWORKUNIT_A = 0x76A // 1898 + SYS___CTERMID_A = 0x76B // 1899 + SYS___FMTMSG_A = 0x76C // 1900 + SYS___INITGROUPS_A = 0x76D // 1901 + SYS_____LOGIN_A = 0x76E // 1902 + SYS___MSGRCV_A = 0x76F // 1903 + SYS___MSGSND_A = 0x770 // 1904 + SYS___MSGXRCV_A = 0x771 // 1905 + SYS___NFTW_A = 0x772 // 1906 + SYS_____PASSWD_A = 0x773 // 1907 + SYS___PTHREAD_SECURITY_NP_A = 0x774 // 1908 + SYS___QUERYMETRICS_A = 0x775 // 1909 + SYS___QUERYSCHENV = 0x776 // 1910 + SYS___READV_A = 0x777 // 1911 + SYS_____SERVER_CLASSIFY_A = 0x778 // 1912 + SYS_____SERVER_INIT_A = 0x779 // 1913 + SYS_____SERVER_PWU_A = 0x77A // 1914 + SYS___STRCASECMP_A = 0x77B // 1915 + SYS___STRNCASECMP_A = 0x77C // 1916 + SYS___TTYNAME_A = 0x77D // 1917 + SYS___UNAME_A = 0x77E // 1918 + SYS___UTIMES_A = 0x77F // 1919 + SYS___W_GETPSENT_A = 0x780 // 1920 + SYS___WRITEV_A = 0x781 // 1921 + SYS___W_STATFS_A = 0x782 // 1922 + SYS___W_STATVFS_A = 0x783 // 1923 + SYS___FPUTC_A = 0x784 // 1924 + SYS___PUTCHAR_A = 0x785 // 1925 + SYS___PUTS_A = 0x786 // 1926 + SYS___FGETS_A = 0x787 // 1927 + SYS___GETS_A = 0x788 // 1928 + SYS___FPUTS_A = 0x789 // 1929 + SYS___FREAD_A = 0x78A // 1930 + SYS___FWRITE_A = 0x78B // 1931 + SYS___OPEN_O_A = 0x78C // 1932 + SYS___ISASCII = 0x78D // 1933 + SYS___CREAT_O_A = 0x78E // 1934 + SYS___ENVNA = 0x78F // 1935 + SYS___PUTC_A = 0x790 // 1936 + SYS___AE_THREAD_SETMODE = 0x791 // 1937 + SYS___AE_THREAD_SWAPMODE = 0x792 // 1938 + SYS___GETNETBYADDR_A = 0x793 // 1939 + SYS___GETNETBYNAME_A = 0x794 // 1940 + SYS___GETNETENT_A = 0x795 // 1941 + SYS___GETPROTOBYNAME_A = 0x796 // 1942 + SYS___GETPROTOBYNUMBER_A = 0x797 // 1943 + SYS___GETPROTOENT_A = 0x798 // 1944 + SYS___GETSERVBYNAME_A = 0x799 // 1945 + SYS___GETSERVBYPORT_A = 0x79A // 1946 + SYS___GETSERVENT_A = 0x79B // 1947 + SYS___ASCTIME_A = 0x79C // 1948 + SYS___CTIME_A = 0x79D // 1949 + SYS___GETDATE_A = 0x79E // 1950 + SYS___TZSET_A = 0x79F // 1951 + SYS___UTIME_A = 0x7A0 // 1952 + SYS___ASCTIME_R_A = 0x7A1 // 1953 + SYS___CTIME_R_A = 0x7A2 // 1954 + SYS___STRTOLL_A = 0x7A3 // 1955 + SYS___STRTOULL_A = 0x7A4 // 1956 + SYS___FPUTWC_A = 0x7A5 // 1957 + SYS___PUTWC_A = 0x7A6 // 1958 + SYS___PUTWCHAR_A = 0x7A7 // 1959 + SYS___FPUTWS_A = 0x7A8 // 1960 + SYS___UNGETWC_A = 0x7A9 // 1961 + SYS___FGETWC_A = 0x7AA // 1962 + SYS___GETWC_A = 0x7AB // 1963 + SYS___GETWCHAR_A = 0x7AC // 1964 + SYS___FGETWS_A = 0x7AD // 1965 + SYS___GETTIMEOFDAY_A = 0x7AE // 1966 + SYS___GMTIME_A = 0x7AF // 1967 + SYS___GMTIME_R_A = 0x7B0 // 1968 + SYS___LOCALTIME_A = 0x7B1 // 1969 + SYS___LOCALTIME_R_A = 0x7B2 // 1970 + SYS___MKTIME_A = 0x7B3 // 1971 + SYS___TZZNA = 0x7B4 // 1972 + SYS_UNATEXIT = 0x7B5 // 1973 + SYS___CEE3DMP_A = 0x7B6 // 1974 + SYS___CDUMP_A = 0x7B7 // 1975 + SYS___CSNAP_A = 0x7B8 // 1976 + SYS___CTEST_A = 0x7B9 // 1977 + SYS___CTRACE_A = 0x7BA // 1978 + SYS___VSWPRNTF2_A = 0x7BB // 1979 + SYS___INET_PTON_A = 0x7BC // 1980 + SYS___SYSLOG_A = 0x7BD // 1981 + SYS___CRYPT_A = 0x7BE // 1982 + SYS_____OPENDIR2_A = 0x7BF // 1983 + SYS_____READDIR2_A = 0x7C0 // 1984 + SYS___OPENDIR_A = 
0x7C2 // 1986 + SYS___READDIR_A = 0x7C3 // 1987 + SYS_PREAD = 0x7C7 // 1991 + SYS_PWRITE = 0x7C8 // 1992 + SYS_M_CREATE_LAYOUT = 0x7C9 // 1993 + SYS_M_DESTROY_LAYOUT = 0x7CA // 1994 + SYS_M_GETVALUES_LAYOUT = 0x7CB // 1995 + SYS_M_SETVALUES_LAYOUT = 0x7CC // 1996 + SYS_M_TRANSFORM_LAYOUT = 0x7CD // 1997 + SYS_M_WTRANSFORM_LAYOUT = 0x7CE // 1998 + SYS_FWPRINTF = 0x7D1 // 2001 + SYS_WPRINTF = 0x7D2 // 2002 + SYS_VFWPRINT = 0x7D3 // 2003 + SYS_VFWPRINTF = 0x7D3 // 2003 + SYS_VWPRINTF = 0x7D4 // 2004 + SYS_FWSCANF = 0x7D5 // 2005 + SYS_WSCANF = 0x7D6 // 2006 + SYS_WCTRANS = 0x7D7 // 2007 + SYS_TOWCTRAN = 0x7D8 // 2008 + SYS_TOWCTRANS = 0x7D8 // 2008 + SYS___WCSTOD_A = 0x7D9 // 2009 + SYS___WCSTOL_A = 0x7DA // 2010 + SYS___WCSTOUL_A = 0x7DB // 2011 + SYS___BASENAME_A = 0x7DC // 2012 + SYS___DIRNAME_A = 0x7DD // 2013 + SYS___GLOB_A = 0x7DE // 2014 + SYS_FWIDE = 0x7DF // 2015 + SYS___OSNAME = 0x7E0 // 2016 + SYS_____OSNAME_A = 0x7E1 // 2017 + SYS___BTOWC_A = 0x7E4 // 2020 + SYS___WCTOB_A = 0x7E5 // 2021 + SYS___DBM_OPEN_A = 0x7E6 // 2022 + SYS___VFPRINTF2_A = 0x7E7 // 2023 + SYS___VPRINTF2_A = 0x7E8 // 2024 + SYS___VSPRINTF2_A = 0x7E9 // 2025 + SYS___CEIL_H = 0x7EA // 2026 + SYS___FLOOR_H = 0x7EB // 2027 + SYS___MODF_H = 0x7EC // 2028 + SYS___FABS_H = 0x7ED // 2029 + SYS___J0_H = 0x7EE // 2030 + SYS___J1_H = 0x7EF // 2031 + SYS___JN_H = 0x7F0 // 2032 + SYS___Y0_H = 0x7F1 // 2033 + SYS___Y1_H = 0x7F2 // 2034 + SYS___YN_H = 0x7F3 // 2035 + SYS___CEILF_H = 0x7F4 // 2036 + SYS___CEILL_H = 0x7F5 // 2037 + SYS___FLOORF_H = 0x7F6 // 2038 + SYS___FLOORL_H = 0x7F7 // 2039 + SYS___MODFF_H = 0x7F8 // 2040 + SYS___MODFL_H = 0x7F9 // 2041 + SYS___FABSF_H = 0x7FA // 2042 + SYS___FABSL_H = 0x7FB // 2043 + SYS___MALLOC24 = 0x7FC // 2044 + SYS___MALLOC31 = 0x7FD // 2045 + SYS_ACL_INIT = 0x7FE // 2046 + SYS_ACL_FREE = 0x7FF // 2047 + SYS_ACL_FIRST_ENTRY = 0x800 // 2048 + SYS_ACL_GET_ENTRY = 0x801 // 2049 + SYS_ACL_VALID = 0x802 // 2050 + SYS_ACL_CREATE_ENTRY = 0x803 // 2051 + SYS_ACL_DELETE_ENTRY = 0x804 // 2052 + SYS_ACL_UPDATE_ENTRY = 0x805 // 2053 + SYS_ACL_DELETE_FD = 0x806 // 2054 + SYS_ACL_DELETE_FILE = 0x807 // 2055 + SYS_ACL_GET_FD = 0x808 // 2056 + SYS_ACL_GET_FILE = 0x809 // 2057 + SYS_ACL_SET_FD = 0x80A // 2058 + SYS_ACL_SET_FILE = 0x80B // 2059 + SYS_ACL_FROM_TEXT = 0x80C // 2060 + SYS_ACL_TO_TEXT = 0x80D // 2061 + SYS_ACL_SORT = 0x80E // 2062 + SYS___SHUTDOWN_REGISTRATION = 0x80F // 2063 + SYS___ERFL_B = 0x810 // 2064 + SYS___ERFCL_B = 0x811 // 2065 + SYS___LGAMMAL_B = 0x812 // 2066 + SYS___SETHOOKEVENTS = 0x813 // 2067 + SYS_IF_NAMETOINDEX = 0x814 // 2068 + SYS_IF_INDEXTONAME = 0x815 // 2069 + SYS_IF_NAMEINDEX = 0x816 // 2070 + SYS_IF_FREENAMEINDEX = 0x817 // 2071 + SYS_GETADDRINFO = 0x818 // 2072 + SYS_GETNAMEINFO = 0x819 // 2073 + SYS_FREEADDRINFO = 0x81A // 2074 + SYS_GAI_STRERROR = 0x81B // 2075 + SYS_REXEC_AF = 0x81C // 2076 + SYS___POE = 0x81D // 2077 + SYS___DYNALLOC_A = 0x81F // 2079 + SYS___DYNFREE_A = 0x820 // 2080 + SYS___RES_QUERY_A = 0x821 // 2081 + SYS___RES_SEARCH_A = 0x822 // 2082 + SYS___RES_QUERYDOMAIN_A = 0x823 // 2083 + SYS___RES_MKQUERY_A = 0x824 // 2084 + SYS___RES_SEND_A = 0x825 // 2085 + SYS___DN_EXPAND_A = 0x826 // 2086 + SYS___DN_SKIPNAME_A = 0x827 // 2087 + SYS___DN_COMP_A = 0x828 // 2088 + SYS___DN_FIND_A = 0x829 // 2089 + SYS___NLIST_A = 0x82A // 2090 + SYS_____TCGETCP_A = 0x82B // 2091 + SYS_____TCSETCP_A = 0x82C // 2092 + SYS_____W_PIOCTL_A = 0x82E // 2094 + SYS___INET_ADDR_A = 0x82F // 2095 + SYS___INET_NTOA_A = 0x830 // 2096 + SYS___INET_NETWORK_A = 0x831 // 2097 + 
SYS___ACCEPT_A = 0x832 // 2098 + SYS___ACCEPT_AND_RECV_A = 0x833 // 2099 + SYS___BIND_A = 0x834 // 2100 + SYS___CONNECT_A = 0x835 // 2101 + SYS___GETPEERNAME_A = 0x836 // 2102 + SYS___GETSOCKNAME_A = 0x837 // 2103 + SYS___RECVFROM_A = 0x838 // 2104 + SYS___SENDTO_A = 0x839 // 2105 + SYS___SENDMSG_A = 0x83A // 2106 + SYS___RECVMSG_A = 0x83B // 2107 + SYS_____LCHATTR_A = 0x83C // 2108 + SYS___CABEND = 0x83D // 2109 + SYS___LE_CIB_GET = 0x83E // 2110 + SYS___SET_LAA_FOR_JIT = 0x83F // 2111 + SYS___LCHATTR = 0x840 // 2112 + SYS___WRITEDOWN = 0x841 // 2113 + SYS_PTHREAD_MUTEX_INIT2 = 0x842 // 2114 + SYS___ACOSHF_B = 0x843 // 2115 + SYS___ACOSHL_B = 0x844 // 2116 + SYS___ASINHF_B = 0x845 // 2117 + SYS___ASINHL_B = 0x846 // 2118 + SYS___ATANHF_B = 0x847 // 2119 + SYS___ATANHL_B = 0x848 // 2120 + SYS___CBRTF_B = 0x849 // 2121 + SYS___CBRTL_B = 0x84A // 2122 + SYS___COPYSIGNF_B = 0x84B // 2123 + SYS___COPYSIGNL_B = 0x84C // 2124 + SYS___COTANF_B = 0x84D // 2125 + SYS___COTAN_B = 0x84E // 2126 + SYS___COTANL_B = 0x84F // 2127 + SYS___EXP2F_B = 0x850 // 2128 + SYS___EXP2L_B = 0x851 // 2129 + SYS___EXPM1F_B = 0x852 // 2130 + SYS___EXPM1L_B = 0x853 // 2131 + SYS___FDIMF_B = 0x854 // 2132 + SYS___FDIM_B = 0x855 // 2133 + SYS___FDIML_B = 0x856 // 2134 + SYS___HYPOTF_B = 0x857 // 2135 + SYS___HYPOTL_B = 0x858 // 2136 + SYS___LOG1PF_B = 0x859 // 2137 + SYS___LOG1PL_B = 0x85A // 2138 + SYS___LOG2F_B = 0x85B // 2139 + SYS___LOG2_B = 0x85C // 2140 + SYS___LOG2L_B = 0x85D // 2141 + SYS___REMAINDERF_B = 0x85E // 2142 + SYS___REMAINDERL_B = 0x85F // 2143 + SYS___REMQUOF_B = 0x860 // 2144 + SYS___REMQUO_B = 0x861 // 2145 + SYS___REMQUOL_B = 0x862 // 2146 + SYS___TGAMMAF_B = 0x863 // 2147 + SYS___TGAMMA_B = 0x864 // 2148 + SYS___TGAMMAL_B = 0x865 // 2149 + SYS___TRUNCF_B = 0x866 // 2150 + SYS___TRUNC_B = 0x867 // 2151 + SYS___TRUNCL_B = 0x868 // 2152 + SYS___LGAMMAF_B = 0x869 // 2153 + SYS___LROUNDF_B = 0x86A // 2154 + SYS___LROUND_B = 0x86B // 2155 + SYS___ERFF_B = 0x86C // 2156 + SYS___ERFCF_B = 0x86D // 2157 + SYS_ACOSHF = 0x86E // 2158 + SYS_ACOSHL = 0x86F // 2159 + SYS_ASINHF = 0x870 // 2160 + SYS_ASINHL = 0x871 // 2161 + SYS_ATANHF = 0x872 // 2162 + SYS_ATANHL = 0x873 // 2163 + SYS_CBRTF = 0x874 // 2164 + SYS_CBRTL = 0x875 // 2165 + SYS_COPYSIGNF = 0x876 // 2166 + SYS_CPYSIGNF = 0x876 // 2166 + SYS_COPYSIGNL = 0x877 // 2167 + SYS_CPYSIGNL = 0x877 // 2167 + SYS_COTANF = 0x878 // 2168 + SYS___COTANF = 0x878 // 2168 + SYS_COTAN = 0x879 // 2169 + SYS___COTAN = 0x879 // 2169 + SYS_COTANL = 0x87A // 2170 + SYS___COTANL = 0x87A // 2170 + SYS_EXP2F = 0x87B // 2171 + SYS_EXP2L = 0x87C // 2172 + SYS_EXPM1F = 0x87D // 2173 + SYS_EXPM1L = 0x87E // 2174 + SYS_FDIMF = 0x87F // 2175 + SYS_FDIM = 0x881 // 2177 + SYS_FDIML = 0x882 // 2178 + SYS_HYPOTF = 0x883 // 2179 + SYS_HYPOTL = 0x884 // 2180 + SYS_LOG1PF = 0x885 // 2181 + SYS_LOG1PL = 0x886 // 2182 + SYS_LOG2F = 0x887 // 2183 + SYS_LOG2 = 0x888 // 2184 + SYS_LOG2L = 0x889 // 2185 + SYS_REMAINDERF = 0x88A // 2186 + SYS_REMAINDF = 0x88A // 2186 + SYS_REMAINDERL = 0x88B // 2187 + SYS_REMAINDL = 0x88B // 2187 + SYS_REMQUOF = 0x88C // 2188 + SYS_REMQUO = 0x88D // 2189 + SYS_REMQUOL = 0x88E // 2190 + SYS_TGAMMAF = 0x88F // 2191 + SYS_TGAMMA = 0x890 // 2192 + SYS_TGAMMAL = 0x891 // 2193 + SYS_TRUNCF = 0x892 // 2194 + SYS_TRUNC = 0x893 // 2195 + SYS_TRUNCL = 0x894 // 2196 + SYS_LGAMMAF = 0x895 // 2197 + SYS_LGAMMAL = 0x896 // 2198 + SYS_LROUNDF = 0x897 // 2199 + SYS_LROUND = 0x898 // 2200 + SYS_ERFF = 0x899 // 2201 + SYS_ERFL = 0x89A // 2202 + SYS_ERFCF = 0x89B // 2203 + 
SYS_ERFCL = 0x89C // 2204 + SYS___EXP2_B = 0x89D // 2205 + SYS_EXP2 = 0x89E // 2206 + SYS___FAR_JUMP = 0x89F // 2207 + SYS___TCGETATTR_A = 0x8A1 // 2209 + SYS___TCSETATTR_A = 0x8A2 // 2210 + SYS___SUPERKILL = 0x8A4 // 2212 + SYS___LE_CONDITION_TOKEN_BUILD = 0x8A5 // 2213 + SYS___LE_MSG_ADD_INSERT = 0x8A6 // 2214 + SYS___LE_MSG_GET = 0x8A7 // 2215 + SYS___LE_MSG_GET_AND_WRITE = 0x8A8 // 2216 + SYS___LE_MSG_WRITE = 0x8A9 // 2217 + SYS___ITOA = 0x8AA // 2218 + SYS___UTOA = 0x8AB // 2219 + SYS___LTOA = 0x8AC // 2220 + SYS___ULTOA = 0x8AD // 2221 + SYS___LLTOA = 0x8AE // 2222 + SYS___ULLTOA = 0x8AF // 2223 + SYS___ITOA_A = 0x8B0 // 2224 + SYS___UTOA_A = 0x8B1 // 2225 + SYS___LTOA_A = 0x8B2 // 2226 + SYS___ULTOA_A = 0x8B3 // 2227 + SYS___LLTOA_A = 0x8B4 // 2228 + SYS___ULLTOA_A = 0x8B5 // 2229 + SYS_____GETENV_A = 0x8C3 // 2243 + SYS___REXEC_A = 0x8C4 // 2244 + SYS___REXEC_AF_A = 0x8C5 // 2245 + SYS___GETUTXENT_A = 0x8C6 // 2246 + SYS___GETUTXID_A = 0x8C7 // 2247 + SYS___GETUTXLINE_A = 0x8C8 // 2248 + SYS___PUTUTXLINE_A = 0x8C9 // 2249 + SYS_____UTMPXNAME_A = 0x8CA // 2250 + SYS___PUTC_UNLOCKED_A = 0x8CB // 2251 + SYS___PUTCHAR_UNLOCKED_A = 0x8CC // 2252 + SYS___SNPRINTF_A = 0x8CD // 2253 + SYS___VSNPRINTF_A = 0x8CE // 2254 + SYS___DLOPEN_A = 0x8D0 // 2256 + SYS___DLSYM_A = 0x8D1 // 2257 + SYS___DLERROR_A = 0x8D2 // 2258 + SYS_FLOCKFILE = 0x8D3 // 2259 + SYS_FTRYLOCKFILE = 0x8D4 // 2260 + SYS_FUNLOCKFILE = 0x8D5 // 2261 + SYS_GETC_UNLOCKED = 0x8D6 // 2262 + SYS_GETCHAR_UNLOCKED = 0x8D7 // 2263 + SYS_PUTC_UNLOCKED = 0x8D8 // 2264 + SYS_PUTCHAR_UNLOCKED = 0x8D9 // 2265 + SYS_SNPRINTF = 0x8DA // 2266 + SYS_VSNPRINTF = 0x8DB // 2267 + SYS_DLOPEN = 0x8DD // 2269 + SYS_DLSYM = 0x8DE // 2270 + SYS_DLCLOSE = 0x8DF // 2271 + SYS_DLERROR = 0x8E0 // 2272 + SYS___SET_EXCEPTION_HANDLER = 0x8E2 // 2274 + SYS___RESET_EXCEPTION_HANDLER = 0x8E3 // 2275 + SYS___VHM_EVENT = 0x8E4 // 2276 + SYS___ABS_H = 0x8E6 // 2278 + SYS___ABSF_H = 0x8E7 // 2279 + SYS___ABSL_H = 0x8E8 // 2280 + SYS___ACOS_H = 0x8E9 // 2281 + SYS___ACOSF_H = 0x8EA // 2282 + SYS___ACOSL_H = 0x8EB // 2283 + SYS___ACOSH_H = 0x8EC // 2284 + SYS___ASIN_H = 0x8ED // 2285 + SYS___ASINF_H = 0x8EE // 2286 + SYS___ASINL_H = 0x8EF // 2287 + SYS___ASINH_H = 0x8F0 // 2288 + SYS___ATAN_H = 0x8F1 // 2289 + SYS___ATANF_H = 0x8F2 // 2290 + SYS___ATANL_H = 0x8F3 // 2291 + SYS___ATANH_H = 0x8F4 // 2292 + SYS___ATANHF_H = 0x8F5 // 2293 + SYS___ATANHL_H = 0x8F6 // 2294 + SYS___ATAN2_H = 0x8F7 // 2295 + SYS___ATAN2F_H = 0x8F8 // 2296 + SYS___ATAN2L_H = 0x8F9 // 2297 + SYS___CBRT_H = 0x8FA // 2298 + SYS___COPYSIGNF_H = 0x8FB // 2299 + SYS___COPYSIGNL_H = 0x8FC // 2300 + SYS___COS_H = 0x8FD // 2301 + SYS___COSF_H = 0x8FE // 2302 + SYS___COSL_H = 0x8FF // 2303 + SYS___COSHF_H = 0x900 // 2304 + SYS___COSHL_H = 0x901 // 2305 + SYS___COTAN_H = 0x902 // 2306 + SYS___COTANF_H = 0x903 // 2307 + SYS___COTANL_H = 0x904 // 2308 + SYS___ERF_H = 0x905 // 2309 + SYS___ERFF_H = 0x906 // 2310 + SYS___ERFL_H = 0x907 // 2311 + SYS___ERFC_H = 0x908 // 2312 + SYS___ERFCF_H = 0x909 // 2313 + SYS___ERFCL_H = 0x90A // 2314 + SYS___EXP_H = 0x90B // 2315 + SYS___EXPF_H = 0x90C // 2316 + SYS___EXPL_H = 0x90D // 2317 + SYS___EXPM1_H = 0x90E // 2318 + SYS___FDIM_H = 0x90F // 2319 + SYS___FDIMF_H = 0x910 // 2320 + SYS___FDIML_H = 0x911 // 2321 + SYS___FMOD_H = 0x912 // 2322 + SYS___FMODF_H = 0x913 // 2323 + SYS___FMODL_H = 0x914 // 2324 + SYS___GAMMA_H = 0x915 // 2325 + SYS___HYPOT_H = 0x916 // 2326 + SYS___ILOGB_H = 0x917 // 2327 + SYS___LGAMMA_H = 0x918 // 2328 + SYS___LGAMMAF_H = 0x919 // 2329 
+ SYS___LOG_H = 0x91A // 2330 + SYS___LOGF_H = 0x91B // 2331 + SYS___LOGL_H = 0x91C // 2332 + SYS___LOGB_H = 0x91D // 2333 + SYS___LOG2_H = 0x91E // 2334 + SYS___LOG2F_H = 0x91F // 2335 + SYS___LOG2L_H = 0x920 // 2336 + SYS___LOG1P_H = 0x921 // 2337 + SYS___LOG10_H = 0x922 // 2338 + SYS___LOG10F_H = 0x923 // 2339 + SYS___LOG10L_H = 0x924 // 2340 + SYS___LROUND_H = 0x925 // 2341 + SYS___LROUNDF_H = 0x926 // 2342 + SYS___NEXTAFTER_H = 0x927 // 2343 + SYS___POW_H = 0x928 // 2344 + SYS___POWF_H = 0x929 // 2345 + SYS___POWL_H = 0x92A // 2346 + SYS___REMAINDER_H = 0x92B // 2347 + SYS___RINT_H = 0x92C // 2348 + SYS___SCALB_H = 0x92D // 2349 + SYS___SIN_H = 0x92E // 2350 + SYS___SINF_H = 0x92F // 2351 + SYS___SINL_H = 0x930 // 2352 + SYS___SINH_H = 0x931 // 2353 + SYS___SINHF_H = 0x932 // 2354 + SYS___SINHL_H = 0x933 // 2355 + SYS___SQRT_H = 0x934 // 2356 + SYS___SQRTF_H = 0x935 // 2357 + SYS___SQRTL_H = 0x936 // 2358 + SYS___TAN_H = 0x937 // 2359 + SYS___TANF_H = 0x938 // 2360 + SYS___TANL_H = 0x939 // 2361 + SYS___TANH_H = 0x93A // 2362 + SYS___TANHF_H = 0x93B // 2363 + SYS___TANHL_H = 0x93C // 2364 + SYS___TGAMMA_H = 0x93D // 2365 + SYS___TGAMMAF_H = 0x93E // 2366 + SYS___TRUNC_H = 0x93F // 2367 + SYS___TRUNCF_H = 0x940 // 2368 + SYS___TRUNCL_H = 0x941 // 2369 + SYS___COSH_H = 0x942 // 2370 + SYS___LE_DEBUG_SET_RESUME_MCH = 0x943 // 2371 + SYS_VFSCANF = 0x944 // 2372 + SYS_VSCANF = 0x946 // 2374 + SYS_VSSCANF = 0x948 // 2376 + SYS_VFWSCANF = 0x94A // 2378 + SYS_VWSCANF = 0x94C // 2380 + SYS_VSWSCANF = 0x94E // 2382 + SYS_IMAXABS = 0x950 // 2384 + SYS_IMAXDIV = 0x951 // 2385 + SYS_STRTOIMAX = 0x952 // 2386 + SYS_STRTOUMAX = 0x953 // 2387 + SYS_WCSTOIMAX = 0x954 // 2388 + SYS_WCSTOUMAX = 0x955 // 2389 + SYS_ATOLL = 0x956 // 2390 + SYS_STRTOF = 0x957 // 2391 + SYS_STRTOLD = 0x958 // 2392 + SYS_WCSTOF = 0x959 // 2393 + SYS_WCSTOLD = 0x95A // 2394 + SYS_INET6_RTH_SPACE = 0x95B // 2395 + SYS_INET6_RTH_INIT = 0x95C // 2396 + SYS_INET6_RTH_ADD = 0x95D // 2397 + SYS_INET6_RTH_REVERSE = 0x95E // 2398 + SYS_INET6_RTH_SEGMENTS = 0x95F // 2399 + SYS_INET6_RTH_GETADDR = 0x960 // 2400 + SYS_INET6_OPT_INIT = 0x961 // 2401 + SYS_INET6_OPT_APPEND = 0x962 // 2402 + SYS_INET6_OPT_FINISH = 0x963 // 2403 + SYS_INET6_OPT_SET_VAL = 0x964 // 2404 + SYS_INET6_OPT_NEXT = 0x965 // 2405 + SYS_INET6_OPT_FIND = 0x966 // 2406 + SYS_INET6_OPT_GET_VAL = 0x967 // 2407 + SYS___POW_I = 0x987 // 2439 + SYS___POW_I_B = 0x988 // 2440 + SYS___POW_I_H = 0x989 // 2441 + SYS___POW_II = 0x98A // 2442 + SYS___POW_II_B = 0x98B // 2443 + SYS___POW_II_H = 0x98C // 2444 + SYS_CABS = 0x98E // 2446 + SYS___CABS_B = 0x98F // 2447 + SYS___CABS_H = 0x990 // 2448 + SYS_CABSF = 0x991 // 2449 + SYS___CABSF_B = 0x992 // 2450 + SYS___CABSF_H = 0x993 // 2451 + SYS_CABSL = 0x994 // 2452 + SYS___CABSL_B = 0x995 // 2453 + SYS___CABSL_H = 0x996 // 2454 + SYS_CACOS = 0x997 // 2455 + SYS___CACOS_B = 0x998 // 2456 + SYS___CACOS_H = 0x999 // 2457 + SYS_CACOSF = 0x99A // 2458 + SYS___CACOSF_B = 0x99B // 2459 + SYS___CACOSF_H = 0x99C // 2460 + SYS_CACOSL = 0x99D // 2461 + SYS___CACOSL_B = 0x99E // 2462 + SYS___CACOSL_H = 0x99F // 2463 + SYS_CACOSH = 0x9A0 // 2464 + SYS___CACOSH_B = 0x9A1 // 2465 + SYS___CACOSH_H = 0x9A2 // 2466 + SYS_CACOSHF = 0x9A3 // 2467 + SYS___CACOSHF_B = 0x9A4 // 2468 + SYS___CACOSHF_H = 0x9A5 // 2469 + SYS_CACOSHL = 0x9A6 // 2470 + SYS___CACOSHL_B = 0x9A7 // 2471 + SYS___CACOSHL_H = 0x9A8 // 2472 + SYS_CARG = 0x9A9 // 2473 + SYS___CARG_B = 0x9AA // 2474 + SYS___CARG_H = 0x9AB // 2475 + SYS_CARGF = 0x9AC // 2476 + SYS___CARGF_B = 0x9AD // 
2477 + SYS___CARGF_H = 0x9AE // 2478 + SYS_CARGL = 0x9AF // 2479 + SYS___CARGL_B = 0x9B0 // 2480 + SYS___CARGL_H = 0x9B1 // 2481 + SYS_CASIN = 0x9B2 // 2482 + SYS___CASIN_B = 0x9B3 // 2483 + SYS___CASIN_H = 0x9B4 // 2484 + SYS_CASINF = 0x9B5 // 2485 + SYS___CASINF_B = 0x9B6 // 2486 + SYS___CASINF_H = 0x9B7 // 2487 + SYS_CASINL = 0x9B8 // 2488 + SYS___CASINL_B = 0x9B9 // 2489 + SYS___CASINL_H = 0x9BA // 2490 + SYS_CASINH = 0x9BB // 2491 + SYS___CASINH_B = 0x9BC // 2492 + SYS___CASINH_H = 0x9BD // 2493 + SYS_CASINHF = 0x9BE // 2494 + SYS___CASINHF_B = 0x9BF // 2495 + SYS___CASINHF_H = 0x9C0 // 2496 + SYS_CASINHL = 0x9C1 // 2497 + SYS___CASINHL_B = 0x9C2 // 2498 + SYS___CASINHL_H = 0x9C3 // 2499 + SYS_CATAN = 0x9C4 // 2500 + SYS___CATAN_B = 0x9C5 // 2501 + SYS___CATAN_H = 0x9C6 // 2502 + SYS_CATANF = 0x9C7 // 2503 + SYS___CATANF_B = 0x9C8 // 2504 + SYS___CATANF_H = 0x9C9 // 2505 + SYS_CATANL = 0x9CA // 2506 + SYS___CATANL_B = 0x9CB // 2507 + SYS___CATANL_H = 0x9CC // 2508 + SYS_CATANH = 0x9CD // 2509 + SYS___CATANH_B = 0x9CE // 2510 + SYS___CATANH_H = 0x9CF // 2511 + SYS_CATANHF = 0x9D0 // 2512 + SYS___CATANHF_B = 0x9D1 // 2513 + SYS___CATANHF_H = 0x9D2 // 2514 + SYS_CATANHL = 0x9D3 // 2515 + SYS___CATANHL_B = 0x9D4 // 2516 + SYS___CATANHL_H = 0x9D5 // 2517 + SYS_CCOS = 0x9D6 // 2518 + SYS___CCOS_B = 0x9D7 // 2519 + SYS___CCOS_H = 0x9D8 // 2520 + SYS_CCOSF = 0x9D9 // 2521 + SYS___CCOSF_B = 0x9DA // 2522 + SYS___CCOSF_H = 0x9DB // 2523 + SYS_CCOSL = 0x9DC // 2524 + SYS___CCOSL_B = 0x9DD // 2525 + SYS___CCOSL_H = 0x9DE // 2526 + SYS_CCOSH = 0x9DF // 2527 + SYS___CCOSH_B = 0x9E0 // 2528 + SYS___CCOSH_H = 0x9E1 // 2529 + SYS_CCOSHF = 0x9E2 // 2530 + SYS___CCOSHF_B = 0x9E3 // 2531 + SYS___CCOSHF_H = 0x9E4 // 2532 + SYS_CCOSHL = 0x9E5 // 2533 + SYS___CCOSHL_B = 0x9E6 // 2534 + SYS___CCOSHL_H = 0x9E7 // 2535 + SYS_CEXP = 0x9E8 // 2536 + SYS___CEXP_B = 0x9E9 // 2537 + SYS___CEXP_H = 0x9EA // 2538 + SYS_CEXPF = 0x9EB // 2539 + SYS___CEXPF_B = 0x9EC // 2540 + SYS___CEXPF_H = 0x9ED // 2541 + SYS_CEXPL = 0x9EE // 2542 + SYS___CEXPL_B = 0x9EF // 2543 + SYS___CEXPL_H = 0x9F0 // 2544 + SYS_CIMAG = 0x9F1 // 2545 + SYS___CIMAG_B = 0x9F2 // 2546 + SYS___CIMAG_H = 0x9F3 // 2547 + SYS_CIMAGF = 0x9F4 // 2548 + SYS___CIMAGF_B = 0x9F5 // 2549 + SYS___CIMAGF_H = 0x9F6 // 2550 + SYS_CIMAGL = 0x9F7 // 2551 + SYS___CIMAGL_B = 0x9F8 // 2552 + SYS___CIMAGL_H = 0x9F9 // 2553 + SYS___CLOG = 0x9FA // 2554 + SYS___CLOG_B = 0x9FB // 2555 + SYS___CLOG_H = 0x9FC // 2556 + SYS_CLOGF = 0x9FD // 2557 + SYS___CLOGF_B = 0x9FE // 2558 + SYS___CLOGF_H = 0x9FF // 2559 + SYS_CLOGL = 0xA00 // 2560 + SYS___CLOGL_B = 0xA01 // 2561 + SYS___CLOGL_H = 0xA02 // 2562 + SYS_CONJ = 0xA03 // 2563 + SYS___CONJ_B = 0xA04 // 2564 + SYS___CONJ_H = 0xA05 // 2565 + SYS_CONJF = 0xA06 // 2566 + SYS___CONJF_B = 0xA07 // 2567 + SYS___CONJF_H = 0xA08 // 2568 + SYS_CONJL = 0xA09 // 2569 + SYS___CONJL_B = 0xA0A // 2570 + SYS___CONJL_H = 0xA0B // 2571 + SYS_CPOW = 0xA0C // 2572 + SYS___CPOW_B = 0xA0D // 2573 + SYS___CPOW_H = 0xA0E // 2574 + SYS_CPOWF = 0xA0F // 2575 + SYS___CPOWF_B = 0xA10 // 2576 + SYS___CPOWF_H = 0xA11 // 2577 + SYS_CPOWL = 0xA12 // 2578 + SYS___CPOWL_B = 0xA13 // 2579 + SYS___CPOWL_H = 0xA14 // 2580 + SYS_CPROJ = 0xA15 // 2581 + SYS___CPROJ_B = 0xA16 // 2582 + SYS___CPROJ_H = 0xA17 // 2583 + SYS_CPROJF = 0xA18 // 2584 + SYS___CPROJF_B = 0xA19 // 2585 + SYS___CPROJF_H = 0xA1A // 2586 + SYS_CPROJL = 0xA1B // 2587 + SYS___CPROJL_B = 0xA1C // 2588 + SYS___CPROJL_H = 0xA1D // 2589 + SYS_CREAL = 0xA1E // 2590 + SYS___CREAL_B = 0xA1F // 2591 + 
SYS___CREAL_H = 0xA20 // 2592 + SYS_CREALF = 0xA21 // 2593 + SYS___CREALF_B = 0xA22 // 2594 + SYS___CREALF_H = 0xA23 // 2595 + SYS_CREALL = 0xA24 // 2596 + SYS___CREALL_B = 0xA25 // 2597 + SYS___CREALL_H = 0xA26 // 2598 + SYS_CSIN = 0xA27 // 2599 + SYS___CSIN_B = 0xA28 // 2600 + SYS___CSIN_H = 0xA29 // 2601 + SYS_CSINF = 0xA2A // 2602 + SYS___CSINF_B = 0xA2B // 2603 + SYS___CSINF_H = 0xA2C // 2604 + SYS_CSINL = 0xA2D // 2605 + SYS___CSINL_B = 0xA2E // 2606 + SYS___CSINL_H = 0xA2F // 2607 + SYS_CSINH = 0xA30 // 2608 + SYS___CSINH_B = 0xA31 // 2609 + SYS___CSINH_H = 0xA32 // 2610 + SYS_CSINHF = 0xA33 // 2611 + SYS___CSINHF_B = 0xA34 // 2612 + SYS___CSINHF_H = 0xA35 // 2613 + SYS_CSINHL = 0xA36 // 2614 + SYS___CSINHL_B = 0xA37 // 2615 + SYS___CSINHL_H = 0xA38 // 2616 + SYS_CSQRT = 0xA39 // 2617 + SYS___CSQRT_B = 0xA3A // 2618 + SYS___CSQRT_H = 0xA3B // 2619 + SYS_CSQRTF = 0xA3C // 2620 + SYS___CSQRTF_B = 0xA3D // 2621 + SYS___CSQRTF_H = 0xA3E // 2622 + SYS_CSQRTL = 0xA3F // 2623 + SYS___CSQRTL_B = 0xA40 // 2624 + SYS___CSQRTL_H = 0xA41 // 2625 + SYS_CTAN = 0xA42 // 2626 + SYS___CTAN_B = 0xA43 // 2627 + SYS___CTAN_H = 0xA44 // 2628 + SYS_CTANF = 0xA45 // 2629 + SYS___CTANF_B = 0xA46 // 2630 + SYS___CTANF_H = 0xA47 // 2631 + SYS_CTANL = 0xA48 // 2632 + SYS___CTANL_B = 0xA49 // 2633 + SYS___CTANL_H = 0xA4A // 2634 + SYS_CTANH = 0xA4B // 2635 + SYS___CTANH_B = 0xA4C // 2636 + SYS___CTANH_H = 0xA4D // 2637 + SYS_CTANHF = 0xA4E // 2638 + SYS___CTANHF_B = 0xA4F // 2639 + SYS___CTANHF_H = 0xA50 // 2640 + SYS_CTANHL = 0xA51 // 2641 + SYS___CTANHL_B = 0xA52 // 2642 + SYS___CTANHL_H = 0xA53 // 2643 + SYS___ACOSHF_H = 0xA54 // 2644 + SYS___ACOSHL_H = 0xA55 // 2645 + SYS___ASINHF_H = 0xA56 // 2646 + SYS___ASINHL_H = 0xA57 // 2647 + SYS___CBRTF_H = 0xA58 // 2648 + SYS___CBRTL_H = 0xA59 // 2649 + SYS___COPYSIGN_B = 0xA5A // 2650 + SYS___EXPM1F_H = 0xA5B // 2651 + SYS___EXPM1L_H = 0xA5C // 2652 + SYS___EXP2_H = 0xA5D // 2653 + SYS___EXP2F_H = 0xA5E // 2654 + SYS___EXP2L_H = 0xA5F // 2655 + SYS___LOG1PF_H = 0xA60 // 2656 + SYS___LOG1PL_H = 0xA61 // 2657 + SYS___LGAMMAL_H = 0xA62 // 2658 + SYS_FMA = 0xA63 // 2659 + SYS___FMA_B = 0xA64 // 2660 + SYS___FMA_H = 0xA65 // 2661 + SYS_FMAF = 0xA66 // 2662 + SYS___FMAF_B = 0xA67 // 2663 + SYS___FMAF_H = 0xA68 // 2664 + SYS_FMAL = 0xA69 // 2665 + SYS___FMAL_B = 0xA6A // 2666 + SYS___FMAL_H = 0xA6B // 2667 + SYS_FMAX = 0xA6C // 2668 + SYS___FMAX_B = 0xA6D // 2669 + SYS___FMAX_H = 0xA6E // 2670 + SYS_FMAXF = 0xA6F // 2671 + SYS___FMAXF_B = 0xA70 // 2672 + SYS___FMAXF_H = 0xA71 // 2673 + SYS_FMAXL = 0xA72 // 2674 + SYS___FMAXL_B = 0xA73 // 2675 + SYS___FMAXL_H = 0xA74 // 2676 + SYS_FMIN = 0xA75 // 2677 + SYS___FMIN_B = 0xA76 // 2678 + SYS___FMIN_H = 0xA77 // 2679 + SYS_FMINF = 0xA78 // 2680 + SYS___FMINF_B = 0xA79 // 2681 + SYS___FMINF_H = 0xA7A // 2682 + SYS_FMINL = 0xA7B // 2683 + SYS___FMINL_B = 0xA7C // 2684 + SYS___FMINL_H = 0xA7D // 2685 + SYS_ILOGBF = 0xA7E // 2686 + SYS___ILOGBF_B = 0xA7F // 2687 + SYS___ILOGBF_H = 0xA80 // 2688 + SYS_ILOGBL = 0xA81 // 2689 + SYS___ILOGBL_B = 0xA82 // 2690 + SYS___ILOGBL_H = 0xA83 // 2691 + SYS_LLRINT = 0xA84 // 2692 + SYS___LLRINT_B = 0xA85 // 2693 + SYS___LLRINT_H = 0xA86 // 2694 + SYS_LLRINTF = 0xA87 // 2695 + SYS___LLRINTF_B = 0xA88 // 2696 + SYS___LLRINTF_H = 0xA89 // 2697 + SYS_LLRINTL = 0xA8A // 2698 + SYS___LLRINTL_B = 0xA8B // 2699 + SYS___LLRINTL_H = 0xA8C // 2700 + SYS_LLROUND = 0xA8D // 2701 + SYS___LLROUND_B = 0xA8E // 2702 + SYS___LLROUND_H = 0xA8F // 2703 + SYS_LLROUNDF = 0xA90 // 2704 + SYS___LLROUNDF_B = 0xA91 // 
2705 + SYS___LLROUNDF_H = 0xA92 // 2706 + SYS_LLROUNDL = 0xA93 // 2707 + SYS___LLROUNDL_B = 0xA94 // 2708 + SYS___LLROUNDL_H = 0xA95 // 2709 + SYS_LOGBF = 0xA96 // 2710 + SYS___LOGBF_B = 0xA97 // 2711 + SYS___LOGBF_H = 0xA98 // 2712 + SYS_LOGBL = 0xA99 // 2713 + SYS___LOGBL_B = 0xA9A // 2714 + SYS___LOGBL_H = 0xA9B // 2715 + SYS_LRINT = 0xA9C // 2716 + SYS___LRINT_B = 0xA9D // 2717 + SYS___LRINT_H = 0xA9E // 2718 + SYS_LRINTF = 0xA9F // 2719 + SYS___LRINTF_B = 0xAA0 // 2720 + SYS___LRINTF_H = 0xAA1 // 2721 + SYS_LRINTL = 0xAA2 // 2722 + SYS___LRINTL_B = 0xAA3 // 2723 + SYS___LRINTL_H = 0xAA4 // 2724 + SYS_LROUNDL = 0xAA5 // 2725 + SYS___LROUNDL_B = 0xAA6 // 2726 + SYS___LROUNDL_H = 0xAA7 // 2727 + SYS_NAN = 0xAA8 // 2728 + SYS___NAN_B = 0xAA9 // 2729 + SYS_NANF = 0xAAA // 2730 + SYS___NANF_B = 0xAAB // 2731 + SYS_NANL = 0xAAC // 2732 + SYS___NANL_B = 0xAAD // 2733 + SYS_NEARBYINT = 0xAAE // 2734 + SYS___NEARBYINT_B = 0xAAF // 2735 + SYS___NEARBYINT_H = 0xAB0 // 2736 + SYS_NEARBYINTF = 0xAB1 // 2737 + SYS___NEARBYINTF_B = 0xAB2 // 2738 + SYS___NEARBYINTF_H = 0xAB3 // 2739 + SYS_NEARBYINTL = 0xAB4 // 2740 + SYS___NEARBYINTL_B = 0xAB5 // 2741 + SYS___NEARBYINTL_H = 0xAB6 // 2742 + SYS_NEXTAFTERF = 0xAB7 // 2743 + SYS___NEXTAFTERF_B = 0xAB8 // 2744 + SYS___NEXTAFTERF_H = 0xAB9 // 2745 + SYS_NEXTAFTERL = 0xABA // 2746 + SYS___NEXTAFTERL_B = 0xABB // 2747 + SYS___NEXTAFTERL_H = 0xABC // 2748 + SYS_NEXTTOWARD = 0xABD // 2749 + SYS___NEXTTOWARD_B = 0xABE // 2750 + SYS___NEXTTOWARD_H = 0xABF // 2751 + SYS_NEXTTOWARDF = 0xAC0 // 2752 + SYS___NEXTTOWARDF_B = 0xAC1 // 2753 + SYS___NEXTTOWARDF_H = 0xAC2 // 2754 + SYS_NEXTTOWARDL = 0xAC3 // 2755 + SYS___NEXTTOWARDL_B = 0xAC4 // 2756 + SYS___NEXTTOWARDL_H = 0xAC5 // 2757 + SYS___REMAINDERF_H = 0xAC6 // 2758 + SYS___REMAINDERL_H = 0xAC7 // 2759 + SYS___REMQUO_H = 0xAC8 // 2760 + SYS___REMQUOF_H = 0xAC9 // 2761 + SYS___REMQUOL_H = 0xACA // 2762 + SYS_RINTF = 0xACB // 2763 + SYS___RINTF_B = 0xACC // 2764 + SYS_RINTL = 0xACD // 2765 + SYS___RINTL_B = 0xACE // 2766 + SYS_ROUND = 0xACF // 2767 + SYS___ROUND_B = 0xAD0 // 2768 + SYS___ROUND_H = 0xAD1 // 2769 + SYS_ROUNDF = 0xAD2 // 2770 + SYS___ROUNDF_B = 0xAD3 // 2771 + SYS___ROUNDF_H = 0xAD4 // 2772 + SYS_ROUNDL = 0xAD5 // 2773 + SYS___ROUNDL_B = 0xAD6 // 2774 + SYS___ROUNDL_H = 0xAD7 // 2775 + SYS_SCALBLN = 0xAD8 // 2776 + SYS___SCALBLN_B = 0xAD9 // 2777 + SYS___SCALBLN_H = 0xADA // 2778 + SYS_SCALBLNF = 0xADB // 2779 + SYS___SCALBLNF_B = 0xADC // 2780 + SYS___SCALBLNF_H = 0xADD // 2781 + SYS_SCALBLNL = 0xADE // 2782 + SYS___SCALBLNL_B = 0xADF // 2783 + SYS___SCALBLNL_H = 0xAE0 // 2784 + SYS___SCALBN_B = 0xAE1 // 2785 + SYS___SCALBN_H = 0xAE2 // 2786 + SYS_SCALBNF = 0xAE3 // 2787 + SYS___SCALBNF_B = 0xAE4 // 2788 + SYS___SCALBNF_H = 0xAE5 // 2789 + SYS_SCALBNL = 0xAE6 // 2790 + SYS___SCALBNL_B = 0xAE7 // 2791 + SYS___SCALBNL_H = 0xAE8 // 2792 + SYS___TGAMMAL_H = 0xAE9 // 2793 + SYS_FECLEAREXCEPT = 0xAEA // 2794 + SYS_FEGETENV = 0xAEB // 2795 + SYS_FEGETEXCEPTFLAG = 0xAEC // 2796 + SYS_FEGETROUND = 0xAED // 2797 + SYS_FEHOLDEXCEPT = 0xAEE // 2798 + SYS_FERAISEEXCEPT = 0xAEF // 2799 + SYS_FESETENV = 0xAF0 // 2800 + SYS_FESETEXCEPTFLAG = 0xAF1 // 2801 + SYS_FESETROUND = 0xAF2 // 2802 + SYS_FETESTEXCEPT = 0xAF3 // 2803 + SYS_FEUPDATEENV = 0xAF4 // 2804 + SYS___COPYSIGN_H = 0xAF5 // 2805 + SYS___HYPOTF_H = 0xAF6 // 2806 + SYS___HYPOTL_H = 0xAF7 // 2807 + SYS___CLASS = 0xAFA // 2810 + SYS___CLASS_B = 0xAFB // 2811 + SYS___CLASS_H = 0xAFC // 2812 + SYS___ISBLANK_A = 0xB2E // 2862 + SYS___ISWBLANK_A = 0xB2F // 2863 + 
SYS___LROUND_FIXUP = 0xB30 // 2864 + SYS___LROUNDF_FIXUP = 0xB31 // 2865 + SYS_SCHED_YIELD = 0xB32 // 2866 + SYS_STRERROR_R = 0xB33 // 2867 + SYS_UNSETENV = 0xB34 // 2868 + SYS___LGAMMA_H_C99 = 0xB38 // 2872 + SYS___LGAMMA_B_C99 = 0xB39 // 2873 + SYS___LGAMMA_R_C99 = 0xB3A // 2874 + SYS___FTELL2 = 0xB3B // 2875 + SYS___FSEEK2 = 0xB3C // 2876 + SYS___STATIC_REINIT = 0xB3D // 2877 + SYS_PTHREAD_ATTR_GETSTACK = 0xB3E // 2878 + SYS_PTHREAD_ATTR_SETSTACK = 0xB3F // 2879 + SYS___TGAMMA_H_C99 = 0xB78 // 2936 + SYS___TGAMMAF_H_C99 = 0xB79 // 2937 + SYS___LE_TRACEBACK = 0xB7A // 2938 + SYS___MUST_STAY_CLEAN = 0xB7C // 2940 + SYS___O_ENV = 0xB7D // 2941 + SYS_ACOSD32 = 0xB7E // 2942 + SYS_ACOSD64 = 0xB7F // 2943 + SYS_ACOSD128 = 0xB80 // 2944 + SYS_ACOSHD32 = 0xB81 // 2945 + SYS_ACOSHD64 = 0xB82 // 2946 + SYS_ACOSHD128 = 0xB83 // 2947 + SYS_ASIND32 = 0xB84 // 2948 + SYS_ASIND64 = 0xB85 // 2949 + SYS_ASIND128 = 0xB86 // 2950 + SYS_ASINHD32 = 0xB87 // 2951 + SYS_ASINHD64 = 0xB88 // 2952 + SYS_ASINHD128 = 0xB89 // 2953 + SYS_ATAND32 = 0xB8A // 2954 + SYS_ATAND64 = 0xB8B // 2955 + SYS_ATAND128 = 0xB8C // 2956 + SYS_ATAN2D32 = 0xB8D // 2957 + SYS_ATAN2D64 = 0xB8E // 2958 + SYS_ATAN2D128 = 0xB8F // 2959 + SYS_ATANHD32 = 0xB90 // 2960 + SYS_ATANHD64 = 0xB91 // 2961 + SYS_ATANHD128 = 0xB92 // 2962 + SYS_CBRTD32 = 0xB93 // 2963 + SYS_CBRTD64 = 0xB94 // 2964 + SYS_CBRTD128 = 0xB95 // 2965 + SYS_CEILD32 = 0xB96 // 2966 + SYS_CEILD64 = 0xB97 // 2967 + SYS_CEILD128 = 0xB98 // 2968 + SYS___CLASS2 = 0xB99 // 2969 + SYS___CLASS2_B = 0xB9A // 2970 + SYS___CLASS2_H = 0xB9B // 2971 + SYS_COPYSIGND32 = 0xB9C // 2972 + SYS_COPYSIGND64 = 0xB9D // 2973 + SYS_COPYSIGND128 = 0xB9E // 2974 + SYS_COSD32 = 0xB9F // 2975 + SYS_COSD64 = 0xBA0 // 2976 + SYS_COSD128 = 0xBA1 // 2977 + SYS_COSHD32 = 0xBA2 // 2978 + SYS_COSHD64 = 0xBA3 // 2979 + SYS_COSHD128 = 0xBA4 // 2980 + SYS_ERFD32 = 0xBA5 // 2981 + SYS_ERFD64 = 0xBA6 // 2982 + SYS_ERFD128 = 0xBA7 // 2983 + SYS_ERFCD32 = 0xBA8 // 2984 + SYS_ERFCD64 = 0xBA9 // 2985 + SYS_ERFCD128 = 0xBAA // 2986 + SYS_EXPD32 = 0xBAB // 2987 + SYS_EXPD64 = 0xBAC // 2988 + SYS_EXPD128 = 0xBAD // 2989 + SYS_EXP2D32 = 0xBAE // 2990 + SYS_EXP2D64 = 0xBAF // 2991 + SYS_EXP2D128 = 0xBB0 // 2992 + SYS_EXPM1D32 = 0xBB1 // 2993 + SYS_EXPM1D64 = 0xBB2 // 2994 + SYS_EXPM1D128 = 0xBB3 // 2995 + SYS_FABSD32 = 0xBB4 // 2996 + SYS_FABSD64 = 0xBB5 // 2997 + SYS_FABSD128 = 0xBB6 // 2998 + SYS_FDIMD32 = 0xBB7 // 2999 + SYS_FDIMD64 = 0xBB8 // 3000 + SYS_FDIMD128 = 0xBB9 // 3001 + SYS_FE_DEC_GETROUND = 0xBBA // 3002 + SYS_FE_DEC_SETROUND = 0xBBB // 3003 + SYS_FLOORD32 = 0xBBC // 3004 + SYS_FLOORD64 = 0xBBD // 3005 + SYS_FLOORD128 = 0xBBE // 3006 + SYS_FMAD32 = 0xBBF // 3007 + SYS_FMAD64 = 0xBC0 // 3008 + SYS_FMAD128 = 0xBC1 // 3009 + SYS_FMAXD32 = 0xBC2 // 3010 + SYS_FMAXD64 = 0xBC3 // 3011 + SYS_FMAXD128 = 0xBC4 // 3012 + SYS_FMIND32 = 0xBC5 // 3013 + SYS_FMIND64 = 0xBC6 // 3014 + SYS_FMIND128 = 0xBC7 // 3015 + SYS_FMODD32 = 0xBC8 // 3016 + SYS_FMODD64 = 0xBC9 // 3017 + SYS_FMODD128 = 0xBCA // 3018 + SYS___FP_CAST_D = 0xBCB // 3019 + SYS_FREXPD32 = 0xBCC // 3020 + SYS_FREXPD64 = 0xBCD // 3021 + SYS_FREXPD128 = 0xBCE // 3022 + SYS_HYPOTD32 = 0xBCF // 3023 + SYS_HYPOTD64 = 0xBD0 // 3024 + SYS_HYPOTD128 = 0xBD1 // 3025 + SYS_ILOGBD32 = 0xBD2 // 3026 + SYS_ILOGBD64 = 0xBD3 // 3027 + SYS_ILOGBD128 = 0xBD4 // 3028 + SYS_LDEXPD32 = 0xBD5 // 3029 + SYS_LDEXPD64 = 0xBD6 // 3030 + SYS_LDEXPD128 = 0xBD7 // 3031 + SYS_LGAMMAD32 = 0xBD8 // 3032 + SYS_LGAMMAD64 = 0xBD9 // 3033 + SYS_LGAMMAD128 = 0xBDA // 3034 + SYS_LLRINTD32 = 
0xBDB // 3035 + SYS_LLRINTD64 = 0xBDC // 3036 + SYS_LLRINTD128 = 0xBDD // 3037 + SYS_LLROUNDD32 = 0xBDE // 3038 + SYS_LLROUNDD64 = 0xBDF // 3039 + SYS_LLROUNDD128 = 0xBE0 // 3040 + SYS_LOGD32 = 0xBE1 // 3041 + SYS_LOGD64 = 0xBE2 // 3042 + SYS_LOGD128 = 0xBE3 // 3043 + SYS_LOG10D32 = 0xBE4 // 3044 + SYS_LOG10D64 = 0xBE5 // 3045 + SYS_LOG10D128 = 0xBE6 // 3046 + SYS_LOG1PD32 = 0xBE7 // 3047 + SYS_LOG1PD64 = 0xBE8 // 3048 + SYS_LOG1PD128 = 0xBE9 // 3049 + SYS_LOG2D32 = 0xBEA // 3050 + SYS_LOG2D64 = 0xBEB // 3051 + SYS_LOG2D128 = 0xBEC // 3052 + SYS_LOGBD32 = 0xBED // 3053 + SYS_LOGBD64 = 0xBEE // 3054 + SYS_LOGBD128 = 0xBEF // 3055 + SYS_LRINTD32 = 0xBF0 // 3056 + SYS_LRINTD64 = 0xBF1 // 3057 + SYS_LRINTD128 = 0xBF2 // 3058 + SYS_LROUNDD32 = 0xBF3 // 3059 + SYS_LROUNDD64 = 0xBF4 // 3060 + SYS_LROUNDD128 = 0xBF5 // 3061 + SYS_MODFD32 = 0xBF6 // 3062 + SYS_MODFD64 = 0xBF7 // 3063 + SYS_MODFD128 = 0xBF8 // 3064 + SYS_NAND32 = 0xBF9 // 3065 + SYS_NAND64 = 0xBFA // 3066 + SYS_NAND128 = 0xBFB // 3067 + SYS_NEARBYINTD32 = 0xBFC // 3068 + SYS_NEARBYINTD64 = 0xBFD // 3069 + SYS_NEARBYINTD128 = 0xBFE // 3070 + SYS_NEXTAFTERD32 = 0xBFF // 3071 + SYS_NEXTAFTERD64 = 0xC00 // 3072 + SYS_NEXTAFTERD128 = 0xC01 // 3073 + SYS_NEXTTOWARDD32 = 0xC02 // 3074 + SYS_NEXTTOWARDD64 = 0xC03 // 3075 + SYS_NEXTTOWARDD128 = 0xC04 // 3076 + SYS_POWD32 = 0xC05 // 3077 + SYS_POWD64 = 0xC06 // 3078 + SYS_POWD128 = 0xC07 // 3079 + SYS_QUANTIZED32 = 0xC08 // 3080 + SYS_QUANTIZED64 = 0xC09 // 3081 + SYS_QUANTIZED128 = 0xC0A // 3082 + SYS_REMAINDERD32 = 0xC0B // 3083 + SYS_REMAINDERD64 = 0xC0C // 3084 + SYS_REMAINDERD128 = 0xC0D // 3085 + SYS___REMQUOD32 = 0xC0E // 3086 + SYS___REMQUOD64 = 0xC0F // 3087 + SYS___REMQUOD128 = 0xC10 // 3088 + SYS_RINTD32 = 0xC11 // 3089 + SYS_RINTD64 = 0xC12 // 3090 + SYS_RINTD128 = 0xC13 // 3091 + SYS_ROUNDD32 = 0xC14 // 3092 + SYS_ROUNDD64 = 0xC15 // 3093 + SYS_ROUNDD128 = 0xC16 // 3094 + SYS_SAMEQUANTUMD32 = 0xC17 // 3095 + SYS_SAMEQUANTUMD64 = 0xC18 // 3096 + SYS_SAMEQUANTUMD128 = 0xC19 // 3097 + SYS_SCALBLND32 = 0xC1A // 3098 + SYS_SCALBLND64 = 0xC1B // 3099 + SYS_SCALBLND128 = 0xC1C // 3100 + SYS_SCALBND32 = 0xC1D // 3101 + SYS_SCALBND64 = 0xC1E // 3102 + SYS_SCALBND128 = 0xC1F // 3103 + SYS_SIND32 = 0xC20 // 3104 + SYS_SIND64 = 0xC21 // 3105 + SYS_SIND128 = 0xC22 // 3106 + SYS_SINHD32 = 0xC23 // 3107 + SYS_SINHD64 = 0xC24 // 3108 + SYS_SINHD128 = 0xC25 // 3109 + SYS_SQRTD32 = 0xC26 // 3110 + SYS_SQRTD64 = 0xC27 // 3111 + SYS_SQRTD128 = 0xC28 // 3112 + SYS_STRTOD32 = 0xC29 // 3113 + SYS_STRTOD64 = 0xC2A // 3114 + SYS_STRTOD128 = 0xC2B // 3115 + SYS_TAND32 = 0xC2C // 3116 + SYS_TAND64 = 0xC2D // 3117 + SYS_TAND128 = 0xC2E // 3118 + SYS_TANHD32 = 0xC2F // 3119 + SYS_TANHD64 = 0xC30 // 3120 + SYS_TANHD128 = 0xC31 // 3121 + SYS_TGAMMAD32 = 0xC32 // 3122 + SYS_TGAMMAD64 = 0xC33 // 3123 + SYS_TGAMMAD128 = 0xC34 // 3124 + SYS_TRUNCD32 = 0xC3E // 3134 + SYS_TRUNCD64 = 0xC3F // 3135 + SYS_TRUNCD128 = 0xC40 // 3136 + SYS_WCSTOD32 = 0xC41 // 3137 + SYS_WCSTOD64 = 0xC42 // 3138 + SYS_WCSTOD128 = 0xC43 // 3139 + SYS___CODEPAGE_INFO = 0xC64 // 3172 + SYS_POSIX_OPENPT = 0xC66 // 3174 + SYS_PSELECT = 0xC67 // 3175 + SYS_SOCKATMARK = 0xC68 // 3176 + SYS_AIO_FSYNC = 0xC69 // 3177 + SYS_LIO_LISTIO = 0xC6A // 3178 + SYS___ATANPID32 = 0xC6B // 3179 + SYS___ATANPID64 = 0xC6C // 3180 + SYS___ATANPID128 = 0xC6D // 3181 + SYS___COSPID32 = 0xC6E // 3182 + SYS___COSPID64 = 0xC6F // 3183 + SYS___COSPID128 = 0xC70 // 3184 + SYS___SINPID32 = 0xC71 // 3185 + SYS___SINPID64 = 0xC72 // 3186 + SYS___SINPID128 = 0xC73 // 3187 
+ SYS_SETIPV4SOURCEFILTER = 0xC76 // 3190 + SYS_GETIPV4SOURCEFILTER = 0xC77 // 3191 + SYS_SETSOURCEFILTER = 0xC78 // 3192 + SYS_GETSOURCEFILTER = 0xC79 // 3193 + SYS_FWRITE_UNLOCKED = 0xC7A // 3194 + SYS_FREAD_UNLOCKED = 0xC7B // 3195 + SYS_FGETS_UNLOCKED = 0xC7C // 3196 + SYS_GETS_UNLOCKED = 0xC7D // 3197 + SYS_FPUTS_UNLOCKED = 0xC7E // 3198 + SYS_PUTS_UNLOCKED = 0xC7F // 3199 + SYS_FGETC_UNLOCKED = 0xC80 // 3200 + SYS_FPUTC_UNLOCKED = 0xC81 // 3201 + SYS_DLADDR = 0xC82 // 3202 + SYS_SHM_OPEN = 0xC8C // 3212 + SYS_SHM_UNLINK = 0xC8D // 3213 + SYS___CLASS2F = 0xC91 // 3217 + SYS___CLASS2L = 0xC92 // 3218 + SYS___CLASS2F_B = 0xC93 // 3219 + SYS___CLASS2F_H = 0xC94 // 3220 + SYS___CLASS2L_B = 0xC95 // 3221 + SYS___CLASS2L_H = 0xC96 // 3222 + SYS___CLASS2D32 = 0xC97 // 3223 + SYS___CLASS2D64 = 0xC98 // 3224 + SYS___CLASS2D128 = 0xC99 // 3225 + SYS___TOCSNAME2 = 0xC9A // 3226 + SYS___D1TOP = 0xC9B // 3227 + SYS___D2TOP = 0xC9C // 3228 + SYS___D4TOP = 0xC9D // 3229 + SYS___PTOD1 = 0xC9E // 3230 + SYS___PTOD2 = 0xC9F // 3231 + SYS___PTOD4 = 0xCA0 // 3232 + SYS_CLEARERR_UNLOCKED = 0xCA1 // 3233 + SYS_FDELREC_UNLOCKED = 0xCA2 // 3234 + SYS_FEOF_UNLOCKED = 0xCA3 // 3235 + SYS_FERROR_UNLOCKED = 0xCA4 // 3236 + SYS_FFLUSH_UNLOCKED = 0xCA5 // 3237 + SYS_FGETPOS_UNLOCKED = 0xCA6 // 3238 + SYS_FGETWC_UNLOCKED = 0xCA7 // 3239 + SYS_FGETWS_UNLOCKED = 0xCA8 // 3240 + SYS_FILENO_UNLOCKED = 0xCA9 // 3241 + SYS_FLDATA_UNLOCKED = 0xCAA // 3242 + SYS_FLOCATE_UNLOCKED = 0xCAB // 3243 + SYS_FPRINTF_UNLOCKED = 0xCAC // 3244 + SYS_FPUTWC_UNLOCKED = 0xCAD // 3245 + SYS_FPUTWS_UNLOCKED = 0xCAE // 3246 + SYS_FSCANF_UNLOCKED = 0xCAF // 3247 + SYS_FSEEK_UNLOCKED = 0xCB0 // 3248 + SYS_FSEEKO_UNLOCKED = 0xCB1 // 3249 + SYS_FSETPOS_UNLOCKED = 0xCB3 // 3251 + SYS_FTELL_UNLOCKED = 0xCB4 // 3252 + SYS_FTELLO_UNLOCKED = 0xCB5 // 3253 + SYS_FUPDATE_UNLOCKED = 0xCB7 // 3255 + SYS_FWIDE_UNLOCKED = 0xCB8 // 3256 + SYS_FWPRINTF_UNLOCKED = 0xCB9 // 3257 + SYS_FWSCANF_UNLOCKED = 0xCBA // 3258 + SYS_GETWC_UNLOCKED = 0xCBB // 3259 + SYS_GETWCHAR_UNLOCKED = 0xCBC // 3260 + SYS_PERROR_UNLOCKED = 0xCBD // 3261 + SYS_PRINTF_UNLOCKED = 0xCBE // 3262 + SYS_PUTWC_UNLOCKED = 0xCBF // 3263 + SYS_PUTWCHAR_UNLOCKED = 0xCC0 // 3264 + SYS_REWIND_UNLOCKED = 0xCC1 // 3265 + SYS_SCANF_UNLOCKED = 0xCC2 // 3266 + SYS_UNGETC_UNLOCKED = 0xCC3 // 3267 + SYS_UNGETWC_UNLOCKED = 0xCC4 // 3268 + SYS_VFPRINTF_UNLOCKED = 0xCC5 // 3269 + SYS_VFSCANF_UNLOCKED = 0xCC7 // 3271 + SYS_VFWPRINTF_UNLOCKED = 0xCC9 // 3273 + SYS_VFWSCANF_UNLOCKED = 0xCCB // 3275 + SYS_VPRINTF_UNLOCKED = 0xCCD // 3277 + SYS_VSCANF_UNLOCKED = 0xCCF // 3279 + SYS_VWPRINTF_UNLOCKED = 0xCD1 // 3281 + SYS_VWSCANF_UNLOCKED = 0xCD3 // 3283 + SYS_WPRINTF_UNLOCKED = 0xCD5 // 3285 + SYS_WSCANF_UNLOCKED = 0xCD6 // 3286 + SYS_ASCTIME64 = 0xCD7 // 3287 + SYS_ASCTIME64_R = 0xCD8 // 3288 + SYS_CTIME64 = 0xCD9 // 3289 + SYS_CTIME64_R = 0xCDA // 3290 + SYS_DIFFTIME64 = 0xCDB // 3291 + SYS_GMTIME64 = 0xCDC // 3292 + SYS_GMTIME64_R = 0xCDD // 3293 + SYS_LOCALTIME64 = 0xCDE // 3294 + SYS_LOCALTIME64_R = 0xCDF // 3295 + SYS_MKTIME64 = 0xCE0 // 3296 + SYS_TIME64 = 0xCE1 // 3297 + SYS___LOGIN_APPLID = 0xCE2 // 3298 + SYS___PASSWD_APPLID = 0xCE3 // 3299 + SYS_PTHREAD_SECURITY_APPLID_NP = 0xCE4 // 3300 + SYS___GETTHENT = 0xCE5 // 3301 + SYS_FREEIFADDRS = 0xCE6 // 3302 + SYS_GETIFADDRS = 0xCE7 // 3303 + SYS_POSIX_FALLOCATE = 0xCE8 // 3304 + SYS_POSIX_MEMALIGN = 0xCE9 // 3305 + SYS_SIZEOF_ALLOC = 0xCEA // 3306 + SYS_RESIZE_ALLOC = 0xCEB // 3307 + SYS_FREAD_NOUPDATE = 0xCEC // 3308 + SYS_FREAD_NOUPDATE_UNLOCKED = 
0xCED // 3309 + SYS_FGETPOS64 = 0xCEE // 3310 + SYS_FSEEK64 = 0xCEF // 3311 + SYS_FSEEKO64 = 0xCF0 // 3312 + SYS_FSETPOS64 = 0xCF1 // 3313 + SYS_FTELL64 = 0xCF2 // 3314 + SYS_FTELLO64 = 0xCF3 // 3315 + SYS_FGETPOS64_UNLOCKED = 0xCF4 // 3316 + SYS_FSEEK64_UNLOCKED = 0xCF5 // 3317 + SYS_FSEEKO64_UNLOCKED = 0xCF6 // 3318 + SYS_FSETPOS64_UNLOCKED = 0xCF7 // 3319 + SYS_FTELL64_UNLOCKED = 0xCF8 // 3320 + SYS_FTELLO64_UNLOCKED = 0xCF9 // 3321 + SYS_FOPEN_UNLOCKED = 0xCFA // 3322 + SYS_FREOPEN_UNLOCKED = 0xCFB // 3323 + SYS_FDOPEN_UNLOCKED = 0xCFC // 3324 + SYS_TMPFILE_UNLOCKED = 0xCFD // 3325 + SYS___MOSERVICES = 0xD3D // 3389 + SYS___GETTOD = 0xD3E // 3390 + SYS_C16RTOMB = 0xD40 // 3392 + SYS_C32RTOMB = 0xD41 // 3393 + SYS_MBRTOC16 = 0xD42 // 3394 + SYS_MBRTOC32 = 0xD43 // 3395 + SYS_QUANTEXPD32 = 0xD44 // 3396 + SYS_QUANTEXPD64 = 0xD45 // 3397 + SYS_QUANTEXPD128 = 0xD46 // 3398 + SYS___LOCALE_CTL = 0xD47 // 3399 + SYS___SMF_RECORD2 = 0xD48 // 3400 + SYS_FOPEN64 = 0xD49 // 3401 + SYS_FOPEN64_UNLOCKED = 0xD4A // 3402 + SYS_FREOPEN64 = 0xD4B // 3403 + SYS_FREOPEN64_UNLOCKED = 0xD4C // 3404 + SYS_TMPFILE64 = 0xD4D // 3405 + SYS_TMPFILE64_UNLOCKED = 0xD4E // 3406 + SYS_GETDATE64 = 0xD4F // 3407 + SYS_GETTIMEOFDAY64 = 0xD50 // 3408 + SYS_BIND2ADDRSEL = 0xD59 // 3417 + SYS_INET6_IS_SRCADDR = 0xD5A // 3418 + SYS___GETGRGID1 = 0xD5B // 3419 + SYS___GETGRNAM1 = 0xD5C // 3420 + SYS___FBUFSIZE = 0xD60 // 3424 + SYS___FPENDING = 0xD61 // 3425 + SYS___FLBF = 0xD62 // 3426 + SYS___FREADABLE = 0xD63 // 3427 + SYS___FWRITABLE = 0xD64 // 3428 + SYS___FREADING = 0xD65 // 3429 + SYS___FWRITING = 0xD66 // 3430 + SYS___FSETLOCKING = 0xD67 // 3431 + SYS__FLUSHLBF = 0xD68 // 3432 + SYS___FPURGE = 0xD69 // 3433 + SYS___FREADAHEAD = 0xD6A // 3434 + SYS___FSETERR = 0xD6B // 3435 + SYS___FPENDING_UNLOCKED = 0xD6C // 3436 + SYS___FREADING_UNLOCKED = 0xD6D // 3437 + SYS___FWRITING_UNLOCKED = 0xD6E // 3438 + SYS__FLUSHLBF_UNLOCKED = 0xD6F // 3439 + SYS___FPURGE_UNLOCKED = 0xD70 // 3440 + SYS___FREADAHEAD_UNLOCKED = 0xD71 // 3441 + SYS___LE_CEEGTJS = 0xD72 // 3442 + SYS___LE_RECORD_DUMP = 0xD73 // 3443 + SYS_FSTAT64 = 0xD74 // 3444 + SYS_LSTAT64 = 0xD75 // 3445 + SYS_STAT64 = 0xD76 // 3446 + SYS___READDIR2_64 = 0xD77 // 3447 + SYS___OPEN_STAT64 = 0xD78 // 3448 + SYS_FTW64 = 0xD79 // 3449 + SYS_NFTW64 = 0xD7A // 3450 + SYS_UTIME64 = 0xD7B // 3451 + SYS_UTIMES64 = 0xD7C // 3452 + SYS___GETIPC64 = 0xD7D // 3453 + SYS_MSGCTL64 = 0xD7E // 3454 + SYS_SEMCTL64 = 0xD7F // 3455 + SYS_SHMCTL64 = 0xD80 // 3456 + SYS_MSGXRCV64 = 0xD81 // 3457 + SYS___MGXR64 = 0xD81 // 3457 + SYS_W_GETPSENT64 = 0xD82 // 3458 + SYS_PTHREAD_COND_TIMEDWAIT64 = 0xD83 // 3459 + SYS_FTIME64 = 0xD85 // 3461 + SYS_GETUTXENT64 = 0xD86 // 3462 + SYS_GETUTXID64 = 0xD87 // 3463 + SYS_GETUTXLINE64 = 0xD88 // 3464 + SYS_PUTUTXLINE64 = 0xD89 // 3465 + SYS_NEWLOCALE = 0xD8A // 3466 + SYS_FREELOCALE = 0xD8B // 3467 + SYS_USELOCALE = 0xD8C // 3468 + SYS_DUPLOCALE = 0xD8D // 3469 + SYS___CHATTR64 = 0xD9C // 3484 + SYS___LCHATTR64 = 0xD9D // 3485 + SYS___FCHATTR64 = 0xD9E // 3486 + SYS_____CHATTR64_A = 0xD9F // 3487 + SYS_____LCHATTR64_A = 0xDA0 // 3488 + SYS___LE_CEEUSGD = 0xDA1 // 3489 + SYS___LE_IFAM_CON = 0xDA2 // 3490 + SYS___LE_IFAM_DSC = 0xDA3 // 3491 + SYS___LE_IFAM_GET = 0xDA4 // 3492 + SYS___LE_IFAM_QRY = 0xDA5 // 3493 + SYS_ALIGNED_ALLOC = 0xDA6 // 3494 + SYS_ACCEPT4 = 0xDA7 // 3495 + SYS___ACCEPT4_A = 0xDA8 // 3496 + SYS_COPYFILERANGE = 0xDA9 // 3497 + SYS_GETLINE = 0xDAA // 3498 + SYS___GETLINE_A = 0xDAB // 3499 + SYS_DIRFD = 0xDAC // 3500 + SYS_CLOCK_GETTIME = 
0xDAD // 3501 + SYS_DUP3 = 0xDAE // 3502 + SYS_EPOLL_CREATE = 0xDAF // 3503 + SYS_EPOLL_CREATE1 = 0xDB0 // 3504 + SYS_EPOLL_CTL = 0xDB1 // 3505 + SYS_EPOLL_WAIT = 0xDB2 // 3506 + SYS_EPOLL_PWAIT = 0xDB3 // 3507 + SYS_EVENTFD = 0xDB4 // 3508 + SYS_STATFS = 0xDB5 // 3509 + SYS___STATFS_A = 0xDB6 // 3510 + SYS_FSTATFS = 0xDB7 // 3511 + SYS_INOTIFY_INIT = 0xDB8 // 3512 + SYS_INOTIFY_INIT1 = 0xDB9 // 3513 + SYS_INOTIFY_ADD_WATCH = 0xDBA // 3514 + SYS___INOTIFY_ADD_WATCH_A = 0xDBB // 3515 + SYS_INOTIFY_RM_WATCH = 0xDBC // 3516 + SYS_PIPE2 = 0xDBD // 3517 + SYS_PIVOT_ROOT = 0xDBE // 3518 + SYS___PIVOT_ROOT_A = 0xDBF // 3519 + SYS_PRCTL = 0xDC0 // 3520 + SYS_PRLIMIT = 0xDC1 // 3521 + SYS_SETHOSTNAME = 0xDC2 // 3522 + SYS___SETHOSTNAME_A = 0xDC3 // 3523 + SYS_SETRESUID = 0xDC4 // 3524 + SYS_SETRESGID = 0xDC5 // 3525 + SYS_PTHREAD_CONDATTR_GETCLOCK = 0xDC6 // 3526 + SYS_FLOCK = 0xDC7 // 3527 + SYS_FGETXATTR = 0xDC8 // 3528 + SYS___FGETXATTR_A = 0xDC9 // 3529 + SYS_FLISTXATTR = 0xDCA // 3530 + SYS___FLISTXATTR_A = 0xDCB // 3531 + SYS_FREMOVEXATTR = 0xDCC // 3532 + SYS___FREMOVEXATTR_A = 0xDCD // 3533 + SYS_FSETXATTR = 0xDCE // 3534 + SYS___FSETXATTR_A = 0xDCF // 3535 + SYS_GETXATTR = 0xDD0 // 3536 + SYS___GETXATTR_A = 0xDD1 // 3537 + SYS_LGETXATTR = 0xDD2 // 3538 + SYS___LGETXATTR_A = 0xDD3 // 3539 + SYS_LISTXATTR = 0xDD4 // 3540 + SYS___LISTXATTR_A = 0xDD5 // 3541 + SYS_LLISTXATTR = 0xDD6 // 3542 + SYS___LLISTXATTR_A = 0xDD7 // 3543 + SYS_LREMOVEXATTR = 0xDD8 // 3544 + SYS___LREMOVEXATTR_A = 0xDD9 // 3545 + SYS_LSETXATTR = 0xDDA // 3546 + SYS___LSETXATTR_A = 0xDDB // 3547 + SYS_REMOVEXATTR = 0xDDC // 3548 + SYS___REMOVEXATTR_A = 0xDDD // 3549 + SYS_SETXATTR = 0xDDE // 3550 + SYS___SETXATTR_A = 0xDDF // 3551 + SYS_FDATASYNC = 0xDE0 // 3552 + SYS_SYNCFS = 0xDE1 // 3553 + SYS_FUTIMES = 0xDE2 // 3554 + SYS_FUTIMESAT = 0xDE3 // 3555 + SYS___FUTIMESAT_A = 0xDE4 // 3556 + SYS_LUTIMES = 0xDE5 // 3557 + SYS___LUTIMES_A = 0xDE6 // 3558 + SYS_INET_ATON = 0xDE7 // 3559 + SYS_GETRANDOM = 0xDE8 // 3560 + SYS_GETTID = 0xDE9 // 3561 + SYS_MEMFD_CREATE = 0xDEA // 3562 + SYS___MEMFD_CREATE_A = 0xDEB // 3563 + SYS_FACCESSAT = 0xDEC // 3564 + SYS___FACCESSAT_A = 0xDED // 3565 + SYS_FCHMODAT = 0xDEE // 3566 + SYS___FCHMODAT_A = 0xDEF // 3567 + SYS_FCHOWNAT = 0xDF0 // 3568 + SYS___FCHOWNAT_A = 0xDF1 // 3569 + SYS_FSTATAT = 0xDF2 // 3570 + SYS___FSTATAT_A = 0xDF3 // 3571 + SYS_LINKAT = 0xDF4 // 3572 + SYS___LINKAT_A = 0xDF5 // 3573 + SYS_MKDIRAT = 0xDF6 // 3574 + SYS___MKDIRAT_A = 0xDF7 // 3575 + SYS_MKFIFOAT = 0xDF8 // 3576 + SYS___MKFIFOAT_A = 0xDF9 // 3577 + SYS_MKNODAT = 0xDFA // 3578 + SYS___MKNODAT_A = 0xDFB // 3579 + SYS_OPENAT = 0xDFC // 3580 + SYS___OPENAT_A = 0xDFD // 3581 + SYS_READLINKAT = 0xDFE // 3582 + SYS___READLINKAT_A = 0xDFF // 3583 + SYS_RENAMEAT = 0xE00 // 3584 + SYS___RENAMEAT_A = 0xE01 // 3585 + SYS_RENAMEAT2 = 0xE02 // 3586 + SYS___RENAMEAT2_A = 0xE03 // 3587 + SYS_SYMLINKAT = 0xE04 // 3588 + SYS___SYMLINKAT_A = 0xE05 // 3589 + SYS_UNLINKAT = 0xE06 // 3590 + SYS___UNLINKAT_A = 0xE07 // 3591 + SYS_SYSINFO = 0xE08 // 3592 + SYS_WAIT4 = 0xE0A // 3594 + SYS_CLONE = 0xE0B // 3595 + SYS_UNSHARE = 0xE0C // 3596 + SYS_SETNS = 0xE0D // 3597 + SYS_CAPGET = 0xE0E // 3598 + SYS_CAPSET = 0xE0F // 3599 + SYS_STRCHRNUL = 0xE10 // 3600 + SYS_PTHREAD_CONDATTR_SETCLOCK = 0xE12 // 3602 + SYS_OPEN_BY_HANDLE_AT = 0xE13 // 3603 + SYS___OPEN_BY_HANDLE_AT_A = 0xE14 // 3604 + SYS___INET_ATON_A = 0xE15 // 3605 + SYS_MOUNT1 = 0xE16 // 3606 + SYS___MOUNT1_A = 0xE17 // 3607 + SYS_UMOUNT1 = 0xE18 // 3608 + SYS___UMOUNT1_A = 
0xE19 // 3609 + SYS_UMOUNT2 = 0xE1A // 3610 + SYS___UMOUNT2_A = 0xE1B // 3611 + SYS___PRCTL_A = 0xE1C // 3612 + SYS_LOCALTIME_R2 = 0xE1D // 3613 + SYS___LOCALTIME_R2_A = 0xE1E // 3614 + SYS_OPENAT2 = 0xE1F // 3615 + SYS___OPENAT2_A = 0xE20 // 3616 + SYS___LE_CEEMICT = 0xE21 // 3617 + SYS_GETENTROPY = 0xE22 // 3618 + SYS_NANOSLEEP = 0xE23 // 3619 + SYS_UTIMENSAT = 0xE24 // 3620 + SYS___UTIMENSAT_A = 0xE25 // 3621 + SYS_ASPRINTF = 0xE26 // 3622 + SYS___ASPRINTF_A = 0xE27 // 3623 + SYS_VASPRINTF = 0xE28 // 3624 + SYS___VASPRINTF_A = 0xE29 // 3625 + SYS_DPRINTF = 0xE2A // 3626 + SYS___DPRINTF_A = 0xE2B // 3627 + SYS_GETOPT_LONG = 0xE2C // 3628 + SYS___GETOPT_LONG_A = 0xE2D // 3629 + SYS_PSIGNAL = 0xE2E // 3630 + SYS___PSIGNAL_A = 0xE2F // 3631 + SYS_PSIGNAL_UNLOCKED = 0xE30 // 3632 + SYS___PSIGNAL_UNLOCKED_A = 0xE31 // 3633 + SYS_FSTATAT_O = 0xE32 // 3634 + SYS___FSTATAT_O_A = 0xE33 // 3635 + SYS_FSTATAT64 = 0xE34 // 3636 + SYS___FSTATAT64_A = 0xE35 // 3637 + SYS___CHATTRAT = 0xE36 // 3638 + SYS_____CHATTRAT_A = 0xE37 // 3639 + SYS___CHATTRAT64 = 0xE38 // 3640 + SYS_____CHATTRAT64_A = 0xE39 // 3641 + SYS_MADVISE = 0xE3A // 3642 + SYS___AUTHENTICATE = 0xE3B // 3643 + ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index eff6bcdef..0036746ea 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1178,7 +1178,8 @@ const ( PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 0x10 PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 0x11 PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 0x12 - PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x13 + PERF_SAMPLE_BRANCH_COUNTERS = 0x80000 + PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x14 PERF_SAMPLE_BRANCH_USER = 0x1 PERF_SAMPLE_BRANCH_KERNEL = 0x2 PERF_SAMPLE_BRANCH_HV = 0x4 @@ -1198,7 +1199,7 @@ const ( PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 PERF_SAMPLE_BRANCH_HW_INDEX = 0x20000 PERF_SAMPLE_BRANCH_PRIV_SAVE = 0x40000 - PERF_SAMPLE_BRANCH_MAX = 0x80000 + PERF_SAMPLE_BRANCH_MAX = 0x100000 PERF_BR_UNKNOWN = 0x0 PERF_BR_COND = 0x1 PERF_BR_UNCOND = 0x2 @@ -2481,6 +2482,15 @@ type XDPMmapOffsets struct { Cr XDPRingOffset } +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Chunk_size uint32 + Headroom uint32 + Flags uint32 + Tx_metadata_len uint32 +} + type XDPStatistics struct { Rx_dropped uint64 Rx_invalid_descs uint64 @@ -2935,7 +2945,7 @@ const ( BPF_TCP_LISTEN = 0xa BPF_TCP_CLOSING = 0xb BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd + BPF_TCP_MAX_STATES = 0xe TCP_BPF_IW = 0x3e9 TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_BPF_DELACK_MAX = 0x3eb @@ -3211,7 +3221,7 @@ const ( DEVLINK_CMD_LINECARD_NEW = 0x50 DEVLINK_CMD_LINECARD_DEL = 0x51 DEVLINK_CMD_SELFTESTS_GET = 0x52 - DEVLINK_CMD_MAX = 0x53 + DEVLINK_CMD_MAX = 0x54 DEVLINK_PORT_TYPE_NOTSET = 0x0 DEVLINK_PORT_TYPE_AUTO = 0x1 DEVLINK_PORT_TYPE_ETH = 0x2 @@ -4595,7 +4605,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x146 + NL80211_ATTR_MAX = 0x149 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -4861,7 +4871,7 @@ const ( NL80211_BSS_FREQUENCY_OFFSET = 0x14 NL80211_BSS_INFORMATION_ELEMENTS = 0x6 NL80211_BSS_LAST_SEEN_BOOTTIME = 0xf - NL80211_BSS_MAX = 0x16 + NL80211_BSS_MAX = 0x18 NL80211_BSS_MLD_ADDR = 0x16 NL80211_BSS_MLO_LINK_ID = 0x15 NL80211_BSS_PAD = 0x10 @@ -4965,7 +4975,7 @@ const ( NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 NL80211_CMD_LEAVE_OCB = 0x6d - NL80211_CMD_MAX = 0x9a + 
NL80211_CMD_MAX = 0x9b NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 NL80211_CMD_MODIFY_LINK_STA = 0x97 NL80211_CMD_NAN_MATCH = 0x78 @@ -5199,7 +5209,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1c + NL80211_FREQUENCY_ATTR_MAX = 0x1f NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 438a30aff..fd402da43 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -477,14 +477,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index adceca355..eb7a5e186 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -492,15 +492,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index eeaa00a37..d78ac108b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -470,15 +470,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 6739aa91d..cd06d47f1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -471,15 +471,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index 9920ef631..2f28fe26c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -472,15 +472,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 2923b799a..71d6cac2f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -476,15 +476,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index ce2750ee4..8596d4535 
100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -474,15 +474,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 3038811d7..cd60ea186 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -474,15 +474,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index efc6fed18..b0ae420c4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -476,15 +476,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 9a654b75a..835972875 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -482,15 +482,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 40d358e33..69eb6a5c6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -481,15 +481,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 148c6ceb8..5f583cb62 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -481,15 +481,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 72ba81543..15adc0414 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -499,15 +499,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 71e765508..cf3ce9003 100644 --- 
a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -495,15 +495,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 4abbdb9de..590b56739 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -476,15 +476,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index 54f31be63..d9a13af46 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -25,10 +25,13 @@ const ( SizeofIPv6Mreq = 20 SizeofICMPv6Filter = 32 SizeofIPv6MTUInfo = 32 + SizeofInet4Pktinfo = 8 + SizeofInet6Pktinfo = 20 SizeofLinger = 8 SizeofSockaddrInet4 = 16 SizeofSockaddrInet6 = 28 SizeofTCPInfo = 0x68 + SizeofUcred = 12 ) type ( @@ -69,12 +72,17 @@ type Utimbuf struct { } type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte + Sysname [16]byte + Nodename [32]byte + Release [8]byte + Version [8]byte + Machine [16]byte +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 } type RawSockaddrInet4 struct { @@ -325,7 +333,7 @@ type Statvfs_t struct { } type Statfs_t struct { - Type uint32 + Type uint64 Bsize uint64 Blocks uint64 Bfree uint64 @@ -336,6 +344,7 @@ type Statfs_t struct { Namelen uint64 Frsize uint64 Flags uint64 + _ [4]uint64 } type direntLE struct { @@ -412,3 +421,126 @@ type W_Mntent struct { Quiesceowner [8]byte _ [38]byte } + +type EpollEvent struct { + Events uint32 + _ int32 + Fd int32 + Pad int32 +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 + Name string +} + +const ( + SizeofInotifyEvent = 0x10 +) + +type ConsMsg2 struct { + Cm2Format uint16 + Cm2R1 uint16 + Cm2Msglength uint32 + Cm2Msg *byte + Cm2R2 [4]byte + Cm2R3 [4]byte + Cm2Routcde *uint32 + Cm2Descr *uint32 + Cm2Msgflag uint32 + Cm2Token uint32 + Cm2Msgid *uint32 + Cm2R4 [4]byte + Cm2DomToken uint32 + Cm2DomMsgid *uint32 + Cm2ModCartptr *byte + Cm2ModConsidptr *byte + Cm2MsgCart [8]byte + Cm2MsgConsid [4]byte + Cm2R5 [12]byte +} + +const ( + CC_modify = 1 + CC_stop = 2 + CONSOLE_FORMAT_2 = 2 + CONSOLE_FORMAT_3 = 3 + CONSOLE_HRDCPY = 0x80000000 +) + +type OpenHow struct { + Flags uint64 + Mode uint64 + Resolve uint64 +} + +const SizeofOpenHow = 0x18 + +const ( + RESOLVE_CACHED = 0x20 + RESOLVE_BENEATH = 0x8 + RESOLVE_IN_ROOT = 0x10 + RESOLVE_NO_MAGICLINKS = 0x2 + RESOLVE_NO_SYMLINKS = 0x4 + RESOLVE_NO_XDEV = 0x1 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + _ [44]byte +} + +type SysvIpcPerm struct { + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode int32 +} + +type SysvShmDesc struct { + Perm SysvIpcPerm + _ [4]byte + Lpid int32 + Cpid int32 + Nattch uint32 + _ [4]byte + _ [4]byte + _ [4]byte + _ int32 + _ uint8 + _ uint8 + _ uint16 + _ *byte + Segsz uint64 + Atime Time_t + Dtime Time_t + Ctime Time_t +} + +type 
SysvShmDesc64 struct { + Perm SysvIpcPerm + _ [4]byte + Lpid int32 + Cpid int32 + Nattch uint32 + _ [4]byte + _ [4]byte + _ [4]byte + _ int32 + _ byte + _ uint8 + _ uint16 + _ *byte + Segsz uint64 + Atime int64 + Dtime int64 + Ctime int64 +} diff --git a/vendor/golang.org/x/sys/windows/aliases.go b/vendor/golang.org/x/sys/windows/aliases.go index ce2d713d6..16f90560a 100644 --- a/vendor/golang.org/x/sys/windows/aliases.go +++ b/vendor/golang.org/x/sys/windows/aliases.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build windows && go1.9 +//go:build windows package windows diff --git a/vendor/golang.org/x/sys/windows/empty.s b/vendor/golang.org/x/sys/windows/empty.s deleted file mode 100644 index ba64caca5..000000000 --- a/vendor/golang.org/x/sys/windows/empty.s +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.12 - -// This file is here to allow bodyless functions with go:linkname for Go 1.11 -// and earlier (see https://golang.org/issue/23311). diff --git a/vendor/modules.txt b/vendor/modules.txt index 09134d264..022b7b6a9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -542,7 +542,7 @@ go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.22.0 +# golang.org/x/crypto v0.23.0 ## explicit; go 1.18 golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 @@ -563,7 +563,7 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/slices -# golang.org/x/net v0.24.0 +# golang.org/x/net v0.25.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html @@ -586,17 +586,17 @@ golang.org/x/oauth2/internal ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/singleflight -# golang.org/x/sys v0.19.0 +# golang.org/x/sys v0.20.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.19.0 +# golang.org/x/term v0.20.0 ## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.14.0 +# golang.org/x/text v0.15.0 ## explicit; go 1.18 golang.org/x/text/encoding golang.org/x/text/encoding/charmap From faaa947b247ffbad5e74c00db319bb6cbea4db53 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 May 2024 17:42:31 +0000 Subject: [PATCH 155/157] chore(deps): bump build-image/debian-base in /pkg/blobplugin Bumps build-image/debian-base from bookworm-v1.0.2 to bookworm-v1.0.3. --- updated-dependencies: - dependency-name: build-image/debian-base dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- pkg/blobplugin/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/blobplugin/Dockerfile b/pkg/blobplugin/Dockerfile index b145383d9..a045136e8 100644 --- a/pkg/blobplugin/Dockerfile +++ b/pkg/blobplugin/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM registry.k8s.io/build-image/debian-base:bookworm-v1.0.2
+FROM registry.k8s.io/build-image/debian-base:bookworm-v1.0.3
 ARG ARCH=amd64
 ARG binary=./_output/${ARCH}/blobplugin

From 8f46ea3b3afe21f6449cb211bf64e0ee92a6757e Mon Sep 17 00:00:00 2001
From: Stephen Mkandawire
Date: Tue, 14 May 2024 15:16:08 -0400
Subject: [PATCH 156/157] Fix pvc_annotator_test.go

---
 pkg/blob/nodeserver.go | 2 +-
 pkg/edgecache/cachevolume/pvc_annotator_test.go | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/pkg/blob/nodeserver.go b/pkg/blob/nodeserver.go
index cd5e5a4ca..4fbdc9b4b 100644
--- a/pkg/blob/nodeserver.go
+++ b/pkg/blob/nodeserver.go
@@ -29,13 +29,13 @@ import (
 	"sigs.k8s.io/blob-csi-driver/pkg/edgecache"
 	cv "sigs.k8s.io/blob-csi-driver/pkg/edgecache/cachevolume"
 	blobcsiutil "sigs.k8s.io/blob-csi-driver/pkg/util"
+	volumehelper "sigs.k8s.io/blob-csi-driver/pkg/util"
 	azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
 	"sigs.k8s.io/cloud-provider-azure/pkg/metrics"
 	"github.com/container-storage-interface/spec/lib/go/csi"
 	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/util"
diff --git a/pkg/edgecache/cachevolume/pvc_annotator_test.go b/pkg/edgecache/cachevolume/pvc_annotator_test.go
index 2b293cfc4..c57a0214f 100644
--- a/pkg/edgecache/cachevolume/pvc_annotator_test.go
+++ b/pkg/edgecache/cachevolume/pvc_annotator_test.go
@@ -25,6 +25,7 @@ import (
 	"k8s.io/client-go/kubernetes/fake"
 	"github.com/stretchr/testify/assert"
+	"sigs.k8s.io/cloud-provider-azure/pkg/azclient"
 	"sigs.k8s.io/cloud-provider-azure/pkg/provider/config"
 )
@@ -152,7 +153,7 @@ func TestSendProvisionVolumeSuccess(t *testing.T) {
 	testcases := []SuccessTestCase{
 		{
 			name: "ValidWIAuth",
-			config: config.AzureAuthConfig{UseFederatedWorkloadIdentityExtension: true},
+			config: config.AzureAuthConfig{AzureAuthConfig: azclient.AzureAuthConfig{UseFederatedWorkloadIdentityExtension: true}},
 			blobAuth: NewBlobAuth(suffix, acct, container, "", "", "WorkloadIdentity"),
 			expectedAnnotations: map[string]string{
 				volumeStateAnnotation: "not created",

From 120ad7bffddf9be18cfebd524e40bbca81f82ba6 Mon Sep 17 00:00:00 2001
From: Stephen Mkandawire
Date: Fri, 17 May 2024 15:05:38 -0400
Subject: [PATCH 157/157] upgrade to sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.19

---
 go.mod | 2 +-
 go.sum | 4 ++--
 vendor/modules.txt | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/go.mod b/go.mod
index 7c51b2b06..0b18b448e 100644
--- a/go.mod
+++ b/go.mod
@@ -155,7 +155,7 @@ require (
 	k8s.io/kubelet v0.29.3 // indirect
 	k8s.io/pod-security-admission v0.29.4
 	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect
-	sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.15
+	sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.19
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
 )
diff --git a/go.sum b/go.sum
index 8c4ec0137..e210795ab 100644
--- a/go.sum
+++ b/go.sum
@@ -476,8 +476,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2S
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y=
 sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240418020948-86cfc443b48c h1:jiFjp43Ayaz78K+yK0bVrOVMvQ6QrIW/XTi0r3uAA5g=
 sigs.k8s.io/cloud-provider-azure v1.27.1-0.20240418020948-86cfc443b48c/go.mod h1:1vNSbzbKZwTZFc/kcVGFpcTjcys3qcRtPT3ZZtmg2WA=
-sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.15 h1:dAvrFZFLIVoOXTSrU5jTLyUdNlyaExrBqD2YvyRjpI4=
-sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.15/go.mod h1:gd4xk5narc09IQvDR8xyr5FyDmB0Y49+bLOUlMaGecs=
+sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.19 h1:G0PillnfWBYB26Yqa50ce0oU29aUkP95c3g3kis3CRk=
+sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.19/go.mod h1:d5hbS2VV55CcaYd2WTKNjpRFgURgjNR18hkxXyo13OQ=
 sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.10 h1:xRjihqpmd3rFPWcNlaQ7anxNpOyf5PPQPbwKNzAfaTM=
 sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.10/go.mod h1:/0IfKCHKIGzBFDwtMCZ/Jl2DrCJjWqqfN0BvN3+9LcM=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 5b32d6fbf..af8d8de7a 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1624,7 +1624,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/util/string
 sigs.k8s.io/cloud-provider-azure/pkg/util/taints
 sigs.k8s.io/cloud-provider-azure/pkg/util/vm
 sigs.k8s.io/cloud-provider-azure/pkg/version
-# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.15
+# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.19
 ## explicit; go 1.20
 sigs.k8s.io/cloud-provider-azure/pkg/azclient
 sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient